Add Batch 3fd75d36-e872-44df-993c-b5e8065c6218
This view is limited to 50 files because it contains too many changes. See raw diff.
- .gitattributes +64 -0
- 2203.10xxx/2203.10443/6daea8a6-87fa-4fcf-9b9d-5cef99f9e400_content_list.json +890 -0
- 2203.10xxx/2203.10443/6daea8a6-87fa-4fcf-9b9d-5cef99f9e400_model.json +1088 -0
- 2203.10xxx/2203.10443/6daea8a6-87fa-4fcf-9b9d-5cef99f9e400_origin.pdf +3 -0
- 2203.10xxx/2203.10443/full.md +203 -0
- 2203.10xxx/2203.10443/images.zip +3 -0
- 2203.10xxx/2203.10443/layout.json +0 -0
- 2203.10xxx/2203.10444/a14277f4-9805-49c5-8141-e66a860f32a2_content_list.json +1504 -0
- 2203.10xxx/2203.10444/a14277f4-9805-49c5-8141-e66a860f32a2_model.json +0 -0
- 2203.10xxx/2203.10444/a14277f4-9805-49c5-8141-e66a860f32a2_origin.pdf +3 -0
- 2203.10xxx/2203.10444/full.md +346 -0
- 2203.10xxx/2203.10444/images.zip +3 -0
- 2203.10xxx/2203.10444/layout.json +0 -0
- 2203.10xxx/2203.10446/d7999c52-f0e4-4d71-8708-fdcdae51a890_content_list.json +1993 -0
- 2203.10xxx/2203.10446/d7999c52-f0e4-4d71-8708-fdcdae51a890_model.json +0 -0
- 2203.10xxx/2203.10446/d7999c52-f0e4-4d71-8708-fdcdae51a890_origin.pdf +3 -0
- 2203.10xxx/2203.10446/full.md +374 -0
- 2203.10xxx/2203.10446/images.zip +3 -0
- 2203.10xxx/2203.10446/layout.json +0 -0
- 2203.10xxx/2203.10465/7daffd3a-1fdb-4369-b5ad-2de6805c1054_content_list.json +1542 -0
- 2203.10xxx/2203.10465/7daffd3a-1fdb-4369-b5ad-2de6805c1054_model.json +2033 -0
- 2203.10xxx/2203.10465/7daffd3a-1fdb-4369-b5ad-2de6805c1054_origin.pdf +3 -0
- 2203.10xxx/2203.10465/full.md +305 -0
- 2203.10xxx/2203.10465/images.zip +3 -0
- 2203.10xxx/2203.10465/layout.json +0 -0
- 2203.10xxx/2203.10541/b63ce456-502b-4012-b6a3-0c6523da6183_content_list.json +1999 -0
- 2203.10xxx/2203.10541/b63ce456-502b-4012-b6a3-0c6523da6183_model.json +0 -0
- 2203.10xxx/2203.10541/b63ce456-502b-4012-b6a3-0c6523da6183_origin.pdf +3 -0
- 2203.10xxx/2203.10541/full.md +402 -0
- 2203.10xxx/2203.10541/images.zip +3 -0
- 2203.10xxx/2203.10541/layout.json +0 -0
- 2203.10xxx/2203.10545/4847a6d1-0ac3-446d-a898-a4ec88a50a0e_content_list.json +0 -0
- 2203.10xxx/2203.10545/4847a6d1-0ac3-446d-a898-a4ec88a50a0e_model.json +0 -0
- 2203.10xxx/2203.10545/4847a6d1-0ac3-446d-a898-a4ec88a50a0e_origin.pdf +3 -0
- 2203.10xxx/2203.10545/full.md +435 -0
- 2203.10xxx/2203.10545/images.zip +3 -0
- 2203.10xxx/2203.10545/layout.json +0 -0
- 2203.10xxx/2203.10552/0f90ec88-1526-4a07-920a-d6c1d8306159_content_list.json +0 -0
- 2203.10xxx/2203.10552/0f90ec88-1526-4a07-920a-d6c1d8306159_model.json +0 -0
- 2203.10xxx/2203.10552/0f90ec88-1526-4a07-920a-d6c1d8306159_origin.pdf +3 -0
- 2203.10xxx/2203.10552/full.md +476 -0
- 2203.10xxx/2203.10552/images.zip +3 -0
- 2203.10xxx/2203.10552/layout.json +0 -0
- 2203.10xxx/2203.10555/a9f52836-7f7e-414f-badd-670fdccd2125_content_list.json +0 -0
- 2203.10xxx/2203.10555/a9f52836-7f7e-414f-badd-670fdccd2125_model.json +0 -0
- 2203.10xxx/2203.10555/a9f52836-7f7e-414f-badd-670fdccd2125_origin.pdf +3 -0
- 2203.10xxx/2203.10555/full.md +402 -0
- 2203.10xxx/2203.10555/images.zip +3 -0
- 2203.10xxx/2203.10555/layout.json +0 -0
- 2203.10xxx/2203.10576/3d1e200d-486c-45fe-a2fc-9629576fe0af_content_list.json +1761 -0
.gitattributes
CHANGED
@@ -6972,3 +6972,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 2203.12xxx/2203.12297/65cdc797-acba-43ae-a72c-3cdae7fa2f8e_origin.pdf filter=lfs diff=lfs merge=lfs -text
 2203.12xxx/2203.12667/d30780b5-db51-4e93-86de-5ccc89355ba9_origin.pdf filter=lfs diff=lfs merge=lfs -text
 2203.14xxx/2203.14912/2a677f1d-77bf-4419-bbb2-9ebe7232521f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10443/6daea8a6-87fa-4fcf-9b9d-5cef99f9e400_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10444/a14277f4-9805-49c5-8141-e66a860f32a2_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10446/d7999c52-f0e4-4d71-8708-fdcdae51a890_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10465/7daffd3a-1fdb-4369-b5ad-2de6805c1054_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10541/b63ce456-502b-4012-b6a3-0c6523da6183_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10545/4847a6d1-0ac3-446d-a898-a4ec88a50a0e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10552/0f90ec88-1526-4a07-920a-d6c1d8306159_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10555/a9f52836-7f7e-414f-badd-670fdccd2125_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10576/3d1e200d-486c-45fe-a2fc-9629576fe0af_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10593/c215dc2b-06d5-43ba-8004-8633d9fee776_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10638/705645ef-3874-43b3-ba74-4e7125e5ce61_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10642/4e3dbeae-f9c9-4c1d-86a7-26f9ece2b35a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10652/39760505-2e43-4568-85bc-462d8d3f9062_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10681/7b3ab8f4-d8c9-427d-a826-965783c52b58_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10702/77942139-0b08-46e1-8b6c-10e425dbe365_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10705/e98ee250-c8c9-424f-a4ac-76dbaf33a9c5_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10708/06af9637-0c6a-4de6-a94d-01c32a8e8f69_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10716/406e08b9-9b35-4691-96c5-83e19425b4b0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10739/3c833610-fb97-4a5b-b197-03f48b428e73_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10748/55082499-539e-41cc-936b-c72a55090983_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10789/6aa3e9a0-996e-477f-bdb5-9dae696cb418_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10790/344a2678-d466-4cfe-91d8-e2a24f50ed4c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10794/1cb80bac-bb47-41f4-b0fe-bb1528674eb7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10796/14c47596-4878-4d1e-80b8-48a63c34451a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10800/d7a730ff-cfb4-4079-a1d3-2d6780da6a33_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10807/c480dab4-7829-482d-aa08-74ca11ecfc2f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10808/6205188a-8cf7-4fb4-8272-dd91a1c42dcd_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10819/8b71500e-2bd5-4930-8eef-08a16963251f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10833/1dc6ecce-1cd0-4413-ba86-4a0f3baf7282_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10856/0c76d919-3fa4-463c-90cf-6070a43c2c09_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10885/33922130-682c-40ac-9887-608696830d2d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10886/9619a950-0dd5-418a-821a-d6fa76eaaebd_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10887/f6790c09-1688-4c80-ba2a-c6677bfdce12_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10897/74cd508f-e76f-411f-97df-4c8afccf647e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10900/e359f538-3793-4761-84d2-deda40427c85_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10945/f3a6987b-fe1f-4800-baa9-ba2e858526ac_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10977/3162ff20-f913-4f69-b9c0-5a1a6047ffd7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10981/ebd53c33-a2db-4726-bf18-0e58849dd9d4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10983/d1f7da14-6df2-4bee-8522-adbeef1ead2a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.10xxx/2203.10995/818c190e-8563-43b9-8d9c-0821931e1005_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.11xxx/2203.11006/45a68187-f575-4cc6-be16-5c154bcde426_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.11xxx/2203.11055/e9211ab2-18c8-4dc3-b474-bd04fab8344d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.11xxx/2203.11070/838185ab-4e42-4e03-abb7-78296d05c0fe_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.11xxx/2203.11082/bd07815e-93aa-4f69-9249-5ed95f424901_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.11xxx/2203.11085/c1614104-3a3c-4db5-b6e3-d3445f24a1bc_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.11xxx/2203.11086/585ac54d-435f-4669-a685-15452886f23a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.11xxx/2203.11089/2dfc5d2e-64e6-4999-bea0-90c2cc97092e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.11xxx/2203.11092/e6bca8d3-6e11-4adf-9ec7-4c9945e9c023_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.11xxx/2203.11136/74aad41f-afe9-42d0-9935-b554897f3bd4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.11xxx/2203.11139/40f2f47f-eaaa-4769-ac11-802031987be2_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.11xxx/2203.11147/5cbf1c8c-291a-44b0-9d7f-a7541fd99385_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.11xxx/2203.11167/9a81741e-c46a-46d8-b532-1825ebeb3217_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.11xxx/2203.11171/4bd80001-2c39-431c-a505-88416374b08c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.11xxx/2203.11178/6930f3d1-1bf7-4a00-ba30-6fa209494c4d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.11xxx/2203.11183/f63c1100-eaa9-4f0e-9252-838018ade98e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.11xxx/2203.11191/ae137d8e-ca71-48be-8ca1-0cba6f0cbb59_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.11xxx/2203.11192/94a657be-5db0-4cf4-bc83-2767f51fccbe_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.11xxx/2203.11205/219d9461-6b7f-4d0b-aa3f-7bdcbbb7382f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.11xxx/2203.11207/82a71c93-b7d3-4e82-8ef1-38d1e464d28e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.11xxx/2203.11213/5b1c9a07-01c7-4bf7-8917-c55bd48c0632_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.11xxx/2203.11242/a73120ed-2c0a-4a17-a1e4-a558501fd395_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.11xxx/2203.11258/63bfb159-7d60-4e7b-a4ef-ba2ab5d466b4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.11xxx/2203.11283/2720a692-4cc0-45c1-86e6-424b4773c7ff_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.12xxx/2203.12041/14885a32-e92a-4e06-9e34-6a09173bf715_origin.pdf filter=lfs diff=lfs merge=lfs -text
2203.10xxx/2203.10443/6daea8a6-87fa-4fcf-9b9d-5cef99f9e400_content_list.json
ADDED
@@ -0,0 +1,890 @@
+[
+  {"type": "text", "text": "Quantum Multi-Agent Reinforcement Learning via Variational Quantum Circuit Design", "text_level": 1, "bbox": [88, 63, 908, 131], "page_idx": 0},
+  {"type": "text", "text": "†Won Joon Yun, †Yunseok Kwak, †Jae Pyoung Kim, §Hyunhee Cho,", "bbox": [241, 150, 750, 167], "page_idx": 0},
+  {"type": "text", "text": "$\\ddagger$ Soyi Jung, $\\circ$ Jihong Park, and $\\dagger$ Joongheon Kim", "bbox": [318, 167, 671, 184], "page_idx": 0},
+  {"type": "text", "text": "†School of Electrical Engineering, Korea University, Seoul, Republic of Korea", "bbox": [233, 185, 763, 200], "page_idx": 0},
+  {"type": "text", "text": "$^{\\S}$ School of Electronic and Electrical Engineering, Sungkyunkwan University, Suwon, Republic of Korea", "bbox": [148, 200, 848, 215], "page_idx": 0},
+  {"type": "text", "text": "$^{\\ddagger}$ School of Software, Hallym University, Chuncheon, Republic of Korea", "bbox": [254, 215, 741, 231], "page_idx": 0},
+  {"type": "text", "text": "$^{\\circ}$ School of Information Technology, Deakin University, Geelong, Victoria, Australia", "bbox": [215, 232, 781, 247], "page_idx": 0},
+  {"type": "text", "text": "Abstract-In recent years, quantum computing (QC) has been getting a lot of attention from industry and academia. Especially, among various QC research topics, variational quantum circuit (VQC) enables quantum deep reinforcement learning (QRL). Many studies of QRL have shown that the QRL is superior to the classical reinforcement learning (RL) methods under the constraints of the number of training parameters. This paper extends and demonstrates the QRL to quantum multi-agent RL (QMARL). However, the extension of QRL to QMARL is not straightforward due to the challenge of the noise intermediate-scale quantum (NISQ) and the non-stationary properties in classical multi-agent RL (MARL). Therefore, this paper proposes the centralized training and decentralized execution (CTDE) QMARL framework by designing novel VQCs for the framework to cope with these issues. To corroborate the QMARL framework, this paper conducts the QMARL demonstration in a single-hop environment where edge agents offload packets to clouds. The extensive demonstration shows that the proposed QMARL framework enhances $57.7\\%$ of total reward than classical frameworks.", "bbox": [73, 273, 491, 513], "page_idx": 0},
+  {"type": "text", "text": "Index Terms—Quantum deep learning, Multi-agent reinforcement learning, Quantum computing", "bbox": [73, 527, 488, 555], "page_idx": 0},
+  {"type": "text", "text": "I. INTRODUCTION", "text_level": 1, "bbox": [215, 578, 351, 592], "page_idx": 0},
+  {"type": "text", "text": "The recent advances in computing hardware and deep learning algorithms have spurred the ground-breaking developments in distributed learning and multi-agent reinforcement learning (MARL) [1]. The forthcoming innovations in quantum computing hardware and algorithms will accelerate or even revolutionize this trend [2], motivating this research on quantum MARL (QMARL). Indeed, quantum algorithms have huge potential in reducing model parameters without compromising accuracy by taking advantage of quantum entanglement [3]. A remarkable example is the variational quantum circuit (VQC) architecture, also known as a quantum neural network (QNN) [4], [5], which integrates a quantum circuit into a classical deep neural network. The resultant hybrid quantum-classical model enables quantum reinforcement learning (QRL) that is on par with classical reinforcement learning with more model parameters [6], [7], which can accelerate the training and inference speed while saving computing resources [8]. Inspired from this success, in this article we aim to extend QRL to QMARL by integrating VQC into classical MARL. This problem is non-trivial due to the", "bbox": [73, 604, 491, 906], "page_idx": 0},
+  {"type": "text", "text": "trade-off between quantum errors and MARL training stability as we shall elaborate next.", "bbox": [503, 273, 919, 301], "page_idx": 0},
+  {"type": "text", "text": "In MARL, each agent interacts with other agents in a cooperative or competitive scenario. Such agent interactions often incur the non-stationary reward of each agent, hindering the MARL training convergence. A standard way to cope with this MARL non-stationarity is the centralized training and decentralized execution (CTDE) method wherein the reward is given simultaneously to all agents by concatenating their state-action pairs. In this respect, one can naively implement a VQC version of CTDE as in [6]. Unfortunately, since QRL under VQC represents the state-action pairs using qubits, such a nive CTDE QMARL implementation requires the qubits increasing with the number of agents, and incurs the quantum errors increasing with the number of qubits [9], hindering the MARL convergence and scalability. Under the current noise intermediate-scale quantum (NISQ) era (up to a few hundreds qubits), it is difficult to correct such type of quantum errors due to the insufficient number of qubits. Instead, the quantum errors brought on by quantum gate operations can be properly controlled under NISQ [9]. Motivated from this, we apply a quantum state encoding method to CTDE QMARL, which reduces the dimension of the state-action pairs by making them pass through a set of quantum gates.", "bbox": [501, 303, 921, 635], "page_idx": 0},
+  {"type": "text", "text": "Contributions. The major contributions of this research can be summarized as follows.", "bbox": [503, 635, 919, 662], "page_idx": 0},
+  {"type": "list", "sub_type": "text", "list_items": [
+    "- We propose novel QMARL by integrating CTDE and quantum state encoding into VQC based MARL.",
+    "- By experiments, we demonstrate that the proposed QMARL framework achieves $57.7\\%$ higher total rewards compared to classical MARL baselines under a multiple edge-to-cloud queue management scenario."
+  ], "bbox": [519, 666, 921, 757], "page_idx": 0},
+  {"type": "text", "text": "II. QUANTUM COMPUTING AND CIRCUIT", "text_level": 1, "bbox": [566, 763, 859, 777], "page_idx": 0},
+  {"type": "text", "text": "A. Quantum Computing in a Nutshell", "text_level": 1, "bbox": [503, 782, 764, 797], "page_idx": 0},
+  {"type": "text", "text": "Quantum computing utilizes a qubit as the basic unit of computation. The qubit represents a quantum superposition state between two basis states, which denoted as $|0\\rangle$ and $|1\\rangle$ . Mathematically, there are two ways to describe a qubit state:", "bbox": [501, 800, 921, 861], "page_idx": 0},
+  {"type": "equation", "text": "\n$$\n\\begin{array}{l} | \\psi \\rangle = \\alpha | 0 \\rangle + \\beta | 1 \\rangle , \\mathrm {w h e r e} \\| \\alpha \\| _ {2} ^ {2} + \\| \\beta \\| _ {2} ^ {2} = 1 \\\\ | \\psi \\rangle = \\cos (\\delta / 2) | 0 \\rangle + e ^ {i \\varphi} \\sin (\\delta / 2) | 1 \\rangle , \\forall \\delta , \\varphi \\in [ - \\pi , \\pi ]. \\\\ \\end{array}\n$$\n", "text_format": "latex", "bbox": [535, 867, 890, 904], "page_idx": 0},
+  {"type": "aside_text", "text": "arXiv:2203.10443v1 [quant-ph] 20 Mar 2022", "bbox": [22, 251, 60, 724], "page_idx": 0},
+  {"type": "image", "img_path": "images/0bfa51d20d56a666162a57626047e63678811bad4749422f9583812a48dc4754.jpg", "image_caption": ["Fig. 1: The illustration of VQC."], "image_footnote": [], "bbox": [81, 61, 486, 239], "page_idx": 1},
+  {"type": "text", "text": "The former is based on a normalized 2D complex vector, while the latter is based on polar coordinates $(\\delta, \\varphi)$ from a geometric viewpoint. The qubit state is mapped into the surface of a 3-dimensional unit sphere, which is called Bloch sphere. In addition, a quantum gate is a unitary operator transforming a qubit state. For example, $R_{\\mathrm{x}}(\\delta), R_{\\mathrm{y}}(\\delta)$ , and $R_{\\mathrm{z}}(\\delta)$ are rotation operator gates by rotating $\\delta$ around their corresponding axes in the Bloch sphere. These gates are dealing with a single qubit. In contrast, there are quantum gates which operate on multiple qubits, called controlled rotation gates. They act on a qubit according to the signal of several control qubits, which generates quantum entanglement between those qubits. Among them, a Controlled- $X$ (or CNOT) gate is one of widely used control gates which changes the sign of the second qubit if the first qubit is $|1\\rangle$ . These gates allow quantum algorithms to work with their features on VQC, which will be for QMARL.", "bbox": [73, 276, 491, 517], "page_idx": 1},
+  {"type": "text", "text": "B. Variational Quantum Circuit (VQC)", "text_level": 1, "bbox": [73, 527, 344, 542], "page_idx": 1},
+  {"type": "text", "text": "VQC is a quantum circuit that utilizes learnable parameters to perform various numerical tasks, including estimation, optimization, approximation, and classification. As shown in Fig. 1, the operation of the general VQC model can be divided into three steps. The first one is state encoding step $U_{enc}$ , and in this step, a classical input information is encoded into corresponding qubit states, which can be treated in the quantum circuit. The next step is variational step $U_{var}$ , and it is for entangling qubit states by controlled gates and rotating qubits by parameterized rotation gates. This process can be repeated in multi-layers with more parameters, which enhances the performance of the circuit. The last one is measurement step $\\mathcal{M}$ , which measures the expectation value of qubit state according to its corresponding computational bases. This process can be formulated as follows:", "bbox": [73, 547, 490, 773], "page_idx": 1},
+  {"type": "equation", "text": "\n$$\nf (x; \\theta) = \\otimes \\Pi_ {M \\in \\mathcal {M}} \\langle 0 | U _ {e n c} ^ {\\dagger} (x) U _ {v a r} ^ {\\dagger} (\\theta) M U _ {v a r} (\\theta) U _ {e n c} (x) | 0 \\rangle ,\n$$\n", "text_format": "latex", "bbox": [76, 781, 488, 800], "page_idx": 1},
+  {"type": "text", "text": "where $\\otimes$ stands for the qubit superposition operator; $f(x;\\theta)$ is the output of VQC with inputs $x$ and circuit parameter $\\theta$ ; $\\mathcal{M}$ is the set of quantum measurement bases in VQC with $|\\mathcal{M}|\\leq n_{qubit}$ where $n_{qubit}$ is the number of qubits. The example of the state encoder in Fig. 1 can be expressed as follows:", "bbox": [73, 806, 491, 883], "page_idx": 1},
+  {"type": "equation", "text": "\n$$\nU _ {e n c} (s _ {0}, s _ {4}, s _ {8}, s _ {1 2}) = R _ {\\mathrm {x}} (s _ {1 2}) \\cdot R _ {\\mathrm {z}} (s _ {8}) \\cdot R _ {\\mathrm {y}} (s _ {4}) \\cdot R _ {\\mathrm {x}} (s _ {0}).\n$$\n", "text_format": "latex", "bbox": [84, 891, 480, 909], "page_idx": 1},
+  {"type": "image", "img_path": "images/ec760407e76539493ef7189193c4ce4c008c774db76d9f06eb70039c46fcdddc.jpg", "image_caption": ["Fig. 2: The structure of QMARL framework."], "image_footnote": [], "bbox": [506, 63, 918, 247], "page_idx": 1},
+  {"type": "text", "text": "The quantum circuit parameters are updated every training epoch, toward the direction of optimizing the objective output value from VQC. Through this process, VQC is known to be able to approximate any continuous function, which is similar to classical neural network computation [10]. Therefore, VQC is also called quantum neural network (QNN) [11]. In this paper, two different VQCs are used to approximate the optimal actions of actor and the accurate state value of critic.", "bbox": [501, 285, 919, 404], "page_idx": 1},
+  {"type": "text", "text": "III. QUANTUM MARL FRAMEWORK", "text_level": 1, "bbox": [580, 414, 841, 428], "page_idx": 1},
+  {"type": "text", "text": "A. QMARL Architecture", "text_level": 1, "bbox": [503, 431, 671, 446], "page_idx": 1},
+  {"type": "text", "text": "Our proposed QMARL is decentralized for scalability, every agent in the QMARL has a VQC-based policy, i.e., agents do not require communications among agents. Fig. 2 shows the VQC that is used in quantum actor (refer to Sec. III-A1) and critic (refer to Sec. III-A2).", "bbox": [501, 450, 919, 526], "page_idx": 1},
+  {"type": "text", "text": "1) Quantum Actor: For the quantum actor, the VQC will be used to calculate the probabilities of actions of each agent. Then, the quantum policy is written as follows:", "bbox": [503, 527, 919, 571], "page_idx": 1},
+  {"type": "equation", "text": "\n$$\n\\pi_ {\\theta} \\left(u _ {t} \\mid o _ {t}\\right) = \\operatorname {s o f t m a x} \\left(f \\left(o _ {t}; \\theta\\right)\\right),\n$$\n", "text_format": "latex", "bbox": [606, 580, 818, 595], "page_idx": 1},
+  {"type": "text", "text": "where $\\operatorname{softmax}(\\mathbf{x}) \\triangleq \\left[\\frac{e^{x_1}}{\\sum_{i=1}^N e^{x_i}}, \\dots, \\frac{e^{x_i}}{\\sum_{i=1}^N e^{x_i}}, \\dots, \\frac{e^{x_N}}{\\sum_{i=1}^N e^{x_i}}\\right]$ . At time $t$ , the actor policy of $n$ -th agent makes action decision with the given observation $o_t^n$ , which is denoted as $\\pi_{\\theta^n}(a_t^n | o_t^n)$ . Note that $\\theta^n$ denotes parameters of $n$ -th actor. Then, the action $u_t^n$ is computed as follows:", "bbox": [503, 603, 919, 685], "page_idx": 1},
+  {"type": "equation", "text": "\n$$\nu _ {t} ^ {n} = \\arg \\max _ {u} \\pi_ {\\theta^ {n}} \\left(u _ {t} ^ {n} \\mid o _ {t} ^ {n}\\right).\n$$\n", "text_format": "latex", "bbox": [619, 691, 803, 714], "page_idx": 1},
+  {"type": "text", "text": "2) Quantum Centralized Critic: We adopt the centralized critic for CTDE as a state-value function. At $t$ , the parameterized critic estimates the discounted returns given $s_t$ as follows:", "bbox": [503, 718, 919, 763], "page_idx": 1},
+  {"type": "equation", "text": "\n$$\nV ^ {\\psi} (s _ {t}) = f (s _ {t}; \\psi) \\simeq \\mathbb {E} \\left[ \\sum_ {t ^ {\\prime} = t} ^ {T} \\gamma^ {t ^ {\\prime} - t} \\cdot r (s _ {t ^ {\\prime}}, \\mathbf {u} _ {t ^ {\\prime}}) \\Big | s _ {t} = s \\right],\n$$\n", "text_format": "latex", "bbox": [524, 768, 898, 811], "page_idx": 1},
+  {"type": "text", "text": "where $\\gamma, T, \\mathbf{u}_t$ , and $r(s_{t'}, \\mathbf{u}_{t'})$ stand for a discounted factor $\\gamma \\in [0,1)$ , an episode length, the actions of all agents, and reward functions, respectively. In addition, $\\psi$ presents trainable parameters of a critic. Note that $s_t$ is the ground truth at $t$ . Note that the state encoding is used as shown in green box in Fig. 1 because the state size is larger than the size in observation.", "bbox": [501, 816, 919, 906], "page_idx": 1},
+  {"type": "code", "sub_type": "algorithm", "code_caption": ["Algorithm 1: CTDE-based QMARL Training"], "code_body": "1 Initialize the parameters of actor-critic networks and the replay buffer; $\\Theta \\triangleq \\{\\theta^1,\\dots ,\\theta^N\\}$ $\\psi ,\\phi ,\\mathcal{D} = \\{\\}$ \n2 repeat \n3 $t = 0,s_0 =$ initial state; \n4 while $s_t\\neq$ terminal and $t < e$ episode limit do \n5 for each agent n do \n6 Calculate $\\pi_{\\theta^n}(u_t^n |o_t^n)$ and sample $u_t^n$ . \n7 end \n8 Get reward $r_t$ and next state and observations $s_{t + 1}$ $\\mathbf{o}_{t + 1} = \\{o_1^1,\\dots ,o_t^N\\}$ \n9 $\\mathcal{D} = \\mathcal{D}\\cup \\{(s_t,\\mathbf{o}_t,\\mathbf{u}_t,r_t,s_{t + 1},\\mathbf{o}_{t + 1})\\}$ \n10 $t = t + 1$ step $= \\mathrm{step} + 1$ \n11 end \n12 for each timestep t in each episode in batch D do \n13 Get $V^{\\psi}(s_t);V^{\\phi}(s_{t + 1})$ \n14 Calculate the target $y_{t}$ \n15 end \n16 Calculate $\\nabla_{\\Theta}J,\\nabla_{\\psi}$ ,and update $\\Theta ,\\psi$ \n17 if target update period then \n18 Update the target network, $\\phi \\gets \\psi$ \n19 end \n20 until obtaining optimal policies;", "bbox": [76, 80, 436, 335], "page_idx": 2},
+  {"type": "table", "img_path": "images/e216814231e2389c34bea126edb81cc8f894b300a56990ddeb9c32ee2262a67c.jpg", "table_caption": ["TABLE I: The MDP of a single-hop offloading environment."], "table_footnote": [], "table_body": "<table><tr><td>Observation</td><td>otn=Δ{qt,e,n, qt-1} ∪Kk=1{qt,c,k}</td></tr><tr><td>Action</td><td>utn∈A≡I×P</td></tr><tr><td>○Destination space</td><td>I△{1,···,K}</td></tr><tr><td>○Packet amount space</td><td>P△{pmin,···,pmax}</td></tr><tr><td>State</td><td>st=Δ∪n=1N{oqn}</td></tr><tr><td>Reward</td><td>r(st, ut) in (1)</td></tr></table>", "bbox": [102, 378, 460, 486], "page_idx": 2},
+  {"type": "table", "img_path": "images/67d6aebb0fb933ec397daff90f401a37addb6ca0f3433600583c11b5038b9e2d.jpg", "table_caption": ["TABLE II: The experiment parameters."], "table_footnote": [], "table_body": "<table><tr><td>Parameters</td><td>Values</td></tr><tr><td>The numbers of clouds and edge agents (K, N)</td><td>2, 4</td></tr><tr><td>The packet amount space (P)</td><td>{0.1, 0.2}</td></tr><tr><td>The hyper-parameters of environment (wP, wR)</td><td>(0.3, 4)</td></tr><tr><td>Transmitted packets from the cloud (btc,k)</td><td>0.3</td></tr><tr><td>The capacity of queue (qmax)</td><td>1</td></tr><tr><td>Optimizer</td><td>Adam</td></tr><tr><td>The number of gates in Var</td><td>50</td></tr><tr><td>The number of qubits of actor/critic</td><td>4</td></tr><tr><td>Learning rate of actor/critic</td><td>1 × 10-4, 1 × 10-5</td></tr></table>", "bbox": [76, 513, 496, 647], "page_idx": 2},
+  {"type": "text", "text": "IV. EXPERIMENTS AND DEMONSTRATIONS", "text_level": 1, "bbox": [130, 665, 434, 679], "page_idx": 2},
+  {"type": "text", "text": "A. Single-Hop Offloading Environment", "text_level": 1, "bbox": [73, 691, 339, 705], "page_idx": 2},
+  {"type": "text", "text": "The environment used in this paper consists of $K$ clouds and $N$ edges. The clouds and edges have queues $q^{c}$ and $q^{e}$ that temporally store packets. All edge agents offload their packets to clouds. The queue dynamics are as follows:", "bbox": [73, 713, 490, 773], "page_idx": 2},
+  {"type": "equation", "text": "\n$$\nq _ {t + 1} ^ {i, k} = \\operatorname {c l i p} \\left(q _ {t} ^ {i, k} - u _ {t} ^ {i, k} + b _ {t} ^ {i, k}, 0, q _ {\\max }\\right),\n$$\n", "text_format": "latex", "bbox": [148, 784, 415, 805], "page_idx": 2},
+  {"type": "text", "text": "where the superscript $i \\in \\{c, e\\}$ identifies the cloud and an edge device. The terms $u_{t}^{i,k}$ and $b_{t}^{i,n}$ imply the total transmitting packet size and the packet arrival of $k$ -th cloud or $n$ -th edge, respectively. Note that $u_{t}^{e,n}$ is $n$ -th edge agent's action. In addition, a clipping function is defined as $\\operatorname{clip}(x, x_{\\min}, x_{\\max}) \\triangleq \\min(x_{\\max}, \\max(x, x_{\\min}))$ .", "bbox": [73, 816, 491, 909], "page_idx": 2},
+  {"type": "image", "img_path": "images/abc6dead6c7348add17468b85f16c63d736c6b9c8ff448c0859a24482eefb6b9.jpg", "image_caption": ["(a)"], "image_footnote": [], "bbox": [517, 63, 699, 169], "page_idx": 2},
+  {"type": "image", "img_path": "images/90feccc10e886b3244dc12bdc5325c7b41478733d64720415cf9be7e4e58094d.jpg", "image_caption": ["(b)"], "image_footnote": [], "bbox": [720, 63, 900, 169], "page_idx": 2},
+  {"type": "image", "img_path": "images/05ac774f70f75fad06f1bd8ce683598b849e99ab095f42d7035a794e8bd042f3.jpg", "image_caption": ["(c)"], "image_footnote": [], "bbox": [517, 184, 697, 276], "page_idx": 2},
+  {"type": "image", "img_path": "images/54322457be47bebe5d31f48a79a729228e3953eb8ef7903864aef685617d289e.jpg", "image_caption": ["(d)", "Fig. 3: The experimental result of various metrics with comparing different MARL frameworks."], "image_footnote": [], "bbox": [722, 185, 900, 276], "page_idx": 2},
+  {"type": "text", "text": "B. Training", "text_level": 1, "bbox": [504, 344, 588, 359], "page_idx": 2},
+  {"type": "text", "text": "The objective of MARL agents is to maximize the discounted returns. To derive the gradients, we leverage the joint state-value function $V^{\\psi}$ . Our framework uses an multi-agent policy gradient (MAPG), which can be formulated as follows:", "bbox": [501, 362, 919, 422], "page_idx": 2},
+  {"type": "equation", "text": "\n$$\n\\nabla_ {\\theta^ {n}} J = - \\mathbb {E} _ {\\pi} \\left[ \\sum_ {t = 1} ^ {T} \\sum_ {n = 1} ^ {N} y _ {t} \\nabla_ {\\theta^ {n}} \\log \\pi_ {\\theta} \\left(u _ {t} ^ {n} \\mid o _ {t} ^ {n}\\right) \\right], \\nabla_ {\\psi} J = \\nabla_ {\\psi} \\sum_ {t = 1} ^ {T} \\| y _ {t} \\| ^ {2}\n$$\n", "text_format": "latex", "bbox": [504, 428, 929, 469], "page_idx": 2},
+  {"type": "text", "text": "s.t. $y_{t} = r(s_{t},\\mathbf{u}_{t}) + \\gamma V^{\\phi}(s_{t + 1}) - V^{\\psi}(s_{t})$ , and $\\phi$ is the parameters of target critic. The detailed procedure is in Algorithm 1.", "bbox": [503, 474, 919, 507], "page_idx": 2},
+  {"type": "text", "text": "In this paper, we assume that the capacities of edges and clouds are all limited to $q_{\\mathrm{max}}$ and edge agents receive packets from previous hops, where the distribution is uniform $\\forall t_t^{e,n}\\sim \\mathcal{U}(0,w_{\\mathcal{P}}\\cdot q_{\\mathrm{max}})$ . The objective of this scenario is to minimize the total amount of overflowed queue and the event that the queue is empty. Thus, the reward $r(s_{t},\\mathbf{u}_{\\mathbf{t}})$ can be as follows:", "bbox": [503, 507, 919, 598], "page_idx": 2},
+  {"type": "equation", "text": "\n$$\n- \\sum_ {k = 1} ^ {K} \\left[ \\mathbb {1} _ {\\left(q _ {t + 1} ^ {c, k} = 0\\right)} \\cdot \\tilde {q} _ {t} ^ {c, k} + \\mathbb {1} _ {\\left(q _ {t + 1} ^ {c, k} = q _ {\\max }\\right)} \\cdot \\hat {q} _ {t} ^ {c, k} \\cdot w _ {\\mathcal {R}} \\right], \\tag {1}\n$$\n", "text_format": "latex", "bbox": [531, 604, 919, 646], "page_idx": 2},
+  {"type": "text", "text": "s.t. $\\tilde{q}_t^{c,k} = |q_t^{c,k} - u_t^{c,k} + b_t^{c,k}|$ and $\\hat{q}_t^{c,k} = |q_{\\max} - \\tilde{q}_t^{c,k}|$ , where $w_{\\mathcal{R}}$ is the hyperparameter of rewards. Note that $r(s_t, \\mathbf{u_t}) \\in [-\\infty, 0]$ (negative) because we consider the occurrence of abnormal queue states (e.g., queue overflow or underflow) as a negative reward. The Markov decision process (MDP) of this environment is presented in Table I.", "bbox": [503, 650, 921, 744], "page_idx": 2},
+  {"type": "text", "text": "C. Experimental and Demonstration Setup", "text_level": 1, "bbox": [504, 753, 795, 768], "page_idx": 2},
+  {"type": "text", "text": "To verify the effectiveness of the proposed QMARL framework (named, Proposed), we compare our proposed QMARL with two comparing methods. Here, 'Comp1' is a CTDE hybrid QMARL framework where the actors use a VQC-based policy and the centralized critic uses a classical neural networks. In addition, 'Comp2' is a CTDE classical MARL framework that is not related to quantum algorithms. Note that the trainable parameters of these three frameworks are all set to 50 for actor and critic computation. Lastly, 'Comp3' is a", "bbox": [501, 771, 921, 907], "page_idx": 2},
+  {"type": "image", "img_path": "images/9a836bdddb155a6342f38c3074596a3c86cb40b3f733b99a5de153f9f5727334.jpg", "image_caption": ["Fig. 4: The demonstration of QMARL framework."], "image_footnote": [], "bbox": [78, 59, 919, 200], "page_idx": 3},
+  {"type": "text", "text": "classical CTDE MARL where the number of parameters is more than 40K. The simulation parameter settings are listed in Table II. We use python libraries (e.g., torchquantum and pytorch) for deploying VQCs and DL methods, which support GPU acceleration [12]. In addition, all experiments are conducted on AMD Ryzen™ Threadripper™ 1950x and NVIDIA RTX 3090. We have confirmed that the training time of QMARL for 1,000 epochs is not expensive ( $\\approx$ 35 minutes).", "bbox": [73, 229, 491, 351], "page_idx": 3},
+  {"type": "text", "text": "D. Evaluation Results", "text_level": 1, "bbox": [73, 364, 230, 378], "page_idx": 3},
+  {"type": "text", "text": "1) Reward Convergence: Fig. 3 presents the demonstration results. As shown in Fig. 3(a), the reward of QMARL frameworks is around -3.0 for Proposed and -16.6 for Comp1, whereas the classical MARL frameworks record -22.5 for Comp2 and -2.8 for Comp3, respectively. We calculate the achievability as min-max normalization with the average returns of random walk. Note that the random walk records -33.2 on average. The achievability of QMARL frameworks is $90.9\\%$ for Proposed and $49.8\\%$ for Comp1. However, the classical MARL frameworks achieve $33.2\\%$ for Comp2 and $91.5\\%$ for Comp3. In summary, the proposed QMARL outperforms hybrid QMARL and classical MARL under the constraint of the number of trainable parameters.", "bbox": [73, 386, 491, 583], "page_idx": 3},
+  {"type": "text", "text": "2) Performance: The average queue states of edges/clouds and clouds are 0.460 for Proposed, 0.480 for Comp1, 0.510 for Comp2, and 0.453 for Comp3, respectively. The ratio of the number of empty queue events records in a high order of Comp2, Comp1, Proposed, and Comp3. However, the overflowed queue is low with the order of Proposed, Comp3, Comp2, and Comp1. According to Fig. 3(a-d), the QMARL framework outperforms both classical and hybrid quantum-classical MARL frameworks under the constraints of the number of trainable parameters.", "bbox": [73, 584, 491, 734], "page_idx": 3},
+  {"type": "text", "text": "E. Demonstration", "text_level": 1, "bbox": [73, 750, 200, 763], "page_idx": 3},
+  {"type": "text", "text": "Due to high network latency of utilizing quantum clouds, we conduct demonstration on simulation. Fig. 4 shows the visualization of the workflow of our QMARL framework. The superpositioned qubit states (i.e., magnitude and, phase vector) are expressed as $4 \\times 4$ heatmap in hue-lightness-saturation color system. We provide source codes<sup>1</sup> including QMARL, the single-hop environment, and the simulator.", "bbox": [73, 772, 490, 878], "page_idx": 3},
+  {"type": "text", "text": "V. CONCLUDING REMARKS AND FUTURE WORK", "text_level": 1, "bbox": [539, 229, 885, 243], "page_idx": 3},
+  {"type": "text", "text": "This paper introduces quantum computing concepts to MARL, i.e., QMARL. To resolve the challenge of QMARL, we adopt VQC with state encoding and the concept of CTDE. From the single-hop environment, we verify the superiority of QMARL corresponding to the number of trainable parameters and the feasibility of QMARL. As a future work direction, the implementation of QMARL to the quantum cloud (e.g., IBM quantum, Xanadu, or IonQ) should be interest because the impact of noise is considerable on quantum computing.", "bbox": [503, 248, 921, 383], "page_idx": 3},
+  {"type": "text", "text": "Acknowledgement. This research was supported by the National Research Foundation of Korea (2022R1A2C2004869 and 2021R1A4A1030775). W.J. Yun and Y. Kwan contributed equally to this work. S. Jung, J. Park, and J. Kim are corresponding authors.", "bbox": [503, 395, 921, 470], "page_idx": 3},
+  {"type": "text", "text": "REFERENCES", "text_level": 1, "bbox": [663, 479, 761, 492], "page_idx": 3},
+  {"type": "list", "sub_type": "ref_text", "list_items": [
+    "[1] J. Park, S. Samarakoon, A. Elgabli, J. Kim, M. Dennis, S.-L. Kim, and M. Debbah, \"Communication-efficient and distributed learning over wireless networks: Principles and applications,\" Proceedings of the IEEE, vol. 109, no. 5, pp. 796-819, 2021.",
+    "[2] M. Schuld and N. Killoran, \"Is quantum advantage the right goal for quantum machine learning?\" CoRR, vol. abs:2203.01340, 2022.",
+    "[3] S. Oh, J. Choi, and J. Kim, \"A tutorial on quantum convolutional neural networks (QCNN),\" in Proc. of IEEE Int'l Conf. on ICT Convergence (ICTC), October 2020.",
+    "[4] Z. Hong, J. Wang, X. Qu, X. Zhu, J. Liu, and J. Xiao, \"Quantum convolutional neural network on protein distance prediction,\" in Proc. IEEE Int'l Joint Conf. on Neural Networks (IJCNN), July 2021.",
+    "[5] Y. Kwak, W. J. Yun, S. Jung, and J. Kim, \"Quantum neural networks: Concepts, applications, and challenges,\" in Proc. IEEE Int'l Conf. on Ubiquitous and Future Networks (ICUFN), August 2021.",
+    "[6] S. Y.-C. Chen, C.-H. H. Yang, J. Qi, P.-Y. Chen, X. Ma, and H.-S. Goan, \"Variational quantum circuits for deep reinforcement learning,\" IEEE Access, vol. 8, pp. 141007-141024, 2020.",
+    "[7] Y. Kwak, W. J. Yun, S. Jung, J.-K. Kim, and J. Kim, \"Introduction to quantum reinforcement learning: Theory and PennyLane-based implementation,\" in Proc. IEEE Int'l Conf. on ICT Convergence (ICTC), October 2021.",
+    "[8] G. Carleo, I. Cirac, K. Cranmer, L. Daudet, M. Schuld, N. Tishby, L. Vogt-Maranto, and L. Zdeborova, \"Machine learning and the physical sciences,\" Reviews of Modern Physics, vol. 91, no. 4, p. 045002, 2019.",
+    "[9] P. W. Shor, \"Scheme for reducing decoherence in quantum computer memory,\" Physical Review A, vol. 52, no. 4, p. R2493, 1995.",
+    "[10] J. Biamonte, \"Universal variational quantum computation,\" Physical Review A, vol. 103, no. 3, p. L030401, 2021.",
+    "[11] N. Wiebe, A. Kapoor, and K. M. Svore, \"Quantum deep learning,\" CoRR, vol. abs/1412.3489, 2014.",
+    "[12] H. Wang, Y. Ding, J. Gu, Z. Li, Y. Lin, D. Z. Pan, F. T. Chong, and S. Han, \"QuantumNAS: Noise-adaptive search for robust quantum circuits,\" in Proc. IEEE Int'l Symposium on High-Performance Computer Architecture (HPCA), April 2022."
+  ], "bbox": [506, 500, 921, 896], "page_idx": 3},
+  {"type": "page_footnote", "text": "1 https://github.com/WonJoon-Yun/Quantum-Multi-Agent-Reinforcement-Learning", "bbox": [89, 893, 372, 906], "page_idx": 3}
+]
2203.10xxx/2203.10443/6daea8a6-87fa-4fcf-9b9d-5cef99f9e400_model.json
ADDED
@@ -0,0 +1,1088 @@
[
  [
    {
      "type": "aside_text",
      "bbox": [0.023, 0.252, 0.061, 0.726],
      "angle": 270,
      "content": "arXiv:2203.10443v1 [quant-ph] 20 Mar 2022"
    },
    {
      "type": "title",
      "bbox": [0.089, 0.064, 0.91, 0.132],
      "angle": 0,
      "content": "Quantum Multi-Agent Reinforcement Learning via Variational Quantum Circuit Design"
    },
    {
      "type": "text",
      "bbox": [0.242, 0.151, 0.751, 0.168],
      "angle": 0,
      "content": "†Won Joon Yun, †Yunseok Kwak, †Jae Pyoung Kim, §Hyunhee Cho,"
    },
    {
      "type": "text",
      "bbox": [0.32, 0.169, 0.673, 0.185],
      "angle": 0,
      "content": "\\(\\ddagger\\)Soyi Jung, \\(\\circ\\)Jihong Park, and \\(\\dagger\\)Joongheon Kim"
    },
    {
      "type": "text",
      "bbox": [0.235, 0.186, 0.764, 0.201],
      "angle": 0,
      "content": "†School of Electrical Engineering, Korea University, Seoul, Republic of Korea"
    },
    {
      "type": "text",
      "bbox": [0.15, 0.201, 0.849, 0.216],
      "angle": 0,
      "content": "\\(^{\\S}\\)School of Electronic and Electrical Engineering, Sungkyunkwan University, Suwon, Republic of Korea"
    },
    {
      "type": "text",
      "bbox": [0.256, 0.217, 0.743, 0.232],
      "angle": 0,
      "content": "\\(^{\\ddagger}\\)School of Software, Hallym University, Chuncheon, Republic of Korea"
    },
    {
      "type": "text",
      "bbox": [0.217, 0.233, 0.782, 0.248],
      "angle": 0,
      "content": "\\(^{\\circ}\\)School of Information Technology, Deakin University, Geelong, Victoria, Australia"
    },
    {
      "type": "text",
      "bbox": [0.075, 0.275, 0.493, 0.515],
      "angle": 0,
      "content": "Abstract-In recent years, quantum computing (QC) has been receiving significant attention from industry and academia. In particular, among various QC research topics, the variational quantum circuit (VQC) enables quantum deep reinforcement learning (QRL). Many QRL studies have shown that QRL is superior to classical reinforcement learning (RL) methods under constraints on the number of trainable parameters. This paper extends QRL to quantum multi-agent RL (QMARL) and demonstrates it. However, the extension of QRL to QMARL is not straightforward due to the challenges of noisy intermediate-scale quantum (NISQ) devices and the non-stationarity of classical multi-agent RL (MARL). Therefore, this paper proposes a centralized training and decentralized execution (CTDE) QMARL framework, designing novel VQCs for the framework to cope with these issues. To corroborate the QMARL framework, this paper conducts a QMARL demonstration in a single-hop environment where edge agents offload packets to clouds. The extensive demonstration shows that the proposed QMARL framework achieves a \\(57.7\\%\\) higher total reward than classical frameworks."
    },
    {
      "type": "text",
      "bbox": [0.075, 0.528, 0.49, 0.556],
      "angle": 0,
      "content": "Index Terms—Quantum deep learning, Multi-agent reinforcement learning, Quantum computing"
    },
    {
      "type": "title",
      "bbox": [0.217, 0.579, 0.352, 0.593],
      "angle": 0,
      "content": "I. INTRODUCTION"
    },
    {
      "type": "text",
      "bbox": [0.074, 0.606, 0.493, 0.907],
      "angle": 0,
      "content": "The recent advances in computing hardware and deep learning algorithms have spurred ground-breaking developments in distributed learning and multi-agent reinforcement learning (MARL) [1]. The forthcoming innovations in quantum computing hardware and algorithms will accelerate or even revolutionize this trend [2], motivating this research on quantum MARL (QMARL). Indeed, quantum algorithms have huge potential for reducing model parameters without compromising accuracy by taking advantage of quantum entanglement [3]. A remarkable example is the variational quantum circuit (VQC) architecture, also known as a quantum neural network (QNN) [4], [5], which integrates a quantum circuit into a classical deep neural network. The resultant hybrid quantum-classical model enables quantum reinforcement learning (QRL) that is on par with classical reinforcement learning using more model parameters [6], [7], which can accelerate training and inference while saving computing resources [8]. Inspired by this success, in this article we aim to extend QRL to QMARL by integrating VQC into classical MARL. This problem is non-trivial due to the"
    },
    {
      "type": "text",
      "bbox": [0.504, 0.274, 0.921, 0.302],
      "angle": 0,
      "content": "trade-off between quantum errors and MARL training stability, as we shall elaborate next."
    },
    {
      "type": "text",
      "bbox": [0.503, 0.304, 0.922, 0.636],
      "angle": 0,
      "content": "In MARL, each agent interacts with other agents in a cooperative or competitive scenario. Such agent interactions often incur a non-stationary reward for each agent, hindering MARL training convergence. A standard way to cope with this MARL non-stationarity is the centralized training and decentralized execution (CTDE) method, wherein the reward is given simultaneously to all agents by concatenating their state-action pairs. In this respect, one can naively implement a VQC version of CTDE as in [6]. Unfortunately, since QRL under VQC represents the state-action pairs using qubits, such a naive CTDE QMARL implementation requires a number of qubits that grows with the number of agents, and incurs quantum errors that grow with the number of qubits [9], hindering MARL convergence and scalability. In the current noisy intermediate-scale quantum (NISQ) era (up to a few hundred qubits), it is difficult to correct this type of quantum error due to the insufficient number of qubits. Instead, the quantum errors brought on by quantum gate operations can be properly controlled under NISQ [9]. Motivated by this, we apply a quantum state encoding method to CTDE QMARL, which reduces the dimension of the state-action pairs by making them pass through a set of quantum gates."
    },
    {
      "type": "text",
      "bbox": [0.504, 0.636, 0.921, 0.664],
      "angle": 0,
      "content": "Contributions. The major contributions of this research can be summarized as follows."
    },
    {
      "type": "text",
      "bbox": [0.521, 0.667, 0.922, 0.697],
      "angle": 0,
      "content": "- We propose a novel QMARL framework by integrating CTDE and quantum state encoding into VQC-based MARL."
    },
    {
      "type": "text",
      "bbox": [0.521, 0.698, 0.922, 0.758],
      "angle": 0,
      "content": "- By experiments, we demonstrate that the proposed QMARL framework achieves \\(57.7\\%\\) higher total rewards compared to classical MARL baselines under a multiple edge-to-cloud queue management scenario."
    },
    {
      "type": "list",
      "bbox": [0.521, 0.667, 0.922, 0.758],
      "angle": 0,
      "content": null
    },
    {
      "type": "title",
      "bbox": [0.567, 0.765, 0.86, 0.779],
      "angle": 0,
      "content": "II. QUANTUM COMPUTING AND CIRCUIT"
    },
    {
      "type": "title",
      "bbox": [0.504, 0.783, 0.766, 0.798],
      "angle": 0,
      "content": "A. Quantum Computing in a Nutshell"
    },
    {
      "type": "text",
      "bbox": [0.503, 0.801, 0.922, 0.862],
      "angle": 0,
      "content": "Quantum computing utilizes the qubit as its basic unit of computation. A qubit represents a quantum superposition between two basis states, which are denoted as \\(|0\\rangle\\) and \\(|1\\rangle\\). Mathematically, there are two ways to describe a qubit state:"
    },
    {
      "type": "equation",
      "bbox": [0.537, 0.868, 0.892, 0.905],
      "angle": 0,
      "content": "\\[\n\\begin{array}{l} |\\psi\\rangle = \\alpha|0\\rangle + \\beta|1\\rangle, \\ \\mathrm{where} \\ \\|\\alpha\\|_2^2 + \\|\\beta\\|_2^2 = 1, \\\\ |\\psi\\rangle = \\cos(\\delta/2)|0\\rangle + e^{i\\varphi}\\sin(\\delta/2)|1\\rangle, \\ \\forall \\delta, \\varphi \\in [-\\pi, \\pi]. \\end{array}\n\\]"
    }
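As a concrete illustration of the two equivalent descriptions in the extracted equation above, here is a minimal NumPy sketch (an editorial aid, not code from the paper or its repository) that builds a qubit state, checks its normalization, and recovers the polar form:

```python
import numpy as np

# A qubit state |psi> = alpha|0> + beta|1> as a normalized 2D complex vector.
alpha, beta = 1 / np.sqrt(2), 1j / np.sqrt(2)
psi = np.array([alpha, beta])
assert np.isclose(np.linalg.norm(psi), 1.0)  # |alpha|^2 + |beta|^2 = 1

# Equivalent polar (Bloch-sphere) parameterization:
# |psi> = cos(delta/2)|0> + e^{i*varphi} sin(delta/2)|1>.
delta = 2 * np.arccos(np.abs(alpha))
varphi = np.angle(beta) - np.angle(alpha)
psi_polar = np.exp(1j * np.angle(alpha)) * np.array(
    [np.cos(delta / 2), np.exp(1j * varphi) * np.sin(delta / 2)]
)
assert np.allclose(psi, psi_polar)  # same state up to a global phase
```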
  ],
  [
    {
      "type": "image",
      "bbox": [0.082, 0.062, 0.487, 0.24],
      "angle": 0,
      "content": null
    },
    {
      "type": "image_caption",
      "bbox": [0.173, 0.246, 0.393, 0.261],
      "angle": 0,
      "content": "Fig. 1: The illustration of VQC."
    },
    {
      "type": "text",
      "bbox": [0.074, 0.277, 0.492, 0.518],
      "angle": 0,
      "content": "The former is based on a normalized 2D complex vector, while the latter is based on polar coordinates \\((\\delta, \\varphi)\\) from a geometric viewpoint. The qubit state is mapped onto the surface of a 3-dimensional unit sphere, called the Bloch sphere. In addition, a quantum gate is a unitary operator transforming a qubit state. For example, \\(R_{\\mathrm{x}}(\\delta)\\), \\(R_{\\mathrm{y}}(\\delta)\\), and \\(R_{\\mathrm{z}}(\\delta)\\) are rotation operator gates that rotate by \\(\\delta\\) around their corresponding axes of the Bloch sphere. These gates act on a single qubit. In contrast, there are quantum gates that operate on multiple qubits, called controlled rotation gates. They act on a qubit according to the signal of several control qubits, which generates quantum entanglement between those qubits. Among them, the Controlled-\\(X\\) (or CNOT) gate is one of the most widely used controlled gates; it flips the second qubit if the first qubit is \\(|1\\rangle\\). These gates allow quantum algorithms to exploit quantum features in VQCs, which will be used for QMARL."
    },
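The gate zoo described above is easy to sanity-check numerically. The following sketch, assuming only NumPy, constructs the rotation gates from the Pauli matrices, verifies unitarity, and shows the CNOT flipping the target qubit; it is illustrative and independent of the paper's torchquantum implementation:

```python
import numpy as np

# Pauli matrices; R_a(delta) = exp(-i * delta/2 * a) rotates around axis a.
X = np.array([[0, 1], [1, 0]], dtype=complex)
Y = np.array([[0, -1j], [1j, 0]])
Z = np.array([[1, 0], [0, -1]], dtype=complex)

def rot(pauli, delta):
    return np.cos(delta / 2) * np.eye(2) - 1j * np.sin(delta / 2) * pauli

for pauli in (X, Y, Z):  # rotation gates are unitary: R R^dagger = I
    R = rot(pauli, 0.3)
    assert np.allclose(R @ R.conj().T, np.eye(2))

# CNOT flips the second (target) qubit iff the first (control) qubit is |1>.
CNOT = np.array([[1, 0, 0, 0],
                 [0, 1, 0, 0],
                 [0, 0, 0, 1],
                 [0, 0, 1, 0]])
ket_10 = np.kron([0, 1], [1, 0])                             # |10>
assert np.allclose(CNOT @ ket_10, np.kron([0, 1], [0, 1]))   # CNOT|10> = |11>
```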
    {
      "type": "title",
      "bbox": [0.075, 0.528, 0.345, 0.543],
      "angle": 0,
      "content": "B. Variational Quantum Circuit (VQC)"
    },
    {
      "type": "text",
      "bbox": [0.074, 0.548, 0.491, 0.775],
      "angle": 0,
      "content": "VQC is a quantum circuit that utilizes learnable parameters to perform various numerical tasks, including estimation, optimization, approximation, and classification. As shown in Fig. 1, the operation of the general VQC model can be divided into three steps. The first is the state encoding step \\(U_{enc}\\), in which classical input information is encoded into corresponding qubit states that can be processed in the quantum circuit. The next is the variational step \\(U_{var}\\), which entangles qubit states with controlled gates and rotates qubits with parameterized rotation gates. This process can be repeated over multiple layers with more parameters, which enhances the performance of the circuit. The last is the measurement step \\(\\mathcal{M}\\), which measures the expectation values of the qubit states with respect to their computational bases. This process can be formulated as follows:"
    },
    {
      "type": "equation",
      "bbox": [0.077, 0.782, 0.49, 0.801],
      "angle": 0,
      "content": "\\[\nf(x; \\theta) = \\otimes \\Pi_{M \\in \\mathcal{M}} \\langle 0 | U_{enc}^{\\dagger}(x) U_{var}^{\\dagger}(\\theta) M U_{var}(\\theta) U_{enc}(x) | 0 \\rangle,\n\\]"
    },
    {
      "type": "text",
      "bbox": [0.075, 0.808, 0.492, 0.884],
      "angle": 0,
      "content": "where \\(\\otimes\\) stands for the qubit superposition operator; \\(f(x;\\theta)\\) is the output of the VQC with input \\(x\\) and circuit parameters \\(\\theta\\); and \\(\\mathcal{M}\\) is the set of quantum measurement bases in the VQC, with \\(|\\mathcal{M}| \\leq n_{qubit}\\), where \\(n_{qubit}\\) is the number of qubits. The state encoder example in Fig. 1 can be expressed as follows:"
    },
    {
      "type": "equation",
      "bbox": [0.085, 0.892, 0.482, 0.91],
      "angle": 0,
      "content": "\\[\nU_{enc}(s_0, s_4, s_8, s_{12}) = R_{\\mathrm{x}}(s_{12}) \\cdot R_{\\mathrm{z}}(s_8) \\cdot R_{\\mathrm{y}}(s_4) \\cdot R_{\\mathrm{x}}(s_0).\n\\]"
    },
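To ground the measurement formula and the state encoder, here is a deliberately reduced single-qubit statevector sketch of \(f(x;\theta)\): one encoding rotation \(R_x\), one variational rotation \(R_y\), and a Pauli-\(Z\) measurement. The multi-qubit entangling layers of the actual VQC are omitted, so treat this as a toy instance of the formula, not the paper's circuit:

```python
import numpy as np

I2 = np.eye(2)
X = np.array([[0, 1], [1, 0]], dtype=complex)
Z = np.array([[1, 0], [0, -1]], dtype=complex)

def rx(d):  # encoding rotation R_x(d)
    return np.cos(d / 2) * I2 - 1j * np.sin(d / 2) * X

def ry(d):  # variational rotation R_y(d)
    return np.array([[np.cos(d / 2), -np.sin(d / 2)],
                     [np.sin(d / 2),  np.cos(d / 2)]])

def vqc(x, theta):
    """f(x; theta) = <0| U_enc^dag(x) U_var^dag(theta) Z U_var(theta) U_enc(x) |0>."""
    ket0 = np.array([1, 0], dtype=complex)
    state = ry(theta) @ rx(x) @ ket0          # U_var(theta) U_enc(x) |0>
    return np.real(state.conj() @ Z @ state)  # expectation of measurement M = Z

print(vqc(x=0.5, theta=0.1))  # a scalar in [-1, 1]
```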
    {
      "type": "image",
      "bbox": [0.508, 0.064, 0.919, 0.248],
      "angle": 0,
      "content": null
    },
    {
      "type": "image_caption",
      "bbox": [0.558, 0.256, 0.866, 0.271],
      "angle": 0,
      "content": "Fig. 2: The structure of QMARL framework."
    },
    {
      "type": "text",
      "bbox": [0.503, 0.286, 0.921, 0.405],
      "angle": 0,
      "content": "The quantum circuit parameters are updated every training epoch, in the direction that optimizes the objective output value of the VQC. Through this process, a VQC is known to be able to approximate any continuous function, similar to classical neural network computation [10]. Therefore, the VQC is also called a quantum neural network (QNN) [11]. In this paper, two different VQCs are used to approximate the actor's optimal actions and the critic's state value."
    },
    {
      "type": "title",
      "bbox": [0.581, 0.415, 0.843, 0.429],
      "angle": 0,
      "content": "III. QUANTUM MARL FRAMEWORK"
    },
    {
      "type": "title",
      "bbox": [0.504, 0.433, 0.673, 0.447],
      "angle": 0,
      "content": "A. QMARL Architecture"
    },
    {
      "type": "text",
      "bbox": [0.503, 0.452, 0.921, 0.527],
      "angle": 0,
      "content": "Our proposed QMARL is decentralized for scalability: every agent in the QMARL has a VQC-based policy, i.e., agents do not require communications among themselves. Fig. 2 shows the VQC used in the quantum actor (refer to Sec. III-A1) and critic (refer to Sec. III-A2)."
    },
    {
      "type": "text",
      "bbox": [0.504, 0.528, 0.92, 0.573],
      "angle": 0,
      "content": "1) Quantum Actor: For the quantum actor, the VQC is used to calculate the probabilities of each agent's actions. The quantum policy is then written as follows:"
    },
    {
      "type": "equation",
      "bbox": [0.607, 0.581, 0.819, 0.597],
      "angle": 0,
      "content": "\\[\n\\pi_{\\theta}(u_t \\mid o_t) = \\operatorname{softmax}(f(o_t; \\theta)),\n\\]"
    },
    {
      "type": "text",
      "bbox": [0.504, 0.604, 0.92, 0.686],
      "angle": 0,
      "content": "where \\(\\operatorname{softmax}(\\mathbf{x}) \\triangleq \\left[\\frac{e^{x_1}}{\\sum_{i=1}^N e^{x_i}}, \\dots, \\frac{e^{x_i}}{\\sum_{i=1}^N e^{x_i}}, \\dots, \\frac{e^{x_N}}{\\sum_{i=1}^N e^{x_i}}\\right]\\). At time \\(t\\), the actor policy of the \\(n\\)-th agent makes an action decision given the observation \\(o_t^n\\), which is denoted as \\(\\pi_{\\theta^n}(u_t^n \\mid o_t^n)\\). Note that \\(\\theta^n\\) denotes the parameters of the \\(n\\)-th actor. Then, the action \\(u_t^n\\) is computed as follows:"
    },
    {
      "type": "equation",
      "bbox": [0.62, 0.693, 0.805, 0.715],
      "angle": 0,
      "content": "\\[\nu_t^n = \\arg\\max_{u} \\pi_{\\theta^n}(u \\mid o_t^n).\n\\]"
    },
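A small Python sketch of the quantum actor's decision rule follows; the VQC measurement outputs are stubbed with fixed numbers, since only the softmax-and-argmax post-processing is being illustrated:

```python
import numpy as np

def softmax(x):
    e = np.exp(x - np.max(x))  # subtract the max for numerical stability
    return e / e.sum()

# Stand-in for the measured VQC outputs f(o_t; theta), one value per action.
f_out = np.array([0.31, -0.12, 0.87, 0.05])
pi = softmax(f_out)            # pi_theta(u_t | o_t), a probability vector
u_t = int(np.argmax(pi))       # greedy action u_t^n
print(pi, u_t)
```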
    {
      "type": "text",
      "bbox": [0.504, 0.719, 0.92, 0.765],
      "angle": 0,
      "content": "2) Quantum Centralized Critic: We adopt the centralized critic for CTDE as a state-value function. At time \\(t\\), the parameterized critic estimates the discounted return given \\(s_t\\) as follows:"
    },
    {
      "type": "equation",
      "bbox": [0.526, 0.77, 0.899, 0.812],
      "angle": 0,
      "content": "\\[\nV^{\\psi}(s_t) = f(s_t; \\psi) \\simeq \\mathbb{E}\\left[\\sum_{t'=t}^{T} \\gamma^{t'-t} \\cdot r(s_{t'}, \\mathbf{u}_{t'}) \\,\\Big|\\, s_t = s\\right],\n\\]"
    },
    {
      "type": "text",
      "bbox": [0.503, 0.817, 0.921, 0.907],
      "angle": 0,
      "content": "where \\(\\gamma\\), \\(T\\), \\(\\mathbf{u}_t\\), and \\(r(s_{t'}, \\mathbf{u}_{t'})\\) stand for the discount factor \\(\\gamma \\in [0,1)\\), the episode length, the actions of all agents, and the reward function, respectively. In addition, \\(\\psi\\) denotes the trainable parameters of the critic. Note that \\(s_t\\) is the ground truth at \\(t\\), and that state encoding is used (the green box in Fig. 1) because the state size is larger than the observation size."
    }
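For reference, the discounted return that the critic regresses onto can be computed backwards over an episode, as in this short sketch (the reward values are hypothetical):

```python
import numpy as np

def discounted_returns(rewards, gamma=0.99):
    """For each t, compute sum_{t'=t}^{T} gamma^{t'-t} * r_{t'}, i.e., the
    quantity V^psi(s_t) is trained to approximate."""
    G, out = 0.0, np.zeros(len(rewards))
    for t in reversed(range(len(rewards))):
        G = rewards[t] + gamma * G
        out[t] = G
    return out

print(discounted_returns([-1.0, -0.5, 0.0]))
```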
  ],
  [
    {
      "type": "code_caption",
      "bbox": [0.084, 0.064, 0.398, 0.079],
      "angle": 0,
      "content": "Algorithm 1: CTDE-based QMARL Training"
    },
    {
      "type": "algorithm",
      "bbox": [0.078, 0.082, 0.437, 0.337],
      "angle": 0,
      "content": "1 Initialize the parameters of the actor-critic networks and the replay buffer: \\(\\Theta \\triangleq \\{\\theta^1, \\dots, \\theta^N\\}\\), \\(\\psi\\), \\(\\phi\\), \\(\\mathcal{D} = \\{\\}\\)\n2 repeat\n3 \\(t = 0\\), \\(s_0 =\\) initial state;\n4 while \\(s_t \\neq\\) terminal and \\(t <\\) episode limit do\n5 for each agent \\(n\\) do\n6 Calculate \\(\\pi_{\\theta^n}(u_t^n \\mid o_t^n)\\) and sample \\(u_t^n\\).\n7 end\n8 Get reward \\(r_t\\), next state \\(s_{t+1}\\), and observations \\(\\mathbf{o}_{t+1} = \\{o_{t+1}^1, \\dots, o_{t+1}^N\\}\\)\n9 \\(\\mathcal{D} = \\mathcal{D} \\cup \\{(s_t, \\mathbf{o}_t, \\mathbf{u}_t, r_t, s_{t+1}, \\mathbf{o}_{t+1})\\}\\)\n10 \\(t = t + 1\\), step \\(= \\mathrm{step} + 1\\)\n11 end\n12 for each timestep \\(t\\) in each episode in batch \\(\\mathcal{D}\\) do\n13 Get \\(V^{\\psi}(s_t)\\), \\(V^{\\phi}(s_{t+1})\\)\n14 Calculate the target \\(y_t\\)\n15 end\n16 Calculate \\(\\nabla_{\\Theta} J\\), \\(\\nabla_{\\psi} J\\), and update \\(\\Theta, \\psi\\)\n17 if target update period then\n18 Update the target network, \\(\\phi \\gets \\psi\\)\n19 end\n20 until obtaining optimal policies;"
    },
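As a reading aid for Algorithm 1, the following runnable toy skeleton mirrors its control flow. Everything in it (the random stand-in environment, tabular actor/critic, and the schematic parameter updates) is a hypothetical placeholder, not the authors' VQC-based implementation:

```python
import random

N_AGENTS, EPISODE_LIMIT, GAMMA, TARGET_PERIOD = 4, 20, 0.99, 40

def actor_sample(theta, obs):   # stand-in for sampling from pi_theta(u | o)
    return random.choice([0, 1])

def critic(psi, state):         # stand-in for V^psi(s)
    return psi * sum(state)

thetas = [0.0] * N_AGENTS       # Theta = {theta^1, ..., theta^N}
psi, phi = 0.1, 0.1             # critic and target-critic parameters
step = 0
for episode in range(3):        # "repeat ... until obtaining optimal policies"
    buffer, t = [], 0
    state = [0.5] * N_AGENTS    # s_0 = initial state; observations = state here
    while t < EPISODE_LIMIT:
        u = [actor_sample(th, s) for th, s in zip(thetas, state)]  # decentralized execution
        next_state = [min(1.0, max(0.0, s - 0.1 * a + 0.1 * random.random()))
                      for s, a in zip(state, u)]
        reward = -sum(next_state)
        buffer.append((state, u, reward, next_state))
        state, t, step = next_state, t + 1, step + 1
    # Centralized training: TD targets y_t from the joint state-value function.
    ys = [r + GAMMA * critic(phi, s2) - critic(psi, s1)
          for (s1, _, r, s2) in buffer]
    psi -= 1e-3 * sum(ys)                          # schematic update of psi
    thetas = [th + 1e-3 * sum(ys) for th in thetas]  # schematic update of Theta
    if step % TARGET_PERIOD == 0:
        phi = psi                                  # target update: phi <- psi
```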
    {
      "type": "table_caption",
      "bbox": [0.076, 0.358, 0.49, 0.373],
      "angle": 0,
      "content": "TABLE I: The MDP of a single-hop offloading environment."
    },
    {
      "type": "table",
      "bbox": [0.104, 0.38, 0.462, 0.487],
      "angle": 0,
      "content": "<table><tr><td>Observation</td><td>\\(o_t^n \\triangleq \\{q_t^{e,n}, q_{t-1}\\} \\cup_{k=1}^{K} \\{q_t^{c,k}\\}\\)</td></tr><tr><td>Action</td><td>\\(u_t^n \\in \\mathcal{A} \\equiv \\mathcal{I} \\times \\mathcal{P}\\)</td></tr><tr><td>○ Destination space</td><td>\\(\\mathcal{I} \\triangleq \\{1, \\cdots, K\\}\\)</td></tr><tr><td>○ Packet amount space</td><td>\\(\\mathcal{P} \\triangleq \\{p_{\\min}, \\cdots, p_{\\max}\\}\\)</td></tr><tr><td>State</td><td>\\(s_t \\triangleq \\cup_{n=1}^{N} \\{o_t^n\\}\\)</td></tr><tr><td>Reward</td><td>\\(r(s_t, \\mathbf{u}_t)\\) in (1)</td></tr></table>"
    },
    {
      "type": "table_caption",
      "bbox": [0.15, 0.492, 0.416, 0.507],
      "angle": 0,
      "content": "TABLE II: The experiment parameters."
    },
    {
      "type": "table",
      "bbox": [0.077, 0.515, 0.498, 0.648],
      "angle": 0,
      "content": "<table><tr><td>Parameters</td><td>Values</td></tr><tr><td>The numbers of clouds and edge agents \\((K, N)\\)</td><td>2, 4</td></tr><tr><td>The packet amount space \\((\\mathcal{P})\\)</td><td>{0.1, 0.2}</td></tr><tr><td>The hyper-parameters of environment \\((w_{\\mathcal{P}}, w_{\\mathcal{R}})\\)</td><td>(0.3, 4)</td></tr><tr><td>Transmitted packets from the cloud \\((b_t^{c,k})\\)</td><td>0.3</td></tr><tr><td>The capacity of queue \\((q_{\\max})\\)</td><td>1</td></tr><tr><td>Optimizer</td><td>Adam</td></tr><tr><td>The number of gates in \\(U_{var}\\)</td><td>50</td></tr><tr><td>The number of qubits of actor/critic</td><td>4</td></tr><tr><td>Learning rate of actor/critic</td><td>\\(1 \\times 10^{-4}, 1 \\times 10^{-5}\\)</td></tr></table>"
    },
    {
      "type": "title",
      "bbox": [0.131, 0.666, 0.435, 0.68],
      "angle": 0,
      "content": "IV. EXPERIMENTS AND DEMONSTRATIONS"
    },
    {
      "type": "title",
      "bbox": [0.075, 0.692, 0.341, 0.707],
      "angle": 0,
      "content": "A. Single-Hop Offloading Environment"
    },
    {
      "type": "text",
      "bbox": [0.074, 0.714, 0.491, 0.775],
      "angle": 0,
      "content": "The environment used in this paper consists of \\(K\\) clouds and \\(N\\) edges. The clouds and edges have queues \\(q^{c}\\) and \\(q^{e}\\) that temporarily store packets. All edge agents offload their packets to clouds. The queue dynamics are as follows:"
    },
    {
      "type": "equation",
      "bbox": [0.15, 0.785, 0.416, 0.806],
      "angle": 0,
      "content": "\\[\nq_{t+1}^{i,k} = \\operatorname{clip}\\left(q_t^{i,k} - u_t^{i,k} + b_t^{i,k}, 0, q_{\\max}\\right),\n\\]"
    },
    {
      "type": "text",
      "bbox": [0.074, 0.817, 0.492, 0.91],
      "angle": 0,
      "content": "where the superscript \\(i \\in \\{c, e\\}\\) identifies the cloud and an edge device. The terms \\(u_{t}^{i,k}\\) and \\(b_{t}^{i,k}\\) denote the total transmitted packet size and the packet arrival of the \\(k\\)-th cloud or \\(n\\)-th edge, respectively. Note that \\(u_{t}^{e,n}\\) is the \\(n\\)-th edge agent's action. In addition, the clipping function is defined as \\(\\operatorname{clip}(x, x_{\\min}, x_{\\max}) \\triangleq \\min(x_{\\max}, \\max(x, x_{\\min}))\\)."
    },
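The queue recursion is simple enough to state directly in Python; this sketch just restates the clip-based update for a single queue (the numbers in the usage line are hypothetical):

```python
def clip(x, x_min, x_max):
    # clip(x, x_min, x_max) = min(x_max, max(x, x_min))
    return min(x_max, max(x, x_min))

def queue_step(q, u, b, q_max=1.0):
    """One step of the queue dynamics: q_{t+1} = clip(q_t - u_t + b_t, 0, q_max),
    where u_t is the transmitted packet size and b_t the packet arrival."""
    return clip(q - u + b, 0.0, q_max)

print(queue_step(q=0.6, u=0.2, b=0.3))  # -> 0.7
```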
    {
      "type": "image",
      "bbox": [0.518, 0.064, 0.7, 0.17],
      "angle": 0,
      "content": null
    },
    {
      "type": "image_caption",
      "bbox": [0.603, 0.171, 0.621, 0.182],
      "angle": 0,
      "content": "(a)"
    },
    {
      "type": "image",
      "bbox": [0.721, 0.064, 0.901, 0.17],
      "angle": 0,
      "content": null
    },
    {
      "type": "image_caption",
      "bbox": [0.806, 0.17, 0.826, 0.181],
      "angle": 0,
      "content": "(b)"
    },
    {
      "type": "image",
      "bbox": [0.518, 0.185, 0.699, 0.277],
      "angle": 0,
      "content": null
    },
    {
      "type": "image_caption",
      "bbox": [0.603, 0.279, 0.621, 0.29],
      "angle": 0,
      "content": "(c)"
    },
    {
      "type": "image",
      "bbox": [0.723, 0.186, 0.901, 0.277],
      "angle": 0,
      "content": null
    },
    {
      "type": "image_caption",
      "bbox": [0.807, 0.278, 0.826, 0.29],
      "angle": 0,
      "content": "(d)"
    },
    {
      "type": "image_caption",
      "bbox": [0.504, 0.298, 0.921, 0.328],
      "angle": 0,
      "content": "Fig. 3: The experimental result of various metrics with comparing different MARL frameworks."
    },
    {
      "type": "title",
      "bbox": [0.505, 0.345, 0.589, 0.36],
      "angle": 0,
      "content": "B. Training"
    },
    {
      "type": "text",
      "bbox": [0.503, 0.363, 0.921, 0.424],
      "angle": 0,
      "content": "The objective of the MARL agents is to maximize the discounted return. To derive the gradients, we leverage the joint state-value function \\(V^{\\psi}\\). Our framework uses a multi-agent policy gradient (MAPG), which can be formulated as follows:"
    },
    {
      "type": "equation",
      "bbox": [0.505, 0.429, 0.931, 0.47],
      "angle": 0,
      "content": "\\[\n\\nabla_{\\theta^n} J = -\\mathbb{E}_{\\pi}\\left[\\sum_{t=1}^{T} \\sum_{n=1}^{N} y_t \\nabla_{\\theta^n} \\log \\pi_{\\theta}\\left(u_t^n \\mid o_t^n\\right)\\right], \\quad \\nabla_{\\psi} J = \\nabla_{\\psi} \\sum_{t=1}^{T} \\|y_t\\|^2,\n\\]"
    },
    {
      "type": "text",
      "bbox": [0.504, 0.476, 0.921, 0.508],
      "angle": 0,
      "content": "s.t. \\(y_{t} = r(s_{t},\\mathbf{u}_{t}) + \\gamma V^{\\phi}(s_{t + 1}) - V^{\\psi}(s_{t})\\), where \\(\\phi\\) denotes the parameters of the target critic. The detailed procedure is given in Algorithm 1."
    },
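The two gradients above correspond to a policy-gradient loss weighted by the TD residual and a squared-TD critic loss. A minimal PyTorch sketch with stand-in tensors (hypothetical shapes and values, not the paper's code) is:

```python
import torch

T, N, gamma = 8, 4, 0.99
# Stand-ins: actor log-probs log pi(u_t^n | o_t^n) and critic values V(s_t), V(s_{t+1}).
log_pi = torch.randn(T, N, requires_grad=True)
v_s = torch.randn(T, requires_grad=True)
v_s_next = torch.randn(T)
r = torch.randn(T)

y = r + gamma * v_s_next - v_s                              # TD residual y_t
actor_loss = -(y.detach().unsqueeze(1) * log_pi).sum()      # gives nabla_Theta J
critic_loss = (y ** 2).sum()                                # gives nabla_psi J
(actor_loss + critic_loss).backward()                       # autograd computes both
```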
    {
      "type": "text",
      "bbox": [0.504, 0.508, 0.921, 0.599],
      "angle": 0,
      "content": "In this paper, we assume that the capacities of edges and clouds are all limited to \\(q_{\\mathrm{max}}\\) and that edge agents receive packets from previous hops, with a uniform arrival distribution \\(\\forall t: b_t^{e,n}\\sim \\mathcal{U}(0,w_{\\mathcal{P}}\\cdot q_{\\mathrm{max}})\\). The objective of this scenario is to minimize the total amount of queue overflow and the number of empty-queue events. Thus, the reward \\(r(s_{t},\\mathbf{u}_{t})\\) is defined as follows:"
    },
    {
      "type": "equation",
      "bbox": [0.532, 0.605, 0.921, 0.647],
      "angle": 0,
      "content": "\\[\n-\\sum_{k=1}^{K}\\left[\\mathbb{1}_{(q_{t+1}^{c,k} = 0)} \\cdot \\tilde{q}_t^{c,k} + \\mathbb{1}_{(q_{t+1}^{c,k} = q_{\\max})} \\cdot \\hat{q}_t^{c,k} \\cdot w_{\\mathcal{R}}\\right], \\tag{1}\n\\]"
    },
    {
      "type": "text",
      "bbox": [0.504, 0.651, 0.922, 0.745],
      "angle": 0,
      "content": "s.t. \\(\\tilde{q}_t^{c,k} = |q_t^{c,k} - u_t^{c,k} + b_t^{c,k}|\\) and \\(\\hat{q}_t^{c,k} = |q_{\\max} - \\tilde{q}_t^{c,k}|\\), where \\(w_{\\mathcal{R}}\\) is a reward hyperparameter. Note that \\(r(s_t, \\mathbf{u}_t) \\leq 0\\) because we treat the occurrence of abnormal queue states (e.g., queue overflow or underflow) as a negative reward. The Markov decision process (MDP) of this environment is presented in Table I."
    },
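A literal Python transcription of reward (1) may help parse the two indicator terms; the inputs in the usage line are hypothetical:

```python
def reward(q_next, q_tilde, q_hat, q_max=1.0, w_r=4.0):
    """Reward (1): penalize each of the K clouds by q_tilde when its queue
    empties and by w_R * q_hat when it saturates; always non-positive."""
    r = 0.0
    for qn, qt, qh in zip(q_next, q_tilde, q_hat):
        if qn == 0.0:       # underflow event: queue drained
            r -= qt
        elif qn == q_max:   # overflow event: queue saturated
            r -= qh * w_r
    return r

print(reward(q_next=[0.0, 1.0], q_tilde=[0.2, 0.4], q_hat=[0.8, 0.6]))  # -2.6
```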
    {
      "type": "title",
      "bbox": [0.505, 0.754, 0.797, 0.769],
      "angle": 0,
      "content": "C. Experimental and Demonstration Setup"
    },
    {
      "type": "text",
      "bbox": [0.503, 0.772, 0.922, 0.908],
      "angle": 0,
      "content": "To verify the effectiveness of the proposed QMARL framework (named 'Proposed'), we compare it against three baselines. Here, 'Comp1' is a CTDE hybrid QMARL framework whose actors use a VQC-based policy while the centralized critic uses a classical neural network. In addition, 'Comp2' is a CTDE classical MARL framework that does not involve quantum algorithms. Note that the trainable parameters of these three frameworks are all set to 50 for both actor and critic computation. Lastly, 'Comp3' is a"
    }
  ],
  [
    {
      "type": "image",
      "bbox": [0.08, 0.06, 0.92, 0.201],
      "angle": 0,
      "content": null
    },
    {
      "type": "image_caption",
      "bbox": [0.325, 0.205, 0.672, 0.22],
      "angle": 0,
      "content": "Fig. 4: The demonstration of QMARL framework."
    },
    {
      "type": "text",
      "bbox": [0.074, 0.23, 0.493, 0.352],
      "angle": 0,
      "content": "classical CTDE MARL where the number of parameters is more than 40K. The simulation parameter settings are listed in Table II. We use Python libraries (e.g., torchquantum and pytorch) for deploying VQCs and DL methods, which support GPU acceleration [12]. In addition, all experiments are conducted on an AMD Ryzen™ Threadripper™ 1950X and an NVIDIA RTX 3090. We have confirmed that the training time of QMARL for 1,000 epochs is not expensive (\\(\\approx\\) 35 minutes)."
    },
    {
      "type": "title",
      "bbox": [0.075, 0.366, 0.231, 0.38],
      "angle": 0,
      "content": "D. Evaluation Results"
    },
    {
      "type": "text",
      "bbox": [0.074, 0.387, 0.492, 0.584],
      "angle": 0,
      "content": "1) Reward Convergence: Fig. 3 presents the demonstration results. As shown in Fig. 3(a), the rewards of the QMARL frameworks are around -3.0 for Proposed and -16.6 for Comp1, whereas the classical MARL frameworks record -22.5 for Comp2 and -2.8 for Comp3, respectively. We calculate achievability via min-max normalization against the average return of a random walk. Note that the random walk records -33.2 on average. The achievability of the QMARL frameworks is \\(90.9\\%\\) for Proposed and \\(49.8\\%\\) for Comp1, whereas the classical MARL frameworks achieve \\(33.2\\%\\) for Comp2 and \\(91.5\\%\\) for Comp3. In summary, the proposed QMARL outperforms hybrid QMARL and classical MARL under the constraint on the number of trainable parameters."
    },
    {
      "type": "text",
      "bbox": [0.074, 0.585, 0.493, 0.736],
      "angle": 0,
      "content": "2) Performance: The average queue states of the edges and clouds are 0.460 for Proposed, 0.480 for Comp1, 0.510 for Comp2, and 0.453 for Comp3, respectively. The ratio of empty-queue events is highest for Comp2, followed by Comp1, Proposed, and Comp3. In contrast, queue overflow is lowest for Proposed, followed by Comp3, Comp2, and Comp1. According to Fig. 3(a-d), the QMARL framework outperforms both classical and hybrid quantum-classical MARL frameworks under the constraint on the number of trainable parameters."
    },
    {
      "type": "title",
      "bbox": [0.075, 0.751, 0.202, 0.765],
      "angle": 0,
      "content": "E. Demonstration"
    },
    {
      "type": "text",
      "bbox": [0.074, 0.773, 0.491, 0.879],
      "angle": 0,
      "content": "Due to the high network latency of quantum clouds, we conduct the demonstration in simulation. Fig. 4 visualizes the workflow of our QMARL framework. The superpositioned qubit states (i.e., magnitude and phase vectors) are expressed as \\(4 \\times 4\\) heatmaps in the hue-lightness-saturation color system. We provide the source code<sup>1</sup>, including QMARL, the single-hop environment, and the simulator."
    },
    {
      "type": "title",
      "bbox": [0.54, 0.231, 0.886, 0.244],
      "angle": 0,
      "content": "V. CONCLUDING REMARKS AND FUTURE WORK"
    },
    {
      "type": "text",
      "bbox": [0.504, 0.249, 0.922, 0.385],
      "angle": 0,
      "content": "This paper introduces quantum computing concepts to MARL, i.e., QMARL. To resolve the challenges of QMARL, we adopt VQC with state encoding and the concept of CTDE. In the single-hop environment, we verify the superiority of QMARL with respect to the number of trainable parameters, as well as the feasibility of QMARL. As a future work direction, implementing QMARL on a quantum cloud (e.g., IBM Quantum, Xanadu, or IonQ) is of interest because the impact of noise on quantum computing is considerable."
    },
    {
      "type": "text",
      "bbox": [0.504, 0.396, 0.922, 0.472],
      "angle": 0,
      "content": "Acknowledgement. This research was supported by the National Research Foundation of Korea (2022R1A2C2004869 and 2021R1A4A1030775). W.J. Yun and Y. Kwak contributed equally to this work. S. Jung, J. Park, and J. Kim are corresponding authors."
    },
    {
      "type": "title",
      "bbox": [0.665, 0.48, 0.762, 0.493],
      "angle": 0,
      "content": "REFERENCES"
    },
    {
      "type": "ref_text",
      "bbox": [0.515, 0.5, 0.922, 0.546],
      "angle": 0,
      "content": "[1] J. Park, S. Samarakoon, A. Elgabli, J. Kim, M. Bennis, S.-L. Kim, and M. Debbah, \"Communication-efficient and distributed learning over wireless networks: Principles and applications,\" Proceedings of the IEEE, vol. 109, no. 5, pp. 796-819, 2021."
    },
    {
      "type": "ref_text",
      "bbox": [0.515, 0.547, 0.922, 0.568],
      "angle": 0,
      "content": "[2] M. Schuld and N. Killoran, \"Is quantum advantage the right goal for quantum machine learning?\" CoRR, vol. abs/2203.01340, 2022."
    },
    {
      "type": "ref_text",
      "bbox": [0.514, 0.569, 0.921, 0.602],
      "angle": 0,
      "content": "[3] S. Oh, J. Choi, and J. Kim, \"A tutorial on quantum convolutional neural networks (QCNN),\" in Proc. IEEE Int'l Conf. on ICT Convergence (ICTC), October 2020."
    },
    {
      "type": "ref_text",
      "bbox": [0.514, 0.603, 0.921, 0.637],
      "angle": 0,
      "content": "[4] Z. Hong, J. Wang, X. Qu, X. Zhu, J. Liu, and J. Xiao, \"Quantum convolutional neural network on protein distance prediction,\" in Proc. IEEE Int'l Joint Conf. on Neural Networks (IJCNN), July 2021."
    },
    {
      "type": "ref_text",
      "bbox": [0.514, 0.637, 0.921, 0.669],
      "angle": 0,
      "content": "[5] Y. Kwak, W. J. Yun, S. Jung, and J. Kim, \"Quantum neural networks: Concepts, applications, and challenges,\" in Proc. IEEE Int'l Conf. on Ubiquitous and Future Networks (ICUFN), August 2021."
    },
    {
      "type": "ref_text",
      "bbox": [0.514, 0.67, 0.921, 0.704],
      "angle": 0,
      "content": "[6] S. Y.-C. Chen, C.-H. H. Yang, J. Qi, P.-Y. Chen, X. Ma, and H.-S. Goan, \"Variational quantum circuits for deep reinforcement learning,\" IEEE Access, vol. 8, pp. 141007-141024, 2020."
    },
    {
      "type": "ref_text",
      "bbox": [0.514, 0.705, 0.921, 0.749],
      "angle": 0,
      "content": "[7] Y. Kwak, W. J. Yun, S. Jung, J.-K. Kim, and J. Kim, \"Introduction to quantum reinforcement learning: Theory and PennyLane-based implementation,\" in Proc. IEEE Int'l Conf. on ICT Convergence (ICTC), October 2021."
    },
    {
      "type": "ref_text",
      "bbox": [0.514, 0.75, 0.921, 0.784],
      "angle": 0,
      "content": "[8] G. Carleo, I. Cirac, K. Cranmer, L. Daudet, M. Schuld, N. Tishby, L. Vogt-Maranto, and L. Zdeborova, \"Machine learning and the physical sciences,\" Reviews of Modern Physics, vol. 91, no. 4, p. 045002, 2019."
    },
    {
      "type": "ref_text",
      "bbox": [0.514, 0.785, 0.921, 0.806],
      "angle": 0,
      "content": "[9] P. W. Shor, \"Scheme for reducing decoherence in quantum computer memory,\" Physical Review A, vol. 52, no. 4, p. R2493, 1995."
    },
    {
      "type": "ref_text",
      "bbox": [0.508, 0.807, 0.921, 0.829],
      "angle": 0,
      "content": "[10] J. Biamonte, \"Universal variational quantum computation,\" Physical Review A, vol. 103, no. 3, p. L030401, 2021."
    },
    {
      "type": "ref_text",
      "bbox": [0.508, 0.83, 0.921, 0.851],
      "angle": 0,
      "content": "[11] N. Wiebe, A. Kapoor, and K. M. Svore, \"Quantum deep learning,\" CoRR, vol. abs/1412.3489, 2014."
    },
    {
      "type": "ref_text",
      "bbox": [0.508, 0.852, 0.921, 0.897],
      "angle": 0,
      "content": "[12] H. Wang, Y. Ding, J. Gu, Z. Li, Y. Lin, D. Z. Pan, F. T. Chong, and S. Han, \"QuantumNAS: Noise-adaptive search for robust quantum circuits,\" in Proc. IEEE Int'l Symposium on High-Performance Computer Architecture (HPCA), April 2022."
    },
    {
      "type": "list",
      "bbox": [0.508, 0.5, 0.922, 0.897],
      "angle": 0,
      "content": null
    },
    {
      "type": "page_footnote",
      "bbox": [0.09, 0.895, 0.374, 0.907],
      "angle": 0,
      "content": "1 https://github.com/WonJoon-Yun/Quantum-Multi-Agent-Reinforcement-Learning"
    }
  ]
]
2203.10xxx/2203.10443/6daea8a6-87fa-4fcf-9b9d-5cef99f9e400_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4f8665b2d2df36d66e9c362e4726cd63770b69d8d8a3563a4eb8f0b611e8f629
size 1291849
2203.10xxx/2203.10443/full.md
ADDED
@@ -0,0 +1,203 @@
| 1 |
+
# Quantum Multi-Agent Reinforcement Learning via Variational Quantum Circuit Design
|
| 2 |
+
|
| 3 |
+
†Won Joon Yun, †Yunseok Kwak, †Jae Pyoung Kim, §Hyunhee Cho,
|
| 4 |
+
|
| 5 |
+
$\ddagger$ Soyi Jung, $\circ$ Jihong Park, and $\dagger$ Joongheon Kim
|
| 6 |
+
|
| 7 |
+
†School of Electrical Engineering, Korea University, Seoul, Republic of Korea
|
| 8 |
+
|
| 9 |
+
$^{\S}$ School of Electronic and Electrical Engineering, Sungkyunkwan University, Suwon, Republic of Korea
|
| 10 |
+
|
| 11 |
+
$^{\ddagger}$ School of Software, Hallym University, Chuncheon, Republic of Korea
|
| 12 |
+
|
| 13 |
+
$^{\circ}$ School of Information Technology, Deakin University, Geelong, Victoria, Australia
|
| 14 |
+
|
| 15 |
+
Abstract-In recent years, quantum computing (QC) has been getting a lot of attention from industry and academia. Especially, among various QC research topics, variational quantum circuit (VQC) enables quantum deep reinforcement learning (QRL). Many studies of QRL have shown that the QRL is superior to the classical reinforcement learning (RL) methods under the constraints of the number of training parameters. This paper extends and demonstrates the QRL to quantum multi-agent RL (QMARL). However, the extension of QRL to QMARL is not straightforward due to the challenge of the noise intermediate-scale quantum (NISQ) and the non-stationary properties in classical multi-agent RL (MARL). Therefore, this paper proposes the centralized training and decentralized execution (CTDE) QMARL framework by designing novel VQCs for the framework to cope with these issues. To corroborate the QMARL framework, this paper conducts the QMARL demonstration in a single-hop environment where edge agents offload packets to clouds. The extensive demonstration shows that the proposed QMARL framework enhances $57.7\%$ of total reward than classical frameworks.
|
| 16 |
+
|
| 17 |
+
Index Terms—Quantum deep learning, Multi-agent reinforcement learning, Quantum computing
|
| 18 |
+
|
| 19 |
+
# I. INTRODUCTION
|
| 20 |
+
|
| 21 |
+
The recent advances in computing hardware and deep learning algorithms have spurred the ground-breaking developments in distributed learning and multi-agent reinforcement learning (MARL) [1]. The forthcoming innovations in quantum computing hardware and algorithms will accelerate or even revolutionize this trend [2], motivating this research on quantum MARL (QMARL). Indeed, quantum algorithms have huge potential in reducing model parameters without compromising accuracy by taking advantage of quantum entanglement [3]. A remarkable example is the variational quantum circuit (VQC) architecture, also known as a quantum neural network (QNN) [4], [5], which integrates a quantum circuit into a classical deep neural network. The resultant hybrid quantum-classical model enables quantum reinforcement learning (QRL) that is on par with classical reinforcement learning with more model parameters [6], [7], which can accelerate the training and inference speed while saving computing resources [8]. Inspired from this success, in this article we aim to extend QRL to QMARL by integrating VQC into classical MARL. This problem is non-trivial due to the
|
| 22 |
+
|
| 23 |
+
trade-off between quantum errors and MARL training stability as we shall elaborate next.
|
| 24 |
+
|
| 25 |
+
In MARL, each agent interacts with other agents in a cooperative or competitive scenario. Such agent interactions often incur the non-stationary reward of each agent, hindering the MARL training convergence. A standard way to cope with this MARL non-stationarity is the centralized training and decentralized execution (CTDE) method wherein the reward is given simultaneously to all agents by concatenating their state-action pairs. In this respect, one can naively implement a VQC version of CTDE as in [6]. Unfortunately, since QRL under VQC represents the state-action pairs using qubits, such a nive CTDE QMARL implementation requires the qubits increasing with the number of agents, and incurs the quantum errors increasing with the number of qubits [9], hindering the MARL convergence and scalability. Under the current noise intermediate-scale quantum (NISQ) era (up to a few hundreds qubits), it is difficult to correct such type of quantum errors due to the insufficient number of qubits. Instead, the quantum errors brought on by quantum gate operations can be properly controlled under NISQ [9]. Motivated from this, we apply a quantum state encoding method to CTDE QMARL, which reduces the dimension of the state-action pairs by making them pass through a set of quantum gates.

Contributions. The major contributions of this research can be summarized as follows.

- We propose a novel QMARL framework by integrating CTDE and quantum state encoding into VQC-based MARL.
- Through experiments, we demonstrate that the proposed QMARL framework achieves $57.7\%$ higher total rewards than classical MARL baselines under a multiple edge-to-cloud queue management scenario.

# II. QUANTUM COMPUTING AND CIRCUIT

# A. Quantum Computing in a Nutshell

Quantum computing utilizes a qubit as the basic unit of computation. The qubit represents a quantum superposition state between two basis states, denoted $|0\rangle$ and $|1\rangle$. Mathematically, there are two ways to describe a qubit state:

$$
\begin{array}{l} |\psi\rangle = \alpha |0\rangle + \beta |1\rangle, \ \mathrm{where}\ |\alpha|^2 + |\beta|^2 = 1, \\ |\psi\rangle = \cos(\delta/2) |0\rangle + e^{i\varphi} \sin(\delta/2) |1\rangle, \ \forall \delta, \varphi \in [-\pi, \pi]. \end{array}
$$
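
As a quick sanity check of the two descriptions above, the following minimal NumPy sketch (an illustration added here, not from the paper) builds a state from the Bloch angles $(\delta, \varphi)$ and verifies that it is normalized:

```python
import numpy as np

def qubit_state(delta, phi):
    """Return the 2D complex amplitude vector for Bloch angles (delta, phi)."""
    alpha = np.cos(delta / 2)                    # amplitude of |0>
    beta = np.exp(1j * phi) * np.sin(delta / 2)  # amplitude of |1>
    return np.array([alpha, beta])

psi = qubit_state(delta=np.pi / 3, phi=np.pi / 4)
# |alpha|^2 + |beta|^2 = 1 holds for any (delta, phi)
assert np.isclose(np.abs(psi[0])**2 + np.abs(psi[1])**2, 1.0)
```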



Fig. 1: The illustration of VQC.

The former is based on a normalized 2D complex vector, while the latter is based on polar coordinates $(\delta, \varphi)$ from a geometric viewpoint. The qubit state is mapped onto the surface of a 3-dimensional unit sphere, called the Bloch sphere. In addition, a quantum gate is a unitary operator transforming a qubit state. For example, $R_{\mathrm{x}}(\delta)$, $R_{\mathrm{y}}(\delta)$, and $R_{\mathrm{z}}(\delta)$ are rotation operator gates that rotate by $\delta$ around their corresponding axes of the Bloch sphere. These gates act on a single qubit. In contrast, there are quantum gates that operate on multiple qubits, called controlled rotation gates. They act on a target qubit according to the state of one or more control qubits, which generates quantum entanglement between those qubits. Among them, the Controlled-$X$ (or CNOT) gate is one of the most widely used control gates; it flips the second (target) qubit if the first (control) qubit is $|1\rangle$. These gates allow quantum algorithms to realize their features on a VQC, which will be used for QMARL.
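
For concreteness, the short NumPy sketch below (illustrative, not from the paper) writes out one single-qubit rotation and the CNOT gate, and checks that CNOT flips the target qubit exactly when the control is $|1\rangle$:

```python
import numpy as np

def rx(delta):
    """Rotation around the x-axis of the Bloch sphere by angle delta."""
    c, s = np.cos(delta / 2), np.sin(delta / 2)
    return np.array([[c, -1j * s], [-1j * s, c]])

# CNOT on two qubits: flips the target (second) qubit iff the control is |1>
CNOT = np.array([[1, 0, 0, 0],
                 [0, 1, 0, 0],
                 [0, 0, 0, 1],
                 [0, 0, 1, 0]], dtype=complex)

ket_10 = np.kron([0, 1], [1, 0])                       # control |1>, target |0>
assert np.allclose(CNOT @ ket_10, np.kron([0, 1], [0, 1]))  # -> |11>
```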

# B. Variational Quantum Circuit (VQC)

A VQC is a quantum circuit that utilizes learnable parameters to perform various numerical tasks, including estimation, optimization, approximation, and classification. As shown in Fig. 1, the operation of the general VQC model can be divided into three steps. The first is the state encoding step $U_{enc}$, in which classical input information is encoded into corresponding qubit states that can be processed by the quantum circuit. The next is the variational step $U_{var}$, which entangles qubit states via controlled gates and rotates qubits via parameterized rotation gates. This process can be repeated over multiple layers with more parameters, which enhances the expressive power of the circuit. The last is the measurement step $\mathcal{M}$, which measures the expectation value of each qubit state with respect to its corresponding computational basis. This process can be formulated as follows:

$$
f(x; \theta) = \bigotimes_{M \in \mathcal{M}} \langle 0 | U_{enc}^{\dagger}(x) U_{var}^{\dagger}(\theta) M U_{var}(\theta) U_{enc}(x) | 0 \rangle,
$$

where $\otimes$ denotes the tensor product over the measured expectation values; $f(x;\theta)$ is the output of the VQC with input $x$ and circuit parameters $\theta$; and $\mathcal{M}$ is the set of quantum measurement bases in the VQC, with $|\mathcal{M}| \leq n_{qubit}$ where $n_{qubit}$ is the number of qubits. The state encoder exemplified in Fig. 1 can be expressed as follows:

$$
U_{enc}(s_0, s_4, s_8, s_{12}) = R_{\mathrm{x}}(s_{12}) \cdot R_{\mathrm{z}}(s_8) \cdot R_{\mathrm{y}}(s_4) \cdot R_{\mathrm{x}}(s_0).
$$
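
A minimal sketch of such a three-step VQC is shown below, written with PennyLane (the paper's released code uses torchquantum; PennyLane is substituted here purely for brevity, and the specific encoding and layer structure are assumptions, not the paper's exact circuit):

```python
import pennylane as qml
import numpy as np

n_qubits = 4
dev = qml.device("default.qubit", wires=n_qubits)

@qml.qnode(dev)
def vqc(x, theta):
    # State encoding U_enc: rotate each qubit by one classical input feature.
    for i in range(n_qubits):
        qml.RX(x[i], wires=i)
    # Variational step U_var: entangle with CNOTs, then parameterized rotations.
    for layer in range(theta.shape[0]):
        for i in range(n_qubits - 1):
            qml.CNOT(wires=[i, i + 1])
        for i in range(n_qubits):
            qml.RY(theta[layer, i], wires=i)
    # Measurement M: expectation value of Pauli-Z on each qubit.
    return [qml.expval(qml.PauliZ(i)) for i in range(n_qubits)]

x = np.random.uniform(-np.pi, np.pi, n_qubits)            # encoded observation
theta = np.random.uniform(-np.pi, np.pi, (2, n_qubits))   # 2 variational layers
print(vqc(x, theta))                                      # 4 values in [-1, 1]
```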



Fig. 2: The structure of the QMARL framework.

The quantum circuit parameters are updated at every training epoch in the direction that optimizes the objective value output by the VQC. Through this process, a VQC is known to be able to approximate any continuous function, similar to a classical neural network [10]. Therefore, a VQC is also called a quantum neural network (QNN) [11]. In this paper, two different VQCs are used: one to approximate the optimal actions of the actor and one to approximate the state value of the critic.

# III. QUANTUM MARL FRAMEWORK

# A. QMARL Architecture

Our proposed QMARL is decentralized for scalability: every agent in the QMARL has a VQC-based policy, i.e., agents do not require communication among themselves. Fig. 2 shows the VQC used in the quantum actor (Sec. III-A1) and critic (Sec. III-A2).

1) Quantum Actor: For the quantum actor, the VQC is used to calculate the probabilities of the actions of each agent. The quantum policy is written as follows:

$$
\pi_{\theta}(u_t \mid o_t) = \operatorname{softmax}(f(o_t; \theta)),
$$

where $\operatorname{softmax}(\mathbf{x}) \triangleq \left[\frac{e^{x_1}}{\sum_{i=1}^{N} e^{x_i}}, \dots, \frac{e^{x_N}}{\sum_{i=1}^{N} e^{x_i}}\right]$. At time $t$, the actor policy of the $n$-th agent makes an action decision given the observation $o_t^n$, which is denoted as $\pi_{\theta^n}(u_t^n \mid o_t^n)$, where $\theta^n$ denotes the parameters of the $n$-th actor. Then, the action $u_t^n$ is computed as follows:

$$
u_t^n = \arg\max_{u} \pi_{\theta^n}(u \mid o_t^n).
$$
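
Putting the two equations together, a hypothetical quantum actor wrapper might look as follows (a sketch assuming the `vqc` function from the earlier snippet; greedy argmax selection is shown, while training typically samples from the softmax):

```python
import numpy as np

def softmax(x):
    z = np.exp(x - np.max(x))  # subtract the max for numerical stability
    return z / z.sum()

def quantum_actor(obs, theta, vqc):
    """Map an observation to action probabilities via the VQC policy."""
    logits = np.array(vqc(obs, theta))  # expectation values from the circuit
    probs = softmax(logits)             # pi_theta(u | o)
    greedy_action = int(np.argmax(probs))
    return probs, greedy_action
```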

2) Quantum Centralized Critic: We adopt a centralized critic for CTDE as a state-value function. At time $t$, the parameterized critic estimates the discounted return given $s_t$ as follows:

$$
V^{\psi}(s_t) = f(s_t; \psi) \simeq \mathbb{E}\left[\sum_{t'=t}^{T} \gamma^{t'-t} \cdot r(s_{t'}, \mathbf{u}_{t'}) \,\Big|\, s_t = s\right],
$$

where $\gamma$, $T$, $\mathbf{u}_t$, and $r(s_{t'}, \mathbf{u}_{t'})$ stand for the discount factor $\gamma \in [0,1)$, the episode length, the actions of all agents, and the reward function, respectively. In addition, $\psi$ denotes the trainable parameters of the critic. Note that $s_t$ is the ground-truth state at $t$, and that state encoding is used (the green box in Fig. 1) because the state dimension is larger than the observation dimension.

Algorithm 1: CTDE-based QMARL Training

1 Initialize the parameters of the actor-critic networks and the replay buffer: $\Theta \triangleq \{\theta^1, \dots, \theta^N\}$, $\psi$, $\phi$, $\mathcal{D} = \{\}$;
2 repeat
3     $t = 0$, $s_0 =$ initial state;
4     while $s_t \neq$ terminal and $t <$ episode limit do
5         for each agent $n$ do
6             Calculate $\pi_{\theta^n}(u_t^n \mid o_t^n)$ and sample $u_t^n$;
7         end
8         Get reward $r_t$, next state $s_{t+1}$, and observations $\mathbf{o}_{t+1} = \{o_{t+1}^1, \dots, o_{t+1}^N\}$;
9         $\mathcal{D} = \mathcal{D} \cup \{(s_t, \mathbf{o}_t, \mathbf{u}_t, r_t, s_{t+1}, \mathbf{o}_{t+1})\}$;
10        $t = t + 1$, step $=$ step $+ 1$;
11    end
12    for each timestep $t$ in each episode in batch $\mathcal{D}$ do
13        Get $V^{\psi}(s_t)$ and $V^{\phi}(s_{t+1})$;
14        Calculate the target $y_t$;
15    end
16    Calculate $\nabla_{\Theta}J$, $\nabla_{\psi}J$, and update $\Theta$, $\psi$;
17    if target update period then
18        Update the target network, $\phi \gets \psi$;
19    end
20 until obtaining optimal policies;
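
A condensed Python rendering of Algorithm 1 follows (a structural sketch: `env`, `actors`, `critic`, and `update` are hypothetical helpers assumed here, not the paper's code, and the batch size of 32 is an arbitrary choice):

```python
import random

def train(env, actors, critic, target_critic, episodes, episode_limit, update_period):
    replay = []                                                    # replay buffer D
    for episode in range(episodes):
        s, obs = env.reset()                                       # lines 2-3
        t, done = 0, False
        while not done and t < episode_limit:                      # line 4
            actions = [a.sample(o) for a, o in zip(actors, obs)]   # lines 5-7
            s_next, obs_next, r, done = env.step(actions)          # line 8
            replay.append((s, obs, actions, r, s_next, obs_next))  # line 9
            s, obs, t = s_next, obs_next, t + 1                    # line 10
        batch = random.sample(replay, min(len(replay), 32))
        update(actors, critic, target_critic, batch)               # lines 12-16
        if episode % update_period == 0:                           # lines 17-19
            # assumes torch-style modules for the critics
            target_critic.load_state_dict(critic.state_dict())
```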

TABLE I: The MDP of a single-hop offloading environment.

<table><tr><td>Observation</td><td>$o_t^n \triangleq \{q_t^{e,n}, q_{t-1}^{e,n}\} \cup \bigcup_{k=1}^{K}\{q_t^{c,k}\}$</td></tr><tr><td>Action</td><td>$u_t^n \in \mathcal{A} \equiv \mathcal{I} \times \mathcal{P}$</td></tr><tr><td>○ Destination space</td><td>$\mathcal{I} \triangleq \{1, \cdots, K\}$</td></tr><tr><td>○ Packet amount space</td><td>$\mathcal{P} \triangleq \{p_{\min}, \cdots, p_{\max}\}$</td></tr><tr><td>State</td><td>$s_t \triangleq \bigcup_{n=1}^{N}\{o_t^n\}$</td></tr><tr><td>Reward</td><td>$r(s_t, \mathbf{u}_t)$ in (1)</td></tr></table>

TABLE II: The experiment parameters.

<table><tr><td>Parameters</td><td>Values</td></tr><tr><td>The numbers of clouds and edge agents $(K, N)$</td><td>2, 4</td></tr><tr><td>The packet amount space $(\mathcal{P})$</td><td>{0.1, 0.2}</td></tr><tr><td>The hyper-parameters of the environment $(w_{\mathcal{P}}, w_{\mathcal{R}})$</td><td>(0.3, 4)</td></tr><tr><td>Transmitted packets from the cloud $(b_t^{c,k})$</td><td>0.3</td></tr><tr><td>The capacity of each queue $(q_{\max})$</td><td>1</td></tr><tr><td>Optimizer</td><td>Adam</td></tr><tr><td>The number of gates in $U_{var}$</td><td>50</td></tr><tr><td>Learning rate of actor/critic (4 qubits each)</td><td>$1 \times 10^{-4}$, $1 \times 10^{-5}$</td></tr></table>

# IV. EXPERIMENTS AND DEMONSTRATIONS

# A. Single-Hop Offloading Environment

The environment used in this paper consists of $K$ clouds and $N$ edges. The clouds and edges have queues $q^{c}$ and $q^{e}$ that temporarily store packets. All edge agents offload their packets to clouds. The queue dynamics are as follows:

$$
q_{t+1}^{i,k} = \operatorname{clip}\left(q_t^{i,k} - u_t^{i,k} + b_t^{i,k},\, 0,\, q_{\max}\right),
$$

where the superscript $i \in \{c, e\}$ identifies a cloud or an edge device. The terms $u_t^{i,k}$ and $b_t^{i,k}$ denote the total transmitted packet size and the packet arrival of the $k$-th cloud or $n$-th edge, respectively. Note that $u_t^{e,n}$ is the $n$-th edge agent's action. In addition, the clipping function is defined as $\operatorname{clip}(x, x_{\min}, x_{\max}) \triangleq \min(x_{\max}, \max(x, x_{\min}))$.
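
The queue update above is a one-liner in code; the following sketch (illustrative, with made-up numbers) applies it to a vector of queues at once:

```python
import numpy as np

def queue_step(q, u, b, q_max=1.0):
    """One step of the queue dynamics: depart u, arrive b, clip to [0, q_max]."""
    return np.clip(q - u + b, 0.0, q_max)

q = np.array([0.4, 0.9])    # current cloud queue states (K = 2)
u = np.array([0.3, 0.3])    # transmitted packets
b = np.array([0.2, 0.5])    # arrivals offloaded by the edge agents
print(queue_step(q, u, b))  # -> [0.3, 1.0] (the second queue saturates)
```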

 (a)

 (b)

 (c)

 (d)

Fig. 3: Experimental results on various metrics, comparing the different MARL frameworks.

# B. Training

The objective of the MARL agents is to maximize the discounted return. To derive the gradients, we leverage the joint state-value function $V^{\psi}$. Our framework uses a multi-agent policy gradient (MAPG) method, which can be formulated as follows:

$$
\nabla_{\theta^n} J = -\mathbb{E}_{\pi}\left[\sum_{t=1}^{T} \sum_{n=1}^{N} y_t \nabla_{\theta^n} \log \pi_{\theta^n}\left(u_t^n \mid o_t^n\right)\right], \quad \nabla_{\psi} J = \nabla_{\psi} \sum_{t=1}^{T} \|y_t\|^2,
$$

s.t. $y_t = r(s_t, \mathbf{u}_t) + \gamma V^{\phi}(s_{t+1}) - V^{\psi}(s_t)$, where $\phi$ denotes the parameters of the target critic. The detailed procedure is given in Algorithm 1.
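
In an autograd framework the two gradients above reduce to two scalar losses; a minimal PyTorch sketch (assuming `log_probs`, `values`, `target_values`, and `rewards` tensors gathered from a batch; the names are illustrative):

```python
import torch

def mapg_losses(log_probs, values, target_values, rewards, gamma=0.99):
    """Actor and critic losses built from the one-step TD target y_t."""
    # detach the target critic's output so no gradient flows through phi
    y = rewards + gamma * target_values.detach() - values
    actor_loss = -(y.detach() * log_probs).sum()  # y_t is a fixed weight for the actor
    critic_loss = (y ** 2).sum()                  # squared TD error for psi
    return actor_loss, critic_loss
```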

In this paper, we assume that the capacities of edges and clouds are all limited to $q_{\max}$ and that edge agents receive packets from previous hops, where the arrival distribution is uniform, $\forall b_t^{e,n} \sim \mathcal{U}(0, w_{\mathcal{P}} \cdot q_{\max})$. The objective of this scenario is to minimize both the total amount of queue overflow and the occurrence of empty-queue events. Thus, the reward $r(s_t, \mathbf{u}_t)$ is given as follows:

$$
-\sum_{k=1}^{K}\left[\mathbb{1}_{\left(q_{t+1}^{c,k} = 0\right)} \cdot \tilde{q}_t^{c,k} + \mathbb{1}_{\left(q_{t+1}^{c,k} = q_{\max}\right)} \cdot \hat{q}_t^{c,k} \cdot w_{\mathcal{R}}\right], \tag{1}
$$

s.t. $\tilde{q}_t^{c,k} = |q_t^{c,k} - u_t^{c,k} + b_t^{c,k}|$ and $\hat{q}_t^{c,k} = |q_{\max} - \tilde{q}_t^{c,k}|$, where $w_{\mathcal{R}}$ is the reward hyperparameter. Note that $r(s_t, \mathbf{u}_t) \in (-\infty, 0]$ (non-positive) because we treat abnormal queue states (e.g., queue overflow or underflow) as negative reward. The Markov decision process (MDP) of this environment is summarized in Table I.
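
A direct transcription of reward (1) into NumPy (illustrative; the indicator comparisons use exact equality as in the formula, which is well-defined here because `queue_step` clips to the boundary values):

```python
import numpy as np

def reward(q_c, u_c, b_c, q_max=1.0, w_R=4.0):
    """Negative penalty for empty (underflow) and full (overflow) cloud queues."""
    q_next = np.clip(q_c - u_c + b_c, 0.0, q_max)
    q_tilde = np.abs(q_c - u_c + b_c)        # unclipped queue magnitude
    q_hat = np.abs(q_max - q_tilde)          # distance past the capacity
    empty = (q_next == 0.0) * q_tilde        # underflow penalty term
    full = (q_next == q_max) * q_hat * w_R   # overflow penalty term
    return -np.sum(empty + full)
```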

# C. Experimental and Demonstration Setup

To verify the effectiveness of the proposed QMARL framework (named Proposed), we compare it with three baselines. Here, 'Comp1' is a CTDE hybrid QMARL framework where the actors use a VQC-based policy and the centralized critic uses a classical neural network. 'Comp2' is a CTDE classical MARL framework that does not involve quantum algorithms. The trainable parameters of these three frameworks are all set to 50 for actor and critic computation. Lastly, 'Comp3' is a classical CTDE MARL framework with more than 40K parameters. The simulation parameter settings are listed in Table II. We use Python libraries (e.g., torchquantum and PyTorch) for deploying the VQCs and DL methods, which support GPU acceleration [12]. In addition, all experiments are conducted on an AMD Ryzen™ Threadripper™ 1950X and an NVIDIA RTX 3090. We have confirmed that the training time of QMARL for 1,000 epochs is not expensive ($\approx$ 35 minutes).



Fig. 4: The demonstration of the QMARL framework.

# D. Evaluation Results

1) Reward Convergence: Fig. 3 presents the demonstration results. As shown in Fig. 3(a), the rewards of the QMARL frameworks are around -3.0 for Proposed and -16.6 for Comp1, whereas the classical MARL frameworks record -22.5 for Comp2 and -2.8 for Comp3. We calculate the achievability as a min-max normalization against the average return of a random walk, which records -33.2 on average. The achievability of the QMARL frameworks is $90.9\%$ for Proposed and $49.8\%$ for Comp1, while the classical MARL frameworks achieve $33.2\%$ for Comp2 and $91.5\%$ for Comp3. In summary, the proposed QMARL outperforms hybrid QMARL and classical MARL under the constraint on the number of trainable parameters.

2) Performance: The average queue states of the edges and clouds are 0.460 for Proposed, 0.480 for Comp1, 0.510 for Comp2, and 0.453 for Comp3. The ratio of empty-queue events is highest for Comp2, followed by Comp1, Proposed, and Comp3, whereas queue overflow is lowest for Proposed, followed by Comp3, Comp2, and Comp1. According to Fig. 3(a-d), the QMARL framework outperforms both classical and hybrid quantum-classical MARL frameworks under the constraint on the number of trainable parameters.

# E. Demonstration

Due to the high network latency of quantum clouds, we conduct the demonstration in simulation. Fig. 4 visualizes the workflow of our QMARL framework. The superpositioned qubit states (i.e., magnitude and phase vectors) are rendered as $4 \times 4$ heatmaps in the hue-lightness-saturation color system. We provide source code<sup>1</sup> including QMARL, the single-hop environment, and the simulator.

# V. CONCLUDING REMARKS AND FUTURE WORK

This paper introduces quantum computing concepts to MARL, i.e., QMARL. To resolve the challenges of QMARL, we adopt VQCs with state encoding and the concept of CTDE. In the single-hop environment, we verify the superiority and feasibility of QMARL under a constrained number of trainable parameters. As future work, the implementation of QMARL on quantum clouds (e.g., IBM Quantum, Xanadu, or IonQ) would be of interest, because the impact of noise on quantum computing is considerable.

Acknowledgement. This research was supported by the National Research Foundation of Korea (2022R1A2C2004869 and 2021R1A4A1030775). W.J. Yun and Y. Kwak contributed equally to this work. S. Jung, J. Park, and J. Kim are corresponding authors.

# REFERENCES

[1] J. Park, S. Samarakoon, A. Elgabli, J. Kim, M. Bennis, S.-L. Kim, and M. Debbah, "Communication-efficient and distributed learning over wireless networks: Principles and applications," Proceedings of the IEEE, vol. 109, no. 5, pp. 796-819, 2021.
[2] M. Schuld and N. Killoran, "Is quantum advantage the right goal for quantum machine learning?" CoRR, vol. abs/2203.01340, 2022.
[3] S. Oh, J. Choi, and J. Kim, "A tutorial on quantum convolutional neural networks (QCNN)," in Proc. IEEE Int'l Conf. on ICT Convergence (ICTC), October 2020.
[4] Z. Hong, J. Wang, X. Qu, X. Zhu, J. Liu, and J. Xiao, "Quantum convolutional neural network on protein distance prediction," in Proc. IEEE Int'l Joint Conf. on Neural Networks (IJCNN), July 2021.
[5] Y. Kwak, W. J. Yun, S. Jung, and J. Kim, "Quantum neural networks: Concepts, applications, and challenges," in Proc. IEEE Int'l Conf. on Ubiquitous and Future Networks (ICUFN), August 2021.
[6] S. Y.-C. Chen, C.-H. H. Yang, J. Qi, P.-Y. Chen, X. Ma, and H.-S. Goan, "Variational quantum circuits for deep reinforcement learning," IEEE Access, vol. 8, pp. 141007-141024, 2020.
[7] Y. Kwak, W. J. Yun, S. Jung, J.-K. Kim, and J. Kim, "Introduction to quantum reinforcement learning: Theory and PennyLane-based implementation," in Proc. IEEE Int'l Conf. on ICT Convergence (ICTC), October 2021.
[8] G. Carleo, I. Cirac, K. Cranmer, L. Daudet, M. Schuld, N. Tishby, L. Vogt-Maranto, and L. Zdeborová, "Machine learning and the physical sciences," Reviews of Modern Physics, vol. 91, no. 4, p. 045002, 2019.
[9] P. W. Shor, "Scheme for reducing decoherence in quantum computer memory," Physical Review A, vol. 52, no. 4, p. R2493, 1995.
[10] J. Biamonte, "Universal variational quantum computation," Physical Review A, vol. 103, no. 3, p. L030401, 2021.
[11] N. Wiebe, A. Kapoor, and K. M. Svore, "Quantum deep learning," CoRR, vol. abs/1412.3489, 2014.
[12] H. Wang, Y. Ding, J. Gu, Z. Li, Y. Lin, D. Z. Pan, F. T. Chong, and S. Han, "QuantumNAS: Noise-adaptive search for robust quantum circuits," in Proc. IEEE Int'l Symposium on High-Performance Computer Architecture (HPCA), April 2022.

2203.10xxx/2203.10443/images.zip ADDED

@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:78081784fefa6e0b78baedc5dc20b6ae3fc902264843d0a6069e9ebdc806ced6
+ size 378523

2203.10xxx/2203.10443/layout.json ADDED

The diff for this file is too large to render. See raw diff

2203.10xxx/2203.10444/a14277f4-9805-49c5-8141-e66a860f32a2_content_list.json ADDED

@@ -0,0 +1,1504 @@
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "VGSE: Visually-Grounded Semantic Embeddings for Zero-Shot Learning",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
112,
|
| 8 |
+
130,
|
| 9 |
+
854,
|
| 10 |
+
152
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Wenjia $\\mathrm{Xu}^{1,7,8}$",
|
| 17 |
+
"bbox": [
|
| 18 |
+
125,
|
| 19 |
+
179,
|
| 20 |
+
241,
|
| 21 |
+
199
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "Yongqin Xian2",
|
| 28 |
+
"bbox": [
|
| 29 |
+
267,
|
| 30 |
+
181,
|
| 31 |
+
385,
|
| 32 |
+
199
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "Jiuniu Wang5,7,8",
|
| 39 |
+
"bbox": [
|
| 40 |
+
411,
|
| 41 |
+
181,
|
| 42 |
+
540,
|
| 43 |
+
199
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "Bernt Schiele3",
|
| 50 |
+
"bbox": [
|
| 51 |
+
566,
|
| 52 |
+
181,
|
| 53 |
+
683,
|
| 54 |
+
198
|
| 55 |
+
],
|
| 56 |
+
"page_idx": 0
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"text": "Zeynep Akata $^{3,4,6}$",
|
| 61 |
+
"bbox": [
|
| 62 |
+
709,
|
| 63 |
+
181,
|
| 64 |
+
849,
|
| 65 |
+
198
|
| 66 |
+
],
|
| 67 |
+
"page_idx": 0
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"text": "<sup>1</sup> Beijing University of Posts and Telecommunications <sup>2</sup> ETH Zurich",
|
| 72 |
+
"bbox": [
|
| 73 |
+
197,
|
| 74 |
+
205,
|
| 75 |
+
761,
|
| 76 |
+
220
|
| 77 |
+
],
|
| 78 |
+
"page_idx": 0
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "text",
|
| 82 |
+
"text": "3 Max Planck Institute for Informatics 4 University of Tübingen",
|
| 83 |
+
"bbox": [
|
| 84 |
+
88,
|
| 85 |
+
223,
|
| 86 |
+
614,
|
| 87 |
+
239
|
| 88 |
+
],
|
| 89 |
+
"page_idx": 0
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"type": "text",
|
| 93 |
+
"text": "<sup>5</sup> City University of Hong Kong",
|
| 94 |
+
"bbox": [
|
| 95 |
+
640,
|
| 96 |
+
223,
|
| 97 |
+
893,
|
| 98 |
+
241
|
| 99 |
+
],
|
| 100 |
+
"page_idx": 0
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"type": "text",
|
| 104 |
+
"text": "6 Max Planck Institute for Intelligent Systems",
|
| 105 |
+
"bbox": [
|
| 106 |
+
310,
|
| 107 |
+
241,
|
| 108 |
+
671,
|
| 109 |
+
258
|
| 110 |
+
],
|
| 111 |
+
"page_idx": 0
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"type": "text",
|
| 115 |
+
"text": "<sup>7</sup> University of Chinese Academy of Sciences <sup>8</sup> Aerospace Information Research Institute, CAS",
|
| 116 |
+
"bbox": [
|
| 117 |
+
99,
|
| 118 |
+
258,
|
| 119 |
+
875,
|
| 120 |
+
275
|
| 121 |
+
],
|
| 122 |
+
"page_idx": 0
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"type": "text",
|
| 126 |
+
"text": "Abstract",
|
| 127 |
+
"text_level": 1,
|
| 128 |
+
"bbox": [
|
| 129 |
+
233,
|
| 130 |
+
311,
|
| 131 |
+
312,
|
| 132 |
+
325
|
| 133 |
+
],
|
| 134 |
+
"page_idx": 0
|
| 135 |
+
},
|
| 136 |
+
{
|
| 137 |
+
"type": "text",
|
| 138 |
+
"text": "Human-annotated attributes serve as powerful semantic embeddings in zero-shot learning. However, their annotation process is labor-intensive and needs expert supervision. Current unsupervised semantic embeddings, i.e., word embeddings, enable knowledge transfer between classes. However, word embeddings do not always reflect visual similarities and result in inferior zero-shot performance. We propose to discover semantic embeddings containing discriminative visual properties for zero-shot learning, without requiring any human annotation. Our model visually divides a set of images from seen classes into clusters of local image regions according to their visual similarity, and further imposes their class discrimination and semantic relatedness. To associate these clusters with previously unseen classes, we use external knowledge, e.g., word embeddings and propose a novel class relation discovery module. Through quantitative and qualitative evaluation, we demonstrate that our model discovers semantic embeddings that model the visual properties of both seen and unseen classes. Furthermore, we demonstrate on three benchmarks that our visually-grounded semantic embeddings further improve performance over word embeddings across various ZSL models by a large margin. Code is available at https://github.com/wenjiaXu/VGSE",
|
| 139 |
+
"bbox": [
|
| 140 |
+
75,
|
| 141 |
+
343,
|
| 142 |
+
472,
|
| 143 |
+
691
|
| 144 |
+
],
|
| 145 |
+
"page_idx": 0
|
| 146 |
+
},
|
| 147 |
+
{
|
| 148 |
+
"type": "text",
|
| 149 |
+
"text": "1. Introduction",
|
| 150 |
+
"text_level": 1,
|
| 151 |
+
"bbox": [
|
| 152 |
+
76,
|
| 153 |
+
722,
|
| 154 |
+
207,
|
| 155 |
+
736
|
| 156 |
+
],
|
| 157 |
+
"page_idx": 0
|
| 158 |
+
},
|
| 159 |
+
{
|
| 160 |
+
"type": "text",
|
| 161 |
+
"text": "Semantic embeddings aggregated for every class live in a vector space that associates different classes even when visual examples of these classes are not available. Therefore, they facilitate the knowledge transfer in zero-shot learning (ZSL) [1,28,42,59] and are used as side-information in other computer vision tasks like fashion trend forecast [4,23,64], face recognition and manipulation [11,27,29], and domain adaptation [10, 24].",
|
| 162 |
+
"bbox": [
|
| 163 |
+
75,
|
| 164 |
+
748,
|
| 165 |
+
468,
|
| 166 |
+
868
|
| 167 |
+
],
|
| 168 |
+
"page_idx": 0
|
| 169 |
+
},
|
| 170 |
+
{
|
| 171 |
+
"type": "text",
|
| 172 |
+
"text": "Human annotated attributes [19, 36, 55], characteristic properties of objects annotated by human experts, are widely",
|
| 173 |
+
"bbox": [
|
| 174 |
+
76,
|
| 175 |
+
869,
|
| 176 |
+
468,
|
| 177 |
+
901
|
| 178 |
+
],
|
| 179 |
+
"page_idx": 0
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"type": "image",
|
| 183 |
+
"img_path": "images/86f7700bbed03b3a7cb2d744abd8bbbdfda834a672109ee54be6219246ed40d2.jpg",
|
| 184 |
+
"image_caption": [
|
| 185 |
+
"Human-Annotated Attributes"
|
| 186 |
+
],
|
| 187 |
+
"image_footnote": [],
|
| 188 |
+
"bbox": [
|
| 189 |
+
501,
|
| 190 |
+
311,
|
| 191 |
+
691,
|
| 192 |
+
435
|
| 193 |
+
],
|
| 194 |
+
"page_idx": 0
|
| 195 |
+
},
|
| 196 |
+
{
|
| 197 |
+
"type": "image",
|
| 198 |
+
"img_path": "images/d501fec83643f07dbcf21593e486ef902160dbd1e2bb84cac77a4debe8c77dda.jpg",
|
| 199 |
+
"image_caption": [
|
| 200 |
+
"Semantic Embedding Discovery by VGSE",
|
| 201 |
+
"Figure 1. Human-annotated attributes (left) are labor-intensive to collect, and may neglect some local visual properties shared between classes. We propose to discover semantic embeddings via visually clustering image patches and predicting the class relations."
|
| 202 |
+
],
|
| 203 |
+
"image_footnote": [],
|
| 204 |
+
"bbox": [
|
| 205 |
+
705,
|
| 206 |
+
314,
|
| 207 |
+
887,
|
| 208 |
+
434
|
| 209 |
+
],
|
| 210 |
+
"page_idx": 0
|
| 211 |
+
},
|
| 212 |
+
{
|
| 213 |
+
"type": "text",
|
| 214 |
+
"text": "used as semantic embeddings [61, 62]. However, obtaining attributes is often a labor-intensive two-step process. First, domain experts carefully design an attribute vocabulary, e.g., color, shape, etc., and then human annotators indicate the presence or absence of an attribute in an image or a class (as shown in Figure 1). The labeling effort devoted to human-annotated attributes hinders its applicability of performing zero-shot learning for more datasets in realistic settings [30].",
|
| 215 |
+
"bbox": [
|
| 216 |
+
496,
|
| 217 |
+
546,
|
| 218 |
+
893,
|
| 219 |
+
667
|
| 220 |
+
],
|
| 221 |
+
"page_idx": 0
|
| 222 |
+
},
|
| 223 |
+
{
|
| 224 |
+
"type": "text",
|
| 225 |
+
"text": "Previous works tackle this problem by using word embeddings for class names [31, 38], or semantic embeddings from online encyclopedia articles [3, 39, 67]. Though they model the semantic relation between classes without using human annotation, some of these relations may not be visually detectable by machines, resulting in a poor performance in zero-shot learning. Similarly, discriminative visual cues may not all be represented in those semantic embeddings.",
|
| 226 |
+
"bbox": [
|
| 227 |
+
496,
|
| 228 |
+
670,
|
| 229 |
+
893,
|
| 230 |
+
791
|
| 231 |
+
],
|
| 232 |
+
"page_idx": 0
|
| 233 |
+
},
|
| 234 |
+
{
|
| 235 |
+
"type": "text",
|
| 236 |
+
"text": "To this end, we propose the Visually-Grounded Semantic Embedding (VGSE) Network to discover semantic embeddings with minimal human supervision (we only use category labels for seen class images). Our network explicitly explores visual clusters that relate image regions from different categories, which is useful for knowledge transfer between classes under zero-shot learning settings (see our",
|
| 237 |
+
"bbox": [
|
| 238 |
+
496,
|
| 239 |
+
795,
|
| 240 |
+
893,
|
| 241 |
+
900
|
| 242 |
+
],
|
| 243 |
+
"page_idx": 0
|
| 244 |
+
},
|
| 245 |
+
{
|
| 246 |
+
"type": "aside_text",
|
| 247 |
+
"text": "arXiv:2203.10444v2 [cs.CV] 26 May 2023",
|
| 248 |
+
"bbox": [
|
| 249 |
+
22,
|
| 250 |
+
255,
|
| 251 |
+
58,
|
| 252 |
+
705
|
| 253 |
+
],
|
| 254 |
+
"page_idx": 0
|
| 255 |
+
},
|
| 256 |
+
{
|
| 257 |
+
"type": "text",
|
| 258 |
+
"text": "learnt clusters in Figure 1). To fully unearth the visual properties shared across different categories, our model discovers semantic embeddings by assigning image patches into various clusters according to their visual similarity. Besides, we further impose class discrimination and semantic relatedness of the semantic embeddings, to benefit their ability in transferring knowledge between classes in ZSL.",
|
| 259 |
+
"bbox": [
|
| 260 |
+
75,
|
| 261 |
+
90,
|
| 262 |
+
470,
|
| 263 |
+
196
|
| 264 |
+
],
|
| 265 |
+
"page_idx": 1
|
| 266 |
+
},
|
| 267 |
+
{
|
| 268 |
+
"type": "text",
|
| 269 |
+
"text": "To sum up, our work makes the following contributions. (1) We propose a visually-grounded semantic embedding (VGSE) network that learns visual clusters from seen classes, and automatically predicts the semantic embeddings for each category by building the relationship between seen and unseen classes given unsupervised external knowledge sources. (2) On three zero-shot learning benchmarks (i.e. AWA2, CUB, and SUN), our learned VGSE semantic embeddings consistently improve the performance of word embeddings over five SOTA methods. (3) Through qualitative evaluation and user study, we demonstrate that our VGSE embeddings contain rich visual information like fine-grained attributes, and convey human-understandable semantics that facilitates knowledge transfer between classes.",
|
| 270 |
+
"bbox": [
|
| 271 |
+
75,
|
| 272 |
+
198,
|
| 273 |
+
473,
|
| 274 |
+
411
|
| 275 |
+
],
|
| 276 |
+
"page_idx": 1
|
| 277 |
+
},
|
| 278 |
+
{
|
| 279 |
+
"type": "text",
|
| 280 |
+
"text": "2. Related Work",
|
| 281 |
+
"text_level": 1,
|
| 282 |
+
"bbox": [
|
| 283 |
+
76,
|
| 284 |
+
428,
|
| 285 |
+
218,
|
| 286 |
+
444
|
| 287 |
+
],
|
| 288 |
+
"page_idx": 1
|
| 289 |
+
},
|
| 290 |
+
{
|
| 291 |
+
"type": "text",
|
| 292 |
+
"text": "Zero-shot Learning aims to classify images from novel classes that do not appear during training. Existing ZSL methods usually assume that both the seen and unseen classes share a common semantic space, thus the key insight of performing ZSL is to transfer knowledge from seen classes to unseen classes. To assign the image to a semantic class embedding, many classical approaches learn a compatibility function to associate visual and semantic space [1, 20, 48, 58]. Recent works mainly focus on synthesizing image features or classifier weights with a generative model [43, 60, 61], or training enhanced image features extractors with visual attention [66, 68] or local prototypes [62].",
|
| 293 |
+
"bbox": [
|
| 294 |
+
75,
|
| 295 |
+
457,
|
| 296 |
+
470,
|
| 297 |
+
638
|
| 298 |
+
],
|
| 299 |
+
"page_idx": 1
|
| 300 |
+
},
|
| 301 |
+
{
|
| 302 |
+
"type": "text",
|
| 303 |
+
"text": "Semantic embeddings are crucial in relating different categories with shared characteristics, i.e., the semantic space. Despite their importance, semantic embeddings are relatively under-explored in zero-shot learning. Human-annotated attributes [19, 36, 55, 59], i.e., the properties of objects such as color and shape, are the most commonly used semantic embeddings in zero-shot learning. Though the attributes can be discriminative for each class, their annotation process is labor-intensive and require expert knowledge [50, 55, 65]. We propose to discover visual properties through patch-level clustering over image datasets, and predict semantic embeddings automatically, where no additional human annotation is required except for the class labels of seen class images.",
|
| 304 |
+
"bbox": [
|
| 305 |
+
75,
|
| 306 |
+
640,
|
| 307 |
+
470,
|
| 308 |
+
835
|
| 309 |
+
],
|
| 310 |
+
"page_idx": 1
|
| 311 |
+
},
|
| 312 |
+
{
|
| 313 |
+
"type": "text",
|
| 314 |
+
"text": "Semantic Embeddings with Minimal Supervision is drawing attention in image classification [6, 9, 26, 40, 45], transfer learning [10, 37, 54] and low-shot learning problems [3, 25, 33, 44, 50, 65]. Semantic embeddings collected",
|
| 315 |
+
"bbox": [
|
| 316 |
+
75,
|
| 317 |
+
839,
|
| 318 |
+
470,
|
| 319 |
+
901
|
| 320 |
+
],
|
| 321 |
+
"page_idx": 1
|
| 322 |
+
},
|
| 323 |
+
{
|
| 324 |
+
"type": "text",
|
| 325 |
+
"text": "from text corpora are alternatives to manual annotations, which include word embeddings learned from large corpora [31, 38, 49, 63], semantic relations such as knowledge graphs [9, 26, 56], and semantic similarities [12, 57], etc. More recently, [3, 39, 41, 67] collect attribute-class associations from online encyclopedia articles that describe each category. The semantic similarity can be encoded by a taxonomical hierarchy or by incorporating co-occurrence statistics of words within the document. However, this may not reflect visual similarity, e.g., sheep is semantically close to dog since they often co-occur in online articles, while visually sheep is closer to a deer. We focus on discovering visually-grounded semantic embeddings in the image space, and further incorporate the semantic relations between classes into our semantic embedding for better zero-shot knowledge transfer.",
|
| 326 |
+
"bbox": [
|
| 327 |
+
496,
|
| 328 |
+
90,
|
| 329 |
+
893,
|
| 330 |
+
318
|
| 331 |
+
],
|
| 332 |
+
"page_idx": 1
|
| 333 |
+
},
|
| 334 |
+
{
|
| 335 |
+
"type": "text",
|
| 336 |
+
"text": "Learning Visual Properties from Image Patches. Previous attempts for discovering middle-level representations for classification include exploring image-level embeddings by learning binary codes or classe representation [6, 40, 51], and further introducing humans in the loop to discover localized and nameable attributes [18, 35]. However, those methods discover properties depicted in the whole image, which might result in a combination of several semantics covering several objects (parts) that are hard to interpret [35]. Visual transformer [17] and BagNets [8] showed that image patches can work as powerful visual words conveying visual cues for class discrimination. Bag of visual words (BOVW) models [13, 47] propose to cluster image patches to learn a codebook and form image representations. However, BOVW extracts hand-crafted features followed by k-means clustering, while we learn clustering in an end-to-end manner via deep neural networks. Considering the above problem, we propose to learn visual properties by clustering image patches, and predict the semantic embeddings with the visual properties depicted by patch clusters.",
|
| 337 |
+
"bbox": [
|
| 338 |
+
496,
|
| 339 |
+
319,
|
| 340 |
+
895,
|
| 341 |
+
622
|
| 342 |
+
],
|
| 343 |
+
"page_idx": 1
|
| 344 |
+
},
|
| 345 |
+
{
|
| 346 |
+
"type": "text",
|
| 347 |
+
"text": "More closely related to our work are the ones learning discriminative image regions that can represent each class through clustering of local patches [15, 16, 45, 46], e.g., finding representative elements to discriminate one class from others. Instead of picking up the most salient patches in each class, we aim to learn visual properties that are shared among different classes for most of the image patches appearing in the dataset. Besides, unlike some above methods that divide an image into a grid of square patches, we propose to use segmentation-based region proposals to obtain semantic image regions (e.g., the entire head could represent one semantic region).",
|
| 348 |
+
"bbox": [
|
| 349 |
+
496,
|
| 350 |
+
622,
|
| 351 |
+
893,
|
| 352 |
+
803
|
| 353 |
+
],
|
| 354 |
+
"page_idx": 1
|
| 355 |
+
},
|
| 356 |
+
{
|
| 357 |
+
"type": "text",
|
| 358 |
+
"text": "3. Visually-Grounded Semantic Embedding",
|
| 359 |
+
"text_level": 1,
|
| 360 |
+
"bbox": [
|
| 361 |
+
498,
|
| 362 |
+
814,
|
| 363 |
+
870,
|
| 364 |
+
832
|
| 365 |
+
],
|
| 366 |
+
"page_idx": 1
|
| 367 |
+
},
|
| 368 |
+
{
|
| 369 |
+
"type": "text",
|
| 370 |
+
"text": "We are interested in the (generalized) zero-shot learning task where the training and test classes are disjoint sets. The training set $\\{(x_{n},y_{n})|x_{n}\\in X^{s},y_{n}\\in Y^{s}\\}_{n = 1}^{N_{s}}$ consists of images $x_{n}$ and their labels $y_{n}$ from the seen classes $Y^{s}$ . In",
|
| 371 |
+
"bbox": [
|
| 372 |
+
496,
|
| 373 |
+
839,
|
| 374 |
+
893,
|
| 375 |
+
902
|
| 376 |
+
],
|
| 377 |
+
"page_idx": 1
|
| 378 |
+
},
|
| 379 |
+
{
|
| 380 |
+
"type": "image",
|
| 381 |
+
"img_path": "images/70c77aa9f2288ad46578d44a88f1e9abd75eda7af7ed6a98ef6e5112070c880a.jpg",
|
| 382 |
+
"image_caption": [
|
| 383 |
+
"Figure 2. Our visually-grounded semantic embedding network consists of two modules. The Patch Clustering (PC) module learns clusters from patch images, and predicts semantic embeddings for seen classes with their images. The Class Relation (CR) module predicts the unseen class embeddings $\\phi^{VGSE}(y_m)$ using unseen and seen class relations learned from external knowledge, e.g., word2vec. For instance, the embedding for unseen class sheep is predicted using the semantic embeddings of the seen classes, e.g., antelope, cow, deer, and so on."
|
| 384 |
+
],
|
| 385 |
+
"image_footnote": [],
|
| 386 |
+
"bbox": [
|
| 387 |
+
109,
|
| 388 |
+
89,
|
| 389 |
+
529,
|
| 390 |
+
319
|
| 391 |
+
],
|
| 392 |
+
"page_idx": 2
|
| 393 |
+
},
|
| 394 |
+
{
|
| 395 |
+
"type": "image",
|
| 396 |
+
"img_path": "images/d5217b9c0543df29f1584b39231d45a79584cfe8bc9c693fe4b872afd02c1216.jpg",
|
| 397 |
+
"image_caption": [],
|
| 398 |
+
"image_footnote": [],
|
| 399 |
+
"bbox": [
|
| 400 |
+
545,
|
| 401 |
+
90,
|
| 402 |
+
864,
|
| 403 |
+
321
|
| 404 |
+
],
|
| 405 |
+
"page_idx": 2
|
| 406 |
+
},
|
| 407 |
+
{
|
| 408 |
+
"type": "text",
|
| 409 |
+
"text": "the ZSL setting, test images are classified into unseen classes $Y^{u}$ , and in the GZSL setting, into both $Y^{s}$ and $Y^{u}$ with the help of a semantic embedding space, e.g., human annotated attributes. Since human-annotated attributes are costly to obtain, while prior unsupervised semantic embeddings are incomplete to describe the rich visual world, we propose to automatically discover a set of $D_{v}$ visual clusters as the semantic embedding, denoted by $\\Phi^{VGSE} \\in \\mathbb{R}^{(|Y^{u}| + |Y^{s}|) \\times D_{v}}$ . The semantic embeddings for seen classes $\\{\\phi^{VGSE}(y) | y \\in Y^{s}\\}$ , describing diverse visual properties of each category, are learned on seen classes images $X^{s}$ . The semantic embeddings for unseen classes $\\{\\phi^{VGSE}(y) | y \\in Y^{u}\\}$ is predicted with the help of unsupervised word embeddings, e.g., w2v embeddings for class names $\\Phi^{w} \\in \\mathbb{R}^{(|Y^{u}| + |Y^{s}|) \\times D_{w}}$ .",
|
| 410 |
+
"bbox": [
|
| 411 |
+
75,
|
| 412 |
+
416,
|
| 413 |
+
472,
|
| 414 |
+
627
|
| 415 |
+
],
|
| 416 |
+
"page_idx": 2
|
| 417 |
+
},
|
| 418 |
+
{
|
| 419 |
+
"type": "text",
|
| 420 |
+
"text": "Our Visually-Grounded Semantic Embedding (VGSE) Network (see Figure 2) consists of two main modules. (1) The Patch Clustering (PC) module takes the training dataset as input, and clusters the image patches into $D_v$ visual clusters. Given one input image $x_n$ , PC can predict the cluster probability $a_n \\in \\mathbb{R}^{D_v}$ indicating how likely the image would contain the visual property appearing in each cluster. (2) Since unseen class images cannot be observed during training, we propose the Class Relation (CR) module to infer the semantic embeddings of unseen classes. Finally, the learned semantic embedding $\\Phi^{\\mathrm{VGSE}}$ can be used to perform downstream tasks, e.g., Zero-Shot Learning.",
|
| 421 |
+
"bbox": [
|
| 422 |
+
75,
|
| 423 |
+
628,
|
| 424 |
+
470,
|
| 425 |
+
811
|
| 426 |
+
],
|
| 427 |
+
"page_idx": 2
|
| 428 |
+
},
|
| 429 |
+
{
|
| 430 |
+
"type": "text",
|
| 431 |
+
"text": "3.1. Patch Clustering (PC) Module",
|
| 432 |
+
"text_level": 1,
|
| 433 |
+
"bbox": [
|
| 434 |
+
76,
|
| 435 |
+
825,
|
| 436 |
+
346,
|
| 437 |
+
843
|
| 438 |
+
],
|
| 439 |
+
"page_idx": 2
|
| 440 |
+
},
|
| 441 |
+
{
|
| 442 |
+
"type": "text",
|
| 443 |
+
"text": "Patch image generation. Patch-level embeddings allow us to explore the visual properties that appear in local image regions [17, 55], e.g., the shape and texture of animal",
|
| 444 |
+
"bbox": [
|
| 445 |
+
75,
|
| 446 |
+
854,
|
| 447 |
+
470,
|
| 448 |
+
902
|
| 449 |
+
],
|
| 450 |
+
"page_idx": 2
|
| 451 |
+
},
|
| 452 |
+
{
|
| 453 |
+
"type": "text",
|
| 454 |
+
"text": "body parts or the objects in scenes. To obtain image patches that cover the entire semantic image region (e.g. an animal head), we segment an image into regularly shaped regions via an unsupervised compact watershed segmentation algorithm [32]. As shown in Figure 2, for each image $x_{n}$ , we find the smallest bounding box that fully covers each segment and crop $x$ into $N_{t}$ patches $\\{x_{nt}\\}_{t=1}^{N_{t}}$ that cover different parts of the image. The number of patches $N_{t}$ is empirically set to be around 9, as we observed in initial experiments that larger patches may include too many attributes, while smaller patches will be too tiny to contain any visual attribute. In this way, we reconstruct our training set consisting of image patches $\\{(x_{nt},y_{n}) | x_{nt} \\in X^{sp}, y_{n} \\in Y^{s}\\}_{n=1}^{N_{s}}$ , here $|X^{sp}| = N_{s}N_{t}$ , and $N_{s}$ is the train set size.",
|
| 455 |
+
"bbox": [
|
| 456 |
+
496,
|
| 457 |
+
416,
|
| 458 |
+
893,
|
| 459 |
+
628
|
| 460 |
+
],
|
| 461 |
+
"page_idx": 2
|
| 462 |
+
},
|
| 463 |
+
{
|
| 464 |
+
"type": "text",
|
| 465 |
+
"text": "Patch clustering. Our patch clustering module is a differentiable middle layer, that simultaneously learns image patch representations and clustering. As shown in Figure 2 (left), we start from a deep neural network that extracts patch feature $\\theta(x_{nt}) \\in \\mathbb{R}^{D_f}$ , where we use a ResNet [22] pretrained on ImageNet [14] as in other ZSL models [59, 61]. Afterwards, a clustering layer $H: \\mathbb{R}^{D_f} \\to \\mathbb{R}^{D_v}$ converts the feature representation into cluster scores:",
|
| 466 |
+
"bbox": [
|
| 467 |
+
496,
|
| 468 |
+
630,
|
| 469 |
+
893,
|
| 470 |
+
752
|
| 471 |
+
],
|
| 472 |
+
"page_idx": 2
|
| 473 |
+
},
|
| 474 |
+
{
|
| 475 |
+
"type": "equation",
|
| 476 |
+
"text": "\n$$\na _ {n t} = H \\circ \\theta (x _ {n t}), \\tag {1}\n$$\n",
|
| 477 |
+
"text_format": "latex",
|
| 478 |
+
"bbox": [
|
| 479 |
+
625,
|
| 480 |
+
765,
|
| 481 |
+
893,
|
| 482 |
+
781
|
| 483 |
+
],
|
| 484 |
+
"page_idx": 2
|
| 485 |
+
},
|
| 486 |
+
{
|
| 487 |
+
"type": "text",
|
| 488 |
+
"text": "where $a_{nt}^{k}$ (the $k$ -th element of $a_{nt}$ ) indicates the probability of assigning image patch $x_{nt}$ to cluster $k$ , e.g., the patch clusters of spotty fur, fluffy head in Figure 2.",
|
| 489 |
+
"bbox": [
|
| 490 |
+
496,
|
| 491 |
+
792,
|
| 492 |
+
890,
|
| 493 |
+
840
|
| 494 |
+
],
|
| 495 |
+
"page_idx": 2
|
| 496 |
+
},
|
| 497 |
+
{
|
| 498 |
+
"type": "text",
|
| 499 |
+
"text": "A pretext task can be adopted to obtain semantically meaningful representations [21, 34, 53] in an unsupervised manner. Our pretext task [53] enforces the image patch $x_{nt}$ and its neighbors being predicted to the same clusters. We",
|
| 500 |
+
"bbox": [
|
| 501 |
+
496,
|
| 502 |
+
840,
|
| 503 |
+
893,
|
| 504 |
+
900
|
| 505 |
+
],
|
| 506 |
+
"page_idx": 2
|
| 507 |
+
},
|
| 508 |
+
{
|
| 509 |
+
"type": "text",
|
| 510 |
+
"text": "retrieve nearest patch neighbors of $x_{nt}$ as $X_{nb}^{sp}$ by the $\\mathcal{L}_2$ distance of patch features $\\| \\theta(x_{nt}) - \\theta(x_i)\\|_2$ , where $x_i \\in X^{sp}$ and $x_i \\neq x_{nt}$ . The clustering loss is defined as",
|
| 511 |
+
"bbox": [
|
| 512 |
+
76,
|
| 513 |
+
90,
|
| 514 |
+
472,
|
| 515 |
+
137
|
| 516 |
+
],
|
| 517 |
+
"page_idx": 3
|
| 518 |
+
},
|
| 519 |
+
{
|
| 520 |
+
"type": "equation",
|
| 521 |
+
"text": "\n$$\n\\mathcal {L} _ {c l u} = - \\sum_ {x _ {n t} \\in X ^ {s p}} \\sum_ {x _ {i} \\in X _ {n b} ^ {s p}} \\log \\left(a _ {n t} ^ {T} a _ {i}\\right), \\tag {2}\n$$\n",
|
| 522 |
+
"text_format": "latex",
|
| 523 |
+
"bbox": [
|
| 524 |
+
145,
|
| 525 |
+
145,
|
| 526 |
+
470,
|
| 527 |
+
180
|
| 528 |
+
],
|
| 529 |
+
"page_idx": 3
|
| 530 |
+
},
|
| 531 |
+
{
|
| 532 |
+
"type": "text",
|
| 533 |
+
"text": "where $a_{i} = H\\circ \\theta (x_{i})$ . $\\mathcal{L}_{clu}$ imposes consistent cluster assignment for $x_{nt}$ and its neighbors. To avoid all images being assigned to the same cluster, we follow [53] to add an entropy penalty as follows:",
|
| 534 |
+
"bbox": [
|
| 535 |
+
76,
|
| 536 |
+
190,
|
| 537 |
+
468,
|
| 538 |
+
251
|
| 539 |
+
],
|
| 540 |
+
"page_idx": 3
|
| 541 |
+
},
|
| 542 |
+
{
|
| 543 |
+
"type": "equation",
|
| 544 |
+
"text": "\n$$\n\\mathcal {L} _ {p e l} = \\sum_ {k = 1} ^ {D _ {v}} \\bar {a} _ {n t} ^ {k} \\log \\bar {a} _ {n t} ^ {k}, \\quad \\bar {a} _ {n t} ^ {k} = \\frac {1}{N _ {s} N _ {t}} \\sum_ {x _ {n t} \\in X ^ {s p}} a _ {n t} ^ {k}, \\tag {3}\n$$\n",
|
| 545 |
+
"text_format": "latex",
|
| 546 |
+
"bbox": [
|
| 547 |
+
84,
|
| 548 |
+
261,
|
| 549 |
+
470,
|
| 550 |
+
303
|
| 551 |
+
],
|
| 552 |
+
"page_idx": 3
|
| 553 |
+
},
|
| 554 |
+
{
|
| 555 |
+
"type": "text",
|
| 556 |
+
"text": "ensuring that images are spread uniformly over all clusters.",
|
| 557 |
+
"bbox": [
|
| 558 |
+
76,
|
| 559 |
+
311,
|
| 560 |
+
467,
|
| 561 |
+
327
|
| 562 |
+
],
|
| 563 |
+
"page_idx": 3
|
| 564 |
+
},
|
| 565 |
+
{
|
| 566 |
+
"type": "text",
|
| 567 |
+
"text": "Class discrimination. To impose class discrimination information into the learnt clusters, we propose to apply an cluster-to-class layer $Q: \\mathbb{R}^{D_v} \\to \\mathbb{R}^{|Y^s|}$ to map the cluster prediction of each image to the class probability, i.e., $p(y|x_{nt}) = \\text{softmax}(Q \\circ \\theta(x_{nt}))$ . We train this module with the following cross-entropy loss,",
|
| 568 |
+
"bbox": [
|
| 569 |
+
76,
|
| 570 |
+
329,
|
| 571 |
+
468,
|
| 572 |
+
421
|
| 573 |
+
],
|
| 574 |
+
"page_idx": 3
|
| 575 |
+
},
|
| 576 |
+
{
|
| 577 |
+
"type": "equation",
|
| 578 |
+
"text": "\n$$\n\\mathcal {L} _ {c l s} = - \\log \\frac {\\exp (p (y _ {n} | x _ {n t}))}{\\sum_ {\\hat {y} \\in Y ^ {s}} \\exp (p (\\hat {y} | x _ {n t}))}. \\tag {4}\n$$\n",
|
| 579 |
+
"text_format": "latex",
|
| 580 |
+
"bbox": [
|
| 581 |
+
142,
|
| 582 |
+
429,
|
| 583 |
+
468,
|
| 584 |
+
465
|
| 585 |
+
],
|
| 586 |
+
"page_idx": 3
|
| 587 |
+
},
|
| 588 |
+
{
"type": "text",
"text": "Semantic relatedness. We further encourage the learned visual clusters to be transferable between classes, to benefit the downstream zero-shot learning tasks. We learn clusters shared between semantically related classes, e.g., horse shares more semantic information with deer than with dolphin. We implement this by mapping the learned cluster probability to the semantic space constructed by w2v embeddings $\\Phi^w$. The cluster-to-semantic layer $S: \\mathbb{R}^{D_v} \\to \\mathbb{R}^{D_w}$ is trained by regressing the w2v embedding for each class,",
"bbox": [76, 474, 468, 611],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\n\\mathcal{L}_{sem} = \\left\\| S \\circ a_{nt} - \\phi^{w}(y_{n}) \\right\\|_{2}, \\tag{5}\n$$\n",
"text_format": "latex",
"bbox": [165, 621, 468, 638],
"page_idx": 3
},
{
"type": "text",
"text": "where $y_{n}$ denotes the ground truth class, and $\\phi^w(y_n) \\in \\mathbb{R}^{D_w}$ represents the w2v embedding for the class $y_{n}$.",
"bbox": [76, 647, 468, 678],
"page_idx": 3
},
{
"type": "text",
"text": "The overall objective for training the model is as follows:",
"bbox": [96, 678, 468, 693],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\n\\mathcal{L} = \\mathcal{L}_{clu} + \\lambda \\mathcal{L}_{pel} + \\beta \\mathcal{L}_{cls} + \\gamma \\mathcal{L}_{sem}. \\tag{6}\n$$\n",
"text_format": "latex",
"bbox": [143, 708, 468, 724],
"page_idx": 3
},
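Putting Eqs. 4-6 together, the sketch below shows how the cluster-to-class layer $Q$, the cluster-to-semantic layer $S$, and the weighted total objective could be wired up. The linear parameterization of $Q$ and $S$ and all names are our assumptions, so treat this as an illustration rather than the paper's implementation.

```python
import torch.nn as nn
import torch.nn.functional as F

class VGSEHeads(nn.Module):
    """Hypothetical cluster-to-class (Q, Eq. 4) and cluster-to-semantic
    (S, Eq. 5) layers on top of the cluster probabilities a_nt."""

    def __init__(self, d_v, n_seen_classes, d_w):
        super().__init__()
        self.Q = nn.Linear(d_v, n_seen_classes)  # R^{D_v} -> R^{|Y^s|}
        self.S = nn.Linear(d_v, d_w)             # R^{D_v} -> R^{D_w}

    def losses(self, a_nt, labels, w2v_of_labels):
        # Eq. 4: cross-entropy over the seen classes.
        loss_cls = F.cross_entropy(self.Q(a_nt), labels)
        # Eq. 5: L2 regression onto the w2v embedding of the true class.
        loss_sem = (self.S(a_nt) - w2v_of_labels).norm(dim=-1).mean()
        return loss_cls, loss_sem

# Eq. 6, with loss_clu and loss_pel from the clustering sketch above:
# loss = loss_clu + lam * loss_pel + beta * loss_cls + gamma * loss_sem
```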
|
| 645 |
+
{
"type": "text",
"text": "Predict seen semantic embeddings. After we have learned the visual clusters, given one input image patch $x_{nt}$, the model extracts the feature $\\theta(x_{nt})$ and then predicts the cluster probability $a_{nt} = H \\circ \\theta(x_{nt}) \\in \\mathbb{R}^{D_v}$, where each dimension indicates the likelihood of the image patch $x_{nt}$ being assigned to a certain cluster learned by this module.",
"bbox": [76, 732, 468, 821],
"page_idx": 3
},
{
"type": "text",
"text": "The image embedding $a_{n}\\in \\mathbb{R}^{D_{v}}$ for $x_{n}$ is calculated by averaging the patch embeddings in that image:",
"bbox": [76, 821, 468, 853],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\na_{n} = \\frac{1}{N_{t}} \\sum_{t=1}^{N_{t}} a_{nt}. \\tag{7}\n$$\n",
"text_format": "latex",
"bbox": [207, 862, 468, 902],
"page_idx": 3
},
{
"type": "text",
"text": "Similarly, we calculate the semantic embedding for $y_{n}$ by averaging the embeddings of all images belonging to $y_{n}$:",
"bbox": [500, 90, 890, 122],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\n\\phi^{VGSE}(y_{n}) = \\frac{1}{\\left| I_{i} \\right|} \\sum_{j \\in I_{i}} a_{j}, \\tag{8}\n$$\n",
"text_format": "latex",
"bbox": [609, 130, 890, 167],
"page_idx": 3
},
{
"type": "text",
"text": "where $I_{i}$ is the index set of all images belonging to class $y_{n}$, and $a_{j}$ denotes the image embedding of the $j$-th image.",
"bbox": [498, 176, 890, 207],
"page_idx": 3
},
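Eqs. 7-8 amount to two averaging steps: patches into image embeddings, then images into class embeddings. A small NumPy sketch follows; the array names and layout are chosen by us for illustration.

```python
import numpy as np

def class_embeddings(patch_embeddings, image_ids, image_labels):
    """Sketch of Eqs. 7-8; argument names and layout are assumptions.

    patch_embeddings: (P, D_v) cluster probabilities a_nt of all patches
    image_ids:        (P,) image index of each patch
    image_labels:     (N,) class label of each image
    """
    n_images = image_labels.shape[0]
    d_v = patch_embeddings.shape[1]
    # Eq. 7: image embedding = mean of its patch embeddings.
    a_img = np.zeros((n_images, d_v))
    for i in range(n_images):
        a_img[i] = patch_embeddings[image_ids == i].mean(axis=0)
    # Eq. 8: class embedding = mean of its image embeddings.
    classes = np.unique(image_labels)
    return {c: a_img[image_labels == c].mean(axis=0) for c in classes}
```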
|
| 713 |
+
{
"type": "text",
"text": "3.2. Class Relation (CR) Module",
"text_level": 1,
"bbox": [500, 215, 751, 231],
"page_idx": 3
},
{
"type": "text",
"text": "While seen semantic embeddings can be estimated from training images using Eq. 8, how to compute the unseen semantic embeddings is not straightforward, since their training images are not available. As semantically related categories share common properties, e.g., sheep and cow both live on grasslands, we propose to learn a Class Relation Module to formulate the similarity between seen classes $Y^{s}$ and unseen classes $Y^{u}$. In general, any external knowledge, e.g., word2vec [31, 38] or human-annotated attributes, can be utilized to formulate the relationship between two classes. Here we use word2vec learned from a large online corpus to minimize the human annotation effort. Below, we present two solutions to learn the class relations: (1) directly averaging the semantic embeddings of the neighboring seen classes in the word2vec space, (2) optimizing a similarity matrix between unseen and seen classes.",
"bbox": [498, 238, 893, 479],
"page_idx": 3
},
{
"type": "text",
"text": "Weighted Average (WAvg). For unseen class $y_{m}$, we first retrieve several nearest class neighbors among the seen classes by the similarity measured with the $\\mathcal{L}_2$ distance over the w2v embedding space, and we denote the neighbor class set as $Y_{nb}^{s}$. The semantic embedding vector for $y_{m}$ is calculated as the weighted combination [5] of seen semantic embeddings:",
"bbox": [498, 482, 893, 574],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\n\\phi^{VGSE}(y_{m}) = \\frac{1}{\\left| Y_{nb}^{s} \\right|} \\sum_{\\tilde{y} \\in Y_{nb}^{s}} \\operatorname{sim}(y_{m}, \\tilde{y}) \\cdot \\phi^{VGSE}(\\tilde{y}), \\tag{9}\n$$\n",
"text_format": "latex",
"bbox": [521, 582, 890, 621],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\n\\operatorname{sim}(y_{m}, \\tilde{y}) = \\exp\\left(-\\eta \\left\\| \\phi^{w}(y_{m}) - \\phi^{w}(\\tilde{y}) \\right\\|_{2}\\right), \\tag{10}\n$$\n",
"text_format": "latex",
"bbox": [524, 645, 890, 664],
"page_idx": 3
},
{
"type": "text",
"text": "where exp stands for the exponential function and $\\eta$ is a hyperparameter to adjust the similarity weight. We denote our semantic embeddings learned with the weighted average strategy as VGSE-WAvg.",
"bbox": [498, 672, 890, 733],
"page_idx": 3
},
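A compact sketch of VGSE-WAvg (Eqs. 9-10) follows, under our own naming; the only values taken from the paper are the reported settings of 5 neighbors and $\eta = 5$.

```python
import numpy as np

def wavg_unseen_embedding(w2v_unseen, w2v_seen, phi_seen, n_nb=5, eta=5.0):
    """Sketch of VGSE-WAvg; function and argument names are ours.

    w2v_unseen: (D_w,)    w2v embedding of the unseen class
    w2v_seen:   (S, D_w)  w2v embeddings of the seen classes
    phi_seen:   (S, D_v)  VGSE embeddings of the seen classes (Eq. 8)
    """
    dists = np.linalg.norm(w2v_seen - w2v_unseen, axis=1)
    nb = np.argsort(dists)[:n_nb]       # nearest seen classes in w2v space
    sim = np.exp(-eta * dists[nb])      # Eq. 10
    # Eq. 9: similarity-weighted average over the neighbor set.
    return (sim[:, None] * phi_seen[nb]).sum(axis=0) / n_nb
```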
|
| 782 |
+
{
"type": "text",
"text": "Similarity Matrix Optimization (SMO). Given the w2v embeddings $\\phi^w(Y^s) \\in \\mathbb{R}^{|Y^s| \\times D_w}$ of seen classes and the embedding $\\phi^w(y_m)$ of unseen class $y_m$, we learn a similarity mapping $r \\in \\mathbb{R}^{|Y^s|}$, where $r_i$ denotes the similarity between the unseen class $y_m$ and the $i$-th seen class. The similarity mapping is learned via the following optimization problem:",
"bbox": [498, 734, 893, 825],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\n\\min_{r} \\left\\| \\phi^{w}(y_{m}) - r^{T} \\phi^{w}(Y^{s}) \\right\\|_{2}\n$$\n",
"text_format": "latex",
"bbox": [566, 835, 771, 857],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\n\\text{s.t.} \\quad \\alpha < r < 1 \\quad \\text{and} \\quad \\sum_{i=1}^{|Y^{s}|} r_{i} = 1. \\tag{11}\n$$\n",
"text_format": "latex",
"bbox": [581, 861, 890, 902],
"page_idx": 3
},
{
"type": "text",
"text": "Here $\\alpha$ is the lower bound, which can be either 0 or $-1$, indicating whether we only learn positive class relations or learn negative relations as well. We base this mapping on the assumption that semantic embeddings follow linear analogy, e.g., $\\phi^w (\\mathrm{king}) - \\phi^w (\\mathrm{man}) + \\phi^w (\\mathrm{woman})\\approx \\phi^w (\\mathrm{queen})$, which holds for w2v embeddings and our semantic embeddings $\\phi^{VGSE}$. After the mapping is learned, we can predict the semantic embedding for the unseen class $y_{m}$ as:",
"bbox": [75, 90, 472, 212],
"page_idx": 4
},
{
"type": "equation",
"text": "\n$$\n\\phi^{VGSE}(y_{m}) = r^{T} \\phi^{VGSE}(Y^{s}), \\tag{12}\n$$\n",
"text_format": "latex",
"bbox": [179, 220, 470, 242],
"page_idx": 4
},
{
"type": "text",
"text": "where the value of each discovered semantic embedding for unseen class $y_{m}$ is the weighted sum of all seen class semantic embeddings. We denote our semantic embeddings learned with similarity matrix optimization (SMO) as VGSE-SMO.",
"bbox": [75, 250, 472, 311],
"page_idx": 4
},
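Eq. 11 is a small constrained least-squares problem per unseen class. One way to solve it (our choice of solver, not necessarily the paper's) is SLSQP from SciPy, followed by the transfer step of Eq. 12:

```python
import numpy as np
from scipy.optimize import minimize

def smo_unseen_embedding(w2v_unseen, w2v_seen, phi_seen, alpha=-1.0):
    """Sketch of VGSE-SMO (Eqs. 11-12); names and solver are assumptions.

    w2v_unseen: (D_w,)    w2v embedding of the unseen class
    w2v_seen:   (S, D_w)  w2v embeddings of the seen classes
    phi_seen:   (S, D_v)  VGSE embeddings of the seen classes
    alpha:      lower bound on r (-1 allows negative relations, 0 does not)
    """
    n_seen = w2v_seen.shape[0]

    def objective(r):  # Eq. 11 objective
        return np.linalg.norm(w2v_unseen - r @ w2v_seen)

    res = minimize(
        objective,
        x0=np.full(n_seen, 1.0 / n_seen),  # uniform initialization
        bounds=[(alpha, 1.0)] * n_seen,    # alpha < r < 1
        constraints=[{"type": "eq", "fun": lambda r: r.sum() - 1.0}],
        method="SLSQP",
    )
    return res.x @ phi_seen  # Eq. 12: transfer r to the VGSE space
```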
|
| 851 |
+
{
"type": "text",
"text": "4. Experiments",
"text_level": 1,
"bbox": [76, 324, 209, 342],
"page_idx": 4
},
{
"type": "text",
"text": "After introducing the datasets and experimental settings, we demonstrate that our VGSE outperforms unsupervised word embeddings over three benchmark datasets, and that this finding generalizes to five SOTA ZSL models (§4.1). With extensive ablation studies, we showcase that clustering with image patches is effective for learning the semantic embeddings, and demonstrate the effectiveness of the PC module and CR module (§4.2). In the end, we present visual clusters as qualitative results (§4.3, §4.4).",
"bbox": [75, 349, 472, 484],
"page_idx": 4
},
{
"type": "text",
"text": "Dataset. We validate our model on three ZSL benchmark datasets. AWA2 [59] is a coarse-grained dataset for animal categorization, containing 30,475 images from 50 classes, where 40 classes are seen and 10 are unseen. CUB [55] is a fine-grained dataset for bird classification, containing 11,788 images and 200 classes, where 150 classes are seen and 50 are unseen. SUN [36] is also a fine-grained dataset, for scene classification, with 14,340 images coming from 717 scene classes, where 645 classes are seen and 72 are unseen.",
"bbox": [75, 488, 472, 638],
"page_idx": 4
},
{
"type": "text",
"text": "Implementation details. In the patch clustering (PC) module, we learn the seen semantic embeddings with the training set (seen classes) proposed by [59]; the unseen-class embeddings are predicted in the class relation (CR) module without seeing unseen images. We adopt ResNet50 [22] pretrained on ImageNet1K [14] as the backbone. The cluster number $D_v$ is set to 150 for all three datasets. For the Weighted Average module in Eq. 9, we set $\\eta$ to 5 and use 5 neighbors for all datasets. For the similarity matrix optimization in Eq. 11, we set $\\alpha$ to -1 for AWA2 and CUB, and to 0 for SUN. More details are in the supplementary.",
"bbox": [75, 641, 472, 808],
"page_idx": 4
},
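For reference, the stated hyperparameters can be collected in one place; the dictionary below is a hypothetical grouping of ours, with the values taken from the paragraph above.

```python
# Hypothetical config; the keys are ours, the values come from the paper.
VGSE_CONFIG = {
    "backbone": "resnet50",                # pretrained on ImageNet1K
    "cluster_number_Dv": 150,              # same for AWA2, CUB, and SUN
    "wavg": {"eta": 5, "n_neighbors": 5},  # Eq. 9 / Eq. 10
    "smo_alpha": {"AWA2": -1, "CUB": -1, "SUN": 0},  # Eq. 11 lower bound
}
```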
|
| 896 |
+
{
"type": "text",
"text": "Semantic embeddings for ZSL. To be fair, we compare our VGSE semantic embeddings with other alternatives using the same image features and ZSL models. All the image features are extracted from ResNet101 [22] pretrained on ImageNet [14]. We follow the data split provided by [59]. The semantic embeddings are L2 normalized following [59]. All",
"bbox": [75, 810, 472, 901],
"page_idx": 4
},
{
"type": "text",
"text": "ablation studies use SJE [2,62] as the ZSL model as it is simple to train. Besides, we verify the generalization ability of our semantic embeddings over five state-of-the-art ZSL models with their official code. The non-generative models include SJE [2], APN [62], and GEM-ZSL [28], which learn a compatibility function between image and semantic embeddings. The generative approaches consist of CADA-VAE [43] and f-VAEGAN-D2 [61], which learn a generative model that synthesizes image features of unseen classes from their semantic embeddings. Note that for all ZSL models, we use the same hyperparameters as proposed in their original papers for all semantic embeddings, with no hyperparameter tuning.",
"bbox": [496, 90, 893, 273],
"page_idx": 4
},
{
"type": "text",
"text": "4.1. Comparing with the State-of-the-Art",
"text_level": 1,
"bbox": [498, 282, 818, 299],
"page_idx": 4
},
{
"type": "text",
"text": "We first compare our semantic embeddings VGSE-SMO with the unsupervised word embeddings w2v [31] on three benchmark datasets and five ZSL models. We further compare ours with other state-of-the-art methods that learn semantic embeddings with less human annotation.",
"bbox": [496, 306, 893, 381],
"page_idx": 4
},
{
"type": "text",
"text": "VGSE surpasses w2v by a large margin. The results shown in Table 1 demonstrate that our VGSE-SMO semantic embeddings significantly outperform the word embedding w2v on all datasets and all ZSL models. Considering the non-generative ZSL models, VGSE-SMO outperforms w2v on all three datasets by a large margin. In particular, on the AWA2 dataset, when coupled with GEM-ZSL, our VGSE-SMO boosts the ZSL performance of w2v from $50.2\\%$ to $58.0\\%$. On the fine-grained datasets CUB and SUN, VGSE-SMO achieves even higher accuracy boosts. For example, when coupled with the APN model, VGSE-SMO increases the ZSL accuracy of CUB from $22.7\\%$ to $28.9\\%$, and the accuracy of SUN from $23.6\\%$ to $38.1\\%$. These results demonstrate that our approach not only works well on generic object categories, but also has great potential to benefit the challenging fine-grained classification task. VGSE improves the GZSL performance on both seen and unseen classes, yielding a much better harmonic mean (e.g., when trained with SJE, VGSE-SMO improves over the harmonic mean of w2v by $8.0\\%$ on AWA2, $10.3\\%$ on CUB, and $7.6\\%$ on SUN). These results indicate that our VGSE helps the model learn a better compatibility function between image and semantic embeddings, for both seen and unseen classes.",
"bbox": [496, 383, 893, 731],
"page_idx": 4
},
{
"type": "text",
"text": "Our VGSE semantic embeddings show great potential on generative models as well. In particular, VGSE coupled with f-VAEGAN-D2 surpasses all other methods by a wide margin on the SUN and CUB datasets, i.e., we obtain $35.0\\%$ vs $32.7\\%$ (w2v) on CUB, and $41.1\\%$ vs $39.6\\%$ (w2v) on SUN. As our embeddings are more machine-detectable than w2v, introducing visual properties to the conditional GAN allows it to generate more discriminative image features.",
"bbox": [496, 732, 893, 853],
"page_idx": 4
},
{
"type": "text",
"text": "VGSE outperforms SOTA weakly supervised ZSL semantic embeddings. We compare VGSE with other works that learn ZSL semantic embeddings with less human annotation.",
"bbox": [496, 854, 893, 900],
"page_idx": 4
},
{
"type": "table",
"img_path": "images/fb63e88bd7a057f79a32116ff613a5b163cc2b3ce3dd480b735fc6c3e816702d.jpg",
"table_caption": [],
"table_footnote": [],
"table_body": "<table><tr><td rowspan=\"3\"></td><td rowspan=\"3\">ZSL Model</td><td rowspan=\"3\">Semantic Embeddings</td><td colspan=\"3\">Zero-Shot Learning</td><td colspan=\"9\">Generalized Zero-Shot Learning</td></tr><tr><td>AWA2</td><td>CUB</td><td>SUN</td><td colspan=\"3\">AWA2</td><td colspan=\"3\">CUB</td><td colspan=\"3\">SUN</td></tr><tr><td>T1</td><td>T1</td><td>T1</td><td>u</td><td>s</td><td>H</td><td>u</td><td>s</td><td>H</td><td>u</td><td>s</td><td>H</td></tr><tr><td rowspan=\"4\">Generative</td><td rowspan=\"2\">CADA-VAE [43]</td><td>w2v [31]</td><td>49.0</td><td>22.5</td><td>37.8</td><td>38.6</td><td>60.1</td><td>47.0</td><td>16.3</td><td>39.7</td><td>23.1</td><td>26.0</td><td>28.2</td><td>27.0</td></tr><tr><td>VGSE-SMO (Ours)</td><td>52.7</td><td>24.8</td><td>40.3</td><td>46.9</td><td>61.6</td><td>53.9</td><td>18.3</td><td>44.5</td><td>25.9</td><td>29.4</td><td>29.6</td><td>29.5</td></tr><tr><td rowspan=\"2\">f-VAEGAN-D2 [61]</td><td>w2v [31]</td><td>58.4</td><td>32.7</td><td>39.6</td><td>46.7</td><td>59.0</td><td>52.2</td><td>23.0</td><td>44.5</td><td>30.3</td><td>25.9</td><td>33.3</td><td>29.1</td></tr><tr><td>VGSE-SMO (Ours)</td><td>61.3</td><td>35.0</td><td>41.1</td><td>45.7</td><td>66.7</td><td>54.2</td><td>24.1</td><td>45.7</td><td>31.5</td><td>25.5</td><td>35.7</td><td>29.8</td></tr><tr><td rowspan=\"6\">Non-Generative</td><td rowspan=\"2\">SJE [2]</td><td>w2v [31]</td><td>53.7</td><td>14.4</td><td>26.3</td><td>39.7</td><td>65.3</td><td>48.8</td><td>13.2</td><td>28.6</td><td>18.0</td><td>19.8</td><td>18.6</td><td>19.2</td></tr><tr><td>VGSE-SMO (Ours)</td><td>62.4</td><td>26.1</td><td>35.8</td><td>46.8</td><td>72.3</td><td>56.8</td><td>16.4</td><td>44.7</td><td>28.3</td><td>28.7</td><td>25.2</td><td>26.8</td></tr><tr><td rowspan=\"2\">GEM-ZSL [28]</td><td>w2v [31]</td><td>50.2</td><td>25.7</td><td>-</td><td>40.1</td><td>80.0</td><td>53.4</td><td>11.2</td><td>48.8</td><td>18.2</td><td>-</td><td>-</td><td>-</td></tr><tr><td>VGSE-SMO (Ours)</td><td>58.0</td><td>29.1</td><td>-</td><td>49.1</td><td>78.2</td><td>60.3</td><td>13.1</td><td>43.0</td><td>20.0</td><td>-</td><td>-</td><td>-</td></tr><tr><td rowspan=\"2\">APN [62]</td><td>w2v [31]</td><td>59.6</td><td>22.7</td><td>23.6</td><td>41.8</td><td>75.0</td><td>53.7</td><td>17.6</td><td>29.4</td><td>22.1</td><td>16.3</td><td>15.3</td><td>15.8</td></tr><tr><td>VGSE-SMO (Ours)</td><td>64.0</td><td>28.9</td><td>38.1</td><td>51.2</td><td>81.8</td><td>63.0</td><td>21.9</td><td>45.5</td><td>29.5</td><td>24.1</td><td>31.8</td><td>27.4</td></tr></table>",
"bbox": [138, 87, 831, 273],
"page_idx": 5
},
|
| 988 |
+
{
"type": "table",
"img_path": "images/94b04188937347cf4f0a5af1f0f2b616f3fd0bd528d461870daf3a1e984baaed.jpg",
"table_caption": [
"Table 1. Comparing our VGSE-SMO with w2v semantic embeddings over state-of-the-art ZSL models. In ZSL, we measure Top-1 accuracy (T1) on unseen classes; in GZSL, accuracy on seen/unseen $(\\mathbf{s} / \\mathbf{u})$ classes and their harmonic mean (H). Feature-generating methods, i.e., f-VAEGAN-D2 and CADA-VAE, generate synthetic training samples, while SJE, APN, and GEM-ZSL use only real image features."
],
"table_footnote": [],
"table_body": "<table><tr><td rowspan=\"2\">Semantic Embeddings</td><td rowspan=\"2\">External knowledge</td><td colspan=\"3\">Zero-shot learning</td></tr><tr><td>AWA2</td><td>CUB</td><td>SUN</td></tr><tr><td>w2v [31]</td><td>w2v</td><td>58.4</td><td>32.7</td><td>39.6</td></tr><tr><td>ZSLNS [39]</td><td>T</td><td>57.4</td><td>27.8</td><td>-</td></tr><tr><td>GAZSL [67]</td><td>T</td><td>-</td><td>34.4</td><td>-</td></tr><tr><td>Auto-dis [3]</td><td>T</td><td>52.0</td><td>-</td><td>-</td></tr><tr><td>CAAP [5]</td><td>T and H</td><td>55.3</td><td>31.9</td><td>35.5</td></tr><tr><td>VGSE-SMO (Ours)</td><td>w2v</td><td>61.3 ± 0.3</td><td>35.0 ± 0.2</td><td>41.1 ± 0.3</td></tr></table>",
"bbox": [81, 349, 468, 468],
"page_idx": 5
},
{
"type": "text",
"text": "CAAP [5] learns the unseen semantic embeddings with the help of w2v and the human-annotated attributes for seen classes. Auto-Dis [3] collects attributes from online encyclopedia articles that describe each category, and learns attribute-class associations with the supervision of visual data and category labels. GAZSL [67] and ZSLNS [39] learn semantic embeddings from Wikipedia articles.",
"bbox": [75, 561, 468, 667],
"page_idx": 5
},
{
"type": "text",
"text": "The results shown in Table 2 demonstrate that our VGSE embeddings, using only w2v as external knowledge, surpass all other methods that use textual articles on all three datasets. In particular, our VGSE-SMO achieves an accuracy of $61.3\\%$ on AWA2, improving over the closest semantic embedding, w2v, by $2.9\\%$. On SUN, we also outperform the closest semantic embedding, w2v, by $1.5\\%$.",
"bbox": [75, 667, 470, 773],
"page_idx": 5
},
{
"type": "text",
"text": "4.2. Ablation study",
"text_level": 1,
"bbox": [76, 782, 227, 800],
"page_idx": 5
},
{
"type": "text",
"text": "We provide ablation studies for our PC and CR modules. Is the PC module effective? We first ask whether learning semantic embeddings through clustering is effective in terms of ZSL accuracy, compared to other alternatives. We compare our semantic embeddings against the following baselines:",
"bbox": [75, 806, 468, 883],
"page_idx": 5
},
{
"type": "text",
"text": "ResNet features are extracted by feeding image patch",
"bbox": [96, 885, 468, 901],
"page_idx": 5
},
{
"type": "table",
"img_path": "images/f31a18b68945f6d03a8a5b4aeb3f000c2f7374b9a054d8792990d9b6b8f491bd.jpg",
"table_caption": [
"Table 2. Comparing with state-of-the-art methods for learning semantic embeddings with less human annotation (T: online textual articles, H: human annotation) using the same image features and ZSL model (f-VAEGAN-D2 [61])."
],
"table_footnote": [],
"table_body": "<table><tr><td rowspan=\"2\">Semantic Embeddings</td><td colspan=\"3\">Zero-shot learning</td></tr><tr><td>AWA2</td><td>CUB</td><td>SUN</td></tr><tr><td>k-means-SMO</td><td>54.5 ± 0.4</td><td>15.0 ± 0.5</td><td>25.2 ± 0.4</td></tr><tr><td>ResNet-SMO</td><td>55.3 ± 0.2</td><td>15.4 ± 0.1</td><td>25.1 ± 0.1</td></tr><tr><td>Lclu + Lpel (baseline + SMO)</td><td>56.6 ± 0.2</td><td>16.7 ± 0.2</td><td>26.3 ± 0.3</td></tr><tr><td>+ Lcls</td><td>61.2 ± 0.1</td><td>23.7 ± 0.2</td><td>30.5 ± 0.2</td></tr><tr><td>+ Lsem (VGSE-SMO)</td><td>62.4 ± 0.3</td><td>26.1 ± 0.3</td><td>35.8 ± 0.2</td></tr><tr><td>VGSE-WAvg</td><td>57.7 ± 0.2</td><td>25.8 ± 0.3</td><td>35.3 ± 0.2</td></tr></table>",
"bbox": [504, 349, 890, 479],
"page_idx": 5
},
|
| 1076 |
+
{
"type": "text",
"text": "Table 3. Ablation study over the PC module, reporting ZSL T1 on AWA2, CUB, and SUN (mean accuracy and std over 5 runs). The baseline is the PC module with the cluster losses $\\mathcal{L}_{clu}$ and $\\mathcal{L}_{pel}$. Our full model VGSE-SMO is trained with two additional losses, $\\mathcal{L}_{cls}$ and $\\mathcal{L}_{sem}$. Two kinds of semantic embeddings learned from k-means clustering and a pretrained ResNet are listed for comparison.",
"bbox": [498, 489, 893, 574],
"page_idx": 5
},
{
"type": "text",
"text": "$x_{nt}$ to a pretrained ResNet50. We follow Eq. 7 and Eq. 8 to predict semantic embeddings for seen classes. $K$-means clustering is an alternative to our clustering model. We cluster the patch image features $\\theta(x_{nt})$ learned from our PC module into $D_v$ visual clusters. The patch embedding $a_{nt}^k$ is defined as the cosine similarity between the patch feature $\\theta(x_{nt})$ and the cluster center. In both cases the unseen semantic embeddings are predicted with our SMO module.",
"bbox": [496, 609, 893, 731],
"page_idx": 5
},
{
"type": "text",
"text": "We ablate our losses and compare our VGSE-SMO with the two alternatives, then report ZSL results on three benchmark datasets in Table 3. First, k-means-SMO achieves on-par results with our baseline model trained with only the cluster losses $\\mathcal{L}_{clu}$ and $\\mathcal{L}_{pel}$ [53]; the reason we adopt [53] instead of k-means is that we can easily train the network with our proposed losses in an end-to-end manner. Second, the addition of the classification loss $\\mathcal{L}_{cls}$ leads to a notable improvement over the baseline model trained with $\\mathcal{L}_{clu}$ and $\\mathcal{L}_{pel}$, and the semantic relatedness loss $\\mathcal{L}_{sem}$ further improves the performance of our semantic embeddings, e.g., in total, we gain",
"bbox": [496, 734, 893, 902],
"page_idx": 5
},
{
"type": "text",
"text": "$5.8\\%$, $9.4\\%$ and $9.5\\%$ improvement on AWA2, CUB, and SUN, respectively. The result demonstrates that imposing class discrimination and semantic relatedness leads to better performance in the ZSL setting. Third, our VGSE-SMO embeddings improve over the ResNet-SMO embeddings by $7.1\\%$, $10.7\\%$ and $10.7\\%$ on AWA2, CUB, and SUN, respectively. We conjecture that the visual clusters learned in our model are shared among different classes and lead to better generalization ability when the training and testing sets are disjoint (see qualitative results in Figure 1 and Section 4.3).",
"bbox": [75, 90, 472, 242],
"page_idx": 6
},
{
"type": "text",
"text": "How many clusters are needed? To measure the influence of the cluster number $D_v$ on our semantic embeddings, we train the PC module with various $D_v$ (results shown in Figure 3a). When the unseen semantic embeddings are predicted under an oracle setting (predicted from the unseen class images), varying the dimension $D_v$ does not influence the classification accuracy on unseen classes (the orange curve). Under the ZSL setting, where unseen semantic embeddings are predicted from class relations (VGSE-SMO), the cluster number does influence the ZSL performance. As the cluster number increases up to a breaking point ($D_v = 200$), the quality of the semantic embeddings also improves (from $58.4\\%$ to $62.5\\%$), since the learned clusters contain visually similar patches from different classes, which can model the visual relation between classes. However, increasing the number of clusters further leads to small, pure clusters (patches coming from one single category), resulting in poor generalization between seen and unseen classes.",
"bbox": [75, 244, 473, 517],
"page_idx": 6
},
{
"type": "text",
"text": "SMO vs WAvg. We compare our two class relation functions, VGSE-WAvg and VGSE-SMO, in Table 3 (Rows 7 and 6). The results demonstrate that VGSE-WAvg works on par with VGSE-SMO on the SUN and CUB datasets, with a $< 0.5\\%$ performance gap. On the AWA2 dataset, however, VGSE-SMO yields better ZSL performance ($62.4\\%$) than VGSE-WAvg ($57.7\\%$). The results indicate that predicting the unseen semantic embeddings with the weighted average of a few seen-class semantic embeddings (VGSE-WAvg) works well for fine-grained datasets, since the visual discrepancy between classes is small. However, for the coarse-grained dataset AWA2, the class relation function considering all seen class embeddings (VGSE-SMO) works better.",
"bbox": [75, 518, 470, 717],
"page_idx": 6
},
{
"type": "text",
"text": "Ablation over patches. We further study whether using patches for clustering is better than using the whole image, and how many patches we need from one image. The experimental results in Figure 3b demonstrate that as the patch number increases from 1 (single-image clustering) to 9, the ZSL performance increases as well, since the image patches used for semantic embedding learning contain semantic object parts and thus result in better knowledge transfer between seen and unseen classes. However, for a large $N_{t}$, the patches might be too tiny to contain consistent semantics, resulting in a performance drop, e.g., the ZSL accuracy on AWA2 drops from $62.4\\%$ ($N_{t} = 9$) to $58.7\\%$ ($N_{t} = 128$). We also",
"bbox": [75, 719, 472, 901],
"page_idx": 6
},
|
| 1153 |
+
{
"type": "image",
"img_path": "images/948994bd14ae0f91d13ba2900064fdd86fc5fe34d39351698121885c993e9712.jpg",
"image_caption": [
"(a) cluster number $D_{v}$"
],
"image_footnote": [],
"bbox": [514, 99, 684, 214],
"page_idx": 6
},
{
"type": "image",
"img_path": "images/143a149f33f6ba663ad99ca2c134936ce37b7c0cbc42cbc92284905ad63755eb.jpg",
"image_caption": [
"(b) patch number $N_{t}$",
"Figure 3. (a) Influence of the cluster number $D_v = 50, \\ldots, 3000$. In the oracle setting, we feed unseen-class images to the PC module to predict unseen semantic embeddings. (b) Influence of the patch number $N_t$ we use per image with the watershed segmentation for obtaining our VGSE-SMO class embeddings. $N_t = 1$ uses the whole image (no patches). \"3×3 grid\" crops the image into 9 square patches. Both plots report ZSL accuracy with the SJE model trained on the AWA2 dataset (mean and std over 5 runs)."
],
"image_footnote": [],
"bbox": [702, 99, 880, 214],
"page_idx": 6
},
{
"type": "table",
"img_path": "images/b0f14ee33fa9f9fd29e272ca01fc12cc29f8ad9fd1d1208e275318a91bd6df96.jpg",
"table_caption": [],
"table_footnote": [],
"table_body": "<table><tr><td rowspan=\"2\">Semantic Embeddings</td><td colspan=\"2\">AWA2</td><td colspan=\"2\">CUB</td></tr><tr><td>T1</td><td>H</td><td>T1</td><td>H</td></tr><tr><td>w2v [31]</td><td>53.7 ± 0.2</td><td>48.8 ± 0.1</td><td>14.4 ± 0.3</td><td>18.0 ± 0.2</td></tr><tr><td>VGSE-SMO (w2v)</td><td>62.4 ± 0.1</td><td>56.8 ± 0.1</td><td>26.1 ± 0.2</td><td>28.3 ± 0.1</td></tr><tr><td>glove [38]</td><td>38.8 ± 0.2</td><td>38.7 ± 0.3</td><td>19.3 ± 0.2</td><td>13.4 ± 0.1</td></tr><tr><td>VGSE-SMO (glove)</td><td>46.5 ± 0.1</td><td>46.0 ± 0.1</td><td>25.2 ± 0.3</td><td>27.1 ± 0.2</td></tr><tr><td>fasttext [7]</td><td>47.7 ± 0.1</td><td>44.6 ± 0.3</td><td>-</td><td>-</td></tr><tr><td>VGSE-SMO (fasttext)</td><td>51.9 ± 0.2</td><td>53.2 ± 0.1</td><td>-</td><td>-</td></tr><tr><td>Attribute</td><td>62.8 ± 0.1</td><td>62.6 ± 0.3</td><td>56.4 ± 0.2</td><td>49.4 ± 0.1</td></tr><tr><td>VGSE-SMO (Attribute)</td><td>66.7 ± 0.1</td><td>64.9 ± 0.1</td><td>56.8 ± 0.1</td><td>50.9 ± 0.2</td></tr></table>",
"bbox": [511, 367, 883, 503],
"page_idx": 6
},
{
"type": "text",
"text": "Table 4. Evaluating the external knowledge, i.e., word embeddings w2v [31], glove [38], fasttext [7], and human-annotated attributes, for our VGSE-SMO embeddings, e.g., VGSE-SMO (glove) indicates that the CR module is trained with glove embeddings. T1: top-1 accuracy in ZSL, H: harmonic mean in GZSL, trained with SJE [2] on AWA2 and CUB (std over 5 runs).",
"bbox": [496, 513, 893, 597],
"page_idx": 6
},
{
"type": "text",
"text": "compare the patches generated by the watershed segmentation proposal with $3 \\times 3$ grid patches ($N_{t} = 9$), and we find that using watershed as the region proposal results in an accuracy boost (8.2% on AWA2) compared to the regular grid patches, since the former tend to cover more complete object parts rather than randomly cropped regions.",
"bbox": [496, 625, 893, 717],
"page_idx": 6
},
|
| 1220 |
+
{
"type": "text",
"text": "Can we do better with human-annotated attributes? Table 4 shows the performance of our model when different external knowledge is used to predict the unseen class embeddings in the CR module. Nearly all of our conclusions from the former section carry over, e.g., VGSE-SMO class embeddings outperform the other class embeddings by a large margin. For instance, we improve the ZSL accuracy over glove by $7.7\\%$ (AWA2) and $5.9\\%$ (CUB). Furthermore, VGSE-SMO (Attribute) also outperforms Attribute on both the AWA2 and CUB datasets, i.e., we achieve $66.7\\%$ (ZSL) on AWA2, compared to human attributes with $62.8\\%$. The results demonstrate that our",
"bbox": [496, 719, 895, 900],
"page_idx": 6
},
{
"type": "image",
"img_path": "images/bab24942220fd7d9f44bf7f38e5dbd63918219efc342c7c20b0d0cd9ac79ff79.jpg",
"image_caption": [
"Figure 4. T-SNE embeddings of image patches from AWA2. Each colored dot region represents one visual cluster learnt by our VGSE model. We sample the seen (in blue) and unseen images (in orange) from the cluster center, with their class names shown nearby."
],
"image_footnote": [],
"bbox": [125, 90, 433, 315],
"page_idx": 7
},
{
"type": "text",
"text": "VGSE-SMO embeddings coupled with visually-grounded information can not only outperform the unsupervised word embeddings, but also improve over human attributes in transferring knowledge under the zero-shot setting.",
"bbox": [75, 411, 470, 472],
"page_idx": 7
},
{
"type": "text",
"text": "4.3. Qualitative Results",
"text_level": 1,
"bbox": [76, 482, 261, 500],
"page_idx": 7
},
{
"type": "text",
"text": "In Figure 4, we show the 2D visualization of image patches in AWA2, where 10,000 image patches are visualized by projecting their embeddings $a_{nt}$ onto two dimensions with t-SNE [52]. To picture their distribution in the embedding space, we sample several visual clusters (dots marked in the same color) and the image patches from the cluster center of both seen and unseen categories. Note that the unseen patches are not used to predict the unseen semantic embeddings, but only used for visualization here.",
"bbox": [75, 507, 470, 643],
"page_idx": 7
},
{
"type": "text",
"text": "We observe that samples in the same cluster tend to gather together, indicating that the embeddings provide discriminative information. Besides, image patches in one cluster do convey consistent visual properties, though coming from disjoint categories. For instance, the white fur appearing on rabbit, polar bear, and fox is clustered into one group, and the striped fur from tiger, zebra, and bobcat gathers together because of its similar texture. We further observe that nearly all clusters consist of images from more than one category. For instance, the horns from the seen classes ox, deer, and rhinoceros and the unseen class sheep, which have slightly different shapes but the same semantics, are clustered together. A similar phenomenon can be observed in the spotted fur and animals-in-ocean clusters. This indicates that the clusters we learned contain semantic properties shared across seen classes, which can be transferred to unseen classes. Another interesting observation is that our VGSE clusters discover visual properties",
"bbox": [75, 643, 472, 902],
"page_idx": 7
},
{
"type": "text",
"text": "that may be neglected by human-annotated attributes, e.g., the cage appears for hamster and rat, and the black-and-white fur appears not only on giant panda but also on sheep.",
"bbox": [496, 90, 890, 137],
"page_idx": 7
},
|
| 1302 |
+
{
"type": "text",
"text": "4.4. Human Evaluation",
"text_level": 1,
"bbox": [498, 150, 684, 165],
"page_idx": 7
},
{
"type": "text",
"text": "To evaluate if our VGSE conveys consistent visual and semantic properties, we randomly pick 50 clusters, each equipped with 30 images from the cluster center, and ask 5 postgraduate students without prior knowledge of ZSL to examine the clusters and answer the following three questions. Q1: Do images in this cluster contain consistent visual properties? Q2: Do images in this cluster convey consistent semantic information? Q3: Please name the semantics you observed from the cluster, if your answer to Q2 is true. We run the same user study on 50 randomly picked clusters from the k-means clustering model. The results reveal that in $88.5\\%$ and $87.0\\%$ of cases, users think our clusters convey consistent visual and semantic information, while for k-means clusters, the results are $71.5\\%$ and $71.0\\%$, respectively. The user evaluation agrees with the quantitative results in Table 3, which demonstrates that class embeddings containing consistent visual and semantic information can significantly benefit ZSL performance. Interestingly, by viewing VGSE clusters, users can easily discover semantics and even fine-grained attributes not depicted by human-annotated attributes, e.g., the fangs and horns in Figure 1. Note that the whole process, i.e., naming 50 attributes for 40 classes, took less than 1 hour for each user.",
"bbox": [496, 175, 893, 523],
"page_idx": 7
},
{
"type": "text",
"text": "5. Conclusion",
"text_level": 1,
"bbox": [500, 541, 619, 556],
"page_idx": 7
},
{
"type": "text",
"text": "We develop a Visually-Grounded Semantic Embedding Network (VGSE) to learn distinguishing semantic embeddings for zero-shot learning with minimal human supervision. By clustering image patches with respect to their visual similarity, our network explores various semantic clusters shared between classes. Experiments on three benchmark datasets demonstrate that our semantic embeddings predicted from the class-relation module are generalizable to unseen classes, i.e., achieving significant improvements over word embeddings when trained with five models in both ZSL and GZSL settings. We further show that the visually augmented semantic embedding outperforms other semantic embeddings learned with minimal human supervision. The qualitative results verify that we discover visually consistent clusters that generalize from seen to unseen classes and can unearth fine-grained properties not depicted by humans.",
"bbox": [496, 568, 893, 811],
"page_idx": 7
},
{
"type": "text",
"text": "Acknowledgements",
"text_level": 1,
"bbox": [500, 828, 668, 845],
"page_idx": 7
},
{
"type": "text",
"text": "This work has been partially funded by the ERC 853489 - DEXIM and by the DFG - EXC number 2064/1 - Project number 390727645.",
"bbox": [498, 854, 893, 900],
"page_idx": 7
},
|
| 1370 |
+
},
|
| 1371 |
+
{
|
| 1372 |
+
"type": "text",
|
| 1373 |
+
"text": "References",
|
| 1374 |
+
"text_level": 1,
|
| 1375 |
+
"bbox": [
|
| 1376 |
+
78,
|
| 1377 |
+
89,
|
| 1378 |
+
173,
|
| 1379 |
+
104
|
| 1380 |
+
],
|
| 1381 |
+
"page_idx": 8
|
| 1382 |
+
},
|
| 1383 |
+
{
|
| 1384 |
+
"type": "list",
|
| 1385 |
+
"sub_type": "ref_text",
|
| 1386 |
+
"list_items": [
|
| 1387 |
+
"[1] Zeynep Akata, Florent Perronnin, Zaid Harchaoui, and Cordelia Schmid. Label-embedding for image classification. T-PAMI, 2015. 1, 2",
|
| 1388 |
+
"[2] Zeynep Akata, Scott Reed, Daniel Walter, Honglak Lee, and Bernt Schiele. Evaluation of output embeddings for fine-grained image classification. In CVPR, 2015. 5, 6, 7",
|
| 1389 |
+
"[3] Ziad Al-Halah and Rainer Stiefelhagen. Automatic discovery, association estimation and learning of semantic attributes for a thousand categories. In CVPR, 2017. 1, 2, 6",
|
| 1390 |
+
"[4] Ziad Al-Halah, Rainer Stiefelhagen, and Kristen Grauman. Fashion forward: Forecasting visual style in fashion. In ICCV, 2017. 1",
|
| 1391 |
+
"[5] Ziad Al-Halah, Makarand Tapaswi, and Rainer Stiefelhagen. Recovering the missing link: Predicting class-attribute associations for unsupervised zero-shot learning. In CVPR, 2016. 4, 6",
|
| 1392 |
+
"[6] Alessandro Bergamo, Lorenzo Torresani, and Andrew W Fitzgibbon. Picodes: Learning a compact code for novel-category recognition. In NIPS. CiteSeer, 2011. 2",
|
| 1393 |
+
"[7] Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. Enriching word vectors with subword information. Transactions of the Association for Computational Linguistics, 5:135-146, 2017. 7",
|
| 1394 |
+
"[8] Wieland Brendel and Matthias Bethge. Approximating cnns with bag-of-local-features models works surprisingly well onImagenet. *ICLR*, 2019. 2",
|
| 1395 |
+
"[9] Maxime Bucher, Stéphane Herbin, and Frédéric Jurie. Generating visual representations for zero-shot classification. In ICCV Workshops, 2017. 2",
|
| 1396 |
+
"[10] Qiang Chen, Junshi Huang, Rogerio Feris, Lisa M Brown, Jian Dong, and Shuicheng Yan. Deep domain adaptation for describing people based on fine-grained clothing attributes. In CVPR, 2015. 1, 2",
|
| 1397 |
+
"[11] Yu Chen, Ying Tai, Xiaoming Liu, Chunhua Shen, and Jian Yang. Fsrnet: End-to-end learning face super-resolution with facial priors. In CVPR, 2018. 1",
|
| 1398 |
+
"[12] Rudi L Cilibrasi and Paul MB Vitanyi. The google similarity distance. IEEE Transactions on knowledge and data engineering, 2007. 2",
|
| 1399 |
+
"[13] Gabriella Csurka, Christopher Dance, Lixin Fan, Jutta Willamowski, and Cedric Bray. Visual categorization with bags of keypoints. In Workshop on statistical learning in computer vision, ECCV, volume 1, pages 1-2. Prague, 2004. 2",
|
| 1400 |
+
"[14] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In CVPR, 2009. 3, 5",
|
| 1401 |
+
"[15] Carl Doersch, Abhinav Gupta, and Alexei A Efros. Mid-level visual element discovery as discriminative mode seeking. In NIPS, 2013. 2",
|
| 1402 |
+
"[16] Carl Doersch, Saurabh Singh, Abhinav Gupta, Josef Sivic, and Alexei Efros. What makes paris look like paris? ACM Transactions on Graphics, 2012. 2",
|
| 1403 |
+
"[17] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner,"
|
| 1404 |
+
],
|
| 1405 |
+
"bbox": [
|
| 1406 |
+
78,
|
| 1407 |
+
114,
|
| 1408 |
+
470,
|
| 1409 |
+
898
|
| 1410 |
+
],
|
| 1411 |
+
"page_idx": 8
|
| 1412 |
+
},
|
| 1413 |
+
{
|
| 1414 |
+
"type": "list",
|
| 1415 |
+
"sub_type": "ref_text",
|
| 1416 |
+
"list_items": [
|
| 1417 |
+
"Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. ICLR, 2021. 2, 3",
|
| 1418 |
+
"[18] Kun Duan, Devi Parikh, David Crandall, and Kristen Grauman. Discovering localized attributes for fine-grained recognition. In CVPR. IEEE, 2012. 2",
|
| 1419 |
+
"[19] Ali Farhadi, Ian Endres, Derek Hoiem, and David Forsyth. Describing objects by their attributes. In CVPR. IEEE, 2009. 1, 2",
|
| 1420 |
+
"[20] Andrea Frome, Greg Corrado, Jonathon Shlens, Samy Bengio, Jeffrey Dean, Marc'Aurelio Ranzato, and Tomas Mikolov. Devise: A deep visual-semantic embedding model. NeurIPS, 2013. 2",
|
| 1421 |
+
"[21] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In CVPR, 2020. 3",
|
| 1422 |
+
"[22] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 3, 5",
|
| 1423 |
+
"[23] Wei-Lin Hsiao and Kristen Grauman. Learning the latent\" look\": Unsupervised discovery of a style-coherent embedding from fashion images. In ICCV, 2017. 1",
|
| 1424 |
+
"[24] Masato Ishii, Takashi Takenouchi, and Masashi Sugiyama. Zero-shot domain adaptation based on attribute information. In Asian Conference on Machine Learning. PMLR, 2019. 1",
|
| 1425 |
+
"[25] Huajie Jiang, Ruiping Wang, Shiguang Shan, Yi Yang, and Xilin Chen. Learning discriminative latent attributes for zero-shot classification. In ICCV, 2017. 2",
|
| 1426 |
+
"[26] Michael Kampffmeyer, Yinbo Chen, Xiaodan Liang, Hao Wang, Yujia Zhang, and Eric P Xing. Rethinking knowledge graph propagation for zero-shot learning. In CVPR, 2019. 2",
|
| 1427 |
+
"[27] Cheng-Han Lee, Ziwei Liu, Lingyun Wu, and Ping Luo. Maskgan: Towards diverse and interactive facial image manipulation. In CVPR, 2020. 1",
|
| 1428 |
+
"[28] Yang Liu, Lei Zhou, Xiao Bai, Yifei Huang, Lin Gu, Jun Zhou, and Tatsuya Harada. Goal-oriented gaze estimation for zero-shot learning. In CVPR, 2021. 1, 5, 6",
|
| 1429 |
+
"[29] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In ICCV, 2015. 1",
|
| 1430 |
+
"[30] Utkarsh Mall, Bharath Hariharan, and Kavita Bala. Field-guide-inspired zero-shot learning. In CVPR, 2021. 1",
|
| 1431 |
+
"[31] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean. Distributed representations of words and phrases and their compositionality. NeurIPS, 2013. 1, 2, 4, 5, 6, 7",
|
| 1432 |
+
"[32] Peer Neubert and Peter Protzel. Compact watershed and preemptive slic: On improving trade-offs of superpixel segmentation algorithms. In ICPR. IEEE, 2014. 3",
|
| 1433 |
+
"[33] Ishan Nigam, Pavel Tokmakov, and Deva Ramanan. Towards latent attribute discovery from triplet similarities. In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2019. 2",
|
| 1434 |
+
"[34] Mehdi Noroozi and Paolo Favaro. Unsupervised learning of visual representations by solving jigsaw puzzles. In ECCV, 2016. 3",
|
| 1435 |
+
"[35] Devi Parikh and Kristen Grauman. Interactively building a discriminative vocabulary of nameable attributes. In CVPR. IEEE, 2011. 2"
|
| 1436 |
+
],
|
| 1437 |
+
"bbox": [
|
| 1438 |
+
503,
|
| 1439 |
+
92,
|
| 1440 |
+
893,
|
| 1441 |
+
898
|
| 1442 |
+
],
|
| 1443 |
+
"page_idx": 8
|
| 1444 |
+
},
|
| 1445 |
+
{
|
| 1446 |
+
"type": "list",
|
| 1447 |
+
"sub_type": "ref_text",
|
| 1448 |
+
"list_items": [
|
| 1449 |
+
"[36] Genevieve Patterson, Chen Xu, Hang Su, and James Hays. The sun attribute database: Beyond categories for deeper scene understanding. IJCV, 2014. 1, 2, 5",
|
| 1450 |
+
"[37] Peixi Peng, Yonghong Tian, Tao Xiang, Yaowei Wang, Massimiliano Pontil, and Tiejun Huang. Joint semantic and latent attribute modelling for cross-class transfer learning. T-PAMI, 2017. 2",
|
| 1451 |
+
"[38] Jeffrey Pennington, Richard Socher, and Christopher D Manning. Glove: Global vectors for word representation. In EMNLP, 2014. 1, 2, 4, 7",
|
| 1452 |
+
"[39] Ruizhi Qiao, Lingqiao Liu, Chunhua Shen, and Anton Van Den Hengel. Less is more: zero-shot learning from online textual documents with noise suppression. In CVPR, 2016. 1, 2, 6",
|
| 1453 |
+
"[40] Mohammad Rastegari, Ali Farhadi, and David Forsyth. Attribute discovery via predictable discriminative binary codes. In ECCV. Springer, 2012. 2",
|
| 1454 |
+
"[41] Marcus Rohrbach, Michael Stark, György Szarvas, Iryna Gurevych, and Bernt Schiele. What helps where-and why? semantic relatedness for knowledge transfer. In CVPR, 2010. 2",
|
| 1455 |
+
"[42] Edgar Schonfeld, Sayna Ebrahimi, Samarth Sinha, Trevor Darrell, and Zeynep Akata. Generalized zero-and few-shot learning via aligned variational autoencoders. In CVPR, 2019. 1",
|
| 1456 |
+
"[43] Edgar Schonfeld, Sayna Ebrahimi, Samarth Sinha, Trevor Darrell, and Zeynep Akata. Generalized zero-and few-shot learning via aligned variational autoencoders. In CVPR, 2019, 2, 5, 6",
|
| 1457 |
+
"[44] Viktoriia Sharmanska, Novi Quadrianto, and Christoph H Lampert. Augmented attribute representations. In ECCV. Springer, 2012. 2",
|
| 1458 |
+
"[45] Ronan Sicre, Yannis Avrithis, Ewa Kijak, and Frédéric Jurie. Unsupervised part learning for visual recognition. In CVPR, 2017. 2",
|
| 1459 |
+
"[46] Saurabh Singh, Abhinav Gupta, and Alexei A Efros. Unsupervised discovery of mid-level discriminative patches. In ECCV, 2012. 2",
|
| 1460 |
+
"[47] Josef Sivic and Andrew Zisserman. Video google: A text retrieval approach to object matching in videos. In Computer Vision, IEEE International Conference on, volume 3, pages 1470-1470. IEEE Computer Society, 2003. 2",
|
| 1461 |
+
"[48] Richard Socher, Milind Ganjoo, Hamsa Sridhar, Osbert Bastani, Christopher D Manning, and Andrew Y Ng. Zero-shot learning through cross-modal transfer. NeurIPS, 2013. 2",
|
| 1462 |
+
"[49] Richard Socher, Milind Ganjoo, Hamsa Sridhar, Osbert Bastani, Christopher D Manning, and Andrew Y Ng. Zero-shot learning through cross-modal transfer. NeurIPS, 2013. 2",
|
| 1463 |
+
"[50] Jie Song, Chengchao Shen, Jie Lei, An-Xiang Zeng, Kairi Ou, Dacheng Tao, and Mingli Song. Selective zero-shot classification with augmented attributes. In ECCV, 2018. 2",
|
| 1464 |
+
"[51] Lorenzo Torresani, Martin Szummer, and Andrew Fitzgibbon. Efficient object category recognition using classmes. In ECCV. Springer, 2010. 2",
|
| 1465 |
+
"[52] Laurens van der Maaten and Geoffrey Hinton. Visualizing data using t-SNE. Journal of Machine Learning Research, 2008. 8"
|
| 1466 |
+
],
|
| 1467 |
+
"bbox": [
|
| 1468 |
+
78,
|
| 1469 |
+
90,
|
| 1470 |
+
470,
|
| 1471 |
+
898
|
| 1472 |
+
],
|
| 1473 |
+
"page_idx": 9
|
| 1474 |
+
},
|
| 1475 |
+
{
|
| 1476 |
+
"type": "list",
|
| 1477 |
+
"sub_type": "ref_text",
|
| 1478 |
+
"list_items": [
|
| 1479 |
+
"[53] Wouter Van Gansbeke, Simon Vandenhende, Stamatios Georgoulis, Marc Proesmans, and Luc Van Gool. Scan: Learning to classify images without labels. In ECCV, 2020. 3, 4, 6",
|
| 1480 |
+
"[54] Sirion Vittayakorn, Takayuki Umeda, Kazuhiko Murasaki, Kyoko Sudo, Takayuki Okatani, and Kota Yamaguchi. Automatic attribute discovery with neural activations. In ECCV, 2016. 2",
|
| 1481 |
+
"[55] C. Wah, S. Branson, P. Welinder, P. Perona, and S. Belongie. The Caltech-UCSD Birds-200-2011 Dataset. Technical Report CNS-TR-2011-001, California Institute of Technology, 2011. 1, 2, 3, 5",
|
| 1482 |
+
"[56] Xiaolong Wang, Yufei Ye, and Abhinav Gupta. Zero-shot recognition via semantic embeddings and knowledge graphs. In CVPR, 2018. 2",
|
| 1483 |
+
"[57] Lei Wu, Xian-Sheng Hua, Nenghai Yu, Wei-Ying Ma, and Shipeng Li. Flickr distance: a relationship measure for visual concepts. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2011. 2",
|
| 1484 |
+
"[58] Yongqin Xian, Zeynep Akata, Gaurav Sharma, Quynh Nguyen, Matthias Hein, and Bernt Schiele. Latent embeddings for zero-shot classification. In CVPR, 2016. 2",
|
| 1485 |
+
"[59] Yongqin Xian, Christoph H Lampert, Bernt Schiele, and Zeynep Akata. Zero-shot learning-a comprehensive evaluation of the good, the bad and the ugly. T-PAMI, 2019. 1, 2, 3, 5",
|
| 1486 |
+
"[60] Yongqin Xian, Tobias Lorenz, Bernt Schiele, and Zeynep Akata. Feature generating networks for zero-shot learning. In CVPR, 2018. 2",
|
| 1487 |
+
"[61] Yongqin Xian, Saurabh Sharma, Bernt Schiele, and Zeynep Akata. f-vaegan-d2: A feature generating framework for any-shot learning. In CVPR, 2019. 1, 2, 3, 5, 6",
|
| 1488 |
+
"[62] Wenjia Xu, Yongqin Xian, Jiuniu Wang, Bernt Schiele, and Zeynep Akata. Attribute prototype network for zero-shot learning. *NeurIPS*, 2020. 1, 2, 5, 6",
|
| 1489 |
+
"[63] Ikuya Yamada, Akari Asai, Jin Sakuma, Hiroyuki Shindo, Hideaki Takeda, Yoshiyasu Takefuji, and Yuji Matsumoto. Wikipedia2vec: An efficient toolkit for learning and visualizing the embeddings of words and entities from wikipedia. ACL, 2020. 2",
|
| 1490 |
+
"[64] Xun Yang, Xiangnan He, Xiang Wang, Yunshan Ma, Fuli Feng, Meng Wang, and Tat-Seng Chua. Interpretable fashion matching with rich attributes. In ACM SIGIR, 2019. 1",
|
| 1491 |
+
"[65] Felix X Yu, Liangliang Cao, Rogerio S Feris, John R Smith, and Shih-Fu Chang. Designing category-level attributes for discriminative visual recognition. In CVPR, 2013. 2",
|
| 1492 |
+
"[66] Yunlong Yu, Zhong Ji, Yanwei Fu, Jichang Guo, Yanwei Pang, Zhongfei Mark Zhang, et al. Stacked semantics-guided attention model for fine-grained zero-shot learning. In NeurIPS, 2018. 2",
|
| 1493 |
+
"[67] Yizhe Zhu, Mohamed Elhoseiny, Bingchen Liu, Xi Peng, and Ahmed Elgammal. A generative adversarial approach for zero-shot learning from noisy texts. In CVPR, 2018. 1, 2, 6",
|
| 1494 |
+
"[68] Yizhe Zhu, Jianwen Xie, Zhiqiang Tang, Xi Peng, and Ahmed Elgammal. Semantic-guided multi-attention localization for zero-shot learning. In NeurIPS, 2019. 2"
|
| 1495 |
+
],
|
| 1496 |
+
"bbox": [
|
| 1497 |
+
501,
|
| 1498 |
+
92,
|
| 1499 |
+
893,
|
| 1500 |
+
871
|
| 1501 |
+
],
|
| 1502 |
+
"page_idx": 9
|
| 1503 |
+
}
|
| 1504 |
+
]
|
2203.10xxx/2203.10444/a14277f4-9805-49c5-8141-e66a860f32a2_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2203.10xxx/2203.10444/a14277f4-9805-49c5-8141-e66a860f32a2_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:20172034b3f4ea6571833e1e4947c45af538e309600e9a9163375af88da06509
|
| 3 |
+
size 6622753
|
2203.10xxx/2203.10444/full.md
ADDED
|
@@ -0,0 +1,346 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# VGSE: Visually-Grounded Semantic Embeddings for Zero-Shot Learning
|
| 2 |
+
|
| 3 |
+
Wenjia $\mathrm{Xu}^{1,7,8}$
|
| 4 |
+
|
| 5 |
+
Yongqin Xian2
|
| 6 |
+
|
| 7 |
+
Jiuniu Wang5,7,8
|
| 8 |
+
|
| 9 |
+
Bernt Schiele3
|
| 10 |
+
|
| 11 |
+
Zeynep Akata $^{3,4,6}$
|
| 12 |
+
|
| 13 |
+
<sup>1</sup> Beijing University of Posts and Telecommunications <sup>2</sup> ETH Zurich
|
| 14 |
+
|
| 15 |
+
3 Max Planck Institute for Informatics 4 University of Tübingen
|
| 16 |
+
|
| 17 |
+
<sup>5</sup> City University of Hong Kong
|
| 18 |
+
|
| 19 |
+
6 Max Planck Institute for Intelligent Systems
|
| 20 |
+
|
| 21 |
+
<sup>7</sup> University of Chinese Academy of Sciences <sup>8</sup> Aerospace Information Research Institute, CAS
|
| 22 |
+
|
| 23 |
+
# Abstract
|
| 24 |
+
|
| 25 |
+
Human-annotated attributes serve as powerful semantic embeddings in zero-shot learning. However, their annotation process is labor-intensive and needs expert supervision. Current unsupervised semantic embeddings, i.e., word embeddings, enable knowledge transfer between classes. However, word embeddings do not always reflect visual similarities and result in inferior zero-shot performance. We propose to discover semantic embeddings containing discriminative visual properties for zero-shot learning, without requiring any human annotation. Our model visually divides a set of images from seen classes into clusters of local image regions according to their visual similarity, and further imposes their class discrimination and semantic relatedness. To associate these clusters with previously unseen classes, we use external knowledge, e.g., word embeddings and propose a novel class relation discovery module. Through quantitative and qualitative evaluation, we demonstrate that our model discovers semantic embeddings that model the visual properties of both seen and unseen classes. Furthermore, we demonstrate on three benchmarks that our visually-grounded semantic embeddings further improve performance over word embeddings across various ZSL models by a large margin. Code is available at https://github.com/wenjiaXu/VGSE
|
| 26 |
+
|
| 27 |
+
# 1. Introduction
|
| 28 |
+
|
| 29 |
+
Semantic embeddings aggregated for every class live in a vector space that associates different classes even when visual examples of these classes are not available. Therefore, they facilitate the knowledge transfer in zero-shot learning (ZSL) [1,28,42,59] and are used as side-information in other computer vision tasks like fashion trend forecast [4,23,64], face recognition and manipulation [11,27,29], and domain adaptation [10, 24].
|
| 30 |
+
|
| 31 |
+
Human annotated attributes [19, 36, 55], characteristic properties of objects annotated by human experts, are widely
|
| 32 |
+
|
| 33 |
+

|
| 34 |
+
Human-Annotated Attributes
|
| 35 |
+
|
| 36 |
+

|
| 37 |
+
Semantic Embedding Discovery by VGSE
|
| 38 |
+
Figure 1. Human-annotated attributes (left) are labor-intensive to collect, and may neglect some local visual properties shared between classes. We propose to discover semantic embeddings via visually clustering image patches and predicting the class relations.
|
| 39 |
+
|
| 40 |
+
used as semantic embeddings [61, 62]. However, obtaining attributes is often a labor-intensive two-step process. First, domain experts carefully design an attribute vocabulary, e.g., color, shape, etc., and then human annotators indicate the presence or absence of an attribute in an image or a class (as shown in Figure 1). The labeling effort devoted to human-annotated attributes hinders its applicability of performing zero-shot learning for more datasets in realistic settings [30].
|
| 41 |
+
|
| 42 |
+
Previous works tackle this problem by using word embeddings for class names [31, 38], or semantic embeddings from online encyclopedia articles [3, 39, 67]. Though they model the semantic relation between classes without using human annotation, some of these relations may not be visually detectable by machines, resulting in a poor performance in zero-shot learning. Similarly, discriminative visual cues may not all be represented in those semantic embeddings.
|
| 43 |
+
|
| 44 |
+
To this end, we propose the Visually-Grounded Semantic Embedding (VGSE) Network to discover semantic embeddings with minimal human supervision (we only use category labels for seen class images). Our network explicitly explores visual clusters that relate image regions from different categories, which is useful for knowledge transfer between classes under zero-shot learning settings (see our
|
| 45 |
+
|
| 46 |
+
learnt clusters in Figure 1). To fully unearth the visual properties shared across different categories, our model discovers semantic embeddings by assigning image patches into various clusters according to their visual similarity. Besides, we further impose class discrimination and semantic relatedness of the semantic embeddings, to benefit their ability in transferring knowledge between classes in ZSL.
|
| 47 |
+
|
| 48 |
+
To sum up, our work makes the following contributions. (1) We propose a visually-grounded semantic embedding (VGSE) network that learns visual clusters from seen classes, and automatically predicts the semantic embeddings for each category by building the relationship between seen and unseen classes given unsupervised external knowledge sources. (2) On three zero-shot learning benchmarks (i.e. AWA2, CUB, and SUN), our learned VGSE semantic embeddings consistently improve the performance of word embeddings over five SOTA methods. (3) Through qualitative evaluation and user study, we demonstrate that our VGSE embeddings contain rich visual information like fine-grained attributes, and convey human-understandable semantics that facilitates knowledge transfer between classes.
|
| 49 |
+
|
| 50 |
+
# 2. Related Work
|
| 51 |
+
|
| 52 |
+
Zero-shot Learning aims to classify images from novel classes that do not appear during training. Existing ZSL methods usually assume that both the seen and unseen classes share a common semantic space, thus the key insight of performing ZSL is to transfer knowledge from seen classes to unseen classes. To assign the image to a semantic class embedding, many classical approaches learn a compatibility function to associate visual and semantic space [1, 20, 48, 58]. Recent works mainly focus on synthesizing image features or classifier weights with a generative model [43, 60, 61], or training enhanced image features extractors with visual attention [66, 68] or local prototypes [62].
|
| 53 |
+
|
| 54 |
+
Semantic embeddings are crucial in relating different categories with shared characteristics, i.e., the semantic space. Despite their importance, semantic embeddings are relatively under-explored in zero-shot learning. Human-annotated attributes [19, 36, 55, 59], i.e., the properties of objects such as color and shape, are the most commonly used semantic embeddings in zero-shot learning. Though the attributes can be discriminative for each class, their annotation process is labor-intensive and require expert knowledge [50, 55, 65]. We propose to discover visual properties through patch-level clustering over image datasets, and predict semantic embeddings automatically, where no additional human annotation is required except for the class labels of seen class images.
|
| 55 |
+
|
| 56 |
+
Semantic Embeddings with Minimal Supervision is drawing attention in image classification [6, 9, 26, 40, 45], transfer learning [10, 37, 54] and low-shot learning problems [3, 25, 33, 44, 50, 65]. Semantic embeddings collected
|
| 57 |
+
|
| 58 |
+
from text corpora are alternatives to manual annotations, which include word embeddings learned from large corpora [31, 38, 49, 63], semantic relations such as knowledge graphs [9, 26, 56], and semantic similarities [12, 57], etc. More recently, [3, 39, 41, 67] collect attribute-class associations from online encyclopedia articles that describe each category. The semantic similarity can be encoded by a taxonomical hierarchy or by incorporating co-occurrence statistics of words within the document. However, this may not reflect visual similarity, e.g., sheep is semantically close to dog since they often co-occur in online articles, while visually sheep is closer to a deer. We focus on discovering visually-grounded semantic embeddings in the image space, and further incorporate the semantic relations between classes into our semantic embedding for better zero-shot knowledge transfer.
|
| 59 |
+
|
| 60 |
+
Learning Visual Properties from Image Patches. Previous attempts for discovering middle-level representations for classification include exploring image-level embeddings by learning binary codes or classe representation [6, 40, 51], and further introducing humans in the loop to discover localized and nameable attributes [18, 35]. However, those methods discover properties depicted in the whole image, which might result in a combination of several semantics covering several objects (parts) that are hard to interpret [35]. Visual transformer [17] and BagNets [8] showed that image patches can work as powerful visual words conveying visual cues for class discrimination. Bag of visual words (BOVW) models [13, 47] propose to cluster image patches to learn a codebook and form image representations. However, BOVW extracts hand-crafted features followed by k-means clustering, while we learn clustering in an end-to-end manner via deep neural networks. Considering the above problem, we propose to learn visual properties by clustering image patches, and predict the semantic embeddings with the visual properties depicted by patch clusters.
|
| 61 |
+
|
| 62 |
+
More closely related to our work are the ones learning discriminative image regions that can represent each class through clustering of local patches [15, 16, 45, 46], e.g., finding representative elements to discriminate one class from others. Instead of picking up the most salient patches in each class, we aim to learn visual properties that are shared among different classes for most of the image patches appearing in the dataset. Besides, unlike some above methods that divide an image into a grid of square patches, we propose to use segmentation-based region proposals to obtain semantic image regions (e.g., the entire head could represent one semantic region).
|
| 63 |
+
|
| 64 |
+
# 3. Visually-Grounded Semantic Embedding
|
| 65 |
+
|
| 66 |
+
We are interested in the (generalized) zero-shot learning task where the training and test classes are disjoint sets. The training set $\{(x_{n},y_{n})|x_{n}\in X^{s},y_{n}\in Y^{s}\}_{n = 1}^{N_{s}}$ consists of images $x_{n}$ and their labels $y_{n}$ from the seen classes $Y^{s}$ . In
|
| 67 |
+
|
| 68 |
+

|
| 69 |
+
Figure 2. Our visually-grounded semantic embedding network consists of two modules. The Patch Clustering (PC) module learns clusters from patch images, and predicts semantic embeddings for seen classes with their images. The Class Relation (CR) module predicts the unseen class embeddings $\phi^{VGSE}(y_m)$ using unseen and seen class relations learned from external knowledge, e.g., word2vec. For instance, the embedding for unseen class sheep is predicted using the semantic embeddings of the seen classes, e.g., antelope, cow, deer, and so on.
|
| 70 |
+
|
| 71 |
+

|
| 72 |
+
|
| 73 |
+
the ZSL setting, test images are classified into unseen classes $Y^{u}$ , and in the GZSL setting, into both $Y^{s}$ and $Y^{u}$ with the help of a semantic embedding space, e.g., human annotated attributes. Since human-annotated attributes are costly to obtain, while prior unsupervised semantic embeddings are incomplete to describe the rich visual world, we propose to automatically discover a set of $D_{v}$ visual clusters as the semantic embedding, denoted by $\Phi^{VGSE} \in \mathbb{R}^{(|Y^{u}| + |Y^{s}|) \times D_{v}}$ . The semantic embeddings for seen classes $\{\phi^{VGSE}(y) | y \in Y^{s}\}$ , describing diverse visual properties of each category, are learned on seen classes images $X^{s}$ . The semantic embeddings for unseen classes $\{\phi^{VGSE}(y) | y \in Y^{u}\}$ is predicted with the help of unsupervised word embeddings, e.g., w2v embeddings for class names $\Phi^{w} \in \mathbb{R}^{(|Y^{u}| + |Y^{s}|) \times D_{w}}$ .
|
| 74 |
+
|
| 75 |
+
Our Visually-Grounded Semantic Embedding (VGSE) Network (see Figure 2) consists of two main modules. (1) The Patch Clustering (PC) module takes the training dataset as input, and clusters the image patches into $D_v$ visual clusters. Given one input image $x_n$ , PC can predict the cluster probability $a_n \in \mathbb{R}^{D_v}$ indicating how likely the image would contain the visual property appearing in each cluster. (2) Since unseen class images cannot be observed during training, we propose the Class Relation (CR) module to infer the semantic embeddings of unseen classes. Finally, the learned semantic embedding $\Phi^{\mathrm{VGSE}}$ can be used to perform downstream tasks, e.g., Zero-Shot Learning.
|
| 76 |
+
|
| 77 |
+
# 3.1. Patch Clustering (PC) Module
|
| 78 |
+
|
| 79 |
+
Patch image generation. Patch-level embeddings allow us to explore the visual properties that appear in local image regions [17, 55], e.g., the shape and texture of animal
|
| 80 |
+
|
| 81 |
+
body parts or the objects in scenes. To obtain image patches that cover the entire semantic image region (e.g. an animal head), we segment an image into regularly shaped regions via an unsupervised compact watershed segmentation algorithm [32]. As shown in Figure 2, for each image $x_{n}$ , we find the smallest bounding box that fully covers each segment and crop $x$ into $N_{t}$ patches $\{x_{nt}\}_{t=1}^{N_{t}}$ that cover different parts of the image. The number of patches $N_{t}$ is empirically set to be around 9, as we observed in initial experiments that larger patches may include too many attributes, while smaller patches will be too tiny to contain any visual attribute. In this way, we reconstruct our training set consisting of image patches $\{(x_{nt},y_{n}) | x_{nt} \in X^{sp}, y_{n} \in Y^{s}\}_{n=1}^{N_{s}}$ , here $|X^{sp}| = N_{s}N_{t}$ , and $N_{s}$ is the train set size.
|
| 82 |
+
|
| 83 |
+
Patch clustering. Our patch clustering module is a differentiable middle layer, that simultaneously learns image patch representations and clustering. As shown in Figure 2 (left), we start from a deep neural network that extracts patch feature $\theta(x_{nt}) \in \mathbb{R}^{D_f}$ , where we use a ResNet [22] pretrained on ImageNet [14] as in other ZSL models [59, 61]. Afterwards, a clustering layer $H: \mathbb{R}^{D_f} \to \mathbb{R}^{D_v}$ converts the feature representation into cluster scores:
|
| 84 |
+
|
| 85 |
+
$$
|
| 86 |
+
a _ {n t} = H \circ \theta (x _ {n t}), \tag {1}
|
| 87 |
+
$$
|
| 88 |
+
|
| 89 |
+
where $a_{nt}^{k}$ (the $k$ -th element of $a_{nt}$ ) indicates the probability of assigning image patch $x_{nt}$ to cluster $k$ , e.g., the patch clusters of spotty fur, fluffy head in Figure 2.
|
| 90 |
+
|
| 91 |
+
A pretext task can be adopted to obtain semantically meaningful representations [21, 34, 53] in an unsupervised manner. Our pretext task [53] enforces the image patch $x_{nt}$ and its neighbors being predicted to the same clusters. We
|
| 92 |
+
|
| 93 |
+
retrieve nearest patch neighbors of $x_{nt}$ as $X_{nb}^{sp}$ by the $\mathcal{L}_2$ distance of patch features $\| \theta(x_{nt}) - \theta(x_i)\|_2$ , where $x_i \in X^{sp}$ and $x_i \neq x_{nt}$ . The clustering loss is defined as
|
| 94 |
+
|
| 95 |
+
$$
|
| 96 |
+
\mathcal {L} _ {c l u} = - \sum_ {x _ {n t} \in X ^ {s p}} \sum_ {x _ {i} \in X _ {n b} ^ {s p}} \log \left(a _ {n t} ^ {T} a _ {i}\right), \tag {2}
|
| 97 |
+
$$
|
| 98 |
+
|
| 99 |
+
where $a_{i} = H\circ \theta (x_{i})$ . $\mathcal{L}_{clu}$ imposes consistent cluster assignment for $x_{nt}$ and its neighbors. To avoid all images being assigned to the same cluster, we follow [53] to add an entropy penalty as follows:
|
| 100 |
+
|
| 101 |
+
$$
|
| 102 |
+
\mathcal {L} _ {p e l} = \sum_ {k = 1} ^ {D _ {v}} \bar {a} _ {n t} ^ {k} \log \bar {a} _ {n t} ^ {k}, \quad \bar {a} _ {n t} ^ {k} = \frac {1}{N _ {s} N _ {t}} \sum_ {x _ {n t} \in X ^ {s p}} a _ {n t} ^ {k}, \tag {3}
|
| 103 |
+
$$
|
| 104 |
+
|
| 105 |
+
ensuring that images are spread uniformly over all clusters.
|
| 106 |
+
|
| 107 |
+
Class discrimination. To impose class discrimination information into the learnt clusters, we propose to apply an cluster-to-class layer $Q: \mathbb{R}^{D_v} \to \mathbb{R}^{|Y^s|}$ to map the cluster prediction of each image to the class probability, i.e., $p(y|x_{nt}) = \text{softmax}(Q \circ \theta(x_{nt}))$ . We train this module with the following cross-entropy loss,
|
| 108 |
+
|
| 109 |
+
$$
|
| 110 |
+
\mathcal {L} _ {c l s} = - \log \frac {\exp (p (y _ {n} | x _ {n t}))}{\sum_ {\hat {y} \in Y ^ {s}} \exp (p (\hat {y} | x _ {n t}))}. \tag {4}
|
| 111 |
+
$$
|
| 112 |
+
|
| 113 |
+
Semantic relatedness. We further encourage the learned visual clusters to be transferable between classes, to benefit the downstream zero-shot learning tasks. We learn clusters shared between semantically related classes, e.g., horse share more semantic information with deer than with dolphin. We implement this by mapping the learned cluster probability to the semantic space constructed by w2v embeddings $\Phi^w$ . The cluster-to-semantic layer $S: \mathbb{R}^{D_v} \to \mathbb{R}^{D_w}$ is trained by regressing the w2v embedding for each class,
|
| 114 |
+
|
| 115 |
+
$$
|
| 116 |
+
\mathcal {L} _ {s e m} = \left\| S \circ a _ {n t} - \phi^ {w} (y _ {n}) \right\| _ {2}, \tag {5}
|
| 117 |
+
$$
|
| 118 |
+
|
| 119 |
+
where $y_{n}$ denotes the ground truth class, and $\phi^w (y_n)\in$ $\mathbb{R}^{D_w}$ represents the w2v embedding for the class $y_{n}$
|
| 120 |
+
|
| 121 |
+
The overall objective for training the model is as follows:
|
| 122 |
+
|
| 123 |
+
$$
|
| 124 |
+
\mathcal {L} = \mathcal {L} _ {c l u} + \lambda \mathcal {L} _ {p e l} + \beta \mathcal {L} _ {c l s} + \gamma \mathcal {L} _ {s e m}. \tag {6}
|
| 125 |
+
$$
|
| 126 |
+
|
| 127 |
+
Predict seen semantic embeddings. After we learned the visual clusters, given one input image patch $x_{nt}$ , the model extracts the feature $\theta(x_{nt})$ followed by predicting the cluster probability $a_{nt} = H \circ \theta(x_{nt}) \in \mathbb{R}^{D_v}$ where each dimension indicates the likelihood that the image patch $x_{nt}$ being assigned to a certain cluster learned by this module.
|
| 128 |
+
|
| 129 |
+
The image embedding $a_{n}\in \mathbb{R}^{D_{v}}$ for $x_{n}$ is calculated by averaging the patch embedding in that image:
|
| 130 |
+
|
| 131 |
+
$$
|
| 132 |
+
a _ {n} = \frac {1}{N _ {t}} \sum_ {t = 1} ^ {N _ {t}} a _ {n t}. \tag {7}
|
| 133 |
+
$$
|
| 134 |
+
|
| 135 |
+
Similarly, we calculate the semantic embedding for $y_{n}$ by averaging the embeddings of all images belonging to $y_{n}$ :
|
| 136 |
+
|
| 137 |
+
$$
|
| 138 |
+
\phi^ {V G S E} \left(y _ {n}\right) = \frac {1}{\left| I _ {i} \right|} \sum_ {j \in I _ {i}} a _ {j}, \tag {8}
|
| 139 |
+
$$
|
| 140 |
+
|
| 141 |
+
where $I_{i}$ is the indexes of all images belonging to class $y_{n}$ , and $a_{j}$ denotes the image embedding of the $j$ -th image.
|
| 142 |
+
|
| 143 |
+
# 3.2. Class Relation (CR) Module
|
| 144 |
+
|
| 145 |
+
While seen semantic embeddings can be estimated from training images using Eq. 8, how to compute the unseen semantic embeddings is not straightforward since their training images are not available. As semantically related categories share common properties, e.g., sheep and cow both live on grasslands, we propose to learn a Class Relation Module to formulate the similarity between seen classes $Y^{s}$ and unseen classes $Y^{u}$ . In general, any external knowledge, e.g., word2vec [31, 38] or human-annotated attributes, can be utilized to formulate the relationship between two classes. Here we use word2vec learned from a large online corpus to minimize the human annotation effort. Below, we present two solutions to learn the class relations: (1) directly averaging the semantic embeddings from the neighbor seen classes in the word2vec spaces, (2) optimizing a similarity matrix between unseen and seen classes.
|
| 146 |
+
|
| 147 |
+
Weighted Average (WAvg). For unseen class $y_{m}$ , we first retrieve several nearest class neighbours in seen classes by the similarity measured with $\mathcal{L}_2$ distance over w2v embedding space, and we denote the neighbor classes set as $Y_{nb}^{s}$ . The semantic embedding vector for $y_{m}$ is calculated as the weighted combination [5] of seen semantic embeddings:
|
| 148 |
+
|
| 149 |
+
$$
|
| 150 |
+
\phi^ {V G S E} \left(y _ {m}\right) = \frac {1}{\left| Y _ {n b} ^ {s} \right|} \sum_ {\tilde {y} \in Y _ {n b} ^ {s}} \operatorname {s i m} \left(y _ {m}, \tilde {y}\right) \cdot \phi^ {V G S E} (\tilde {y}), \tag {9}
|
| 151 |
+
$$
|
| 152 |
+
|
| 153 |
+
$$
|
| 154 |
+
\operatorname {s i m} \left(y _ {m}, \tilde {y}\right) = \exp \left(- \eta \left\| \phi^ {w} \left(y _ {m}\right) - \phi^ {w} (\tilde {y}) \right\| _ {2}\right), \tag {10}
|
| 155 |
+
$$
|
| 156 |
+
|
| 157 |
+
where exp stands for the exponential function and $\eta$ is a hyperparameter to adjust the similarity weight. We denote our semantic embeddings learned with weighted average strategy as VGSE-WAvg.
|
| 158 |
+
|
| 159 |
+
Similarity Matrix Optimization (SMO). Given the w2v embeddings $\phi^w(Y^s) \in \mathbb{R}^{|Y^s| \times D_w}$ of seen classes and embedding $\phi^w(y_m)$ for unseen class $y_m$ , we learn a similarity mapping $r \in \mathbb{R}^{|Y^s|}$ , where $r_i$ denotes the similarity between the unseen class $y_m$ and the $i$ -th seen class. The similarity mapping is learned via the following optimization problem:
|
| 160 |
+
|
| 161 |
+
$$
|
| 162 |
+
\min _ {r} \left\| \phi^ {w} (y _ {m}) - r ^ {T} \phi^ {w} (Y ^ {s}) \right\| _ {2}
|
| 163 |
+
$$
|
| 164 |
+
|
| 165 |
+
$$
|
| 166 |
+
\text {s . t .} \quad \alpha < r < 1 \quad a n d \quad \sum_ {i = 1} ^ {| Y ^ {s} |} r _ {i} = 1. \tag {11}
|
| 167 |
+
$$
|
| 168 |
+
|
| 169 |
+
Here $\alpha$ is the lower bound which can be either 0 or $-1$ , indicating whether we only learn positive class relations or we learn negative relations as well. We base this mapping on the assumption that semantic embeddings follow linear analogy, e.g., $\phi^w (\mathrm{king}) - \phi^w (\mathrm{man}) + \phi^w (\mathrm{woman})\approx \phi^w (\mathrm{queen})$ , which holds for w2v embeddings and our semantic embeddings $\phi^{VGSE}$ . After the mapping is learned, we can predict the semantic embeddings for the unseen class $y_{m}$ as:
|
| 170 |
+
|
| 171 |
+
$$
|
| 172 |
+
\phi^ {V G S E} \left(y _ {m}\right) = r ^ {T} \phi^ {V G S E} \left(Y _ {s}\right), \tag {12}
|
| 173 |
+
$$
|
| 174 |
+
|
| 175 |
+
where the value of each discovered semantic embedding for unseen class $y_{m}$ is the weighted sum of all seen class semantic embeddings. We denote our semantic embeddings learned with similarity matrix optimization (SMO) as VGSE-SMO.
|
| 176 |
+
|
| 177 |
+
# 4. Experiments
|
| 178 |
+
|
| 179 |
+
After introducing the datasets and experimental settings, we demonstrate that our VGSE outperforms unsupervised word embeddings over three benchmark datasets and this phenomenon generalizes to five SOTA ZSL models (§4.1). With extensive ablation studies, we showcase clustering with images patches is effective for learning the semantic embeddings, and demonstrate the effectiveness of the PC module and CR module (§4.2). In the end, we present visual clusters as qualitative results (§4.3, §4.4).
|
| 180 |
+
|
| 181 |
+
Dataset. We validate our model on three ZSL benchmark datasets. AWA2 [59] is a coarse-grained dataset for animal categorization, containing 30,475 images from 50 classes, where 40 classes are seen and 10 are unseen classes. CUB [55] is a fine-grained dataset for bird classification, containing 11,788 images and 200 classes, where 150 classes are seen and 50 are unseen classes. SUN [36] is also a fine-grained dataset for scene classification, with 14,340 images coming from 717 scene classes, where 645 classes are seen and 72 are unseen classes.
|
| 182 |
+
|
| 183 |
+
Implementation details. Specifically, in the patch clustering (PC) module we learn seen-semantic embeddings with train set (seen classes) proposed by [59], the unseen-class embeddings are predicted in the class relation (CR) module without seeing unseen images. We adopt ResNet50 [22] pretrained on ImageNet1K [14] as the backbone. The cluster number $D_v$ is set as 150 for three datasets. For the Weighted Average module in Eq. 9, we set $\eta$ as 5 for all datasets, and use 5 neighbors for all datasets. For the similarity matrix optimization in Eq. 11, we set $\alpha$ as -1 for AWA2 and CUB, and as 0 for SUN. More details are in the supplementary.
|
| 184 |
+
|
| 185 |
+
Semantic embeddings for ZSL. To be fair, we compare our VGSE semantic embeddings with other alternatives using the same image features and ZSL models. All the image features are extracted from ResNet101 [22] pretrained on ImageNet [14]. We follow the data split provided by [59]. The semantic embeddings are L2 normalized following [59]. All
|
| 186 |
+
|
| 187 |
+
ablation studies use the SJE [2,62] as the ZSL model as it is simple to train. Besides, we verify the generalization ability of our semantic embeddings over five state-of-the-art ZSL models with their official code. The non-generative models include SJE [2], APN [62], GEM-ZSL [28], learning a compatibility function between image and semantic embeddings. The generative approaches consist of CADA-VAE [43] and f-VAEGAN-D2 [61], learning a generative model that synthesizes image features of unseen classes from their semantic embeddings. Note that for all ZSL models, we use the same hyperparameters as proposed in their original papers for all semantic embeddings with no hyperparameter tuning.
|
| 188 |
+
|
| 189 |
+
# 4.1. Comparing with the State-of-the-Art
|
| 190 |
+
|
| 191 |
+
We first compare our semantic embeddings VGSE-SMO with the unsupervised word embeddings w2v [31] on three benchmark datasets and five ZSL models. We further compare ours with other state-of-the-art methods that learn semantic embeddings with less human annotation.
|
| 192 |
+
|
| 193 |
+
VGSE surpasses w2v by a large margin. The results shown in Table 1 demonstrate that our VGSE-SMO semantic embeddings significantly outperform word embedding w2v on all datasets and all ZSL models. Considering the non-generative ZSL models, VGSE-SMO outperform w2v on all three datasets by a large margin. In particular, on AWA2 dataset, when coupled with GEM-ZSL, our VGSE-SMO boosts the ZSL performance of w2v from $50.2\%$ to $58.0\%$ . On the fine-grained datasets CUB and SUN, VGSE-SMO achieves even higher accuracy boosts. For example, when coupled with the APN model, VGSE-SMO increases the ZSL accuracy of CUB from $22.7\%$ to $28.9\%$ , and the accuracy of SUN from $23.6\%$ to $38.1\%$ . These results demonstrate that our approach not only works well on generic object categories, but also has great potential to benefit the challenging fine-grained classification task. VGSE improves the GZSL performance of both seen and unseen classes, yielding a much better harmonic mean (e.g., when trained with SJE, VGSE-SMO improves over the harmonic mean of w2v by $8.0\%$ on AWA2, $10.3\%$ on CUB, and $7.6\%$ on SUN). These results indicate that our VGSE facilitates the model to learn a better compatibility function between image and semantic embeddings, for both seen and unseen classes.
|
| 194 |
+
|
| 195 |
+
Our VGSE semantic embeddings show great potential on generative models as well. In particular, VGSE coupled with f-VAEGAN-D2 surpasses all other methods by a wide margin on SUN and CUB datasets, i.e., we obtain $35.0\%$ vs $32.7\%$ (w2v) on CUB, and $41.1\%$ vs $39.6\%$ (w2v) on SUN. As our embeddings are more machine detectable than w2v, introducing visual properties to the conditional GAN will allow them to generate more discriminative image features.
|
| 196 |
+
|
| 197 |
+
VGSE outperforms SOTA weakly supervised ZSL semantic embeddings. We compare VGSE with other works that learn ZSL semantic embeddings with less human annotation.
|
| 198 |
+
|
| 199 |
+
<table><tr><td rowspan="3"></td><td rowspan="3">ZSL Model</td><td rowspan="3">Semantic Embeddings</td><td colspan="3">Zero-Shot Learning</td><td colspan="8">Generalized Zero-Shot Learning</td><td></td></tr><tr><td>AWA2</td><td>CUB</td><td>SUN</td><td colspan="3">AWA2</td><td colspan="3">CUB</td><td colspan="2">SUN</td><td></td></tr><tr><td>T1</td><td>T1</td><td>T1</td><td>u</td><td>s</td><td>H</td><td>u</td><td>s</td><td>H</td><td>u</td><td>s</td><td>H</td></tr><tr><td rowspan="4">Generative</td><td rowspan="2">CADE-VAE [43]</td><td>w2v [31]</td><td>49.0</td><td>22.5</td><td>37.8</td><td>38.6</td><td>60.1</td><td>47.0</td><td>16.3</td><td>39.7</td><td>23.1</td><td>26.0</td><td>28.2</td><td>27.0</td></tr><tr><td>VGSE-SMO (Ours)</td><td>52.7</td><td>24.8</td><td>40.3</td><td>46.9</td><td>61.6</td><td>53.9</td><td>18.3</td><td>44.5</td><td>25.9</td><td>29.4</td><td>29.6</td><td>29.5</td></tr><tr><td rowspan="2">f-VAEGAN-D2 [61]</td><td>w2v [31]</td><td>58.4</td><td>32.7</td><td>39.6</td><td>46.7</td><td>59.0</td><td>52.2</td><td>23.0</td><td>44.5</td><td>30.3</td><td>25.9</td><td>33.3</td><td>29.1</td></tr><tr><td>VGSE-SMO (Ours)</td><td>61.3</td><td>35.0</td><td>41.1</td><td>45.7</td><td>66.7</td><td>54.2</td><td>24.1</td><td>45.7</td><td>31.5</td><td>25.5</td><td>35.7</td><td>29.8</td></tr><tr><td rowspan="6">Non-Generative</td><td rowspan="2">SJE [2]</td><td>w2v [31]</td><td>53.7</td><td>14.4</td><td>26.3</td><td>39.7</td><td>65.3</td><td>48.8</td><td>13.2</td><td>28.6</td><td>18.0</td><td>19.8</td><td>18.6</td><td>19.2</td></tr><tr><td>VGSE-SMO (Ours)</td><td>62.4</td><td>26.1</td><td>35.8</td><td>46.8</td><td>72.3</td><td>56.8</td><td>16.4</td><td>44.7</td><td>28.3</td><td>28.7</td><td>25.2</td><td>26.8</td></tr><tr><td rowspan="2">GEM-ZSL [28]</td><td>w2v [31]</td><td>50.2</td><td>25.7</td><td>-</td><td>40.1</td><td>80.0</td><td>53.4</td><td>11.2</td><td>48.8</td><td>18.2</td><td>-</td><td>-</td><td>-</td></tr><tr><td>VGSE-SMO (Ours)</td><td>58.0</td><td>29.1</td><td>-</td><td>49.1</td><td>78.2</td><td>60.3</td><td>13.1</td><td>43.0</td><td>20.0</td><td>-</td><td>-</td><td>-</td></tr><tr><td rowspan="2">APN [62]</td><td>w2v [31]</td><td>59.6</td><td>22.7</td><td>23.6</td><td>41.8</td><td>75.0</td><td>53.7</td><td>17.6</td><td>29.4</td><td>22.1</td><td>16.3</td><td>15.3</td><td>15.8</td></tr><tr><td>VGSE-SMO (Ours)</td><td>64.0</td><td>28.9</td><td>38.1</td><td>51.2</td><td>81.8</td><td>63.0</td><td>21.9</td><td>45.5</td><td>29.5</td><td>24.1</td><td>31.8</td><td>27.4</td></tr></table>
|
| 200 |
+
|
| 201 |
+
Table 1. Comparing our VGSE-SMO, with w2v semantic embedding over state-of-the-art ZSL models. In ZSL, we measure Top-1 accuracy (T1) on unseen classes, in GZSL on seen/unseen $(\mathbf{s} / \mathbf{u})$ classes and their harmonic mean (H). Feature Generating Methods, i.e., f-VAEGAN-D2, and CADA-VAE generating synthetic training samples, and SJE, APN, GEM-ZSL using only real image features.
|
| 202 |
+
|
| 203 |
+
<table><tr><td rowspan="2">Semantic Embeddings</td><td rowspan="2">External knowledge</td><td colspan="3">Zero-shot learning</td></tr><tr><td>AWA2</td><td>CUB</td><td>SUN</td></tr><tr><td>w2v [31]</td><td>w2v</td><td>58.4</td><td>32.7</td><td>39.6</td></tr><tr><td>ZSLNS [39]</td><td>T</td><td>57.4</td><td>27.8</td><td>-</td></tr><tr><td>GAZSL [67]</td><td>T</td><td>-</td><td>34.4</td><td>-</td></tr><tr><td>Auto-dis [3]</td><td>T</td><td>52.0</td><td>-</td><td>-</td></tr><tr><td>CAAP [5]</td><td>T and H</td><td>55.3</td><td>31.9</td><td>35.5</td></tr><tr><td>VGSE-SMO (Ours)</td><td>w2v</td><td>61.3 ± 0.3</td><td>35.0 ± 0.2</td><td>41.1 ± 0.3</td></tr></table>
|
| 204 |
+
|
| 205 |
+
CAAP [5] learns the unseen semantic embeddings with the help of w2v and the human annotated attributes for seen classes. Auto-Dis [3] collects attributes from online encyclopedia articles that describe each category, and learn attribute-class association with the supervision of visual data and category label. GAZSL [67] and ZSLNS [39] learn semantic embeddings from wikipedia articles.
|
| 206 |
+
|
| 207 |
+
The results shown in Table 2 demonstrate that our VGSE embedding, using only w2v as external knowledge, surpasses all other methods that uses textual articles on three datasets. In particular, our VGSE-SMO achieves an accuracy of $61.3\%$ on AWA2, improving the closest semantic embedding w2v by $2.9\%$ . On SUN, we also outperform the closest semantic embedding w2v by $1.5\%$ .
|
| 208 |
+
|
| 209 |
+
# 4.2. Ablation study
|
| 210 |
+
|
| 211 |
+
We provide ablation studies for our PC and CR modules. Is PC module effective? We first ask if learning semantic embeddings through clustering is effective in terms of ZSL accuracy, when compared to other alternatives. We compare our semantic embeddings against the following baselines:
|
| 212 |
+
|
| 213 |
+
ResNet features are extracted by feeding image patch
|
| 214 |
+
|
| 215 |
+
Table 2. Comparing with state-of-the-art methods for learning semantic embeddings with less human annotation (T: online textual articles, H: human annotation) using same image features and ZSL model (f-VAEGAN-d2 [61]).
|
| 216 |
+
|
| 217 |
+
<table><tr><td rowspan="2">Semantic Embeddings</td><td colspan="3">Zero-shot learning</td></tr><tr><td>AWA2</td><td>CUB</td><td>SUN</td></tr><tr><td>k-means-SMO</td><td>54.5 ± 0.4</td><td>15.0 ± 0.5</td><td>25.2 ± 0.4</td></tr><tr><td>ResNet-SMO</td><td>55.3 ± 0.2</td><td>15.4 ± 0.1</td><td>25.1 ± 0.1</td></tr><tr><td>Lclu + Lpel (baseline + SMO)</td><td>56.6 ± 0.2</td><td>16.7 ± 0.2</td><td>26.3 ± 0.3</td></tr><tr><td>+ Lcls</td><td>61.2 ± 0.1</td><td>23.7 ± 0.2</td><td>30.5 ± 0.2</td></tr><tr><td>+ Lsem (VGSE-SMO)</td><td>62.4 ± 0.3</td><td>26.1 ± 0.3</td><td>35.8 ± 0.2</td></tr><tr><td>VGSE-WAvg</td><td>57.7 ± 0.2</td><td>25.8 ± 0.3</td><td>35.3 ± 0.2</td></tr></table>
|
| 218 |
+
|
| 219 |
+
Table 3. Ablation study over the PC module reporting ZSL T1 on AWA2, CUB, and SUN (mean accuracy and std over 5 runs). The baseline is the PC module with the cluster loss $\mathcal{L}_{clu}$ and $\mathcal{L}_{pel}$ . Our full model VGSE-SMO is trained with two additional losses $\mathcal{L}_{cls}$ , $\mathcal{L}_{sem}$ . Two kinds of semantic embeddings learned from k-means clustering and pretrained ResNet are listed below for comparison.
|
| 220 |
+
|
| 221 |
+
$x_{nt}$ to a pretrained ResNet50. We follow Eq. 7 and Eq. 8 to predict semantic embeddings for seen classes. $K$ -means clustering is an alternative for our clustering model. We cluster the patch images features $\theta(x_{nt})$ learned from our PC module into $D_v$ visual clusters. The patch embedding $a_{nt}^k$ is defined as the cosine similarity between the patch feature $\theta(x_{nt})$ and the cluster center. In both cases the unseen semantic embeddings are predicted with our SMO module.
|
| 222 |
+
|
| 223 |
+
We ablate our losses and compare our VGSE-SMO with the two alternatives, then report ZSL results on three benchmark datasets in Table 3. First, the k-means-SMO achieves on par results with our baseline model trained with only the cluster losses $\mathcal{L}_{clu}$ and $\mathcal{L}_{pel}$ [53], the reason we adopt [53] instead of k-means is that we can easily train the network with our proposed losses in an end-to-end manner. Second, the addition of the classification loss $\mathcal{L}_{cls}$ leads to notable improvement over the baseline model trained with $\mathcal{L}_{clu}$ and $\mathcal{L}_{pel}$ , and the semantic relatedness loss $\mathcal{L}_{sem}$ further improve the performance of our semantic embeddings, e.g., in total, we gain
|
| 224 |
+
|
| 225 |
+
$5.8\%$ , $9.4\%$ and $9.5\%$ improvement on AWA2, CUB, and SUN, respectively. The result demonstrates that imposing class discrimination and semantic relatedness leads to better performance in the ZSL setting. Third, our VGSE-SMO embeddings improve over the ResNet-SMO embeddings by $7.1\%$ , $10.7\%$ and $10.7\%$ on AWA2, CUB, and SUN, respectively. We conjecture that the visual clusters learned in our model is shared among different classes and lead to better generalization ability when the training and testing sets are disjoint (see qualitative results in Figure 1 and Section 4.3).
|
| 226 |
+
|
| 227 |
+
How many clusters are needed? To measure the influence of the cluster number $D_v$ on our semantic embeddings, we train the PC module with various $D_v$ (results shown in Figure 3a). When the unseen semantic embeddings are predicted under an oracle setting (predicted from the unseen class images), various dimension $D_v$ does not influence the classification accuracy on unseen classes (the orange curve). While under the ZSL setting where unseen semantic embeddings are predicted from class relations (VGSE-SMO), the cluster numbers influence the ZSL performance. Before the cluster number increases up to a breaking point ( $D_v = 200$ ), the ability of the semantic embeddings is also improved (from $58.4\%$ to $62.5\%$ ), since the learned clusters contain visually similar patches from different classes, which can model the visual relation between classes. However, increasing the number of clusters leads to small pure clusters (patches coming from one single category), resulting in poor generalization between seen and unseen classes.
|
| 228 |
+
|
| 229 |
+
SMO vs WAvg. We compare our two class relation functions VGSE-WAvg and VGSE-SMO in Table 3 (Row 7 and 6). The results demonstrate that VGSE-WAvg works on par with VGSE-SMO on SUN and CUB datasets, with $< 0.5\%$ performance gap. While on AWA2 dataset, VGSE-SMO yields better ZSL performance (with $62.4\%$ ) than VGSE-WAvg (with $57.7\%$ ). The results indicate that predicting the unseen semantic embeddings with the weighted average of a few seen classes semantic embeddings (VGSE-WAvg) is working well for fine-grained datasets since the visual discrepancy between classes is small. However, for coarse-grained dataset AWA2, the class relation function considering all the seen classes embeddings (VGSE-SMO) works better.
|
| 230 |
+
|
| 231 |
+
Ablation over patches. We further study if using patches for clustering is better than using the whole image, and how many patches do we need from one image. The experiment results in Figure 3b demonstrate that with the patch number increase from 1 (single image clustering) to 9, the ZSL performance increases as well, since the image patches used for semantic embedding learning contain semantic object parts and thus result in better knowledge transfer between seen and unseen classes. However, for a large $N_{t}$ , the patches might be too tiny to contain consistent semantic, thus resulting in performance dropping, e.g., the ZSL accuracy on AWA2 drops from $62.4\%$ ( $N_{t} = 9$ ) to $58.7\%$ ( $N_{t} = 128$ ). We also
|
| 232 |
+
|
| 233 |
+

|
| 234 |
+
(a) cluster number $D_{v}$
|
| 235 |
+
|
| 236 |
+

|
| 237 |
+
(b) patch number $N_{t}$
|
| 238 |
+
Figure 3. (a) Influence of the cluster number $D_v = 50, \ldots, 3000$ . In the oracle setting, we feed unseen classes images to the PC module to predict unseen semantic embeddings. (b) Influence of the patch number $N_t$ we used per image with the watershed segmentation for obtaining our VGSE-SMO class embeddings. $N_t = 1$ uses the whole image (no patches). "3×3 grid" crops the image into 9 square patches. Both plots report ZSL accuracy with SJE model trained on AWA2 dataset (mean and std over 5 runs).
|
| 239 |
+
|
| 240 |
+
<table><tr><td rowspan="2">Semantic Embeddings</td><td colspan="2">AWA2</td><td colspan="2">CUB</td></tr><tr><td>T1</td><td>H</td><td>T1</td><td>H</td></tr><tr><td>w2v [31]</td><td>53.7 ± 0.2</td><td>48.8 ± 0.1</td><td>14.4 ± 0.3</td><td>18.0 ± 0.2</td></tr><tr><td>VGSE-SMO (w2v)</td><td>62.4 ± 0.1</td><td>56.8 ± 0.1</td><td>26.1 ± 0.2</td><td>28.3 ± 0.1</td></tr><tr><td>glove [38]</td><td>38.8 ± 0.2</td><td>38.7 ± 0.3</td><td>19.3 ± 0.2</td><td>13.4 ± 0.1</td></tr><tr><td>VGSE-SMO (glove)</td><td>46.5 ± 0.1</td><td>46.0 ± 0.1</td><td>25.2 ± 0.3</td><td>27.1 ± 0.2</td></tr><tr><td>fasttext [7]</td><td>47.7 ± 0.1</td><td>44.6 ± 0.3</td><td>-</td><td>-</td></tr><tr><td>VGSE-SMO (fasttext)</td><td>51.9 ± 0.2</td><td>53.2 ± 0.1</td><td>-</td><td>-</td></tr><tr><td>Attribute</td><td>62.8 ± 0.1</td><td>62.6 ± 0.3</td><td>56.4 ± 0.2</td><td>49.4 ± 0.1</td></tr><tr><td>VGSE-SMO (Attribute)</td><td>66.7 ± 0.1</td><td>64.9 ± 0.1</td><td>56.8 ± 0.1</td><td>50.9 ± 0.2</td></tr></table>
|
| 241 |
+
|
| 242 |
+
Table 4. Evaluating the external knowledge, i.e., word embeddings w2v [31], glove [38], fasttext [7], and the human annotated attributes, for our VGSE-SMO embeddings, e.g., VGSE-SMO (glove) indicates that CR module is trained with glove embedding. T1: top-1 accuracy in ZSL, H: harmonic mean in GZSL trained with SJE [2] on AWA2, and CUB (std over 5 runs).
|
| 243 |
+
|
| 244 |
+
compare the patches generated by watershed segmentation proposal with using $3 \times 3$ grid patches ( $N_{t} = 9$ ), and we found that using watershed as the region proposal results in accuracy boost (8.2% on AWA2) compared to the regular grid patch, since the former patches tend to cover more complete object parts rather than random cropped regions.
|
| 245 |
+
|
| 246 |
+
Can we do better with human annotated attributes? Table 4 shows the performance of our model when different external knowledge is used to verdict the unseen class embeddings in the CR module. Nearly all of our conclusions from former section carry over, e.g., VGSE-SMO class embeddings outperform the other class embeddings by a large margin. For instance, we improve the ZSL accuracy over glove by $7.7\%$ (AWA2) and $5.9\%$ (CUB). Furthermore, VGSE-SMO (Attribute) also outperform Attribute on both AWA2 and CUB dataset, i.e., we achieve $66.7\%$ (ZSL) on AWA2, compared to human attributes with $62.8\%$ . The results demonstrate that our
|
| 247 |
+
|
| 248 |
+

|
| 249 |
+
Figure 4. T-SNE embeddings of image patches from AWA2. Each colored dot region represents one visual cluster learnt by our VGSE model. We sample the seen (in blue) and unseen images (in orange) from the cluster center with their class names shown nearby.
|
| 250 |
+
|
| 251 |
+
VGSE-SMO embeddings coupled with visually-grounded information can not only outperform the unsupervised word embeddings, but also improve over human attributes in transferring knowledge under the zero-shot setting.
|
| 252 |
+
|
| 253 |
+
# 4.3. Qualitative Results
|
| 254 |
+
|
| 255 |
+
In Figure 4, we show the 2D visualization of image patches in the AWA2, where 10,000 image patches are presented by projecting their embeddings $a_{nt}$ onto two dimensions with t-SNE [52]. To picture their distribution on the embedding space, we sample several visual clusters (dots marked in the same color) and the image patches from the cluster center of both seen and unseen categories. Note that the unseen patches are not used to predict the unseen semantic embeddings, but only used for visualization here.
|
| 256 |
+
|
| 257 |
+
We observe that samples in the same cluster tend to gather together, indicating that the embeddings provide discriminative information. Besides, images patches in one cluster do convey consistent visual properties, though coming from disjoint categories. For instance, the white fur appears on rabbit, polar bear, and fox are clustered into one group, and the striped fur from tiger, zebra, and bobcat gather together because of their similar texture. We further observe that nearly all clusters consist images from more than one categories. For instance, the horns from seen classes ox, deer, rhinoceros, and unseen class sheep, that with slightly different shape but same semantic, are clustered together. Similar phenomenon can be observed on the spotted fur and animals in ocean clusters. It indicates that the clusters we learned contain semantic properties shared across seen classes, and can be transferred to unseen classes. Another interesting observation is that our VGSE clusters discover visual properties
|
| 258 |
+
|
| 259 |
+
that my be neglected by human-annotated attributes, e.g., the cage appear for hamsters and rat, and the black and white fur not only appear on gaint panda but also on sheeps.
|
| 260 |
+
|
| 261 |
+
# 4.4. Human Evaluation
|
| 262 |
+
|
| 263 |
+
To evaluate if our VGSE conveys consistent visual and semantic properties, we randomly pick 50 clusters, each equipped with 30 images from the cluster center, and ask 5 postgraduate students without prior knowledge of ZSL to examine the clusters and answer the following three questions. Q1: Do images in this cluster contain consistent visual property? Q2: Do images in this cluster convey consistent semantic information? Q3: Please name the semantics you observed from the clusters, if your answer to Q2 is true. We do the same user study to 50 randomly picked clusters from the k-means clustering model. The results reveal that in $88.5\%$ and $87.0\%$ cases, users think our clusters convey consistent visual and semantic information. While for k-means clusters, the results are $71.5\%$ and $71.0\%$ , respectively. The user evaluation results agree with the quantitative results in Table 3, which demonstrates that the class embeddings containing consistent visual and semantic information can significantly benefit the ZSL performance. Interestingly, by viewing VGSE clusters, users can easily discover semantics and even fine-grained attributes not depicted by human-annotated attributes, i.e., the fangs and horns in figure 1. Note that the whole process, i.e., naming 50 attributes for 40 classes, took less than 1 hour for each user.
|
| 264 |
+
|
| 265 |
+
# 5. Conclusion
|
| 266 |
+
|
| 267 |
+
We develop a Visually-Grounded Semantic Embedding Network (VGSE) to learn distinguishing semantic embeddings for zero-shot learning with minimal human supervision. By clustering image patches with respect to their visual similarity, our network explores various semantic clusters shared between classes. Experiments on three benchmark datasets demonstrate that our semantic embeddings predicted from the class-relation module are generalizable to unseen classes, i.e., achieving significant improvement compared with word embeddings when trained with five models in both ZSL and GZSL settings. We further show that the visually augmented semantic embedding outperforms other semantic embeddings learned with minimal human supervision. The qualitative results verify that we discover visually consistent clusters that generalize from seen to unseen classes and can unearth the fine-grained properties not depicted by humans.
|
| 268 |
+
|
| 269 |
+
# Acknowledgements
|
| 270 |
+
|
| 271 |
+
This work has been partially funded by the ERC 853489 - DEXIM and by the DFG - EXC number 2064/1 - Project number 390727645.
|
| 272 |
+
|
| 273 |
+
# References
|
| 274 |
+
|
| 275 |
+
[1] Zeynep Akata, Florent Perronnin, Zaid Harchaoui, and Cordelia Schmid. Label-embedding for image classification. T-PAMI, 2015. 1, 2
|
| 276 |
+
[2] Zeynep Akata, Scott Reed, Daniel Walter, Honglak Lee, and Bernt Schiele. Evaluation of output embeddings for fine-grained image classification. In CVPR, 2015. 5, 6, 7
|
| 277 |
+
[3] Ziad Al-Halah and Rainer Stiefelhagen. Automatic discovery, association estimation and learning of semantic attributes for a thousand categories. In CVPR, 2017. 1, 2, 6
|
| 278 |
+
[4] Ziad Al-Halah, Rainer Stiefelhagen, and Kristen Grauman. Fashion forward: Forecasting visual style in fashion. In ICCV, 2017. 1
|
| 279 |
+
[5] Ziad Al-Halah, Makarand Tapaswi, and Rainer Stiefelhagen. Recovering the missing link: Predicting class-attribute associations for unsupervised zero-shot learning. In CVPR, 2016. 4, 6
|
| 280 |
+
[6] Alessandro Bergamo, Lorenzo Torresani, and Andrew W Fitzgibbon. Picodes: Learning a compact code for novel-category recognition. In NIPS. CiteSeer, 2011. 2
|
| 281 |
+
[7] Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. Enriching word vectors with subword information. Transactions of the Association for Computational Linguistics, 5:135-146, 2017. 7
|
| 282 |
+
[8] Wieland Brendel and Matthias Bethge. Approximating cnns with bag-of-local-features models works surprisingly well onImagenet. *ICLR*, 2019. 2
|
| 283 |
+
[9] Maxime Bucher, Stéphane Herbin, and Frédéric Jurie. Generating visual representations for zero-shot classification. In ICCV Workshops, 2017. 2
|
| 284 |
+
[10] Qiang Chen, Junshi Huang, Rogerio Feris, Lisa M Brown, Jian Dong, and Shuicheng Yan. Deep domain adaptation for describing people based on fine-grained clothing attributes. In CVPR, 2015. 1, 2
|
| 285 |
+
[11] Yu Chen, Ying Tai, Xiaoming Liu, Chunhua Shen, and Jian Yang. Fsrnet: End-to-end learning face super-resolution with facial priors. In CVPR, 2018. 1
|
| 286 |
+
[12] Rudi L Cilibrasi and Paul MB Vitanyi. The google similarity distance. IEEE Transactions on knowledge and data engineering, 2007. 2
|
| 287 |
+
[13] Gabriella Csurka, Christopher Dance, Lixin Fan, Jutta Willamowski, and Cedric Bray. Visual categorization with bags of keypoints. In Workshop on statistical learning in computer vision, ECCV, volume 1, pages 1-2. Prague, 2004. 2
|
| 288 |
+
[14] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In CVPR, 2009. 3, 5
|
| 289 |
+
[15] Carl Doersch, Abhinav Gupta, and Alexei A Efros. Mid-level visual element discovery as discriminative mode seeking. In NIPS, 2013. 2
|
| 290 |
+
[16] Carl Doersch, Saurabh Singh, Abhinav Gupta, Josef Sivic, and Alexei Efros. What makes paris look like paris? ACM Transactions on Graphics, 2012. 2
|
| 291 |
+
[17] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner,
|
| 292 |
+
|
| 293 |
+
Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. ICLR, 2021. 2, 3
|
| 294 |
+
[18] Kun Duan, Devi Parikh, David Crandall, and Kristen Grauman. Discovering localized attributes for fine-grained recognition. In CVPR. IEEE, 2012. 2
|
| 295 |
+
[19] Ali Farhadi, Ian Endres, Derek Hoiem, and David Forsyth. Describing objects by their attributes. In CVPR. IEEE, 2009. 1, 2
|
| 296 |
+
[20] Andrea Frome, Greg Corrado, Jonathon Shlens, Samy Bengio, Jeffrey Dean, Marc'Aurelio Ranzato, and Tomas Mikolov. Devise: A deep visual-semantic embedding model. NeurIPS, 2013. 2
|
| 297 |
+
[21] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In CVPR, 2020. 3
|
| 298 |
+
[22] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 3, 5
|
| 299 |
+
[23] Wei-Lin Hsiao and Kristen Grauman. Learning the latent" look": Unsupervised discovery of a style-coherent embedding from fashion images. In ICCV, 2017. 1
|
| 300 |
+
[24] Masato Ishii, Takashi Takenouchi, and Masashi Sugiyama. Zero-shot domain adaptation based on attribute information. In Asian Conference on Machine Learning. PMLR, 2019. 1
|
| 301 |
+
[25] Huajie Jiang, Ruiping Wang, Shiguang Shan, Yi Yang, and Xilin Chen. Learning discriminative latent attributes for zero-shot classification. In ICCV, 2017. 2
|
| 302 |
+
[26] Michael Kampffmeyer, Yinbo Chen, Xiaodan Liang, Hao Wang, Yujia Zhang, and Eric P Xing. Rethinking knowledge graph propagation for zero-shot learning. In CVPR, 2019. 2
|
| 303 |
+
[27] Cheng-Han Lee, Ziwei Liu, Lingyun Wu, and Ping Luo. Maskgan: Towards diverse and interactive facial image manipulation. In CVPR, 2020. 1
|
| 304 |
+
[28] Yang Liu, Lei Zhou, Xiao Bai, Yifei Huang, Lin Gu, Jun Zhou, and Tatsuya Harada. Goal-oriented gaze estimation for zero-shot learning. In CVPR, 2021. 1, 5, 6
|
| 305 |
+
[29] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In ICCV, 2015. 1
|
| 306 |
+
[30] Utkarsh Mall, Bharath Hariharan, and Kavita Bala. Field-guide-inspired zero-shot learning. In CVPR, 2021. 1
|
| 307 |
+
[31] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean. Distributed representations of words and phrases and their compositionality. NeurIPS, 2013. 1, 2, 4, 5, 6, 7
|
| 308 |
+
[32] Peer Neubert and Peter Protzel. Compact watershed and preemptive slic: On improving trade-offs of superpixel segmentation algorithms. In ICPR. IEEE, 2014. 3
|
| 309 |
+
[33] Ishan Nigam, Pavel Tokmakov, and Deva Ramanan. Towards latent attribute discovery from triplet similarities. In Proceedings of the IEEE/CVF International Conference on Computer Vision, 2019. 2
|
| 310 |
+
[34] Mehdi Noroozi and Paolo Favaro. Unsupervised learning of visual representations by solving jigsaw puzzles. In ECCV, 2016. 3
|
| 311 |
+
[35] Devi Parikh and Kristen Grauman. Interactively building a discriminative vocabulary of nameable attributes. In CVPR. IEEE, 2011. 2
|
| 312 |
+
|
| 313 |
+
[36] Genevieve Patterson, Chen Xu, Hang Su, and James Hays. The sun attribute database: Beyond categories for deeper scene understanding. IJCV, 2014. 1, 2, 5
|
| 314 |
+
[37] Peixi Peng, Yonghong Tian, Tao Xiang, Yaowei Wang, Massimiliano Pontil, and Tiejun Huang. Joint semantic and latent attribute modelling for cross-class transfer learning. T-PAMI, 2017. 2
|
| 315 |
+
[38] Jeffrey Pennington, Richard Socher, and Christopher D Manning. Glove: Global vectors for word representation. In EMNLP, 2014. 1, 2, 4, 7
|
| 316 |
+
[39] Ruizhi Qiao, Lingqiao Liu, Chunhua Shen, and Anton Van Den Hengel. Less is more: zero-shot learning from online textual documents with noise suppression. In CVPR, 2016. 1, 2, 6
|
| 317 |
+
[40] Mohammad Rastegari, Ali Farhadi, and David Forsyth. Attribute discovery via predictable discriminative binary codes. In ECCV. Springer, 2012. 2
|
| 318 |
+
[41] Marcus Rohrbach, Michael Stark, György Szarvas, Iryna Gurevych, and Bernt Schiele. What helps where-and why? semantic relatedness for knowledge transfer. In CVPR, 2010. 2
|
| 319 |
+
[42] Edgar Schonfeld, Sayna Ebrahimi, Samarth Sinha, Trevor Darrell, and Zeynep Akata. Generalized zero- and few-shot learning via aligned variational autoencoders. In CVPR, 2019. 1
|
| 320 |
+
[43] Edgar Schonfeld, Sayna Ebrahimi, Samarth Sinha, Trevor Darrell, and Zeynep Akata. Generalized zero- and few-shot learning via aligned variational autoencoders. In CVPR, 2019. 2, 5, 6
|
| 321 |
+
[44] Viktoriia Sharmanska, Novi Quadrianto, and Christoph H Lampert. Augmented attribute representations. In ECCV. Springer, 2012. 2
|
| 322 |
+
[45] Ronan Sicre, Yannis Avrithis, Ewa Kijak, and Frédéric Jurie. Unsupervised part learning for visual recognition. In CVPR, 2017. 2
|
| 323 |
+
[46] Saurabh Singh, Abhinav Gupta, and Alexei A Efros. Unsupervised discovery of mid-level discriminative patches. In ECCV, 2012. 2
|
| 324 |
+
[47] Josef Sivic and Andrew Zisserman. Video google: A text retrieval approach to object matching in videos. In Computer Vision, IEEE International Conference on, volume 3, pages 1470-1470. IEEE Computer Society, 2003. 2
|
| 325 |
+
[48] Richard Socher, Milind Ganjoo, Hamsa Sridhar, Osbert Bastani, Christopher D Manning, and Andrew Y Ng. Zero-shot learning through cross-modal transfer. NeurIPS, 2013. 2
|
| 326 |
+
[49] Richard Socher, Milind Ganjoo, Hamsa Sridhar, Osbert Bastani, Christopher D Manning, and Andrew Y Ng. Zero-shot learning through cross-modal transfer. NeurIPS, 2013. 2
|
| 327 |
+
[50] Jie Song, Chengchao Shen, Jie Lei, An-Xiang Zeng, Kairi Ou, Dacheng Tao, and Mingli Song. Selective zero-shot classification with augmented attributes. In ECCV, 2018. 2
|
| 328 |
+
[51] Lorenzo Torresani, Martin Szummer, and Andrew Fitzgibbon. Efficient object category recognition using classemes. In ECCV. Springer, 2010. 2
|
| 329 |
+
[52] Laurens van der Maaten and Geoffrey Hinton. Visualizing data using t-SNE. Journal of Machine Learning Research, 2008. 8
|
| 330 |
+
|
| 331 |
+
[53] Wouter Van Gansbeke, Simon Vandenhende, Stamatios Georgoulis, Marc Proesmans, and Luc Van Gool. Scan: Learning to classify images without labels. In ECCV, 2020. 3, 4, 6
|
| 332 |
+
[54] Sirion Vittayakorn, Takayuki Umeda, Kazuhiko Murasaki, Kyoko Sudo, Takayuki Okatani, and Kota Yamaguchi. Automatic attribute discovery with neural activations. In ECCV, 2016. 2
|
| 333 |
+
[55] C. Wah, S. Branson, P. Welinder, P. Perona, and S. Belongie. The Caltech-UCSD Birds-200-2011 Dataset. Technical Report CNS-TR-2011-001, California Institute of Technology, 2011. 1, 2, 3, 5
|
| 334 |
+
[56] Xiaolong Wang, Yufei Ye, and Abhinav Gupta. Zero-shot recognition via semantic embeddings and knowledge graphs. In CVPR, 2018. 2
|
| 335 |
+
[57] Lei Wu, Xian-Sheng Hua, Nenghai Yu, Wei-Ying Ma, and Shipeng Li. Flickr distance: a relationship measure for visual concepts. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2011. 2
|
| 336 |
+
[58] Yongqin Xian, Zeynep Akata, Gaurav Sharma, Quynh Nguyen, Matthias Hein, and Bernt Schiele. Latent embeddings for zero-shot classification. In CVPR, 2016. 2
|
| 337 |
+
[59] Yongqin Xian, Christoph H Lampert, Bernt Schiele, and Zeynep Akata. Zero-shot learning-a comprehensive evaluation of the good, the bad and the ugly. T-PAMI, 2019. 1, 2, 3, 5
|
| 338 |
+
[60] Yongqin Xian, Tobias Lorenz, Bernt Schiele, and Zeynep Akata. Feature generating networks for zero-shot learning. In CVPR, 2018. 2
|
| 339 |
+
[61] Yongqin Xian, Saurabh Sharma, Bernt Schiele, and Zeynep Akata. f-vaegan-d2: A feature generating framework for any-shot learning. In CVPR, 2019. 1, 2, 3, 5, 6
|
| 340 |
+
[62] Wenjia Xu, Yongqin Xian, Jiuniu Wang, Bernt Schiele, and Zeynep Akata. Attribute prototype network for zero-shot learning. NeurIPS, 2020. 1, 2, 5, 6
|
| 341 |
+
[63] Ikuya Yamada, Akari Asai, Jin Sakuma, Hiroyuki Shindo, Hideaki Takeda, Yoshiyasu Takefuji, and Yuji Matsumoto. Wikipedia2vec: An efficient toolkit for learning and visualizing the embeddings of words and entities from wikipedia. ACL, 2020. 2
|
| 342 |
+
[64] Xun Yang, Xiangnan He, Xiang Wang, Yunshan Ma, Fuli Feng, Meng Wang, and Tat-Seng Chua. Interpretable fashion matching with rich attributes. In ACM SIGIR, 2019. 1
|
| 343 |
+
[65] Felix X Yu, Liangliang Cao, Rogerio S Feris, John R Smith, and Shih-Fu Chang. Designing category-level attributes for discriminative visual recognition. In CVPR, 2013. 2
|
| 344 |
+
[66] Yunlong Yu, Zhong Ji, Yanwei Fu, Jichang Guo, Yanwei Pang, Zhongfei Mark Zhang, et al. Stacked semantics-guided attention model for fine-grained zero-shot learning. In NeurIPS, 2018. 2
|
| 345 |
+
[67] Yizhe Zhu, Mohamed Elhoseiny, Bingchen Liu, Xi Peng, and Ahmed Elgammal. A generative adversarial approach for zero-shot learning from noisy texts. In CVPR, 2018. 1, 2, 6
|
| 346 |
+
[68] Yizhe Zhu, Jianwen Xie, Zhiqiang Tang, Xi Peng, and Ahmed Elgammal. Semantic-guided multi-attention localization for zero-shot learning. In NeurIPS, 2019. 2
|
2203.10xxx/2203.10444/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d5c848acd70f88570354605d3e6f98761ec673b6fea83494238c3db6375d958a
|
| 3 |
+
size 458569
|
2203.10xxx/2203.10444/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2203.10xxx/2203.10446/d7999c52-f0e4-4d71-8708-fdcdae51a890_content_list.json
ADDED
|
@@ -0,0 +1,1993 @@
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "A 3D Generative Model for Structure-Based Drug Design",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
318,
|
| 8 |
+
122,
|
| 9 |
+
679,
|
| 10 |
+
174
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Shitong Luo",
|
| 17 |
+
"text_level": 1,
|
| 18 |
+
"bbox": [
|
| 19 |
+
276,
|
| 20 |
+
224,
|
| 21 |
+
367,
|
| 22 |
+
239
|
| 23 |
+
],
|
| 24 |
+
"page_idx": 0
|
| 25 |
+
},
|
| 26 |
+
{
|
| 27 |
+
"type": "text",
|
| 28 |
+
"text": "Helixon Research luost@helixon.com luost26@gmail.com",
|
| 29 |
+
"bbox": [
|
| 30 |
+
246,
|
| 31 |
+
241,
|
| 32 |
+
398,
|
| 33 |
+
282
|
| 34 |
+
],
|
| 35 |
+
"page_idx": 0
|
| 36 |
+
},
|
| 37 |
+
{
|
| 38 |
+
"type": "text",
|
| 39 |
+
"text": "Jianzhu Ma",
|
| 40 |
+
"text_level": 1,
|
| 41 |
+
"bbox": [
|
| 42 |
+
284,
|
| 43 |
+
303,
|
| 44 |
+
370,
|
| 45 |
+
316
|
| 46 |
+
],
|
| 47 |
+
"page_idx": 0
|
| 48 |
+
},
|
| 49 |
+
{
|
| 50 |
+
"type": "text",
|
| 51 |
+
"text": "Peking University majianzhu@pku.edu.cn",
|
| 52 |
+
"bbox": [
|
| 53 |
+
238,
|
| 54 |
+
316,
|
| 55 |
+
413,
|
| 56 |
+
345
|
| 57 |
+
],
|
| 58 |
+
"page_idx": 0
|
| 59 |
+
},
|
| 60 |
+
{
|
| 61 |
+
"type": "text",
|
| 62 |
+
"text": "Jiaqi Guan",
|
| 63 |
+
"text_level": 1,
|
| 64 |
+
"bbox": [
|
| 65 |
+
571,
|
| 66 |
+
224,
|
| 67 |
+
653,
|
| 68 |
+
241
|
| 69 |
+
],
|
| 70 |
+
"page_idx": 0
|
| 71 |
+
},
|
| 72 |
+
{
|
| 73 |
+
"type": "text",
|
| 74 |
+
"text": "University of Illinois Urbana-Champaign jiaqi@illinois.edu",
|
| 75 |
+
"bbox": [
|
| 76 |
+
477,
|
| 77 |
+
241,
|
| 78 |
+
750,
|
| 79 |
+
268
|
| 80 |
+
],
|
| 81 |
+
"page_idx": 0
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"type": "text",
|
| 85 |
+
"text": "Jian Peng",
|
| 86 |
+
"text_level": 1,
|
| 87 |
+
"bbox": [
|
| 88 |
+
584,
|
| 89 |
+
303,
|
| 90 |
+
658,
|
| 91 |
+
316
|
| 92 |
+
],
|
| 93 |
+
"page_idx": 0
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"type": "text",
|
| 97 |
+
"text": "University of Illinois Urbana-Champaign jianpeng@illinois.edu",
|
| 98 |
+
"bbox": [
|
| 99 |
+
485,
|
| 100 |
+
316,
|
| 101 |
+
756,
|
| 102 |
+
345
|
| 103 |
+
],
|
| 104 |
+
"page_idx": 0
|
| 105 |
+
},
|
| 106 |
+
{
|
| 107 |
+
"type": "text",
|
| 108 |
+
"text": "Abstract",
|
| 109 |
+
"text_level": 1,
|
| 110 |
+
"bbox": [
|
| 111 |
+
459,
|
| 112 |
+
380,
|
| 113 |
+
537,
|
| 114 |
+
396
|
| 115 |
+
],
|
| 116 |
+
"page_idx": 0
|
| 117 |
+
},
|
| 118 |
+
{
|
| 119 |
+
"type": "text",
|
| 120 |
+
"text": "We study a fundamental problem in structure-based drug design — generating molecules that bind to specific protein binding sites. While we have witnessed the great success of deep generative models in drug design, the existing methods are mostly string-based or graph-based. They are limited by the lack of spatial information and thus unable to be applied to structure-based design tasks. Particularly, such models have no or little knowledge of how molecules interact with their target proteins exactly in 3D space. In this paper, we propose a 3D generative model that generates molecules given a designated 3D protein binding site. Specifically, given a binding site as the 3D context, our model estimates the probability density of atom's occurrences in 3D space — positions that are more likely to have atoms will be assigned higher probability. To generate 3D molecules, we propose an auto-regressive sampling scheme — atoms are sampled sequentially from the learned distribution until there is no room for new atoms. Combined with this sampling scheme, our model can generate valid and diverse molecules, which could be applicable to various structure-based molecular design tasks such as molecule sampling and linker design. Experimental results demonstrate that molecules sampled from our model exhibit high binding affinity to specific targets and good drug properties such as drug-likeness even if the model is not explicitly optimized for them.",
|
| 121 |
+
"bbox": [
|
| 122 |
+
228,
|
| 123 |
+
411,
|
| 124 |
+
767,
|
| 125 |
+
675
|
| 126 |
+
],
|
| 127 |
+
"page_idx": 0
|
| 128 |
+
},
|
| 129 |
+
{
|
| 130 |
+
"type": "text",
|
| 131 |
+
"text": "1 Introduction",
|
| 132 |
+
"text_level": 1,
|
| 133 |
+
"bbox": [
|
| 134 |
+
171,
|
| 135 |
+
698,
|
| 136 |
+
313,
|
| 137 |
+
714
|
| 138 |
+
],
|
| 139 |
+
"page_idx": 0
|
| 140 |
+
},
|
| 141 |
+
{
|
| 142 |
+
"type": "text",
|
| 143 |
+
"text": "Designing molecules that bind to a specific protein binding site, also known as structure-based drug design, is one of the most challenging tasks in drug discovery [2]. Searching for suitable molecule candidates in silico usually involves massive computational efforts because of the enormous space of synthetically feasible chemicals [22] and conformational degree of freedom of both compound and protein structures [11].",
|
| 144 |
+
"bbox": [
|
| 145 |
+
169,
|
| 146 |
+
729,
|
| 147 |
+
826,
|
| 148 |
+
800
|
| 149 |
+
],
|
| 150 |
+
"page_idx": 0
|
| 151 |
+
},
|
| 152 |
+
{
|
| 153 |
+
"type": "text",
|
| 154 |
+
"text": "In recent years, we have witnessed the success of machine learning approaches to problems in drug design, especially on molecule generation. Most of these approaches use deep generative models to propose drug candidates by learning the underlying distribution of desirable molecules. However, most of such methods are generally SMILES/string-based [10, 17] or graph-based [18, 19, 13, 14]. They are limited by the lack of spatial information and unable to perceive how molecules interact with proteins in 3D space. Hence, these methods are not applicable to generating molecules that fit to a specific protein structure which is also known as the drug target. Another line of work studies",
|
| 155 |
+
"bbox": [
|
| 156 |
+
169,
|
| 157 |
+
805,
|
| 158 |
+
826,
|
| 159 |
+
905
|
| 160 |
+
],
|
| 161 |
+
"page_idx": 0
|
| 162 |
+
},
|
| 163 |
+
{
|
| 164 |
+
"type": "aside_text",
|
| 165 |
+
"text": "arXiv:2203.10446v2 [q-bio.BM] 12 Nov 2022",
|
| 166 |
+
"bbox": [
|
| 167 |
+
22,
|
| 168 |
+
239,
|
| 169 |
+
60,
|
| 170 |
+
726
|
| 171 |
+
],
|
| 172 |
+
"page_idx": 0
|
| 173 |
+
},
|
| 174 |
+
{
|
| 175 |
+
"type": "footer",
|
| 176 |
+
"text": "35th Conference on Neural Information Processing Systems (NeurIPS 2021).",
|
| 177 |
+
"bbox": [
|
| 178 |
+
171,
|
| 179 |
+
922,
|
| 180 |
+
630,
|
| 181 |
+
936
|
| 182 |
+
],
|
| 183 |
+
"page_idx": 0
|
| 184 |
+
},
|
| 185 |
+
{
|
| 186 |
+
"type": "text",
|
| 187 |
+
"text": "generating molecules directly in 3D space [8, 28, 29, 20, 30, 15]. Most of them [8, 28, 29] can only handle very small organic molecules, not sufficient to generate drug-scale molecules which usually contain dozens of heavy atoms. [20] proposes to generate voxelized molecular images and use a post-processing algorithm to reconstruct molecular structures. Though this method could produce drug-scale molecules for specific protein pockets, the quality of the sampling is heavily limited by voxelization. Therefore, generating high-quality drug molecules for specific 3D protein binding sites remains challenging.",
|
| 188 |
+
"bbox": [
|
| 189 |
+
169,
|
| 190 |
+
90,
|
| 191 |
+
823,
|
| 192 |
+
188
|
| 193 |
+
],
|
| 194 |
+
"page_idx": 1
|
| 195 |
+
},
|
| 196 |
+
{
|
| 197 |
+
"type": "text",
|
| 198 |
+
"text": "In this work, we propose a 3D generative model to approach this task. Specifically, we aim at modeling the distribution of atom occurrence in the 3D space of the binding site. Formally, given a binding site $\\mathcal{C}$ as input, we model the distribution $p(e,r|\\mathcal{C})$ , where $\\boldsymbol{r} \\in \\mathbb{R}^3$ is an arbitrary 3D coordinate and $e$ is atom type. To realize this distribution, we design a neural network architecture which takes as input a query 3D coordinate $\\boldsymbol{r}$ , conditional on the 3D context $\\mathcal{C}$ , and outputs the probability of $\\boldsymbol{r}$ being occupied by an atom of a particular chemical element. In order to ensure the distribution is equivariant to $\\mathcal{C}$ 's rotation and translation, we utilize rotationally invariant graph neural networks to perceive the context of each query coordinate.",
|
| 199 |
+
"bbox": [
|
| 200 |
+
169,
|
| 201 |
+
194,
|
| 202 |
+
826,
|
| 203 |
+
305
|
| 204 |
+
],
|
| 205 |
+
"page_idx": 1
|
| 206 |
+
},
|
| 207 |
+
{
|
| 208 |
+
"type": "text",
|
| 209 |
+
"text": "Despite having a neural network to model the distribution of atom occurrence $p(e, \\boldsymbol{r} | \\mathcal{C})$ , how to generate valid and diverse molecules still remains technically challenging, mainly for the following two reasons: First, simply drawing i.i.d. samples from the distribution $p(e, \\boldsymbol{r} | \\mathcal{C})$ does not yield valid molecules because atoms within a molecule are not independent of each other. Second, a desirable sampling algorithm should capture the multi-modality of the feasible chemical space, i.e. it should be able to generate a diverse set of desired molecules given a specific binding context. To tackle the challenge, we propose an auto-regressive sampling algorithm. In specific, we start with a context consisting of only protein atoms. Then, we iteratively sample one atom from the distribution at each step and add it to the context to be used in the next step, until there is no room for new atoms. Compared to other recent methods [20, 23], our auto-regressive algorithm is simpler and more advantageous. It does not rely on post-processing algorithms to infer atom placements from density. More importantly, it is capable of multi-modal sampling by the nature of auto-regressive, avoiding additional latent variables via VAEs [16] or GANs [9] which would bring about extra architectural complexity and training difficulty.",
|
| 210 |
+
"bbox": [
|
| 211 |
+
169,
|
| 212 |
+
311,
|
| 213 |
+
826,
|
| 214 |
+
505
|
| 215 |
+
],
|
| 216 |
+
"page_idx": 1
|
| 217 |
+
},
|
| 218 |
+
{
|
| 219 |
+
"type": "text",
|
| 220 |
+
"text": "We conduct extensive experiments to evaluate our approach. Quantitative and qualitative results show that: (1) our method is able to generate diverse drug-like molecules that have high binding affinity to specific targets based on 3D structures of protein binding sites; (2) our method is able to generate molecules with fairly high drug-likeness score (QED) [4] and synthetic accessibility score (SA) [6] even if the model is not specifically optimized for them; (3) in addition to molecule generation, the proposed method is also applicable to other relevant tasks such as linker design.",
|
| 221 |
+
"bbox": [
|
| 222 |
+
169,
|
| 223 |
+
511,
|
| 224 |
+
823,
|
| 225 |
+
595
|
| 226 |
+
],
|
| 227 |
+
"page_idx": 1
|
| 228 |
+
},
|
| 229 |
+
{
|
| 230 |
+
"type": "text",
|
| 231 |
+
"text": "2 Related Work",
|
| 232 |
+
"text_level": 1,
|
| 233 |
+
"bbox": [
|
| 234 |
+
171,
|
| 235 |
+
614,
|
| 236 |
+
323,
|
| 237 |
+
630
|
| 238 |
+
],
|
| 239 |
+
"page_idx": 1
|
| 240 |
+
},
|
| 241 |
+
{
|
| 242 |
+
"type": "text",
|
| 243 |
+
"text": "SMILES-Based and Graph-Based Molecule Generation Deep generative models have been prevalent in molecule design. The overall idea is to use deep generative models to propose molecule candidates by learning the underlying distribution of desirable molecules. Existing works can be roughly divided into two classes — string-based and graph-based. String-based methods represent molecules as linear strings, e.g. SMILES strings [34], making a wide range of language modeling tools readily applicable. For example, [5, 10, 26] utilize recurrent neural networks to learn a language model of SMILES strings. However, string-based representations fail to capture molecular similarities, making it a sub-optimal representation for molecules [13]. In contrast, graph representations are more natural, and graph-based approaches have drawn great attention. The majority of graph-based models generate molecules in an auto-regressive fashion, i.e., adding atoms or fragments sequentially, which could be implemented based upon VAEs [13], normalizing flows [27], reinforcement learning [35, 14], etc. Despite the progress made in string-based and graph-based approaches, they are limited by the lack of spatial information and thus unable to be directly applied to structure-based drug design tasks [2]. Specifically, as 1D/2D-based methods, they are unable to perceive how molecules interact with their target proteins exactly in 3D space.",
|
| 244 |
+
"bbox": [
|
| 245 |
+
169,
|
| 246 |
+
646,
|
| 247 |
+
826,
|
| 248 |
+
854
|
| 249 |
+
],
|
| 250 |
+
"page_idx": 1
|
| 251 |
+
},
|
| 252 |
+
{
|
| 253 |
+
"type": "text",
|
| 254 |
+
"text": "Molecule Generation in 3D Space There has been another line of methods that generate molecules directly in 3D space. [8] proposes an auto-regressive model which takes a partially generated molecule as input and outputs the next atom's chemical element and the distances to previous atoms and places",
|
| 255 |
+
"bbox": [
|
| 256 |
+
169,
|
| 257 |
+
869,
|
| 258 |
+
823,
|
| 259 |
+
912
|
| 260 |
+
],
|
| 261 |
+
"page_idx": 1
|
| 262 |
+
},
|
| 263 |
+
{
|
| 264 |
+
"type": "page_number",
|
| 265 |
+
"text": "2",
|
| 266 |
+
"bbox": [
|
| 267 |
+
491,
|
| 268 |
+
935,
|
| 269 |
+
504,
|
| 270 |
+
946
|
| 271 |
+
],
|
| 272 |
+
"page_idx": 1
|
| 273 |
+
},
|
| 274 |
+
{
|
| 275 |
+
"type": "text",
|
| 276 |
+
"text": "the atoms in the 3D space according to the distance constraints. [28, 29] approach this task via reinforcement learning by generating 3D molecules in a sequential way. Different from the previous method[8], they mainly rely on a reward function derived from the potential energy function of atomic systems. These works could generate realistic 3D molecules. However, they can only handle small organic molecules, not sufficient to generate drug-scale molecules which usually contain dozens of heavy atoms.",
|
| 277 |
+
"bbox": [
|
| 278 |
+
169,
|
| 279 |
+
90,
|
| 280 |
+
823,
|
| 281 |
+
175
|
| 282 |
+
],
|
| 283 |
+
"page_idx": 2
|
| 284 |
+
},
|
| 285 |
+
{
|
| 286 |
+
"type": "text",
|
| 287 |
+
"text": "[20, 23] propose a non-autoregressive approach to 3D molecular generation which is able to generate drug-scale molecules. It represents molecules as 3D images by voxelizing molecules onto 3D meshgrids. In this way, the molecular generation problem is transformed into an image generation problem, making it possible to leverage sophisticated image generation techniques. In specific, it employs convolutional neural network-based VAEs [16] or GANs [9] to generate such molecular images. It also attempts to fuse the binding site structures into the generative network, enabling the model to generate molecules for designated binding targets. In order to reconstruct the molecular structures from images, it leverages a post-processing algorithm to search for atom placements that best fit the image. In comparison to previous methods which can only generate small 3D molecules, this method can generate drug-scale 3D molecules. However, the quality of its generated molecules is not satisfying because of the following major limitations. First, it is hardly scalable to large binding pockets, as the number of voxels grows cubically to the size of the binding site. Second, the resolution of the 3D molecular images is another bottleneck that significantly limits the precision due to the same scalability issue. Last, conventional CNNs are not rotation-equivariant, which is crucial for modeling molecular systems [25].",
|
| 288 |
+
"bbox": [
|
| 289 |
+
169,
|
| 290 |
+
181,
|
| 291 |
+
826,
|
| 292 |
+
388
|
| 293 |
+
],
|
| 294 |
+
"page_idx": 2
|
| 295 |
+
},
|
| 296 |
+
{
|
| 297 |
+
"type": "text",
|
| 298 |
+
"text": "3 Method",
|
| 299 |
+
"text_level": 1,
|
| 300 |
+
"bbox": [
|
| 301 |
+
171,
|
| 302 |
+
407,
|
| 303 |
+
272,
|
| 304 |
+
422
|
| 305 |
+
],
|
| 306 |
+
"page_idx": 2
|
| 307 |
+
},
|
| 308 |
+
{
|
| 309 |
+
"type": "text",
|
| 310 |
+
"text": "Our goal is to generate a set of atoms that is able to form a valid drug-like molecule fitting to a specific binding site. To this end, we first present a 3D generative model in Section 3.1 that predicts the probability of atom occurrence in 3D space of the binding site. Second, we present in Section 3.2 the auto-regressive sampling algorithm for generating valid and multi-modal molecules from the model. Finally, in Section 3.3, we derive the training objective, by which the model learns to predict where should be placed and atoms and what type of atom should be placed.",
|
| 311 |
+
"bbox": [
|
| 312 |
+
169,
|
| 313 |
+
438,
|
| 314 |
+
823,
|
| 315 |
+
522
|
| 316 |
+
],
|
| 317 |
+
"page_idx": 2
|
| 318 |
+
},
|
| 319 |
+
{
|
| 320 |
+
"type": "text",
|
| 321 |
+
"text": "3.1 3D Generative Model Design",
|
| 322 |
+
"text_level": 1,
|
| 323 |
+
"bbox": [
|
| 324 |
+
171,
|
| 325 |
+
537,
|
| 326 |
+
415,
|
| 327 |
+
553
|
| 328 |
+
],
|
| 329 |
+
"page_idx": 2
|
| 330 |
+
},
|
| 331 |
+
{
|
| 332 |
+
"type": "text",
|
| 333 |
+
"text": "A binding site can be defined as a set of atoms $\\mathcal{C} = \\{(a_i, r_i)\\}_{i=1}^{N_b}$ , where $N_b$ is the number of atoms in the binding site, $a_i$ is the $i$ -th atom's attributes such as chemical element, belonging amino acid, etc., and $r_i$ is its 3D coordinate. To generate atoms in the binding site, we consider modeling the probability of atom occurring at some position $r$ in the site. Formally, this is to model the density $p(e|r,\\mathcal{C})$ , where $r \\in \\mathbb{R}^3$ is an arbitrary 3D coordinate, and $e \\in \\mathcal{E} = \\{\\mathrm{H},\\mathrm{C},\\mathrm{O},\\ldots\\}$ is the chemical element. Intuitively, this density can be interpreted as a classifier that takes as input a 3D coordinate $r$ conditional on $\\mathcal{C}$ and predicts the probability of $r$ being occupied by an atom of type $e$ .",
|
| 334 |
+
"bbox": [
|
| 335 |
+
169,
|
| 336 |
+
564,
|
| 337 |
+
823,
|
| 338 |
+
662
|
| 339 |
+
],
|
| 340 |
+
"page_idx": 2
|
| 341 |
+
},
|
| 342 |
+
{
|
| 343 |
+
"type": "text",
|
| 344 |
+
"text": "To model $p(e|r,\\mathcal{C})$ , we devise a model consisting of two parts: Context Encoder learns the representation of each atom in the context $\\mathcal{C}$ via graph neural networks. Spatial Classifier takes as input a query position $\\pmb{r}$ , then aggregates the representation of contextual atoms nearby it, and finally predicts $p(e|r,\\mathcal{C})$ . The implementation of these two parts is detailed as follows.",
|
| 345 |
+
"bbox": [
|
| 346 |
+
169,
|
| 347 |
+
667,
|
| 348 |
+
826,
|
| 349 |
+
724
|
| 350 |
+
],
|
| 351 |
+
"page_idx": 2
|
| 352 |
+
},
|
| 353 |
+
{
|
| 354 |
+
"type": "text",
|
| 355 |
+
"text": "Context Encoder The purpose of the context encoder is to extract information-rich representations for each atom in $\\mathcal{C}$ . We assume a desirable representation should satisfy two properties: (1) context-awareness: the representation of an atom should not only encode the property of the atom itself, but also encode its context. (2) rotational and translational invariance: since the physical and biological properties of the system do not change according to rigid transforms, the representations that reflect these properties should be invariant to rigid transforms as well. To this end, we employ rotationally and translationally invariant graph neural networks [25] as the backbone of the context encoder, described as follows.",
|
| 356 |
+
"bbox": [
|
| 357 |
+
169,
|
| 358 |
+
738,
|
| 359 |
+
826,
|
| 360 |
+
849
|
| 361 |
+
],
|
| 362 |
+
"page_idx": 2
|
| 363 |
+
},
|
| 364 |
+
{
|
| 365 |
+
"type": "text",
|
| 366 |
+
"text": "First of all, since there is generally no natural topology in $\\mathcal{C}$ , we construct a $k$ -nearest-neighbor graph based on inter-atomic distances, denoted as $\\mathcal{G} = \\langle \\mathcal{C}, \\mathbf{A} \\rangle$ , where $\\mathbf{A}$ is the adjacency matrix. We also denote the $k$ -NN neighborhood of atom $i$ as $N_k(\\mathbf{r}_i)$ for convenience. The context encoder will take $\\mathcal{G}$ as input and output structure-aware node embeddings.",
|
| 367 |
+
"bbox": [
|
| 368 |
+
169,
|
| 369 |
+
854,
|
| 370 |
+
823,
|
| 371 |
+
912
|
| 372 |
+
],
|
| 373 |
+
"page_idx": 2
|
| 374 |
+
},
|
| 375 |
+
{
|
| 376 |
+
"type": "page_number",
|
| 377 |
+
"text": "3",
|
| 378 |
+
"bbox": [
|
| 379 |
+
493,
|
| 380 |
+
935,
|
| 381 |
+
504,
|
| 382 |
+
946
|
| 383 |
+
],
|
| 384 |
+
"page_idx": 2
|
| 385 |
+
},
|
| 386 |
+
{
|
| 387 |
+
"type": "image",
|
| 388 |
+
"img_path": "images/a8b33c0ac46ea33bc8fd0c1719bbd6f8c81942c7e3a727391f7967c1d16e1245.jpg",
|
| 389 |
+
"image_caption": [
|
| 390 |
+
"Figure 1: An illustration of the sampling process. Atoms are sampled sequentially. The probability density changes as we place new atoms. The sampling process naturally diverges, leading to different samples."
|
| 391 |
+
],
|
| 392 |
+
"image_footnote": [],
|
| 393 |
+
"bbox": [
|
| 394 |
+
171,
|
| 395 |
+
85,
|
| 396 |
+
826,
|
| 397 |
+
239
|
| 398 |
+
],
|
| 399 |
+
"page_idx": 3
|
| 400 |
+
},
|
| 401 |
+
{
|
| 402 |
+
"type": "text",
|
| 403 |
+
"text": "The first layer of the encoder is a linear layer. It maps atomic attributes $\\{a_i\\}$ to initial embeddings $\\{h_i^{(0)}\\}$ . Then, these embeddings along with the graph structure $A$ are fed into $L$ message passing layers. Specifically, the formula of message passing takes the form:",
|
| 404 |
+
"bbox": [
|
| 405 |
+
171,
|
| 406 |
+
311,
|
| 407 |
+
823,
|
| 408 |
+
359
|
| 409 |
+
],
|
| 410 |
+
"page_idx": 3
|
| 411 |
+
},
|
| 412 |
+
{
|
| 413 |
+
"type": "equation",
|
| 414 |
+
"text": "\n$$\n\\boldsymbol {h} _ {i} ^ {(\\ell + 1)} = \\sigma \\left(\\boldsymbol {W} _ {0} ^ {\\ell} \\boldsymbol {h} _ {i} ^ {(\\ell)} + \\sum_ {j \\in N _ {k} (\\boldsymbol {r} _ {i})} \\boldsymbol {W} _ {1} ^ {\\ell} \\boldsymbol {w} \\left(d _ {i j}\\right) \\odot \\boldsymbol {W} _ {2} ^ {\\ell} \\boldsymbol {h} _ {j} ^ {(\\ell)}\\right), \\tag {1}\n$$\n",
|
| 415 |
+
"text_format": "latex",
|
| 416 |
+
"bbox": [
|
| 417 |
+
297,
|
| 418 |
+
364,
|
| 419 |
+
823,
|
| 420 |
+
414
|
| 421 |
+
],
|
| 422 |
+
"page_idx": 3
|
| 423 |
+
},
|
| 424 |
+
{
|
| 425 |
+
"type": "text",
|
| 426 |
+
"text": "where $\\boldsymbol{w}(\\cdot)$ is a weight network and $d_{ij}$ denotes the distance between atom $i$ and atom $j$ . The formula is similar to continuous filter convolution [25]. Note that, the weight of message from $j$ to $i$ depends only on $d_{ij}$ , ensuring its invariance to rotation and translation. Finally, we obtain $\\{\\pmb{h}_i^{(L)}\\}$ a set of embeddings for each atom in $\\mathcal{C}$ .",
|
| 427 |
+
"bbox": [
|
| 428 |
+
169,
|
| 429 |
+
421,
|
| 430 |
+
826,
|
| 431 |
+
481
|
| 432 |
+
],
|
| 433 |
+
"page_idx": 3
|
| 434 |
+
},
|
| 435 |
+
{
|
| 436 |
+
"type": "text",
|
| 437 |
+
"text": "Spatial Classifier The spatial classifier takes as input a query position $\\boldsymbol{r} \\in \\mathbb{R}^3$ and predicts the type of atom occupying $\\boldsymbol{r}$ . In order to make successful predictions, the model should be able to perceive the context around $\\boldsymbol{r}$ . Therefore, the first step of this part is to aggregate atom embeddings from the context encoder:",
|
| 438 |
+
"bbox": [
|
| 439 |
+
169,
|
| 440 |
+
494,
|
| 441 |
+
823,
|
| 442 |
+
547
|
| 443 |
+
],
|
| 444 |
+
"page_idx": 3
|
| 445 |
+
},
|
| 446 |
+
{
|
| 447 |
+
"type": "equation",
|
| 448 |
+
"text": "\n$$\n\\boldsymbol {v} = \\sum_ {j \\in N _ {k} (\\boldsymbol {r})} \\boldsymbol {W} _ {0} \\boldsymbol {w} _ {\\text {a g g r}} \\left(\\| \\boldsymbol {r} - \\boldsymbol {r} _ {j} \\|\\right) \\odot \\boldsymbol {W} _ {1} \\boldsymbol {h} _ {j} ^ {(L)}, \\tag {2}\n$$\n",
|
| 449 |
+
"text_format": "latex",
|
| 450 |
+
"bbox": [
|
| 451 |
+
344,
|
| 452 |
+
547,
|
| 453 |
+
823,
|
| 454 |
+
580
|
| 455 |
+
],
|
| 456 |
+
"page_idx": 3
|
| 457 |
+
},
|
| 458 |
+
{
|
| 459 |
+
"type": "text",
|
| 460 |
+
"text": "where $N_{k}(\\boldsymbol{r})$ is the $k$ -nearest neighborhood of $\\boldsymbol{r}$ . Note that we weight different embedding using the weight network $\\boldsymbol{w}_{\\mathrm{aggr}}(\\cdot)$ according to distances because it is necessary to distinguish the contribution of different atoms in the context. Finally, in order to predict $p(e|\\boldsymbol{r},\\mathcal{C})$ , the aggregated feature $\\boldsymbol{v}$ is then passed to a classical multi-layer perceptron classifier:",
|
| 461 |
+
"bbox": [
|
| 462 |
+
169,
|
| 463 |
+
585,
|
| 464 |
+
823,
|
| 465 |
+
642
|
| 466 |
+
],
|
| 467 |
+
"page_idx": 3
|
| 468 |
+
},
|
| 469 |
+
{
|
| 470 |
+
"type": "equation",
|
| 471 |
+
"text": "\n$$\n\\boldsymbol {c} = \\operatorname {M L P} (\\boldsymbol {v}), \\tag {3}\n$$\n",
|
| 472 |
+
"text_format": "latex",
|
| 473 |
+
"bbox": [
|
| 474 |
+
447,
|
| 475 |
+
648,
|
| 476 |
+
823,
|
| 477 |
+
664
|
| 478 |
+
],
|
| 479 |
+
"page_idx": 3
|
| 480 |
+
},
|
| 481 |
+
{
|
| 482 |
+
"type": "text",
|
| 483 |
+
"text": "where $c$ is the non-normalized probability of chemical elements. The estimated probability of position $r$ being occupied by atom of type $e$ is:",
|
| 484 |
+
"bbox": [
|
| 485 |
+
169,
|
| 486 |
+
671,
|
| 487 |
+
823,
|
| 488 |
+
700
|
| 489 |
+
],
|
| 490 |
+
"page_idx": 3
|
| 491 |
+
},
|
| 492 |
+
{
|
| 493 |
+
"type": "equation",
|
| 494 |
+
"text": "\n$$\np (e | \\boldsymbol {r}, \\mathcal {C}) = \\frac {\\exp (\\boldsymbol {c} [ e ])}{1 + \\sum_ {e ^ {\\prime} \\in \\mathcal {E}} \\exp (\\boldsymbol {c} [ e ^ {\\prime} ])}, \\tag {4}\n$$\n",
|
| 495 |
+
"text_format": "latex",
|
| 496 |
+
"bbox": [
|
| 497 |
+
375,
|
| 498 |
+
705,
|
| 499 |
+
823,
|
| 500 |
+
741
|
| 501 |
+
],
|
| 502 |
+
"page_idx": 3
|
| 503 |
+
},
|
| 504 |
+
{
|
| 505 |
+
"type": "text",
|
| 506 |
+
"text": "where $\\mathcal{E}$ is the set of possible chemical elements. Unlike typical classifiers that apply softmax to $c$ , we make use of the extra degree of freedom by adding 1 to the denominator, so that the probability of \"nothing\" can be expressed as:",
|
| 507 |
+
"bbox": [
|
| 508 |
+
169,
|
| 509 |
+
746,
|
| 510 |
+
826,
|
| 511 |
+
789
|
| 512 |
+
],
|
| 513 |
+
"page_idx": 3
|
| 514 |
+
},
|
| 515 |
+
{
|
| 516 |
+
"type": "equation",
|
| 517 |
+
"text": "\n$$\np (\\text {N o t h i n g} | \\boldsymbol {r}, \\mathcal {C}) = \\frac {1}{1 + \\sum \\exp \\left(\\boldsymbol {c} \\left[ e ^ {\\prime} \\right]\\right)}. \\tag {5}\n$$\n",
|
| 518 |
+
"text_format": "latex",
|
| 519 |
+
"bbox": [
|
| 520 |
+
362,
|
| 521 |
+
796,
|
| 522 |
+
823,
|
| 523 |
+
829
|
| 524 |
+
],
|
| 525 |
+
"page_idx": 3
|
| 526 |
+
},
|
| 527 |
+
{
|
| 528 |
+
"type": "text",
|
| 529 |
+
"text": "3.2 Sampling",
|
| 530 |
+
"text_level": 1,
|
| 531 |
+
"bbox": [
|
| 532 |
+
171,
|
| 533 |
+
842,
|
| 534 |
+
279,
|
| 535 |
+
858
|
| 536 |
+
],
|
| 537 |
+
"page_idx": 3
|
| 538 |
+
},
|
| 539 |
+
{
|
| 540 |
+
"type": "text",
|
| 541 |
+
"text": "Sampling a molecule amounts to generating a set of atoms $\\{(e_i, r_i)\\}_{i=1}^{N_a}$ . However, formulating an effective sampling algorithm is non-trivial because of the following three challenges. First, we have to define the joint distribution of $e$ and $r$ , i.e. $p(e, r|C)$ , from which we can jointly sample an atom's",
|
| 542 |
+
"bbox": [
|
| 543 |
+
169,
|
| 544 |
+
868,
|
| 545 |
+
825,
|
| 546 |
+
912
|
| 547 |
+
],
|
| 548 |
+
"page_idx": 3
|
| 549 |
+
},
|
| 550 |
+
{
|
| 551 |
+
"type": "page_number",
|
| 552 |
+
"text": "4",
|
| 553 |
+
"bbox": [
|
| 554 |
+
493,
|
| 555 |
+
935,
|
| 556 |
+
504,
|
| 557 |
+
946
|
| 558 |
+
],
|
| 559 |
+
"page_idx": 3
|
| 560 |
+
},
|
| 561 |
+
{
|
| 562 |
+
"type": "text",
|
| 563 |
+
"text": "chemical element and its position. Second, notice that simply drawing i.i.d. samples from $p(e, \\mathbf{r}|\\mathcal{C})$ doesn't make sense because atoms are clearly not independent of each other. Thus, the sampling algorithm should be able to attend to the dependencies between atoms. Third, the sampling algorithm should produce multi-modal samples. This is important because in reality there is usually more than one molecule that can bind to a specific target.",
|
| 564 |
+
"bbox": [
|
| 565 |
+
169,
|
| 566 |
+
90,
|
| 567 |
+
823,
|
| 568 |
+
161
|
| 569 |
+
],
|
| 570 |
+
"page_idx": 4
|
| 571 |
+
},
|
| 572 |
+
{
|
| 573 |
+
"type": "text",
|
| 574 |
+
"text": "In the following, we first define the joint distribution $p(e, \\boldsymbol{r} | \\mathcal{C})$ . Then, we present an auto-regressive sampling algorithm to tackle the second and the third challenges.",
|
| 575 |
+
"bbox": [
|
| 576 |
+
169,
|
| 577 |
+
166,
|
| 578 |
+
823,
|
| 579 |
+
196
|
| 580 |
+
],
|
| 581 |
+
"page_idx": 4
|
| 582 |
+
},
|
| 583 |
+
{
|
| 584 |
+
"type": "text",
|
| 585 |
+
"text": "Joint Distribution We define the joint distribution of coordinate $\\mathbf{r}$ and atom type $e$ using Eq.4:",
|
| 586 |
+
"bbox": [
|
| 587 |
+
169,
|
| 588 |
+
209,
|
| 589 |
+
808,
|
| 590 |
+
224
|
| 591 |
+
],
|
| 592 |
+
"page_idx": 4
|
| 593 |
+
},
|
| 594 |
+
{
|
| 595 |
+
"type": "equation",
|
| 596 |
+
"text": "\n$$\np (e, \\boldsymbol {r} | \\mathcal {C}) = \\frac {\\exp (\\boldsymbol {c} [ e ])}{Z}, \\tag {6}\n$$\n",
|
| 597 |
+
"text_format": "latex",
|
| 598 |
+
"bbox": [
|
| 599 |
+
416,
|
| 600 |
+
229,
|
| 601 |
+
823,
|
| 602 |
+
258
|
| 603 |
+
],
|
| 604 |
+
"page_idx": 4
|
| 605 |
+
},
|
| 606 |
+
{
|
| 607 |
+
"type": "text",
|
| 608 |
+
"text": "where $Z$ is an unknown normalizing constant and $c$ is a function of $r$ and $\\mathcal{C}$ as defined in Eq.3. Though $p(e, r)$ is a non-normalized distribution, drawing samples from it would be efficient because the dimension of $r$ is only 3. Viable sampling methods include Markov chain Monte Carlo (MCMC) or discretization.",
|
| 609 |
+
"bbox": [
|
| 610 |
+
169,
|
| 611 |
+
263,
|
| 612 |
+
826,
|
| 613 |
+
319
|
| 614 |
+
],
|
| 615 |
+
"page_idx": 4
|
| 616 |
+
},
|
| 617 |
+
{
|
| 618 |
+
"type": "text",
|
| 619 |
+
"text": "Auto-Regressive Sampling We sample a molecule by progressively sampling one atom at each step. In specific, at step $t$ , the context $C_t$ contains not only protein atoms but also $t$ atoms sampled beforehand. Sampled atoms in $C_t$ are treated equally as protein atoms in the model, but they have different attributes in order to differentiate themselves from protein atoms. Then, the $(t + 1)$ -th atom will be sampled from $p(e, r | C_t)$ and will be added to $C_t$ , leading to the context for next step $C_{t + 1}$ . The sampling process is illustrated in Figure 1. Formally, we have:",
|
| 620 |
+
"bbox": [
|
| 621 |
+
169,
|
| 622 |
+
334,
|
| 623 |
+
825,
|
| 624 |
+
417
|
| 625 |
+
],
|
| 626 |
+
"page_idx": 4
|
| 627 |
+
},
|
| 628 |
+
{
|
| 629 |
+
"type": "equation",
|
| 630 |
+
"text": "\n$$\n\\left(e _ {t + 1}, \\boldsymbol {r} _ {t + 1}\\right) \\sim p (e, \\boldsymbol {r} | \\mathcal {C} _ {t}),\n$$\n",
|
| 631 |
+
"text_format": "latex",
|
| 632 |
+
"bbox": [
|
| 633 |
+
398,
|
| 634 |
+
421,
|
| 635 |
+
575,
|
| 636 |
+
439
|
| 637 |
+
],
|
| 638 |
+
"page_idx": 4
|
| 639 |
+
},
|
| 640 |
+
{
|
| 641 |
+
"type": "equation",
|
| 642 |
+
"text": "\n$$\n\\mathcal {C} _ {t + 1} \\leftarrow \\mathcal {C} _ {t} \\cup \\{(e _ {t + 1}, \\boldsymbol {r} _ {t + 1}) \\}. \\tag {7}\n$$\n",
|
| 643 |
+
"text_format": "latex",
|
| 644 |
+
"bbox": [
|
| 645 |
+
398,
|
| 646 |
+
434,
|
| 647 |
+
823,
|
| 648 |
+
455
|
| 649 |
+
],
|
| 650 |
+
"page_idx": 4
|
| 651 |
+
},
|
| 652 |
+
{
|
| 653 |
+
"type": "text",
|
| 654 |
+
"text": "To determine when the auto-regressive sampling should stop, we employ an auxiliary network. The network takes as input the embedding of previously sampled atoms, and classifies them into two categories: frontier and non-frontier. If all the existing atoms are non-frontier, which means there is no room for more atoms, the sampling will be terminated. Finally, we use OpenBabel [21, 20] to obtain bonds of generated structures.",
|
| 655 |
+
"bbox": [
|
| 656 |
+
169,
|
| 657 |
+
467,
|
| 658 |
+
823,
|
| 659 |
+
537
|
| 660 |
+
],
|
| 661 |
+
"page_idx": 4
|
| 662 |
+
},
|
| 663 |
+
{
|
| 664 |
+
"type": "text",
|
| 665 |
+
"text": "In summary, the proposed auto-regressive algorithm succeeds to settle the aforementioned two challenges. First, the model is aware of other atoms when placing new atoms, thus being able to consider the dependencies between them. Second, auto-regressive sampling is a stochastic process. Its sampling path naturally diverges, leading to diverse samples.",
|
| 666 |
+
"bbox": [
|
| 667 |
+
169,
|
| 668 |
+
542,
|
| 669 |
+
826,
|
| 670 |
+
599
|
| 671 |
+
],
|
| 672 |
+
"page_idx": 4
|
| 673 |
+
},
|
| 674 |
+
{
|
| 675 |
+
"type": "text",
|
| 676 |
+
"text": "3.3 Training",
|
| 677 |
+
"text_level": 1,
|
| 678 |
+
"bbox": [
|
| 679 |
+
171,
|
| 680 |
+
614,
|
| 681 |
+
274,
|
| 682 |
+
630
|
| 683 |
+
],
|
| 684 |
+
"page_idx": 4
|
| 685 |
+
},
|
| 686 |
+
{
|
| 687 |
+
"type": "text",
|
| 688 |
+
"text": "As we adopt auto-regressive sampling strategies, we propose a cloze-filling training scheme — at training time, a random portion of the target molecule is masked, and the network learns to predict the masked part from the observable part and the binding site. This emulates the sampling process where the model can only observe partial molecules. The training loss consists of three terms described below.",
|
| 689 |
+
"bbox": [
|
| 690 |
+
169,
|
| 691 |
+
641,
|
| 692 |
+
823,
|
| 693 |
+
709
|
| 694 |
+
],
|
| 695 |
+
"page_idx": 4
|
| 696 |
+
},
|
| 697 |
+
{
|
| 698 |
+
"type": "text",
|
| 699 |
+
"text": "First, to make sure the model is able to predict positions that actually have atoms (positive positions), we include a binary cross entropy loss to contrast positive positions against negative positions:",
|
| 700 |
+
"bbox": [
|
| 701 |
+
169,
|
| 702 |
+
715,
|
| 703 |
+
826,
|
| 704 |
+
744
|
| 705 |
+
],
|
| 706 |
+
"page_idx": 4
|
| 707 |
+
},
|
| 708 |
+
{
|
| 709 |
+
"type": "equation",
|
| 710 |
+
"text": "\n$$\nL _ {\\mathrm {B C E}} = - \\mathbb {E} _ {\\boldsymbol {r} \\sim p _ {+}} \\left[ \\log \\left(1 - p (\\text {N o t h i n g} | \\boldsymbol {r}, \\mathcal {C})\\right) \\right] - \\mathbb {E} _ {\\boldsymbol {r} \\sim p _ {-}} \\left[ \\log p (\\text {N o t h i n g} | \\boldsymbol {r}, \\mathcal {C}) \\right]. \\tag {8}\n$$\n",
|
| 711 |
+
"text_format": "latex",
|
| 712 |
+
"bbox": [
|
| 713 |
+
233,
|
| 714 |
+
750,
|
| 715 |
+
823,
|
| 716 |
+
767
|
| 717 |
+
],
|
| 718 |
+
"page_idx": 4
|
| 719 |
+
},
|
| 720 |
+
{
|
| 721 |
+
"type": "text",
|
| 722 |
+
"text": "Here, $p_{+}$ is a positive sampler that yields coordinates of masked atoms. $p_{-}$ is a negative sampler that yields random coordinates in the ambient space. $p_{-}$ is empirically defined as a Gaussian mixture model containing $|\\mathcal{C}|$ components centered at each atom in $\\mathcal{C}$ . The standard deviation of each component is set to $2\\AA$ in order to cover the ambient space. Intuitively, the first term in Eq.8 increases the likelihood of atom placement for positions that should get an atom. The second term decreases the likelihood for other positions.",
|
| 723 |
+
"bbox": [
|
| 724 |
+
169,
|
| 725 |
+
771,
|
| 726 |
+
825,
|
| 727 |
+
856
|
| 728 |
+
],
|
| 729 |
+
"page_idx": 4
|
| 730 |
+
},
|
| 731 |
+
{
|
| 732 |
+
"type": "text",
|
| 733 |
+
"text": "Second, our model should be able to predict the chemical element of atoms. Hence, we further include a standard categorical cross entropy loss:",
|
| 734 |
+
"bbox": [
|
| 735 |
+
169,
|
| 736 |
+
863,
|
| 737 |
+
823,
|
| 738 |
+
891
|
| 739 |
+
],
|
| 740 |
+
"page_idx": 4
|
| 741 |
+
},
|
| 742 |
+
{
|
| 743 |
+
"type": "equation",
|
| 744 |
+
"text": "\n$$\nL _ {\\mathrm {C A T}} = - \\mathbb {E} _ {(e, \\boldsymbol {r}) \\sim p _ {+}} [ \\log p (e | \\boldsymbol {r}, \\mathcal {C}) ]. \\tag {9}\n$$\n",
|
| 745 |
+
"text_format": "latex",
|
| 746 |
+
"bbox": [
|
| 747 |
+
377,
|
| 748 |
+
896,
|
| 749 |
+
823,
|
| 750 |
+
914
|
| 751 |
+
],
|
| 752 |
+
"page_idx": 4
|
| 753 |
+
},
|
| 754 |
+
{
|
| 755 |
+
"type": "page_number",
|
| 756 |
+
"text": "5",
|
| 757 |
+
"bbox": [
|
| 758 |
+
493,
|
| 759 |
+
935,
|
| 760 |
+
503,
|
| 761 |
+
946
|
| 762 |
+
],
|
| 763 |
+
"page_idx": 4
|
| 764 |
+
},
|
| 765 |
+
{
|
| 766 |
+
"type": "image",
|
| 767 |
+
"img_path": "images/26173af1982533db59e79d07c71af72de54caecdd0ba2c065fe89e90f7a5ecee.jpg",
|
| 768 |
+
"image_caption": [
|
| 769 |
+
"Figure 2: (a) A portion of the molecule is masked. (b) Positive coordinates are drawn from the masked atoms' positions and negative coordinates are drawn from the ambient space. (c) Both positive and negative coordinates are fed into the model. The model predicts the probability of atom occurrence at the coordinates. (d) Training losses are computed based on the discrepancy between predicted probabilities and ground truth."
|
| 770 |
+
],
|
| 771 |
+
"image_footnote": [],
|
| 772 |
+
"bbox": [
|
| 773 |
+
178,
|
| 774 |
+
90,
|
| 775 |
+
823,
|
| 776 |
+
272
|
| 777 |
+
],
|
| 778 |
+
"page_idx": 5
|
| 779 |
+
},
|
| 780 |
+
{
|
| 781 |
+
"type": "text",
|
| 782 |
+
"text": "Third, as introduced in Section 3.2, the sampling algorithm requires a frontier network to tell whether the sampling should be terminated. This leads to the last term — a standard binary cross entropy loss for training the frontier network:",
|
| 783 |
+
"bbox": [
|
| 784 |
+
169,
|
| 785 |
+
398,
|
| 786 |
+
823,
|
| 787 |
+
443
|
| 788 |
+
],
|
| 789 |
+
"page_idx": 5
|
| 790 |
+
},
|
| 791 |
+
{
|
| 792 |
+
"type": "equation",
|
| 793 |
+
"text": "\n$$\nL _ {\\mathrm {F}} = \\sum_ {i \\in \\mathcal {F} \\subseteq \\mathcal {C}} \\log \\sigma \\left(F \\left(\\boldsymbol {h} _ {i}\\right)\\right) + \\sum_ {i \\notin \\mathcal {F} \\subseteq \\mathcal {C}} \\log \\left(1 - \\sigma \\left(F \\left(\\boldsymbol {h} _ {i}\\right)\\right)\\right), \\tag {10}\n$$\n",
|
| 794 |
+
"text_format": "latex",
|
| 795 |
+
"bbox": [
|
| 796 |
+
310,
|
| 797 |
+
455,
|
| 798 |
+
823,
|
| 799 |
+
491
|
| 800 |
+
],
|
| 801 |
+
"page_idx": 5
|
| 802 |
+
},
|
| 803 |
+
{
|
| 804 |
+
"type": "text",
|
| 805 |
+
"text": "where $\\mathcal{F}$ is the set of frontier atoms in $\\mathcal{C}$ , $\\sigma$ is the sigmoid function, and $F(\\cdot)$ is the frontier network that takes atom embedding as input and predicts the logit probability of the atom being a frontier. During training, an atom is regarded as a frontier if and only if (1) the atom is a part of the target molecule, and (2) at least one of its bonded atom is masked.",
|
| 806 |
+
"bbox": [
|
| 807 |
+
169,
|
| 808 |
+
503,
|
| 809 |
+
826,
|
| 810 |
+
559
|
| 811 |
+
],
|
| 812 |
+
"page_idx": 5
|
| 813 |
+
},
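The labeling rule above is simple to state in code. A small sketch, assuming the molecule is given as an adjacency list `bonds` over atom indices plus boolean flags (all names illustrative):

```python
def frontier_labels(num_atoms, bonds, masked, in_target):
    """An atom is a frontier iff it belongs to the target molecule and
    at least one atom bonded to it is currently masked."""
    return [in_target[i] and any(masked[j] for j in bonds[i])
            for i in range(num_atoms)]
```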
|
| 814 |
+
{
|
| 815 |
+
"type": "text",
|
| 816 |
+
"text": "Finally, by summing up $L_{\\mathrm{BCE}}$ , $L_{\\mathrm{CAT}}$ , and $L_{\\mathrm{F}}$ , we obtain the full training loss $L = L_{\\mathrm{BCE}} + L_{\\mathrm{CAT}} + L_{\\mathrm{F}}$ . The full training process is illustrated in Figure 2.",
|
| 817 |
+
"bbox": [
|
| 818 |
+
169,
|
| 819 |
+
566,
|
| 820 |
+
826,
|
| 821 |
+
595
|
| 822 |
+
],
|
| 823 |
+
"page_idx": 5
|
| 824 |
+
},
|
| 825 |
+
{
|
| 826 |
+
"type": "text",
|
| 827 |
+
"text": "4 Experiments",
|
| 828 |
+
"text_level": 1,
|
| 829 |
+
"bbox": [
|
| 830 |
+
171,
|
| 831 |
+
622,
|
| 832 |
+
313,
|
| 833 |
+
638
|
| 834 |
+
],
|
| 835 |
+
"page_idx": 5
|
| 836 |
+
},
|
| 837 |
+
{
|
| 838 |
+
"type": "text",
|
| 839 |
+
"text": "We evaluate the proposed method on two relevant structure-based drug design tasks: (1) Molecule Design is to generate molecules for given binding sites (Section 4.1), and (2) Linker Prediction is to generate substructures to link two given fragments in the binding site. (Section 4.2). Below, we describe common setups shared across tasks. Detailed task-specific setups are provided in each subsection.",
|
| 840 |
+
"bbox": [
|
| 841 |
+
169,
|
| 842 |
+
656,
|
| 843 |
+
826,
|
| 844 |
+
726
|
| 845 |
+
],
|
| 846 |
+
"page_idx": 5
|
| 847 |
+
},
|
| 848 |
+
{
|
| 849 |
+
"type": "text",
|
| 850 |
+
"text": "Data We use the CrossDocked dataset [7] following [20]. The dataset originally contains 22.5 million docked protein-ligand pairs at different levels of quality. We filter out data points whose binding pose RMSD is greater than $1\\AA$ , leading to a refined subset consisting of 184,057 data points. We use mmseqs2 [31] to cluster data at $30\\%$ sequence identity, and randomly draw 100,000 protein-ligand pairs for training and 100 proteins from remaining clusters for testing.",
|
| 851 |
+
"bbox": [
|
| 852 |
+
169,
|
| 853 |
+
748,
|
| 854 |
+
823,
|
| 855 |
+
821
|
| 856 |
+
],
|
| 857 |
+
"page_idx": 5
|
| 858 |
+
},
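For readers reproducing the split: clustering at 30% sequence identity maps naturally onto MMseqs2's `easy-cluster` workflow. A sketch; the file names below are placeholders, and the exact flags the authors used are not specified here.

```python
import subprocess

# Cluster pocket sequences at 30% identity (file names are placeholders).
subprocess.run(
    ["mmseqs", "easy-cluster", "pockets.fasta", "clusterRes", "tmp",
     "--min-seq-id", "0.3"],
    check=True,
)
```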
|
| 859 |
+
{
|
| 860 |
+
"type": "text",
|
| 861 |
+
"text": "Model We trained a universal model for all the tasks. The number of message passing layers in context encoder $L$ is 6, and the hidden dimension is 256. We train the model using the Adam optimizer at learning rate 0.0001. Other details about model architectures and training parameters are provided in the supplementary material and the open source repository: https://github.com/luost26/3D-Generative-SBDD.",
|
| 862 |
+
"bbox": [
|
| 863 |
+
169,
|
| 864 |
+
842,
|
| 865 |
+
826,
|
| 866 |
+
911
|
| 867 |
+
],
|
| 868 |
+
"page_idx": 5
|
| 869 |
+
},
|
| 870 |
+
{
|
| 871 |
+
"type": "page_number",
|
| 872 |
+
"text": "6",
|
| 873 |
+
"bbox": [
|
| 874 |
+
493,
|
| 875 |
+
936,
|
| 876 |
+
504,
|
| 877 |
+
946
|
| 878 |
+
],
|
| 879 |
+
"page_idx": 5
|
| 880 |
+
},
|
| 881 |
+
{
|
| 882 |
+
"type": "table",
|
| 883 |
+
"img_path": "images/c4eaf4a326a1a45988e6da817f4d6a284ac5e4f2fe40987950f26c6762eef558.jpg",
|
| 884 |
+
"table_caption": [],
|
| 885 |
+
"table_footnote": [],
|
| 886 |
+
"table_body": "<table><tr><td colspan=\"2\">Metric</td><td>liGAN</td><td>Ours</td><td>Ref</td></tr><tr><td rowspan=\"2\">Vina Score (kcal/mol, ↓)</td><td>Avg.</td><td>-6.144</td><td>-6.344</td><td>-7.158</td></tr><tr><td>Med.</td><td>-6.100</td><td>-6.200</td><td>-6.950</td></tr><tr><td rowspan=\"2\">QED (↑)</td><td>Avg.</td><td>0.371</td><td>0.525</td><td>0.484</td></tr><tr><td>Med.</td><td>0.369</td><td>0.519</td><td>0.469</td></tr><tr><td rowspan=\"2\">SA (↑)</td><td>Avg.</td><td>0.591</td><td>0.657</td><td>0.733</td></tr><tr><td>Med.</td><td>0.570</td><td>0.650</td><td>0.745</td></tr><tr><td rowspan=\"2\">High Affinity (%, ↑)</td><td>Avg.</td><td>23.77</td><td>29.09</td><td>-</td></tr><tr><td>Med.</td><td>11.00</td><td>18.50</td><td>-</td></tr><tr><td rowspan=\"2\">Diversity (↑)</td><td>Avg.</td><td>0.655</td><td>0.720</td><td>-</td></tr><tr><td>Med.</td><td>0.676</td><td>0.736</td><td>-</td></tr></table>",
|
| 887 |
+
"bbox": [
|
| 888 |
+
173,
|
| 889 |
+
128,
|
| 890 |
+
486,
|
| 891 |
+
306
|
| 892 |
+
],
|
| 893 |
+
"page_idx": 6
|
| 894 |
+
},
|
| 895 |
+
{
|
| 896 |
+
"type": "text",
|
| 897 |
+
"text": "Table 1: Mean and median values of the four metrics on generation quality. $(\\uparrow)$ indicates higher is better. $(\\downarrow)$ indicates lower is better.",
|
| 898 |
+
"bbox": [
|
| 899 |
+
171,
|
| 900 |
+
309,
|
| 901 |
+
488,
|
| 902 |
+
351
|
| 903 |
+
],
|
| 904 |
+
"page_idx": 6
|
| 905 |
+
},
|
| 906 |
+
{
|
| 907 |
+
"type": "image",
|
| 908 |
+
"img_path": "images/0750d40b7f9d7642865d615631a14f49fe5145c37827fffc1d8665bf43e514e4.jpg",
|
| 909 |
+
"image_caption": [
|
| 910 |
+
"Figure 3: Distributions of Vina, QED, and SA scores over all the generated molecules."
|
| 911 |
+
],
|
| 912 |
+
"image_footnote": [],
|
| 913 |
+
"bbox": [
|
| 914 |
+
509,
|
| 915 |
+
132,
|
| 916 |
+
823,
|
| 917 |
+
292
|
| 918 |
+
],
|
| 919 |
+
"page_idx": 6
|
| 920 |
+
},
|
| 921 |
+
{
|
| 922 |
+
"type": "text",
|
| 923 |
+
"text": "4.1 Molecule Design",
|
| 924 |
+
"text_level": 1,
|
| 925 |
+
"bbox": [
|
| 926 |
+
171,
|
| 927 |
+
383,
|
| 928 |
+
328,
|
| 929 |
+
400
|
| 930 |
+
],
|
| 931 |
+
"page_idx": 6
|
| 932 |
+
},
|
| 933 |
+
{
|
| 934 |
+
"type": "text",
|
| 935 |
+
"text": "In this task, we generate molecules for specific binding sites with our model and baselines. The input to models are binding sites extracted from the proteins in the testing set. We sample 100 unique molecules for each target.",
|
| 936 |
+
"bbox": [
|
| 937 |
+
169,
|
| 938 |
+
410,
|
| 939 |
+
823,
|
| 940 |
+
452
|
| 941 |
+
],
|
| 942 |
+
"page_idx": 6
|
| 943 |
+
},
|
| 944 |
+
{
|
| 945 |
+
"type": "text",
|
| 946 |
+
"text": "Baselines We compare our approach with the state-of-the-art baseline liGAN [20]. liGAN is based on conventional 3D convolutional neural networks. It generates voxelized molecular images and relies on a post-processing algorithm to reconstruct the molecule from the generated image.",
|
| 947 |
+
"bbox": [
|
| 948 |
+
169,
|
| 949 |
+
465,
|
| 950 |
+
823,
|
| 951 |
+
510
|
| 952 |
+
],
|
| 953 |
+
"page_idx": 6
|
| 954 |
+
},
|
| 955 |
+
{
|
| 956 |
+
"type": "text",
|
| 957 |
+
"text": "Metrics We evaluate the quality of generated molecules from three main aspects: (1) Binding Affinity measures how well the generated molecules fit the binding site. We use Vina [33, 1] to compute the binding affinity (Vina Score). Before feeding the molecules to Vina, we employ the universal force fields (UFF) [24] to refine the generated structures following [20]. (2) Drug Likeness reflects how much a molecule is like a drug. We use QED score [4] as the metric for drug-likeness. (3) Synthesizability assesses the ease of synthesis of generated molecules. We use normalized SA score [6, 35] to measure molecules' synthesizability.",
|
| 958 |
+
"bbox": [
|
| 959 |
+
169,
|
| 960 |
+
523,
|
| 961 |
+
823,
|
| 962 |
+
621
|
| 963 |
+
],
|
| 964 |
+
"page_idx": 6
|
| 965 |
+
},
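Both drug-property metrics are available in standard toolkits. For instance, the QED score [4] can be computed with RDKit as below (the SA score additionally requires RDKit's contrib `sascorer` module, omitted here); the SMILES string is just an example input.

```python
from rdkit import Chem
from rdkit.Chem import QED

mol = Chem.MolFromSmiles("CC(=O)Oc1ccccc1C(=O)O")  # aspirin, as an example
print(QED.qed(mol))  # value in [0, 1]; higher means more drug-like
```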
|
| 966 |
+
{
|
| 967 |
+
"type": "text",
|
| 968 |
+
"text": "In order to evaluate the generation quality and diversity for each binding site, we define two additional metrics: (1) Percentage of Samples with High Affinity, which measures the percentage of a binding site's generated molecules whose binding affinity is higher than or equal to the reference ligand. (2) Diversity [14], which measures the diversity of generated molecules for a binding site. It is calculated by averaging pairwise Tanimoto similarities [3, 32] over Morgan fingerprints among the generated molecules of a target.",
|
| 969 |
+
"bbox": [
|
| 970 |
+
169,
|
| 971 |
+
627,
|
| 972 |
+
826,
|
| 973 |
+
710
|
| 974 |
+
],
|
| 975 |
+
"page_idx": 6
|
| 976 |
+
},
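As a concrete reference, the pairwise similarities underlying the Diversity metric can be computed with RDKit's Morgan fingerprints. A minimal sketch; note that diversity is often reported as one minus this average, so the exact convention should follow the paper.

```python
from itertools import combinations
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem

def avg_pairwise_tanimoto(smiles_list, radius=2, n_bits=2048):
    """Average pairwise Tanimoto similarity over Morgan fingerprints
    among the generated molecules of one target."""
    fps = [AllChem.GetMorganFingerprintAsBitVect(
               Chem.MolFromSmiles(s), radius, nBits=n_bits)
           for s in smiles_list]
    sims = [DataStructs.TanimotoSimilarity(a, b)
            for a, b in combinations(fps, 2)]
    return sum(sims) / len(sims)
```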
|
| 977 |
+
{
|
| 978 |
+
"type": "text",
|
| 979 |
+
"text": "Results We first calculate Vina Score, QED, and SA for each of the generated molecules. Figure 3 presents the histogram of these three metrics and Table 1 shows the mean and median values of them over all generated molecules. For each binding site, we further calculate Percentage of Samples with High Affinity and Diversity. We report their mean and median values in the bottom half of Table 1. From the quantitative results, we find that in general, our model is able to discover diverse molecules that have higher binding affinity to specific targets. Besides, the generated molecules from our model also exhibit other desirable properties including fairly high drug-likeness and synthesizeability. When compared to the CNN baseline liGAN [20], our method achieves clearly better performance on all metrics, especially on the drug-likeness score QED, which indicates that our model produces more realistic drug-like molecules.",
|
| 980 |
+
"bbox": [
|
| 981 |
+
169,
|
| 982 |
+
724,
|
| 983 |
+
826,
|
| 984 |
+
864
|
| 985 |
+
],
|
| 986 |
+
"page_idx": 6
|
| 987 |
+
},
|
| 988 |
+
{
|
| 989 |
+
"type": "text",
|
| 990 |
+
"text": "To better understand the results, we select two binding sites in the testing set and visualize their top affinity samples for closer inspection. The top row of Figure 4 is the first example (PDB ID:2hcj). The average QED and SA scores of the generated molecules for this target are 0.483 and 0.663",
|
| 991 |
+
"bbox": [
|
| 992 |
+
169,
|
| 993 |
+
869,
|
| 994 |
+
826,
|
| 995 |
+
912
|
| 996 |
+
],
|
| 997 |
+
"page_idx": 6
|
| 998 |
+
},
|
| 999 |
+
{
|
| 1000 |
+
"type": "page_number",
|
| 1001 |
+
"text": "7",
|
| 1002 |
+
"bbox": [
|
| 1003 |
+
493,
|
| 1004 |
+
935,
|
| 1005 |
+
503,
|
| 1006 |
+
946
|
| 1007 |
+
],
|
| 1008 |
+
"page_idx": 6
|
| 1009 |
+
},
|
| 1010 |
+
{
|
| 1011 |
+
"type": "image",
|
| 1012 |
+
"img_path": "images/6f2b391077fc840855fb1e87adbff3f03585de1dafee1347c6f7a48f3f463bc0.jpg",
|
| 1013 |
+
"image_caption": [
|
| 1014 |
+
"Ours (2hcj)",
|
| 1015 |
+
"Figure 4: Generated molecules with top binding affinity and the reference molecule for two representative binding sites. Lower Vina score indicates higher binding affinity."
|
| 1016 |
+
],
|
| 1017 |
+
"image_footnote": [],
|
| 1018 |
+
"bbox": [
|
| 1019 |
+
174,
|
| 1020 |
+
101,
|
| 1021 |
+
823,
|
| 1022 |
+
372
|
| 1023 |
+
],
|
| 1024 |
+
"page_idx": 7
|
| 1025 |
+
},
|
| 1026 |
+
{
|
| 1027 |
+
"type": "text",
|
| 1028 |
+
"text": "respectively, around the median of these two scores. $8\\%$ of the generated molecules have higher binding affinity than the reference molecule, below the median $18.5\\%$ . The second example (PDB ID:4r1u) is shown in the bottom row. The average QED and SA scores are 0.728 and 0.785, and $18\\%$ of sampled molecules achieve higher binding affinity. From these two examples in Figure 4, we can see that the generated molecules have overall structures similar to the reference molecule and they share some common important substructures, which indicates that the generated molecules fit into the binding site as well as the reference one. Besides, the top affinity molecules generally achieve QED and SA score comparable to or even higher than the reference molecule, which reflects that the top affinity molecules not only fit well into the binding site but also exhibit desirable quality. In conclusion, the above two representative cases evidence the model's ability to generate drug-like and high binding affinity molecules for designated targets.",
|
| 1029 |
+
"bbox": [
|
| 1030 |
+
169,
|
| 1031 |
+
455,
|
| 1032 |
+
826,
|
| 1033 |
+
609
|
| 1034 |
+
],
|
| 1035 |
+
"page_idx": 7
|
| 1036 |
+
},
|
| 1037 |
+
{
|
| 1038 |
+
"type": "text",
|
| 1039 |
+
"text": "4.2 Linker Prediction",
|
| 1040 |
+
"text_level": 1,
|
| 1041 |
+
"bbox": [
|
| 1042 |
+
171,
|
| 1043 |
+
630,
|
| 1044 |
+
338,
|
| 1045 |
+
645
|
| 1046 |
+
],
|
| 1047 |
+
"page_idx": 7
|
| 1048 |
+
},
|
| 1049 |
+
{
|
| 1050 |
+
"type": "text",
|
| 1051 |
+
"text": "Linker prediction is to build a molecule that incorporates two given disconnected fragments in the context of a binding site [12]. Our model is capable of linker design without any task-specific adaptation or re-training. In specific, given a binding site and some fragments as input, we compose the initial context $\\mathcal{C}_0$ containing both the binding site and the fragments. Then, we run the auto-regressive sampling algorithm to sequentially add atoms until the molecule is comp",
|
| 1052 |
+
"bbox": [
|
| 1053 |
+
169,
|
| 1054 |
+
659,
|
| 1055 |
+
519,
|
| 1056 |
+
784
|
| 1057 |
+
],
|
| 1058 |
+
"page_idx": 7
|
| 1059 |
+
},
|
| 1060 |
+
{
|
| 1061 |
+
"type": "table",
|
| 1062 |
+
"img_path": "images/dc2a8a7ef13974197742026facbf2e61ef7467fc3c772eba35ba08c86ec7e78e.jpg",
|
| 1063 |
+
"table_caption": [
|
| 1064 |
+
"Table 2: Performance of linker prediction."
|
| 1065 |
+
],
|
| 1066 |
+
"table_footnote": [],
|
| 1067 |
+
"table_body": "<table><tr><td colspan=\"2\">Metric</td><td>DeLinker</td><td>Ours</td></tr><tr><td rowspan=\"2\">Similarity (↑)</td><td>Avg.</td><td>0.612</td><td>0.701</td></tr><tr><td>Med.</td><td>0.600</td><td>0.722</td></tr><tr><td colspan=\"2\">Recovered (%, ↑)</td><td>40.00</td><td>48.33</td></tr><tr><td rowspan=\"2\">Vina Score (kcal/mol, ↓)</td><td>Avg.</td><td>-8.512</td><td>-8.603</td></tr><tr><td>Med.</td><td>-8.576</td><td>-8.575</td></tr></table>",
|
| 1068 |
+
"bbox": [
|
| 1069 |
+
535,
|
| 1070 |
+
679,
|
| 1071 |
+
818,
|
| 1072 |
+
782
|
| 1073 |
+
],
|
| 1074 |
+
"page_idx": 7
|
| 1075 |
+
},
|
| 1076 |
+
{
|
| 1077 |
+
"type": "text",
|
| 1078 |
+
"text": "Data Preparation Following [12], we construct fragments of molecules in the testing set by enumerating possible double-cuts of acyclic single bonds. The pre-processing results in 120 data points in total. Each of them consists of two disconnected molecule fragments.",
|
| 1079 |
+
"bbox": [
|
| 1080 |
+
169,
|
| 1081 |
+
805,
|
| 1082 |
+
823,
|
| 1083 |
+
849
|
| 1084 |
+
],
|
| 1085 |
+
"page_idx": 7
|
| 1086 |
+
},
|
| 1087 |
+
{
|
| 1088 |
+
"type": "text",
|
| 1089 |
+
"text": "Baselines We compare our model with DeLinker [12]. Despite that DeLinker incorporates some 3D information, it is still a graph-based generative model. In contrast, our method operates fully in 3D space and thus is able to fully utilize the 3D context.",
|
| 1090 |
+
"bbox": [
|
| 1091 |
+
169,
|
| 1092 |
+
869,
|
| 1093 |
+
823,
|
| 1094 |
+
912
|
| 1095 |
+
],
|
| 1096 |
+
"page_idx": 7
|
| 1097 |
+
},
|
| 1098 |
+
{
|
| 1099 |
+
"type": "page_number",
|
| 1100 |
+
"text": "8",
|
| 1101 |
+
"bbox": [
|
| 1102 |
+
493,
|
| 1103 |
+
935,
|
| 1104 |
+
503,
|
| 1105 |
+
946
|
| 1106 |
+
],
|
| 1107 |
+
"page_idx": 7
|
| 1108 |
+
},
|
| 1109 |
+
{
|
| 1110 |
+
"type": "image",
|
| 1111 |
+
"img_path": "images/0dc3b8de544bac5cc39808bd05dcc32f65c9a9c47b4a0d0c8f43f45440a385e4.jpg",
|
| 1112 |
+
"image_caption": [
|
| 1113 |
+
"Fragments"
|
| 1114 |
+
],
|
| 1115 |
+
"image_footnote": [],
|
| 1116 |
+
"bbox": [
|
| 1117 |
+
174,
|
| 1118 |
+
103,
|
| 1119 |
+
267,
|
| 1120 |
+
172
|
| 1121 |
+
],
|
| 1122 |
+
"page_idx": 8
|
| 1123 |
+
},
|
| 1124 |
+
{
|
| 1125 |
+
"type": "image",
|
| 1126 |
+
"img_path": "images/ae3ceaf181df459dbf9db8bd9c26aad3351eac272490db6fc2f5c426848f2725.jpg",
|
| 1127 |
+
"image_caption": [
|
| 1128 |
+
"Predicted"
|
| 1129 |
+
],
|
| 1130 |
+
"image_footnote": [],
|
| 1131 |
+
"bbox": [
|
| 1132 |
+
272,
|
| 1133 |
+
102,
|
| 1134 |
+
364,
|
| 1135 |
+
172
|
| 1136 |
+
],
|
| 1137 |
+
"page_idx": 8
|
| 1138 |
+
},
|
| 1139 |
+
{
|
| 1140 |
+
"type": "image",
|
| 1141 |
+
"img_path": "images/c9c19610ccdd74ddf680d87bbf40889d3930be08248420a838277e51946be21a.jpg",
|
| 1142 |
+
"image_caption": [],
|
| 1143 |
+
"image_footnote": [],
|
| 1144 |
+
"bbox": [
|
| 1145 |
+
366,
|
| 1146 |
+
102,
|
| 1147 |
+
450,
|
| 1148 |
+
172
|
| 1149 |
+
],
|
| 1150 |
+
"page_idx": 8
|
| 1151 |
+
},
|
| 1152 |
+
{
|
| 1153 |
+
"type": "image",
|
| 1154 |
+
"img_path": "images/a9dbb7cd5344e4c49a4edf0bd0e3d2de5261075896a6a4b544b5628b6b20fcb9.jpg",
|
| 1155 |
+
"image_caption": [],
|
| 1156 |
+
"image_footnote": [],
|
| 1157 |
+
"bbox": [
|
| 1158 |
+
452,
|
| 1159 |
+
102,
|
| 1160 |
+
540,
|
| 1161 |
+
172
|
| 1162 |
+
],
|
| 1163 |
+
"page_idx": 8
|
| 1164 |
+
},
|
| 1165 |
+
{
|
| 1166 |
+
"type": "image",
|
| 1167 |
+
"img_path": "images/fb75cc09680d44d7261c8ca07449ced88803e888000ce9553e19f5c8e277a8ff.jpg",
|
| 1168 |
+
"image_caption": [],
|
| 1169 |
+
"image_footnote": [],
|
| 1170 |
+
"bbox": [
|
| 1171 |
+
542,
|
| 1172 |
+
102,
|
| 1173 |
+
632,
|
| 1174 |
+
172
|
| 1175 |
+
],
|
| 1176 |
+
"page_idx": 8
|
| 1177 |
+
},
|
| 1178 |
+
{
|
| 1179 |
+
"type": "image",
|
| 1180 |
+
"img_path": "images/657f2f30e67163c140daf2bbae8495a45b066339ca866458b0ecaff67a46a064.jpg",
|
| 1181 |
+
"image_caption": [],
|
| 1182 |
+
"image_footnote": [],
|
| 1183 |
+
"bbox": [
|
| 1184 |
+
633,
|
| 1185 |
+
102,
|
| 1186 |
+
723,
|
| 1187 |
+
172
|
| 1188 |
+
],
|
| 1189 |
+
"page_idx": 8
|
| 1190 |
+
},
|
| 1191 |
+
{
|
| 1192 |
+
"type": "image",
|
| 1193 |
+
"img_path": "images/e23b057df0849e753648860a7246741950c187cee4d163e194c5daf3d9a31287.jpg",
|
| 1194 |
+
"image_caption": [
|
| 1195 |
+
"Reference"
|
| 1196 |
+
],
|
| 1197 |
+
"image_footnote": [],
|
| 1198 |
+
"bbox": [
|
| 1199 |
+
728,
|
| 1200 |
+
102,
|
| 1201 |
+
823,
|
| 1202 |
+
172
|
| 1203 |
+
],
|
| 1204 |
+
"page_idx": 8
|
| 1205 |
+
},
|
| 1206 |
+
{
|
| 1207 |
+
"type": "image",
|
| 1208 |
+
"img_path": "images/baea800ffb0d3cd9901cf1d6f546cd7a95ea8b77d5302574b88ddcc5cc390bfc.jpg",
|
| 1209 |
+
"image_caption": [],
|
| 1210 |
+
"image_footnote": [],
|
| 1211 |
+
"bbox": [
|
| 1212 |
+
179,
|
| 1213 |
+
174,
|
| 1214 |
+
263,
|
| 1215 |
+
215
|
| 1216 |
+
],
|
| 1217 |
+
"page_idx": 8
|
| 1218 |
+
},
|
| 1219 |
+
{
|
| 1220 |
+
"type": "image",
|
| 1221 |
+
"img_path": "images/b985ebbe96527145f14443cc6f37106f52e4194c8b0300deb728b1091e563b1a.jpg",
|
| 1222 |
+
"image_caption": [
|
| 1223 |
+
"Similarity: 1.00"
|
| 1224 |
+
],
|
| 1225 |
+
"image_footnote": [],
|
| 1226 |
+
"bbox": [
|
| 1227 |
+
274,
|
| 1228 |
+
174,
|
| 1229 |
+
361,
|
| 1230 |
+
217
|
| 1231 |
+
],
|
| 1232 |
+
"page_idx": 8
|
| 1233 |
+
},
|
| 1234 |
+
{
|
| 1235 |
+
"type": "image",
|
| 1236 |
+
"img_path": "images/c31dab9de609d7fab6dcad7b0e01cae0ee691dc4b76d8d4c1f127c23859282ed.jpg",
|
| 1237 |
+
"image_caption": [
|
| 1238 |
+
"Similarity: 0.91",
|
| 1239 |
+
"Similarity: 0.87"
|
| 1240 |
+
],
|
| 1241 |
+
"image_footnote": [],
|
| 1242 |
+
"bbox": [
|
| 1243 |
+
364,
|
| 1244 |
+
174,
|
| 1245 |
+
450,
|
| 1246 |
+
217
|
| 1247 |
+
],
|
| 1248 |
+
"page_idx": 8
|
| 1249 |
+
},
|
| 1250 |
+
{
|
| 1251 |
+
"type": "image",
|
| 1252 |
+
"img_path": "images/30529b77606437ca33c77bc70b485a4f06453c50af916db96dc05b8c7b5b87c7.jpg",
|
| 1253 |
+
"image_caption": [],
|
| 1254 |
+
"image_footnote": [],
|
| 1255 |
+
"bbox": [
|
| 1256 |
+
452,
|
| 1257 |
+
174,
|
| 1258 |
+
540,
|
| 1259 |
+
215
|
| 1260 |
+
],
|
| 1261 |
+
"page_idx": 8
|
| 1262 |
+
},
|
| 1263 |
+
{
|
| 1264 |
+
"type": "image",
|
| 1265 |
+
"img_path": "images/e3ea54afc3ea71d40f4544d9642724c48876e5d80741ea140be428a1355913f9.jpg",
|
| 1266 |
+
"image_caption": [
|
| 1267 |
+
"Similarity: 0.85"
|
| 1268 |
+
],
|
| 1269 |
+
"image_footnote": [],
|
| 1270 |
+
"bbox": [
|
| 1271 |
+
542,
|
| 1272 |
+
174,
|
| 1273 |
+
630,
|
| 1274 |
+
215
|
| 1275 |
+
],
|
| 1276 |
+
"page_idx": 8
|
| 1277 |
+
},
|
| 1278 |
+
{
|
| 1279 |
+
"type": "image",
|
| 1280 |
+
"img_path": "images/d1887045b7268b49402d623589243f58b828747ebba8a67e1792fe2bb1eb69d7.jpg",
|
| 1281 |
+
"image_caption": [
|
| 1282 |
+
"Similarity: 0.79"
|
| 1283 |
+
],
|
| 1284 |
+
"image_footnote": [],
|
| 1285 |
+
"bbox": [
|
| 1286 |
+
632,
|
| 1287 |
+
174,
|
| 1288 |
+
718,
|
| 1289 |
+
215
|
| 1290 |
+
],
|
| 1291 |
+
"page_idx": 8
|
| 1292 |
+
},
|
| 1293 |
+
{
|
| 1294 |
+
"type": "image",
|
| 1295 |
+
"img_path": "images/1c84732a6e23c89040f5285915430b39911f008388c6b6405ba9d45eed96e583.jpg",
|
| 1296 |
+
"image_caption": [],
|
| 1297 |
+
"image_footnote": [],
|
| 1298 |
+
"bbox": [
|
| 1299 |
+
743,
|
| 1300 |
+
174,
|
| 1301 |
+
816,
|
| 1302 |
+
220
|
| 1303 |
+
],
|
| 1304 |
+
"page_idx": 8
|
| 1305 |
+
},
|
| 1306 |
+
{
|
| 1307 |
+
"type": "image",
|
| 1308 |
+
"img_path": "images/427c07185debc058c0d2a7a8c2b8d0770c608212d1fef8c75d8d279248e0b4a9.jpg",
|
| 1309 |
+
"image_caption": [],
|
| 1310 |
+
"image_footnote": [],
|
| 1311 |
+
"bbox": [
|
| 1312 |
+
174,
|
| 1313 |
+
241,
|
| 1314 |
+
267,
|
| 1315 |
+
313
|
| 1316 |
+
],
|
| 1317 |
+
"page_idx": 8
|
| 1318 |
+
},
|
| 1319 |
+
{
|
| 1320 |
+
"type": "image",
|
| 1321 |
+
"img_path": "images/dd5209bb53669907e4554571812c4424b4bbc0a5277d0a0a736e8fcdfc1ceaeb.jpg",
|
| 1322 |
+
"image_caption": [],
|
| 1323 |
+
"image_footnote": [],
|
| 1324 |
+
"bbox": [
|
| 1325 |
+
189,
|
| 1326 |
+
315,
|
| 1327 |
+
254,
|
| 1328 |
+
340
|
| 1329 |
+
],
|
| 1330 |
+
"page_idx": 8
|
| 1331 |
+
},
|
| 1332 |
+
{
|
| 1333 |
+
"type": "image",
|
| 1334 |
+
"img_path": "images/80e137f1740f1c273f9303e3f448bef0efe1e3eabc8f8f6097daa3c65e34756c.jpg",
|
| 1335 |
+
"image_caption": [],
|
| 1336 |
+
"image_footnote": [],
|
| 1337 |
+
"bbox": [
|
| 1338 |
+
274,
|
| 1339 |
+
241,
|
| 1340 |
+
364,
|
| 1341 |
+
313
|
| 1342 |
+
],
|
| 1343 |
+
"page_idx": 8
|
| 1344 |
+
},
|
| 1345 |
+
{
|
| 1346 |
+
"type": "image",
|
| 1347 |
+
"img_path": "images/e823d16c3d9ce1415dd6db79e6a1ad83e79dae39e31c3d7de6a4895cc3fbcd73.jpg",
|
| 1348 |
+
"image_caption": [
|
| 1349 |
+
"Similarity: 0.55"
|
| 1350 |
+
],
|
| 1351 |
+
"image_footnote": [],
|
| 1352 |
+
"bbox": [
|
| 1353 |
+
277,
|
| 1354 |
+
316,
|
| 1355 |
+
361,
|
| 1356 |
+
340
|
| 1357 |
+
],
|
| 1358 |
+
"page_idx": 8
|
| 1359 |
+
},
|
| 1360 |
+
{
|
| 1361 |
+
"type": "image",
|
| 1362 |
+
"img_path": "images/ed2a22d763090143f344051cdae605a1a3190e564c3c97d6e585f7ea7f0abe36.jpg",
|
| 1363 |
+
"image_caption": [],
|
| 1364 |
+
"image_footnote": [],
|
| 1365 |
+
"bbox": [
|
| 1366 |
+
366,
|
| 1367 |
+
241,
|
| 1368 |
+
452,
|
| 1369 |
+
313
|
| 1370 |
+
],
|
| 1371 |
+
"page_idx": 8
|
| 1372 |
+
},
|
| 1373 |
+
{
|
| 1374 |
+
"type": "image",
|
| 1375 |
+
"img_path": "images/1aac91c288dc17f1d8d2b0d65211927e28467da412358eb437f3deb8d58ec735.jpg",
|
| 1376 |
+
"image_caption": [
|
| 1377 |
+
"Similarity: 0.54"
|
| 1378 |
+
],
|
| 1379 |
+
"image_footnote": [],
|
| 1380 |
+
"bbox": [
|
| 1381 |
+
364,
|
| 1382 |
+
316,
|
| 1383 |
+
450,
|
| 1384 |
+
340
|
| 1385 |
+
],
|
| 1386 |
+
"page_idx": 8
|
| 1387 |
+
},
|
| 1388 |
+
{
|
| 1389 |
+
"type": "image",
|
| 1390 |
+
"img_path": "images/fb5e063f1f471ff2d35be297e519906cefb072975f887f0133e866244464cf2c.jpg",
|
| 1391 |
+
"image_caption": [],
|
| 1392 |
+
"image_footnote": [],
|
| 1393 |
+
"bbox": [
|
| 1394 |
+
454,
|
| 1395 |
+
241,
|
| 1396 |
+
540,
|
| 1397 |
+
313
|
| 1398 |
+
],
|
| 1399 |
+
"page_idx": 8
|
| 1400 |
+
},
|
| 1401 |
+
{
|
| 1402 |
+
"type": "image",
|
| 1403 |
+
"img_path": "images/554d6d4a45a472c2e9db86a0a546429e693e413ce0b4d5549a5a4c379a48374b.jpg",
|
| 1404 |
+
"image_caption": [
|
| 1405 |
+
"Similarity: 0.48",
|
| 1406 |
+
"Figure 5: Two example of linker prediction. Atoms highlighted in red are predicted linkers."
|
| 1407 |
+
],
|
| 1408 |
+
"image_footnote": [],
|
| 1409 |
+
"bbox": [
|
| 1410 |
+
452,
|
| 1411 |
+
315,
|
| 1412 |
+
521,
|
| 1413 |
+
345
|
| 1414 |
+
],
|
| 1415 |
+
"page_idx": 8
|
| 1416 |
+
},
|
| 1417 |
+
{
|
| 1418 |
+
"type": "image",
|
| 1419 |
+
"img_path": "images/15015b03f6b0b237cd7122336644d82c3b5f082ce245846548894ea36119d9ea.jpg",
|
| 1420 |
+
"image_caption": [],
|
| 1421 |
+
"image_footnote": [],
|
| 1422 |
+
"bbox": [
|
| 1423 |
+
542,
|
| 1424 |
+
241,
|
| 1425 |
+
632,
|
| 1426 |
+
313
|
| 1427 |
+
],
|
| 1428 |
+
"page_idx": 8
|
| 1429 |
+
},
|
| 1430 |
+
{
|
| 1431 |
+
"type": "image",
|
| 1432 |
+
"img_path": "images/b5d41ab07404c8977de24ad2dff95ce4fd04cdecd47c4059e3184315b57fddd2.jpg",
|
| 1433 |
+
"image_caption": [
|
| 1434 |
+
"Similarity: 0.41"
|
| 1435 |
+
],
|
| 1436 |
+
"image_footnote": [],
|
| 1437 |
+
"bbox": [
|
| 1438 |
+
542,
|
| 1439 |
+
314,
|
| 1440 |
+
609,
|
| 1441 |
+
345
|
| 1442 |
+
],
|
| 1443 |
+
"page_idx": 8
|
| 1444 |
+
},
|
| 1445 |
+
{
|
| 1446 |
+
"type": "image",
|
| 1447 |
+
"img_path": "images/70e49c2dffe2b77f6172157a7fcb7a712cc1f4db8ae2c9712996242f38807df3.jpg",
|
| 1448 |
+
"image_caption": [],
|
| 1449 |
+
"image_footnote": [],
|
| 1450 |
+
"bbox": [
|
| 1451 |
+
633,
|
| 1452 |
+
241,
|
| 1453 |
+
723,
|
| 1454 |
+
313
|
| 1455 |
+
],
|
| 1456 |
+
"page_idx": 8
|
| 1457 |
+
},
|
| 1458 |
+
{
|
| 1459 |
+
"type": "image",
|
| 1460 |
+
"img_path": "images/7c30f3a24ab3a9b780659a679ee787acc33b02ec41e8b71317937da29636e32a.jpg",
|
| 1461 |
+
"image_caption": [
|
| 1462 |
+
"Similarity: 0.37"
|
| 1463 |
+
],
|
| 1464 |
+
"image_footnote": [],
|
| 1465 |
+
"bbox": [
|
| 1466 |
+
633,
|
| 1467 |
+
316,
|
| 1468 |
+
717,
|
| 1469 |
+
340
|
| 1470 |
+
],
|
| 1471 |
+
"page_idx": 8
|
| 1472 |
+
},
|
| 1473 |
+
{
|
| 1474 |
+
"type": "image",
|
| 1475 |
+
"img_path": "images/2c1453ec90c6251d52d69bbef977f23909cb27237d844c99abb9bb0a688384ac.jpg",
|
| 1476 |
+
"image_caption": [],
|
| 1477 |
+
"image_footnote": [],
|
| 1478 |
+
"bbox": [
|
| 1479 |
+
730,
|
| 1480 |
+
241,
|
| 1481 |
+
823,
|
| 1482 |
+
313
|
| 1483 |
+
],
|
| 1484 |
+
"page_idx": 8
|
| 1485 |
+
},
|
| 1486 |
+
{
|
| 1487 |
+
"type": "image",
|
| 1488 |
+
"img_path": "images/8d42fc6aadb579901a65c1782c5c35630b7cd1e4e0e57c97be6013f633221c6a.jpg",
|
| 1489 |
+
"image_caption": [],
|
| 1490 |
+
"image_footnote": [],
|
| 1491 |
+
"bbox": [
|
| 1492 |
+
736,
|
| 1493 |
+
315,
|
| 1494 |
+
816,
|
| 1495 |
+
345
|
| 1496 |
+
],
|
| 1497 |
+
"page_idx": 8
|
| 1498 |
+
},
|
| 1499 |
+
{
|
| 1500 |
+
"type": "text",
|
| 1501 |
+
"text": "Metrics We assess the generated molecules from fragments with four main metrics: (1) Similarity: We use Tanimoto Similarity [32, 3] over Morgan fingerprints [14] to measure the similarity between the molecular graphs of generated molecule and the reference molecule. (2) Percentage of Recovered Molecules: We say a test molecule is recovered if the model is able to generate a molecule that perfectly matches it (Similarity = 1.0). We calculate the percentage of test molecules that are recovered by the model. (3) Binding Affinity: We use Vina [1, 33] to compute the the generated molecules' binding affinity to the target.",
|
| 1502 |
+
"bbox": [
|
| 1503 |
+
169,
|
| 1504 |
+
419,
|
| 1505 |
+
823,
|
| 1506 |
+
516
|
| 1507 |
+
],
|
| 1508 |
+
"page_idx": 8
|
| 1509 |
+
},
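The recovery metric reduces to a simple check over per-target similarity lists. A sketch; the data layout here is assumed for illustration, not prescribed by the paper.

```python
def recovery_rate(per_target_sims):
    """per_target_sims: one list of generated-vs-reference Tanimoto
    similarities per test molecule; recovered means a perfect match."""
    recovered = sum(1 for sims in per_target_sims if max(sims) >= 1.0)
    return 100.0 * recovered / len(per_target_sims)
```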
|
| 1510 |
+
{
|
| 1511 |
+
"type": "text",
|
| 1512 |
+
"text": "Results For each data point, we use our model and DeLinker to generate 100 molecules. We first calculate the average similarity for each data point and report their overall mean and median values. Then, we calculate the percentage of test molecules that are successfully recovered by the model. Finally, we use Vina to evaluate the generated molecules' binding affinity. These results are summarized in Table 2. As shown in the table, when measured by Vina score, our proposed method's performance is on par with the graph-based baseline DeLinker. However, our method clearly outperforms DeLinker on Similarity and Percentage of Recovery, suggesting that our method is able to link fragments in a more realistic way. In addition, we present two examples along with 5 generated molecules at different similarities in Figure 5. The example demonstrates the model's ability to generate suitable linkers.",
|
| 1513 |
+
"bbox": [
|
| 1514 |
+
169,
|
| 1515 |
+
530,
|
| 1516 |
+
823,
|
| 1517 |
+
670
|
| 1518 |
+
],
|
| 1519 |
+
"page_idx": 8
|
| 1520 |
+
},
|
| 1521 |
+
{
|
| 1522 |
+
"type": "text",
|
| 1523 |
+
"text": "5 Conclusions and Discussions",
|
| 1524 |
+
"text_level": 1,
|
| 1525 |
+
"bbox": [
|
| 1526 |
+
171,
|
| 1527 |
+
689,
|
| 1528 |
+
444,
|
| 1529 |
+
704
|
| 1530 |
+
],
|
| 1531 |
+
"page_idx": 8
|
| 1532 |
+
},
|
| 1533 |
+
{
|
| 1534 |
+
"type": "text",
|
| 1535 |
+
"text": "In this paper, we propose a new approach to structure-based drug design. In specific, we design a 3D generative model that estimates the probability density of atom's occurrences in 3D space and formulate an auto-regressive sampling algorithm. Combined with the sampling algorithm, the model is able to generate drug-like molecules for specific binding sites. By conducting extensive experiments, we demonstrate our model's effectiveness in designing molecules for specific targets. Though our proposed method achieves reasonable performance in structure-based molecule design, there is no guarantee that the model always generates valid molecules successfully. To build a more robust and useful model, we can consider incorporating graph representations to building 3D molecules as future work, such that we can leverage on sophisticated techniques for generating valid molecular graphs such as valency check [35] and property optimization [14].",
|
| 1536 |
+
"bbox": [
|
| 1537 |
+
169,
|
| 1538 |
+
720,
|
| 1539 |
+
826,
|
| 1540 |
+
859
|
| 1541 |
+
],
|
| 1542 |
+
"page_idx": 8
|
| 1543 |
+
},
|
| 1544 |
+
{
|
| 1545 |
+
"type": "page_number",
|
| 1546 |
+
"text": "9",
|
| 1547 |
+
"bbox": [
|
| 1548 |
+
493,
|
| 1549 |
+
935,
|
| 1550 |
+
503,
|
| 1551 |
+
946
|
| 1552 |
+
],
|
| 1553 |
+
"page_idx": 8
|
| 1554 |
+
},
|
| 1555 |
+
{
|
| 1556 |
+
"type": "text",
|
| 1557 |
+
"text": "References",
|
| 1558 |
+
"text_level": 1,
|
| 1559 |
+
"bbox": [
|
| 1560 |
+
173,
|
| 1561 |
+
89,
|
| 1562 |
+
269,
|
| 1563 |
+
106
|
| 1564 |
+
],
|
| 1565 |
+
"page_idx": 9
|
| 1566 |
+
},
|
| 1567 |
+
{
|
| 1568 |
+
"type": "list",
|
| 1569 |
+
"sub_type": "ref_text",
|
| 1570 |
+
"list_items": [
|
| 1571 |
+
"[1] Amr Alhossary, Stephanus Daniel Handoko, Yuguang Mu, and Chee-Keong Kwoh. Fast, accurate, and reliable molecular docking with quickvina 2. Bioinformatics, 31(13):2214-2216, 2015.",
|
| 1572 |
+
"[2] Amy C. Anderson. The process of structure-based drug design. Chemistry & Biology, 10(9): 787-797, 2003. ISSN 1074-5521. doi: https://doi.org/10.1016/j.chembiol.2003.09.002. URL https://www.sciencedirect.com/science/article/pii/S1074552103001947.",
|
| 1573 |
+
"[3] Dávid Bajusz, Anita Rácz, and Károly Héberger. Why is tanimoto index an appropriate choice for fingerprint-based similarity calculations? Journal of cheminformatics, 7(1):1-13, 2015.",
|
| 1574 |
+
"[4] G Richard Bickerton, Gaia V Paolini, Jérémy Besnard, Sorel Muresan, and Andrew L Hopkins. Quantifying the chemical beauty of drugs. Nature chemistry, 4(2):90–98, 2012.",
|
| 1575 |
+
"[5] Esben Jannik Bjerrum and Richard Threlfall. Molecular generation with recurrent neural networks (rnns). arXiv preprint arXiv:1705.04612, 2017.",
|
| 1576 |
+
"[6] Peter Ertl and Ansgar Schuffenhauer. Estimation of synthetic accessibility score of drug-like molecules based on molecular complexity and fragment contributions. Journal of cheminformatics, 1(1):1-11, 2009.",
|
| 1577 |
+
"[7] Paul G Francoeur, Tomohide Masuda, Jocelyn Sunseri, Andrew Jia, Richard B Iovanisci, Ian Snyder, and David R Koes. Three-dimensional convolutional neural networks and a cross-docked data set for structure-based drug design. Journal of Chemical Information and Modeling, 60(9):4200-4215, 2020.",
|
| 1578 |
+
"[8] Niklas WA Gebauer, Michael Gastegger, and Kristof T Schütt. Symmetry-adapted generation of 3d point sets for the targeted discovery of molecules. arXiv preprint arXiv:1906.00957, 2019.",
|
| 1579 |
+
"[9] Ian J Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial networks. arXiv preprint arXiv:1406.2661, 2014.",
|
| 1580 |
+
"[10] Rafael Gómez-Bombarelli, Jennifer N Wei, David Duvenaud, José Miguel Hernández-Lobato, Benjamín Sánchez-Lengeling, Dennis Sheberla, Jorge Aguilera-Iparraguirre, Timothy D Hirzel, Ryan P Adams, and Alán Aspuru-Guzik. Automatic Chemical Design Using a Data-Driven Continuous Representation of Molecules. ACS Central Science, 4(2):268–276, 2018. ISSN 2374-7943. doi: 10.1021/acscentsci.7b00572.",
|
| 1581 |
+
"[11] Paul CD Hawkins. Conformation generation: the state of the art. Journal of Chemical Information and Modeling, 57(8):1747-1756, 2017.",
|
| 1582 |
+
"[12] Fergus Imrie, Anthony R Bradley, Mihaela van der Schaar, and Charlotte M Deane. Deep generative models for 3d linker design. Journal of chemical information and modeling, 60(4): 1983-1995, 2020.",
|
| 1583 |
+
"[13] Wengong Jin, Regina Barzilay, and Tommi Jaakkola. Junction tree variational autoencoder for molecular graph generation. In International Conference on Machine Learning, pages 2323-2332. PMLR, 2018.",
|
| 1584 |
+
"[14] Wengong Jin, Regina Barzilay, and Tommi Jaakkola. Composing molecules with multiple property constraints. arXiv preprint arXiv:2002.03244, 2020.",
|
| 1585 |
+
"[15] Wengong Jin, Jeremy Wohlwend, Regina Barzilay, and Tommi Jaakkola. Iterative refinement graph neural network for antibody sequence-structure co-design. arXiv preprint arXiv:2110.04624, 2021.",
|
| 1586 |
+
"[16] Diederik P Kingma and Max Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013.",
|
| 1587 |
+
"[17] Matt J Kusner, Brooks Paige, and José Miguel Hernández-Lobato. Grammar variational autoencoder. In International Conference on Machine Learning, pages 1945-1954. PMLR, 2017."
|
| 1588 |
+
],
|
| 1589 |
+
"bbox": [
|
| 1590 |
+
173,
|
| 1591 |
+
112,
|
| 1592 |
+
826,
|
| 1593 |
+
910
|
| 1594 |
+
],
|
| 1595 |
+
"page_idx": 9
|
| 1596 |
+
},
|
| 1597 |
+
{
|
| 1598 |
+
"type": "page_number",
|
| 1599 |
+
"text": "10",
|
| 1600 |
+
"bbox": [
|
| 1601 |
+
490,
|
| 1602 |
+
935,
|
| 1603 |
+
508,
|
| 1604 |
+
946
|
| 1605 |
+
],
|
| 1606 |
+
"page_idx": 9
|
| 1607 |
+
},
|
| 1608 |
+
{
|
| 1609 |
+
"type": "list",
|
| 1610 |
+
"sub_type": "ref_text",
|
| 1611 |
+
"list_items": [
|
| 1612 |
+
"[18] Yujia Li, Oriol Vinyals, Chris Dyer, Razvan Pascanu, and Peter Battaglia. Learning deep generative models of graphs. arXiv preprint arXiv:1803.03324, 2018.",
|
| 1613 |
+
"[19] Qi Liu, Miltiadis Allamanis, Marc Brockschmidt, and Alexander L Gaunt. Constrained graph variational autoencoders for molecule design. arXiv preprint arXiv:1805.09076, 2018.",
|
| 1614 |
+
"[20] Tomohide Masuda, Matthew Ragoza, and David Ryan Koes. Generating 3d molecular structures conditional on a receptor binding site with deep generative models. arXiv preprint arXiv:2010.14442, 2020.",
|
| 1615 |
+
"[21] Noel M O'Boyle, Michael Banck, Craig A James, Chris Morley, Tim Vandermeersch, and Geoffrey R Hutchison. Open babel: An open chemical toolbox. Journal of cheminformatics, 3 (1):1-14, 2011.",
|
| 1616 |
+
"[22] Pavel G Polishchuk, Timur I Madzhidov, and Alexandre Varnek. Estimation of the size of drug-like chemical space based on gdb-17 data. Journal of computer-aided molecular design, 27(8):675-679, 2013.",
|
| 1617 |
+
"[23] Matthew Ragoza, Tomohide Masuda, and David Ryan Koes. Learning a continuous representation of 3d molecular structures with deep generative models. arXiv preprint arXiv:2010.08687, 2020.",
|
| 1618 |
+
"[24] Anthony K Rappe, Carla J Casewit, KS Colwell, William A Goddard III, and W Mason Skiff. Uff, a full periodic table force field for molecular mechanics and molecular dynamics simulations. Journal of the American chemical society, 114(25):10024-10035, 1992.",
|
| 1619 |
+
"[25] Kristof T Schütt, PJ Kindermans, Huziel E Sauceda, Stefan Chmiela, Alexandre Tkatchenko, and Klaus R Müller. Schnet: A continuous-filter convolutional neural network for modeling quantum interactions. In 31st Conference on Neural Information Processing Systems (NIPS 2017), Long Beach, CA, USA, pages 1-11, 2017.",
|
| 1620 |
+
"[26] Marwin HS Segler, Thierry Kogej, Christian Tyrchan, and Mark P Waller. Generating focused molecule libraries for drug discovery with recurrent neural networks. ACS central science, 4(1): 120-131, 2018.",
|
| 1621 |
+
"[27] Chence Shi, Minkai Xu, Zhaocheng Zhu, Weinan Zhang, Ming Zhang, and Jian Tang. Graphaf: a flow-based autoregressive model for molecular graph generation. arXiv preprint arXiv:2001.09382, 2020.",
|
| 1622 |
+
"[28] Gregor Simm, Robert Pinsler, and José Miguel Hernández-Lobato. Reinforcement learning for molecular design guided by quantum mechanics. In International Conference on Machine Learning, pages 8959-8969. PMLR, 2020.",
|
| 1623 |
+
"[29] Gregor NC Simm, Robert Pinsler, Gábor Csányi, and José Miguel Hernández-Lobato. Symmetry-aware actor-critic for 3d molecular design. arXiv preprint arXiv:2011.12747, 2020.",
|
| 1624 |
+
"[30] Miha Skalic, José Jiménez, Davide Sabbadin, and Gianni De Fabritiis. Shape-based generative modeling for de novo drug design. Journal of chemical information and modeling, 59(3): 1205-1214, 2019.",
|
| 1625 |
+
"[31] Martin Steinegger and Johannes Söding. Mmseqs2 enables sensitive protein sequence searching for the analysis of massive data sets. Nature biotechnology, 35(11):1026-1028, 2017.",
|
| 1626 |
+
"[32] Taffee T Tanimoto. Elementary mathematical theory of classification and prediction. 1958.",
|
| 1627 |
+
"[33] Oleg Trot and Arthur J Olson. Autodock vina: improving the speed and accuracy of docking with a new scoring function, efficient optimization, and multithreading. Journal of computational chemistry, 31(2):455-461, 2010.",
|
| 1628 |
+
"[34] David Weininger. Smiles, a chemical language and information system. 1. introduction to methodology and encoding rules. Journal of chemical information and computer sciences, 28 (1):31-36, 1988.",
|
| 1629 |
+
"[35] Jiaxuan You, Bowen Liu, Rex Ying, Vijay Pande, and Jure Leskovec. Graph convolutional policy network for goal-directed molecular graph generation. arXiv preprint arXiv:1806.02473, 2018."
|
| 1630 |
+
],
|
| 1631 |
+
"bbox": [
|
| 1632 |
+
173,
|
| 1633 |
+
90,
|
| 1634 |
+
826,
|
| 1635 |
+
910
|
| 1636 |
+
],
|
| 1637 |
+
"page_idx": 10
|
| 1638 |
+
},
|
| 1639 |
+
{
|
| 1640 |
+
"type": "page_number",
|
| 1641 |
+
"text": "11",
|
| 1642 |
+
"bbox": [
|
| 1643 |
+
490,
|
| 1644 |
+
935,
|
| 1645 |
+
506,
|
| 1646 |
+
946
|
| 1647 |
+
],
|
| 1648 |
+
"page_idx": 10
|
| 1649 |
+
},
|
| 1650 |
+
{
|
| 1651 |
+
"type": "text",
|
| 1652 |
+
"text": "Supplementary Material",
|
| 1653 |
+
"text_level": 1,
|
| 1654 |
+
"bbox": [
|
| 1655 |
+
171,
|
| 1656 |
+
87,
|
| 1657 |
+
424,
|
| 1658 |
+
108
|
| 1659 |
+
],
|
| 1660 |
+
"page_idx": 11
|
| 1661 |
+
},
|
| 1662 |
+
{
|
| 1663 |
+
"type": "text",
|
| 1664 |
+
"text": "A Additional Results",
|
| 1665 |
+
"text_level": 1,
|
| 1666 |
+
"bbox": [
|
| 1667 |
+
171,
|
| 1668 |
+
125,
|
| 1669 |
+
366,
|
| 1670 |
+
142
|
| 1671 |
+
],
|
| 1672 |
+
"page_idx": 11
|
| 1673 |
+
},
|
| 1674 |
+
{
|
| 1675 |
+
"type": "text",
|
| 1676 |
+
"text": "A.1 Molecule Design",
|
| 1677 |
+
"text_level": 1,
|
| 1678 |
+
"bbox": [
|
| 1679 |
+
171,
|
| 1680 |
+
156,
|
| 1681 |
+
333,
|
| 1682 |
+
171
|
| 1683 |
+
],
|
| 1684 |
+
"page_idx": 11
|
| 1685 |
+
},
|
| 1686 |
+
{
|
| 1687 |
+
"type": "text",
|
| 1688 |
+
"text": "We present more examples of generated molecules by our method and the CNN baseline liGAN. We select 6 molecules with highest binding affinity for each method and each binding site. The 3 additional binding sites are selected randomly from the testing set. By comparing the samples from two methods, we can find that the 3D molecules generated by our method are generally more realistic, while molecules generated by the baseline have more erroneous structures, such as bonds that are too short and angles that are too sharp. Besides, molecules generated by our method are more diverse, while the 3D atom configurations generated by the baseline are often similar. More importantly, our model can generate novel molecules that are obviously different from the reference molecule and achieve higher binding affinity. To summarize, these examples evidence the proposed model's good performance in terms of drug-likeness, diversity, and binding affinity.",
|
| 1689 |
+
"bbox": [
|
| 1690 |
+
169,
|
| 1691 |
+
181,
|
| 1692 |
+
826,
|
| 1693 |
+
321
|
| 1694 |
+
],
|
| 1695 |
+
"page_idx": 11
|
| 1696 |
+
},
|
| 1697 |
+
{
|
| 1698 |
+
"type": "image",
|
| 1699 |
+
"img_path": "images/53ac65bdeec8f6ac95a8c08730bc7fae2a47cb5c33bbdf7edee2d72b37ae40b7.jpg",
|
| 1700 |
+
"image_caption": [
|
| 1701 |
+
"Ours (3li4)"
|
| 1702 |
+
],
|
| 1703 |
+
"image_footnote": [],
|
| 1704 |
+
"bbox": [
|
| 1705 |
+
212,
|
| 1706 |
+
347,
|
| 1707 |
+
784,
|
| 1708 |
+
460
|
| 1709 |
+
],
|
| 1710 |
+
"page_idx": 11
|
| 1711 |
+
},
|
| 1712 |
+
{
|
| 1713 |
+
"type": "image",
|
| 1714 |
+
"img_path": "images/bb46393dac347fb68674fc753bc53c04ccb5a3a63945116b3a03ef0f8c01d4f3.jpg",
|
| 1715 |
+
"image_caption": [
|
| 1716 |
+
"liGAN (3li4)"
|
| 1717 |
+
],
|
| 1718 |
+
"image_footnote": [],
|
| 1719 |
+
"bbox": [
|
| 1720 |
+
212,
|
| 1721 |
+
474,
|
| 1722 |
+
784,
|
| 1723 |
+
587
|
| 1724 |
+
],
|
| 1725 |
+
"page_idx": 11
|
| 1726 |
+
},
|
| 1727 |
+
{
|
| 1728 |
+
"type": "image",
|
| 1729 |
+
"img_path": "images/3e7e51a113a6b1013f56abc67d396b86300db34540ff0137b9ae11a4a051baf8.jpg",
|
| 1730 |
+
"image_caption": [
|
| 1731 |
+
"Ours (2hcj)"
|
| 1732 |
+
],
|
| 1733 |
+
"image_footnote": [],
|
| 1734 |
+
"bbox": [
|
| 1735 |
+
212,
|
| 1736 |
+
623,
|
| 1737 |
+
784,
|
| 1738 |
+
736
|
| 1739 |
+
],
|
| 1740 |
+
"page_idx": 11
|
| 1741 |
+
},
|
| 1742 |
+
{
|
| 1743 |
+
"type": "image",
|
| 1744 |
+
"img_path": "images/a39f8d176498a4052aa7785b4a04741a7c3251f87d3e26b94c600f19d6c061d7.jpg",
|
| 1745 |
+
"image_caption": [
|
| 1746 |
+
"liGAN (2hcj)"
|
| 1747 |
+
],
|
| 1748 |
+
"image_footnote": [],
|
| 1749 |
+
"bbox": [
|
| 1750 |
+
212,
|
| 1751 |
+
752,
|
| 1752 |
+
784,
|
| 1753 |
+
862
|
| 1754 |
+
],
|
| 1755 |
+
"page_idx": 11
|
| 1756 |
+
},
|
| 1757 |
+
{
|
| 1758 |
+
"type": "page_number",
|
| 1759 |
+
"text": "12",
|
| 1760 |
+
"bbox": [
|
| 1761 |
+
490,
|
| 1762 |
+
935,
|
| 1763 |
+
509,
|
| 1764 |
+
946
|
| 1765 |
+
],
|
| 1766 |
+
"page_idx": 11
|
| 1767 |
+
},
|
| 1768 |
+
{
|
| 1769 |
+
"type": "image",
|
| 1770 |
+
"img_path": "images/6091bcfa0eaf8d5bda127fa680726877cb1603aa1c6699169865c1abe02e9ab5.jpg",
|
| 1771 |
+
"image_caption": [
|
| 1772 |
+
"Ours (4q8b)"
|
| 1773 |
+
],
|
| 1774 |
+
"image_footnote": [],
|
| 1775 |
+
"bbox": [
|
| 1776 |
+
212,
|
| 1777 |
+
109,
|
| 1778 |
+
782,
|
| 1779 |
+
349
|
| 1780 |
+
],
|
| 1781 |
+
"page_idx": 12
|
| 1782 |
+
},
|
| 1783 |
+
{
|
| 1784 |
+
"type": "image",
|
| 1785 |
+
"img_path": "images/8a1c0bf6228606d145adf87af44cb33fce38867695ad740f335a3c749d0068c8.jpg",
|
| 1786 |
+
"image_caption": [
|
| 1787 |
+
"Ours (4ru)"
|
| 1788 |
+
],
|
| 1789 |
+
"image_footnote": [],
|
| 1790 |
+
"bbox": [
|
| 1791 |
+
212,
|
| 1792 |
+
386,
|
| 1793 |
+
782,
|
| 1794 |
+
496
|
| 1795 |
+
],
|
| 1796 |
+
"page_idx": 12
|
| 1797 |
+
},
|
| 1798 |
+
{
|
| 1799 |
+
"type": "image",
|
| 1800 |
+
"img_path": "images/52b17e34c1208ffebb9791ca953d5572c24353e492ae76f5ee3b9c4ecfe1961d.jpg",
|
| 1801 |
+
"image_caption": [
|
| 1802 |
+
"liGAN (4rlu)"
|
| 1803 |
+
],
|
| 1804 |
+
"image_footnote": [],
|
| 1805 |
+
"bbox": [
|
| 1806 |
+
212,
|
| 1807 |
+
513,
|
| 1808 |
+
782,
|
| 1809 |
+
625
|
| 1810 |
+
],
|
| 1811 |
+
"page_idx": 12
|
| 1812 |
+
},
|
| 1813 |
+
{
|
| 1814 |
+
"type": "image",
|
| 1815 |
+
"img_path": "images/cc24f1d663caabb2000129ad906b7717b44197f4f6850cc3265d204bcbc3cb39.jpg",
|
| 1816 |
+
"image_caption": [
|
| 1817 |
+
"Ours (3b6h)"
|
| 1818 |
+
],
|
| 1819 |
+
"image_footnote": [],
|
| 1820 |
+
"bbox": [
|
| 1821 |
+
212,
|
| 1822 |
+
661,
|
| 1823 |
+
782,
|
| 1824 |
+
772
|
| 1825 |
+
],
|
| 1826 |
+
"page_idx": 12
|
| 1827 |
+
},
|
| 1828 |
+
{
|
| 1829 |
+
"type": "image",
|
| 1830 |
+
"img_path": "images/068d12e81f5ccafcbaebe060f60b257201f89b212f56ea99e1a9981fe2159e6f.jpg",
|
| 1831 |
+
"image_caption": [
|
| 1832 |
+
"liGAN (3b6h)"
|
| 1833 |
+
],
|
| 1834 |
+
"image_footnote": [],
|
| 1835 |
+
"bbox": [
|
| 1836 |
+
212,
|
| 1837 |
+
790,
|
| 1838 |
+
782,
|
| 1839 |
+
900
|
| 1840 |
+
],
|
| 1841 |
+
"page_idx": 12
|
| 1842 |
+
},
|
| 1843 |
+
{
|
| 1844 |
+
"type": "page_number",
|
| 1845 |
+
"text": "13",
|
| 1846 |
+
"bbox": [
|
| 1847 |
+
490,
|
| 1848 |
+
935,
|
| 1849 |
+
508,
|
| 1850 |
+
946
|
| 1851 |
+
],
|
| 1852 |
+
"page_idx": 12
|
| 1853 |
+
},
|
| 1854 |
+
{
|
| 1855 |
+
"type": "text",
|
| 1856 |
+
"text": "A.2 Linker Prediction",
|
| 1857 |
+
"text_level": 1,
|
| 1858 |
+
"bbox": [
|
| 1859 |
+
171,
|
| 1860 |
+
90,
|
| 1861 |
+
339,
|
| 1862 |
+
104
|
| 1863 |
+
],
|
| 1864 |
+
"page_idx": 13
|
| 1865 |
+
},
|
| 1866 |
+
{
|
| 1867 |
+
"type": "text",
|
| 1868 |
+
"text": "We present more examples of linker prediction. Since the baseline model DeLinker is graph-based and does not generate 3D linker structures, to make the results of both methods visually comparable, we only show 2D molecular graphs. We randomly selected 5 cases from the testing set. For each case, we select 5 representative molecules, including the molecule with best similarity, the molecule with worst similarity, and 3 molecules between them. These examples evidence that our method is generally more likely to produce linkers that recover or resemble the original structure.",
|
| 1869 |
+
"bbox": [
|
| 1870 |
+
169,
|
| 1871 |
+
116,
|
| 1872 |
+
826,
|
| 1873 |
+
202
|
| 1874 |
+
],
|
| 1875 |
+
"page_idx": 13
|
| 1876 |
+
},
|
| 1877 |
+
{
|
| 1878 |
+
"type": "table",
|
| 1879 |
+
"img_path": "images/caead07026010b2c295a4e5aac787e5329a2f4eca525f3b76407deaa6433f17b.jpg",
|
| 1880 |
+
"table_caption": [],
|
| 1881 |
+
"table_footnote": [],
|
| 1882 |
+
"table_body": "<table><tr><td></td><td>Fragments</td><td colspan=\"5\">Predicted</td><td>Reference</td></tr><tr><td rowspan=\"5\">4qik</td><td rowspan=\"5\"></td><td colspan=\"5\">Ours</td><td rowspan=\"5\"></td></tr><tr><td>Sim: 1.00</td><td>Sim: 0.97</td><td>Sim: 0.83</td><td>Sim: 0.64</td><td>Sim: 0.63</td></tr><tr><td colspan=\"5\">DeLinker</td></tr><tr><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Sim: 0.99</td><td>Sim: 0.66</td><td>Sim: 0.58</td><td>Sim: 0.50</td><td>Sim: 0.42</td></tr><tr><td rowspan=\"5\">3ym</td><td rowspan=\"5\"></td><td colspan=\"5\">Ours</td><td rowspan=\"5\"></td></tr><tr><td>Sim: 0.55</td><td>Sim: 0.54</td><td>Sim: 0.48</td><td>Sim: 0.41</td><td>Sim: 0.38</td></tr><tr><td colspan=\"5\">DeLinker</td></tr><tr><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Sim: 0.58</td><td>Sim: 0.47</td><td>Sim: 0.44</td><td>Sim: 0.39</td><td>Sim: 0.36</td></tr><tr><td rowspan=\"5\">3nf</td><td rowspan=\"5\"></td><td colspan=\"5\">Ours</td><td rowspan=\"5\"></td></tr><tr><td>Sim: 1.00</td><td>Sim: 0.91</td><td>Sim: 0.87</td><td>Sim: 0.85</td><td>Sim: 0.79</td></tr><tr><td colspan=\"5\">DeLinker</td></tr><tr><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Sim: 0.90</td><td>Sim: 0.57</td><td>Sim: 0.56</td><td>Sim: 0.52</td><td>Sim: 0.45</td></tr><tr><td rowspan=\"5\">4xi</td><td rowspan=\"5\"></td><td colspan=\"5\">Ours</td><td rowspan=\"5\"></td></tr><tr><td>Sim: 1.00</td><td>Sim: 0.57</td><td>Sim: 0.57</td><td>Sim: 0.56</td><td>Sim: 0.56</td></tr><tr><td colspan=\"5\">DeLinker</td></tr><tr><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Sim: 0.71</td><td>Sim: 0.57</td><td>Sim: 0.57</td><td>Sim: 0.56</td><td>Sim: 0.56</td></tr><tr><td rowspan=\"5\">4m7t</td><td rowspan=\"5\"></td><td colspan=\"5\">Ours</td><td rowspan=\"5\"></td></tr><tr><td>Sim: 0.84</td><td>Sim: 0.79</td><td>Sim: 0.67</td><td>Sim: 0.64</td><td>Sim: 0.63</td></tr><tr><td colspan=\"5\">DeLinker</td></tr><tr><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Sim: 0.67</td><td>Sim: 0.58</td><td>Sim: 0.56</td><td>Sim: 0.53</td><td>Sim: 0.50</td></tr></table>",
|
| 1883 |
+
"bbox": [
|
| 1884 |
+
207,
|
| 1885 |
+
214,
|
| 1886 |
+
790,
|
| 1887 |
+
845
|
| 1888 |
+
],
|
| 1889 |
+
"page_idx": 13
|
| 1890 |
+
},
|
| 1891 |
+
{
|
| 1892 |
+
"type": "page_number",
|
| 1893 |
+
"text": "14",
|
| 1894 |
+
"bbox": [
|
| 1895 |
+
490,
|
| 1896 |
+
935,
|
| 1897 |
+
508,
|
| 1898 |
+
946
|
| 1899 |
+
],
|
| 1900 |
+
"page_idx": 13
|
| 1901 |
+
},
|
| 1902 |
+
{
|
| 1903 |
+
"type": "text",
|
| 1904 |
+
"text": "B Additional Model Details",
|
| 1905 |
+
"text_level": 1,
|
| 1906 |
+
"bbox": [
|
| 1907 |
+
171,
|
| 1908 |
+
89,
|
| 1909 |
+
419,
|
| 1910 |
+
104
|
| 1911 |
+
],
|
| 1912 |
+
"page_idx": 14
|
| 1913 |
+
},
|
| 1914 |
+
{
|
| 1915 |
+
"type": "text",
|
| 1916 |
+
"text": "B.1 Sampling Algorithm",
|
| 1917 |
+
"text_level": 1,
|
| 1918 |
+
"bbox": [
|
| 1919 |
+
171,
|
| 1920 |
+
119,
|
| 1921 |
+
359,
|
| 1922 |
+
135
|
| 1923 |
+
],
|
| 1924 |
+
"page_idx": 14
|
| 1925 |
+
},
|
| 1926 |
+
{
|
| 1927 |
+
"type": "text",
|
| 1928 |
+
"text": "At the first step of molecule generation, there is no placed atoms in the binding site. To sample the first atom, we use Metropolis-Hasting algorithm to draw samples from the marginal distribution $p(\\boldsymbol{r}|\\mathcal{C}) = \\sum_{e}p(e,\\boldsymbol{r}|\\mathcal{C})$ and select coordinate-element pairs that have highest joint probability. We draw 1,000 initial samples from the Gaussian mixture model defined on the coordinates of protein atoms, whose standard deviation is $1\\AA$ . The proposal distribution is a Gaussian with $0.1\\AA$ standard deviation, and the total number of steps is 500.",
|
| 1929 |
+
"bbox": [
|
| 1930 |
+
169,
|
| 1931 |
+
145,
|
| 1932 |
+
823,
|
| 1933 |
+
231
|
| 1934 |
+
],
|
| 1935 |
+
"page_idx": 14
|
| 1936 |
+
},
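A minimal NumPy sketch of this Metropolis-Hastings step under the stated settings (1,000 chains, 500 steps, 1 Å initialization, 0.1 Å proposals); `log_p` stands for the unnormalized log marginal and is an illustrative name, not the authors' API.

```python
import numpy as np

def mh_sample(log_p, protein_coords, n_chains=1000, n_steps=500,
              init_sigma=1.0, prop_sigma=0.1, rng=np.random):
    # Initialize chains from a GMM centered on protein atoms (std 1 A).
    idx = rng.randint(len(protein_coords), size=n_chains)
    r = protein_coords[idx] + init_sigma * rng.standard_normal((n_chains, 3))
    logp = np.array([log_p(x) for x in r])
    for _ in range(n_steps):
        prop = r + prop_sigma * rng.standard_normal((n_chains, 3))
        logp_prop = np.array([log_p(x) for x in prop])
        # Symmetric proposal: accept with probability min(1, p'/p).
        accept = np.log(rng.random(n_chains)) < (logp_prop - logp)
        r[accept], logp[accept] = prop[accept], logp_prop[accept]
    return r, logp  # keep the highest-probability samples downstream
```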
|
| 1937 |
+
{
|
| 1938 |
+
"type": "text",
|
| 1939 |
+
"text": "If there are previously placed atoms in the binding site, to accelerate sampling and make full use of model parallelism, we discretize the 3D space onto meshgrids. The resolution of the meshgrid is $0.1\\AA$ . We only discretize the space where the radial distance to some frontier atom ranges from $1.0\\AA$ to $2.0\\AA$ in order to save memory. Note that frontier atoms are predicted by the frontier network. Then, we evaluate the non-normalized joint probabilities on the meshgrid and use softmax to normalize them. Finally, we draw coordinate-element pairs from the normalized probability.",
|
| 1940 |
+
"bbox": [
|
| 1941 |
+
169,
|
| 1942 |
+
237,
|
| 1943 |
+
826,
|
| 1944 |
+
325
|
| 1945 |
+
],
|
| 1946 |
+
"page_idx": 14
|
| 1947 |
+
},
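A sketch of this discretization, assuming the frontier coordinates are given as a (K, 3) array; the 1.0-2.0 Å shell restriction matters because the full grid over the bounding box grows quickly in memory.

```python
import numpy as np

def shell_grid(frontier_coords, res=0.1, r_min=1.0, r_max=2.0):
    """0.1 A grid points within 1.0-2.0 A of at least one frontier atom."""
    lo = frontier_coords.min(0) - r_max
    hi = frontier_coords.max(0) + r_max
    axes = [np.arange(a, b, res) for a, b in zip(lo, hi)]
    grid = np.stack(np.meshgrid(*axes, indexing="ij"), -1).reshape(-1, 3)
    # Distance from every grid point to every frontier atom.
    d = np.linalg.norm(grid[:, None, :] - frontier_coords[None], axis=-1)
    return grid[((d >= r_min) & (d <= r_max)).any(axis=1)]

def normalize(scores):
    """Numerically stable softmax over unnormalized log-probabilities."""
    z = np.exp(scores - scores.max())
    return z / z.sum()
```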
|
| 1948 |
+
{
|
| 1949 |
+
"type": "text",
|
| 1950 |
+
"text": "We use the beam search technique to generate 100 different molecules for each binding site, and we set the beam width to 300.",
|
| 1951 |
+
"bbox": [
|
| 1952 |
+
169,
|
| 1953 |
+
330,
|
| 1954 |
+
823,
|
| 1955 |
+
359
|
| 1956 |
+
],
|
| 1957 |
+
"page_idx": 14
|
| 1958 |
+
},
|
| 1959 |
+
{
|
| 1960 |
+
"type": "text",
|
| 1961 |
+
"text": "B.2 Hyperparameters",
|
| 1962 |
+
"text_level": 1,
|
| 1963 |
+
"bbox": [
|
| 1964 |
+
171,
|
| 1965 |
+
375,
|
| 1966 |
+
341,
|
| 1967 |
+
390
|
| 1968 |
+
],
|
| 1969 |
+
"page_idx": 14
|
| 1970 |
+
},
|
| 1971 |
+
{
|
| 1972 |
+
"type": "text",
|
| 1973 |
+
"text": "The hyperparameters are shared across both molecule design and linker prediction tasks. For the context encoder, the neighborhood size of $k$ -NN graphs is 48, the number of message passing layers $L$ is 6, and the dimension of hidden features $h_{i}^{(\\ell)}$ is 256. For the spatial classifier, the dimension of $v$ is 128, and the number of aggregated nodes is 32. We train the model using the Adam optimizer at learning rate 0.0001. The batch size is 4 and the number of training iterations is 1.5 million, which takes about 2 days on GPU.",
|
| 1974 |
+
"bbox": [
|
| 1975 |
+
169,
|
| 1976 |
+
400,
|
| 1977 |
+
823,
|
| 1978 |
+
488
|
| 1979 |
+
],
|
| 1980 |
+
"page_idx": 14
|
| 1981 |
+
},
|
| 1982 |
+
{
|
| 1983 |
+
"type": "page_number",
|
| 1984 |
+
"text": "15",
|
| 1985 |
+
"bbox": [
|
| 1986 |
+
490,
|
| 1987 |
+
935,
|
| 1988 |
+
508,
|
| 1989 |
+
946
|
| 1990 |
+
],
|
| 1991 |
+
"page_idx": 14
|
| 1992 |
+
}
|
| 1993 |
+
]
|
2203.10xxx/2203.10446/d7999c52-f0e4-4d71-8708-fdcdae51a890_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2203.10xxx/2203.10446/d7999c52-f0e4-4d71-8708-fdcdae51a890_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e8a57aacae637971b7e3befcd0ad78dc60ba955827797b8738793ed4871eded9
|
| 3 |
+
size 5842277
|
2203.10xxx/2203.10446/full.md
ADDED
|
@@ -0,0 +1,374 @@
|
| 1 |
+
# A 3D Generative Model for Structure-Based Drug Design
|
| 2 |
+
|
| 3 |
+
# Shitong Luo
|
| 4 |
+
|
| 5 |
+
Helixon Research luost@helixon.com luost26@gmail.com
|
| 6 |
+
|
| 7 |
+
# Jianzhu Ma
|
| 8 |
+
|
| 9 |
+
Peking University majianzhu@pku.edu.cn
|
| 10 |
+
|
| 11 |
+
# Jiaqi Guan
|
| 12 |
+
|
| 13 |
+
University of Illinois Urbana-Champaign jiaqi@illinois.edu
|
| 14 |
+
|
| 15 |
+
# Jian Peng
|
| 16 |
+
|
| 17 |
+
University of Illinois Urbana-Champaign jianpeng@illinois.edu
|
| 18 |
+
|
| 19 |
+
# Abstract
|
| 20 |
+
|
| 21 |
+
We study a fundamental problem in structure-based drug design — generating molecules that bind to specific protein binding sites. While we have witnessed the great success of deep generative models in drug design, the existing methods are mostly string-based or graph-based. They are limited by the lack of spatial information and thus unable to be applied to structure-based design tasks. Particularly, such models have little or no knowledge of how molecules interact with their target proteins exactly in 3D space. In this paper, we propose a 3D generative model that generates molecules given a designated 3D protein binding site. Specifically, given a binding site as the 3D context, our model estimates the probability density of atoms' occurrences in 3D space — positions that are more likely to have atoms will be assigned higher probability. To generate 3D molecules, we propose an auto-regressive sampling scheme — atoms are sampled sequentially from the learned distribution until there is no room for new atoms. Combined with this sampling scheme, our model can generate valid and diverse molecules, which could be applicable to various structure-based molecular design tasks such as molecule sampling and linker design. Experimental results demonstrate that molecules sampled from our model exhibit high binding affinity to specific targets and good drug properties such as drug-likeness even if the model is not explicitly optimized for them.
|
| 22 |
+
|
| 23 |
+
# 1 Introduction
|
| 24 |
+
|
| 25 |
+
Designing molecules that bind to a specific protein binding site, also known as structure-based drug design, is one of the most challenging tasks in drug discovery [2]. Searching for suitable molecule candidates in silico usually involves massive computational efforts because of the enormous space of synthetically feasible chemicals [22] and conformational degree of freedom of both compound and protein structures [11].
|
| 26 |
+
|
| 27 |
+
In recent years, we have witnessed the success of machine learning approaches to problems in drug design, especially on molecule generation. Most of these approaches use deep generative models to propose drug candidates by learning the underlying distribution of desirable molecules. However, most of such methods are generally SMILES/string-based [10, 17] or graph-based [18, 19, 13, 14]. They are limited by the lack of spatial information and unable to perceive how molecules interact with proteins in 3D space. Hence, these methods are not applicable to generating molecules that fit to a specific protein structure which is also known as the drug target. Another line of work studies
|
| 28 |
+
|
| 29 |
+
generating molecules directly in 3D space [8, 28, 29, 20, 30, 15]. Most of them [8, 28, 29] can only handle very small organic molecules, not sufficient to generate drug-scale molecules which usually contain dozens of heavy atoms. [20] proposes to generate voxelized molecular images and use a post-processing algorithm to reconstruct molecular structures. Though this method could produce drug-scale molecules for specific protein pockets, the quality of the sampling is heavily limited by voxelization. Therefore, generating high-quality drug molecules for specific 3D protein binding sites remains challenging.
|
| 30 |
+
|
| 31 |
+
In this work, we propose a 3D generative model to approach this task. Specifically, we aim at modeling the distribution of atom occurrence in the 3D space of the binding site. Formally, given a binding site $\mathcal{C}$ as input, we model the distribution $p(e,r|\mathcal{C})$ , where $\boldsymbol{r} \in \mathbb{R}^3$ is an arbitrary 3D coordinate and $e$ is atom type. To realize this distribution, we design a neural network architecture which takes as input a query 3D coordinate $\boldsymbol{r}$ , conditional on the 3D context $\mathcal{C}$ , and outputs the probability of $\boldsymbol{r}$ being occupied by an atom of a particular chemical element. In order to ensure the distribution is equivariant to $\mathcal{C}$ 's rotation and translation, we utilize rotationally invariant graph neural networks to perceive the context of each query coordinate.
|
| 32 |
+
|
| 33 |
+
Despite having a neural network to model the distribution of atom occurrence $p(e, \boldsymbol{r} | \mathcal{C})$ , how to generate valid and diverse molecules still remains technically challenging, mainly for the following two reasons: First, simply drawing i.i.d. samples from the distribution $p(e, \boldsymbol{r} | \mathcal{C})$ does not yield valid molecules because atoms within a molecule are not independent of each other. Second, a desirable sampling algorithm should capture the multi-modality of the feasible chemical space, i.e. it should be able to generate a diverse set of desired molecules given a specific binding context. To tackle the challenge, we propose an auto-regressive sampling algorithm. In specific, we start with a context consisting of only protein atoms. Then, we iteratively sample one atom from the distribution at each step and add it to the context to be used in the next step, until there is no room for new atoms. Compared to other recent methods [20, 23], our auto-regressive algorithm is simpler and more advantageous. It does not rely on post-processing algorithms to infer atom placements from density. More importantly, it is capable of multi-modal sampling by the nature of auto-regressive, avoiding additional latent variables via VAEs [16] or GANs [9] which would bring about extra architectural complexity and training difficulty.
|
| 34 |
+
|
| 35 |
+
We conduct extensive experiments to evaluate our approach. Quantitative and qualitative results show that: (1) our method is able to generate diverse drug-like molecules that have high binding affinity to specific targets based on 3D structures of protein binding sites; (2) our method is able to generate molecules with fairly high drug-likeness score (QED) [4] and synthetic accessibility score (SA) [6] even if the model is not specifically optimized for them; (3) in addition to molecule generation, the proposed method is also applicable to other relevant tasks such as linker design.
|
| 36 |
+
|
| 37 |
+
# 2 Related Work
|
| 38 |
+
|
| 39 |
+
SMILES-Based and Graph-Based Molecule Generation Deep generative models have been prevalent in molecule design. The overall idea is to use deep generative models to propose molecule candidates by learning the underlying distribution of desirable molecules. Existing works can be roughly divided into two classes — string-based and graph-based. String-based methods represent molecules as linear strings, e.g. SMILES strings [34], making a wide range of language modeling tools readily applicable. For example, [5, 10, 26] utilize recurrent neural networks to learn a language model of SMILES strings. However, string-based representations fail to capture molecular similarities, making it a sub-optimal representation for molecules [13]. In contrast, graph representations are more natural, and graph-based approaches have drawn great attention. The majority of graph-based models generate molecules in an auto-regressive fashion, i.e., adding atoms or fragments sequentially, which could be implemented based upon VAEs [13], normalizing flows [27], reinforcement learning [35, 14], etc. Despite the progress made in string-based and graph-based approaches, they are limited by the lack of spatial information and thus unable to be directly applied to structure-based drug design tasks [2]. Specifically, as 1D/2D-based methods, they are unable to perceive how molecules interact with their target proteins exactly in 3D space.
|
| 40 |
+
|
| 41 |
+
Molecule Generation in 3D Space There has been another line of methods that generate molecules directly in 3D space. [8] proposes an auto-regressive model which takes a partially generated molecule as input and outputs the next atom's chemical element and the distances to previous atoms and places
|
| 42 |
+
|
| 43 |
+
the atoms in the 3D space according to the distance constraints. [28, 29] approach this task via reinforcement learning by generating 3D molecules in a sequential way. Different from the previous method[8], they mainly rely on a reward function derived from the potential energy function of atomic systems. These works could generate realistic 3D molecules. However, they can only handle small organic molecules, not sufficient to generate drug-scale molecules which usually contain dozens of heavy atoms.
|
| 44 |
+
|
| 45 |
+
[20, 23] propose a non-autoregressive approach to 3D molecular generation which is able to generate drug-scale molecules. It represents molecules as 3D images by voxelizing molecules onto 3D meshgrids. In this way, the molecular generation problem is transformed into an image generation problem, making it possible to leverage sophisticated image generation techniques. In specific, it employs convolutional neural network-based VAEs [16] or GANs [9] to generate such molecular images. It also attempts to fuse the binding site structures into the generative network, enabling the model to generate molecules for designated binding targets. In order to reconstruct the molecular structures from images, it leverages a post-processing algorithm to search for atom placements that best fit the image. In comparison to previous methods which can only generate small 3D molecules, this method can generate drug-scale 3D molecules. However, the quality of its generated molecules is not satisfying because of the following major limitations. First, it is hardly scalable to large binding pockets, as the number of voxels grows cubically to the size of the binding site. Second, the resolution of the 3D molecular images is another bottleneck that significantly limits the precision due to the same scalability issue. Last, conventional CNNs are not rotation-equivariant, which is crucial for modeling molecular systems [25].
|
| 46 |
+
|
| 47 |
+
# 3 Method
|
| 48 |
+
|
| 49 |
+
Our goal is to generate a set of atoms that is able to form a valid drug-like molecule fitting to a specific binding site. To this end, we first present a 3D generative model in Section 3.1 that predicts the probability of atom occurrence in 3D space of the binding site. Second, we present in Section 3.2 the auto-regressive sampling algorithm for generating valid and multi-modal molecules from the model. Finally, in Section 3.3, we derive the training objective, by which the model learns to predict where should be placed and atoms and what type of atom should be placed.
|
| 50 |
+
|
| 51 |
+
# 3.1 3D Generative Model Design
|
| 52 |
+
|
| 53 |
+
A binding site can be defined as a set of atoms $\mathcal{C} = \{(a_i, r_i)\}_{i=1}^{N_b}$ , where $N_b$ is the number of atoms in the binding site, $a_i$ is the $i$ -th atom's attributes such as chemical element, belonging amino acid, etc., and $r_i$ is its 3D coordinate. To generate atoms in the binding site, we consider modeling the probability of atom occurring at some position $r$ in the site. Formally, this is to model the density $p(e|r,\mathcal{C})$ , where $r \in \mathbb{R}^3$ is an arbitrary 3D coordinate, and $e \in \mathcal{E} = \{\mathrm{H},\mathrm{C},\mathrm{O},\ldots\}$ is the chemical element. Intuitively, this density can be interpreted as a classifier that takes as input a 3D coordinate $r$ conditional on $\mathcal{C}$ and predicts the probability of $r$ being occupied by an atom of type $e$ .
|
| 54 |
+
|
| 55 |
+
To model $p(e|r,\mathcal{C})$ , we devise a model consisting of two parts: Context Encoder learns the representation of each atom in the context $\mathcal{C}$ via graph neural networks. Spatial Classifier takes as input a query position $\pmb{r}$ , then aggregates the representation of contextual atoms nearby it, and finally predicts $p(e|r,\mathcal{C})$ . The implementation of these two parts is detailed as follows.
|
| 56 |
+
|
| 57 |
+
Context Encoder The purpose of the context encoder is to extract information-rich representations for each atom in $\mathcal{C}$ . We assume a desirable representation should satisfy two properties: (1) context-awareness: the representation of an atom should not only encode the property of the atom itself, but also encode its context. (2) rotational and translational invariance: since the physical and biological properties of the system do not change according to rigid transforms, the representations that reflect these properties should be invariant to rigid transforms as well. To this end, we employ rotationally and translationally invariant graph neural networks [25] as the backbone of the context encoder, described as follows.
|
| 58 |
+
|
| 59 |
+
First of all, since there is generally no natural topology in $\mathcal{C}$ , we construct a $k$ -nearest-neighbor graph based on inter-atomic distances, denoted as $\mathcal{G} = \langle \mathcal{C}, \mathbf{A} \rangle$ , where $\mathbf{A}$ is the adjacency matrix. We also denote the $k$ -NN neighborhood of atom $i$ as $N_k(\mathbf{r}_i)$ for convenience. The context encoder will take $\mathcal{G}$ as input and output structure-aware node embeddings.
|
| 60 |
+
|
| 61 |
+

|
| 62 |
+
Figure 1: An illustration of the sampling process. Atoms are sampled sequentially. The probability density changes as we place new atoms. The sampling process naturally diverges, leading to different samples.
|
| 63 |
+
|
| 64 |
+
The first layer of the encoder is a linear layer. It maps atomic attributes $\{a_i\}$ to initial embeddings $\{h_i^{(0)}\}$ . Then, these embeddings along with the graph structure $A$ are fed into $L$ message passing layers. Specifically, the formula of message passing takes the form:
|
| 65 |
+
|
| 66 |
+
$$
|
| 67 |
+
\boldsymbol {h} _ {i} ^ {(\ell + 1)} = \sigma \left(\boldsymbol {W} _ {0} ^ {\ell} \boldsymbol {h} _ {i} ^ {(\ell)} + \sum_ {j \in N _ {k} (\boldsymbol {r} _ {i})} \boldsymbol {W} _ {1} ^ {\ell} \boldsymbol {w} \left(d _ {i j}\right) \odot \boldsymbol {W} _ {2} ^ {\ell} \boldsymbol {h} _ {j} ^ {(\ell)}\right), \tag {1}
|
| 68 |
+
$$
|
| 69 |
+
|
| 70 |
+
where $\boldsymbol{w}(\cdot)$ is a weight network and $d_{ij}$ denotes the distance between atom $i$ and atom $j$ . The formula is similar to continuous filter convolution [25]. Note that, the weight of message from $j$ to $i$ depends only on $d_{ij}$ , ensuring its invariance to rotation and translation. Finally, we obtain $\{\pmb{h}_i^{(L)}\}$ a set of embeddings for each atom in $\mathcal{C}$ .
|
| 71 |
+
|
| 72 |
+
Spatial Classifier The spatial classifier takes as input a query position $\boldsymbol{r} \in \mathbb{R}^3$ and predicts the type of atom occupying $\boldsymbol{r}$ . In order to make successful predictions, the model should be able to perceive the context around $\boldsymbol{r}$ . Therefore, the first step of this part is to aggregate atom embeddings from the context encoder:
|
| 73 |
+
|
| 74 |
+
$$
|
| 75 |
+
\boldsymbol {v} = \sum_ {j \in N _ {k} (\boldsymbol {r})} \boldsymbol {W} _ {0} \boldsymbol {w} _ {\text {a g g r}} \left(\| \boldsymbol {r} - \boldsymbol {r} _ {j} \|\right) \odot \boldsymbol {W} _ {1} \boldsymbol {h} _ {j} ^ {(L)}, \tag {2}
|
| 76 |
+
$$
|
| 77 |
+
|
| 78 |
+
where $N_{k}(\boldsymbol{r})$ is the $k$ -nearest neighborhood of $\boldsymbol{r}$ . Note that we weight different embedding using the weight network $\boldsymbol{w}_{\mathrm{aggr}}(\cdot)$ according to distances because it is necessary to distinguish the contribution of different atoms in the context. Finally, in order to predict $p(e|\boldsymbol{r},\mathcal{C})$ , the aggregated feature $\boldsymbol{v}$ is then passed to a classical multi-layer perceptron classifier:
|
| 79 |
+
|
| 80 |
+
$$
|
| 81 |
+
\boldsymbol {c} = \operatorname {M L P} (\boldsymbol {v}), \tag {3}
|
| 82 |
+
$$
|
| 83 |
+
|
| 84 |
+
where $c$ is the non-normalized probability of chemical elements. The estimated probability of position $r$ being occupied by atom of type $e$ is:
|
| 85 |
+
|
| 86 |
+
$$
|
| 87 |
+
p (e | \boldsymbol {r}, \mathcal {C}) = \frac {\exp (\boldsymbol {c} [ e ])}{1 + \sum_ {e ^ {\prime} \in \mathcal {E}} \exp (\boldsymbol {c} [ e ^ {\prime} ])}, \tag {4}
|
| 88 |
+
$$
|
| 89 |
+
|
| 90 |
+
where $\mathcal{E}$ is the set of possible chemical elements. Unlike typical classifiers that apply softmax to $c$ , we make use of the extra degree of freedom by adding 1 to the denominator, so that the probability of "nothing" can be expressed as:
|
| 91 |
+
|
| 92 |
+
$$
|
| 93 |
+
p (\text {N o t h i n g} | \boldsymbol {r}, \mathcal {C}) = \frac {1}{1 + \sum \exp \left(\boldsymbol {c} \left[ e ^ {\prime} \right]\right)}. \tag {5}
|
| 94 |
+
$$
|
| 95 |
+
|
| 96 |
+
# 3.2 Sampling
|
| 97 |
+
|
| 98 |
+
Sampling a molecule amounts to generating a set of atoms $\{(e_i, r_i)\}_{i=1}^{N_a}$ . However, formulating an effective sampling algorithm is non-trivial because of the following three challenges. First, we have to define the joint distribution of $e$ and $r$ , i.e. $p(e, r|C)$ , from which we can jointly sample an atom's
|
| 99 |
+
|
| 100 |
+
chemical element and its position. Second, notice that simply drawing i.i.d. samples from $p(e, \mathbf{r}|\mathcal{C})$ doesn't make sense because atoms are clearly not independent of each other. Thus, the sampling algorithm should be able to attend to the dependencies between atoms. Third, the sampling algorithm should produce multi-modal samples. This is important because in reality there is usually more than one molecule that can bind to a specific target.
|
| 101 |
+
|
| 102 |
+
In the following, we first define the joint distribution $p(e, \boldsymbol{r} | \mathcal{C})$ . Then, we present an auto-regressive sampling algorithm to tackle the second and the third challenges.
|
| 103 |
+
|
| 104 |
+
Joint Distribution We define the joint distribution of coordinate $\mathbf{r}$ and atom type $e$ using Eq.4:
|
| 105 |
+
|
| 106 |
+
$$
|
| 107 |
+
p (e, \boldsymbol {r} | \mathcal {C}) = \frac {\exp (\boldsymbol {c} [ e ])}{Z}, \tag {6}
|
| 108 |
+
$$
|
| 109 |
+
|
| 110 |
+
where $Z$ is an unknown normalizing constant and $c$ is a function of $r$ and $\mathcal{C}$ as defined in Eq.3. Though $p(e, r)$ is a non-normalized distribution, drawing samples from it would be efficient because the dimension of $r$ is only 3. Viable sampling methods include Markov chain Monte Carlo (MCMC) or discretization.
|
| 111 |
+
|
| 112 |
+
Auto-Regressive Sampling We sample a molecule by progressively sampling one atom at each step. In specific, at step $t$ , the context $C_t$ contains not only protein atoms but also $t$ atoms sampled beforehand. Sampled atoms in $C_t$ are treated equally as protein atoms in the model, but they have different attributes in order to differentiate themselves from protein atoms. Then, the $(t + 1)$ -th atom will be sampled from $p(e, r | C_t)$ and will be added to $C_t$ , leading to the context for next step $C_{t + 1}$ . The sampling process is illustrated in Figure 1. Formally, we have:
|
| 113 |
+
|
| 114 |
+
$$
|
| 115 |
+
\left(e _ {t + 1}, \boldsymbol {r} _ {t + 1}\right) \sim p (e, \boldsymbol {r} | \mathcal {C} _ {t}),
|
| 116 |
+
$$
|
| 117 |
+
|
| 118 |
+
$$
|
| 119 |
+
\mathcal {C} _ {t + 1} \leftarrow \mathcal {C} _ {t} \cup \{(e _ {t + 1}, \boldsymbol {r} _ {t + 1}) \}. \tag {7}
|
| 120 |
+
$$
|
| 121 |
+
|
| 122 |
+
To determine when the auto-regressive sampling should stop, we employ an auxiliary network. The network takes as input the embedding of previously sampled atoms, and classifies them into two categories: frontier and non-frontier. If all the existing atoms are non-frontier, which means there is no room for more atoms, the sampling will be terminated. Finally, we use OpenBabel [21, 20] to obtain bonds of generated structures.
|
| 123 |
+
|
| 124 |
+
In summary, the proposed auto-regressive algorithm succeeds to settle the aforementioned two challenges. First, the model is aware of other atoms when placing new atoms, thus being able to consider the dependencies between them. Second, auto-regressive sampling is a stochastic process. Its sampling path naturally diverges, leading to diverse samples.
|
| 125 |
+
|
| 126 |
+
# 3.3 Training
|
| 127 |
+
|
| 128 |
+
As we adopt auto-regressive sampling strategies, we propose a cloze-filling training scheme — at training time, a random portion of the target molecule is masked, and the network learns to predict the masked part from the observable part and the binding site. This emulates the sampling process where the model can only observe partial molecules. The training loss consists of three terms described below.
|
| 129 |
+
|
| 130 |
+
First, to make sure the model is able to predict positions that actually have atoms (positive positions), we include a binary cross entropy loss to contrast positive positions against negative positions:
|
| 131 |
+
|
| 132 |
+
$$
|
| 133 |
+
L _ {\mathrm {B C E}} = - \mathbb {E} _ {\boldsymbol {r} \sim p _ {+}} \left[ \log \left(1 - p (\text {N o t h i n g} | \boldsymbol {r}, \mathcal {C})\right) \right] - \mathbb {E} _ {\boldsymbol {r} \sim p _ {-}} \left[ \log p (\text {N o t h i n g} | \boldsymbol {r}, \mathcal {C}) \right]. \tag {8}
|
| 134 |
+
$$
|
| 135 |
+
|
| 136 |
+
Here, $p_{+}$ is a positive sampler that yields coordinates of masked atoms. $p_{-}$ is a negative sampler that yields random coordinates in the ambient space. $p_{-}$ is empirically defined as a Gaussian mixture model containing $|\mathcal{C}|$ components centered at each atom in $\mathcal{C}$ . The standard deviation of each component is set to $2\AA$ in order to cover the ambient space. Intuitively, the first term in Eq.8 increases the likelihood of atom placement for positions that should get an atom. The second term decreases the likelihood for other positions.
|
| 137 |
+
|
| 138 |
+
Second, our model should be able to predict the chemical element of atoms. Hence, we further include a standard categorical cross entropy loss:
|
| 139 |
+
|
| 140 |
+
$$
|
| 141 |
+
L _ {\mathrm {C A T}} = - \mathbb {E} _ {(e, \boldsymbol {r}) \sim p _ {+}} [ \log p (e | \boldsymbol {r}, \mathcal {C}) ]. \tag {9}
|
| 142 |
+
$$
|
| 143 |
+
|
| 144 |
+

|
| 145 |
+
Figure 2: (a) A portion of the molecule is masked. (b) Positive coordinates are drawn from the masked atoms' positions and negative coordinates are drawn from the ambient space. (c) Both positive and negative coordinates are fed into the model. The model predicts the probability of atom occurrence at the coordinates. (d) Training losses are computed based on the discrepancy between predicted probabilities and ground truth.
|
| 146 |
+
|
| 147 |
+
Third, as introduced in Section 3.2, the sampling algorithm requires a frontier network to tell whether the sampling should be terminated. This leads to the last term — a standard binary cross entropy loss for training the frontier network:
|
| 148 |
+
|
| 149 |
+
$$
|
| 150 |
+
L _ {\mathrm {F}} = \sum_ {i \in \mathcal {F} \subseteq \mathcal {C}} \log \sigma \left(F \left(\boldsymbol {h} _ {i}\right)\right) + \sum_ {i \notin \mathcal {F} \subseteq \mathcal {C}} \log \left(1 - \sigma \left(F \left(\boldsymbol {h} _ {i}\right)\right)\right), \tag {10}
|
| 151 |
+
$$
|
| 152 |
+
|
| 153 |
+
where $\mathcal{F}$ is the set of frontier atoms in $\mathcal{C}$ , $\sigma$ is the sigmoid function, and $F(\cdot)$ is the frontier network that takes atom embedding as input and predicts the logit probability of the atom being a frontier. During training, an atom is regarded as a frontier if and only if (1) the atom is a part of the target molecule, and (2) at least one of its bonded atom is masked.
|
| 154 |
+
|
| 155 |
+
Finally, by summing up $L_{\mathrm{BCE}}$ , $L_{\mathrm{CAT}}$ , and $L_{\mathrm{F}}$ , we obtain the full training loss $L = L_{\mathrm{BCE}} + L_{\mathrm{CAT}} + L_{\mathrm{F}}$ . The full training process is illustrated in Figure 2.
|
| 156 |
+
|
| 157 |
+
# 4 Experiments
|
| 158 |
+
|
| 159 |
+
We evaluate the proposed method on two relevant structure-based drug design tasks: (1) Molecule Design is to generate molecules for given binding sites (Section 4.1), and (2) Linker Prediction is to generate substructures to link two given fragments in the binding site. (Section 4.2). Below, we describe common setups shared across tasks. Detailed task-specific setups are provided in each subsection.
|
| 160 |
+
|
| 161 |
+
Data We use the CrossDocked dataset [7] following [20]. The dataset originally contains 22.5 million docked protein-ligand pairs at different levels of quality. We filter out data points whose binding pose RMSD is greater than $1\AA$ , leading to a refined subset consisting of 184,057 data points. We use mmseqs2 [31] to cluster data at $30\%$ sequence identity, and randomly draw 100,000 protein-ligand pairs for training and 100 proteins from remaining clusters for testing.
|
| 162 |
+
|
| 163 |
+
Model We trained a universal model for all the tasks. The number of message passing layers in context encoder $L$ is 6, and the hidden dimension is 256. We train the model using the Adam optimizer at learning rate 0.0001. Other details about model architectures and training parameters are provided in the supplementary material and the open source repository: https://github.com/luost26/3D-Generative-SBDD.
|
| 164 |
+
|
| 165 |
+
<table><tr><td colspan="2">Metric</td><td>liGAN</td><td>Ours</td><td>Ref</td></tr><tr><td rowspan="2">Vina Score (kcal/mol, ↓)</td><td>Avg.</td><td>-6.144</td><td>-6.344</td><td>-7.158</td></tr><tr><td>Med.</td><td>-6.100</td><td>-6.200</td><td>-6.950</td></tr><tr><td rowspan="2">QED (↑)</td><td>Avg.</td><td>0.371</td><td>0.525</td><td>0.484</td></tr><tr><td>Med.</td><td>0.369</td><td>0.519</td><td>0.469</td></tr><tr><td rowspan="2">SA (↑)</td><td>Avg.</td><td>0.591</td><td>0.657</td><td>0.733</td></tr><tr><td>Med.</td><td>0.570</td><td>0.650</td><td>0.745</td></tr><tr><td rowspan="2">High Affinity (%, ↑)</td><td>Avg.</td><td>23.77</td><td>29.09</td><td>-</td></tr><tr><td>Med.</td><td>11.00</td><td>18.50</td><td>-</td></tr><tr><td rowspan="2">Diversity (↑)</td><td>Avg.</td><td>0.655</td><td>0.720</td><td>-</td></tr><tr><td>Med.</td><td>0.676</td><td>0.736</td><td>-</td></tr></table>
|
| 166 |
+
|
| 167 |
+
Table 1: Mean and median values of the four metrics on generation quality. $(\uparrow)$ indicates higher is better. $(\downarrow)$ indicates lower is better.
|
| 168 |
+
|
| 169 |
+

|
| 170 |
+
Figure 3: Distributions of Vina, QED, and SA scores over all the generated molecules.
|
| 171 |
+
|
| 172 |
+
# 4.1 Molecule Design
|
| 173 |
+
|
| 174 |
+
In this task, we generate molecules for specific binding sites with our model and baselines. The input to models are binding sites extracted from the proteins in the testing set. We sample 100 unique molecules for each target.
|
| 175 |
+
|
| 176 |
+
Baselines We compare our approach with the state-of-the-art baseline liGAN [20]. liGAN is based on conventional 3D convolutional neural networks. It generates voxelized molecular images and relies on a post-processing algorithm to reconstruct the molecule from the generated image.
|
| 177 |
+
|
| 178 |
+
Metrics We evaluate the quality of generated molecules from three main aspects: (1) Binding Affinity measures how well the generated molecules fit the binding site. We use Vina [33, 1] to compute the binding affinity (Vina Score). Before feeding the molecules to Vina, we employ the universal force fields (UFF) [24] to refine the generated structures following [20]. (2) Drug Likeness reflects how much a molecule is like a drug. We use QED score [4] as the metric for drug-likeness. (3) Synthesizability assesses the ease of synthesis of generated molecules. We use normalized SA score [6, 35] to measure molecules' synthesizability.
|
| 179 |
+
|
| 180 |
+
In order to evaluate the generation quality and diversity for each binding site, we define two additional metrics: (1) Percentage of Samples with High Affinity, which measures the percentage of a binding site's generated molecules whose binding affinity is higher than or equal to the reference ligand. (2) Diversity [14], which measures the diversity of generated molecules for a binding site. It is calculated by averaging pairwise Tanimoto similarities [3, 32] over Morgan fingerprints among the generated molecules of a target.
|
| 181 |
+
|
| 182 |
+
Results We first calculate Vina Score, QED, and SA for each of the generated molecules. Figure 3 presents the histogram of these three metrics and Table 1 shows the mean and median values of them over all generated molecules. For each binding site, we further calculate Percentage of Samples with High Affinity and Diversity. We report their mean and median values in the bottom half of Table 1. From the quantitative results, we find that in general, our model is able to discover diverse molecules that have higher binding affinity to specific targets. Besides, the generated molecules from our model also exhibit other desirable properties including fairly high drug-likeness and synthesizeability. When compared to the CNN baseline liGAN [20], our method achieves clearly better performance on all metrics, especially on the drug-likeness score QED, which indicates that our model produces more realistic drug-like molecules.
|
| 183 |
+
|
| 184 |
+
To better understand the results, we select two binding sites in the testing set and visualize their top affinity samples for closer inspection. The top row of Figure 4 is the first example (PDB ID:2hcj). The average QED and SA scores of the generated molecules for this target are 0.483 and 0.663
|
| 185 |
+
|
| 186 |
+

|
| 187 |
+
Ours (2hcj)
|
| 188 |
+
Figure 4: Generated molecules with top binding affinity and the reference molecule for two representative binding sites. Lower Vina score indicates higher binding affinity.
|
| 189 |
+
|
| 190 |
+
respectively, around the median of these two scores. $8\%$ of the generated molecules have higher binding affinity than the reference molecule, below the median $18.5\%$ . The second example (PDB ID:4r1u) is shown in the bottom row. The average QED and SA scores are 0.728 and 0.785, and $18\%$ of sampled molecules achieve higher binding affinity. From these two examples in Figure 4, we can see that the generated molecules have overall structures similar to the reference molecule and they share some common important substructures, which indicates that the generated molecules fit into the binding site as well as the reference one. Besides, the top affinity molecules generally achieve QED and SA score comparable to or even higher than the reference molecule, which reflects that the top affinity molecules not only fit well into the binding site but also exhibit desirable quality. In conclusion, the above two representative cases evidence the model's ability to generate drug-like and high binding affinity molecules for designated targets.
|
| 191 |
+
|
| 192 |
+
# 4.2 Linker Prediction
|
| 193 |
+
|
| 194 |
+
Linker prediction is to build a molecule that incorporates two given disconnected fragments in the context of a binding site [12]. Our model is capable of linker design without any task-specific adaptation or re-training. In specific, given a binding site and some fragments as input, we compose the initial context $\mathcal{C}_0$ containing both the binding site and the fragments. Then, we run the auto-regressive sampling algorithm to sequentially add atoms until the molecule is comp
|
| 195 |
+
|
| 196 |
+
Table 2: Performance of linker prediction.
|
| 197 |
+
|
| 198 |
+
<table><tr><td colspan="2">Metric</td><td>DeLinker</td><td>Ours</td></tr><tr><td rowspan="2">Similarity (↑)</td><td>Avg.</td><td>0.612</td><td>0.701</td></tr><tr><td>Med.</td><td>0.600</td><td>0.722</td></tr><tr><td colspan="2">Recovered (%, ↑)</td><td>40.00</td><td>48.33</td></tr><tr><td rowspan="2">Vina Score (kcal/mol, ↓)</td><td>Avg.</td><td>-8.512</td><td>-8.603</td></tr><tr><td>Med.</td><td>-8.576</td><td>-8.575</td></tr></table>
|
| 199 |
+
|
| 200 |
+
Data Preparation Following [12], we construct fragments of molecules in the testing set by enumerating possible double-cuts of acyclic single bonds. The pre-processing results in 120 data points in total. Each of them consists of two disconnected molecule fragments.
|
| 201 |
+
|
| 202 |
+
Baselines We compare our model with DeLinker [12]. Despite that DeLinker incorporates some 3D information, it is still a graph-based generative model. In contrast, our method operates fully in 3D space and thus is able to fully utilize the 3D context.
|
| 203 |
+
|
| 204 |
+

|
| 205 |
+
Fragments
|
| 206 |
+
|
| 207 |
+

|
| 208 |
+
Predicted
|
| 209 |
+
|
| 210 |
+

|
| 211 |
+
|
| 212 |
+

|
| 213 |
+
|
| 214 |
+

|
| 215 |
+
|
| 216 |
+

|
| 217 |
+
|
| 218 |
+

|
| 219 |
+
Reference
|
| 220 |
+
|
| 221 |
+

|
| 222 |
+
|
| 223 |
+

|
| 224 |
+
Similarity: 1.00
|
| 225 |
+
|
| 226 |
+

|
| 227 |
+
Similarity: 0.91
|
| 228 |
+
Similarity: 0.87
|
| 229 |
+
|
| 230 |
+

|
| 231 |
+
|
| 232 |
+

|
| 233 |
+
Similarity: 0.85
|
| 234 |
+
|
| 235 |
+

|
| 236 |
+
Similarity: 0.79
|
| 237 |
+
|
| 238 |
+

|
| 239 |
+
|
| 240 |
+

|
| 241 |
+
|
| 242 |
+

|
| 243 |
+
|
| 244 |
+

|
| 245 |
+
|
| 246 |
+

|
| 247 |
+
Similarity: 0.55
|
| 248 |
+
|
| 249 |
+

|
| 250 |
+
|
| 251 |
+

|
| 252 |
+
Similarity: 0.54
|
| 253 |
+
|
| 254 |
+

|
| 255 |
+
|
| 256 |
+

|
| 257 |
+
Similarity: 0.48
|
| 258 |
+
Figure 5: Two example of linker prediction. Atoms highlighted in red are predicted linkers.
|
| 259 |
+
|
| 260 |
+

|
| 261 |
+
|
| 262 |
+

|
| 263 |
+
Similarity: 0.41
|
| 264 |
+
|
| 265 |
+

|
| 266 |
+
|
| 267 |
+

|
| 268 |
+
Similarity: 0.37
|
| 269 |
+
|
| 270 |
+

|
| 271 |
+
|
| 272 |
+

|
| 273 |
+
|
| 274 |
+
Metrics We assess the generated molecules from fragments with four main metrics: (1) Similarity: We use Tanimoto Similarity [32, 3] over Morgan fingerprints [14] to measure the similarity between the molecular graphs of generated molecule and the reference molecule. (2) Percentage of Recovered Molecules: We say a test molecule is recovered if the model is able to generate a molecule that perfectly matches it (Similarity = 1.0). We calculate the percentage of test molecules that are recovered by the model. (3) Binding Affinity: We use Vina [1, 33] to compute the the generated molecules' binding affinity to the target.
|
| 275 |
+
|
| 276 |
+
Results For each data point, we use our model and DeLinker to generate 100 molecules. We first calculate the average similarity for each data point and report their overall mean and median values. Then, we calculate the percentage of test molecules that are successfully recovered by the model. Finally, we use Vina to evaluate the generated molecules' binding affinity. These results are summarized in Table 2. As shown in the table, when measured by Vina score, our proposed method's performance is on par with the graph-based baseline DeLinker. However, our method clearly outperforms DeLinker on Similarity and Percentage of Recovery, suggesting that our method is able to link fragments in a more realistic way. In addition, we present two examples along with 5 generated molecules at different similarities in Figure 5. The example demonstrates the model's ability to generate suitable linkers.
|
| 277 |
+
|
| 278 |
+
# 5 Conclusions and Discussions
|
| 279 |
+
|
| 280 |
+
In this paper, we propose a new approach to structure-based drug design. In specific, we design a 3D generative model that estimates the probability density of atom's occurrences in 3D space and formulate an auto-regressive sampling algorithm. Combined with the sampling algorithm, the model is able to generate drug-like molecules for specific binding sites. By conducting extensive experiments, we demonstrate our model's effectiveness in designing molecules for specific targets. Though our proposed method achieves reasonable performance in structure-based molecule design, there is no guarantee that the model always generates valid molecules successfully. To build a more robust and useful model, we can consider incorporating graph representations to building 3D molecules as future work, such that we can leverage on sophisticated techniques for generating valid molecular graphs such as valency check [35] and property optimization [14].
|
| 281 |
+
|
| 282 |
+
# References
|
| 283 |
+
|
| 284 |
+
[1] Amr Alhossary, Stephanus Daniel Handoko, Yuguang Mu, and Chee-Keong Kwoh. Fast, accurate, and reliable molecular docking with quickvina 2. Bioinformatics, 31(13):2214-2216, 2015.
|
| 285 |
+
[2] Amy C. Anderson. The process of structure-based drug design. Chemistry & Biology, 10(9): 787-797, 2003. ISSN 1074-5521. doi: https://doi.org/10.1016/j.chembiol.2003.09.002. URL https://www.sciencedirect.com/science/article/pii/S1074552103001947.
|
| 286 |
+
[3] Dávid Bajusz, Anita Rácz, and Károly Héberger. Why is tanimoto index an appropriate choice for fingerprint-based similarity calculations? Journal of cheminformatics, 7(1):1-13, 2015.
|
| 287 |
+
[4] G Richard Bickerton, Gaia V Paolini, Jérémy Besnard, Sorel Muresan, and Andrew L Hopkins. Quantifying the chemical beauty of drugs. Nature chemistry, 4(2):90–98, 2012.
|
| 288 |
+
[5] Esben Jannik Bjerrum and Richard Threlfall. Molecular generation with recurrent neural networks (rnns). arXiv preprint arXiv:1705.04612, 2017.
|
| 289 |
+
[6] Peter Ertl and Ansgar Schuffenhauer. Estimation of synthetic accessibility score of drug-like molecules based on molecular complexity and fragment contributions. Journal of cheminformatics, 1(1):1-11, 2009.
|
| 290 |
+
[7] Paul G Francoeur, Tomohide Masuda, Jocelyn Sunseri, Andrew Jia, Richard B Iovanisci, Ian Snyder, and David R Koes. Three-dimensional convolutional neural networks and a cross-docked data set for structure-based drug design. Journal of Chemical Information and Modeling, 60(9):4200-4215, 2020.
|
| 291 |
+
[8] Niklas WA Gebauer, Michael Gastegger, and Kristof T Schütt. Symmetry-adapted generation of 3d point sets for the targeted discovery of molecules. arXiv preprint arXiv:1906.00957, 2019.
|
| 292 |
+
[9] Ian J Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial networks. arXiv preprint arXiv:1406.2661, 2014.
|
| 293 |
+
[10] Rafael Gómez-Bombarelli, Jennifer N Wei, David Duvenaud, José Miguel Hernández-Lobato, Benjamín Sánchez-Lengeling, Dennis Sheberla, Jorge Aguilera-Iparraguirre, Timothy D Hirzel, Ryan P Adams, and Alán Aspuru-Guzik. Automatic Chemical Design Using a Data-Driven Continuous Representation of Molecules. ACS Central Science, 4(2):268–276, 2018. ISSN 2374-7943. doi: 10.1021/acscentsci.7b00572.
|
| 294 |
+
[11] Paul CD Hawkins. Conformation generation: the state of the art. Journal of Chemical Information and Modeling, 57(8):1747-1756, 2017.
|
| 295 |
+
[12] Fergus Imrie, Anthony R Bradley, Mihaela van der Schaar, and Charlotte M Deane. Deep generative models for 3d linker design. Journal of chemical information and modeling, 60(4): 1983-1995, 2020.
|
| 296 |
+
[13] Wengong Jin, Regina Barzilay, and Tommi Jaakkola. Junction tree variational autoencoder for molecular graph generation. In International Conference on Machine Learning, pages 2323-2332. PMLR, 2018.
|
| 297 |
+
[14] Wengong Jin, Regina Barzilay, and Tommi Jaakkola. Composing molecules with multiple property constraints. arXiv preprint arXiv:2002.03244, 2020.
|
| 298 |
+
[15] Wengong Jin, Jeremy Wohlwend, Regina Barzilay, and Tommi Jaakkola. Iterative refinement graph neural network for antibody sequence-structure co-design. arXiv preprint arXiv:2110.04624, 2021.
|
| 299 |
+
[16] Diederik P Kingma and Max Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013.
|
| 300 |
+
[17] Matt J Kusner, Brooks Paige, and José Miguel Hernández-Lobato. Grammar variational autoencoder. In International Conference on Machine Learning, pages 1945-1954. PMLR, 2017.
|
| 301 |
+
|
| 302 |
+
[18] Yujia Li, Oriol Vinyals, Chris Dyer, Razvan Pascanu, and Peter Battaglia. Learning deep generative models of graphs. arXiv preprint arXiv:1803.03324, 2018.
|
| 303 |
+
[19] Qi Liu, Miltiadis Allamanis, Marc Brockschmidt, and Alexander L Gaunt. Constrained graph variational autoencoders for molecule design. arXiv preprint arXiv:1805.09076, 2018.
|
| 304 |
+
[20] Tomohide Masuda, Matthew Ragoza, and David Ryan Koes. Generating 3d molecular structures conditional on a receptor binding site with deep generative models. arXiv preprint arXiv:2010.14442, 2020.
|
| 305 |
+
[21] Noel M O'Boyle, Michael Banck, Craig A James, Chris Morley, Tim Vandermeersch, and Geoffrey R Hutchison. Open babel: An open chemical toolbox. Journal of cheminformatics, 3 (1):1-14, 2011.
|
| 306 |
+
[22] Pavel G Polishchuk, Timur I Madzhidov, and Alexandre Varnek. Estimation of the size of drug-like chemical space based on gdb-17 data. Journal of computer-aided molecular design, 27(8):675-679, 2013.
|
| 307 |
+
[23] Matthew Ragoza, Tomohide Masuda, and David Ryan Koes. Learning a continuous representation of 3d molecular structures with deep generative models. arXiv preprint arXiv:2010.08687, 2020.
|
| 308 |
+
[24] Anthony K Rappe, Carla J Casewit, KS Colwell, William A Goddard III, and W Mason Skiff. Uff, a full periodic table force field for molecular mechanics and molecular dynamics simulations. Journal of the American chemical society, 114(25):10024-10035, 1992.
|
| 309 |
+
[25] Kristof T Schütt, PJ Kindermans, Huziel E Sauceda, Stefan Chmiela, Alexandre Tkatchenko, and Klaus R Müller. Schnet: A continuous-filter convolutional neural network for modeling quantum interactions. In 31st Conference on Neural Information Processing Systems (NIPS 2017), Long Beach, CA, USA, pages 1-11, 2017.
|
| 310 |
+
[26] Marwin HS Segler, Thierry Kogej, Christian Tyrchan, and Mark P Waller. Generating focused molecule libraries for drug discovery with recurrent neural networks. ACS central science, 4(1): 120-131, 2018.
|
| 311 |
+
[27] Chence Shi, Minkai Xu, Zhaocheng Zhu, Weinan Zhang, Ming Zhang, and Jian Tang. Graphaf: a flow-based autoregressive model for molecular graph generation. arXiv preprint arXiv:2001.09382, 2020.
|
| 312 |
+
[28] Gregor Simm, Robert Pinsler, and José Miguel Hernández-Lobato. Reinforcement learning for molecular design guided by quantum mechanics. In International Conference on Machine Learning, pages 8959-8969. PMLR, 2020.
|
| 313 |
+
[29] Gregor NC Simm, Robert Pinsler, Gábor Csányi, and José Miguel Hernández-Lobato. Symmetry-aware actor-critic for 3d molecular design. arXiv preprint arXiv:2011.12747, 2020.
|
| 314 |
+
[30] Miha Skalic, José Jiménez, Davide Sabbadin, and Gianni De Fabritiis. Shape-based generative modeling for de novo drug design. Journal of chemical information and modeling, 59(3): 1205-1214, 2019.
|
| 315 |
+
[31] Martin Steinegger and Johannes Söding. Mmseqs2 enables sensitive protein sequence searching for the analysis of massive data sets. Nature biotechnology, 35(11):1026-1028, 2017.
|
| 316 |
+
[32] Taffee T Tanimoto. Elementary mathematical theory of classification and prediction. 1958.
|
| 317 |
+
[33] Oleg Trot and Arthur J Olson. Autodock vina: improving the speed and accuracy of docking with a new scoring function, efficient optimization, and multithreading. Journal of computational chemistry, 31(2):455-461, 2010.
|
| 318 |
+
[34] David Weininger. Smiles, a chemical language and information system. 1. introduction to methodology and encoding rules. Journal of chemical information and computer sciences, 28 (1):31-36, 1988.
|
| 319 |
+
[35] Jiaxuan You, Bowen Liu, Rex Ying, Vijay Pande, and Jure Leskovec. Graph convolutional policy network for goal-directed molecular graph generation. arXiv preprint arXiv:1806.02473, 2018.
|
| 320 |
+
|
| 321 |
+
# Supplementary Material
|
| 322 |
+
|
| 323 |
+
# A Additional Results
|
| 324 |
+
|
| 325 |
+
# A.1 Molecule Design
|
| 326 |
+
|
| 327 |
+
We present more examples of generated molecules by our method and the CNN baseline liGAN. We select 6 molecules with highest binding affinity for each method and each binding site. The 3 additional binding sites are selected randomly from the testing set. By comparing the samples from two methods, we can find that the 3D molecules generated by our method are generally more realistic, while molecules generated by the baseline have more erroneous structures, such as bonds that are too short and angles that are too sharp. Besides, molecules generated by our method are more diverse, while the 3D atom configurations generated by the baseline are often similar. More importantly, our model can generate novel molecules that are obviously different from the reference molecule and achieve higher binding affinity. To summarize, these examples evidence the proposed model's good performance in terms of drug-likeness, diversity, and binding affinity.
|
| 328 |
+
|
| 329 |
+

|
| 330 |
+
Ours (3li4)
|
| 331 |
+
|
| 332 |
+

|
| 333 |
+
liGAN (3li4)
|
| 334 |
+
|
| 335 |
+

|
| 336 |
+
Ours (2hcj)
|
| 337 |
+
|
| 338 |
+

|
| 339 |
+
liGAN (2hcj)
|
| 340 |
+
|
| 341 |
+

|
| 342 |
+
Ours (4q8b)
|
| 343 |
+
|
| 344 |
+

|
| 345 |
+
Ours (4ru)
|
| 346 |
+
|
| 347 |
+

|
| 348 |
+
liGAN (4rlu)
|
| 349 |
+
|
| 350 |
+

|
| 351 |
+
Ours (3b6h)
|
| 352 |
+
|
| 353 |
+

|
| 354 |
+
liGAN (3b6h)
|
| 355 |
+
|
| 356 |
+
# A.2 Linker Prediction
|
| 357 |
+
|
| 358 |
+
We present more examples of linker prediction. Since the baseline model DeLinker is graph-based and does not generate 3D linker structures, to make the results of both methods visually comparable, we only show 2D molecular graphs. We randomly selected 5 cases from the testing set. For each case, we select 5 representative molecules, including the molecule with best similarity, the molecule with worst similarity, and 3 molecules between them. These examples evidence that our method is generally more likely to produce linkers that recover or resemble the original structure.
|
| 359 |
+
|
| 360 |
+
<table><tr><td></td><td>Fragments</td><td colspan="5">Predicted</td><td>Reference</td></tr><tr><td rowspan="5">4qik</td><td rowspan="5"></td><td colspan="5">Ours</td><td rowspan="5"></td></tr><tr><td>Sim: 1.00</td><td>Sim: 0.97</td><td>Sim: 0.83</td><td>Sim: 0.64</td><td>Sim: 0.63</td></tr><tr><td colspan="5">DeLinker</td></tr><tr><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Sim: 0.99</td><td>Sim: 0.66</td><td>Sim: 0.58</td><td>Sim: 0.50</td><td>Sim: 0.42</td></tr><tr><td rowspan="5">3ym</td><td rowspan="5"></td><td colspan="5">Ours</td><td rowspan="5"></td></tr><tr><td>Sim: 0.55</td><td>Sim: 0.54</td><td>Sim: 0.48</td><td>Sim: 0.41</td><td>Sim: 0.38</td></tr><tr><td colspan="5">DeLinker</td></tr><tr><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Sim: 0.58</td><td>Sim: 0.47</td><td>Sim: 0.44</td><td>Sim: 0.39</td><td>Sim: 0.36</td></tr><tr><td rowspan="5">3nf</td><td rowspan="5"></td><td colspan="5">Ours</td><td rowspan="5"></td></tr><tr><td>Sim: 1.00</td><td>Sim: 0.91</td><td>Sim: 0.87</td><td>Sim: 0.85</td><td>Sim: 0.79</td></tr><tr><td colspan="5">DeLinker</td></tr><tr><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Sim: 0.90</td><td>Sim: 0.57</td><td>Sim: 0.56</td><td>Sim: 0.52</td><td>Sim: 0.45</td></tr><tr><td rowspan="5">4xi</td><td rowspan="5"></td><td colspan="5">Ours</td><td rowspan="5"></td></tr><tr><td>Sim: 1.00</td><td>Sim: 0.57</td><td>Sim: 0.57</td><td>Sim: 0.56</td><td>Sim: 0.56</td></tr><tr><td colspan="5">DeLinker</td></tr><tr><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Sim: 0.71</td><td>Sim: 0.57</td><td>Sim: 0.57</td><td>Sim: 0.56</td><td>Sim: 0.56</td></tr><tr><td rowspan="5">4m7t</td><td rowspan="5"></td><td colspan="5">Ours</td><td rowspan="5"></td></tr><tr><td>Sim: 0.84</td><td>Sim: 0.79</td><td>Sim: 0.67</td><td>Sim: 0.64</td><td>Sim: 0.63</td></tr><tr><td colspan="5">DeLinker</td></tr><tr><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Sim: 0.67</td><td>Sim: 0.58</td><td>Sim: 0.56</td><td>Sim: 0.53</td><td>Sim: 0.50</td></tr></table>
|
| 361 |
+
|
| 362 |
+
# B Additional Model Details
|
| 363 |
+
|
| 364 |
+
# B.1 Sampling Algorithm
|
| 365 |
+
|
| 366 |
+
At the first step of molecule generation, there is no placed atoms in the binding site. To sample the first atom, we use Metropolis-Hasting algorithm to draw samples from the marginal distribution $p(\boldsymbol{r}|\mathcal{C}) = \sum_{e}p(e,\boldsymbol{r}|\mathcal{C})$ and select coordinate-element pairs that have highest joint probability. We draw 1,000 initial samples from the Gaussian mixture model defined on the coordinates of protein atoms, whose standard deviation is $1\AA$ . The proposal distribution is a Gaussian with $0.1\AA$ standard deviation, and the total number of steps is 500.
|
| 367 |
+
|
| 368 |
+
If there are previously placed atoms in the binding site, to accelerate sampling and make full use of model parallelism, we discretize the 3D space onto meshgrids. The resolution of the meshgrid is $0.1\AA$ . We only discretize the space where the radial distance to some frontier atom ranges from $1.0\AA$ to $2.0\AA$ in order to save memory. Note that frontier atoms are predicted by the frontier network. Then, we evaluate the non-normalized joint probabilities on the meshgrid and use softmax to normalize them. Finally, we draw coordinate-element pairs from the normalized probability.
|
| 369 |
+
|
| 370 |
+
We use the beam search technique to generate 100 different molecules for each binding site, and we set the beam width to 300.
|
| 371 |
+
|
| 372 |
+
# B.2 Hyperparameters
|
| 373 |
+
|
| 374 |
+
The hyperparameters are shared across both molecule design and linker prediction tasks. For the context encoder, the neighborhood size of $k$ -NN graphs is 48, the number of message passing layers $L$ is 6, and the dimension of hidden features $h_{i}^{(\ell)}$ is 256. For the spatial classifier, the dimension of $v$ is 128, and the number of aggregated nodes is 32. We train the model using the Adam optimizer at learning rate 0.0001. The batch size is 4 and the number of training iterations is 1.5 million, which takes about 2 days on GPU.
|
2203.10xxx/2203.10446/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a654a2a26264c75a247ed9c247ff462325a60cb7356c9c982fdcc46ead2e5613
|
| 3 |
+
size 1096376
|
2203.10xxx/2203.10446/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2203.10xxx/2203.10465/7daffd3a-1fdb-4369-b5ad-2de6805c1054_content_list.json
ADDED
|
@@ -0,0 +1,1542 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
+[
+  {"type": "text", "text": "Inspection-L: Self-Supervised GNN Node Embeddings for Money Laundering Detection in Bitcoin", "text_level": 1, "bbox": [230, 103, 766, 143], "page_idx": 0},
+  {"type": "text", "text": "Wai Weng Lo$^{a,\\ast}$, Gayan K. Kulatilleke$^{a}$, Mohanad Sarhan$^{a}$, Siamak Layeghy$^{a}$, Marius Portmann$^{a}$", "bbox": [171, 164, 823, 179], "page_idx": 0},
+  {"type": "text", "text": "$^{a}$The University of Queensland, Brisbane, Australia", "bbox": [356, 189, 640, 200], "page_idx": 0},
+  {"type": "text", "text": "Abstract", "text_level": 1, "bbox": [58, 255, 126, 268], "page_idx": 0},
+  {"type": "text", "text": "Criminals have become increasingly experienced in using cryptocurrencies, such as Bitcoin, for money laundering. The use of cryptocurrencies can hide criminal identities and transfer hundreds of millions of dollars of dirty funds through criminal digital wallets. However, this is considered a paradox because cryptocurrencies are goldmines for open-source intelligence, giving law enforcement agencies more power when conducting forensic analyses. This paper proposes Inspection-L, a graph neural network (GNN) framework based on a self-supervised Deep Graph Infomax (DGI) and Graph Isomorphism Network (GIN), combined with a supervised learning algorithm, namely Random Forest (RF), to detect illicit transactions for anti-money laundering (AML). To the best of our knowledge, our proposal is the first to apply self-supervised GNNs to the problem of AML in Bitcoin. The proposed method was evaluated on the Elliptic dataset and shows that our approach outperforms the state-of-the-art in terms of key classification metrics, which demonstrates the potential of self-supervised GNNs in the detection of illicit cryptocurrency transactions.", "bbox": [57, 275, 939, 405], "page_idx": 0},
+  {"type": "text", "text": "Keywords: graph neural networks; machine learning; forensics; anomaly detection; cryptocurrencies", "bbox": [58, 412, 739, 426], "page_idx": 0},
+  {"type": "text", "text": "1. Introduction", "text_level": 1, "bbox": [58, 453, 174, 466], "page_idx": 0},
+  {"type": "text", "text": "The advent of the first cryptocurrency—Bitcoin [1]—has revolutionized the conventional financial ecosystem, as it enables low-cost, near-anonymous, peer-to-peer cash transfers within and across borders. Due to its pseudonymity, many cybercriminals, terrorists, and hackers have started to use cryptocurrency for illegal transactions. For example, the WannaCry ransomware attack used Bitcoin [2] as the payment method due to its non-traceability. The criminals received nearly 3.4 million (46.4 BTC) within four days of the WannaCry attack [2]. Therefore, effective detection of illicit transactions in Bitcoin transaction graphs is essential for preventing illegal transactions. Paradoxically, cryptocurrencies are goldmines for open-source intelligence, as transaction network data are publicly available, enabling law enforcement agencies to conduct forensic analyses of transaction linkages and flows. However, the problem is challenging for law enforcement agencies, owing to its volume<sup>1</sup>, the untraceable peer-to-peer, cross-border nature of Bitcoin transactions, and the use of technologies such as mixers and tumblers.", "bbox": [57, 476, 504, 745], "page_idx": 0},
+  {"type": "text", "text": "Graph representation learning has shown great potential for detecting money laundering activities using cryptocurrencies. GNNs are tailored to applications with graph-structured data, such as the social sciences, chemistry, and telecommunications,", "bbox": [57, 747, 485, 804], "page_idx": 0},
+  {"type": "text", "text": "and can leverage the inherent structure of the graph data by building relational inductive biases into the deep learning architecture. This provides the ability to learn, reason, and generalize from the graph data, inspired by the concept of message propagation [3].", "bbox": [507, 453, 937, 524], "page_idx": 0},
+  {"type": "text", "text": "The Bitcoin transaction flow data can naturally be represented in graph format. A graph is constructed from the raw Bitcoin data and labeled such that the nodes represent transactions and the edges, encoded in the adjacency matrix, represent the flow of Bitcoin currency (BTC) from one transaction to the next. Both the topological information and the information contained in the node features are crucial for detecting illicit transactions.", "bbox": [507, 524, 937, 623], "page_idx": 0},
+  {"type": "text", "text": "This paper proposes Inspection-L, a Graph Neural Network (GNN) framework based on an enhanced self-supervised Deep Graph Infomax (DGI) [4] and a supervised Random Forest (RF)-based classifier to detect illicit transactions for AML.", "bbox": [507, 624, 937, 680], "page_idx": 0},
+  {"type": "text", "text": "Specifically, we investigate the Elliptic dataset [5], a realistic, partially labeled Bitcoin temporal graph-based transaction dataset consisting of real entities belonging to licit (e.g., wallets, miners), illicit (e.g., scams, terrorist organizations, ransomware), and unknown transaction categories. The proposed Inspection-L framework aims to detect illegal transactions based on graph representation learning in a self-supervised manner. Current graph machine learning approaches, such as [5], generally apply supervised graph neural networks to the detection of illicit transactions. However, supervised learning requires manual labeling. In the AML scenario, building an effective model that utilizes unknown-label data is required, since manually labeling Bitcoin data is costly and error-prone, and supervised models only perform well when sufficient labels are available. Thus, exploiting unlabeled data to improve performance is critical", "bbox": [507, 680, 944, 894], "page_idx": 0},
+  {"type": "aside_text", "text": "arXiv:2203.10465v4 [cs.CR] 9 Oct 2022", "bbox": [21, 315, 57, 717], "page_idx": 0},
+  {"type": "page_footnote", "text": "*Corresponding author", "bbox": [77, 825, 206, 837], "page_idx": 0},
+  {"type": "page_footnote", "text": "Email addresses: w.w.lo@uq.net.au (Wai Weng Lo),", "bbox": [82, 838, 386, 848], "page_idx": 0},
+  {"type": "page_footnote", "text": "g.kulatilleke@uq.net.au (Gayan K. Kulatilleke),", "bbox": [60, 848, 356, 860], "page_idx": 0},
+  {"type": "page_footnote", "text": "m.sarhan@uq.net.au (Mohanad Sarhan), siamak.layeghy@uq.net.au", "bbox": [60, 860, 472, 871], "page_idx": 0},
+  {"type": "page_footnote", "text": "(Siamak Layeghy), marius@itee.uq.edu.au (Marius Portmann)", "bbox": [60, 871, 423, 882], "page_idx": 0},
+  {"type": "page_footnote", "text": "<sup>1</sup>As of 2022 Aug 09, the volume of the entire BTC transaction record, the blockchain, is 420 GB, with an average growth rate of $129\\%$.", "bbox": [60, 883, 484, 904], "page_idx": 0},
+  {"type": "footer", "text": "Preprint submitted to Journal of LaTeX Templates", "bbox": [60, 915, 322, 927], "page_idx": 0},
+  {"type": "footer", "text": "October 11, 2022", "bbox": [838, 914, 937, 926], "page_idx": 0},
+  {"type": "text", "text": "for AML. On the other hand, self-supervised graph neural network algorithms [6][7] allow unlabeled data to be exploited, which can improve the quality of the representations for downstream tasks such as fraudulent transaction detection in Bitcoin. Furthermore, in supervised learning, a GNN is limited to capturing K-hop neighbor information; once a node lies more than K hops away, a supervised GNN fails to capture that node's information.", "bbox": [55, 96, 485, 209], "page_idx": 1},
+  {"type": "text", "text": "In this paper, we applied DGI self-supervised learning to capture global graph information, as it is not limited to K-layer neighborhood information: every node can access the entire graph's structural pattern and node information through randomly shuffled node features. The DGI discriminator tries to determine whether the node features have been shuffled or not. Thus, every node can access global node properties rather than only K-layer neighborhood information.", "bbox": [55, 210, 485, 336], "page_idx": 1},
+  {"type": "text", "text": "We demonstrate how the self-supervised DGI algorithm can be integrated with standard machine learning classification algorithms, i.e., Random Forest, to build an efficient anti-money-laundering detection system. We show that our Inspection-L method outperforms the state-of-the-art in terms of F1 score.", "bbox": [55, 338, 485, 407], "page_idx": 1},
+  {"type": "text", "text": "In summary, the key contributions of this paper are:", "bbox": [82, 409, 433, 423], "page_idx": 1},
+  {"type": "list", "sub_type": "text", "list_items": [
+    "- Different from most existing works, which typically use supervised graph representation learning to generate node embeddings for illegal transaction detection, we use a self-supervised learning approach to learn the node embeddings without using any labels.",
+    "- The proposed Inspection-L is based on a self-supervised DGI combined with the Random Forest (RF) supervised machine learning algorithm, to capture topological information and node features in the transaction graph to detect illegal transactions. To the best of our knowledge, our proposal is the first to utilize self-supervised GNNs to generate node embeddings for AML in Bitcoin.",
+    "- A comprehensive evaluation of the proposed framework using the Elliptic benchmark dataset demonstrates superior performance compared to supervised machine learning approaches."
+  ], "bbox": [82, 435, 485, 690], "page_idx": 1},
+  {"type": "text", "text": "2. RELATED WORKS", "text_level": 1, "bbox": [58, 711, 231, 725], "page_idx": 1},
+  {"type": "text", "text": "Weber et al. [5] created and published the Elliptic dataset, a temporal graph-based Bitcoin transaction dataset consisting of over 200K Bitcoin node transactions, 234K payment edges, and 49 transaction graphs with distinct time steps. Each of the transaction nodes was labeled as a \"licit\", \"illicit\", or \"unknown\" entity. They evaluated the Elliptic dataset using various machine learning methods, including Logistic Regression (LR), Random Forest (RF), Multilayer Perceptrons (MLP) [8], Graph Convolutional Networks (GCNs) [9] and EvolveGCN [10]. They reported a recall score in the illicit category of 0.67 using RF and 0.51 using GCNs.", "bbox": [55, 736, 485, 892], "page_idx": 1},
+  {"type": "text", "text": "Hu et al. [11] collected Bitcoin transaction graph data between July 2014 and May 2017 by running a Bitcoin client, and used an external trusted source, \"Wallet Explorer\", a website that tracks Bitcoin wallets, to label the data. They first highlighted the differences between money laundering and regular transactions using network centrality measures such as PageRank and the clustering coefficient [12], and then used a node2vec-based [13] classifier to classify money laundering transactions. The research also indicated that statistical information, such as in-degree/out-degree, the number of weakly connected components, and the sum/mean/standard deviation of the output values, could distinguish money laundering transactions from legal transactions. However, this approach only considers graph topological patterns, without considering node features. Vassallo et al. [14] focused on the detection of illicit cryptocurrency activities (e.g., scams, terrorism financing, and Ponzi schemes). Their proposed detection framework is based on Adaptive Stacked eXtreme Gradient Boosting (ASXGB), an enhanced variation of eXtreme Gradient Boosting (XGBoost). ASXGB was evaluated using the Elliptic dataset, and the results demonstrate its superiority at both the account and transaction levels.", "bbox": [507, 96, 937, 394], "page_idx": 1},
+  {"type": "text", "text": "Lee et al. [15] applied supervised machine learning algorithms to classify illicit nodes in the Bitcoin network. They used two supervised machine learning models, namely Random Forest (RF) and an Artificial Neural Network (ANN) [8], to detect illegal transactions. First, they collected legal and illegal Bitcoin data from the sites \"Wallet Explorer\" and \"Blockchain Explorer\". Next, they performed feature extraction based on the characteristics of Bitcoin transactions, such as transaction fees and transaction size. The extracted features were labeled legal or illegal for supervised training. The results indicated that relatively high F1 scores could be achieved; specifically, ANN and RF achieved F1 scores of 0.89 and 0.98, respectively. The authors of [16] proposed using GCNs intertwined with linear layers to classify illicit nodes of the Elliptic dataset [5], achieving an overall classification accuracy of $97.40\\%$ and a recall of 0.67 for illicit transactions. In [17], the authors used an autoencoder with graph embedding to detect mixing and demixing services for the Bitcoin cryptocurrency. They first applied graph node embedding to generate node representations; then, a K-means algorithm was applied to cluster the node embeddings to detect mixing and demixing services. The model was evaluated on real-world Bitcoin datasets, and the results demonstrate that it can effectively perform mixing/demixing service anomaly detection.", "bbox": [507, 395, 937, 749], "page_idx": 1},
+  {"type": "text", "text": "Lorenz et al. [18] proposed active learning techniques that use a minimum number of labels to achieve a high rate of detection of illicit transactions on the Elliptic dataset. In [19], the authors applied unsupervised learning to detect suspicious nodes in the Bitcoin transaction graph. They used various unsupervised machine learning algorithms, such as K-means and Gaussian Mixture models, to cluster normal and illicit nodes. However, since the Bitcoin transaction dataset they used lacked ground-truth labels, they simply used internal cluster-validity indices to validate the clustering, without confirming that those nodes are actually malicious transactions. Monamo et al. [20]", "bbox": [507, 750, 939, 904], "page_idx": 1},
+  {"type": "page_number", "text": "2", "bbox": [492, 914, 504, 925], "page_idx": 1},
+  {"type": "text", "text": "applied trimmed K-means to detect fraud in the Bitcoin network. They used various graph centrality measures (i.e., the in-degree and out-degree of Bitcoin transactions) and currency features (i.e., the total amount sent), which were then used for Bitcoin transaction clustering. However, similar to [19], due to the unavailability of ground-truth labels, they used clustering performance metrics such as the \"within sum of squares\", without being able to validate the true nature of the Bitcoin transaction anomalies. Li et al. [21] proposed SIEGE, a self-supervised graph learning approach for Ethereum phishing scam detection, using two pretext tasks to generate node embeddings without using labels and an incremental paradigm to capture data distribution changes over half a year. However, a significant limitation of this approach is that it does not consider the Bitcoin context and is limited to detecting Ethereum phishing scams. Additionally, their simple application of GCNs [9] in the pretext task phase is much less expressive than the Weisfeiler-Lehman (1-WL) test [22].", "bbox": [57, 96, 500, 351], "page_idx": 2},
+  {"type": "text", "text": "In contrast with related studies, our approach can detect not only phishing scams but also other illicit transactions, such as those of terrorist organizations, ransomware and Ponzi schemes, by utilizing the Elliptic dataset [5].", "bbox": [57, 351, 485, 409], "page_idx": 2},
+  {"type": "text", "text": "3. BACKGROUND", "text_level": 1, "bbox": [58, 430, 205, 442], "page_idx": 2},
+  {"type": "image", "img_path": "images/799e853a5a8a1fb32e04f838f83aa3acdc72b3f7f1c604a8dbe0562de1002bee.jpg", "image_caption": ["Figure 1: Overview of Deep Graph Infomax"], "image_footnote": [], "bbox": [100, 464, 442, 690], "page_idx": 2},
+  {"type": "text", "text": "The main innovation of our proposed model is its use of DGI [4] with our proposed GIN encoder to learn node embeddings in a self-supervised manner. The node embeddings can then be treated as enhanced features and combined with the raw features for a standard supervised RF machine learning algorithm to classify illicit transactions. This has a clear advantage over using simple features as inputs, since overall graph-structured patterns become available to the downstream classifier.", "bbox": [57, 733, 485, 845], "page_idx": 2},
+  {"type": "text", "text": "By comparison, current graph-based approaches [5][16] apply a supervised GCN-based approach to capture the overall graph-structured patterns. However, the main limitation is that a GCN can only capture the neighborhood information of", "bbox": [57, 847, 485, 904], "page_idx": 2},
+  {"type": "text", "text": "a limited number of K layers, rather than a global view of the graph and node information, due to the threat of overfitting. While some models, such as FDGATII [23], are capable of a larger K, they are still limited by their layer structure and finite K. On the other hand, our Inspection-L approach allows every node to access the structural patterns of the entire graph, which captures more global neighborhood information. The proposed method also considers that the message-passing functions of [5][16] are not powerful enough, as they are not injective. Therefore, we propose a GIN encoder to make the message-propagation function more powerful.", "bbox": [507, 96, 937, 252], "page_idx": 2},
+  {"type": "text", "text": "3.1. Graph Neural Networks", "text_level": 1, "bbox": [509, 267, 714, 280], "page_idx": 2},
+  {"type": "text", "text": "GNNs are a deep learning approach for graph-based data and a recent, highly promising area of machine learning [23]. The key feature of GNNs is their ability to combine a topological graph structure with node features. For each node in a graph, this means aggregating neighboring node features to derive a new representation of the current node that takes the neighborhood information into account. The output of this process is known as an embedding. Final node embeddings are low-dimensional vector representations that capture topological and node properties. Embeddings can be learned in a supervised or unsupervised manner and used for downstream tasks such as node classification, clustering, and link prediction [23]. The $k$-th layer of a typical GCN is:", "bbox": [507, 284, 937, 469], "page_idx": 2},
+  {"type": "equation", "text": "\n$$\nh_{v}^{(k)} = \\sigma\\left(W^{(k)} \\cdot \\operatorname{MEAN}\\left\\{h_{u}^{(k-1)}, \\forall u \\in N(v) \\cup \\{v\\}\\right\\}\\right). \\tag{1}\n$$\n", "text_format": "latex", "bbox": [559, 479, 936, 500], "page_idx": 2},
+  {"type": "text", "text": "where $h_{v}^{(k)}$ is the feature vector of node $v$ at the $k$-th iteration/layer, $h_{v}^{(0)} = X_{v}$, and $N(v)$ is the set of neighbor nodes of $v$. $W^{(k)}$ is the weight matrix that is learned for the downstream task, and $\\sigma$ is an activation function, typically ReLU, for computing node representations.", "bbox": [507, 511, 937, 583], "page_idx": 2},
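
Editor's note: Equation (1) maps to a few lines of code. Below is a minimal NumPy sketch of one mean-aggregation GCN layer; the function name, toy sizes, and the ReLU choice for $\sigma$ are illustrative assumptions, not code from the paper.

```python
import numpy as np

def gcn_layer(H, A, W):
    """One mean-aggregation GCN layer in the spirit of Equation (1)."""
    A_hat = A + np.eye(A.shape[0])                        # include v itself: N(v) ∪ {v}
    mean_agg = A_hat / A_hat.sum(axis=1, keepdims=True)   # row-wise MEAN over the neighborhood
    return np.maximum(0.0, mean_agg @ H @ W)              # sigma = ReLU

H = np.random.rand(5, 166)    # 5 toy nodes with 166 features, as in the Elliptic data
A = (np.random.rand(5, 5) > 0.5).astype(float)            # toy adjacency matrix
W = np.random.rand(166, 128)  # weight matrix W^{(k)}
print(gcn_layer(H, A, W).shape)                           # (5, 128)
```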
+  {"type": "text", "text": "3.2. Graph Isomorphism Network", "text_level": 1, "bbox": [509, 596, 752, 611], "page_idx": 2},
+  {"type": "text", "text": "The Graph Isomorphism Network (GIN), proposed by Xu et al. [24], is a theoretically maximally powerful GNN. The main difference between GIN and other GNNs is the message aggregation function, which is shown below:", "bbox": [507, 614, 937, 671], "page_idx": 2},
+  {"type": "equation", "text": "\n$$\nh_{v}^{(k)} = \\operatorname{MLP}^{(k)}\\left(\\left(1 + \\epsilon^{(k)}\\right) \\cdot h_{v}^{(k-1)} + \\sum_{u \\in N(v)} h_{u}^{(k-1)}\\right) \\tag{2}\n$$\n", "text_format": "latex", "bbox": [566, 682, 936, 720], "page_idx": 2},
+  {"type": "text", "text": "GCNs are less powerful than the Weisfeiler-Lehman (1-WL) test [22] because their single-layer aggregation function plays the same role as the hash function of the 1-WL algorithm. According to [24], a single non-linear layer is insufficient for graph learning; thus, GCN message-passing functions are not necessarily injective. Therefore, GIN [24] was proposed to make the message-passing function injective, as shown in Equation 2, where $\\epsilon^{(k)}$ is a scalar parameter and MLP stands for multilayer perceptron. $h_{v}^{(k)} \\in \\mathbb{R}^d$ is the embedding of node $v$ at the $k$-th layer, $h_{v}^{(0)} = x_{v}$ is the original input node feature vector, and $N(v)$ is the set of neighboring nodes of $v$. We can stack $k$ layers to obtain the final node representation $h_{v}^{(k)}$.", "bbox": [507, 730, 937, 902], "page_idx": 2},
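
Editor's note: PyTorch Geometric, which the paper lists among its packages, provides `GINConv`, which wraps exactly such an MLP with a learnable $\epsilon$. A minimal sketch; the 166/128 dimensions mirror the feature and hidden sizes described later in Section 4.1, while the toy graph is an assumption.

```python
import torch
from torch import nn
from torch_geometric.nn import GINConv

# MLP as in Equation (2): Linear -> BatchNorm -> ReLU -> Linear.
mlp = nn.Sequential(
    nn.Linear(166, 128),
    nn.BatchNorm1d(128),
    nn.ReLU(),
    nn.Linear(128, 128),
)
gin = GINConv(mlp, train_eps=True)  # learnable epsilon^{(k)}

x = torch.randn(5, 166)                                  # toy node features
edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 4]])  # toy edges
h = gin(x, edge_index)                                   # h_v^{(1)}, shape (5, 128)
```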
+  {"type": "page_number", "text": "3", "bbox": [492, 914, 502, 925], "page_idx": 2},
+  {"type": "text", "text": "3.3. Deep Graph Infomax", "text_level": 1, "bbox": [58, 96, 243, 111], "page_idx": 3},
+  {"type": "text", "text": "Deep Graph Infomax (DGI) [4] is a self-supervised graph representation learning approach that relies on maximizing the mutual information between patch representations and a global graph summary. The patch representations summarize subgraphs, allowing similarities to be preserved at the patch level. A trained encoder in DGI can be reused to generate node embeddings for downstream tasks, such as node clustering.", "bbox": [57, 114, 495, 212], "page_idx": 3},
+  {"type": "text", "text": "Most previous self-supervised representation learning approaches rely on a random-walk strategy [25][26], which is computationally expensive because the number of walks grows with the number of nodes in the graph, making it unscalable for large graphs. Moreover, the choice of hyperparameters (length of the walk, number of walks) can significantly impact model performance. DGI requires neither supervision nor random-walk techniques. Instead, it guides the model to learn node connections by simultaneously leveraging local and global information in a graph [4].", "bbox": [57, 214, 485, 355], "page_idx": 3},
+  {"type": "text", "text": "Figure 1 shows the overall operation of DGI. $G$ is the true graph, with the true nodes, the true edges that connect them, and the real node features associated with each node. $H$ is a corrupted graph obtained by applying a corruption function. [4] suggests that the corruption function can randomly shuffle the node features while maintaining the same edges as the true graph $G$.", "bbox": [57, 355, 485, 455], "page_idx": 3},
+  {"type": "text", "text": "The DGI training procedure consists of four components:", "bbox": [82, 455, 472, 469], "page_idx": 3},
+  {"type": "list", "sub_type": "text", "list_items": [
+    "- A corruption procedure $C$ that changes the real input graph $G$ into a corrupted graph $H = C(G)$. This can be achieved by randomly shuffling the node features among the nodes of the real graph $G$, or by adding and removing edges of the real graph $G$.",
+    "- An encoder $E$ that computes the node embeddings of the corrupted graph and the real graph. This can be achieved using various graph representation methods, such as Graph Convolutional Networks (GCNs) [9], Graph Attention Networks (GATs) [27] or Graph Transformer Networks (GTNs) [28].",
+    "- A readout function $R$ that summarizes the node embedding vectors of the real graph into a single embedding vector $\\overline{s}$ of the entire graph (the global graph summary).",
+    "- A discriminator $D$, a logistic non-linear sigmoid function, that compares a real node embedding vector $\\vec{h}_i$ and a corrupted node embedding $\\widetilde{h}_i$ against the whole real graph embedding $\\overline{s}$ and provides a score between 0 and 1, as shown in Equation 3. This binary cross-entropy loss objective function [4] can be applied to discriminate between the embeddings of real and corrupted nodes to train the encoder $E$."
+  ], "bbox": [82, 478, 500, 834], "page_idx": 3},
+  {"type": "equation", "text": "\n$$\nL = \\frac{1}{N+M}\\left(\\sum_{i=1}^{N} \\mathbb{E}_{(\\mathbf{X},\\mathbf{A})}\\left[\\log D\\left(\\vec{h}_{i}, \\vec{s}\\right)\\right] + \\sum_{j=1}^{M} \\mathbb{E}_{(\\widetilde{\\mathbf{X}},\\widetilde{\\mathbf{A}})}\\left[\\log\\left(1 - D\\left(\\widetilde{h}_{j}, \\vec{s}\\right)\\right)\\right]\\right) \\tag{3}\n$$\n", "text_format": "latex", "bbox": [591, 118, 937, 217], "page_idx": 3},
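
Editor's note: Equation (3) is a count-weighted binary cross-entropy over real and corrupted nodes. A minimal PyTorch sketch; the function and argument names are illustrative assumptions.

```python
import torch
import torch.nn.functional as F

def dgi_loss(pos_scores: torch.Tensor, neg_scores: torch.Tensor) -> torch.Tensor:
    """Negative of Equation (3): minimizing this BCE maximizes L.

    pos_scores: D(h_i, s) for the N real node embeddings, each in (0, 1)
    neg_scores: D(h~_j, s) for the M corrupted node embeddings, each in (0, 1)
    """
    pos = F.binary_cross_entropy(pos_scores, torch.ones_like(pos_scores))
    neg = F.binary_cross_entropy(neg_scores, torch.zeros_like(neg_scores))
    n, m = pos_scores.numel(), neg_scores.numel()
    return (n * pos + m * neg) / (n + m)   # the 1/(N+M) weighting of Equation (3)
```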
+  {"type": "text", "text": "4. PROPOSED METHOD", "text_level": 1, "bbox": [509, 237, 707, 250], "page_idx": 3},
+  {"type": "text", "text": "To construct Bitcoin transaction graphs from the dataset, we used the 49 time-stepped Bitcoin transaction graphs (TGs) of [5], in which the nodes represent transactions and the edges represent flows of Bitcoin currency between transactions. This is a very natural way to represent Bitcoin transactions.", "bbox": [507, 262, 937, 346], "page_idx": 3},
+  {"type": "text", "text": "The pseudocode and overall procedure of our proposed algorithm are shown in Algorithm 1 and Figure 2. The proposed framework consists of two stages: DGI training, which extracts node embeddings for feature augmentation, and supervised machine learning classification.", "bbox": [507, 347, 937, 418], "page_idx": 3},
+  {"type": "text", "text": "4.1. DGI Training", "text_level": 1, "bbox": [509, 432, 638, 445], "page_idx": 3},
+  {"type": "text", "text": "To train the proposed model, the input includes the transaction graphs $G$ with node features (either all 166 features, a combination of local and aggregated features, which we denote AF, or only the 94 local features, LF) and the specified number of training epochs $K$, from which true node embeddings and corrupted node embeddings are extracted. Before this, we need to define the corruption function $C$ that generates the corrupted transaction graphs $C(G)$ from which our GIN encoder extracts the corrupted node embeddings. In this paper, we randomly shuffled the node features among the nodes of each real transaction graph $G$ to generate its corrupted counterpart, by row-wise shuffling of the feature matrix $\\mathbf{X}$ using a Bernoulli distribution. Overall, instead of adding or removing edges of the adjacency matrix such that $\\mathbf{A}_G \\neq \\mathbf{A}_H$, we use a corruption function $C$ that shuffles the node features such that $\\mathbf{X}_G \\neq \\mathbf{X}_H$ and retains the adjacency matrix, i.e., $\\mathbf{A}_G = \\mathbf{A}_H$. Note that the corruption function only changes the node features, not the structure; therefore, $N_G = N_H$. In the case of our DGI implementation, we thus have $N = M$.", "bbox": [507, 450, 937, 718], "page_idx": 3},
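
Editor's note: the corruption function described above amounts to a single row permutation of the feature matrix. A sketch (editor's illustration; the function name is an assumption):

```python
import torch

def corrupt(x: torch.Tensor) -> torch.Tensor:
    """Corruption function C: shuffle node features across nodes.

    Leaves the adjacency untouched (A_G == A_H) and only permutes the
    rows of the feature matrix X, so X_G != X_H while N_G == N_H.
    """
    perm = torch.randperm(x.size(0))  # random row permutation
    return x[perm]
```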
+  {"type": "text", "text": "For each batch of graph data $G$ in a training epoch (Algorithm 1, lines 4 to 5), we use our proposed GIN encoder to extract the true and corrupted node embeddings. The proposed GIN encoder, shown in Figure 2, uses two MLP layers, each with 128 hidden units, a ReLU activation function, and batch normalization (as shown in Algorithm 2) [29].", "bbox": [507, 720, 937, 818], "page_idx": 3},
+  {"type": "text", "text": "The design of the MLPs is motivated by the fundamental goal of a GNN-based model. Ideally, different types of graph patterns should be distinguishable via the graph encoder, which means that different graph structures should be mapped to different locations in the embedding space. This requires the", "bbox": [507, 819, 937, 890], "page_idx": 3},
+  {"type": "page_number", "text": "4", "bbox": [492, 914, 502, 925], "page_idx": 3},
+  {"type": "image", "img_path": "images/4d190f2226cc502b18362a977242ff03f903da0468f27a9eb85754b9ddc91ed2.jpg", "image_caption": ["Figure 2: Proposed Method"], "image_footnote": [], "bbox": [90, 92, 939, 321], "page_idx": 4},
+  {"type": "code", "sub_type": "algorithm", "code_caption": ["Algorithm 1: Pseudocode for Our Proposed Algorithm"], "code_body": "input: Set of training graphs $G^{+} = \\{G(V,A,X)\\}$; Number of training epochs $K$; Corruption function $C$; All 166 Features (AF); First 94 Local Features (LF); output: Optimized GIN encoder $g$, Optimized RF $h_{R}$ \n1 Initialize the parameters $\\theta$ and $\\omega$ for the encoder $g$ and the discriminator $D$; \n2 foreach batch $G \\in G^{+}$ do \n3 for epoch $\\leftarrow 1$ to $K$ do \n4 $h_i = g(G, \\theta)$ \n5 $\\widetilde{h}_i = g(C(G), \\theta)$ \n6 $\\bar{s} = \\sigma \\left( \\frac{1}{n} \\sum_{i=1}^{n} h_i^{(L)} \\right)$ \n7 $D(h_i, \\bar{s}) = \\sigma(h_i^T \\mathbf{w} \\bar{s})$ \n8 $D(\\widetilde{h}_i, \\bar{s}) = \\sigma(\\widetilde{h}_i^T \\mathbf{w} \\bar{s})$ \n9 $L_{DGI} = \\frac{1}{N + M} \\left( \\sum_{i=1}^{N} \\mathbb{E}_{(\\mathbf{X}, \\mathbf{A})}[\\log D(\\vec{h}_i, \\vec{s})] + \\sum_{j=1}^{M} \\mathbb{E}_{(\\widetilde{\\mathbf{X}}, \\widetilde{\\mathbf{A}})}[\\log(1 - D(\\widetilde{h}_j, \\vec{s}))]\\right)$ \n10 $\\theta, \\omega \\gets$ Adam($L_{DGI}$) \n11 Select labeled node embeddings $h_i$ from $h_i = g(G, \\theta)$ and corresponding labels $y$ for $G \\in$ training set; \n12 $h_{R} \\gets$ RF(($h_i \\| \\{$AF or LF$\\}$), $y$) \n13 return $h_{R}, g$", "bbox": [90, 376, 680, 642], "page_idx": 4},
+  {"type": "text", "text": "ability to solve the graph isomorphism problem, where non-isomorphic graphs should be mapped to different representations.", "bbox": [57, 671, 485, 714], "page_idx": 4},
+  {"type": "text", "text": "We applied a full neighbor sampling technique and used two-hop neighbor samples for the GIN encoder with batch normalization, as DGI benefits from employing wider rather than deeper models [4].", "bbox": [57, 715, 485, 771], "page_idx": 4},
+  {"type": "text", "text": "For the readout function $R$, we applied the mean operation over all node embeddings in the real graph $G$ and then applied a sigmoid activation function to compute the whole-graph embedding $\\overline{s}$:", "bbox": [57, 772, 485, 829], "page_idx": 4},
+  {"type": "equation", "text": "\n$$\n\\bar{s} = \\sigma\\left(\\frac{1}{n} \\sum_{i=1}^{n} h_{i}^{(L)}\\right) \\tag{4}\n$$\n", "text_format": "latex", "bbox": [210, 839, 485, 875], "page_idx": 4},
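
Editor's note: Equation (4) in code is essentially one line; a sketch assuming `h` holds the final-layer node embeddings $h_i^{(L)}$ as rows.

```python
import torch

def readout(h: torch.Tensor) -> torch.Tensor:
    """Readout R of Equation (4): sigmoid of the mean over all node embeddings."""
    return torch.sigmoid(h.mean(dim=0))   # whole-graph summary s, shape (d,)
```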
+  {"type": "text", "text": "In Algorithm 1, lines 7 to 8, as shown in Equation 5 and Equation 6, for the discriminator $D$ we used a logistic sigmoid", "bbox": [58, 877, 485, 906], "page_idx": 4},
+  {"type": "code", "sub_type": "algorithm", "code_caption": ["Algorithm 2: Batch Normalizing Transform [29]"], "code_body": "input: Values of $x$ over a mini-batch: $B = \\{x_{1\\dots m}\\}$; Learnable parameters: $\\gamma, \\beta$ output: $\\left\\{y_i = \\mathbf{BN}_{\\gamma,\\beta}(x_i)\\right\\}$ \n1 $\\mu_B \\gets \\frac{1}{m}\\sum_{i = 1}^m x_i$ \n2 $\\sigma_B^2 \\gets \\frac{1}{m}\\sum_{i = 1}^m (x_i - \\mu_B)^2$ \n3 $\\widehat{x}_i \\gets \\frac{x_i - \\mu_B}{\\sqrt{\\sigma_B^2 + \\epsilon}}$ \n4 $y_{i} \\gets \\gamma \\widehat{x}_{i} + \\beta \\equiv \\mathbf{BN}_{\\gamma,\\beta}(x_{i})$", "bbox": [512, 705, 882, 812], "page_idx": 4},
+  {"type": "text", "text": "non-linear function to discriminate a node embedding vector $\\vec{h}_i$ against the real whole-graph embedding $\\overline{s}$, i.e., to calculate the score of $(\\vec{h}_i, \\overline{s})$ being positive or negative:", "bbox": [507, 835, 937, 882], "page_idx": 4},
+  {"type": "equation", "text": "\n$$\nD\\left(h_{i}, \\bar{s}\\right) = \\sigma\\left(h_{i}^{T} \\mathbf{w} \\bar{s}\\right) \\tag{5}\n$$\n", "text_format": "latex", "bbox": [653, 889, 936, 909], "page_idx": 4},
+  {"type": "page_number", "text": "5", "bbox": [492, 914, 502, 925], "page_idx": 4},
+  {"type": "equation", "text": "\n$$\nD\\left(\\widetilde{h}_{i}, \\bar{s}\\right) = \\sigma\\left(\\widetilde{h}_{i}^{T} \\mathbf{w} \\bar{s}\\right) \\tag{6}\n$$\n", "text_format": "latex", "bbox": [201, 108, 485, 127], "page_idx": 5},
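
Editor's note: Equations (5) and (6) share one bilinear scoring function; only the input embeddings differ. A sketch (the class name and Xavier initialization are assumptions):

```python
import torch
from torch import nn

class Discriminator(nn.Module):
    """Bilinear discriminator of Equations (5)-(6): sigma(h^T W s)."""
    def __init__(self, dim: int = 128):
        super().__init__()
        self.weight = nn.Parameter(torch.empty(dim, dim))
        nn.init.xavier_uniform_(self.weight)

    def forward(self, h: torch.Tensor, s: torch.Tensor) -> torch.Tensor:
        # h: (n, dim) node embeddings (real or corrupted); s: (dim,) graph summary
        return torch.sigmoid(h @ self.weight @ s)  # (n,) scores in (0, 1)
```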
+  {"type": "text", "text": "We then used the binary cross-entropy loss objective function (Equation 3, with $N = M$ in our case) to perform gradient descent, as shown in Algorithm 1, line 10. The score is maximized if the node embedding is a true node embedding $\\vec{h}_i$ and minimized if it is a corrupted node embedding $\\widetilde{h}_i$, in both cases compared against the global graph summary generated by the readout function $R$ (Equation 4). As a result, we maximized the mutual information between the patch representations and the whole real-graph summary, based on the binary cross-entropy (BCE) loss of Equation 3. After the training process, the trained encoder can be reused to generate new graph embeddings for downstream purposes; in this case, the detection of illegal transactions.", "bbox": [57, 131, 485, 332], "page_idx": 5},
+  {"type": "text", "text": "In our experiments, we used all 34 training Bitcoin transaction graphs to train the DGI with the GIN encoder in a self-supervised manner. For each training graph, we trained for 300 epochs using an Adam optimizer with a learning rate of 0.0001, as shown in Algorithm 1, line 10.", "bbox": [57, 332, 484, 404], "page_idx": 5},
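
Editor's note: putting the pieces together, lines 2-10 of Algorithm 1 correspond to a loop of the following shape. This is a sketch under the assumption that `encoder`, `disc`, `corrupt`, `readout`, `dgi_loss`, and `train_graphs` (a list of `(x, edge_index)` pairs) are defined as in the earlier sketches; the 300 epochs and learning rate 0.0001 come from the paper.

```python
import torch

opt = torch.optim.Adam(
    list(encoder.parameters()) + list(disc.parameters()), lr=0.0001
)
for x, edge_index in train_graphs:               # foreach batch G (line 2)
    for epoch in range(300):                     # for epoch <- 1 to K (line 3)
        h = encoder(x, edge_index)               # true embeddings (line 4)
        h_cor = encoder(corrupt(x), edge_index)  # corrupted embeddings (line 5)
        s = readout(h)                           # graph summary (line 6)
        loss = dgi_loss(disc(h, s), disc(h_cor, s))  # lines 7-9
        opt.zero_grad()
        loss.backward()
        opt.step()                               # Adam update (line 10)
```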
+  {"type": "text", "text": "4.2. Supervised Machine Learning Classification", "text_level": 1, "bbox": [58, 418, 394, 432], "page_idx": 5},
+  {"type": "text", "text": "After the DGI training, we reused the encoder to generate node embeddings, as shown in Algorithm 1, lines 11 to 12, to train and test an RF classifier with 100 estimators. In our experiments, we performed a 70:30 split: 34 Bitcoin transaction graphs for training and the remaining 15 for testing. All 34 training graphs were fed to DGI to train the GIN encoder in a self-supervised manner. Once the training phase was completed, we used the trained GIN encoder to extract all node embeddings of the training graphs. As the dataset contains two classes plus unknown labels, we dropped the unknown-label data in the RF training and testing phases and used only labeled data for performance evaluation. We used all training-graph node embeddings to train the RF in a supervised manner. For testing, we extracted the node embeddings of the 15 test graphs using the trained GIN and fed them to the trained RF for illegal transaction detection.", "bbox": [60, 436, 485, 676], "page_idx": 5},
+  {"type": "text", "text": "We experimented with the following three combinations of features and embeddings:", "bbox": [58, 677, 485, 706], "page_idx": 5},
+  {"type": "list", "sub_type": "text", "list_items": [
+    "1. DNE: Node embeddings only: after the DGI training, we reused the encoder to generate node embeddings for training and testing the RF classifier, as mentioned above.",
+    "2. $\\mathbf{LF} + \\mathbf{DNE}$: Node embeddings with LF features: as in scenario 1, but we combined the local features (i.e., the first 94 raw features) with the node embeddings generated by the trained encoder for training and testing the RF classifier.",
+    "3. $\\mathbf{AF} + \\mathbf{DNE}$: Node embeddings with AF features: as in scenario 1, but we combined all raw features (AF) with the node embeddings generated by the trained encoder for training and testing the RF classifier."
+  ], "bbox": [78, 715, 485, 902], "page_idx": 5},
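
Editor's note: lines 11-12 of Algorithm 1 then reduce to ordinary scikit-learn usage. A sketch of the AF + DNE variant from the list above; the random stand-in arrays are illustrative placeholders for the real embeddings, features, and labels.

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier

rng = np.random.default_rng(0)
# Illustrative stand-ins: 128-dim DGI embeddings plus 166 AF features per node.
emb_train, raw_train = rng.random((200, 128)), rng.random((200, 166))
emb_test, raw_test = rng.random((50, 128)), rng.random((50, 166))
y_train, y_test = rng.integers(0, 2, 200), rng.integers(0, 2, 50)

X_train = np.hstack([emb_train, raw_train])    # h_i || AF, as in line 12
X_test = np.hstack([emb_test, raw_test])

rf = RandomForestClassifier(n_estimators=100)  # 100 estimators, per Section 4.2
rf.fit(X_train, y_train)
pred = rf.predict(X_test)
```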
| 965 |
+
{
|
| 966 |
+
"type": "table",
|
| 967 |
+
"img_path": "images/ab783ab5451262c5b002695f9ccf674c15643c66381df717fe329cf1077782b7.jpg",
|
| 968 |
+
"table_caption": [
|
| 969 |
+
"Table 1: Implementation environment specification"
|
| 970 |
+
],
|
| 971 |
+
"table_footnote": [],
|
| 972 |
+
"table_body": "<table><tr><td>Unit</td><td>Description</td></tr><tr><td>Processor</td><td>2.3 GHz 2-core Inter Xeon(R) Processor</td></tr><tr><td>RAM</td><td>12GB</td></tr><tr><td>GPU</td><td>Tesla P100 GPU 16GB</td></tr><tr><td>Operating System</td><td>Linux</td></tr><tr><td>Packages</td><td>Skit-learn, Numpy, Pandas, PyTorch Geometric, and matplotlib</td></tr></table>",
|
| 973 |
+
"bbox": [
|
| 974 |
+
512,
|
| 975 |
+
116,
|
| 976 |
+
927,
|
| 977 |
+
212
|
| 978 |
+
],
|
| 979 |
+
"page_idx": 5
|
| 980 |
+
},
|
| 981 |
+
{
|
| 982 |
+
"type": "text",
|
| 983 |
+
"text": "4.3. Implementation Environments",
|
| 984 |
+
"text_level": 1,
|
| 985 |
+
"bbox": [
|
| 986 |
+
509,
|
| 987 |
+
236,
|
| 988 |
+
749,
|
| 989 |
+
250
|
| 990 |
+
],
|
| 991 |
+
"page_idx": 5
|
| 992 |
+
},
|
| 993 |
+
{
|
| 994 |
+
"type": "text",
|
| 995 |
+
"text": "Experiments were carried out using a 2.3GHz 2-core Intel(R) Xeon(R) processor with 12 GB memory and Tesla P100 GPU on a Linux operating system. The proposed approach was developed using the Python programming language with several statistical and visualization packages, such as Sckt-learn, Numpy, Pandas, PyTorch Geometric, and Matplotlib. Table 1 summarizes the system configuration.",
|
| 996 |
+
"bbox": [
|
| 997 |
+
507,
|
| 998 |
+
253,
|
| 999 |
+
937,
|
| 1000 |
+
353
|
| 1001 |
+
],
|
| 1002 |
+
"page_idx": 5
|
| 1003 |
+
},
|
| 1004 |
+
{
|
| 1005 |
+
"type": "text",
|
| 1006 |
+
"text": "5. Experiments and Results",
|
| 1007 |
+
"text_level": 1,
|
| 1008 |
+
"bbox": [
|
| 1009 |
+
509,
|
| 1010 |
+
374,
|
| 1011 |
+
714,
|
| 1012 |
+
387
|
| 1013 |
+
],
|
| 1014 |
+
"page_idx": 5
|
| 1015 |
+
},
|
| 1016 |
+
{
|
| 1017 |
+
"type": "text",
|
| 1018 |
+
"text": "5.1. Dataset",
|
| 1019 |
+
"text_level": 1,
|
| 1020 |
+
"bbox": [
|
| 1021 |
+
510,
|
| 1022 |
+
399,
|
| 1023 |
+
601,
|
| 1024 |
+
411
|
| 1025 |
+
],
|
| 1026 |
+
"page_idx": 5
|
| 1027 |
+
},
|
| 1028 |
+
{
|
| 1029 |
+
"type": "text",
|
| 1030 |
+
"text": "In this paper, we adopted the Elliptic dataset [5], which is the world's largest labeled dataset of bitcoin transactions. The Elliptic dataset [5] consists of 203,769 node as transactions and 234,355 directed transaction payment flows (i.e., transaction inputs, transaction outputs). The datasets also consist of 49 different timestep graphs, which are uniformly spaced with a two-week interval, as illustrated in 3. Each connected transaction component consists of a time step that appears on the blockchain in less than three hours. Our $G$ represents one such transaction graph for the 49.",
|
| 1031 |
+
"bbox": [
|
| 1032 |
+
507,
|
| 1033 |
+
416,
|
| 1034 |
+
937,
|
| 1035 |
+
558
|
| 1036 |
+
],
|
| 1037 |
+
"page_idx": 5
|
| 1038 |
+
},
|
| 1039 |
+
{
|
| 1040 |
+
"type": "text",
|
| 1041 |
+
"text": "In the Elliptic dataset [5], $21\\%$ of the node entities are labeled as licit, and only $2\\%$ are labeled as illicit. The remaining node entities are unlabeled but have node features. These node entities consist of 166 features (AF features), among which the first 94 features contain local information (LF features) of the transactions, including the time step, transaction fees, and the number of inputs or outputs. The remaining 72 features are aggregated features. These features can be obtained by aggregating transaction information from one-hop backward/forward graph nodes, such as the standard deviation, minimum, maximum, and correlation coefficients of the neighbor transactions for the same information data. More importantly, all features were obtained using only publicly available information.",
|
| 1042 |
+
"bbox": [
|
| 1043 |
+
507,
|
| 1044 |
+
558,
|
| 1045 |
+
937,
|
| 1046 |
+
743
|
| 1047 |
+
],
|
| 1048 |
+
"page_idx": 5
|
| 1049 |
+
},
|
| 1050 |
+
{
|
| 1051 |
+
"type": "text",
|
| 1052 |
+
"text": "5.2. Performance Metric",
|
| 1053 |
+
"text_level": 1,
|
| 1054 |
+
"bbox": [
|
| 1055 |
+
509,
|
| 1056 |
+
758,
|
| 1057 |
+
684,
|
| 1058 |
+
771
|
| 1059 |
+
],
|
| 1060 |
+
"page_idx": 5
|
| 1061 |
+
},
|
| 1062 |
+
{
|
| 1063 |
+
"type": "text",
|
| 1064 |
+
"text": "To evaluate the performance of the proposed methods, the standard metrics listed in Table 2 were used, where $TP$ , $TN$ , $FP$ and $FN$ represent the number of True Positives, True Negatives, False Positives and False Negatives, respectively.",
|
| 1065 |
+
"bbox": [
|
| 1066 |
+
507,
|
| 1067 |
+
775,
|
| 1068 |
+
937,
|
| 1069 |
+
832
|
| 1070 |
+
],
|
| 1071 |
+
"page_idx": 5
|
| 1072 |
+
},
|
| 1073 |
+
{
|
| 1074 |
+
"type": "text",
|
| 1075 |
+
"text": "In Table 2, true positive (TP) denotes the total number of true positives, true negative (TN) indicates the total number of false positives, false positive (FP) denotes the total number of false negatives and false negative (TN) shows the total number of true negatives. The proposed method was evaluated using",
|
| 1076 |
+
"bbox": [
|
| 1077 |
+
507,
|
| 1078 |
+
832,
|
| 1079 |
+
937,
|
| 1080 |
+
903
|
| 1081 |
+
],
|
| 1082 |
+
"page_idx": 5
|
| 1083 |
+
},
|
| 1084 |
+
{
|
| 1085 |
+
"type": "page_number",
|
| 1086 |
+
"text": "6",
|
| 1087 |
+
"bbox": [
|
| 1088 |
+
492,
|
| 1089 |
+
914,
|
| 1090 |
+
502,
|
| 1091 |
+
925
|
| 1092 |
+
],
|
| 1093 |
+
"page_idx": 5
|
| 1094 |
+
},
|
| 1095 |
+
{
|
| 1096 |
+
"type": "image",
|
| 1097 |
+
"img_path": "images/89c92d1a6da11b635802b7c24f091bcdc06704159ff121d89179de149713cbdc.jpg",
|
| 1098 |
+
"image_caption": [],
|
| 1099 |
+
"image_footnote": [],
|
| 1100 |
+
"bbox": [
|
| 1101 |
+
221,
|
| 1102 |
+
93,
|
| 1103 |
+
315,
|
| 1104 |
+
141
|
| 1105 |
+
],
|
| 1106 |
+
"page_idx": 6
|
| 1107 |
+
},
|
| 1108 |
+
{
|
| 1109 |
+
"type": "image",
|
| 1110 |
+
"img_path": "images/ab2f544a011e99f771260b67be4561e3ec3e95dcf5d426bb7f94827d53749022.jpg",
|
| 1111 |
+
"image_caption": [
|
| 1112 |
+
"Figure 3: Overview of Elliptic Dataset [30]"
|
| 1113 |
+
],
|
| 1114 |
+
"image_footnote": [],
|
| 1115 |
+
"bbox": [
|
| 1116 |
+
253,
|
| 1117 |
+
143,
|
| 1118 |
+
776,
|
| 1119 |
+
256
|
| 1120 |
+
],
|
| 1121 |
+
"page_idx": 6
|
| 1122 |
+
},
|
| 1123 |
+
{
|
| 1124 |
+
"type": "image",
|
| 1125 |
+
"img_path": "images/0f3dc1b4e458e4e71d65d6964de9fcc973ee56977c8cff7f3b693f31e59b0db6.jpg",
|
| 1126 |
+
"image_caption": [
|
| 1127 |
+
"Figure 4: Illicit F1 over test timestep"
|
| 1128 |
+
],
|
| 1129 |
+
"image_footnote": [],
|
| 1130 |
+
"bbox": [
|
| 1131 |
+
97,
|
| 1132 |
+
302,
|
| 1133 |
+
905,
|
| 1134 |
+
545
|
| 1135 |
+
],
|
| 1136 |
+
"page_idx": 6
|
| 1137 |
+
},
|
| 1138 |
+
{
|
| 1139 |
+
"type": "table",
|
| 1140 |
+
"img_path": "images/1ed96fd1691779479b6f2bb38d9423a7bff04d897f0494cefe16e42d64c096e3.jpg",
|
| 1141 |
+
"table_caption": [
|
| 1142 |
+
"Table 2: Evaluation metrics used in this study"
|
| 1143 |
+
],
|
| 1144 |
+
"table_footnote": [],
|
| 1145 |
+
"table_body": "<table><tr><td>Metric</td><td>Definition</td></tr><tr><td>Detection Rate (Recall)</td><td>TP/TP+FN</td></tr><tr><td>Precision</td><td>TP/TP+FP</td></tr><tr><td>F1-Score</td><td>2×Recall×Precision/Recall+Precision</td></tr><tr><td>AUC-Score</td><td>\\(\\int_0^1 \\frac{TP}{TP+FN} d\\frac{FP}{TN+FP}\\)</td></tr></table>",
|
| 1146 |
+
"bbox": [
|
| 1147 |
+
124,
|
| 1148 |
+
619,
|
| 1149 |
+
416,
|
| 1150 |
+
715
|
| 1151 |
+
],
|
| 1152 |
+
"page_idx": 6
|
| 1153 |
+
},
|
| 1154 |
+
{
|
| 1155 |
+
"type": "text",
|
| 1156 |
+
"text": "Precision, Recall, F1-score and Area under the receiver operating characteristics (ROC) curve. All the above metrics can be obtained using the confusion matrix (CM).",
|
| 1157 |
+
"bbox": [
|
| 1158 |
+
57,
|
| 1159 |
+
740,
|
| 1160 |
+
485,
|
| 1161 |
+
781
|
| 1162 |
+
],
|
| 1163 |
+
"page_idx": 6
|
| 1164 |
+
},
|
| 1165 |
+
{
|
| 1166 |
+
"type": "text",
|
| 1167 |
+
"text": "Accuracy indicates that the model is well learned in case of a balanced test dataset; however, for imbalanced scenarios, as in this case, only considering accuracy measures may lead to misleading conclusion, since it is strongly biased in favor of the licit majority class. Thus, for this case, recall and F1-score metrics provide a more reasonable explanation of the model's performance.",
|
| 1168 |
+
"bbox": [
|
| 1169 |
+
57,
|
| 1170 |
+
782,
|
| 1171 |
+
485,
|
| 1172 |
+
879
|
| 1173 |
+
],
|
| 1174 |
+
"page_idx": 6
|
| 1175 |
+
},
|
| 1176 |
+
{
|
| 1177 |
+
"type": "text",
|
| 1178 |
+
"text": "Recall (also known as Detection Rate) is the total number of",
|
| 1179 |
+
"bbox": [
|
| 1180 |
+
82,
|
| 1181 |
+
882,
|
| 1182 |
+
485,
|
| 1183 |
+
895
|
| 1184 |
+
],
|
| 1185 |
+
"page_idx": 6
|
| 1186 |
+
},
|
| 1187 |
+
{
|
| 1188 |
+
"type": "text",
|
| 1189 |
+
"text": "true positives divided by the total number of true positives and false negatives. If the recall rate is very low, this means that the classifier cannot detect illicit transactions.",
|
| 1190 |
+
"bbox": [
|
| 1191 |
+
507,
|
| 1192 |
+
599,
|
| 1193 |
+
936,
|
| 1194 |
+
640
|
| 1195 |
+
],
|
| 1196 |
+
"page_idx": 6
|
| 1197 |
+
},
|
| 1198 |
+
{
|
| 1199 |
+
"type": "text",
|
| 1200 |
+
"text": "Precision measures the quality of the correct predictions. This is the number of true positives divided by the number of true positives and false positives. If the false positive is very high, it will cause low precision. Our goal is to maximize the precision as much as possible.",
|
| 1201 |
+
"bbox": [
|
| 1202 |
+
507,
|
| 1203 |
+
642,
|
| 1204 |
+
937,
|
| 1205 |
+
712
|
| 1206 |
+
],
|
| 1207 |
+
"page_idx": 6
|
| 1208 |
+
},
|
| 1209 |
+
{
|
| 1210 |
+
"type": "text",
|
| 1211 |
+
"text": "F1-score is the trade-off between precision and recall. Mathematically, it is the harmonic mean of precision and recall.",
|
| 1212 |
+
"bbox": [
|
| 1213 |
+
507,
|
| 1214 |
+
713,
|
| 1215 |
+
937,
|
| 1216 |
+
741
|
| 1217 |
+
],
|
| 1218 |
+
"page_idx": 6
|
| 1219 |
+
},
|
| 1220 |
+
{
|
| 1221 |
+
"type": "text",
|
| 1222 |
+
"text": "The area under the curve (AUC) computes the trade-off between sensitivity and specificity, plotted based on the trade-off between the true positive rate on the y-axis and the false positive rate on the x-axis. Our goal is to maximize the AUC score as much as possible, making it closer to 1.0.",
|
| 1223 |
+
"bbox": [
|
| 1224 |
+
507,
|
| 1225 |
+
741,
|
| 1226 |
+
937,
|
| 1227 |
+
812
|
| 1228 |
+
],
|
| 1229 |
+
"page_idx": 6
|
| 1230 |
+
},
|
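The four metrics defined above map directly onto standard library calls. Below is a minimal sketch in Python, assuming binary labels with the illicit class encoded as 1; the `y_true`/`y_score` arrays and the 0.5 threshold are illustrative placeholders, not values from the paper:

```python
import numpy as np
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score

# Illustrative ground truth and classifier scores (illicit = 1, licit = 0).
y_true = np.array([1, 0, 0, 1, 1, 0, 0, 0, 1, 0])
y_score = np.array([0.9, 0.2, 0.4, 0.7, 0.3, 0.1, 0.6, 0.2, 0.8, 0.05])
y_pred = (y_score >= 0.5).astype(int)  # hard labels at an assumed 0.5 threshold

# Detection Rate (Recall) = TP / (TP + FN)
recall = recall_score(y_true, y_pred)
# Precision = TP / (TP + FP)
precision = precision_score(y_true, y_pred)
# F1 = 2 * Recall * Precision / (Recall + Precision)
f1 = f1_score(y_true, y_pred)
# AUC integrates the TPR over the FPR, so it needs scores rather than hard labels.
auc = roc_auc_score(y_true, y_score)

print(f"Recall={recall:.3f} Precision={precision:.3f} F1={f1:.3f} AUC={auc:.3f}")
```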
| 1231 |
+
{
|
| 1232 |
+
"type": "text",
|
| 1233 |
+
"text": "5.3. Experimental Results",
|
| 1234 |
+
"text_level": 1,
|
| 1235 |
+
"bbox": [
|
| 1236 |
+
509,
|
| 1237 |
+
826,
|
| 1238 |
+
690,
|
| 1239 |
+
839
|
| 1240 |
+
],
|
| 1241 |
+
"page_idx": 6
|
| 1242 |
+
},
|
| 1243 |
+
{
|
| 1244 |
+
"type": "text",
|
| 1245 |
+
"text": "Table 3 shows the corresponding results of our Inspection-L compared to the state-of-the-art in terms of the key metrics. As can be observed from the table, regarding to illicit F1-Score, Inspection-L (LF+DNE and AF+DNE) outperforms the best",
|
| 1246 |
+
"bbox": [
|
| 1247 |
+
507,
|
| 1248 |
+
844,
|
| 1249 |
+
937,
|
| 1250 |
+
901
|
| 1251 |
+
],
|
| 1252 |
+
"page_idx": 6
|
| 1253 |
+
},
|
| 1254 |
+
{
|
| 1255 |
+
"type": "page_number",
|
| 1256 |
+
"text": "7",
|
| 1257 |
+
"bbox": [
|
| 1258 |
+
492,
|
| 1259 |
+
914,
|
| 1260 |
+
502,
|
| 1261 |
+
923
|
| 1262 |
+
],
|
| 1263 |
+
"page_idx": 6
|
| 1264 |
+
},
|
| 1265 |
+
{
|
| 1266 |
+
"type": "image",
|
| 1267 |
+
"img_path": "images/47d0b140202ca03ae75c0e8f01cfc2d87dfe8c169da31ba531470d0090cd5614.jpg",
|
| 1268 |
+
"image_caption": [
|
| 1269 |
+
"(a) AF + DNE"
|
| 1270 |
+
],
|
| 1271 |
+
"image_footnote": [],
|
| 1272 |
+
"bbox": [
|
| 1273 |
+
144,
|
| 1274 |
+
93,
|
| 1275 |
+
366,
|
| 1276 |
+
242
|
| 1277 |
+
],
|
| 1278 |
+
"page_idx": 7
|
| 1279 |
+
},
|
| 1280 |
+
{
|
| 1281 |
+
"type": "image",
|
| 1282 |
+
"img_path": "images/93bf8f486221a9a2f176fe6c73f3f446a1e0c63ea4e8c43306931605be8125d6.jpg",
|
| 1283 |
+
"image_caption": [
|
| 1284 |
+
"(b) LF + DNE",
|
| 1285 |
+
"Figure 5: Confusion Matrix"
|
| 1286 |
+
],
|
| 1287 |
+
"image_footnote": [],
|
| 1288 |
+
"bbox": [
|
| 1289 |
+
378,
|
| 1290 |
+
95,
|
| 1291 |
+
600,
|
| 1292 |
+
242
|
| 1293 |
+
],
|
| 1294 |
+
"page_idx": 7
|
| 1295 |
+
},
|
| 1296 |
+
{
|
| 1297 |
+
"type": "image",
|
| 1298 |
+
"img_path": "images/46e3fb9c36111a26b3b35d6ebcfea24bc884357157ecb5c2b4d81577dabb0ffd.jpg",
|
| 1299 |
+
"image_caption": [
|
| 1300 |
+
"(c) DNE"
|
| 1301 |
+
],
|
| 1302 |
+
"image_footnote": [],
|
| 1303 |
+
"bbox": [
|
| 1304 |
+
631,
|
| 1305 |
+
96,
|
| 1306 |
+
852,
|
| 1307 |
+
241
|
| 1308 |
+
],
|
| 1309 |
+
"page_idx": 7
|
| 1310 |
+
},
|
| 1311 |
+
{
|
| 1312 |
+
"type": "table",
|
| 1313 |
+
"img_path": "images/6426aa00fdb906da97d1672922126fbc5d67d449fd39182e9824fa3acd417031.jpg",
|
| 1314 |
+
"table_caption": [
|
| 1315 |
+
"Table 3: Results of binary classification by Inspection-L compared to the state-of-the-art. AF refers to all raw features, LF refers to the local raw features, i.e., the first 94 raw features, GNE refers to the node embeddings generated by GCN in [5] using labels and DNE refers to the node embeddings computed by DGI without using labels."
|
| 1316 |
+
],
|
| 1317 |
+
"table_footnote": [],
|
| 1318 |
+
"table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"3\">Illicit</td><td rowspan=\"2\">AUC</td></tr><tr><td>Precision</td><td>Recall</td><td>F1</td></tr><tr><td>Logistic RegrAF [5]</td><td>0.404</td><td>0.593</td><td>0.481</td><td>-</td></tr><tr><td>Logistic Regr AF + GNE [5]</td><td>0.537</td><td>0.528</td><td>0.533</td><td>-</td></tr><tr><td>Logistic Regr LF [5]</td><td>0.348</td><td>0.668</td><td>0.457</td><td>-</td></tr><tr><td>Logistic Regr LF + GNE [5]</td><td>0.518</td><td>0.571</td><td>0.543</td><td>-</td></tr><tr><td>RandomForest AF [5]</td><td>0.956</td><td>0.670</td><td>0.788</td><td>-</td></tr><tr><td>RandomForest AF + GNE [5]</td><td>0.971</td><td>0.675</td><td>0.796</td><td>-</td></tr><tr><td>RandomForest AF [14]</td><td>0.897</td><td>0.721</td><td>0.800</td><td>-</td></tr><tr><td>RandomForest AF + GNE [14]</td><td>0.958</td><td>0.715</td><td>0.819</td><td>-</td></tr><tr><td>XGB AF [14]</td><td>0.921</td><td>0.732</td><td>0.815</td><td>-</td></tr><tr><td>XGB AF + GNE [14]</td><td>0.986</td><td>0.692</td><td>0.813</td><td>-</td></tr><tr><td>RandomForest LF [5]</td><td>0.803</td><td>0.611</td><td>0.694</td><td>-</td></tr><tr><td>RandomForest LF + GNE [5]</td><td>0.878</td><td>0.668</td><td>0.759</td><td>-</td></tr><tr><td>MLP AF [5]</td><td>0.694</td><td>0.617</td><td>0.653</td><td>-</td></tr><tr><td>MLP AF + GNE [5]</td><td>0.780</td><td>0.617</td><td>0.689</td><td>-</td></tr><tr><td>MLP LF [5]</td><td>0.637</td><td>0.662</td><td>0.649</td><td>-</td></tr><tr><td>MLP LF + GNE [5]</td><td>0.681</td><td>0.578</td><td>0.625</td><td>-</td></tr><tr><td>GCN [5]</td><td>0.812</td><td>0.512</td><td>0.628</td><td>-</td></tr><tr><td>GCN [16]</td><td>0.899</td><td>0.678</td><td>0.773</td><td>-</td></tr><tr><td>Skip-GCN [5]</td><td>0.812</td><td>0.623</td><td>0.705</td><td>-</td></tr><tr><td>EvolveGCN [5]</td><td>0.850</td><td>0.624</td><td>0.720</td><td>-</td></tr><tr><td>Inspection-L DNE (RF)</td><td>0.593</td><td>0.032</td><td>0.061</td><td>0.735</td></tr><tr><td>Inspection-L LF + DNE (RF)</td><td>0.906</td><td>0.712</td><td>0.797</td><td>0.895</td></tr><tr><td>Inspection-L AF + DNE (RF)</td><td>0.972</td><td>0.721</td><td>0.828</td><td>0.916</td></tr></table>",
|
| 1319 |
+
"bbox": [
|
| 1320 |
+
63,
|
| 1321 |
+
370,
|
| 1322 |
+
484,
|
| 1323 |
+
659
|
| 1324 |
+
],
|
| 1325 |
+
"page_idx": 7
|
| 1326 |
+
},
|
| 1327 |
+
{
|
| 1328 |
+
"type": "text",
|
| 1329 |
+
"text": "reported classifiers. In the best-performing variant, AF+DNE, we concatenated the node embeddings generated from DGI with all original raw features (AF). The experiment achieved an F1 score and Recall of 0.828 and 0.721, respectively. Using all features (AF) with node embeddings (DNE) as input for classification, the ML model's performance significantly increased, with an AUC of 0.916, compared to 0.735 when only the node embeddings were used for classification. The experiments demonstrate that graph information (node embeddings) is useful to enhance the transaction representations (embeddings).",
|
| 1330 |
+
"bbox": [
|
| 1331 |
+
55,
|
| 1332 |
+
682,
|
| 1333 |
+
485,
|
| 1334 |
+
824
|
| 1335 |
+
],
|
| 1336 |
+
"page_idx": 7
|
| 1337 |
+
},
|
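The AF+DNE variant described above reduces to a simple feature-augmentation pipeline: concatenate the self-supervised node embeddings with the raw features and train a Random Forest on the result. A minimal sketch with synthetic stand-in arrays; the shapes (166 raw features, 128-dimensional embeddings) and the 100-tree forest are illustrative assumptions, not the paper's exact configuration:

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score, roc_auc_score

rng = np.random.default_rng(0)

# Stand-ins for the real inputs: raw transaction features (AF) and
# DGI node embeddings (DNE), aligned row-by-row on the same nodes.
raw_features = rng.normal(size=(1000, 166))    # e.g. the 166 Elliptic features
dgi_embeddings = rng.normal(size=(1000, 128))  # embedding size is an assumption
labels = rng.integers(0, 2, size=1000)         # 1 = illicit, 0 = licit

# AF + DNE: augment the raw features with the learned embeddings.
x_all = np.concatenate([raw_features, dgi_embeddings], axis=1)

clf = RandomForestClassifier(n_estimators=100, random_state=0)
clf.fit(x_all[:800], labels[:800])  # simple temporal-style split for illustration

scores = clf.predict_proba(x_all[800:])[:, 1]
preds = clf.predict(x_all[800:])
print("F1:", f1_score(labels[800:], preds))
print("AUC:", roc_auc_score(labels[800:], scores))
```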
| 1338 |
+
{
|
| 1339 |
+
"type": "text",
|
| 1340 |
+
"text": "In the second experiment LF+DNE, we concatenated the node embeddings generated from DGI with the local features (LF), which can achieve an F1-score and Recall of 0.712 and 0.797, respectively. Both the results were superior to the state-of-the-art algorithms.",
|
| 1341 |
+
"bbox": [
|
| 1342 |
+
55,
|
| 1343 |
+
824,
|
| 1344 |
+
485,
|
| 1345 |
+
896
|
| 1346 |
+
],
|
| 1347 |
+
"page_idx": 7
|
| 1348 |
+
},
|
| 1349 |
+
{
|
| 1350 |
+
"type": "text",
|
| 1351 |
+
"text": "These results demonstrate the ability of our self-supervised GIN-based approach to generate an enhanced feature set to improve anti-money-laundering detection performance. Furthermore, the results show that the accuracy of the model improves with the enhanced feature set, which contains summary information. Note that the summary information in the AF feature set consists of 1-hop forward and 1-hop backward neighborhood summaries for each node. Unfortunately, the Elliptic dataset does not provide detailed information regarding the feature descriptions, possibly due to confidentially reasons, which limits our ability to provide a deeper discussion.",
|
| 1352 |
+
"bbox": [
|
| 1353 |
+
507,
|
| 1354 |
+
304,
|
| 1355 |
+
937,
|
| 1356 |
+
461
|
| 1357 |
+
],
|
| 1358 |
+
"page_idx": 7
|
| 1359 |
+
},
|
| 1360 |
+
{
|
| 1361 |
+
"type": "text",
|
| 1362 |
+
"text": "Figure 4 shows the F1 measure of the three different model variants across various testing timesteps. Interestingly, none of the three variants can detect new illicit transactions with high precision after dark market shutdown, which occurs at time step 43 [5]. Thus, we note that developing robust methods to detect illicit transactions without their being affected by emerging events is a major challenge that future works need to address.",
|
| 1363 |
+
"bbox": [
|
| 1364 |
+
507,
|
| 1365 |
+
461,
|
| 1366 |
+
937,
|
| 1367 |
+
560
|
| 1368 |
+
],
|
| 1369 |
+
"page_idx": 7
|
| 1370 |
+
},
|
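The per-timestep breakdown in Figure 4 can be reproduced by grouping the test predictions by time step and computing the illicit F1 within each group. A short sketch; the `time_steps`, `y_true`, and `y_pred` arrays are hypothetical placeholders:

```python
import numpy as np
from sklearn.metrics import f1_score

# Placeholders: per-node test time steps, labels, and model predictions.
time_steps = np.array([35, 35, 36, 36, 43, 43, 44, 44])
y_true     = np.array([ 1,  0,  1,  0,  1,  0,  1,  0])
y_pred     = np.array([ 1,  0,  1,  0,  0,  0,  0,  0])

# Illicit F1 per test time step, as plotted in Figure 4.
for t in np.unique(time_steps):
    mask = time_steps == t
    f1_t = f1_score(y_true[mask], y_pred[mask], zero_division=0)
    print(f"time step {t}: illicit F1 = {f1_t:.2f}")
```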
| 1371 |
+
{
|
| 1372 |
+
"type": "text",
|
| 1373 |
+
"text": "Figure 5 shows the confusion matrix of the three different scenarios. Although the classifier trained with embedding features cannot accurately detect illicit transactions, it rarely classifies licit transactions as illicit. Therefore, the false alarm rate is very low, as shown in Figure 5c. The RF classifier trained using both raw features and embedding features, shown in Figure 5a,5b, has the advantage of achieving a high detection rate and a low false alarm rate. As a result, the experimental results demonstrate that DNE node embeddings can be used for feature augmentation to improve overall detection performance.",
|
| 1374 |
+
"bbox": [
|
| 1375 |
+
507,
|
| 1376 |
+
560,
|
| 1377 |
+
937,
|
| 1378 |
+
703
|
| 1379 |
+
],
|
| 1380 |
+
"page_idx": 7
|
| 1381 |
+
},
|
| 1382 |
+
{
|
| 1383 |
+
"type": "text",
|
| 1384 |
+
"text": "5.4. Broader applications of AML",
|
| 1385 |
+
"text_level": 1,
|
| 1386 |
+
"bbox": [
|
| 1387 |
+
509,
|
| 1388 |
+
715,
|
| 1389 |
+
746,
|
| 1390 |
+
731
|
| 1391 |
+
],
|
| 1392 |
+
"page_idx": 7
|
| 1393 |
+
},
|
| 1394 |
+
{
|
| 1395 |
+
"type": "text",
|
| 1396 |
+
"text": "The blockchain operates as a decentralized bank for bitcoin cryptocurrency [31]. All bitcoin transactions are permanently recorded on the blockchain, which is a visible and verifiable public ledger [32]. Bitcoin addresses are not registered to individuals, in contrast to bank accounts [2]. Thus, due to this pseudo-anonymity [11], bitcoin and other crypto-currencies are increasingly used for ransomware [2], ponzi schemes [11] and illicit material trade on the dark web [23]",
|
| 1397 |
+
"bbox": [
|
| 1398 |
+
507,
|
| 1399 |
+
734,
|
| 1400 |
+
937,
|
| 1401 |
+
848
|
| 1402 |
+
],
|
| 1403 |
+
"page_idx": 7
|
| 1404 |
+
},
|
| 1405 |
+
{
|
| 1406 |
+
"type": "text",
|
| 1407 |
+
"text": "While bitcoin transactions are difficult to track, they are not completely anonymous [2]. Users can be traced by their IP addresses and transaction flows [32]. An analysis of the bitcoin graph can reveal suspicious behavior patterns characteristic of",
|
| 1408 |
+
"bbox": [
|
| 1409 |
+
507,
|
| 1410 |
+
848,
|
| 1411 |
+
937,
|
| 1412 |
+
904
|
| 1413 |
+
],
|
| 1414 |
+
"page_idx": 7
|
| 1415 |
+
},
|
| 1416 |
+
{
|
| 1417 |
+
"type": "page_number",
|
| 1418 |
+
"text": "8",
|
| 1419 |
+
"bbox": [
|
| 1420 |
+
492,
|
| 1421 |
+
914,
|
| 1422 |
+
504,
|
| 1423 |
+
925
|
| 1424 |
+
],
|
| 1425 |
+
"page_idx": 7
|
| 1426 |
+
},
|
| 1427 |
+
{
|
| 1428 |
+
"type": "text",
|
| 1429 |
+
"text": "money laundering [2]. To break the tell-tale transnational link between bitcoin transactions and illegal activity, bitcoin mixing services provide a new, untainted bitcoin address from their reserves and the pay-outs are spread out over time [2]. Bitcoin Fog is a service that hides transaction origins by bundling multiple inputs into a smaller number of larger outputs [11]. However, the additional obscuring activities themselves could add characteristic signatures into transaction flows. Thus, it is still possible to detect patterns in the underlying transaction flow to facilitate AML detection [11, 5]. Unfortunately, next-generation cryptocurrencies such as Monero, Dash, and Z-Cash, with built-in anonymity features, make tracking and detection challenging [2]. As a result, there is a constant need for improved AML detection methodologies.",
|
| 1430 |
+
"bbox": [
|
| 1431 |
+
57,
|
| 1432 |
+
96,
|
| 1433 |
+
485,
|
| 1434 |
+
294
|
| 1435 |
+
],
|
| 1436 |
+
"page_idx": 8
|
| 1437 |
+
},
|
| 1438 |
+
{
|
| 1439 |
+
"type": "text",
|
| 1440 |
+
"text": "6. Conclusions and Future Work",
|
| 1441 |
+
"text_level": 1,
|
| 1442 |
+
"bbox": [
|
| 1443 |
+
58,
|
| 1444 |
+
313,
|
| 1445 |
+
302,
|
| 1446 |
+
328
|
| 1447 |
+
],
|
| 1448 |
+
"page_idx": 8
|
| 1449 |
+
},
|
| 1450 |
+
{
|
| 1451 |
+
"type": "text",
|
| 1452 |
+
"text": "This paper presents a novel approach for the detection of illicit Bitcoin transactions based on self-supervised GNNs. We first used the DGI to generate the node embedding with raw features to train the Random Forest for detection. Our experimental evaluation indicates that our approach performs exceptionally well and outperforms the state-of-the-art ML-based/Graph-based classifier overall. The evaluation results of our initial classifier demonstrate the potential of using a self-supervised GNN-based approach for illegal transaction detection in cryptocurrencies. We hope to inspire others to work on the important challenge of using graph machine learning to perform financial forensics through this research, which is lacking in the current research. In the future, we plan to integrate this with unsupervised anomaly detection algorithms to detect illegal transactions in an unsupervised manner.",
|
| 1453 |
+
"bbox": [
|
| 1454 |
+
60,
|
| 1455 |
+
337,
|
| 1456 |
+
485,
|
| 1457 |
+
549
|
| 1458 |
+
],
|
| 1459 |
+
"page_idx": 8
|
| 1460 |
+
},
|
| 1461 |
+
{
|
| 1462 |
+
"type": "text",
|
| 1463 |
+
"text": "References",
|
| 1464 |
+
"text_level": 1,
|
| 1465 |
+
"bbox": [
|
| 1466 |
+
58,
|
| 1467 |
+
568,
|
| 1468 |
+
141,
|
| 1469 |
+
581
|
| 1470 |
+
],
|
| 1471 |
+
"page_idx": 8
|
| 1472 |
+
},
|
| 1473 |
+
{
|
| 1474 |
+
"type": "list",
|
| 1475 |
+
"sub_type": "ref_text",
|
| 1476 |
+
"list_items": [
|
| 1477 |
+
"[1] S. Nakamoto, Bitcoin: A peer-to-peer electronic cash system, Technical Report, Manubot, 2019.",
|
| 1478 |
+
"[2] N. Kshetri, J. Voas, Do crypto-currencies fuel ransomware?, in: IT professional, volume 19, IEEE, 2017, pp. 11-15.",
|
| 1479 |
+
"[3] Z. Wu, S. Pan, F. Chen, G. Long, C. Zhang, P. S. Yu, A comprehensive survey on graph neural networks, in: IEEE Transactions on Neural Networks and Learning Systems, volume 32, 2021, pp. 4-24.",
|
| 1480 |
+
"[4] P. Velicković, W. Fedus, W. L. Hamilton, P. Lio, Y. Bengio, R. D. Hjelm, Deep graph infomax, in: International Conference on Learning Representations, 2019. URL: https://openreview.net/forum?id=rklz9iAcKQ.",
|
| 1481 |
+
"[5] M. Weber, G. Domeniconi, J. Chen, D. K. I. Weidele, C. Bellei, T. Robinson, C. E. Leiserson, Anti-money laundering in bitcoin: Experimenting with graph convolutional networks for financial forensics, in: ACM SIGKDD International Workshop on Knowledge discovery and data mining, 2019.",
|
| 1482 |
+
"[6] X. Liu, F. Zhang, Z. Hou, L. Mian, Z. Wang, J. Zhang, J. Tang, Self-supervised learning: Generative or contrastive, in: IEEE Transactions on Knowledge and Data Engineering, 2021, pp. 1-1. doi:10.1109/TKDE.2021.3090866.",
|
| 1483 |
+
"[7] Y. Liu, M. Jin, S. Pan, C. Zhou, Y. Zheng, F. Xia, P. Yu, Graph self-supervised learning: A survey, in: IEEE Transactions on Knowledge and Data Engineering, 2022, pp. 1-1. doi:10.1109/TKDE.2022.3172903.",
|
| 1484 |
+
"[8] C. M. Bishop, N. M. Nasrabadi, Pattern recognition and machine learning, volume 4, Springer, 2006.",
|
| 1485 |
+
"[9] T. N. Kipf, M. Welling, Semi-supervised classification with graph convolutional networks, in: International Conference on Learning Representations, 2017."
|
| 1486 |
+
],
|
| 1487 |
+
"bbox": [
|
| 1488 |
+
65,
|
| 1489 |
+
589,
|
| 1490 |
+
485,
|
| 1491 |
+
904
|
| 1492 |
+
],
|
| 1493 |
+
"page_idx": 8
|
| 1494 |
+
},
|
| 1495 |
+
{
|
| 1496 |
+
"type": "list",
|
| 1497 |
+
"sub_type": "ref_text",
|
| 1498 |
+
"list_items": [
|
| 1499 |
+
"[10] A. Pareja, G. Domeniconi, J. Chen, T. Ma, T. Suzumura, H. Kanezashi, T. Kaler, T. Schardl, C. Leiserson, Evolvegen: Evolving graph convolutional networks for dynamic graphs, in: Proceedings of the AAAI Conference on Artificial Intelligence, 2020, pp. 5363-5370.",
|
| 1500 |
+
"[11] Y. Hu, S. Seneviratne, K. Thilakarathna, K. Fukuda, A. Seneviratne, Characterizing and detecting money laundering activities on the bitcoin network, arXiv preprint arXiv:1912.12060 (2019).",
|
| 1501 |
+
"[12] J. A. Bondy, U. S. R. Murty, et al., Graph theory with applications, volume 290, Macmillan London, 1976.",
|
| 1502 |
+
"[13] A. Grover, J. Leskovec, node2vec: Scalable feature learning for networks, in: Proceedings of the 22nd ACM SIGKDD international conference on Knowledge discovery and data mining, 2016, pp. 855-864.",
|
| 1503 |
+
"[14] D. Vassallo, V. Vella, J. Ellul, Application of gradient boosting algorithms for anti-money laundering in cryptocurrencies, in: SN Computer Science, volume 2, Springer, 2021, pp. 1-15.",
|
| 1504 |
+
"[15] C. Lee, S. Maharjan, K. Ko, J. W.-K. Hong, Toward detecting illegal transactions on bitcoin using machine-learning methods, in: International Conference on Blockchain and Trustworthy Systems, Springer, 2019, pp. 520-533.",
|
| 1505 |
+
"[16] I. Alarab, S. Prakoonwit, M. I. Nacer, Competence of graph convolutional networks for anti-money laundering in bitcoin blockchain, in: Proceedings of the 2020 5th International Conference on Machine Learning Technologies, 2020, pp. 23-27.",
|
| 1506 |
+
"[17] L. Nan, D. Tao, Bitcoin mixing detection using deep autoencoder, in: 2018 IEEE Third international conference on data science in cyberspace (DSC), IEEE, 2018, pp. 280-287.",
|
| 1507 |
+
"[18] J. Lorenz, M. I. Silva, D. Aparicio, J. T. Ascensão, P. Bizarro, Machine learning methods to detect money laundering in the bitcoin blockchain in the presence of label scarcity, in: Proceedings of the First ACM International Conference on AI in Finance, 2020, pp. 1-8.",
|
| 1508 |
+
"[19] T. Pham, S. Lee, Anomaly detection in bitcoin network using unsupervised learning methods, in: arXiv preprint arXiv:1611.03941, 2016.",
|
| 1509 |
+
"[20] P. Monamo, V. Marivate, B. Twala, Unsupervised learning for robust bitcoin fraud detection, in: 2016 Information Security for South Africa (ISSA), IEEE, 2016, pp. 129-134.",
|
| 1510 |
+
"[21] S. Li, F. Xu, R. Wang, S. Zhong, Self-supervised incremental deep graph learning for ethereum phishing scam detection, in: arXiv preprint arXiv:2106.10176, 2021.",
|
| 1511 |
+
"[22] N. Shervashidze, P. Schweitzer, E. J. Van Leeuwen, K. Mehlhorn, K. M. Borgwardt, Weisfeiler-lehman graph kernels., in: booktitle of Machine Learning Research, volume 12, 2011.",
|
| 1512 |
+
"[23] G. K. Kulatilleke, M. Portmann, R. Ko, S. S. Chandra, Fdgatii: Fast dynamic graph attention with initial residual and identity mapping, in: arXiv preprint arXiv:2110.11464, 2021.",
|
| 1513 |
+
"[24] K. Xu, W. Hu, J. Leskovec, S. Jegelka, How powerful are graph neural networks?, in: International Conference on Learning Representations, 2019. URL: https://openreview.net/forum?id=ryGs6iA5Km.",
|
| 1514 |
+
"[25] W. L. Hamilton, R. Ying, J. Leskovec, Inductive representation learning on large graphs, in: Advances in Neural Information Processing Systems, 2017. arXiv:1706.02216.",
|
| 1515 |
+
"[26] C. Zhang, D. Song, C. Huang, A. Swami, N. V. Chawla, Heterogeneous graph neural network, in: Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, 2019, pp. 793-803.",
|
| 1516 |
+
"[27] P. Velickovic, G. Cucurull, A. Casanova, A. Romero, P. Lio, Y. Bengio, Graph attention networks, in: International Conference on Learning Representations (ICLR), 2018.",
|
| 1517 |
+
"[28] S. Yun, M. Jeong, R. Kim, J. Kang, H. J. Kim, Graph transformer networks, in: Advances in neural information processing systems, volume 32, 2019.",
|
| 1518 |
+
"[29] S. Ioffe, C. Szegedy, Batch normalization: Accelerating deep network training by reducing internal covariate shift, in: International conference on machine learning, PMLR, 2015, pp. 448-456.",
|
| 1519 |
+
"[30] D. T. Robinson, How to Combat Financial Crime in Cryptocurrencies, 2019. URL: https://www.elliptic.co/blog/elliptic-dataset-cryptocurrency-financial-crime.",
|
| 1520 |
+
"[31] S. Nakamoto, Bitcoin: a peer-to-peer electronic cash system [eb/ol], Consulted 1 (2008) 28.",
|
| 1521 |
+
"[32] R. Van Wegberg, J.-J. Oerlemans, O. van Deventer, Bitcoin money laundering: mixed results? an explorative study on money laundering of cybercrime proceeds using bitcoin, Journal of Financial Crime (2018)."
|
| 1522 |
+
],
|
| 1523 |
+
"bbox": [
|
| 1524 |
+
512,
|
| 1525 |
+
98,
|
| 1526 |
+
937,
|
| 1527 |
+
897
|
| 1528 |
+
],
|
| 1529 |
+
"page_idx": 8
|
| 1530 |
+
},
|
| 1531 |
+
{
|
| 1532 |
+
"type": "page_number",
|
| 1533 |
+
"text": "9",
|
| 1534 |
+
"bbox": [
|
| 1535 |
+
492,
|
| 1536 |
+
913,
|
| 1537 |
+
504,
|
| 1538 |
+
925
|
| 1539 |
+
],
|
| 1540 |
+
"page_idx": 8
|
| 1541 |
+
}
|
| 1542 |
+
]
|
2203.10xxx/2203.10465/7daffd3a-1fdb-4369-b5ad-2de6805c1054_model.json
ADDED
|
@@ -0,0 +1,2033 @@
|
| 1 |
+
[
|
| 2 |
+
[
|
| 3 |
+
{
|
| 4 |
+
"type": "aside_text",
|
| 5 |
+
"bbox": [
|
| 6 |
+
0.023,
|
| 7 |
+
0.316,
|
| 8 |
+
0.058,
|
| 9 |
+
0.718
|
| 10 |
+
],
|
| 11 |
+
"angle": 270,
|
| 12 |
+
"content": "arXiv:2203.10465v4 [cs.CR] 9 Oct 2022"
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "title",
|
| 16 |
+
"bbox": [
|
| 17 |
+
0.231,
|
| 18 |
+
0.104,
|
| 19 |
+
0.768,
|
| 20 |
+
0.145
|
| 21 |
+
],
|
| 22 |
+
"angle": 0,
|
| 23 |
+
"content": "Inspection-L: Self-Supervised GNN Node Embeddings for Money Laundering Detection in Bitcoin"
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"bbox": [
|
| 28 |
+
0.172,
|
| 29 |
+
0.165,
|
| 30 |
+
0.825,
|
| 31 |
+
0.18
|
| 32 |
+
],
|
| 33 |
+
"angle": 0,
|
| 34 |
+
"content": "Wai Weng Lo\\(^{a,\\ast}\\), Gayan K. Kulatilleke\\(^{a}\\), Mohanad Sarhan\\(^{a}\\), Siamak Layeghy\\(^{a}\\), Marius Portmann\\(^{a}\\)"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"bbox": [
|
| 39 |
+
0.357,
|
| 40 |
+
0.19,
|
| 41 |
+
0.641,
|
| 42 |
+
0.202
|
| 43 |
+
],
|
| 44 |
+
"angle": 0,
|
| 45 |
+
"content": "aThe University of Queensland, Brisbane, Australia"
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "title",
|
| 49 |
+
"bbox": [
|
| 50 |
+
0.06,
|
| 51 |
+
0.256,
|
| 52 |
+
0.127,
|
| 53 |
+
0.269
|
| 54 |
+
],
|
| 55 |
+
"angle": 0,
|
| 56 |
+
"content": "Abstract"
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"bbox": [
|
| 61 |
+
0.058,
|
| 62 |
+
0.277,
|
| 63 |
+
0.94,
|
| 64 |
+
0.406
|
| 65 |
+
],
|
| 66 |
+
"angle": 0,
|
| 67 |
+
"content": "Criminals have become increasingly experienced in using cryptocurrencies, such as Bitcoin, for money laundering. The use of cryptocurrencies can hide criminal identities and transfer hundreds of millions of dollars of dirty funds through their criminal digital wallets. However, this is considered a paradox because cryptocurrencies are goldmines for open-source intelligence, giving law enforcement agencies more power when conducting forensic analyses. This paper proposed Inspection-L, a graph neural network (GNN) framework based on a self-supervised Deep Graph Infomax (DGI) and Graph Isomorphism Network (GIN), with supervised learning algorithms, namely Random Forest (RF), to detect illicit transactions for anti-money laundering (AML). To the best of our knowledge, our proposal is the first to apply self-supervised GNNs to the problem of AML in Bitcoin. The proposed method was evaluated on the Elliptic dataset and shows that our approach outperforms the state-of-the-art in terms of key classification metrics, which demonstrates the potential of self-supervised GNN in the detection of illicit cryptocurrency transactions."
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"bbox": [
|
| 72 |
+
0.059,
|
| 73 |
+
0.413,
|
| 74 |
+
0.741,
|
| 75 |
+
0.428
|
| 76 |
+
],
|
| 77 |
+
"angle": 0,
|
| 78 |
+
"content": "Keywords: graph neural networks; machine learning; forensics; anomaly detection; cryptocurrencies"
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "title",
|
| 82 |
+
"bbox": [
|
| 83 |
+
0.06,
|
| 84 |
+
0.454,
|
| 85 |
+
0.176,
|
| 86 |
+
0.467
|
| 87 |
+
],
|
| 88 |
+
"angle": 0,
|
| 89 |
+
"content": "1. Introduction"
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"type": "text",
|
| 93 |
+
"bbox": [
|
| 94 |
+
0.058,
|
| 95 |
+
0.478,
|
| 96 |
+
0.505,
|
| 97 |
+
0.746
|
| 98 |
+
],
|
| 99 |
+
"angle": 0,
|
| 100 |
+
"content": "The advent of the first cryptocurrency—Bitcoin [1]—has revolutionized the conventional financial ecosystem, as it enables low-cost, near-anonymous, peer-to-peer cash transfers within and across various borders. Due to its pseudonymity, many cybercriminals, terrorists, and hackers have started to use cryptocurrency for illegal transactions. For example, the WannaCry ransomware attack used Bitcoin [2] as the payment method due to its non-traceability. The criminals received nearly 3.4 million (46.4 BTC) within four days of the WannaCry attack [2]. Therefore, effective detection of illicit transactions in Bitcoin transaction graphs is essential for preventing illegal transactions. Paradoxically, cryptocurrencies are goldmines for open-source intelligence, as transaction network data are publicly available, enabling law enforcement agencies to conduct a forensic analysis of the transaction's linkages and flows. However, the problem is challenging for law enforcement agencies, owing to its volume<sup>1</sup>, the untraceable p2p cross-border nature of Bitcoin transactions, and the use of technologies such as mixers and tumblers."
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"type": "text",
|
| 104 |
+
"bbox": [
|
| 105 |
+
0.058,
|
| 106 |
+
0.748,
|
| 107 |
+
0.487,
|
| 108 |
+
0.805
|
| 109 |
+
],
|
| 110 |
+
"angle": 0,
|
| 111 |
+
"content": "Graph representation learning has shown great potential for detecting money laundering activities using cryptocurrencies. GNNs are tailored to applications with graph-structured data, such as the social sciences, chemistry, and telecommunications,"
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"type": "text",
|
| 115 |
+
"bbox": [
|
| 116 |
+
0.509,
|
| 117 |
+
0.454,
|
| 118 |
+
0.938,
|
| 119 |
+
0.525
|
| 120 |
+
],
|
| 121 |
+
"angle": 0,
|
| 122 |
+
"content": "and can leverage the inherent structure of the graph data by building relational inductive biases into the deep learning architecture. This provides the ability to learn, reason, and generalize from the graph data, inspired by the concept of message propagation [3]."
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"type": "text",
|
| 126 |
+
"bbox": [
|
| 127 |
+
0.509,
|
| 128 |
+
0.525,
|
| 129 |
+
0.938,
|
| 130 |
+
0.624
|
| 131 |
+
],
|
| 132 |
+
"angle": 0,
|
| 133 |
+
"content": "The Bitcoin transaction flow data can naturally be represented in graph format. A graph is constructed from the raw Bitcoin data and labeled such that the nodes represent transactions and the edges represent the flow of Bitcoin currency (BTC) from one transaction to the next in the adjacency matrix. Both the topological information and the information contained in the node features are crucial for detecting illicit transactions."
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"type": "text",
|
| 137 |
+
"bbox": [
|
| 138 |
+
0.509,
|
| 139 |
+
0.625,
|
| 140 |
+
0.938,
|
| 141 |
+
0.681
|
| 142 |
+
],
|
| 143 |
+
"angle": 0,
|
| 144 |
+
"content": "This paper proposes Inspection-L, a Graph Neural Network (GNN) framework based on an enhanced self-supervised Deep Graph Infomax (DGI) [4] and supervised Random Forest (RF)-based classifier to detect illicit transactions for AML."
|
| 145 |
+
},
|
| 146 |
+
{
|
| 147 |
+
"type": "text",
|
| 148 |
+
"bbox": [
|
| 149 |
+
0.509,
|
| 150 |
+
0.681,
|
| 151 |
+
0.946,
|
| 152 |
+
0.895
|
| 153 |
+
],
|
| 154 |
+
"angle": 0,
|
| 155 |
+
"content": "Specifically, we investigate the Elliptic dataset [5], a realistic, partially labeled Bitcoin temporal graph-based transaction dataset consisting of real entities belonging to licit (e.g., wallet, miners), illicit entities (e.g., scams, terrorist organizations, ransomware), and unknown transaction categories. The proposed Inspection-L framework aims to detect illegal transactions based on graph representation learning in a self-supervised manner. Current graph machine learning approaches, such as [5], generally apply supervised graph neural network approaches to the detection of illicit transactions. However, supervised learning requires manual labeling. In the AML scenario, building an effective model that utilizes unknown label data is required, since human's labeling Bitcoin data could be costly and ineffective. It also only performs well when the labels are enough. Thus, exploiting unlabeled data to improve performance is crit-"
|
| 156 |
+
},
|
| 157 |
+
{
|
| 158 |
+
"type": "page_footnote",
|
| 159 |
+
"bbox": [
|
| 160 |
+
0.078,
|
| 161 |
+
0.826,
|
| 162 |
+
0.207,
|
| 163 |
+
0.838
|
| 164 |
+
],
|
| 165 |
+
"angle": 0,
|
| 166 |
+
"content": "*Corresponding author"
|
| 167 |
+
},
|
| 168 |
+
{
|
| 169 |
+
"type": "page_footnote",
|
| 170 |
+
"bbox": [
|
| 171 |
+
0.084,
|
| 172 |
+
0.839,
|
| 173 |
+
0.388,
|
| 174 |
+
0.849
|
| 175 |
+
],
|
| 176 |
+
"angle": 0,
|
| 177 |
+
"content": "Email addresses: w.w.lo@uq.net.au (Wai Weng Lo),"
|
| 178 |
+
},
|
| 179 |
+
{
|
| 180 |
+
"type": "page_footnote",
|
| 181 |
+
"bbox": [
|
| 182 |
+
0.061,
|
| 183 |
+
0.849,
|
| 184 |
+
0.357,
|
| 185 |
+
0.861
|
| 186 |
+
],
|
| 187 |
+
"angle": 0,
|
| 188 |
+
"content": "g.kulatilleke@uq.net.au (Gayan K. Kulatilleke),"
|
| 189 |
+
},
|
| 190 |
+
{
|
| 191 |
+
"type": "page_footnote",
|
| 192 |
+
"bbox": [
|
| 193 |
+
0.061,
|
| 194 |
+
0.862,
|
| 195 |
+
0.473,
|
| 196 |
+
0.872
|
| 197 |
+
],
|
| 198 |
+
"angle": 0,
|
| 199 |
+
"content": "m.sarhan@uq.net.au (Mohanad Sarhan), siamak.layeghy@uq.net.au"
|
| 200 |
+
},
|
| 201 |
+
{
|
| 202 |
+
"type": "page_footnote",
|
| 203 |
+
"bbox": [
|
| 204 |
+
0.061,
|
| 205 |
+
0.872,
|
| 206 |
+
0.425,
|
| 207 |
+
0.883
|
| 208 |
+
],
|
| 209 |
+
"angle": 0,
|
| 210 |
+
"content": "(Siamak Layeghy), marius@itee.uq.edu.au (Marius Portmann)"
|
| 211 |
+
},
|
| 212 |
+
{
|
| 213 |
+
"type": "page_footnote",
|
| 214 |
+
"bbox": [
|
| 215 |
+
0.061,
|
| 216 |
+
0.884,
|
| 217 |
+
0.485,
|
| 218 |
+
0.905
|
| 219 |
+
],
|
| 220 |
+
"angle": 0,
|
| 221 |
+
"content": "<sup>1</sup>As of 2022 Aug 09, the volume of the entire BTC transaction record, the blockchain, is 420GB, with an average growth rate of \\(129\\%\\)."
|
| 222 |
+
},
|
| 223 |
+
{
|
| 224 |
+
"type": "footer",
|
| 225 |
+
"bbox": [
|
| 226 |
+
0.061,
|
| 227 |
+
0.916,
|
| 228 |
+
0.324,
|
| 229 |
+
0.928
|
| 230 |
+
],
|
| 231 |
+
"angle": 0,
|
| 232 |
+
"content": "Preprint submitted to Journal of BTEX Templates"
|
| 233 |
+
},
|
| 234 |
+
{
|
| 235 |
+
"type": "footer",
|
| 236 |
+
"bbox": [
|
| 237 |
+
0.84,
|
| 238 |
+
0.915,
|
| 239 |
+
0.938,
|
| 240 |
+
0.927
|
| 241 |
+
],
|
| 242 |
+
"angle": 0,
|
| 243 |
+
"content": "October 11, 2022"
|
| 244 |
+
}
|
| 245 |
+
],
|
| 246 |
+
[
|
| 247 |
+
{
|
| 248 |
+
"type": "text",
|
| 249 |
+
"bbox": [
|
| 250 |
+
0.057,
|
| 251 |
+
0.097,
|
| 252 |
+
0.486,
|
| 253 |
+
0.21
|
| 254 |
+
],
|
| 255 |
+
"angle": 0,
|
| 256 |
+
"content": "ical for AML. On the other hand, self-supervised graph neural network algorithms [6][7] allow for the unknown label data to be exploited, which can improve the quality of representation for the downstream tasks such as fraud transaction detection in Bitcoin. Furthermore, in supervised learning, GNN is limited to capturing K-hop neighbor information; for example, once the hops of the neighbor are larger than k, the supervised learning GNN fails to capture that node information."
|
| 257 |
+
},
|
| 258 |
+
{
|
| 259 |
+
"type": "text",
|
| 260 |
+
"bbox": [
|
| 261 |
+
0.057,
|
| 262 |
+
0.211,
|
| 263 |
+
0.486,
|
| 264 |
+
0.337
|
| 265 |
+
],
|
| 266 |
+
"angle": 0,
|
| 267 |
+
"content": "In this paper, we applied DGI self-supervised learning to capture the global graph information, as this is not limited to capturing the K-layer neighborhood information, where every node can access the entire graph's structural pattern and node information using random shuffle node features. The DGI discriminator tries to determine wherever the node feature is shuffled or not. Thus, every node can access global parts of the node's properties, rather than K-layer neighborhood information."
|
| 268 |
+
},
|
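The corruption step described in this passage is short to sketch: row-shuffle the node features to build a negative view, encode both views, and train a bilinear discriminator against a mean-readout graph summary. A minimal DGI-style sketch in PyTorch; `encoder` stands for any GNN encoder taking features and an adjacency structure, and all names here are illustrative, not the paper's code:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class DGIDiscriminator(nn.Module):
    """Bilinear discriminator scoring (node embedding, graph summary) pairs."""
    def __init__(self, dim):
        super().__init__()
        self.weight = nn.Parameter(torch.empty(dim, dim))
        nn.init.xavier_uniform_(self.weight)

    def forward(self, h, s):
        # h: (N, dim) node embeddings; s: (dim,) graph-level summary vector.
        return h @ self.weight @ s  # (N,) logits

def dgi_loss(encoder, disc, x, adj):
    h_real = encoder(x, adj)                             # embeddings of the real graph
    h_fake = encoder(x[torch.randperm(x.size(0))], adj)  # corruption: shuffled feature rows
    summary = torch.sigmoid(h_real.mean(dim=0))          # mean readout over all nodes
    logits = torch.cat([disc(h_real, summary), disc(h_fake, summary)])
    labels = torch.cat([torch.ones(h_real.size(0)), torch.zeros(h_fake.size(0))])
    # The discriminator learns to tell real node/summary pairs from corrupted ones.
    return F.binary_cross_entropy_with_logits(logits, labels)
```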
| 269 |
+
{
|
| 270 |
+
"type": "text",
|
| 271 |
+
"bbox": [
|
| 272 |
+
0.057,
|
| 273 |
+
0.339,
|
| 274 |
+
0.486,
|
| 275 |
+
0.409
|
| 276 |
+
],
|
| 277 |
+
"angle": 0,
|
| 278 |
+
"content": "We demonstrate how the self-supervised DGI algorithm can be integrated with standard machine learning classification algorithms, i.e., Random Forest, to build an efficient anti-money-laundering detection system. We show that our Inspection-L method outperforms the state-of-the-art in terms of F1 score."
|
| 279 |
+
},
|
| 280 |
+
{
|
| 281 |
+
"type": "text",
|
| 282 |
+
"bbox": [
|
| 283 |
+
0.084,
|
| 284 |
+
0.41,
|
| 285 |
+
0.435,
|
| 286 |
+
0.424
|
| 287 |
+
],
|
| 288 |
+
"angle": 0,
|
| 289 |
+
"content": "In summary, the key contributions of this paper are:"
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"type": "text",
|
| 293 |
+
"bbox": [
|
| 294 |
+
0.084,
|
| 295 |
+
0.436,
|
| 296 |
+
0.486,
|
| 297 |
+
0.506
|
| 298 |
+
],
|
| 299 |
+
"angle": 0,
|
| 300 |
+
"content": "- Different from most existing works, which typically use supervised graph representation learning to generate node embeddings for illegal transaction detection, we use a self-supervised learning approach to learn the node embeddings without using any labels."
|
| 301 |
+
},
|
| 302 |
+
{
|
| 303 |
+
"type": "text",
|
| 304 |
+
"bbox": [
|
| 305 |
+
0.084,
|
| 306 |
+
0.521,
|
| 307 |
+
0.486,
|
| 308 |
+
0.62
|
| 309 |
+
],
|
| 310 |
+
"angle": 0,
|
| 311 |
+
"content": "- The proposed Inspection-L is based on a self-supervised DGI combined with the Random Forest (RF) supervised machine learning algorithms, to capture topological information and node features in the transaction graph to detect illegal transactions. To the best of our knowledge, our proposal is the first to utilize self-supervised GNNs to generate node embeddings for AML in Bitcoin."
|
| 312 |
+
},
|
| 313 |
+
{
|
| 314 |
+
"type": "text",
|
| 315 |
+
"bbox": [
|
| 316 |
+
0.084,
|
| 317 |
+
0.634,
|
| 318 |
+
0.486,
|
| 319 |
+
0.691
|
| 320 |
+
],
|
| 321 |
+
"angle": 0,
|
| 322 |
+
"content": "- The comprehensive evaluation of the proposed framework using the Elliptic benchmark datasets demonstrates superior performance compared to other, supervised machine learning approaches."
|
| 323 |
+
},
|
| 324 |
+
{
|
| 325 |
+
"type": "list",
|
| 326 |
+
"bbox": [
|
| 327 |
+
0.084,
|
| 328 |
+
0.436,
|
| 329 |
+
0.486,
|
| 330 |
+
0.691
|
| 331 |
+
],
|
| 332 |
+
"angle": 0,
|
| 333 |
+
"content": null
|
| 334 |
+
},
|
| 335 |
+
{
|
| 336 |
+
"type": "title",
|
| 337 |
+
"bbox": [
|
| 338 |
+
0.059,
|
| 339 |
+
0.712,
|
| 340 |
+
0.233,
|
| 341 |
+
0.726
|
| 342 |
+
],
|
| 343 |
+
"angle": 0,
|
| 344 |
+
"content": "2. RELATED WORKS"
|
| 345 |
+
},
|
| 346 |
+
{
|
| 347 |
+
"type": "text",
|
| 348 |
+
"bbox": [
|
| 349 |
+
0.057,
|
| 350 |
+
0.737,
|
| 351 |
+
0.487,
|
| 352 |
+
0.893
|
| 353 |
+
],
|
| 354 |
+
"angle": 0,
|
| 355 |
+
"content": "Mark et al. [5] created and published the Elliptic dataset, a temporal graph-based Bitcoin transaction dataset consisting of over 200K Bitcoin node transactions, 234K payment edges, and 49 transaction graphs with distinct time steps. Each of the transaction nodes was labeled as a \"licit\", \"illicit\", or \"unknown\" entity. They evaluated the Elliptic dataset using various machine learning methods, including Logistic Regression (LR), Random Forest (RF), Multilayer Perceptrons (MLP) [8], Graph Convolutional Networks (GCNs) [9] and EvolveGCN [10]. They retrieved a recall score in the illicit category of 0.67 using RF and 0.51 using GCNs."
|
| 356 |
+
},
|
| 357 |
+
{
|
| 358 |
+
"type": "text",
|
| 359 |
+
"bbox": [
|
| 360 |
+
0.508,
|
| 361 |
+
0.097,
|
| 362 |
+
0.938,
|
| 363 |
+
0.395
|
| 364 |
+
],
|
| 365 |
+
"angle": 0,
|
| 366 |
+
"content": "Yining et al. [11] collected the Bitcoin transaction graph data between July 2014 and May 2017 by running a Bitcoin client and used an external trusted source, \"Wallet Explorer\", a website that tracks Bitcoin wallets, to label the data. They first highlighted the differences between money laundering and regular transactions using network centrality such as PageRank, clustering coefficient [12], then used a node2vec-based [13] classifier to classify money laundering transactions. The research also indicated that statistical information, such as indegree/out-degree, number of weakly connected components, and sum/mean/standard deviation of the output values, could distinguish money laundering transactions from legal transactions. However, this approach only considers graph topological patterns, without considering node features. Vassallo et al. [14] focused on the detection of illicit cryptocurrency activities (e.g., scams, terrorism financing, and Ponzi schemes). Their proposed detection framework is based on Adaptive Stacked eXtreme Gradient Boosting (ASXGB), an enhanced variation of eXtreme Gradient Boosting (XGBoost). ASXGB was evaluated using the Elliptic dataset, and the results demonstrate its superiority at both the account and transaction levels."
|
| 367 |
+
},
|
| 368 |
+
{
|
| 369 |
+
"type": "text",
|
| 370 |
+
"bbox": [
|
| 371 |
+
0.508,
|
| 372 |
+
0.396,
|
| 373 |
+
0.938,
|
| 374 |
+
0.75
|
| 375 |
+
],
|
| 376 |
+
"angle": 0,
|
| 377 |
+
"content": "Chaehyeon et al. [15] applied supervised machine learning algorithms to classify illicit nodes in the Bitcoin network. They used two supervised machine learning models, namely, Random Forest (RF) and Artificial Neural Network (ANN) [8] to detect illegal transactions. First, they collected the legal and illegal Bitcoin data from the forum sites \"Wallet Explorer\" and \"Blockchain Explorer\". Next, they performed feature extraction based on the characteristics of Bitcoin transactions, such as transaction fees and transaction size. The extracted features were labeled legal or illegal for supervised training. The results indicated that relatively high F1 scores could be achieved; specifically, ANN and RF achieved 0.89 and 0.98 F1 scores, respectively. In [16] proposed using GCNs intertwined with linear layers to classify illicit nodes of the Elliptic dataset [5]. An overall classification accuracy and recall of \\(97.40\\%\\) and 0.67, respectively, can be achieved to detect illicit transactions. In [17], the authors used an autoencoder with graph embedding to detect mixing and demixing services for Bitcoin cryptocurrency. They first applied graph node embedding to generate the node representation; then, a K-means algorithm was applied to cluster the node embeddings to detect mixing and demixing services. The proposed model was evaluated based on real-world Bitcoin datasets to evaluate the model's effectiveness, and the results demonstrate that the proposed model can effectively perform demix/mixing service anomaly detection."
|
| 378 |
+
},
|
| 379 |
+
{
|
| 380 |
+
"type": "text",
|
| 381 |
+
"bbox": [
|
| 382 |
+
0.508,
|
| 383 |
+
0.751,
|
| 384 |
+
0.941,
|
| 385 |
+
0.906
|
| 386 |
+
],
|
| 387 |
+
"angle": 0,
|
| 388 |
+
"content": "Lorenz et al. [18] proposed active learning techniques by using a minimum number of labels to achieve a high rate of detection of illicit transactions on the Elliptic dataset. In [19], the authors applied unsupervised learning to detect suspicious nodes in the Bitcoin transaction graph. They used various kinds of unsupervised machine learning algorithms, such as K-means and Gaussian Mixture models, to cluster normal and illicit nodes. However, since the Bitcoin transaction dataset they used lacked ground-truth labels, they simply used the internal index to validate the clustering algorithm, without confirming that those nodes are actually malicious transactions. Monamo et al. [20]"
|
| 389 |
+
},
|
| 390 |
+
{
|
| 391 |
+
"type": "page_number",
|
| 392 |
+
"bbox": [
|
| 393 |
+
0.494,
|
| 394 |
+
0.915,
|
| 395 |
+
0.505,
|
| 396 |
+
0.926
|
| 397 |
+
],
|
| 398 |
+
"angle": 0,
|
| 399 |
+
"content": "2"
|
| 400 |
+
}
|
| 401 |
+
],
|
| 402 |
+
[
|
| 403 |
+
{
|
| 404 |
+
"type": "text",
|
| 405 |
+
"bbox": [
|
| 406 |
+
0.058,
|
| 407 |
+
0.097,
|
| 408 |
+
0.502,
|
| 409 |
+
0.352
|
| 410 |
+
],
|
| 411 |
+
"angle": 0,
|
| 412 |
+
"content": "applied trimmed-Kmeans to detect fraud in the Bitcoin network. They used various graph centrality measures (i.e. in degree, out-degree of the Bitcoin transactions) and currency features (i.e. the total amount sent), which were then used for Bitcoin transaction clustering. However, similar to [19], due to the unavailability of ground-truth labels, they used clustering performance metrics such as \"within the sum of squares\", without being able to validate the true nature of the Bitcoin transaction anomalies. Shucheng et al. [21] proposed SIEGE, a self-supervised graph learning approach for Ethereum phishing scam detection, using two pretext tasks to generate node embeddings without using labels and an incremental paradigm to capture data distribution changes for over half a year. However, a significant limitation of this approach is that it does not consider the Bitcoin context and is limited to detecting Ethereum phishing scams. Additionally, their simple application of GCNs[9] in the pretext task phase is much less effective than the Weisfeiler-Lehman (1-WL) test[22]."
|
| 413 |
+
},
|
| 414 |
+
{
|
| 415 |
+
"type": "text",
|
| 416 |
+
"bbox": [
|
| 417 |
+
0.058,
|
| 418 |
+
0.353,
|
| 419 |
+
0.487,
|
| 420 |
+
0.41
|
| 421 |
+
],
|
| 422 |
+
"angle": 0,
|
| 423 |
+
"content": "In contrast with related studies, our approach can detect not only phishing scams but also other illicit transactions, such as terrorist organizations, ransomware and Ponzi schemes, by utilizing the Elliptic dataset [5]."
|
| 424 |
+
},
|
| 425 |
+
{
|
| 426 |
+
"type": "title",
|
| 427 |
+
"bbox": [
|
| 428 |
+
0.059,
|
| 429 |
+
0.431,
|
| 430 |
+
0.206,
|
| 431 |
+
0.443
|
| 432 |
+
],
|
| 433 |
+
"angle": 0,
|
| 434 |
+
"content": "3. BACKGROUND"
|
| 435 |
+
},
|
| 436 |
+
{
|
| 437 |
+
"type": "image",
|
| 438 |
+
"bbox": [
|
| 439 |
+
0.102,
|
| 440 |
+
0.466,
|
| 441 |
+
0.443,
|
| 442 |
+
0.692
|
| 443 |
+
],
|
| 444 |
+
"angle": 0,
|
| 445 |
+
"content": null
|
| 446 |
+
},
|
| 447 |
+
{
|
| 448 |
+
"type": "image_caption",
|
| 449 |
+
"bbox": [
|
| 450 |
+
0.149,
|
| 451 |
+
0.705,
|
| 452 |
+
0.396,
|
| 453 |
+
0.717
|
| 454 |
+
],
|
| 455 |
+
"angle": 0,
|
| 456 |
+
"content": "Figure 1: Overview of Deep Graph Infomax"
|
| 457 |
+
},
|
| 458 |
+
{
|
| 459 |
+
"type": "text",
|
| 460 |
+
"bbox": [
|
| 461 |
+
0.058,
|
| 462 |
+
0.734,
|
| 463 |
+
0.486,
|
| 464 |
+
0.846
|
| 465 |
+
],
|
| 466 |
+
"angle": 0,
|
| 467 |
+
"content": "The main innovation of our proposed model is its use of DGI [4] with our proposed GIN encoder to learn node embeddings in a self-supervised manner. Then, the node embeddings can be treated as enhanced features and be combined with the raw features for standard supervised RF machine learning algorithms to classify illicit transaction. This has a clear advantage over simple features, as inputs to overall graph-structured patterns are available for the downstream classifier."
|
| 468 |
+
},
|
| 469 |
+
{
|
| 470 |
+
"type": "text",
|
| 471 |
+
"bbox": [
|
| 472 |
+
0.058,
|
| 473 |
+
0.848,
|
| 474 |
+
0.487,
|
| 475 |
+
0.905
|
| 476 |
+
],
|
| 477 |
+
"angle": 0,
|
| 478 |
+
"content": "Consequently, the current graph-based approaches [5] [16] try to apply a supervised GCN-based approach to capture the overall graph-structured patterns. However, the main limitation is that GCN can only capture the neighborhood information of"
|
| 479 |
+
},
|
| 480 |
+
{
|
| 481 |
+
"type": "text",
|
| 482 |
+
"bbox": [
|
| 483 |
+
0.509,
|
| 484 |
+
0.097,
|
| 485 |
+
0.938,
|
| 486 |
+
0.253
|
| 487 |
+
],
|
| 488 |
+
"angle": 0,
|
| 489 |
+
"content": "limited K layers, not the global view graph and node information, due to the threat of overfitting. While some models, such as FDGATII [23], are capable of a larger K, these are still limited by their layer structure and finite k. On the other hand, our Inspection-L approach allows for every node to obtain access to the structural patterns of the entire graph, which can capture more global neighborhood information. The proposed method considers that the message-passing functions of [5] [16] are not powerful enough, as they lack injective functions. Therefore, we proposed a GIN encoder to make the message propagation function more robust."
|
| 490 |
+
},
|
| 491 |
+
{
|
| 492 |
+
"type": "title",
|
| 493 |
+
"bbox": [
|
| 494 |
+
0.51,
|
| 495 |
+
0.268,
|
| 496 |
+
0.715,
|
| 497 |
+
0.281
|
| 498 |
+
],
|
| 499 |
+
"angle": 0,
|
| 500 |
+
"content": "3.1. Graph Neural Networks"
|
| 501 |
+
},
|
| 502 |
+
{
|
| 503 |
+
"type": "text",
|
| 504 |
+
"bbox": [
|
| 505 |
+
0.509,
|
| 506 |
+
0.285,
|
| 507 |
+
0.938,
|
| 508 |
+
0.47
|
| 509 |
+
],
|
| 510 |
+
"angle": 0,
|
| 511 |
+
"content": "GNNs is a deep learning approach for graph-based data and a recent and highly promising area of machine learning [23]. The key feature of GNNs is their ability to combine a topological graph structure with features. For each node in a graph, this means aggregating neighboring node features to leverage a new representation of the current node that considers the neighboring information. The output of this process is known as embeddings. Final node embeddings are low- or n-dimensional vector representations that capture topological and node properties. Embeddings can be learned in a supervised or unsupervised manner and used for downstream tasks such as node classification, clustering, and link prediction [23]. The \\(k\\)-th layer of a typical GCN is:"
|
| 512 |
+
},
|
| 513 |
+
{
|
| 514 |
+
"type": "equation",
|
| 515 |
+
"bbox": [
|
| 516 |
+
0.561,
|
| 517 |
+
0.48,
|
| 518 |
+
0.937,
|
| 519 |
+
0.501
|
| 520 |
+
],
|
| 521 |
+
"angle": 0,
|
| 522 |
+
"content": "\\[\nh _ {v} ^ {(k)} = \\sigma \\left(W \\cdot \\operatorname {M E A N} \\left\\{h _ {u} ^ {(k - 1)}, \\forall u \\in N (v) \\cup \\{v \\} \\right\\}\\right). \\tag {1}\n\\]"
|
| 523 |
+
},
|
| 524 |
+
{
|
| 525 |
+
"type": "text",
|
| 526 |
+
"bbox": [
|
| 527 |
+
0.509,
|
| 528 |
+
0.512,
|
| 529 |
+
0.938,
|
| 530 |
+
0.584
|
| 531 |
+
],
|
| 532 |
+
"angle": 0,
|
| 533 |
+
"content": "where \\( h_{\\nu}^{(k)} \\) is the feature vector of node \\( \\nu \\) at the \\( k \\)-th iteration/layer, \\( h_{\\nu}^{(0)} = X_{\\nu} \\), and \\( N(\\nu) \\) is the set of neighbor nodes of \\( \\nu \\). \\( W^{(l)} \\) is the weight matrix that will be learned for the downstream tasks. \\( \\sigma \\) is an activation function, typically ReLU, for computing node representations."
|
| 534 |
+
},
|
| 535 |
+
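For concreteness, the mean-aggregation rule of Equation 1 can be sketched in a few lines of PyTorch. This is a minimal illustration, not the paper's implementation; the class name, the adjacency-list input format, and the ReLU choice are assumptions made only for the sketch.

```python
# Minimal sketch of the mean-aggregation GCN layer in Equation 1.
# Assumes node features h of shape (num_nodes, in_dim) and an adjacency list.
import torch
import torch.nn as nn

class MeanGCNLayer(nn.Module):
    def __init__(self, in_dim: int, out_dim: int):
        super().__init__()
        self.W = nn.Linear(in_dim, out_dim, bias=False)  # weight matrix W

    def forward(self, h: torch.Tensor, neighbors: list) -> torch.Tensor:
        agg = []
        for v, nbrs in enumerate(neighbors):
            idx = torch.tensor(nbrs + [v])   # N(v) ∪ {v}
            agg.append(h[idx].mean(dim=0))   # MEAN aggregation
        return torch.relu(self.W(torch.stack(agg)))  # σ(W · MEAN{...})
```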
{
|
| 536 |
+
"type": "title",
|
| 537 |
+
"bbox": [
|
| 538 |
+
0.51,
|
| 539 |
+
0.598,
|
| 540 |
+
0.753,
|
| 541 |
+
0.612
|
| 542 |
+
],
|
| 543 |
+
"angle": 0,
|
| 544 |
+
"content": "3.2. Graph Isomorphism Network"
|
| 545 |
+
},
|
| 546 |
+
{
|
| 547 |
+
"type": "text",
|
| 548 |
+
"bbox": [
|
| 549 |
+
0.509,
|
| 550 |
+
0.615,
|
| 551 |
+
0.938,
|
| 552 |
+
0.673
|
| 553 |
+
],
|
| 554 |
+
"angle": 0,
|
| 555 |
+
"content": "Graph Isomorphism Network (GIN) is theoretically a maximally powerful GNN proposed by Xu et al. [24]. The main difference between GIN and other GNNs is the message aggregation function, which is shown below:"
|
| 556 |
+
},
|
| 557 |
+
{
|
| 558 |
+
"type": "equation",
|
| 559 |
+
"bbox": [
|
| 560 |
+
0.568,
|
| 561 |
+
0.683,
|
| 562 |
+
0.937,
|
| 563 |
+
0.721
|
| 564 |
+
],
|
| 565 |
+
"angle": 0,
|
| 566 |
+
"content": "\\[\nh _ {v} ^ {(k)} = \\operatorname {M L P} ^ {(k)} \\left(\\left(1 + \\epsilon^ {(k)}\\right) \\cdot h _ {v} ^ {(k - 1)} + \\sum_ {u \\in N (v)} h _ {u} ^ {(k - 1)}\\right) \\tag {2}\n\\]"
|
| 567 |
+
},
|
| 568 |
+
{
|
| 569 |
+
"type": "text",
|
| 570 |
+
"bbox": [
|
| 571 |
+
0.509,
|
| 572 |
+
0.731,
|
| 573 |
+
0.938,
|
| 574 |
+
0.903
|
| 575 |
+
],
|
| 576 |
+
"angle": 0,
|
| 577 |
+
"content": "GCNs is less effective than the Weisfeiler-Lehman (1-WL) [22] test due to the single-layer aggregation function, which is same as the hash function of a 1-WL algorithm. According to [24], a single, non-linear layer is insufficient for graph learning. Thus, GCN message passing functions are not necessarily injective. Therefore, GIN [24] was proposed to make the passing function injective, as shown in Equation 2, where \\(\\varepsilon^{(k)}\\) is a scalar parameter, and MLP stands for multilayer perceptron. \\(h_{\\nu}^{(k)} \\in \\mathbb{R}^d\\) is the embedding of node \\(\\nu_{i}\\) at the \\(k\\)-th layer, \\(h_{\\nu}^{(0)} = x_{\\nu}\\) is the original input node features, and \\(N(\\nu_{i})\\) is the set of neighboring nodes of node \\(\\nu_{i}\\). We can stack \\(k\\) layers to obtain the final node representation \\(h_{\\nu}^{(k)}\\)."
|
| 578 |
+
},
|
| 579 |
+
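A minimal sketch of the GIN aggregation of Equation 2 is shown below, assuming a dense adjacency matrix and a two-layer MLP; the layer name and dimensions are illustrative, not the authors' exact encoder.

```python
# Minimal sketch of the GIN aggregation in Equation 2 (Xu et al. [24]).
import torch
import torch.nn as nn

class GINLayer(nn.Module):
    def __init__(self, dim: int):
        super().__init__()
        self.eps = nn.Parameter(torch.zeros(1))  # learnable epsilon^(k)
        self.mlp = nn.Sequential(                # MLP^(k)
            nn.Linear(dim, dim), nn.BatchNorm1d(dim), nn.ReLU(),
            nn.Linear(dim, dim))

    def forward(self, h: torch.Tensor, adj: torch.Tensor) -> torch.Tensor:
        # Sum aggregation over neighbors keeps the update injective.
        return self.mlp((1 + self.eps) * h + adj @ h)
```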
{
|
| 580 |
+
"type": "page_number",
|
| 581 |
+
"bbox": [
|
| 582 |
+
0.494,
|
| 583 |
+
0.915,
|
| 584 |
+
0.504,
|
| 585 |
+
0.926
|
| 586 |
+
],
|
| 587 |
+
"angle": 0,
|
| 588 |
+
"content": "3"
|
| 589 |
+
}
|
| 590 |
+
],
|
| 591 |
+
[
|
| 592 |
+
{
|
| 593 |
+
"type": "title",
|
| 594 |
+
"bbox": [
|
| 595 |
+
0.059,
|
| 596 |
+
0.097,
|
| 597 |
+
0.245,
|
| 598 |
+
0.112
|
| 599 |
+
],
|
| 600 |
+
"angle": 0,
|
| 601 |
+
"content": "3.3. Deep Graph Infomax"
|
| 602 |
+
},
|
| 603 |
+
{
|
| 604 |
+
"type": "text",
|
| 605 |
+
"bbox": [
|
| 606 |
+
0.058,
|
| 607 |
+
0.115,
|
| 608 |
+
0.496,
|
| 609 |
+
0.214
|
| 610 |
+
],
|
| 611 |
+
"angle": 0,
|
| 612 |
+
"content": "Deep Graph Infomax (DGI) [4] is a self-supervised graph representation learning approach that relies on maximizing the mutual information between patch representations and the global graph summary. The patch representations summarize subgraphs, allowing for the preservation of similarities at the patch level. A trained encoder in DGI can be reused to generate node embeddings for downstream tasks, such as node clustering."
|
| 613 |
+
},
|
| 614 |
+
{
|
| 615 |
+
"type": "text",
|
| 616 |
+
"bbox": [
|
| 617 |
+
0.058,
|
| 618 |
+
0.215,
|
| 619 |
+
0.487,
|
| 620 |
+
0.356
|
| 621 |
+
],
|
| 622 |
+
"angle": 0,
|
| 623 |
+
"content": "Most of the previous works on self-supervised representation learning approaches rely on the random walk strategy [25][26], which is extremely computationally expensive because the number of walks depends on the number of nodes on the graph, making it unscalable for large graphs. Moreover, the choice of hyperparameters (length of the walk, number of walks) can significantly impact the model performance. Overall, DGI does not require supervision or random walk techniques. Instead, it guides the model to learn node connections by simultaneously leveraging local and global information in a graph [4]."
|
| 624 |
+
},
|
| 625 |
+
{
|
| 626 |
+
"type": "text",
|
| 627 |
+
"bbox": [
|
| 628 |
+
0.058,
|
| 629 |
+
0.356,
|
| 630 |
+
0.487,
|
| 631 |
+
0.456
|
| 632 |
+
],
|
| 633 |
+
"angle": 0,
|
| 634 |
+
"content": "Figure 1 shows the overall operation of DGI. \\( G \\) is a true graph with the true nodes, the true edges that connect them, and real node features associated with each node. \\( H \\) is a corrupted graph where the nodes and edges have been changed using a corruption function. [4] suggests that the corruption function can randomly shuffle each node feature and maintain the same edges as the true graph \\( G \\)."
|
| 635 |
+
},
|
| 636 |
+
{
|
| 637 |
+
"type": "text",
|
| 638 |
+
"bbox": [
|
| 639 |
+
0.084,
|
| 640 |
+
0.456,
|
| 641 |
+
0.473,
|
| 642 |
+
0.47
|
| 643 |
+
],
|
| 644 |
+
"angle": 0,
|
| 645 |
+
"content": "The DGI training procedure consists of four components:"
|
| 646 |
+
},
|
| 647 |
+
{
|
| 648 |
+
"type": "text",
|
| 649 |
+
"bbox": [
|
| 650 |
+
0.084,
|
| 651 |
+
0.479,
|
| 652 |
+
0.495,
|
| 653 |
+
0.55
|
| 654 |
+
],
|
| 655 |
+
"angle": 0,
|
| 656 |
+
"content": "- A corruption procedure \\( C \\) that changes the real input graph \\( G \\) into a corrupted graph \\( H = (C(G)) \\). This can be achieved by randomly shifting the node features among the nodes in a real graph \\( G \\) or by adding and removing an edge from the real graph \\( G \\)."
|
| 657 |
+
},
|
| 658 |
+
{
|
| 659 |
+
"type": "text",
|
| 660 |
+
"bbox": [
|
| 661 |
+
0.084,
|
| 662 |
+
0.56,
|
| 663 |
+
0.501,
|
| 664 |
+
0.645
|
| 665 |
+
],
|
| 666 |
+
"angle": 0,
|
| 667 |
+
"content": "- An encoder \\( E \\) that computes the node embeddings of a corrupted graph and a real graph. This can be achieved using various graph representation methods, such as Graph Convolutional Networks (GCNs) [9], Graph Attention Networks (GATs) [27] or Graph Transformer Networks (GTNs) [28]."
|
| 668 |
+
},
|
| 669 |
+
{
|
| 670 |
+
"type": "text",
|
| 671 |
+
"bbox": [
|
| 672 |
+
0.084,
|
| 673 |
+
0.655,
|
| 674 |
+
0.487,
|
| 675 |
+
0.713
|
| 676 |
+
],
|
| 677 |
+
"angle": 0,
|
| 678 |
+
"content": "- The node embedding vectors for each node in the real graph are summarized into a single embed vector of the entire graph \\(\\overline{s}\\) (global graph summary) by using a readout function \\(R\\) to compute the whole graph embeddings."
|
| 679 |
+
},
|
| 680 |
+
{
|
| 681 |
+
"type": "text",
|
| 682 |
+
"bbox": [
|
| 683 |
+
0.084,
|
| 684 |
+
0.72,
|
| 685 |
+
0.487,
|
| 686 |
+
0.835
|
| 687 |
+
],
|
| 688 |
+
"angle": 0,
|
| 689 |
+
"content": "- A discriminator \\(D\\), which is a logistic non-linear sigmoid function, compares a real node embedding vector \\(\\vec{h}_i\\) and a corrupted node embedding \\(\\widetilde{h}_i\\) against the whole real graph embedding \\(\\overline{s}\\), and provides a score between 0 and 1, as shown in Equation 3. This binary cross-entropy loss objective function [4] can be applied to discriminate between the embedding of the real node and the corrupted node to train the encoder \\(E\\)."
|
| 690 |
+
},
|
| 691 |
+
{
|
| 692 |
+
"type": "list",
|
| 693 |
+
"bbox": [
|
| 694 |
+
0.084,
|
| 695 |
+
0.479,
|
| 696 |
+
0.501,
|
| 697 |
+
0.835
|
| 698 |
+
],
|
| 699 |
+
"angle": 0,
|
| 700 |
+
"content": null
|
| 701 |
+
},
|
| 702 |
+
{
|
| 703 |
+
"type": "equation",
|
| 704 |
+
"bbox": [
|
| 705 |
+
0.592,
|
| 706 |
+
0.119,
|
| 707 |
+
0.938,
|
| 708 |
+
0.175
|
| 709 |
+
],
|
| 710 |
+
"angle": 0,
|
| 711 |
+
"content": "\\[\nL = \\frac {1}{N + M} \\left(\\sum_ {i = 1} ^ {N} \\mathbb {E} _ {\\left(\\mathbf {X}, \\mathbf {A}\\right)} \\left[ \\log D \\left(\\vec {h} _ {i}, \\vec {s}\\right) \\right] + \\right. \\tag {3}\n\\]"
|
| 712 |
+
},
|
| 713 |
+
{
|
| 714 |
+
"type": "equation",
|
| 715 |
+
"bbox": [
|
| 716 |
+
0.648,
|
| 717 |
+
0.179,
|
| 718 |
+
0.856,
|
| 719 |
+
0.218
|
| 720 |
+
],
|
| 721 |
+
"angle": 0,
|
| 722 |
+
"content": "\\[\n\\left. \\sum_ {j = 1} ^ {M} \\mathbb {E} _ {(\\overline {{\\mathbf {X}}}, \\overline {{\\mathbf {A}}})} \\left[ \\log \\left(1 - D (\\vec {h} _ {j}, \\vec {s})\\right) \\right]\\right)\n\\]"
|
| 723 |
+
},
|
| 724 |
+
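A minimal sketch of the objective in Equation 3 is given below, assuming the discriminator already outputs scores in (0, 1) and that the corruption preserves the node count (N = M, as in this paper). Minimizing this BCE is equivalent, up to a constant factor, to maximizing Equation 3.

```python
# Minimal sketch of the DGI objective in Equation 3, assuming N = M.
import torch
import torch.nn.functional as F

def dgi_loss(pos_scores: torch.Tensor, neg_scores: torch.Tensor) -> torch.Tensor:
    # pos_scores = D(h_i, s) on real embeddings, neg_scores = D(h~_j, s)
    # on corrupted ones; BCE drives D(.) toward 1 on real, 0 on corrupted.
    return (F.binary_cross_entropy(pos_scores, torch.ones_like(pos_scores)) +
            F.binary_cross_entropy(neg_scores, torch.zeros_like(neg_scores)))
```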
{
|
| 725 |
+
"type": "title",
|
| 726 |
+
"bbox": [
|
| 727 |
+
0.51,
|
| 728 |
+
0.238,
|
| 729 |
+
0.708,
|
| 730 |
+
0.251
|
| 731 |
+
],
|
| 732 |
+
"angle": 0,
|
| 733 |
+
"content": "4. PROPOSED METHOD"
|
| 734 |
+
},
|
| 735 |
+
{
|
| 736 |
+
"type": "text",
|
| 737 |
+
"bbox": [
|
| 738 |
+
0.509,
|
| 739 |
+
0.263,
|
| 740 |
+
0.938,
|
| 741 |
+
0.347
|
| 742 |
+
],
|
| 743 |
+
"angle": 0,
|
| 744 |
+
"content": "To construct Bitcoin transaction graphs from the dataset, we used 49 different Bitcoin transaction graphs (TGs) [5] using time steps so that the nodes can be represented as node transactions and the edges can be represented as flows of Bitcoin transactions. This is a very natural way to represent Bitcoin transactions."
|
| 745 |
+
},
|
| 746 |
+
{
|
| 747 |
+
"type": "text",
|
| 748 |
+
"bbox": [
|
| 749 |
+
0.509,
|
| 750 |
+
0.348,
|
| 751 |
+
0.938,
|
| 752 |
+
0.419
|
| 753 |
+
],
|
| 754 |
+
"angle": 0,
|
| 755 |
+
"content": "The pseudocode and overall procedure of our proposed algorithm are shown in Algorithm 1 and Figure. 2. The proposed framework consists of two-stage: DGI training for node embedding extraction to perform feature augmentation, supervised machine learning classification."
|
| 756 |
+
},
|
| 757 |
+
{
|
| 758 |
+
"type": "title",
|
| 759 |
+
"bbox": [
|
| 760 |
+
0.51,
|
| 761 |
+
0.433,
|
| 762 |
+
0.64,
|
| 763 |
+
0.447
|
| 764 |
+
],
|
| 765 |
+
"angle": 0,
|
| 766 |
+
"content": "4.1. DGI Training"
|
| 767 |
+
},
|
| 768 |
+
{
|
| 769 |
+
"type": "text",
|
| 770 |
+
"bbox": [
|
| 771 |
+
0.509,
|
| 772 |
+
0.451,
|
| 773 |
+
0.938,
|
| 774 |
+
0.719
|
| 775 |
+
],
|
| 776 |
+
"angle": 0,
|
| 777 |
+
"content": "To train the proposed model, the input includes the transaction graphs \\( G \\) with node features (i.e., all 166 features, which is a combination of local and macro features, which we denote AF, or only the 94 local features), and the specified number of training epochs \\( K \\) to extract true node embeddings and corrupted node embeddings. Before this, we need to define the corruption function \\( C \\) to generate the corrupted transaction graphs \\( C(G) \\) for our GIN encoder to extract the corrupted node embeddings. In this paper, we randomly shuffled all the node features among the nodes in real transaction graphs \\( G \\) to generate the corrupted transaction graphs for each real graphs by shuffling the feature matrix in rows \\( \\mathbf{X} \\) by using Bernoulli distribution. Overall, instead of adding or removing edges from the adjacency matrix such that \\( \\mathbf{A}_G \\neq \\mathbf{A}_H \\), we use corruption function \\( C \\), which shuffle the node features such that \\( \\mathbf{X}_G \\neq \\mathbf{X}_H \\), and retain the adjacency matrix, i.e., \\( (\\mathbf{A}_G = \\mathbf{A}_H) \\). Note that the corruption function only changes the node features, and not the structure; therefore, \\( N_G = N_H \\). In case of the DGI implementation, we now have \\( N = M \\)."
|
| 778 |
+
},
|
| 779 |
+
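A minimal sketch of such a feature-shuffling corruption is shown below, using a random row permutation of X, a common DGI corruption choice; the function name and the (x, edge_index) input convention are assumptions for the sketch.

```python
# Minimal sketch of a corruption function C that shuffles node features
# row-wise (X_G != X_H) while keeping the edges intact (A_G = A_H).
import torch

def corrupt(x: torch.Tensor, edge_index: torch.Tensor):
    perm = torch.randperm(x.size(0))  # random permutation of node indices
    return x[perm], edge_index        # same structure, so N_G = N_H
```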
{
|
| 780 |
+
"type": "text",
|
| 781 |
+
"bbox": [
|
| 782 |
+
0.509,
|
| 783 |
+
0.721,
|
| 784 |
+
0.938,
|
| 785 |
+
0.819
|
| 786 |
+
],
|
| 787 |
+
"angle": 0,
|
| 788 |
+
"content": "For each batch of graph data \\( G \\) in the training epoch, in Algorithm 1 from Line 3 to 4, we use our proposed GIN encoder to extract true node and corrupted node embeddings. Our proposed GIN encoder is shown in Figure 2 with two layers of MLP, which consists of 128 hidden units, ReLU activation function and Batch normalization (as shown in Algorithm 2) [29]."
|
| 789 |
+
},
|
| 790 |
+
{
|
| 791 |
+
"type": "text",
|
| 792 |
+
"bbox": [
|
| 793 |
+
0.509,
|
| 794 |
+
0.82,
|
| 795 |
+
0.938,
|
| 796 |
+
0.891
|
| 797 |
+
],
|
| 798 |
+
"angle": 0,
|
| 799 |
+
"content": "The design of the MLPs is motivated by the fundamental goal of a GNN-based model. Ideally, various types of different graph patterns should be distinguishable via the graph encoder, which means that different graph structures should be mapped to different locations in the embedding space. This requires the"
|
| 800 |
+
},
|
| 801 |
+
{
|
| 802 |
+
"type": "page_number",
|
| 803 |
+
"bbox": [
|
| 804 |
+
0.494,
|
| 805 |
+
0.915,
|
| 806 |
+
0.504,
|
| 807 |
+
0.926
|
| 808 |
+
],
|
| 809 |
+
"angle": 0,
|
| 810 |
+
"content": "4"
|
| 811 |
+
}
|
| 812 |
+
],
|
| 813 |
+
[
|
| 814 |
+
{
|
| 815 |
+
"type": "image",
|
| 816 |
+
"bbox": [
|
| 817 |
+
0.092,
|
| 818 |
+
0.093,
|
| 819 |
+
0.941,
|
| 820 |
+
0.322
|
| 821 |
+
],
|
| 822 |
+
"angle": 0,
|
| 823 |
+
"content": null
|
| 824 |
+
},
|
| 825 |
+
{
|
| 826 |
+
"type": "image_caption",
|
| 827 |
+
"bbox": [
|
| 828 |
+
0.42,
|
| 829 |
+
0.331,
|
| 830 |
+
0.578,
|
| 831 |
+
0.344
|
| 832 |
+
],
|
| 833 |
+
"angle": 0,
|
| 834 |
+
"content": "Figure 2: Proposed Method"
|
| 835 |
+
},
|
| 836 |
+
{
|
| 837 |
+
"type": "code_caption",
|
| 838 |
+
"bbox": [
|
| 839 |
+
0.073,
|
| 840 |
+
0.362,
|
| 841 |
+
0.447,
|
| 842 |
+
0.377
|
| 843 |
+
],
|
| 844 |
+
"angle": 0,
|
| 845 |
+
"content": "Algorithm 1: Pseudocode for Our Proposed Algorithm"
|
| 846 |
+
},
|
| 847 |
+
{
|
| 848 |
+
"type": "algorithm",
|
| 849 |
+
"bbox": [
|
| 850 |
+
0.092,
|
| 851 |
+
0.378,
|
| 852 |
+
0.681,
|
| 853 |
+
0.643
|
| 854 |
+
],
|
| 855 |
+
"angle": 0,
|
| 856 |
+
"content": "input: Set of training graphs \\(G^{+} = \\{G(V,A,X)\\}\\); Number of training epochs \\(K\\); Corruption function \\(C\\); All 166 Features (AF); First 94 Local Features (LF); output: Optimized GIN encoder \\(g\\), Optimized RF \\(h\\_ R\\) \n1 Initialize the parameters \\(\\theta\\) and \\(\\omega\\) for the encoder \\(g\\) and the discriminator \\(D\\); \n2 foreach batch \\(G \\in G^{+}\\) do \n3 for epoch \\(\\leftarrow 1\\) to \\(K\\) do \n4 \\(h_i = g(G, \\theta)\\) \n5 \\(\\widetilde{h}_i = g(C(G), \\theta)\\) \n6 \\(\\bar{s} = \\sigma \\left( \\frac{1}{n} \\sum_{i=1}^{n} h_i^{(L)} \\right)\\) \n7 \\(D(h_i, \\bar{s}) = \\sigma(h_i^T \\mathbf{w} \\bar{s})\\) \n8 \\(D(\\widetilde{h}_i, \\bar{s}) = \\sigma(\\widetilde{h}_i^T \\mathbf{w} \\bar{s})\\) \n9 \\(L_{DGI} = \\frac{1}{N + M} \\left( \\sum_{i=1}^{N} \\mathbb{E}_{(\\mathbf{X}, \\mathbf{A})}[\\log D(\\vec{h}_i, \\vec{s})] + \\sum_{j=1}^{M} \\mathbb{E}_{(\\overline{\\mathbf{X}}, \\overline{\\mathbf{A}})}[\\log(1 - D(\\vec{h}_j, \\vec{s}))]\\right)\\) \n10 \\(\\theta, \\omega \\gets\\) Adam (\\(L_{DGI}\\)) \n11 Select labeled node embedding \\(h_i\\) from \\(h_i = g(G, \\theta)\\) and corresponding labels \\(y\\) for \\(G \\in\\) training set; \n12 \\(h\\_ R \\gets\\) RF((\\(h_i||\\{AF\\) or \\(LF\\}\\)), y) \n13 return \\(h\\_ R, g\\)"
|
| 857 |
+
},
|
| 858 |
+
{
|
| 859 |
+
"type": "text",
|
| 860 |
+
"bbox": [
|
| 861 |
+
0.058,
|
| 862 |
+
0.673,
|
| 863 |
+
0.486,
|
| 864 |
+
0.715
|
| 865 |
+
],
|
| 866 |
+
"angle": 0,
|
| 867 |
+
"content": "ability to solve the graph isomorphism problem, where nonisomorphic graphs should be mapped to different representations."
|
| 868 |
+
},
|
| 869 |
+
{
|
| 870 |
+
"type": "text",
|
| 871 |
+
"bbox": [
|
| 872 |
+
0.058,
|
| 873 |
+
0.716,
|
| 874 |
+
0.486,
|
| 875 |
+
0.772
|
| 876 |
+
],
|
| 877 |
+
"angle": 0,
|
| 878 |
+
"content": "We applied a full neighbor sampling technique and used two-hop neighbor samples for the GIN encoder with Batch normalization, as DGI benefits from employing wider rather than deeper models [4]."
|
| 879 |
+
},
|
| 880 |
+
{
|
| 881 |
+
"type": "text",
|
| 882 |
+
"bbox": [
|
| 883 |
+
0.058,
|
| 884 |
+
0.773,
|
| 885 |
+
0.486,
|
| 886 |
+
0.83
|
| 887 |
+
],
|
| 888 |
+
"angle": 0,
|
| 889 |
+
"content": "For the read-out function \\( R \\), we applied the mean operation on all node embeddings in the real graph \\( G \\) and then applied a sigmoid activation function to compute the whole graph embeddings \\( \\overline{s} \\):"
|
| 890 |
+
},
|
| 891 |
+
{
|
| 892 |
+
"type": "equation",
|
| 893 |
+
"bbox": [
|
| 894 |
+
0.211,
|
| 895 |
+
0.84,
|
| 896 |
+
0.486,
|
| 897 |
+
0.876
|
| 898 |
+
],
|
| 899 |
+
"angle": 0,
|
| 900 |
+
"content": "\\[\n\\bar {s} = \\sigma \\left(\\frac {1}{n} \\sum_ {i = 1} ^ {n} h _ {i} ^ {(L)}\\right) \\tag {4}\n\\]"
|
| 901 |
+
},
|
| 902 |
+
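The readout of Equation 4 reduces to a single line; a minimal sketch, with names chosen for illustration:

```python
# Minimal sketch of the readout R in Equation 4.
import torch

def readout(h: torch.Tensor) -> torch.Tensor:
    # h: (n, d) final-layer node embeddings h_i^(L)
    return torch.sigmoid(h.mean(dim=0))  # whole-graph summary s
```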
{
|
| 903 |
+
"type": "text",
|
| 904 |
+
"bbox": [
|
| 905 |
+
0.059,
|
| 906 |
+
0.878,
|
| 907 |
+
0.486,
|
| 908 |
+
0.907
|
| 909 |
+
],
|
| 910 |
+
"angle": 0,
|
| 911 |
+
"content": "In Algorithm 1, from line 7 to 8, as shown in Equation 5 and Equation 6, for the discriminator \\( D \\), we used a logistic sigmoid"
|
| 912 |
+
},
|
| 913 |
+
{
|
| 914 |
+
"type": "code_caption",
|
| 915 |
+
"bbox": [
|
| 916 |
+
0.524,
|
| 917 |
+
0.677,
|
| 918 |
+
0.854,
|
| 919 |
+
0.705
|
| 920 |
+
],
|
| 921 |
+
"angle": 0,
|
| 922 |
+
"content": "Algorithm 2: Batch Normalizing Transform [29]"
|
| 923 |
+
},
|
| 924 |
+
{
|
| 925 |
+
"type": "algorithm",
|
| 926 |
+
"bbox": [
|
| 927 |
+
0.513,
|
| 928 |
+
0.706,
|
| 929 |
+
0.884,
|
| 930 |
+
0.813
|
| 931 |
+
],
|
| 932 |
+
"angle": 0,
|
| 933 |
+
"content": "input: Values of \\(x\\) over a mini-batch: \\(B = \\{x_{1\\dots m}\\}\\) Parameters can be learned: \\(\\gamma ,\\beta\\) output: \\(\\left\\{y_i = \\mathbf{BN}_{\\gamma ,\\beta}(x_i)\\right\\}\\) \n1 \\(\\mu_B\\gets \\frac{1}{m}\\sum_{i = 1}^m x_i\\) \n2 \\(\\sigma_B^2\\gets \\frac{1}{m}\\sum_{i = 1}^m (x_i - \\mu_B)^2\\) \n3 \\(\\widehat{x}_i\\gets \\frac{x_i - \\mu_B}{\\sqrt{\\sigma_B^2 + \\epsilon}}\\) \n4 \\(y_{i}\\gets \\gamma \\widehat{x}_{i} + \\beta \\equiv \\mathbf{BN}_{\\gamma ,\\beta}(x_{i})\\)"
|
| 934 |
+
},
|
| 935 |
+
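For reference, Algorithm 2 can be written directly in PyTorch as below. This is a from-scratch sketch for clarity; in practice torch.nn.BatchNorm1d provides the same transform (plus running statistics).

```python
# Minimal from-scratch sketch of Algorithm 2 (batch normalizing transform).
import torch

def batch_norm(x: torch.Tensor, gamma: torch.Tensor, beta: torch.Tensor,
               eps: float = 1e-5) -> torch.Tensor:
    mu = x.mean(dim=0)                        # mini-batch mean
    var = x.var(dim=0, unbiased=False)        # mini-batch variance
    x_hat = (x - mu) / torch.sqrt(var + eps)  # normalize
    return gamma * x_hat + beta               # scale and shift: BN_{γ,β}(x)
```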
{
|
| 936 |
+
"type": "text",
|
| 937 |
+
"bbox": [
|
| 938 |
+
0.509,
|
| 939 |
+
0.837,
|
| 940 |
+
0.938,
|
| 941 |
+
0.883
|
| 942 |
+
],
|
| 943 |
+
"angle": 0,
|
| 944 |
+
"content": "non-linear function to discriminate node embedding vector \\(\\vec{h}_i\\) against the real whole graph embedding \\(\\overline{s}\\) to calculate the score of \\((\\vec{h}_i,\\overline{s})\\) being positive or negative:"
|
| 945 |
+
},
|
| 946 |
+
{
|
| 947 |
+
"type": "equation",
|
| 948 |
+
"bbox": [
|
| 949 |
+
0.654,
|
| 950 |
+
0.89,
|
| 951 |
+
0.937,
|
| 952 |
+
0.91
|
| 953 |
+
],
|
| 954 |
+
"angle": 0,
|
| 955 |
+
"content": "\\[\nD \\left(h _ {i}, \\bar {s}\\right) = \\sigma \\left(h _ {i} ^ {T} w \\bar {s}\\right) \\tag {5}\n\\]"
|
| 956 |
+
},
|
| 957 |
+
{
|
| 958 |
+
"type": "page_number",
|
| 959 |
+
"bbox": [
|
| 960 |
+
0.494,
|
| 961 |
+
0.915,
|
| 962 |
+
0.504,
|
| 963 |
+
0.926
|
| 964 |
+
],
|
| 965 |
+
"angle": 0,
|
| 966 |
+
"content": "5"
|
| 967 |
+
}
|
| 968 |
+
],
|
| 969 |
+
[
|
| 970 |
+
{
|
| 971 |
+
"type": "equation",
|
| 972 |
+
"bbox": [
|
| 973 |
+
0.202,
|
| 974 |
+
0.109,
|
| 975 |
+
0.486,
|
| 976 |
+
0.128
|
| 977 |
+
],
|
| 978 |
+
"angle": 0,
|
| 979 |
+
"content": "\\[\nD \\left(\\widetilde {h _ {i}}, \\bar {s}\\right) = \\sigma \\left(\\widetilde {h _ {i} ^ {T}} w \\bar {s}\\right) \\tag {6}\n\\]"
|
| 980 |
+
},
|
| 981 |
+
{
|
| 982 |
+
"type": "text",
|
| 983 |
+
"bbox": [
|
| 984 |
+
0.058,
|
| 985 |
+
0.132,
|
| 986 |
+
0.486,
|
| 987 |
+
0.333
|
| 988 |
+
],
|
| 989 |
+
"angle": 0,
|
| 990 |
+
"content": "We then used a binary cross-entropy loss objective function (based on Equation 3, modified so that \\( N = M \\)) to perform gradient descent, as shown in Algorithm 1, line 10. To perform gradient descent, we maximized the score if the node embedding is a true node embedding \\( \\vec{h}_i \\) and minimized the score if it is a corrupted node embedding \\( \\vec{h}_i \\) compared to the global graph summary generated by the read-out function \\( R \\) (Equation 4). As a result, we maximized the mutual information between patch representations and the whole real graph summary based on the binary cross-entropy loss function (BCE), as shown in Equation 3 to perform gradient descent. After the training process, the trained encoder can be used to generate new graph embeddings for downstream purposes; in this case, the detection of illegal transactions."
|
| 991 |
+
},
|
| 992 |
+
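A minimal sketch of the bilinear discriminator of Equations 5 and 6 is given below; the Xavier initialization is an assumption, chosen only to make the sketch complete.

```python
# Minimal sketch of the bilinear discriminator of Equations 5 and 6.
import torch
import torch.nn as nn

class Discriminator(nn.Module):
    def __init__(self, dim: int):
        super().__init__()
        self.w = nn.Parameter(torch.empty(dim, dim))  # learnable matrix w
        nn.init.xavier_uniform_(self.w)

    def forward(self, h: torch.Tensor, s: torch.Tensor) -> torch.Tensor:
        # Scores sigma(h_i^T w s) in (0, 1), one per node embedding h_i.
        return torch.sigmoid(h @ self.w @ s)
```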
{
|
| 993 |
+
"type": "text",
|
| 994 |
+
"bbox": [
|
| 995 |
+
0.058,
|
| 996 |
+
0.334,
|
| 997 |
+
0.485,
|
| 998 |
+
0.405
|
| 999 |
+
],
|
| 1000 |
+
"angle": 0,
|
| 1001 |
+
"content": "In our experiments, we used all 34 different Bitcoin transaction graphs to train the DGI with the GIN encoder in a self-supervised manner. For each training graph, we trained 300 epochs using an Adam optimizer with a learning rate of 0.0001, as shown in Algorithm 1, line 10."
|
| 1002 |
+
},
|
| 1003 |
+
{
|
| 1004 |
+
"type": "title",
|
| 1005 |
+
"bbox": [
|
| 1006 |
+
0.059,
|
| 1007 |
+
0.419,
|
| 1008 |
+
0.396,
|
| 1009 |
+
0.433
|
| 1010 |
+
],
|
| 1011 |
+
"angle": 0,
|
| 1012 |
+
"content": "4.2. Supervised Machine Learning Classification"
|
| 1013 |
+
},
|
| 1014 |
+
{
|
| 1015 |
+
"type": "text",
|
| 1016 |
+
"bbox": [
|
| 1017 |
+
0.062,
|
| 1018 |
+
0.437,
|
| 1019 |
+
0.486,
|
| 1020 |
+
0.677
|
| 1021 |
+
],
|
| 1022 |
+
"angle": 0,
|
| 1023 |
+
"content": "After the DGI training, we reused the encoder to generate node embeddings, as shown in Algorithm 1, line 11-12 to train and test the RF classifier with 100 estimators. In our experiments, we performed 70:30 splitting, 34 different Bitcoin transaction graphs for training and the remaining 15 bitcoin transaction graphs for testing. All 34 training graphs were fed to DGI to train the GIN encoder in a self-supervised manner. Once the training phase was completed, we used a trained GIN encoder to extract all the node embeddings (all 34 graph node embeddings) in the training graphs. As the datasets consist of two labels, binary classification and unknown labels, we dropped unknown label data in the RF training and testing phases and only used label data for performance. We used all training graph node embeddings to train the RF in a supervised manner. For testing, we extracted the last 15 test graph node embeddings using the trained GIN and fed the node embeddings to the trained RF for illegal transaction detection."
|
| 1024 |
+
},
|
| 1025 |
+
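A minimal sketch of this supervised stage follows, assuming embeddings and raw features as NumPy arrays and unknown labels encoded as -1 (an illustrative convention, not the dataset's actual encoding):

```python
# Minimal sketch of the supervised stage: concatenate DGI node embeddings
# with raw features (h_i || {AF or LF}) and train an RF with 100 trees.
import numpy as np
from sklearn.ensemble import RandomForestClassifier

def train_rf(embeddings: np.ndarray, features: np.ndarray,
             labels: np.ndarray) -> RandomForestClassifier:
    X = np.hstack([embeddings, features])  # feature augmentation
    mask = labels != -1                    # drop unknown-label nodes (assumed -1)
    rf = RandomForestClassifier(n_estimators=100)
    rf.fit(X[mask], labels[mask])
    return rf
```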
{
|
| 1026 |
+
"type": "text",
|
| 1027 |
+
"bbox": [
|
| 1028 |
+
0.059,
|
| 1029 |
+
0.678,
|
| 1030 |
+
0.486,
|
| 1031 |
+
0.707
|
| 1032 |
+
],
|
| 1033 |
+
"angle": 0,
|
| 1034 |
+
"content": "We experimented with the following three combinations of features and embeddings:"
|
| 1035 |
+
},
|
| 1036 |
+
{
|
| 1037 |
+
"type": "text",
|
| 1038 |
+
"bbox": [
|
| 1039 |
+
0.08,
|
| 1040 |
+
0.716,
|
| 1041 |
+
0.486,
|
| 1042 |
+
0.759
|
| 1043 |
+
],
|
| 1044 |
+
"angle": 0,
|
| 1045 |
+
"content": "1. DNE : Node Embeddings only: After the DGI training, we reused the encoder to generate node embeddings for training and testing the RF classifier, as mentioned above."
|
| 1046 |
+
},
|
| 1047 |
+
{
|
| 1048 |
+
"type": "text",
|
| 1049 |
+
"bbox": [
|
| 1050 |
+
0.08,
|
| 1051 |
+
0.76,
|
| 1052 |
+
0.485,
|
| 1053 |
+
0.83
|
| 1054 |
+
],
|
| 1055 |
+
"angle": 0,
|
| 1056 |
+
"content": "2. \\(\\mathbf{LF} + \\mathbf{DNE}\\) : Node Embeddings with LF features: Similar to scenario 1, we also combined local features (i.e, first 94 raw features) with the node embeddings generated by the trained encode for training and testing the RF classifier, as mentioned above."
|
| 1057 |
+
},
|
| 1058 |
+
{
|
| 1059 |
+
"type": "text",
|
| 1060 |
+
"bbox": [
|
| 1061 |
+
0.08,
|
| 1062 |
+
0.833,
|
| 1063 |
+
0.486,
|
| 1064 |
+
0.903
|
| 1065 |
+
],
|
| 1066 |
+
"angle": 0,
|
| 1067 |
+
"content": "3. AF + DNE : Node Embeddings with AF Features: Similar to scenario 1, we also combined all raw features (AF features) with the node embeddings generated by the trained encoder for training and testing the RF classifier, as mentioned above."
|
| 1068 |
+
},
|
| 1069 |
+
{
|
| 1070 |
+
"type": "list",
|
| 1071 |
+
"bbox": [
|
| 1072 |
+
0.08,
|
| 1073 |
+
0.716,
|
| 1074 |
+
0.486,
|
| 1075 |
+
0.903
|
| 1076 |
+
],
|
| 1077 |
+
"angle": 0,
|
| 1078 |
+
"content": null
|
| 1079 |
+
},
|
| 1080 |
+
{
|
| 1081 |
+
"type": "table_caption",
|
| 1082 |
+
"bbox": [
|
| 1083 |
+
0.584,
|
| 1084 |
+
0.095,
|
| 1085 |
+
0.864,
|
| 1086 |
+
0.107
|
| 1087 |
+
],
|
| 1088 |
+
"angle": 0,
|
| 1089 |
+
"content": "Table 1: Implementation environment specification"
|
| 1090 |
+
},
|
| 1091 |
+
{
|
| 1092 |
+
"type": "table",
|
| 1093 |
+
"bbox": [
|
| 1094 |
+
0.513,
|
| 1095 |
+
0.117,
|
| 1096 |
+
0.929,
|
| 1097 |
+
0.213
|
| 1098 |
+
],
|
| 1099 |
+
"angle": 0,
|
| 1100 |
+
"content": "<table><tr><td>Unit</td><td>Description</td></tr><tr><td>Processor</td><td>2.3 GHz 2-core Inter Xeon(R) Processor</td></tr><tr><td>RAM</td><td>12GB</td></tr><tr><td>GPU</td><td>Tesla P100 GPU 16GB</td></tr><tr><td>Operating System</td><td>Linux</td></tr><tr><td>Packages</td><td>Skit-learn, Numpy, Pandas, PyTorch Geometric, and matplotlib</td></tr></table>"
|
| 1101 |
+
},
|
| 1102 |
+
{
|
| 1103 |
+
"type": "title",
|
| 1104 |
+
"bbox": [
|
| 1105 |
+
0.51,
|
| 1106 |
+
0.237,
|
| 1107 |
+
0.75,
|
| 1108 |
+
0.251
|
| 1109 |
+
],
|
| 1110 |
+
"angle": 0,
|
| 1111 |
+
"content": "4.3. Implementation Environments"
|
| 1112 |
+
},
|
| 1113 |
+
{
|
| 1114 |
+
"type": "text",
|
| 1115 |
+
"bbox": [
|
| 1116 |
+
0.509,
|
| 1117 |
+
0.254,
|
| 1118 |
+
0.938,
|
| 1119 |
+
0.354
|
| 1120 |
+
],
|
| 1121 |
+
"angle": 0,
|
| 1122 |
+
"content": "Experiments were carried out using a 2.3GHz 2-core Intel(R) Xeon(R) processor with 12 GB memory and Tesla P100 GPU on a Linux operating system. The proposed approach was developed using the Python programming language with several statistical and visualization packages, such as Sckt-learn, Numpy, Pandas, PyTorch Geometric, and Matplotlib. Table 1 summarizes the system configuration."
|
| 1123 |
+
},
|
| 1124 |
+
{
|
| 1125 |
+
"type": "title",
|
| 1126 |
+
"bbox": [
|
| 1127 |
+
0.51,
|
| 1128 |
+
0.375,
|
| 1129 |
+
0.715,
|
| 1130 |
+
0.388
|
| 1131 |
+
],
|
| 1132 |
+
"angle": 0,
|
| 1133 |
+
"content": "5. Experiments and Results"
|
| 1134 |
+
},
|
| 1135 |
+
{
|
| 1136 |
+
"type": "title",
|
| 1137 |
+
"bbox": [
|
| 1138 |
+
0.511,
|
| 1139 |
+
0.4,
|
| 1140 |
+
0.602,
|
| 1141 |
+
0.412
|
| 1142 |
+
],
|
| 1143 |
+
"angle": 0,
|
| 1144 |
+
"content": "5.1. Dataset"
|
| 1145 |
+
},
|
| 1146 |
+
{
|
| 1147 |
+
"type": "text",
|
| 1148 |
+
"bbox": [
|
| 1149 |
+
0.509,
|
| 1150 |
+
0.417,
|
| 1151 |
+
0.938,
|
| 1152 |
+
0.559
|
| 1153 |
+
],
|
| 1154 |
+
"angle": 0,
|
| 1155 |
+
"content": "In this paper, we adopted the Elliptic dataset [5], which is the world's largest labeled dataset of bitcoin transactions. The Elliptic dataset [5] consists of 203,769 node as transactions and 234,355 directed transaction payment flows (i.e., transaction inputs, transaction outputs). The datasets also consist of 49 different timestep graphs, which are uniformly spaced with a two-week interval, as illustrated in 3. Each connected transaction component consists of a time step that appears on the blockchain in less than three hours. Our \\( G \\) represents one such transaction graph for the 49."
|
| 1156 |
+
},
|
| 1157 |
+
{
|
| 1158 |
+
"type": "text",
|
| 1159 |
+
"bbox": [
|
| 1160 |
+
0.509,
|
| 1161 |
+
0.56,
|
| 1162 |
+
0.938,
|
| 1163 |
+
0.744
|
| 1164 |
+
],
|
| 1165 |
+
"angle": 0,
|
| 1166 |
+
"content": "In the Elliptic dataset [5], \\(21\\%\\) of the node entities are labeled as licit, and only \\(2\\%\\) are labeled as illicit. The remaining node entities are unlabeled but have node features. These node entities consist of 166 features (AF features), among which the first 94 features contain local information (LF features) of the transactions, including the time step, transaction fees, and the number of inputs or outputs. The remaining 72 features are aggregated features. These features can be obtained by aggregating transaction information from one-hop backward/forward graph nodes, such as the standard deviation, minimum, maximum, and correlation coefficients of the neighbor transactions for the same information data. More importantly, all features were obtained using only publicly available information."
|
| 1167 |
+
},
|
| 1168 |
+
{
|
| 1169 |
+
"type": "title",
|
| 1170 |
+
"bbox": [
|
| 1171 |
+
0.51,
|
| 1172 |
+
0.759,
|
| 1173 |
+
0.685,
|
| 1174 |
+
0.772
|
| 1175 |
+
],
|
| 1176 |
+
"angle": 0,
|
| 1177 |
+
"content": "5.2. Performance Metric"
|
| 1178 |
+
},
|
| 1179 |
+
{
|
| 1180 |
+
"type": "text",
|
| 1181 |
+
"bbox": [
|
| 1182 |
+
0.509,
|
| 1183 |
+
0.776,
|
| 1184 |
+
0.938,
|
| 1185 |
+
0.833
|
| 1186 |
+
],
|
| 1187 |
+
"angle": 0,
|
| 1188 |
+
"content": "To evaluate the performance of the proposed methods, the standard metrics listed in Table 2 were used, where \\( TP \\), \\( TN \\), \\( FP \\) and \\( FN \\) represent the number of True Positives, True Negatives, False Positives and False Negatives, respectively."
|
| 1189 |
+
},
|
| 1190 |
+
{
|
| 1191 |
+
"type": "text",
|
| 1192 |
+
"bbox": [
|
| 1193 |
+
0.509,
|
| 1194 |
+
0.833,
|
| 1195 |
+
0.938,
|
| 1196 |
+
0.904
|
| 1197 |
+
],
|
| 1198 |
+
"angle": 0,
|
| 1199 |
+
"content": "In Table 2, true positive (TP) denotes the total number of true positives, true negative (TN) indicates the total number of false positives, false positive (FP) denotes the total number of false negatives and false negative (TN) shows the total number of true negatives. The proposed method was evaluated using"
|
| 1200 |
+
},
|
| 1201 |
+
{
|
| 1202 |
+
"type": "page_number",
|
| 1203 |
+
"bbox": [
|
| 1204 |
+
0.494,
|
| 1205 |
+
0.915,
|
| 1206 |
+
0.504,
|
| 1207 |
+
0.926
|
| 1208 |
+
],
|
| 1209 |
+
"angle": 0,
|
| 1210 |
+
"content": "6"
|
| 1211 |
+
}
|
| 1212 |
+
],
|
| 1213 |
+
[
|
| 1214 |
+
{
|
| 1215 |
+
"type": "image",
|
| 1216 |
+
"bbox": [
|
| 1217 |
+
0.223,
|
| 1218 |
+
0.095,
|
| 1219 |
+
0.316,
|
| 1220 |
+
0.142
|
| 1221 |
+
],
|
| 1222 |
+
"angle": 0,
|
| 1223 |
+
"content": null
|
| 1224 |
+
},
|
| 1225 |
+
{
|
| 1226 |
+
"type": "image",
|
| 1227 |
+
"bbox": [
|
| 1228 |
+
0.255,
|
| 1229 |
+
0.144,
|
| 1230 |
+
0.777,
|
| 1231 |
+
0.257
|
| 1232 |
+
],
|
| 1233 |
+
"angle": 0,
|
| 1234 |
+
"content": null
|
| 1235 |
+
},
|
| 1236 |
+
{
|
| 1237 |
+
"type": "image_caption",
|
| 1238 |
+
"bbox": [
|
| 1239 |
+
0.378,
|
| 1240 |
+
0.273,
|
| 1241 |
+
0.618,
|
| 1242 |
+
0.285
|
| 1243 |
+
],
|
| 1244 |
+
"angle": 0,
|
| 1245 |
+
"content": "Figure 3: Overview of Elliptic Dataset [30]"
|
| 1246 |
+
},
|
| 1247 |
+
{
|
| 1248 |
+
"type": "image",
|
| 1249 |
+
"bbox": [
|
| 1250 |
+
0.098,
|
| 1251 |
+
0.303,
|
| 1252 |
+
0.906,
|
| 1253 |
+
0.546
|
| 1254 |
+
],
|
| 1255 |
+
"angle": 0,
|
| 1256 |
+
"content": null
|
| 1257 |
+
},
|
| 1258 |
+
{
|
| 1259 |
+
"type": "image_caption",
|
| 1260 |
+
"bbox": [
|
| 1261 |
+
0.396,
|
| 1262 |
+
0.563,
|
| 1263 |
+
0.601,
|
| 1264 |
+
0.575
|
| 1265 |
+
],
|
| 1266 |
+
"angle": 0,
|
| 1267 |
+
"content": "Figure 4: Illicit F1 over test timestep"
|
| 1268 |
+
},
|
| 1269 |
+
{
|
| 1270 |
+
"type": "table_caption",
|
| 1271 |
+
"bbox": [
|
| 1272 |
+
0.147,
|
| 1273 |
+
0.598,
|
| 1274 |
+
0.398,
|
| 1275 |
+
0.61
|
| 1276 |
+
],
|
| 1277 |
+
"angle": 0,
|
| 1278 |
+
"content": "Table 2: Evaluation metrics used in this study"
|
| 1279 |
+
},
|
| 1280 |
+
{
|
| 1281 |
+
"type": "table",
|
| 1282 |
+
"bbox": [
|
| 1283 |
+
0.126,
|
| 1284 |
+
0.62,
|
| 1285 |
+
0.418,
|
| 1286 |
+
0.717
|
| 1287 |
+
],
|
| 1288 |
+
"angle": 0,
|
| 1289 |
+
"content": "<table><tr><td>Metric</td><td>Definition</td></tr><tr><td>Detection Rate (Recall)</td><td>TP/TP+FN</td></tr><tr><td>Precision</td><td>TP/TP+FP</td></tr><tr><td>F1-Score</td><td>2×Recall×Precision/Recall+Precision</td></tr><tr><td>AUC-Score</td><td>\\(\\int_0^1 \\frac{TP}{TP+FN} d\\frac{FP}{TN+FP}\\)</td></tr></table>"
|
| 1290 |
+
},
|
| 1291 |
+
{
|
| 1292 |
+
"type": "text",
|
| 1293 |
+
"bbox": [
|
| 1294 |
+
0.058,
|
| 1295 |
+
0.741,
|
| 1296 |
+
0.486,
|
| 1297 |
+
0.782
|
| 1298 |
+
],
|
| 1299 |
+
"angle": 0,
|
| 1300 |
+
"content": "Precision, Recall, F1-score and Area under the receiver operating characteristics (ROC) curve. All the above metrics can be obtained using the confusion matrix (CM)."
|
| 1301 |
+
},
|
| 1302 |
+
{
|
| 1303 |
+
"type": "text",
|
| 1304 |
+
"bbox": [
|
| 1305 |
+
0.058,
|
| 1306 |
+
0.783,
|
| 1307 |
+
0.486,
|
| 1308 |
+
0.881
|
| 1309 |
+
],
|
| 1310 |
+
"angle": 0,
|
| 1311 |
+
"content": "Accuracy indicates that the model is well learned in case of a balanced test dataset; however, for imbalanced scenarios, as in this case, only considering accuracy measures may lead to misleading conclusion, since it is strongly biased in favor of the licit majority class. Thus, for this case, recall and F1-score metrics provide a more reasonable explanation of the model's performance."
|
| 1312 |
+
},
|
| 1313 |
+
{
|
| 1314 |
+
"type": "text",
|
| 1315 |
+
"bbox": [
|
| 1316 |
+
0.084,
|
| 1317 |
+
0.883,
|
| 1318 |
+
0.486,
|
| 1319 |
+
0.896
|
| 1320 |
+
],
|
| 1321 |
+
"angle": 0,
|
| 1322 |
+
"content": "Recall (also known as Detection Rate) is the total number of"
|
| 1323 |
+
},
|
| 1324 |
+
{
|
| 1325 |
+
"type": "text",
|
| 1326 |
+
"bbox": [
|
| 1327 |
+
0.509,
|
| 1328 |
+
0.6,
|
| 1329 |
+
0.937,
|
| 1330 |
+
0.641
|
| 1331 |
+
],
|
| 1332 |
+
"angle": 0,
|
| 1333 |
+
"content": "true positives divided by the total number of true positives and false negatives. If the recall rate is very low, this means that the classifier cannot detect illicit transactions."
|
| 1334 |
+
},
|
| 1335 |
+
{
|
| 1336 |
+
"type": "text",
|
| 1337 |
+
"bbox": [
|
| 1338 |
+
0.509,
|
| 1339 |
+
0.643,
|
| 1340 |
+
0.938,
|
| 1341 |
+
0.713
|
| 1342 |
+
],
|
| 1343 |
+
"angle": 0,
|
| 1344 |
+
"content": "Precision measures the quality of the correct predictions. This is the number of true positives divided by the number of true positives and false positives. If the false positive is very high, it will cause low precision. Our goal is to maximize the precision as much as possible."
|
| 1345 |
+
},
|
| 1346 |
+
{
|
| 1347 |
+
"type": "text",
|
| 1348 |
+
"bbox": [
|
| 1349 |
+
0.509,
|
| 1350 |
+
0.714,
|
| 1351 |
+
0.939,
|
| 1352 |
+
0.742
|
| 1353 |
+
],
|
| 1354 |
+
"angle": 0,
|
| 1355 |
+
"content": "F1-score is the trade-off between precision and recall. Mathematically, it is the harmonic mean of precision and recall."
|
| 1356 |
+
},
|
| 1357 |
+
{
|
| 1358 |
+
"type": "text",
|
| 1359 |
+
"bbox": [
|
| 1360 |
+
0.509,
|
| 1361 |
+
0.742,
|
| 1362 |
+
0.938,
|
| 1363 |
+
0.813
|
| 1364 |
+
],
|
| 1365 |
+
"angle": 0,
|
| 1366 |
+
"content": "The area under the curve (AUC) computes the trade-off between sensitivity and specificity, plotted based on the trade-off between the true positive rate on the y-axis and the false positive rate on the x-axis. Our goal is to maximize the AUC score as much as possible, making it closer to 1.0."
|
| 1367 |
+
},
|
| 1368 |
+
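A minimal sketch of computing these metrics with scikit-learn, assuming binary labels with the illicit class as the positive class; names are placeholders:

```python
# Minimal sketch of the Table 2 metrics via scikit-learn.
from sklearn.metrics import (precision_score, recall_score, f1_score,
                             roc_auc_score)

def evaluate(y_true, y_pred, y_score) -> dict:
    return {
        "precision": precision_score(y_true, y_pred),
        "recall": recall_score(y_true, y_pred),  # detection rate
        "f1": f1_score(y_true, y_pred),
        "auc": roc_auc_score(y_true, y_score),   # area under the ROC curve
    }
```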
{
|
| 1369 |
+
"type": "title",
|
| 1370 |
+
"bbox": [
|
| 1371 |
+
0.51,
|
| 1372 |
+
0.827,
|
| 1373 |
+
0.692,
|
| 1374 |
+
0.84
|
| 1375 |
+
],
|
| 1376 |
+
"angle": 0,
|
| 1377 |
+
"content": "5.3. Experimental Results"
|
| 1378 |
+
},
|
| 1379 |
+
{
|
| 1380 |
+
"type": "text",
|
| 1381 |
+
"bbox": [
|
| 1382 |
+
0.509,
|
| 1383 |
+
0.845,
|
| 1384 |
+
0.938,
|
| 1385 |
+
0.902
|
| 1386 |
+
],
|
| 1387 |
+
"angle": 0,
|
| 1388 |
+
"content": "Table 3 shows the corresponding results of our Inspection-L compared to the state-of-the-art in terms of the key metrics. As can be observed from the table, regarding to illicit F1-Score, Inspection-L (LF+DNE and AF+DNE) outperforms the best"
|
| 1389 |
+
},
|
| 1390 |
+
{
|
| 1391 |
+
"type": "page_number",
|
| 1392 |
+
"bbox": [
|
| 1393 |
+
0.494,
|
| 1394 |
+
0.915,
|
| 1395 |
+
0.504,
|
| 1396 |
+
0.924
|
| 1397 |
+
],
|
| 1398 |
+
"angle": 0,
|
| 1399 |
+
"content": "7"
|
| 1400 |
+
}
|
| 1401 |
+
],
|
| 1402 |
+
[
|
| 1403 |
+
{
|
| 1404 |
+
"type": "image",
|
| 1405 |
+
"bbox": [
|
| 1406 |
+
0.145,
|
| 1407 |
+
0.095,
|
| 1408 |
+
0.368,
|
| 1409 |
+
0.243
|
| 1410 |
+
],
|
| 1411 |
+
"angle": 0,
|
| 1412 |
+
"content": null
|
| 1413 |
+
},
|
| 1414 |
+
{
|
| 1415 |
+
"type": "image_caption",
|
| 1416 |
+
"bbox": [
|
| 1417 |
+
0.218,
|
| 1418 |
+
0.246,
|
| 1419 |
+
0.292,
|
| 1420 |
+
0.258
|
| 1421 |
+
],
|
| 1422 |
+
"angle": 0,
|
| 1423 |
+
"content": "(a) AF + DNE"
|
| 1424 |
+
},
|
| 1425 |
+
{
|
| 1426 |
+
"type": "image",
|
| 1427 |
+
"bbox": [
|
| 1428 |
+
0.379,
|
| 1429 |
+
0.096,
|
| 1430 |
+
0.6,
|
| 1431 |
+
0.243
|
| 1432 |
+
],
|
| 1433 |
+
"angle": 0,
|
| 1434 |
+
"content": null
|
| 1435 |
+
},
|
| 1436 |
+
{
|
| 1437 |
+
"type": "image_caption",
|
| 1438 |
+
"bbox": [
|
| 1439 |
+
0.454,
|
| 1440 |
+
0.246,
|
| 1441 |
+
0.526,
|
| 1442 |
+
0.258
|
| 1443 |
+
],
|
| 1444 |
+
"angle": 0,
|
| 1445 |
+
"content": "(b) LF + DNE"
|
| 1446 |
+
},
|
| 1447 |
+
{
|
| 1448 |
+
"type": "image",
|
| 1449 |
+
"bbox": [
|
| 1450 |
+
0.633,
|
| 1451 |
+
0.097,
|
| 1452 |
+
0.853,
|
| 1453 |
+
0.242
|
| 1454 |
+
],
|
| 1455 |
+
"angle": 0,
|
| 1456 |
+
"content": null
|
| 1457 |
+
},
|
| 1458 |
+
{
|
| 1459 |
+
"type": "image_caption",
|
| 1460 |
+
"bbox": [
|
| 1461 |
+
0.721,
|
| 1462 |
+
0.246,
|
| 1463 |
+
0.764,
|
| 1464 |
+
0.257
|
| 1465 |
+
],
|
| 1466 |
+
"angle": 0,
|
| 1467 |
+
"content": "(c) DNE"
|
| 1468 |
+
},
|
| 1469 |
+
{
|
| 1470 |
+
"type": "image_caption",
|
| 1471 |
+
"bbox": [
|
| 1472 |
+
0.42,
|
| 1473 |
+
0.268,
|
| 1474 |
+
0.578,
|
| 1475 |
+
0.281
|
| 1476 |
+
],
|
| 1477 |
+
"angle": 0,
|
| 1478 |
+
"content": "Figure 5: Confusion Matrix"
|
| 1479 |
+
},
|
| 1480 |
+
{
|
| 1481 |
+
"type": "table_caption",
|
| 1482 |
+
"bbox": [
|
| 1483 |
+
0.058,
|
| 1484 |
+
0.303,
|
| 1485 |
+
0.489,
|
| 1486 |
+
0.361
|
| 1487 |
+
],
|
| 1488 |
+
"angle": 0,
|
| 1489 |
+
"content": "Table 3: Results of binary classification by Inspection-L compared to the state-of-the-art. AF refers to all raw features, LF refers to the local raw features, i.e., the first 94 raw features, GNE refers to the node embeddings generated by GCN in [5] using labels and DNE refers to the node embeddings computed by DGI without using labels."
|
| 1490 |
+
},
|
| 1491 |
+
{
|
| 1492 |
+
"type": "table",
|
| 1493 |
+
"bbox": [
|
| 1494 |
+
0.064,
|
| 1495 |
+
0.371,
|
| 1496 |
+
0.485,
|
| 1497 |
+
0.66
|
| 1498 |
+
],
|
| 1499 |
+
"angle": 0,
|
| 1500 |
+
"content": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"3\">Illicit</td><td rowspan=\"2\">AUC</td></tr><tr><td>Precision</td><td>Recall</td><td>F1</td></tr><tr><td>Logistic RegrAF [5]</td><td>0.404</td><td>0.593</td><td>0.481</td><td>-</td></tr><tr><td>Logistic Regr AF + GNE [5]</td><td>0.537</td><td>0.528</td><td>0.533</td><td>-</td></tr><tr><td>Logistic Regr LF [5]</td><td>0.348</td><td>0.668</td><td>0.457</td><td>-</td></tr><tr><td>Logistic Regr LF + GNE [5]</td><td>0.518</td><td>0.571</td><td>0.543</td><td>-</td></tr><tr><td>RandomForest AF [5]</td><td>0.956</td><td>0.670</td><td>0.788</td><td>-</td></tr><tr><td>RandomForest AF + GNE [5]</td><td>0.971</td><td>0.675</td><td>0.796</td><td>-</td></tr><tr><td>RandomForest AF [14]</td><td>0.897</td><td>0.721</td><td>0.800</td><td>-</td></tr><tr><td>RandomForest AF + GNE [14]</td><td>0.958</td><td>0.715</td><td>0.819</td><td>-</td></tr><tr><td>XGB AF [14]</td><td>0.921</td><td>0.732</td><td>0.815</td><td>-</td></tr><tr><td>XGB AF + GNE [14]</td><td>0.986</td><td>0.692</td><td>0.813</td><td>-</td></tr><tr><td>RandomForest LF [5]</td><td>0.803</td><td>0.611</td><td>0.694</td><td>-</td></tr><tr><td>RandomForest LF + GNE [5]</td><td>0.878</td><td>0.668</td><td>0.759</td><td>-</td></tr><tr><td>MLP AF [5]</td><td>0.694</td><td>0.617</td><td>0.653</td><td>-</td></tr><tr><td>MLP AF + GNE [5]</td><td>0.780</td><td>0.617</td><td>0.689</td><td>-</td></tr><tr><td>MLP LF [5]</td><td>0.637</td><td>0.662</td><td>0.649</td><td>-</td></tr><tr><td>MLP LF + GNE [5]</td><td>0.681</td><td>0.578</td><td>0.625</td><td>-</td></tr><tr><td>GCN [5]</td><td>0.812</td><td>0.512</td><td>0.628</td><td>-</td></tr><tr><td>GCN [16]</td><td>0.899</td><td>0.678</td><td>0.773</td><td>-</td></tr><tr><td>Skip-GCN [5]</td><td>0.812</td><td>0.623</td><td>0.705</td><td>-</td></tr><tr><td>EvolveGCN [5]</td><td>0.850</td><td>0.624</td><td>0.720</td><td>-</td></tr><tr><td>Inspection-L DNE (RF)</td><td>0.593</td><td>0.032</td><td>0.061</td><td>0.735</td></tr><tr><td>Inspection-L LF + DNE (RF)</td><td>0.906</td><td>0.712</td><td>0.797</td><td>0.895</td></tr><tr><td>Inspection-L AF + DNE (RF)</td><td>0.972</td><td>0.721</td><td>0.828</td><td>0.916</td></tr></table>"
|
| 1501 |
+
},
|
| 1502 |
+
{
|
| 1503 |
+
"type": "text",
|
| 1504 |
+
"bbox": [
|
| 1505 |
+
0.057,
|
| 1506 |
+
0.683,
|
| 1507 |
+
0.487,
|
| 1508 |
+
0.825
|
| 1509 |
+
],
|
| 1510 |
+
"angle": 0,
|
| 1511 |
+
"content": "reported classifiers. In the best-performing variant, AF+DNE, we concatenated the node embeddings generated from DGI with all original raw features (AF). The experiment achieved an F1 score and Recall of 0.828 and 0.721, respectively. Using all features (AF) with node embeddings (DNE) as input for classification, the ML model's performance significantly increased, with an AUC of 0.916, compared to 0.735 when only the node embeddings were used for classification. The experiments demonstrate that graph information (node embeddings) is useful to enhance the transaction representations (embeddings)."
|
| 1512 |
+
},
|
| 1513 |
+
{
|
| 1514 |
+
"type": "text",
|
| 1515 |
+
"bbox": [
|
| 1516 |
+
0.057,
|
| 1517 |
+
0.825,
|
| 1518 |
+
0.487,
|
| 1519 |
+
0.897
|
| 1520 |
+
],
|
| 1521 |
+
"angle": 0,
|
| 1522 |
+
"content": "In the second experiment LF+DNE, we concatenated the node embeddings generated from DGI with the local features (LF), which can achieve an F1-score and Recall of 0.712 and 0.797, respectively. Both the results were superior to the state-of-the-art algorithms."
|
| 1523 |
+
},
|
| 1524 |
+
{
|
| 1525 |
+
"type": "text",
|
| 1526 |
+
"bbox": [
|
| 1527 |
+
0.508,
|
| 1528 |
+
0.305,
|
| 1529 |
+
0.938,
|
| 1530 |
+
0.462
|
| 1531 |
+
],
|
| 1532 |
+
"angle": 0,
|
| 1533 |
+
"content": "These results demonstrate the ability of our self-supervised GIN-based approach to generate an enhanced feature set to improve anti-money-laundering detection performance. Furthermore, the results show that the accuracy of the model improves with the enhanced feature set, which contains summary information. Note that the summary information in the AF feature set consists of 1-hop forward and 1-hop backward neighborhood summaries for each node. Unfortunately, the Elliptic dataset does not provide detailed information regarding the feature descriptions, possibly due to confidentially reasons, which limits our ability to provide a deeper discussion."
|
| 1534 |
+
},
|
| 1535 |
+
{
|
| 1536 |
+
"type": "text",
|
| 1537 |
+
"bbox": [
|
| 1538 |
+
0.508,
|
| 1539 |
+
0.462,
|
| 1540 |
+
0.939,
|
| 1541 |
+
0.561
|
| 1542 |
+
],
|
| 1543 |
+
"angle": 0,
|
| 1544 |
+
"content": "Figure 4 shows the F1 measure of the three different model variants across various testing timesteps. Interestingly, none of the three variants can detect new illicit transactions with high precision after dark market shutdown, which occurs at time step 43 [5]. Thus, we note that developing robust methods to detect illicit transactions without their being affected by emerging events is a major challenge that future works need to address."
|
| 1545 |
+
},
|
| 1546 |
+
{
|
| 1547 |
+
"type": "text",
|
| 1548 |
+
"bbox": [
|
| 1549 |
+
0.508,
|
| 1550 |
+
0.561,
|
| 1551 |
+
0.939,
|
| 1552 |
+
0.705
|
| 1553 |
+
],
|
| 1554 |
+
"angle": 0,
|
| 1555 |
+
"content": "Figure 5 shows the confusion matrix of the three different scenarios. Although the classifier trained with embedding features cannot accurately detect illicit transactions, it rarely classifies licit transactions as illicit. Therefore, the false alarm rate is very low, as shown in Figure 5c. The RF classifier trained using both raw features and embedding features, shown in Figure 5a,5b, has the advantage of achieving a high detection rate and a low false alarm rate. As a result, the experimental results demonstrate that DNE node embeddings can be used for feature augmentation to improve overall detection performance."
|
| 1556 |
+
},
|
| 1557 |
+
{
|
| 1558 |
+
"type": "title",
|
| 1559 |
+
"bbox": [
|
| 1560 |
+
0.51,
|
| 1561 |
+
0.717,
|
| 1562 |
+
0.747,
|
| 1563 |
+
0.732
|
| 1564 |
+
],
|
| 1565 |
+
"angle": 0,
|
| 1566 |
+
"content": "5.4. Broader applications of AML"
|
| 1567 |
+
},
|
| 1568 |
+
{
|
| 1569 |
+
"type": "text",
|
| 1570 |
+
"bbox": [
|
| 1571 |
+
0.508,
|
| 1572 |
+
0.735,
|
| 1573 |
+
0.938,
|
| 1574 |
+
0.849
|
| 1575 |
+
],
|
| 1576 |
+
"angle": 0,
|
| 1577 |
+
"content": "The blockchain operates as a decentralized bank for bitcoin cryptocurrency [31]. All bitcoin transactions are permanently recorded on the blockchain, which is a visible and verifiable public ledger [32]. Bitcoin addresses are not registered to individuals, in contrast to bank accounts [2]. Thus, due to this pseudo-anonymity [11], bitcoin and other crypto-currencies are increasingly used for ransomware [2], ponzi schemes [11] and illicit material trade on the dark web [23]"
|
| 1578 |
+
},
|
| 1579 |
+
{
|
| 1580 |
+
"type": "text",
|
| 1581 |
+
"bbox": [
|
| 1582 |
+
0.508,
|
| 1583 |
+
0.849,
|
| 1584 |
+
0.939,
|
| 1585 |
+
0.906
|
| 1586 |
+
],
|
| 1587 |
+
"angle": 0,
|
| 1588 |
+
"content": "While bitcoin transactions are difficult to track, they are not completely anonymous [2]. Users can be traced by their IP addresses and transaction flows [32]. An analysis of the bitcoin graph can reveal suspicious behavior patterns characteristic of"
|
| 1589 |
+
},
|
| 1590 |
+
{
|
| 1591 |
+
"type": "page_number",
|
| 1592 |
+
"bbox": [
|
| 1593 |
+
0.494,
|
| 1594 |
+
0.915,
|
| 1595 |
+
0.505,
|
| 1596 |
+
0.926
|
| 1597 |
+
],
|
| 1598 |
+
"angle": 0,
|
| 1599 |
+
"content": "8"
|
| 1600 |
+
}
|
| 1601 |
+
],
|
| 1602 |
+
[
|
| 1603 |
+
{
|
| 1604 |
+
"type": "text",
|
| 1605 |
+
"bbox": [
|
| 1606 |
+
0.058,
|
| 1607 |
+
0.097,
|
| 1608 |
+
0.487,
|
| 1609 |
+
0.296
|
| 1610 |
+
],
|
| 1611 |
+
"angle": 0,
|
| 1612 |
+
"content": "money laundering [2]. To break the tell-tale transnational link between bitcoin transactions and illegal activity, bitcoin mixing services provide a new, untainted bitcoin address from their reserves and the pay-outs are spread out over time [2]. Bitcoin Fog is a service that hides transaction origins by bundling multiple inputs into a smaller number of larger outputs [11]. However, the additional obscuring activities themselves could add characteristic signatures into transaction flows. Thus, it is still possible to detect patterns in the underlying transaction flow to facilitate AML detection [11, 5]. Unfortunately, next-generation cryptocurrencies such as Monero, Dash, and Z-Cash, with built-in anonymity features, make tracking and detection challenging [2]. As a result, there is a constant need for improved AML detection methodologies."
|
| 1613 |
+
},
|
| 1614 |
+
{
|
| 1615 |
+
"type": "title",
|
| 1616 |
+
"bbox": [
|
| 1617 |
+
0.059,
|
| 1618 |
+
0.315,
|
| 1619 |
+
0.303,
|
| 1620 |
+
0.329
|
| 1621 |
+
],
|
| 1622 |
+
"angle": 0,
|
| 1623 |
+
"content": "6. Conclusions and Future Work"
|
| 1624 |
+
},
|
| 1625 |
+
{
|
| 1626 |
+
"type": "text",
|
| 1627 |
+
"bbox": [
|
| 1628 |
+
0.062,
|
| 1629 |
+
0.338,
|
| 1630 |
+
0.487,
|
| 1631 |
+
0.55
|
| 1632 |
+
],
|
| 1633 |
+
"angle": 0,
|
| 1634 |
+
"content": "This paper presents a novel approach for the detection of illicit Bitcoin transactions based on self-supervised GNNs. We first used the DGI to generate the node embedding with raw features to train the Random Forest for detection. Our experimental evaluation indicates that our approach performs exceptionally well and outperforms the state-of-the-art ML-based/Graph-based classifier overall. The evaluation results of our initial classifier demonstrate the potential of using a self-supervised GNN-based approach for illegal transaction detection in cryptocurrencies. We hope to inspire others to work on the important challenge of using graph machine learning to perform financial forensics through this research, which is lacking in the current research. In the future, we plan to integrate this with unsupervised anomaly detection algorithms to detect illegal transactions in an unsupervised manner."
},
{ "type": "title", "bbox": [0.06, 0.569, 0.142, 0.582], "angle": 0, "content": "References" },
{ "type": "ref_text", "bbox": [0.067, 0.59, 0.486, 0.613], "angle": 0, "content": "[1] S. Nakamoto, Bitcoin: A peer-to-peer electronic cash system, Technical Report, Manubot, 2019." },
{ "type": "ref_text", "bbox": [0.068, 0.614, 0.486, 0.636], "angle": 0, "content": "[2] N. Kshetri, J. Voas, Do crypto-currencies fuel ransomware?, in: IT professional, volume 19, IEEE, 2017, pp. 11-15." },
{ "type": "ref_text", "bbox": [0.068, 0.637, 0.485, 0.669], "angle": 0, "content": "[3] Z. Wu, S. Pan, F. Chen, G. Long, C. Zhang, P. S. Yu, A comprehensive survey on graph neural networks, in: IEEE Transactions on Neural Networks and Learning Systems, volume 32, 2021, pp. 4-24." },
{ "type": "ref_text", "bbox": [0.068, 0.67, 0.485, 0.713], "angle": 0, "content": "[4] P. Velicković, W. Fedus, W. L. Hamilton, P. Lio, Y. Bengio, R. D. Hjelm, Deep graph infomax, in: International Conference on Learning Representations, 2019. URL: https://openreview.net/forum?id=rklz9iAcKQ." },
{ "type": "ref_text", "bbox": [0.068, 0.715, 0.486, 0.77], "angle": 0, "content": "[5] M. Weber, G. Domeniconi, J. Chen, D. K. I. Weidele, C. Bellei, T. Robinson, C. E. Leiserson, Anti-money laundering in bitcoin: Experimenting with graph convolutional networks for financial forensics, in: ACM SIGKDD International Workshop on Knowledge discovery and data mining, 2019." },
{ "type": "ref_text", "bbox": [0.068, 0.771, 0.485, 0.815], "angle": 0, "content": "[6] X. Liu, F. Zhang, Z. Hou, L. Mian, Z. Wang, J. Zhang, J. Tang, Self-supervised learning: Generative or contrastive, in: IEEE Transactions on Knowledge and Data Engineering, 2021, pp. 1-1. doi:10.1109/TKDE.2021.3090866." },
{ "type": "ref_text", "bbox": [0.068, 0.816, 0.486, 0.85], "angle": 0, "content": "[7] Y. Liu, M. Jin, S. Pan, C. Zhou, Y. Zheng, F. Xia, P. Yu, Graph self-supervised learning: A survey, in: IEEE Transactions on Knowledge and Data Engineering, 2022, pp. 1-1. doi:10.1109/TKDE.2022.3172903." },
{ "type": "ref_text", "bbox": [0.068, 0.85, 0.486, 0.872], "angle": 0, "content": "[8] C. M. Bishop, N. M. Nasrabadi, Pattern recognition and machine learning, volume 4, Springer, 2006." },
{ "type": "ref_text", "bbox": [0.068, 0.873, 0.486, 0.905], "angle": 0, "content": "[9] T. N. Kipf, M. Welling, Semi-supervised classification with graph convolutional networks, in: International Conference on Learning Representations, 2017." },
{ "type": "list", "bbox": [0.067, 0.59, 0.486, 0.905], "angle": 0, "content": null },
{ "type": "ref_text", "bbox": [0.513, 0.099, 0.938, 0.144], "angle": 0, "content": "[10] A. Pareja, G. Domeniconi, J. Chen, T. Ma, T. Suzumura, H. Kanezashi, T. Kaler, T. Schardl, C. Leiserson, Evolvegen: Evolving graph convolutional networks for dynamic graphs, in: Proceedings of the AAAI Conference on Artificial Intelligence, 2020, pp. 5363-5370." },
{ "type": "ref_text", "bbox": [0.514, 0.145, 0.938, 0.178], "angle": 0, "content": "[11] Y. Hu, S. Seneviratne, K. Thilakarathna, K. Fukuda, A. Seneviratne, Characterizing and detecting money laundering activities on the bitcoin network, arXiv preprint arXiv:1912.12060 (2019)." },
{ "type": "ref_text", "bbox": [0.514, 0.179, 0.938, 0.2], "angle": 0, "content": "[12] J. A. Bondy, U. S. R. Murty, et al., Graph theory with applications, volume 290, Macmillan London, 1976." },
{ "type": "ref_text", "bbox": [0.514, 0.201, 0.938, 0.234], "angle": 0, "content": "[13] A. Grover, J. Leskovec, node2vec: Scalable feature learning for networks, in: Proceedings of the 22nd ACM SIGKDD international conference on Knowledge discovery and data mining, 2016, pp. 855-864." },
{ "type": "ref_text", "bbox": [0.514, 0.235, 0.938, 0.268], "angle": 0, "content": "[14] D. Vassallo, V. Vella, J. Ellul, Application of gradient boosting algorithms for anti-money laundering in cryptocurrencies, in: SN Computer Science, volume 2, Springer, 2021, pp. 1-15." },
{ "type": "ref_text", "bbox": [0.514, 0.269, 0.938, 0.312], "angle": 0, "content": "[15] C. Lee, S. Maharjan, K. Ko, J. W.-K. Hong, Toward detecting illegal transactions on bitcoin using machine-learning methods, in: International Conference on Blockchain and Trustworthy Systems, Springer, 2019, pp. 520-533." },
{ "type": "ref_text", "bbox": [0.514, 0.313, 0.938, 0.357], "angle": 0, "content": "[16] I. Alarab, S. Prakoonwit, M. I. Nacer, Competence of graph convolutional networks for anti-money laundering in bitcoin blockchain, in: Proceedings of the 2020 5th International Conference on Machine Learning Technologies, 2020, pp. 23-27." },
{ "type": "ref_text", "bbox": [0.514, 0.359, 0.938, 0.392], "angle": 0, "content": "[17] L. Nan, D. Tao, Bitcoin mixing detection using deep autoencoder, in: 2018 IEEE Third international conference on data science in cyberspace (DSC), IEEE, 2018, pp. 280-287." },
{ "type": "ref_text", "bbox": [0.514, 0.393, 0.938, 0.436], "angle": 0, "content": "[18] J. Lorenz, M. I. Silva, D. Aparicio, J. T. Ascensão, P. Bizarro, Machine learning methods to detect money laundering in the bitcoin blockchain in the presence of label scarcity, in: Proceedings of the First ACM International Conference on AI in Finance, 2020, pp. 1-8." },
{ "type": "ref_text", "bbox": [0.514, 0.437, 0.938, 0.459], "angle": 0, "content": "[19] T. Pham, S. Lee, Anomaly detection in bitcoin network using unsupervised learning methods, in: arXiv preprint arXiv:1611.03941, 2016." },
{ "type": "ref_text", "bbox": [0.514, 0.46, 0.938, 0.492], "angle": 0, "content": "[20] P. Monamo, V. Marivate, B. Twala, Unsupervised learning for robust bitcoin fraud detection, in: 2016 Information Security for South Africa (ISSA), IEEE, 2016, pp. 129-134." },
{ "type": "ref_text", "bbox": [0.514, 0.494, 0.938, 0.526], "angle": 0, "content": "[21] S. Li, F. Xu, R. Wang, S. Zhong, Self-supervised incremental deep graph learning for ethereum phishing scam detection, in: arXiv preprint arXiv:2106.10176, 2021." },
{ "type": "ref_text", "bbox": [0.514, 0.527, 0.938, 0.56], "angle": 0, "content": "[22] N. Shervashidze, P. Schweitzer, E. J. Van Leeuwen, K. Mehlhorn, K. M. Borgwardt, Weisfeiler-lehman graph kernels., in: booktitle of Machine Learning Research, volume 12, 2011." },
{ "type": "ref_text", "bbox": [0.514, 0.561, 0.938, 0.594], "angle": 0, "content": "[23] G. K. Kulatilleke, M. Portmann, R. Ko, S. S. Chandra, Fdgatii: Fast dynamic graph attention with initial residual and identity mapping, in: arXiv preprint arXiv:2110.11464, 2021." },
{ "type": "ref_text", "bbox": [0.514, 0.595, 0.938, 0.627], "angle": 0, "content": "[24] K. Xu, W. Hu, J. Leskovec, S. Jegelka, How powerful are graph neural networks?, in: International Conference on Learning Representations, 2019. URL: https://openreview.net/forum?id=ryGs6iA5Km." },
{ "type": "ref_text", "bbox": [0.514, 0.628, 0.938, 0.66], "angle": 0, "content": "[25] W. L. Hamilton, R. Ying, J. Leskovec, Inductive representation learning on large graphs, in: Advances in Neural Information Processing Systems, 2017. arXiv:1706.02216." },
{ "type": "ref_text", "bbox": [0.514, 0.661, 0.938, 0.705], "angle": 0, "content": "[26] C. Zhang, D. Song, C. Huang, A. Swami, N. V. Chawla, Heterogeneous graph neural network, in: Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, 2019, pp. 793-803." },
{ "type": "ref_text", "bbox": [0.514, 0.706, 0.938, 0.74], "angle": 0, "content": "[27] P. Velickovic, G. Cucurull, A. Casanova, A. Romero, P. Lio, Y. Bengio, Graph attention networks, in: International Conference on Learning Representations (ICLR), 2018." },
{ "type": "ref_text", "bbox": [0.514, 0.741, 0.938, 0.773], "angle": 0, "content": "[28] S. Yun, M. Jeong, R. Kim, J. Kang, H. J. Kim, Graph transformer networks, in: Advances in neural information processing systems, volume 32, 2019." },
{ "type": "ref_text", "bbox": [0.514, 0.775, 0.938, 0.807], "angle": 0, "content": "[29] S. Ioffe, C. Szegedy, Batch normalization: Accelerating deep network training by reducing internal covariate shift, in: International conference on machine learning, PMLR, 2015, pp. 448-456." },
{ "type": "ref_text", "bbox": [0.514, 0.808, 0.938, 0.841], "angle": 0, "content": "[30] D. T. Robinson, How to Combat Financial Crime in Cryptocurrencies, 2019. URL: https://www.elliptic.co/blog/elliptic-dataset-cryptocurrency-financial-crime." },
{ "type": "ref_text", "bbox": [0.514, 0.842, 0.938, 0.863], "angle": 0, "content": "[31] S. Nakamoto, Bitcoin: a peer-to-peer electronic cash system [eb/ol], Consulted 1 (2008) 28." },
{ "type": "ref_text", "bbox": [0.514, 0.864, 0.938, 0.898], "angle": 0, "content": "[32] R. Van Wegberg, J.-J. Oerlemans, O. van Deventer, Bitcoin money laundering: mixed results? an explorative study on money laundering of cybercrime proceeds using bitcoin, Journal of Financial Crime (2018)." },
{ "type": "list", "bbox": [0.513, 0.099, 0.938, 0.898], "angle": 0, "content": null },
{ "type": "page_number", "bbox": [0.494, 0.914, 0.505, 0.926], "angle": 0, "content": "9" }
]
]
2203.10xxx/2203.10465/7daffd3a-1fdb-4369-b5ad-2de6805c1054_origin.pdf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a5195ecab03275aec3f2bbcf2ed5d0541fa6150032757e49744749811e0f4934
size 812428
2203.10xxx/2203.10465/full.md ADDED
@@ -0,0 +1,305 @@
# Inspection-L: Self-Supervised GNN Node Embeddings for Money Laundering Detection in Bitcoin

Wai Weng Lo $^{a,\ast}$ , Gayan K. Kulatilleke $^{a}$ , Mohanad Sarhan $^{a}$ , Siamak Layeghy $^{a}$ , Marius Portmann $^{a}$

$^{a}$ The University of Queensland, Brisbane, Australia

# Abstract

Criminals have become increasingly experienced in using cryptocurrencies, such as Bitcoin, for money laundering. Cryptocurrencies can hide criminal identities while transferring hundreds of millions of dollars of dirty funds through criminal digital wallets. However, this is a paradox, because cryptocurrencies are goldmines for open-source intelligence, giving law enforcement agencies more power when conducting forensic analyses. This paper proposes Inspection-L, a graph neural network (GNN) framework based on self-supervised Deep Graph Infomax (DGI) and a Graph Isomorphism Network (GIN), combined with a supervised learning algorithm, namely Random Forest (RF), to detect illicit transactions for anti-money laundering (AML). To the best of our knowledge, our proposal is the first to apply self-supervised GNNs to the problem of AML in Bitcoin. The proposed method was evaluated on the Elliptic dataset and outperforms the state-of-the-art in terms of key classification metrics, which demonstrates the potential of self-supervised GNNs in the detection of illicit cryptocurrency transactions.

Keywords: graph neural networks; machine learning; forensics; anomaly detection; cryptocurrencies
# 1. Introduction

The advent of the first cryptocurrency—Bitcoin [1]—has revolutionized the conventional financial ecosystem, as it enables low-cost, near-anonymous, peer-to-peer cash transfers within and across borders. Due to its pseudonymity, many cybercriminals, terrorists, and hackers have started to use cryptocurrency for illegal transactions. For example, the WannaCry ransomware attack used Bitcoin [2] as the payment method due to its non-traceability. The criminals received nearly 3.4 million (46.4 BTC) within four days of the WannaCry attack [2]. Therefore, effective detection of illicit transactions in Bitcoin transaction graphs is essential for preventing illegal transactions. Paradoxically, cryptocurrencies are goldmines for open-source intelligence, as transaction network data are publicly available, enabling law enforcement agencies to conduct a forensic analysis of transaction linkages and flows. However, the problem is challenging for law enforcement agencies, owing to the sheer transaction volume, the hard-to-trace peer-to-peer cross-border nature of Bitcoin transactions, and the use of technologies such as mixers and tumblers.

Graph representation learning has shown great potential for detecting money laundering activities in cryptocurrencies. GNNs are tailored to applications with graph-structured data, such as the social sciences, chemistry, and telecommunications, and can leverage the inherent structure of graph data by building relational inductive biases into the deep learning architecture. This provides the ability to learn, reason, and generalize from graph data, inspired by the concept of message propagation [3].

Bitcoin transaction flow data can naturally be represented in graph format. A graph is constructed from the raw Bitcoin data and labeled such that the nodes represent transactions and the edges in the adjacency matrix represent the flow of Bitcoin currency (BTC) from one transaction to the next. Both the topological information and the information contained in the node features are crucial for detecting illicit transactions.

This paper proposes Inspection-L, a Graph Neural Network (GNN) framework based on an enhanced self-supervised Deep Graph Infomax (DGI) [4] and a supervised Random Forest (RF)-based classifier to detect illicit transactions for AML.

Specifically, we investigate the Elliptic dataset [5], a realistic, partially labeled Bitcoin temporal graph-based transaction dataset consisting of real entities belonging to licit (e.g., wallets, miners), illicit (e.g., scams, terrorist organizations, ransomware), and unknown transaction categories. The proposed Inspection-L framework aims to detect illegal transactions based on graph representation learning in a self-supervised manner. Current graph machine learning approaches, such as [5], generally apply supervised graph neural networks to the detection of illicit transactions. However, supervised learning requires manual labeling. In the AML scenario, building an effective model that utilizes unlabeled data is necessary, since manually labeling Bitcoin data is costly and slow, and supervised models only perform well when sufficient labels are available. Thus, exploiting unlabeled data to improve performance is critical for AML. Self-supervised graph neural network algorithms [6][7], on the other hand, allow unlabeled data to be exploited, which can improve the quality of representations for downstream tasks such as fraudulent transaction detection in Bitcoin. Furthermore, a supervised GNN is limited to capturing K-hop neighborhood information: once a neighbor lies more than K hops away, the supervised GNN fails to capture that node's information.

In this paper, we applied DGI self-supervised learning to capture global graph information, as it is not limited to K-layer neighborhood information: every node can access the entire graph's structural pattern and node information via randomly shuffled node features. The DGI discriminator tries to determine whether the node features have been shuffled or not. Thus, every node can access global properties of the graph, rather than only K-layer neighborhood information.

We demonstrate how the self-supervised DGI algorithm can be integrated with standard machine learning classification algorithms, i.e., Random Forest, to build an efficient anti-money-laundering detection system. We show that our Inspection-L method outperforms the state-of-the-art in terms of F1 score.

In summary, the key contributions of this paper are:

- Different from most existing works, which typically use supervised graph representation learning to generate node embeddings for illegal transaction detection, we use a self-supervised learning approach to learn the node embeddings without using any labels.
- The proposed Inspection-L is based on a self-supervised DGI combined with the Random Forest (RF) supervised machine learning algorithm, to capture topological information and node features in the transaction graph to detect illegal transactions. To the best of our knowledge, our proposal is the first to utilize self-supervised GNNs to generate node embeddings for AML in Bitcoin.
- A comprehensive evaluation of the proposed framework using the Elliptic benchmark dataset demonstrates superior performance compared to other, supervised machine learning approaches.
# 2. RELATED WORKS

Weber et al. [5] created and published the Elliptic dataset, a temporal graph-based Bitcoin transaction dataset consisting of over 200K Bitcoin node transactions, 234K payment edges, and 49 transaction graphs with distinct time steps. Each transaction node was labeled as a "licit", "illicit", or "unknown" entity. They evaluated the Elliptic dataset using various machine learning methods, including Logistic Regression (LR), Random Forest (RF), Multilayer Perceptrons (MLP) [8], Graph Convolutional Networks (GCNs) [9] and EvolveGCN [10]. They obtained a recall score on the illicit category of 0.67 using RF and 0.51 using GCNs.

Hu et al. [11] collected Bitcoin transaction graph data between July 2014 and May 2017 by running a Bitcoin client and used an external trusted source, "Wallet Explorer", a website that tracks Bitcoin wallets, to label the data. They first highlighted the differences between money laundering and regular transactions using network centrality measures such as PageRank and the clustering coefficient [12], and then used a node2vec-based [13] classifier to classify money laundering transactions. The study also indicated that statistical information, such as in-degree/out-degree, the number of weakly connected components, and the sum/mean/standard deviation of the output values, could distinguish money laundering transactions from legal ones. However, this approach only considers graph topological patterns, without considering node features. Vassallo et al. [14] focused on the detection of illicit cryptocurrency activities (e.g., scams, terrorism financing, and Ponzi schemes). Their proposed detection framework is based on Adaptive Stacked eXtreme Gradient Boosting (ASXGB), an enhanced variation of eXtreme Gradient Boosting (XGBoost). ASXGB was evaluated on the Elliptic dataset, and the results demonstrate its superiority at both the account and transaction levels.

Lee et al. [15] applied supervised machine learning algorithms to classify illicit nodes in the Bitcoin network. They used two supervised machine learning models, namely Random Forest (RF) and an Artificial Neural Network (ANN) [8], to detect illegal transactions. First, they collected legal and illegal Bitcoin data from the sites "Wallet Explorer" and "Blockchain Explorer". Next, they performed feature extraction based on the characteristics of Bitcoin transactions, such as transaction fees and transaction size. The extracted features were labeled legal or illegal for supervised training. The results indicated that relatively high F1 scores could be achieved; specifically, ANN and RF achieved F1 scores of 0.89 and 0.98, respectively. Alarab et al. [16] proposed using GCNs intertwined with linear layers to classify illicit nodes of the Elliptic dataset [5], achieving an overall classification accuracy of $97.40\%$ and a recall of 0.67 on illicit transactions. In [17], the authors used an autoencoder with graph embedding to detect mixing and demixing services for the Bitcoin cryptocurrency. They first applied graph node embedding to generate the node representations; then, a K-means algorithm was applied to cluster the node embeddings to detect mixing and demixing services. The model was evaluated on real-world Bitcoin datasets, and the results demonstrate that it can effectively perform mixing/demixing service anomaly detection.

Lorenz et al. [18] proposed active learning techniques that use a minimum number of labels to achieve a high rate of detection of illicit transactions on the Elliptic dataset. In [19], the authors applied unsupervised learning to detect suspicious nodes in the Bitcoin transaction graph. They used various unsupervised machine learning algorithms, such as K-means and Gaussian Mixture models, to cluster normal and illicit nodes. However, since the Bitcoin transaction dataset they used lacked ground-truth labels, they simply used internal indices to validate the clustering, without confirming that those nodes were actually malicious transactions. Monamo et al. [20] applied trimmed K-means to detect fraud in the Bitcoin network. They used various graph centrality measures (i.e., in-degree and out-degree of the Bitcoin transactions) and currency features (i.e., the total amount sent), which were then used for Bitcoin transaction clustering. However, similar to [19], due to the unavailability of ground-truth labels, they used clustering performance metrics such as the within-cluster sum of squares, without being able to validate the true nature of the Bitcoin transaction anomalies. Li et al. [21] proposed SIEGE, a self-supervised graph learning approach for Ethereum phishing scam detection, using two pretext tasks to generate node embeddings without labels and an incremental paradigm to capture data distribution changes over half a year. However, a significant limitation of this approach is that it does not consider the Bitcoin context and is limited to detecting Ethereum phishing scams. Additionally, the plain GCNs [9] used in the pretext task phase are strictly less expressive than the Weisfeiler-Lehman (1-WL) test [22].

In contrast with related studies, our approach can detect not only phishing scams but also other illicit transactions, such as those of terrorist organizations, ransomware, and Ponzi schemes, by utilizing the Elliptic dataset [5].
# 3. BACKGROUND

Figure 1: Overview of Deep Graph Infomax

The main innovation of our proposed model is its use of DGI [4] with our proposed GIN encoder to learn node embeddings in a self-supervised manner. The node embeddings can then be treated as enhanced features and combined with the raw features as input to a standard supervised RF machine learning algorithm that classifies illicit transactions. This has a clear advantage over simple features as inputs, since the overall graph-structured patterns become available to the downstream classifier.

In contrast, current graph-based approaches [5][16] apply a supervised GCN-based approach to capture the overall graph-structured patterns. Their main limitation is that a GCN can only capture the neighborhood information of a limited number of K layers, not the global view of graph and node information, due to the threat of overfitting. While some models, such as FDGATII [23], are capable of a larger K, these are still limited by their layer structure and finite K. Our Inspection-L approach, on the other hand, allows every node to access the structural patterns of the entire graph, which captures more global neighborhood information. The proposed method also considers that the message-passing functions of [5][16] are not powerful enough, as they lack injectivity. Therefore, we propose a GIN encoder to make the message propagation function more robust.
# 3.1. Graph Neural Networks

GNNs are a deep learning approach for graph-based data and a recent and highly promising area of machine learning [23]. The key feature of GNNs is their ability to combine a topological graph structure with features. For each node in a graph, this means aggregating neighboring node features to produce a new representation of the current node that takes the neighboring information into account. The output of this process is known as embeddings. Final node embeddings are low-dimensional vector representations that capture topological and node properties. Embeddings can be learned in a supervised or unsupervised manner and used for downstream tasks such as node classification, clustering, and link prediction [23]. The $k$-th layer of a typical GCN is:

$$
h_{v}^{(k)} = \sigma\left(W^{(k)} \cdot \operatorname{MEAN}\left\{ h_{u}^{(k-1)}, \ \forall u \in N(v) \cup \{v\} \right\}\right) \tag{1}
$$

where $h_{v}^{(k)}$ is the feature vector of node $v$ at the $k$-th iteration/layer, $h_{v}^{(0)} = X_{v}$, and $N(v)$ is the set of neighbor nodes of $v$. $W^{(k)}$ is the weight matrix that is learned for the downstream tasks, and $\sigma$ is an activation function, typically ReLU, for computing node representations.
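To make Equation 1 concrete, below is a minimal sketch (not the authors' implementation) of a single mean-aggregation GCN layer in PyTorch; the dense adjacency matrix and the toy dimensions are illustrative assumptions.

```python
import torch
import torch.nn as nn

class MeanGCNLayer(nn.Module):
    """One GCN layer per Equation 1: h_v = ReLU(W . MEAN{h_u : u in N(v) and v itself})."""
    def __init__(self, in_dim: int, out_dim: int):
        super().__init__()
        self.linear = nn.Linear(in_dim, out_dim, bias=False)  # the weight matrix W

    def forward(self, x: torch.Tensor, adj: torch.Tensor) -> torch.Tensor:
        # Add self-loops so that v belongs to its own neighborhood
        adj_hat = adj + torch.eye(adj.size(0), device=adj.device)
        # Row-normalize to realize the MEAN aggregation over N(v) plus v
        mean_agg = (adj_hat @ x) / adj_hat.sum(dim=1, keepdim=True)
        return torch.relu(self.linear(mean_agg))  # sigma = ReLU

# Toy usage: a 4-node path graph with 166-dim features (as in the Elliptic dataset)
x = torch.randn(4, 166)
adj = torch.tensor([[0., 1., 0., 0.],
                    [1., 0., 1., 0.],
                    [0., 1., 0., 1.],
                    [0., 0., 1., 0.]])
h = MeanGCNLayer(166, 128)(x, adj)  # -> shape (4, 128)
```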
# 3.2. Graph Isomorphism Network

The Graph Isomorphism Network (GIN) is a theoretically maximally powerful GNN proposed by Xu et al. [24]. The main difference between GIN and other GNNs is its message aggregation function, shown below:

$$
h_{v}^{(k)} = \operatorname{MLP}^{(k)}\left(\left(1 + \epsilon^{(k)}\right) \cdot h_{v}^{(k-1)} + \sum_{u \in N(v)} h_{u}^{(k-1)}\right) \tag{2}
$$

GCNs are less effective than the Weisfeiler-Lehman (1-WL) test [22] because of their single-layer aggregation function, which plays the same role as the hash function of the 1-WL algorithm. According to [24], a single non-linear layer is insufficient for graph learning; thus, GCN message-passing functions are not necessarily injective. GIN [24] was therefore proposed to make the message-passing function injective, as shown in Equation 2, where $\epsilon^{(k)}$ is a scalar parameter and MLP stands for multilayer perceptron. $h_{v}^{(k)} \in \mathbb{R}^d$ is the embedding of node $v$ at the $k$-th layer, $h_{v}^{(0)} = x_{v}$ is the original input node feature vector, and $N(v)$ is the set of neighbors of node $v$. We can stack $k$ layers to obtain the final node representation $h_{v}^{(k)}$.
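Since the implementation environment already includes PyTorch Geometric, Equation 2 can be realized with its `GINConv` operator; the following is a small illustrative sketch, with feature sizes borrowed from the Elliptic dataset and our 128-unit MLPs.

```python
import torch
from torch import nn
from torch_geometric.nn import GINConv

# The MLP applied after the injective sum aggregation of Equation 2
mlp = nn.Sequential(
    nn.Linear(166, 128),
    nn.BatchNorm1d(128),
    nn.ReLU(),
    nn.Linear(128, 128),
)
# train_eps=True makes epsilon^(k) of Equation 2 a learnable scalar
conv = GINConv(mlp, train_eps=True)

x = torch.randn(5, 166)                    # 5 nodes, 166 features
edge_index = torch.tensor([[0, 1, 2, 3],   # source nodes
                           [1, 2, 3, 4]])  # target nodes
h = conv(x, edge_index)                    # -> shape (5, 128)
```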
# 3.3. Deep Graph Infomax

Deep Graph Infomax (DGI) [4] is a self-supervised graph representation learning approach that relies on maximizing the mutual information between patch representations and a global graph summary. The patch representations summarize subgraphs, allowing similarities at the patch level to be preserved. A trained DGI encoder can be reused to generate node embeddings for downstream tasks, such as node clustering.

Most previous self-supervised representation learning approaches rely on a random walk strategy [25][26], which is extremely computationally expensive because the number of walks grows with the number of nodes in the graph, making it unscalable for large graphs. Moreover, the choice of hyperparameters (length of the walk, number of walks) can significantly impact model performance. DGI requires neither supervision nor random walk techniques. Instead, it guides the model to learn node connections by simultaneously leveraging local and global information in a graph [4].

Figure 1 shows the overall operation of DGI. $G$ is a true graph with the true nodes, the true edges that connect them, and the real node features associated with each node. $H$ is a corrupted graph in which the nodes and edges have been changed using a corruption function. [4] suggests that the corruption function can randomly shuffle the node features while maintaining the same edges as the true graph $G$.

The DGI training procedure consists of four components (see the sketch after Equation 3):

- A corruption procedure $C$ that changes the real input graph $G$ into a corrupted graph $H = C(G)$. This can be achieved by randomly shuffling the node features among the nodes of the real graph $G$, or by adding and removing edges of the real graph $G$.
- An encoder $E$ that computes the node embeddings of the corrupted graph and of the real graph. This can be achieved using various graph representation methods, such as Graph Convolutional Networks (GCNs) [9], Graph Attention Networks (GATs) [27] or Graph Transformer Networks (GTNs) [28].
- A readout function $R$ that summarizes the node embedding vectors of the real graph into a single embedding vector of the entire graph $\overline{s}$ (the global graph summary).
- A discriminator $D$, a logistic non-linear sigmoid function, that compares a real node embedding vector $\vec{h}_i$ and a corrupted node embedding $\widetilde{h}_i$ against the whole real graph embedding $\overline{s}$ and produces a score between 0 and 1. The binary cross-entropy objective of Equation 3 [4] is then used to discriminate between the embeddings of real and corrupted nodes and thereby train the encoder $E$:

$$
L = \frac{1}{N+M} \left( \sum_{i=1}^{N} \mathbb{E}_{(\mathbf{X},\mathbf{A})}\left[\log D\left(\vec{h}_{i}, \vec{s}\right)\right] + \sum_{j=1}^{M} \mathbb{E}_{(\overline{\mathbf{X}},\overline{\mathbf{A}})}\left[\log\left(1 - D\left(\vec{h}_{j}, \vec{s}\right)\right)\right] \right) \tag{3}
$$
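To make the four components concrete, here is a minimal, non-authoritative sketch of one DGI loss computation in PyTorch. The `encoder` (any GNN, e.g. a GIN) and the bilinear discriminator weight `w` are assumed stand-ins; the corruption, readout, and loss follow the description above.

```python
import torch
import torch.nn.functional as F

def dgi_step(encoder, x, edge_index, w):
    """One DGI loss evaluation on a single graph (here N = M)."""
    # Corruption C: shuffle node features across nodes, keep the edges unchanged
    x_corrupt = x[torch.randperm(x.size(0))]

    h_real = encoder(x, edge_index)          # true patch representations
    h_fake = encoder(x_corrupt, edge_index)  # corrupted patch representations

    # Readout R: sigmoid of the mean of the real node embeddings -> summary s
    s = torch.sigmoid(h_real.mean(dim=0))

    # Discriminator D(h, s) = sigmoid(h^T W s); the sigmoid is folded into the loss below
    logits_real = h_real @ w @ s
    logits_fake = h_fake @ w @ s

    # Binary cross-entropy of Equation 3: real pairs -> 1, corrupted pairs -> 0
    return F.binary_cross_entropy_with_logits(
        torch.cat([logits_real, logits_fake]),
        torch.cat([torch.ones_like(logits_real), torch.zeros_like(logits_fake)]),
    )
```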
# 4. PROPOSED METHOD

To construct Bitcoin transaction graphs from the dataset, we used the 49 time-stepped Bitcoin transaction graphs (TGs) of [5], in which nodes represent transactions and edges represent flows of Bitcoin. This is a very natural way to represent Bitcoin transactions.

The pseudocode and overall procedure of our proposed algorithm are shown in Algorithm 1 and Figure 2. The proposed framework consists of two stages: DGI training, which extracts node embeddings for feature augmentation, and supervised machine learning classification.
# 4.1. DGI Training

To train the proposed model, the input includes the transaction graphs $G$ with node features (either all 166 features, a combination of local and aggregated features, which we denote AF, or only the 94 local features, denoted LF) and the specified number of training epochs $K$ for extracting true and corrupted node embeddings. Beforehand, we need to define the corruption function $C$ that generates the corrupted transaction graphs $C(G)$ from which our GIN encoder extracts corrupted node embeddings. In this paper, we generate the corrupted counterpart of each real transaction graph $G$ by randomly shuffling the node features among its nodes, i.e., by row-wise shuffling of the feature matrix $\mathbf{X}$. In other words, instead of adding or removing edges from the adjacency matrix such that $\mathbf{A}_G \neq \mathbf{A}_H$, our corruption function $C$ shuffles the node features such that $\mathbf{X}_G \neq \mathbf{X}_H$ and retains the adjacency matrix, i.e., $\mathbf{A}_G = \mathbf{A}_H$. Note that the corruption function only changes the node features, not the structure; therefore, $N_G = N_H$, and in our DGI implementation we have $N = M$.

For each batch of graph data $G$ in each training epoch (Algorithm 1, lines 4-5), we use our proposed GIN encoder to extract the true and corrupted node embeddings. The GIN encoder, shown in Figure 2, uses two layers whose MLPs consist of 128 hidden units, a ReLU activation function, and batch normalization (Algorithm 2) [29].

The design of the MLPs is motivated by the fundamental goal of a GNN-based model. Ideally, different types of graph patterns should be distinguishable via the graph encoder, which means that different graph structures should be mapped to different locations in the embedding space. This requires the ability to solve the graph isomorphism problem, where non-isomorphic graphs should be mapped to different representations.

Figure 2: Proposed Method

Algorithm 1: Pseudocode for Our Proposed Algorithm

input: Set of training graphs $G^{+} = \{G(V,A,X)\}$; number of training epochs $K$; corruption function $C$; all 166 features (AF); first 94 local features (LF)
output: Optimized GIN encoder $g$, optimized RF $h\_R$

1 Initialize the parameters $\theta$ and $\omega$ for the encoder $g$ and the discriminator $D$;
2 foreach batch $G \in G^{+}$ do
3   for epoch $\leftarrow 1$ to $K$ do
4     $h_i = g(G, \theta)$
5     $\widetilde{h}_i = g(C(G), \theta)$
6     $\bar{s} = \sigma \left( \frac{1}{n} \sum_{i=1}^{n} h_i^{(L)} \right)$
7     $D(h_i, \bar{s}) = \sigma(h_i^T \mathbf{w} \bar{s})$
8     $D(\widetilde{h}_i, \bar{s}) = \sigma(\widetilde{h}_i^T \mathbf{w} \bar{s})$
9     $L_{DGI} = \frac{1}{N + M} \left( \sum_{i=1}^{N} \mathbb{E}_{(\mathbf{X}, \mathbf{A})}[\log D(\vec{h}_i, \vec{s})] + \sum_{j=1}^{M} \mathbb{E}_{(\overline{\mathbf{X}}, \overline{\mathbf{A}})}[\log(1 - D(\vec{h}_j, \vec{s}))]\right)$
10    $\theta, \omega \gets$ Adam($L_{DGI}$)
11 Select the labeled node embeddings $h_i$ from $h_i = g(G, \theta)$ and the corresponding labels $y$ for $G \in$ training set;
12 $h\_R \gets$ RF($(h_i \,\|\, \{AF$ or $LF\})$, $y$)
13 return $h\_R$, $g$
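A sketch of a GIN encoder consistent with this description (two GIN hops whose MLPs use 128 hidden units, ReLU, and batch normalization) is given below; the exact wiring is our reading of Figure 2, not the authors' released code.

```python
import torch
from torch import nn
from torch_geometric.nn import GINConv

class GINEncoder(nn.Module):
    """Two-hop GIN encoder g(G, theta), applied to real and corrupted graphs alike."""
    def __init__(self, in_dim: int = 166, hidden: int = 128):
        super().__init__()
        def make_mlp(d_in: int) -> nn.Sequential:
            return nn.Sequential(
                nn.Linear(d_in, hidden), nn.BatchNorm1d(hidden), nn.ReLU(),
                nn.Linear(hidden, hidden),
            )
        self.conv1 = GINConv(make_mlp(in_dim), train_eps=True)
        self.conv2 = GINConv(make_mlp(hidden), train_eps=True)

    def forward(self, x: torch.Tensor, edge_index: torch.Tensor) -> torch.Tensor:
        h = torch.relu(self.conv1(x, edge_index))
        return self.conv2(h, edge_index)     # final 128-dim node embeddings
```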
We applied a full-neighbor sampling technique and used two-hop neighbor samples for the GIN encoder with batch normalization, as DGI benefits from wider rather than deeper models [4].

For the readout function $R$, we apply the mean operation to all node embeddings of the real graph $G$ and then a sigmoid activation function to compute the whole-graph embedding $\overline{s}$:

$$
\bar{s} = \sigma\left(\frac{1}{n} \sum_{i=1}^{n} h_{i}^{(L)}\right) \tag{4}
$$
In Algorithm 1, lines 7-8, and as shown in Equations 5 and 6, the discriminator $D$ uses a logistic sigmoid non-linear function to score a node embedding vector $\vec{h}_i$ (or a corrupted embedding $\widetilde{h}_i$) against the real whole-graph embedding $\overline{s}$, i.e., the score of the pair $(\vec{h}_i, \overline{s})$ being positive or negative:

$$
D\left(h_{i}, \bar{s}\right) = \sigma\left(h_{i}^{T} \mathbf{w} \bar{s}\right) \tag{5}
$$

$$
D\left(\widetilde{h}_{i}, \bar{s}\right) = \sigma\left(\widetilde{h}_{i}^{T} \mathbf{w} \bar{s}\right) \tag{6}
$$

Algorithm 2: Batch Normalizing Transform [29]

input: Values of $x$ over a mini-batch: $B = \{x_{1\dots m}\}$; learnable parameters $\gamma, \beta$
output: $\left\{y_i = \mathbf{BN}_{\gamma,\beta}(x_i)\right\}$

1 $\mu_B \gets \frac{1}{m}\sum_{i = 1}^m x_i$
2 $\sigma_B^2 \gets \frac{1}{m}\sum_{i = 1}^m (x_i - \mu_B)^2$
3 $\widehat{x}_i \gets \frac{x_i - \mu_B}{\sqrt{\sigma_B^2 + \epsilon}}$
4 $y_{i} \gets \gamma \widehat{x}_{i} + \beta \equiv \mathbf{BN}_{\gamma,\beta}(x_{i})$
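Algorithm 2 is the standard batch-normalizing transform of [29]; as a quick illustrative check (not part of the paper), PyTorch's built-in layer reproduces it in training mode:

```python
import torch

bn = torch.nn.BatchNorm1d(num_features=3)   # learnable gamma and beta per feature
x = torch.tensor([[1., 2., 3.],
                  [3., 4., 5.]])
# Training mode standardizes each column with the batch mean/variance
# (Algorithm 2, lines 1-3), then scales and shifts by gamma, beta (line 4).
print(bn(x))  # approx. [[-1, -1, -1], [1, 1, 1]] with the default gamma=1, beta=0
```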
We then performed gradient descent on the binary cross-entropy (BCE) objective of Equation 3, modified so that $N = M$ (Algorithm 1, line 10): the score is maximized if the node embedding is a true node embedding $\vec{h}_i$ and minimized if it is a corrupted node embedding $\widetilde{h}_i$, relative to the global graph summary generated by the readout function $R$ (Equation 4). As a result, we maximize the mutual information between the patch representations and the whole real graph summary. After training, the trained encoder can be used to generate new graph embeddings for downstream purposes, in this case the detection of illegal transactions.

In our experiments, we used the 34 training Bitcoin transaction graphs to train the DGI with the GIN encoder in a self-supervised manner. For each training graph, we trained for 300 epochs using an Adam optimizer with a learning rate of 0.0001, as shown in Algorithm 1, line 10.
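Putting the pieces together, a sketch of the self-supervised training loop follows, reusing the `GINEncoder` and `dgi_step` sketches above; `train_graphs` (the 34 training transaction graphs as PyTorch Geometric `Data` objects) is an assumed variable.

```python
import torch

encoder = GINEncoder(in_dim=166, hidden=128)
w = torch.nn.Parameter(torch.empty(128, 128))   # bilinear discriminator weight (omega)
torch.nn.init.xavier_uniform_(w)
opt = torch.optim.Adam(list(encoder.parameters()) + [w], lr=1e-4)

for g in train_graphs:            # the 34 training transaction graphs
    for epoch in range(300):      # 300 epochs per graph, as in Section 4.1
        opt.zero_grad()
        loss = dgi_step(encoder, g.x, g.edge_index, w)
        loss.backward()
        opt.step()

# After training, the frozen encoder provides embeddings for feature augmentation
encoder.eval()
with torch.no_grad():
    train_embeddings = [encoder(g.x, g.edge_index) for g in train_graphs]
```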
# 4.2. Supervised Machine Learning Classification

After DGI training, we reused the encoder to generate node embeddings for training and testing an RF classifier with 100 estimators (Algorithm 1, lines 11-12). In our experiments, we performed a 70:30 split: 34 Bitcoin transaction graphs for training and the remaining 15 for testing. All 34 training graphs were fed to DGI to train the GIN encoder in a self-supervised manner. Once the training phase was completed, we used the trained GIN encoder to extract the node embeddings of all 34 training graphs. As the dataset contains licit/illicit labels as well as unknown entities, we dropped the unknown-label data in the RF training and testing phases and used only labeled data for performance evaluation. We used all training-graph node embeddings to train the RF in a supervised manner. For testing, we extracted the node embeddings of the last 15 test graphs using the trained GIN encoder and fed them to the trained RF for illegal transaction detection.

We experimented with the following three combinations of features and embeddings (a sketch of the feature-augmentation step follows the list):

1. DNE: Node embeddings only: after DGI training, we reused the encoder to generate node embeddings for training and testing the RF classifier, as described above.
2. LF + DNE: Node embeddings with LF features: as in scenario 1, but the local features (i.e., the first 94 raw features) were concatenated with the node embeddings generated by the trained encoder for training and testing the RF classifier.
3. AF + DNE: Node embeddings with AF features: as in scenario 1, but all raw features (AF) were concatenated with the node embeddings generated by the trained encoder for training and testing the RF classifier.
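The feature-augmentation step for the AF + DNE variant might look as follows; `X_train`/`X_test` (raw 166-dim features of the labeled nodes), `Z_train`/`Z_test` (their DGI embeddings) and the label vectors are assumed arrays, so this is a sketch rather than the authors' exact pipeline.

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report

# Concatenate raw features with DGI node embeddings: (h_i || AF)
train_aug = np.concatenate([X_train, Z_train], axis=1)
test_aug = np.concatenate([X_test, Z_test], axis=1)

rf = RandomForestClassifier(n_estimators=100, random_state=0)
rf.fit(train_aug, y_train)   # y: 1 = illicit, 0 = licit; unknown labels dropped
print(classification_report(y_test, rf.predict(test_aug), digits=3))
```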
Table 1: Implementation environment specification

<table><tr><td>Unit</td><td>Description</td></tr><tr><td>Processor</td><td>2.3 GHz 2-core Intel Xeon(R) Processor</td></tr><tr><td>RAM</td><td>12 GB</td></tr><tr><td>GPU</td><td>Tesla P100 GPU 16 GB</td></tr><tr><td>Operating System</td><td>Linux</td></tr><tr><td>Packages</td><td>Scikit-learn, Numpy, Pandas, PyTorch Geometric, and Matplotlib</td></tr></table>

# 4.3. Implementation Environments

Experiments were carried out using a 2.3 GHz 2-core Intel(R) Xeon(R) processor with 12 GB memory and a Tesla P100 GPU on a Linux operating system. The proposed approach was developed in the Python programming language with several statistical and visualization packages, such as Scikit-learn, Numpy, Pandas, PyTorch Geometric, and Matplotlib. Table 1 summarizes the system configuration.
# 5. Experiments and Results

# 5.1. Dataset

In this paper, we adopted the Elliptic dataset [5], the world's largest labeled dataset of Bitcoin transactions. The Elliptic dataset [5] consists of 203,769 nodes representing transactions and 234,355 directed transaction payment flows (i.e., transaction inputs and outputs). The dataset comprises 49 time-step graphs, uniformly spaced at two-week intervals, as illustrated in Figure 3. Each time step contains a single connected component of transactions that appeared on the blockchain within less than three hours of each other. Each of our graphs $G$ represents one of these 49 transaction graphs.

In the Elliptic dataset [5], $21\%$ of the node entities are labeled as licit and only $2\%$ as illicit. The remaining node entities are unlabeled but still have node features. Each node entity has 166 features (AF), among which the first 94 contain local information (LF) about the transaction, including the time step, transaction fees, and the number of inputs or outputs. The remaining 72 features are aggregated features, obtained by aggregating transaction information from one-hop backward/forward graph nodes, such as the standard deviation, minimum, maximum, and correlation coefficients of the neighboring transactions for the same data. Importantly, all features were obtained using only publicly available information.
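For illustration, the publicly distributed Elliptic CSV files can be assembled as below; the file names follow the common public (Kaggle) release and are an assumption, not something specified in this paper.

```python
import pandas as pd

# Assumed file names from the public Elliptic release
feats = pd.read_csv("elliptic_txs_features.csv", header=None)  # col 0: txId; cols 1-166: features
classes = pd.read_csv("elliptic_txs_classes.csv")              # txId, class in {1, 2, unknown}
edges = pd.read_csv("elliptic_txs_edgelist.csv")               # txId1 -> txId2 payment flows

df = feats.merge(classes, left_on=0, right_on="txId")
print(df.shape, edges.shape)  # ~203k labeled/unlabeled transactions, ~234k directed edges
```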
# 5.2. Performance Metric

To evaluate the performance of the proposed methods, the standard metrics listed in Table 2 were used, where $TP$, $TN$, $FP$ and $FN$ represent the number of True Positives, True Negatives, False Positives and False Negatives, respectively. Here, a true positive is an illicit transaction correctly classified as illicit, a true negative is a licit transaction correctly classified as licit, a false positive is a licit transaction misclassified as illicit, and a false negative is an illicit transaction misclassified as licit. The proposed method was evaluated using Precision, Recall, F1-score and the area under the receiver operating characteristic (ROC) curve. All of these metrics can be obtained from the confusion matrix (CM).

Figure 3: Overview of Elliptic Dataset [30]

Figure 4: Illicit F1 over test timestep

Table 2: Evaluation metrics used in this study

<table><tr><td>Metric</td><td>Definition</td></tr><tr><td>Detection Rate (Recall)</td><td>TP/(TP+FN)</td></tr><tr><td>Precision</td><td>TP/(TP+FP)</td></tr><tr><td>F1-Score</td><td>(2×Recall×Precision)/(Recall+Precision)</td></tr><tr><td>AUC-Score</td><td>\(\int_0^1 \frac{TP}{TP+FN} \, d\frac{FP}{TN+FP}\)</td></tr></table>
Accuracy indicates that a model has learned well in the case of a balanced test dataset; for imbalanced scenarios such as ours, however, considering accuracy alone may lead to misleading conclusions, since it is strongly biased in favor of the licit majority class. Thus, the recall and F1-score metrics provide a more reasonable account of the model's performance.

Recall (also known as Detection Rate) is the number of true positives divided by the total number of true positives and false negatives. If the recall is very low, the classifier cannot detect illicit transactions.

Precision measures the quality of the positive predictions. It is the number of true positives divided by the number of true positives and false positives. A very high number of false positives causes low precision. Our goal is to maximize precision as much as possible.

F1-score is the trade-off between precision and recall. Mathematically, it is the harmonic mean of precision and recall.

The area under the curve (AUC) captures the trade-off between sensitivity and specificity, computed from the ROC curve plotting the true positive rate on the y-axis against the false positive rate on the x-axis. Our goal is to maximize the AUC score, bringing it as close to 1.0 as possible.
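All four metrics can be computed directly from the classifier's predictions, e.g. with scikit-learn; `rf`, `test_aug` and `y_test` refer to the assumed objects of the earlier sketch.

```python
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score

y_pred = rf.predict(test_aug)
y_prob = rf.predict_proba(test_aug)[:, 1]   # probability of the illicit class (label 1)

print("Precision:", precision_score(y_test, y_pred))
print("Recall:   ", recall_score(y_test, y_pred))
print("F1-score: ", f1_score(y_test, y_pred))
print("AUC:      ", roc_auc_score(y_test, y_prob))
```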
# 5.3. Experimental Results

Table 3 shows the results of our Inspection-L compared to the state-of-the-art in terms of the key metrics. As can be observed from the table, in terms of illicit F1-score, Inspection-L (LF + DNE and AF + DNE) outperforms the best reported classifiers. In the best-performing variant, AF + DNE, we concatenated the node embeddings generated by DGI with all original raw features (AF). This experiment achieved an F1 score of 0.828 and a Recall of 0.721. Using all features (AF) with node embeddings (DNE) as input for classification, the ML model's performance increased significantly, with an AUC of 0.916, compared to 0.735 when only the node embeddings were used. The experiments demonstrate that graph information (node embeddings) is useful for enhancing the transaction representations.

Table 3: Results of binary classification by Inspection-L compared to the state-of-the-art. AF refers to all raw features, LF refers to the local raw features, i.e., the first 94 raw features, GNE refers to the node embeddings generated by GCN in [5] using labels, and DNE refers to the node embeddings computed by DGI without using labels.

<table><tr><td rowspan="2">Method</td><td colspan="3">Illicit</td><td rowspan="2">AUC</td></tr><tr><td>Precision</td><td>Recall</td><td>F1</td></tr><tr><td>Logistic Regr AF [5]</td><td>0.404</td><td>0.593</td><td>0.481</td><td>-</td></tr><tr><td>Logistic Regr AF + GNE [5]</td><td>0.537</td><td>0.528</td><td>0.533</td><td>-</td></tr><tr><td>Logistic Regr LF [5]</td><td>0.348</td><td>0.668</td><td>0.457</td><td>-</td></tr><tr><td>Logistic Regr LF + GNE [5]</td><td>0.518</td><td>0.571</td><td>0.543</td><td>-</td></tr><tr><td>RandomForest AF [5]</td><td>0.956</td><td>0.670</td><td>0.788</td><td>-</td></tr><tr><td>RandomForest AF + GNE [5]</td><td>0.971</td><td>0.675</td><td>0.796</td><td>-</td></tr><tr><td>RandomForest AF [14]</td><td>0.897</td><td>0.721</td><td>0.800</td><td>-</td></tr><tr><td>RandomForest AF + GNE [14]</td><td>0.958</td><td>0.715</td><td>0.819</td><td>-</td></tr><tr><td>XGB AF [14]</td><td>0.921</td><td>0.732</td><td>0.815</td><td>-</td></tr><tr><td>XGB AF + GNE [14]</td><td>0.986</td><td>0.692</td><td>0.813</td><td>-</td></tr><tr><td>RandomForest LF [5]</td><td>0.803</td><td>0.611</td><td>0.694</td><td>-</td></tr><tr><td>RandomForest LF + GNE [5]</td><td>0.878</td><td>0.668</td><td>0.759</td><td>-</td></tr><tr><td>MLP AF [5]</td><td>0.694</td><td>0.617</td><td>0.653</td><td>-</td></tr><tr><td>MLP AF + GNE [5]</td><td>0.780</td><td>0.617</td><td>0.689</td><td>-</td></tr><tr><td>MLP LF [5]</td><td>0.637</td><td>0.662</td><td>0.649</td><td>-</td></tr><tr><td>MLP LF + GNE [5]</td><td>0.681</td><td>0.578</td><td>0.625</td><td>-</td></tr><tr><td>GCN [5]</td><td>0.812</td><td>0.512</td><td>0.628</td><td>-</td></tr><tr><td>GCN [16]</td><td>0.899</td><td>0.678</td><td>0.773</td><td>-</td></tr><tr><td>Skip-GCN [5]</td><td>0.812</td><td>0.623</td><td>0.705</td><td>-</td></tr><tr><td>EvolveGCN [5]</td><td>0.850</td><td>0.624</td><td>0.720</td><td>-</td></tr><tr><td>Inspection-L DNE (RF)</td><td>0.593</td><td>0.032</td><td>0.061</td><td>0.735</td></tr><tr><td>Inspection-L LF + DNE (RF)</td><td>0.906</td><td>0.712</td><td>0.797</td><td>0.895</td></tr><tr><td>Inspection-L AF + DNE (RF)</td><td>0.972</td><td>0.721</td><td>0.828</td><td>0.916</td></tr></table>

Figure 5: Confusion Matrix — (a) AF + DNE, (b) LF + DNE, (c) DNE
In the second experiment, LF + DNE, we concatenated the node embeddings generated by DGI with the local features (LF), which achieved an F1-score of 0.797 and a Recall of 0.712. Both results are superior to the state-of-the-art algorithms.

These results demonstrate the ability of our self-supervised GIN-based approach to generate an enhanced feature set that improves anti-money-laundering detection performance. Furthermore, the results show that the accuracy of the model improves with the enhanced feature set, which contains summary information. Note that the summary information in the AF feature set consists of 1-hop forward and 1-hop backward neighborhood summaries for each node. Unfortunately, the Elliptic dataset does not provide detailed feature descriptions, possibly for confidentiality reasons, which limits our ability to provide a deeper discussion.

Figure 4 shows the F1 measure of the three model variants across the testing time steps. Interestingly, none of the three variants can detect new illicit transactions with high precision after the dark market shutdown, which occurs at time step 43 [5]. We therefore note that developing methods that remain robust to emerging events when detecting illicit transactions is a major challenge for future work.

Figure 5 shows the confusion matrices of the three scenarios. Although the classifier trained with embedding features alone cannot accurately detect illicit transactions, it rarely classifies licit transactions as illicit; therefore, its false alarm rate is very low, as shown in Figure 5c. The RF classifier trained using both raw features and embedding features, shown in Figures 5a and 5b, achieves both a high detection rate and a low false alarm rate. The experimental results thus demonstrate that DNE node embeddings can be used for feature augmentation to improve overall detection performance.
# 5.4. Broader applications of AML

The blockchain operates as a decentralized bank for the Bitcoin cryptocurrency [31]. All Bitcoin transactions are permanently recorded on the blockchain, a visible and verifiable public ledger [32]. Bitcoin addresses are not registered to individuals, in contrast to bank accounts [2]. Due to this pseudo-anonymity [11], Bitcoin and other cryptocurrencies are increasingly used for ransomware [2], Ponzi schemes [11] and illicit material trade on the dark web [23].

While Bitcoin transactions are difficult to track, they are not completely anonymous [2]. Users can be traced through their IP addresses and transaction flows [32]. An analysis of the Bitcoin graph can reveal suspicious behavior patterns characteristic of money laundering [2]. To break the tell-tale transactional link between Bitcoin transactions and illegal activity, Bitcoin mixing services provide a new, untainted Bitcoin address from their reserves, and the pay-outs are spread out over time [2]. Bitcoin Fog is a service that hides transaction origins by bundling multiple inputs into a smaller number of larger outputs [11]. However, the additional obscuring activities can themselves add characteristic signatures to the transaction flows. Thus, it is still possible to detect patterns in the underlying transaction flow to facilitate AML detection [11, 5]. Unfortunately, next-generation cryptocurrencies such as Monero, Dash, and Z-Cash, with built-in anonymity features, make tracking and detection challenging [2]. As a result, there is a constant need for improved AML detection methodologies.
# 6. Conclusions and Future Work
|
| 268 |
+
|
| 269 |
+
This paper presents a novel approach for the detection of illicit Bitcoin transactions based on self-supervised GNNs. We first used the DGI to generate the node embedding with raw features to train the Random Forest for detection. Our experimental evaluation indicates that our approach performs exceptionally well and outperforms the state-of-the-art ML-based/Graph-based classifier overall. The evaluation results of our initial classifier demonstrate the potential of using a self-supervised GNN-based approach for illegal transaction detection in cryptocurrencies. We hope to inspire others to work on the important challenge of using graph machine learning to perform financial forensics through this research, which is lacking in the current research. In the future, we plan to integrate this with unsupervised anomaly detection algorithms to detect illegal transactions in an unsupervised manner.
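To make the embedding step of this pipeline concrete, the sketch below shows how node embeddings could be trained with Deep Graph Infomax using PyTorch Geometric's `DeepGraphInfomax` module. This is a hedged illustration of the general DGI recipe, not the authors' implementation; the encoder width, learning rate, and input feature dimension are assumed values.

```python
import torch
from torch_geometric.nn import GCNConv, DeepGraphInfomax

class Encoder(torch.nn.Module):
    def __init__(self, in_channels, hidden_channels):
        super().__init__()
        self.conv = GCNConv(in_channels, hidden_channels)
        self.act = torch.nn.PReLU(hidden_channels)

    def forward(self, x, edge_index):
        return self.act(self.conv(x, edge_index))

def corruption(x, edge_index):
    # Negative samples: shuffle node features while keeping the graph structure
    return x[torch.randperm(x.size(0))], edge_index

model = DeepGraphInfomax(
    hidden_channels=128,                                     # embedding size (assumed)
    encoder=Encoder(in_channels=165, hidden_channels=128),   # input dim (assumed)
    summary=lambda z, *args, **kwargs: torch.sigmoid(z.mean(dim=0)),
    corruption=corruption,
)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

def train_step(x, edge_index):
    # One self-supervised step: contrast real node/summary pairs against corrupted ones
    model.train()
    optimizer.zero_grad()
    pos_z, neg_z, summary = model(x, edge_index)
    loss = model.loss(pos_z, neg_z, summary)
    loss.backward()
    optimizer.step()
    return float(loss)
```

After training, `model.encoder(x, edge_index)` yields the embeddings used for feature augmentation.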
# References
[1] S. Nakamoto, Bitcoin: A peer-to-peer electronic cash system, Technical Report, Manubot, 2019.
[2] N. Kshetri, J. Voas, Do crypto-currencies fuel ransomware?, IT Professional, volume 19, IEEE, 2017, pp. 11-15.
[3] Z. Wu, S. Pan, F. Chen, G. Long, C. Zhang, P. S. Yu, A comprehensive survey on graph neural networks, IEEE Transactions on Neural Networks and Learning Systems, volume 32, 2021, pp. 4-24.
[4] P. Veličković, W. Fedus, W. L. Hamilton, P. Liò, Y. Bengio, R. D. Hjelm, Deep graph infomax, in: International Conference on Learning Representations, 2019. URL: https://openreview.net/forum?id=rklz9iAcKQ.
[5] M. Weber, G. Domeniconi, J. Chen, D. K. I. Weidele, C. Bellei, T. Robinson, C. E. Leiserson, Anti-money laundering in bitcoin: Experimenting with graph convolutional networks for financial forensics, in: ACM SIGKDD International Workshop on Knowledge Discovery and Data Mining, 2019.
[6] X. Liu, F. Zhang, Z. Hou, L. Mian, Z. Wang, J. Zhang, J. Tang, Self-supervised learning: Generative or contrastive, IEEE Transactions on Knowledge and Data Engineering, 2021. doi:10.1109/TKDE.2021.3090866.
[7] Y. Liu, M. Jin, S. Pan, C. Zhou, Y. Zheng, F. Xia, P. Yu, Graph self-supervised learning: A survey, IEEE Transactions on Knowledge and Data Engineering, 2022. doi:10.1109/TKDE.2022.3172903.
[8] C. M. Bishop, N. M. Nasrabadi, Pattern Recognition and Machine Learning, volume 4, Springer, 2006.
[9] T. N. Kipf, M. Welling, Semi-supervised classification with graph convolutional networks, in: International Conference on Learning Representations, 2017.
[10] A. Pareja, G. Domeniconi, J. Chen, T. Ma, T. Suzumura, H. Kanezashi, T. Kaler, T. Schardl, C. Leiserson, EvolveGCN: Evolving graph convolutional networks for dynamic graphs, in: Proceedings of the AAAI Conference on Artificial Intelligence, 2020, pp. 5363-5370.
[11] Y. Hu, S. Seneviratne, K. Thilakarathna, K. Fukuda, A. Seneviratne, Characterizing and detecting money laundering activities on the bitcoin network, arXiv preprint arXiv:1912.12060, 2019.
[12] J. A. Bondy, U. S. R. Murty, et al., Graph Theory with Applications, volume 290, Macmillan, London, 1976.
[13] A. Grover, J. Leskovec, node2vec: Scalable feature learning for networks, in: Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, 2016, pp. 855-864.
[14] D. Vassallo, V. Vella, J. Ellul, Application of gradient boosting algorithms for anti-money laundering in cryptocurrencies, SN Computer Science, volume 2, Springer, 2021, pp. 1-15.
[15] C. Lee, S. Maharjan, K. Ko, J. W.-K. Hong, Toward detecting illegal transactions on bitcoin using machine-learning methods, in: International Conference on Blockchain and Trustworthy Systems, Springer, 2019, pp. 520-533.
[16] I. Alarab, S. Prakoonwit, M. I. Nacer, Competence of graph convolutional networks for anti-money laundering in bitcoin blockchain, in: Proceedings of the 2020 5th International Conference on Machine Learning Technologies, 2020, pp. 23-27.
[17] L. Nan, D. Tao, Bitcoin mixing detection using deep autoencoder, in: 2018 IEEE Third International Conference on Data Science in Cyberspace (DSC), IEEE, 2018, pp. 280-287.
[18] J. Lorenz, M. I. Silva, D. Aparicio, J. T. Ascensão, P. Bizarro, Machine learning methods to detect money laundering in the bitcoin blockchain in the presence of label scarcity, in: Proceedings of the First ACM International Conference on AI in Finance, 2020, pp. 1-8.
[19] T. Pham, S. Lee, Anomaly detection in bitcoin network using unsupervised learning methods, arXiv preprint arXiv:1611.03941, 2016.
[20] P. Monamo, V. Marivate, B. Twala, Unsupervised learning for robust bitcoin fraud detection, in: 2016 Information Security for South Africa (ISSA), IEEE, 2016, pp. 129-134.
[21] S. Li, F. Xu, R. Wang, S. Zhong, Self-supervised incremental deep graph learning for ethereum phishing scam detection, arXiv preprint arXiv:2106.10176, 2021.
[22] N. Shervashidze, P. Schweitzer, E. J. Van Leeuwen, K. Mehlhorn, K. M. Borgwardt, Weisfeiler-Lehman graph kernels, Journal of Machine Learning Research, volume 12, 2011.
[23] G. K. Kulatilleke, M. Portmann, R. Ko, S. S. Chandra, FDGATII: Fast dynamic graph attention with initial residual and identity mapping, arXiv preprint arXiv:2110.11464, 2021.
[24] K. Xu, W. Hu, J. Leskovec, S. Jegelka, How powerful are graph neural networks?, in: International Conference on Learning Representations, 2019. URL: https://openreview.net/forum?id=ryGs6iA5Km.
[25] W. L. Hamilton, R. Ying, J. Leskovec, Inductive representation learning on large graphs, in: Advances in Neural Information Processing Systems, 2017. arXiv:1706.02216.
[26] C. Zhang, D. Song, C. Huang, A. Swami, N. V. Chawla, Heterogeneous graph neural network, in: Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, 2019, pp. 793-803.
[27] P. Veličković, G. Cucurull, A. Casanova, A. Romero, P. Liò, Y. Bengio, Graph attention networks, in: International Conference on Learning Representations (ICLR), 2018.
[28] S. Yun, M. Jeong, R. Kim, J. Kang, H. J. Kim, Graph transformer networks, in: Advances in Neural Information Processing Systems, volume 32, 2019.
[29] S. Ioffe, C. Szegedy, Batch normalization: Accelerating deep network training by reducing internal covariate shift, in: International Conference on Machine Learning, PMLR, 2015, pp. 448-456.
[30] D. T. Robinson, How to Combat Financial Crime in Cryptocurrencies, 2019. URL: https://www.elliptic.co/blog/elliptic-dataset-cryptocurrency-financial-crime.
[31] S. Nakamoto, Bitcoin: A peer-to-peer electronic cash system [EB/OL], Consulted 1 (2008) 28.
[32] R. Van Wegberg, J.-J. Oerlemans, O. van Deventer, Bitcoin money laundering: mixed results? An explorative study on money laundering of cybercrime proceeds using bitcoin, Journal of Financial Crime, 2018.
2203.10xxx/2203.10465/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a9005df90ef0774c73eb7d3df32203319b9410928f7e1e31d24a698f46bfae59
size 313342
2203.10xxx/2203.10465/layout.json
ADDED
The diff for this file is too large to render.
See raw diff
2203.10xxx/2203.10541/b63ce456-502b-4012-b6a3-0c6523da6183_content_list.json
ADDED
@@ -0,0 +1,1999 @@
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "Unsupervised Domain Adaptation for Nighttime Aerial Tracking",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
158,
|
| 8 |
+
130,
|
| 9 |
+
813,
|
| 10 |
+
152
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Junjie Ye†, Changhong Fu†,\\*, Guangze Zheng†, Danda Pani Paudel‡, and Guang Chen† \n†Tongji University, China ETH Zürich, Switzerland",
|
| 17 |
+
"bbox": [
|
| 18 |
+
143,
|
| 19 |
+
179,
|
| 20 |
+
821,
|
| 21 |
+
215
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "{ye.jun.jie, changhongfu, mmlp, guangchen}@tongji.edu.cn, paudel@vision.ee.ethz.ch",
|
| 28 |
+
"bbox": [
|
| 29 |
+
120,
|
| 30 |
+
219,
|
| 31 |
+
841,
|
| 32 |
+
234
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "Abstract",
|
| 39 |
+
"text_level": 1,
|
| 40 |
+
"bbox": [
|
| 41 |
+
233,
|
| 42 |
+
268,
|
| 43 |
+
313,
|
| 44 |
+
284
|
| 45 |
+
],
|
| 46 |
+
"page_idx": 0
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"type": "text",
|
| 50 |
+
"text": "Previous advances in object tracking mostly reported on favorable illumination circumstances while neglecting performance at nighttime, which significantly impeded the development of related aerial robot applications. This work instead develops a novel unsupervised domain adaptation framework for nighttime aerial tracking (named UDAT). Specifically, a unique object discovery approach is provided to generate training patches from raw nighttime tracking videos. To tackle the domain discrepancy, we employ a Transformer-based bridging layer post to the feature extractor to align image features from both domains. With a Transformer day/night feature discriminator, the daytime tracking model is adversarially trained to track at night. Moreover, we construct a pioneering benchmark namely NAT2021 for unsupervised domain adaptive nighttime tracking, which comprises a test set of 180 manually annotated tracking sequences and a train set of over 276k unlabelled nighttime tracking frames. Exhaustive experiments demonstrate the robustness and domain adaptability of the proposed framework in nighttime aerial tracking. The code and benchmark are available at https://github.com/vision4robotics/UDAT.",
|
| 51 |
+
"bbox": [
|
| 52 |
+
75,
|
| 53 |
+
301,
|
| 54 |
+
473,
|
| 55 |
+
633
|
| 56 |
+
],
|
| 57 |
+
"page_idx": 0
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"type": "text",
|
| 61 |
+
"text": "1. Introduction",
|
| 62 |
+
"text_level": 1,
|
| 63 |
+
"bbox": [
|
| 64 |
+
76,
|
| 65 |
+
665,
|
| 66 |
+
209,
|
| 67 |
+
681
|
| 68 |
+
],
|
| 69 |
+
"page_idx": 0
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"type": "text",
|
| 73 |
+
"text": "Standing as one of the fundamental tasks in computer vision, object tracking has received widespread attention with multifarious aerial robot applications, e.g., unmanned aerial vehicle (UAV) self-localization [49], target following [25], and aerial cinematography [2]. Driven by large-scale datasets [10,17,32] with the supervision of meticulous manual annotations, emerging deep trackers [4, 8, 14, 22] keep setting state-of-the-arts (SOTAs) in recent years.",
|
| 74 |
+
"bbox": [
|
| 75 |
+
75,
|
| 76 |
+
691,
|
| 77 |
+
468,
|
| 78 |
+
813
|
| 79 |
+
],
|
| 80 |
+
"page_idx": 0
|
| 81 |
+
},
|
| 82 |
+
{
|
| 83 |
+
"type": "text",
|
| 84 |
+
"text": "Despite the advances, whether current benchmarks or approaches are proposed for object tracking under favorable illumination conditions. In contrast to daytime, images captured at night have low contrast, brightness, and signal",
|
| 85 |
+
"bbox": [
|
| 86 |
+
75,
|
| 87 |
+
813,
|
| 88 |
+
470,
|
| 89 |
+
875
|
| 90 |
+
],
|
| 91 |
+
"page_idx": 0
|
| 92 |
+
},
|
| 93 |
+
{
|
| 94 |
+
"type": "image",
|
| 95 |
+
"img_path": "images/ce284baba9fa41e31e86773b9ddc262f67a4ca770e8e47d2c80d7d8ddc18b26e.jpg",
|
| 96 |
+
"image_caption": [],
|
| 97 |
+
"image_footnote": [],
|
| 98 |
+
"bbox": [
|
| 99 |
+
511,
|
| 100 |
+
268,
|
| 101 |
+
635,
|
| 102 |
+
324
|
| 103 |
+
],
|
| 104 |
+
"page_idx": 0
|
| 105 |
+
},
|
| 106 |
+
{
|
| 107 |
+
"type": "image",
|
| 108 |
+
"img_path": "images/d7a5a6b7418e8433aa016fc883fe9fc0f643129a653d00c01afb9405d61d0e96.jpg",
|
| 109 |
+
"image_caption": [],
|
| 110 |
+
"image_footnote": [],
|
| 111 |
+
"bbox": [
|
| 112 |
+
513,
|
| 113 |
+
324,
|
| 114 |
+
635,
|
| 115 |
+
378
|
| 116 |
+
],
|
| 117 |
+
"page_idx": 0
|
| 118 |
+
},
|
| 119 |
+
{
|
| 120 |
+
"type": "image",
|
| 121 |
+
"img_path": "images/f4c2a92be04b12e296f82f171503ae6129b26aa77ceb9725d5bb6a81ec37d3b7.jpg",
|
| 122 |
+
"image_caption": [
|
| 123 |
+
"UDAT-CAR U"
|
| 124 |
+
],
|
| 125 |
+
"image_footnote": [],
|
| 126 |
+
"bbox": [
|
| 127 |
+
513,
|
| 128 |
+
378,
|
| 129 |
+
635,
|
| 130 |
+
434
|
| 131 |
+
],
|
| 132 |
+
"page_idx": 0
|
| 133 |
+
},
|
| 134 |
+
{
|
| 135 |
+
"type": "image",
|
| 136 |
+
"img_path": "images/864e5dd45b1488094355b04c4747c629dc9fbacc0d75ccda61dafdaa8a399b0d.jpg",
|
| 137 |
+
"image_caption": [],
|
| 138 |
+
"image_footnote": [],
|
| 139 |
+
"bbox": [
|
| 140 |
+
637,
|
| 141 |
+
268,
|
| 142 |
+
758,
|
| 143 |
+
324
|
| 144 |
+
],
|
| 145 |
+
"page_idx": 0
|
| 146 |
+
},
|
| 147 |
+
{
|
| 148 |
+
"type": "image",
|
| 149 |
+
"img_path": "images/be757f684dd6898d37f1e377ac0ed1bddea527277788bec48cce3a887c1721b0.jpg",
|
| 150 |
+
"image_caption": [],
|
| 151 |
+
"image_footnote": [],
|
| 152 |
+
"bbox": [
|
| 153 |
+
637,
|
| 154 |
+
324,
|
| 155 |
+
758,
|
| 156 |
+
378
|
| 157 |
+
],
|
| 158 |
+
"page_idx": 0
|
| 159 |
+
},
|
| 160 |
+
{
|
| 161 |
+
"type": "image",
|
| 162 |
+
"img_path": "images/38b29a113a5e8ce63b6f3715c7b007316810fbe2ab2bd35a3696a136d953d999.jpg",
|
| 163 |
+
"image_caption": [
|
| 164 |
+
"AT-BAN ——SiamCAR"
|
| 165 |
+
],
|
| 166 |
+
"image_footnote": [],
|
| 167 |
+
"bbox": [
|
| 168 |
+
637,
|
| 169 |
+
378,
|
| 170 |
+
758,
|
| 171 |
+
434
|
| 172 |
+
],
|
| 173 |
+
"page_idx": 0
|
| 174 |
+
},
|
| 175 |
+
{
|
| 176 |
+
"type": "image",
|
| 177 |
+
"img_path": "images/f81ca61ae02dfea888cb54ccb4be59281526f68cd383ed551c4be8db3778451b.jpg",
|
| 178 |
+
"image_caption": [],
|
| 179 |
+
"image_footnote": [],
|
| 180 |
+
"bbox": [
|
| 181 |
+
759,
|
| 182 |
+
268,
|
| 183 |
+
883,
|
| 184 |
+
324
|
| 185 |
+
],
|
| 186 |
+
"page_idx": 0
|
| 187 |
+
},
|
| 188 |
+
{
|
| 189 |
+
"type": "image",
|
| 190 |
+
"img_path": "images/ce8668374dfbe840e543f251c5d7954a6d5efd3ca883fbb6cfb024c10480854a.jpg",
|
| 191 |
+
"image_caption": [],
|
| 192 |
+
"image_footnote": [],
|
| 193 |
+
"bbox": [
|
| 194 |
+
759,
|
| 195 |
+
324,
|
| 196 |
+
883,
|
| 197 |
+
378
|
| 198 |
+
],
|
| 199 |
+
"page_idx": 0
|
| 200 |
+
},
|
| 201 |
+
{
|
| 202 |
+
"type": "image",
|
| 203 |
+
"img_path": "images/cd6d59c71081e486f9a15ee9c6bbc944a76377c1ed5be0baa3116566ef9b2ef3.jpg",
|
| 204 |
+
"image_caption": [
|
| 205 |
+
"SiamBAN Target object"
|
| 206 |
+
],
|
| 207 |
+
"image_footnote": [],
|
| 208 |
+
"bbox": [
|
| 209 |
+
759,
|
| 210 |
+
378,
|
| 211 |
+
883,
|
| 212 |
+
434
|
| 213 |
+
],
|
| 214 |
+
"page_idx": 0
|
| 215 |
+
},
|
| 216 |
+
{
|
| 217 |
+
"type": "image",
|
| 218 |
+
"img_path": "images/77f06d336e165493865d24e52edaef37170dea71d1c5c33bcee3bd8f0359f3d5.jpg",
|
| 219 |
+
"image_caption": [
|
| 220 |
+
"(a) Qualitative comparison in typical night scenes.",
|
| 221 |
+
"(b) Overall performance comparison on NAT2021-test.",
|
| 222 |
+
"Figure 1. (a) Qualitative comparison of the proposed unsupervised domain adaptive trackers (i.e., UDAT-CAR and UDAT-BAN) and their baselines [8, 14]. (b) Overall performance of SOTA approaches on the constructed NAT2021-test benchmark. The proposed UDAT effectively adapts general trackers to nighttime aerial tracking scenes and yields favorable performance."
|
| 223 |
+
],
|
| 224 |
+
"image_footnote": [],
|
| 225 |
+
"bbox": [
|
| 226 |
+
509,
|
| 227 |
+
465,
|
| 228 |
+
887,
|
| 229 |
+
574
|
| 230 |
+
],
|
| 231 |
+
"page_idx": 0
|
| 232 |
+
},
|
| 233 |
+
{
|
| 234 |
+
"type": "text",
|
| 235 |
+
"text": "to-noise ratio (SNR). These differences cause the discrepancy in feature distribution of day/night images. Due to the cross-domain discrepancy, current SOTA trackers generalize badly to nighttime scenes [48, 50], which severely impedes the broadening of relevant aerial robot applications.",
|
| 236 |
+
"bbox": [
|
| 237 |
+
496,
|
| 238 |
+
688,
|
| 239 |
+
890,
|
| 240 |
+
763
|
| 241 |
+
],
|
| 242 |
+
"page_idx": 0
|
| 243 |
+
},
|
| 244 |
+
{
|
| 245 |
+
"type": "text",
|
| 246 |
+
"text": "Regarding such domain gap and the performance drop, this work aims to address the cross-domain object tracking problem. In particular, we target adapting SOTA tracking models in daytime general conditions to nighttime aerial perspectives. One possible straightforward solution is to collect and annotate adequate target domain data for training. Nevertheless, such a non-trivial workload is expensive and time-consuming, since backbones' pre-training alone generally takes millions of high-quality images [9]. We",
|
| 247 |
+
"bbox": [
|
| 248 |
+
496,
|
| 249 |
+
765,
|
| 250 |
+
892,
|
| 251 |
+
902
|
| 252 |
+
],
|
| 253 |
+
"page_idx": 0
|
| 254 |
+
},
|
| 255 |
+
{
|
| 256 |
+
"type": "page_footnote",
|
| 257 |
+
"text": "*Corresponding author",
|
| 258 |
+
"bbox": [
|
| 259 |
+
94,
|
| 260 |
+
887,
|
| 261 |
+
220,
|
| 262 |
+
898
|
| 263 |
+
],
|
| 264 |
+
"page_idx": 0
|
| 265 |
+
},
|
| 266 |
+
{
|
| 267 |
+
"type": "page_number",
|
| 268 |
+
"text": "1",
|
| 269 |
+
"bbox": [
|
| 270 |
+
480,
|
| 271 |
+
924,
|
| 272 |
+
488,
|
| 273 |
+
936
|
| 274 |
+
],
|
| 275 |
+
"page_idx": 0
|
| 276 |
+
},
|
| 277 |
+
{
|
| 278 |
+
"type": "text",
|
| 279 |
+
"text": "consequently consider the problem as an unsupervised domain adaptation task, where training data in the source domain is with well-annotated bounding boxes while that in the target domain has no manually annotated labels. Therefore, an unsupervised domain adaptive tracking framework, referred to as UDAT, is proposed for nighttime aerial tracking. To generate training patches of the target domain, we develop an object discovery strategy to explore potential objects in the unlabelled nighttime data. Besides, a bridging layer is proposed to bridge the gap of domain discrepancy for the extracted features.",
|
| 280 |
+
"bbox": [
|
| 281 |
+
75,
|
| 282 |
+
90,
|
| 283 |
+
472,
|
| 284 |
+
256
|
| 285 |
+
],
|
| 286 |
+
"page_idx": 1
|
| 287 |
+
},
|
| 288 |
+
{
|
| 289 |
+
"type": "text",
|
| 290 |
+
"text": "Furthermore, the feature domain is distinguished by virtue of a discriminator during adversarial learning. Drawing lessons from the huge potential of the Transformer [43] in feature representation, both the bridging layer and the discriminator utilize a Transformer structure. Figure 1 exhibits some qualitative comparisons of trackers adopting UDAT and the corresponding baselines. UDAT raises baselines' nighttime aerial tracking performance substantially. Apart from methodology, we construct NAT2021, a benchmark comprising a test set of 180 fully annotated video sequences and a train set of over 276k unlabelled nighttime tracking frames, which serves as the first benchmark for unsupervised domain adaptive nighttime tracking. The main contributions of this work are fourfold:",
|
| 291 |
+
"bbox": [
|
| 292 |
+
75,
|
| 293 |
+
257,
|
| 294 |
+
472,
|
| 295 |
+
468
|
| 296 |
+
],
|
| 297 |
+
"page_idx": 1
|
| 298 |
+
},
|
| 299 |
+
{
|
| 300 |
+
"type": "list",
|
| 301 |
+
"sub_type": "text",
|
| 302 |
+
"list_items": [
|
| 303 |
+
"- An unsupervised domain adaptive tracking framework, namely UDAT, is proposed for nighttime aerial tracking. To the best of our knowledge, the proposed UDAT is the first unsupervised adaptation framework for object tracking.",
|
| 304 |
+
"- A bridging layer and a day/night discriminator with Transformer structures are incorporated to align extracted features from different domains and narrow the domain gap between daytime and nighttime.",
|
| 305 |
+
"- A pioneering benchmark namely NAT2021, consisting of a fully annotated test set and an unlabelled train set, is constructed for domain adaptive nighttime tracking. An object discovery strategy is introduced for the unlabelled train set preprocessing.",
|
| 306 |
+
"- Extensive experiments on NAT2021-test and the recent public UAVDark70 [21] benchmark verify the effectiveness and domain adaptability of the proposed UDAT in nighttime aerial tracking."
|
| 307 |
+
],
|
| 308 |
+
"bbox": [
|
| 309 |
+
96,
|
| 310 |
+
479,
|
| 311 |
+
468,
|
| 312 |
+
782
|
| 313 |
+
],
|
| 314 |
+
"page_idx": 1
|
| 315 |
+
},
|
| 316 |
+
{
|
| 317 |
+
"type": "text",
|
| 318 |
+
"text": "2. Related work",
|
| 319 |
+
"text_level": 1,
|
| 320 |
+
"bbox": [
|
| 321 |
+
76,
|
| 322 |
+
806,
|
| 323 |
+
215,
|
| 324 |
+
821
|
| 325 |
+
],
|
| 326 |
+
"page_idx": 1
|
| 327 |
+
},
|
| 328 |
+
{
|
| 329 |
+
"type": "text",
|
| 330 |
+
"text": "2.1. Object tracking",
|
| 331 |
+
"text_level": 1,
|
| 332 |
+
"bbox": [
|
| 333 |
+
76,
|
| 334 |
+
830,
|
| 335 |
+
235,
|
| 336 |
+
848
|
| 337 |
+
],
|
| 338 |
+
"page_idx": 1
|
| 339 |
+
},
|
| 340 |
+
{
|
| 341 |
+
"type": "text",
|
| 342 |
+
"text": "Generally, recent object tracking approaches can be categorized as the discriminative correlation filter (DCF)-based approaches [12, 16, 19, 27] and the Siamese network-based",
|
| 343 |
+
"bbox": [
|
| 344 |
+
76,
|
| 345 |
+
854,
|
| 346 |
+
468,
|
| 347 |
+
900
|
| 348 |
+
],
|
| 349 |
+
"page_idx": 1
|
| 350 |
+
},
|
| 351 |
+
{
|
| 352 |
+
"type": "text",
|
| 353 |
+
"text": "approaches [4, 8, 14, 22]. Due to the complicated online learning procedure, end-to-end training can be hardly realized on DCF-based trackers. Therefore, restricted to inferior handcrafted features or inappropriate deep feature extractors pre-trained for classification, DCF-based trackers lose their effectiveness in adverse conditions.",
|
| 354 |
+
"bbox": [
|
| 355 |
+
496,
|
| 356 |
+
90,
|
| 357 |
+
890,
|
| 358 |
+
180
|
| 359 |
+
],
|
| 360 |
+
"page_idx": 1
|
| 361 |
+
},
|
| 362 |
+
{
|
| 363 |
+
"type": "text",
|
| 364 |
+
"text": "Benefiting from considerable training data and end-to-end learning, Siamese network-based trackers have achieved robust tracking performance. This line of approaches is pioneered by SINT [41] and SiamFC [1], which regard object tracking as a similarity learning problem and train Siamese networks with large-scale image pairs. Drawing lessons from object detection, B. Li et al. [23] introduce the region proposal network (RPN) [33] into the Siamese framework. SiamRPN++ [22] further adopts a deeper backbone and feature aggregation architecture to improve tracking accuracy. To alleviate increasing hyperparameters along with the introduction of RPN, the anchor-free approaches [8, 14, 47] adopt the per-pixel regression to directly predict four offsets on each pixel. Recently, Transformer [43] is incorporated into the Siamese framework [4,6,45] to model global information and further boost tracking performance.",
|
| 365 |
+
"bbox": [
|
| 366 |
+
496,
|
| 367 |
+
181,
|
| 368 |
+
892,
|
| 369 |
+
439
|
| 370 |
+
],
|
| 371 |
+
"page_idx": 1
|
| 372 |
+
},
|
| 373 |
+
{
|
| 374 |
+
"type": "text",
|
| 375 |
+
"text": "Despite the great progress, object tracking in adverse conditions, for instance, nighttime aerial scenarios, lacks thorough study so far. In [21], a DCF framework integrated with a low-light enhancer is constructed while lacking transferability and being restricted to handcrafted features. Some studies [48, 50] design tracking-related low-light enhancers for data preprocessing in the tracking pipeline. However, such a paradigm suffers from weak collaboration with the tracking model and the cascade structure can hardly learn to narrow the domain gap at the feature level.",
|
| 376 |
+
"bbox": [
|
| 377 |
+
496,
|
| 378 |
+
440,
|
| 379 |
+
890,
|
| 380 |
+
590
|
| 381 |
+
],
|
| 382 |
+
"page_idx": 1
|
| 383 |
+
},
|
| 384 |
+
{
|
| 385 |
+
"type": "text",
|
| 386 |
+
"text": "2.2. Domain adaptation",
|
| 387 |
+
"text_level": 1,
|
| 388 |
+
"bbox": [
|
| 389 |
+
500,
|
| 390 |
+
604,
|
| 391 |
+
684,
|
| 392 |
+
619
|
| 393 |
+
],
|
| 394 |
+
"page_idx": 1
|
| 395 |
+
},
|
| 396 |
+
{
|
| 397 |
+
"type": "text",
|
| 398 |
+
"text": "Towards narrowing the domain discrepancy and transferring knowledge from the source domain to the target domain, domain adaptation attracts considerable attention and is widely adopted in image classification [3, 26, 40]. Beyond classification, Y. Chen et al. [7] design a domain adaptive object detection framework and tackle the domain shift on both image-level and instance-level. In [18], an image transfer model is trained to perform day-to-night transformation for data augmentation before learning a detection model. Y. Sasagawa and H. Nagahara [37] propose to merge a low-light image enhancement model and an object detection model to realize nighttime object detection. For the task of semantic segmentation, C. Sakaridis et al. [36] formulate a curriculum framework to adapt semantic segmentation models from day to night through an intermediate twilight domain. X. Wu et al. [46] employ an adversarial learning manner to train a domain adaptation network for nighttime semantic segmentation. S. Saha et al. [35]",
|
| 399 |
+
"bbox": [
|
| 400 |
+
496,
|
| 401 |
+
628,
|
| 402 |
+
890,
|
| 403 |
+
900
|
| 404 |
+
],
|
| 405 |
+
"page_idx": 1
|
| 406 |
+
},
|
| 407 |
+
{
|
| 408 |
+
"type": "page_number",
|
| 409 |
+
"text": "2",
|
| 410 |
+
"bbox": [
|
| 411 |
+
478,
|
| 412 |
+
924,
|
| 413 |
+
491,
|
| 414 |
+
936
|
| 415 |
+
],
|
| 416 |
+
"page_idx": 1
|
| 417 |
+
},
|
| 418 |
+
{
|
| 419 |
+
"type": "image",
|
| 420 |
+
"img_path": "images/c79565a1689e7ade0e010407ca9d3dfaf3886c487753790b39936259e33a1d14.jpg",
|
| 421 |
+
"image_caption": [
|
| 422 |
+
"Figure 2. Illustration of the proposed unsupervised domain adaptation framework for nighttime aerial tracking. The object discovery module is employed to find potential objects in raw videos for training patch generation. Features extracted from different domains are aligned via the Transformer bridging layer. A Transformer day/night discriminator is trained to distinguish features between the source domain and the target domain."
|
| 423 |
+
],
|
| 424 |
+
"image_footnote": [],
|
| 425 |
+
"bbox": [
|
| 426 |
+
138,
|
| 427 |
+
88,
|
| 428 |
+
831,
|
| 429 |
+
280
|
| 430 |
+
],
|
| 431 |
+
"page_idx": 2
|
| 432 |
+
},
|
| 433 |
+
{
|
| 434 |
+
"type": "text",
|
| 435 |
+
"text": "mine cross-task relationships and build a multi-task learning framework for semantic segmentation and depth estimation in the unsupervised domain adaptation setting. Despite the rapid development in other vision tasks, domain adaptation for object tracking has not been investigated yet. Therefore, an effective unsupervised domain adaptation framework for object tracking is urgently needed.",
|
| 436 |
+
"bbox": [
|
| 437 |
+
75,
|
| 438 |
+
353,
|
| 439 |
+
472,
|
| 440 |
+
460
|
| 441 |
+
],
|
| 442 |
+
"page_idx": 2
|
| 443 |
+
},
|
| 444 |
+
{
|
| 445 |
+
"type": "text",
|
| 446 |
+
"text": "3. Proposed method",
|
| 447 |
+
"text_level": 1,
|
| 448 |
+
"bbox": [
|
| 449 |
+
76,
|
| 450 |
+
474,
|
| 451 |
+
246,
|
| 452 |
+
493
|
| 453 |
+
],
|
| 454 |
+
"page_idx": 2
|
| 455 |
+
},
|
| 456 |
+
{
|
| 457 |
+
"type": "text",
|
| 458 |
+
"text": "The paradigm of the proposed UDAT framework is illustrated in Fig. 2. For data preprocessing of the unlabelled target domain, we employ a saliency detection-based strategy to locate potential objects and crop paired training patches. In the training pipeline, features generated by the feature extractor are modulated by the bridging layer. In this process, adversarial learning facilitates the reduction of feature distribution discrepancy between the source and target domains. Through this simple yet effective process, trackers can achieve pleasant efficiency and robustness for night scenes comparable to daytime tracking.",
|
| 459 |
+
"bbox": [
|
| 460 |
+
75,
|
| 461 |
+
501,
|
| 462 |
+
470,
|
| 463 |
+
667
|
| 464 |
+
],
|
| 465 |
+
"page_idx": 2
|
| 466 |
+
},
|
| 467 |
+
{
|
| 468 |
+
"type": "text",
|
| 469 |
+
"text": "3.1. Data preprocessing",
|
| 470 |
+
"text_level": 1,
|
| 471 |
+
"bbox": [
|
| 472 |
+
76,
|
| 473 |
+
680,
|
| 474 |
+
261,
|
| 475 |
+
696
|
| 476 |
+
],
|
| 477 |
+
"page_idx": 2
|
| 478 |
+
},
|
| 479 |
+
{
|
| 480 |
+
"type": "text",
|
| 481 |
+
"text": "Since deep trackers take training patches as input in each training step, we develop an object discovery strategy for data preprocessing on the unlabeled train set. Figure 3 illustrates the preprocessing pipeline. The object discovery strategy involves three stages, i.e., low-light enhancement, salient object detection, and dynamic programming. Given the low visibility of nighttime images, original images are first lighted up by a low-light enhancer [24]. Afterward, enhanced images are fed into the video saliency detection model [52]. Candidate boxes are then obtained by building the minimum bounding rectangle of detected salient regions. To generate a box sequence that locates the same object across the timeline, motivated by [55], we",
|
| 482 |
+
"bbox": [
|
| 483 |
+
75,
|
| 484 |
+
704,
|
| 485 |
+
470,
|
| 486 |
+
900
|
| 487 |
+
],
|
| 488 |
+
"page_idx": 2
|
| 489 |
+
},
|
| 490 |
+
{
|
| 491 |
+
"type": "image",
|
| 492 |
+
"img_path": "images/60846639c9e44968f15211162a18c16fe248fbe6c8cfa9cff154e58d9a5df932.jpg",
|
| 493 |
+
"image_caption": [
|
| 494 |
+
"Figure 3. Illustration of object discovery, which contains low-light enhancement, salient object detection, and dynamic programming. The pink masks indicate detected salient regions, while the boxes are circumscribed rectangles of these regions. Dynamic programming selects red boxes and filters blue ones. The green box is obtained by linear interpolation between two adjacent frames. Note that the cropped patches are enhanced for visualization, original patches are utilized in the practical training process instead."
|
| 495 |
+
],
|
| 496 |
+
"image_footnote": [],
|
| 497 |
+
"bbox": [
|
| 498 |
+
506,
|
| 499 |
+
349,
|
| 500 |
+
883,
|
| 501 |
+
484
|
| 502 |
+
],
|
| 503 |
+
"page_idx": 2
|
| 504 |
+
},
|
| 505 |
+
{
|
| 506 |
+
"type": "text",
|
| 507 |
+
"text": "adopt dynamic programming to filter noisy boxes. Assuming two boxes from the $j$ -th frame and the $k$ -th frame as $[x_{j,m}, y_{j,m}, w_{j,m}, h_{j,m}]$ and $[x_{k,n}, y_{k,n}, w_{k,n}, h_{k,n}]$ , where $m$ and $n$ indicate the box indexes, and $(x,y)$ , $w$ , $h$ denote the top-left coordinate, width, and height of the box, respectively, the normalized distance $D_{\\mathrm{norm}}$ is obtained as:",
|
| 508 |
+
"bbox": [
|
| 509 |
+
498,
|
| 510 |
+
608,
|
| 511 |
+
890,
|
| 512 |
+
700
|
| 513 |
+
],
|
| 514 |
+
"page_idx": 2
|
| 515 |
+
},
|
| 516 |
+
{
|
| 517 |
+
"type": "equation",
|
| 518 |
+
"text": "\n$$\n\\begin{array}{l} D _ {\\mathrm {n o r m}} = (\\frac {x _ {j , m} - x _ {k , n}}{w _ {k , n}}) ^ {2} + (\\frac {y _ {j , m} - y _ {k , n}}{h _ {k , n}}) ^ {2} \\\\ + \\left(\\log \\left(\\frac {w _ {j , m}}{w _ {k , n}}\\right)\\right) ^ {2} + \\left(\\log \\left(\\frac {h _ {j , m}}{h _ {k , n}}\\right)\\right) ^ {2}. \\tag {1} \\\\ \\end{array}\n$$\n",
|
| 519 |
+
"text_format": "latex",
|
| 520 |
+
"bbox": [
|
| 521 |
+
540,
|
| 522 |
+
708,
|
| 523 |
+
890,
|
| 524 |
+
772
|
| 525 |
+
],
|
| 526 |
+
"page_idx": 2
|
| 527 |
+
},
|
| 528 |
+
{
|
| 529 |
+
"type": "text",
|
| 530 |
+
"text": "In dynamic programming, the normalized distance of candidate boxes between frames serves as a smooth reward, while adding a box in a frame to the box sequence means an incremental reward. For frames where none of the boxes is selected by dynamic programming, linear interpolation is adopted between the two closest frames to generate a new box. Ultimately, paired training patches are cropped from original images according to the obtained box sequence.",
|
| 531 |
+
"bbox": [
|
| 532 |
+
496,
|
| 533 |
+
779,
|
| 534 |
+
892,
|
| 535 |
+
900
|
| 536 |
+
],
|
| 537 |
+
"page_idx": 2
|
| 538 |
+
},
|
| 539 |
+
{
|
| 540 |
+
"type": "page_number",
|
| 541 |
+
"text": "3",
|
| 542 |
+
"bbox": [
|
| 543 |
+
478,
|
| 544 |
+
924,
|
| 545 |
+
490,
|
| 546 |
+
936
|
| 547 |
+
],
|
| 548 |
+
"page_idx": 2
|
| 549 |
+
},
|
| 550 |
+
{
|
| 551 |
+
"type": "text",
|
| 552 |
+
"text": "3.2. Network architecture",
|
| 553 |
+
"text_level": 1,
|
| 554 |
+
"bbox": [
|
| 555 |
+
76,
|
| 556 |
+
90,
|
| 557 |
+
277,
|
| 558 |
+
104
|
| 559 |
+
],
|
| 560 |
+
"page_idx": 3
|
| 561 |
+
},
|
| 562 |
+
{
|
| 563 |
+
"type": "text",
|
| 564 |
+
"text": "Feature extractor. Feature extraction of Siamese networks generally consists of two branches, i.e., the template branch and the search branch. Both branches generate feature maps from the template patch $\\mathbf{Z}$ and the search patch $\\mathbf{X}$ , namely $\\varphi(\\mathbf{Z})$ and $\\varphi(\\mathbf{X})$ , by adopting an identical backbone network $\\varphi$ . Generally, trackers adopt the last block or blocks of features for subsequent classification and regression, which can be represented as follows:",
|
| 565 |
+
"bbox": [
|
| 566 |
+
76,
|
| 567 |
+
113,
|
| 568 |
+
470,
|
| 569 |
+
234
|
| 570 |
+
],
|
| 571 |
+
"page_idx": 3
|
| 572 |
+
},
|
| 573 |
+
{
|
| 574 |
+
"type": "equation",
|
| 575 |
+
"text": "\n$$\n\\begin{array}{l} \\varphi (\\mathbf {X}) = \\operatorname {C o n c a t} \\left(\\mathcal {F} _ {N - i} (\\mathbf {X}), \\dots , \\mathcal {F} _ {N - 1} (\\mathbf {X}), \\mathcal {F} _ {N} (\\mathbf {X})\\right), \\tag {2} \\\\ \\varphi (\\mathbf {Z}) = \\mathrm {C o n c a t} (\\mathcal {F} _ {N - i} (\\mathbf {Z}), \\dots , \\mathcal {F} _ {N - 1} (\\mathbf {Z}), \\mathcal {F} _ {N} (\\mathbf {Z})) , \\\\ \\end{array}\n$$\n",
|
| 576 |
+
"text_format": "latex",
|
| 577 |
+
"bbox": [
|
| 578 |
+
84,
|
| 579 |
+
244,
|
| 580 |
+
468,
|
| 581 |
+
281
|
| 582 |
+
],
|
| 583 |
+
"page_idx": 3
|
| 584 |
+
},
|
| 585 |
+
{
|
| 586 |
+
"type": "text",
|
| 587 |
+
"text": "where $\\mathcal{F}_{*}(\\cdot)$ indicates features extracted from the $*$ -th block of a backbone with $N$ blocks in total, and Concat denotes channel-wise concatenation. Since both $\\varphi(\\mathbf{X})$ and $\\varphi(\\mathbf{Z})$ will pass through the following Transformer bridging layer and discriminator, we take the instance of $\\varphi(\\mathbf{X})$ in the following introduction for clarity.",
|
| 588 |
+
"bbox": [
|
| 589 |
+
76,
|
| 590 |
+
290,
|
| 591 |
+
468,
|
| 592 |
+
381
|
| 593 |
+
],
|
| 594 |
+
"page_idx": 3
|
| 595 |
+
},
|
| 596 |
+
{
|
| 597 |
+
"type": "text",
|
| 598 |
+
"text": "Transformer bridging layer. Features extracted from daytime and nighttime images are with a huge gap, such domain discrepancy leads to inferior tracking performance at night. Before feeding the obtained features to the tracker head for object localization, we propose to bridge the gap between the feature distributions through a bridging layer. In consideration of the strong modeling capability of the Transformer [43] for long-range inter-independencies, we design the bridging layer with a Transformer structure. Taking the search branch as instance, positional encodings $\\mathbf{P}$ are first added to the input feature $\\varphi (\\mathbf{X})\\in \\mathbb{R}^{N\\times H\\times W}$ . Next, the summation is flattened to $(\\mathbf{P} + \\varphi (\\mathbf{X}))\\in \\mathbb{R}^{HW\\times N}$ . Multihead self-attention (MSA) is then conducted as:",
|
| 599 |
+
"bbox": [
|
| 600 |
+
76,
|
| 601 |
+
381,
|
| 602 |
+
470,
|
| 603 |
+
579
|
| 604 |
+
],
|
| 605 |
+
"page_idx": 3
|
| 606 |
+
},
|
| 607 |
+
{
|
| 608 |
+
"type": "equation",
|
| 609 |
+
"text": "\n$$\n\\begin{array}{l} \\begin{array}{c} \\widehat {\\boldsymbol {\\varphi} (\\mathbf {X})} ^ {\\prime} = \\operatorname {M S A} (\\mathbf {P} + \\varphi (\\mathbf {X})) + \\mathbf {P} + \\varphi (\\mathbf {X}) \\\\ \\widehat {\\boldsymbol {\\Phi}} ^ {\\prime} \\end{array} , \\tag {3} \\\\ \\widehat {\\varphi (\\mathbf {X})} = \\mathrm {L N} (\\mathrm {F F N} (\\mathrm {M o d} (\\mathrm {L N} (\\widehat {\\varphi (\\mathbf {X}) ^ {\\prime}}))) + \\widehat {\\varphi (\\mathbf {X}) ^ {\\prime}}) , \\\\ \\end{array}\n$$\n",
|
| 610 |
+
"text_format": "latex",
|
| 611 |
+
"bbox": [
|
| 612 |
+
86,
|
| 613 |
+
587,
|
| 614 |
+
468,
|
| 615 |
+
635
|
| 616 |
+
],
|
| 617 |
+
"page_idx": 3
|
| 618 |
+
},
|
| 619 |
+
{
|
| 620 |
+
"type": "text",
|
| 621 |
+
"text": "where $\\widehat{\\varphi(\\mathbf{X})}$ is an intermediate variable and LN indicates layer normalization. Moreover, FFN denotes the fully connected feed-forward network, which consists of two linear layers with a ReLU in between. Mod is a modulation layer in [4] to fully explore internal spatial information. The final output is flattened back to $N\\times H\\times W$ . For each head of MSA, the attention function can be formulated as:",
|
| 622 |
+
"bbox": [
|
| 623 |
+
76,
|
| 624 |
+
647,
|
| 625 |
+
468,
|
| 626 |
+
758
|
| 627 |
+
],
|
| 628 |
+
"page_idx": 3
|
| 629 |
+
},
|
| 630 |
+
{
|
| 631 |
+
"type": "equation",
|
| 632 |
+
"text": "\n$$\n\\operatorname {A t t e n t i o n} (\\mathbf {Q}, \\mathbf {K}, \\mathbf {V}) = \\operatorname {S o f t m a x} \\left(\\frac {\\mathbf {Q} \\mathbf {K} ^ {T}}{\\sqrt {d _ {k}}}\\right) \\mathbf {V}. \\tag {4}\n$$\n",
|
| 633 |
+
"text_format": "latex",
|
| 634 |
+
"bbox": [
|
| 635 |
+
114,
|
| 636 |
+
765,
|
| 637 |
+
468,
|
| 638 |
+
801
|
| 639 |
+
],
|
| 640 |
+
"page_idx": 3
|
| 641 |
+
},
|
| 642 |
+
{
|
| 643 |
+
"type": "text",
|
| 644 |
+
"text": "In our case, the queries $\\mathbf{Q}$ , keys $\\mathbf{K}$ , and values $\\mathbf{V}$ are equal to the product of $(\\mathbf{P} + \\varphi(\\mathbf{X}))$ and the corresponding projection matrix. By virtue of superior information integration of self-attention, the proposed Transformer bridging layer is adequate to modulate the nighttime object features output by the backbone for effective similarity maps.",
|
| 645 |
+
"bbox": [
|
| 646 |
+
76,
|
| 647 |
+
809,
|
| 648 |
+
470,
|
| 649 |
+
902
|
| 650 |
+
],
|
| 651 |
+
"page_idx": 3
|
| 652 |
+
},
|
| 653 |
+
{
|
| 654 |
+
"type": "image",
|
| 655 |
+
"img_path": "images/2c64d000b49189af29de30fe58ee37708fa3c1c66ff4f1d1cea4d5905f370b29.jpg",
|
| 656 |
+
"image_caption": [
|
| 657 |
+
"Figure 4. Feature visualization by t-SNE [42] of day/night similar scenes. Gold and purple indicate source and target domains, respectively. The scattergrams from top to down depict day/night features from feature extractor in the pre-trained tracker, feature extractor in the domain-adaptive tracker, and the bridging layer. The proposed Transformer bridging layer effectively narrows domain discrepancy."
|
| 658 |
+
],
|
| 659 |
+
"image_footnote": [],
|
| 660 |
+
"bbox": [
|
| 661 |
+
504,
|
| 662 |
+
90,
|
| 663 |
+
883,
|
| 664 |
+
244
|
| 665 |
+
],
|
| 666 |
+
"page_idx": 3
|
| 667 |
+
},
|
| 668 |
+
{
|
| 669 |
+
"type": "text",
|
| 670 |
+
"text": "Remark 1: Figure 4 displays the t-SNE [42] visualizations of features from feature extractor in the baseline, feature extractor in the domain-adaptive tracker, and the bridging layer. From which we can observe that features extracted by backbones have a clear discrepancy, while those modified by the bridging layer show a coincidence in distribution.",
|
| 671 |
+
"bbox": [
|
| 672 |
+
496,
|
| 673 |
+
359,
|
| 674 |
+
890,
|
| 675 |
+
450
|
| 676 |
+
],
|
| 677 |
+
"page_idx": 3
|
| 678 |
+
},
|
| 679 |
+
{
|
| 680 |
+
"type": "text",
|
| 681 |
+
"text": "Transformer discriminator. The proposed UDAT framework is trained in an adversarial learning manner. A day/night feature discriminator D is designed to facilitate aligning the source and target domain features, which consists of a gradient reverse layer (GRL) [13] and two Transformer layers. Given the modulated feature map $\\widehat{\\varphi(\\mathbf{X})}$ , the softmax function is performed and followed by a GRL:",
|
| 682 |
+
"bbox": [
|
| 683 |
+
496,
|
| 684 |
+
450,
|
| 685 |
+
890,
|
| 686 |
+
559
|
| 687 |
+
],
|
| 688 |
+
"page_idx": 3
|
| 689 |
+
},
|
| 690 |
+
{
|
| 691 |
+
"type": "equation",
|
| 692 |
+
"text": "\n$$\n\\mathbf {F} = \\operatorname {G R L} \\left(\\operatorname {S o f t m a x} \\left(\\widehat {\\varphi (\\mathbf {X})}\\right)\\right). \\tag {5}\n$$\n",
|
| 693 |
+
"text_format": "latex",
|
| 694 |
+
"bbox": [
|
| 695 |
+
586,
|
| 696 |
+
566,
|
| 697 |
+
890,
|
| 698 |
+
585
|
| 699 |
+
],
|
| 700 |
+
"page_idx": 3
|
| 701 |
+
},
|
| 702 |
+
{
|
| 703 |
+
"type": "text",
|
| 704 |
+
"text": "The intermediate feature $\\mathbf{F} \\in \\mathbb{R}^{N \\times H \\times W}$ is then passed through a convolution layer with a kernel size of $4 \\times 4$ and stride of 4 for patch embedding. $\\mathbf{F}$ is then flattened to $\\left(\\frac{H}{4} \\times \\frac{W}{4}\\right) \\times N$ and concatenated with a classification token $\\mathbf{c}$ as:",
|
| 705 |
+
"bbox": [
|
| 706 |
+
498,
|
| 707 |
+
592,
|
| 708 |
+
890,
|
| 709 |
+
656
|
| 710 |
+
],
|
| 711 |
+
"page_idx": 3
|
| 712 |
+
},
|
| 713 |
+
{
|
| 714 |
+
"type": "equation",
|
| 715 |
+
"text": "\n$$\n\\mathbf {F} ^ {\\prime} = \\operatorname {C o n c a t} (\\mathbf {c}, \\operatorname {F l a t} (\\operatorname {C o n v} (\\mathbf {F}))) \\quad . \\tag {6}\n$$\n",
|
| 716 |
+
"text_format": "latex",
|
| 717 |
+
"bbox": [
|
| 718 |
+
571,
|
| 719 |
+
661,
|
| 720 |
+
890,
|
| 721 |
+
679
|
| 722 |
+
],
|
| 723 |
+
"page_idx": 3
|
| 724 |
+
},
|
| 725 |
+
{
|
| 726 |
+
"type": "text",
|
| 727 |
+
"text": "Afterward, $\\mathbf{F}'$ is input to two Transformer layers. Ultimately, the classification token $\\mathbf{c}$ is regarded as the final predicted results. In the adversarial learning process, the discriminator is optimized to distinguish whether the features are from the source domain or the target domain correctly.",
|
| 728 |
+
"bbox": [
|
| 729 |
+
496,
|
| 730 |
+
685,
|
| 731 |
+
890,
|
| 732 |
+
761
|
| 733 |
+
],
|
| 734 |
+
"page_idx": 3
|
| 735 |
+
},
|
| 736 |
+
{
|
| 737 |
+
"type": "text",
|
| 738 |
+
"text": "Tracker head. After the Transformer bridging layer, cross-correlation operation is conducted on the modulated features $\\widehat{\\varphi(\\mathbf{X})}$ and $\\widehat{\\varphi(\\mathbf{Z})}$ to generate a similarity map. Finally, the tracker head performs the classification and regression process to predict the object position.",
|
| 739 |
+
"bbox": [
|
| 740 |
+
496,
|
| 741 |
+
761,
|
| 742 |
+
890,
|
| 743 |
+
840
|
| 744 |
+
],
|
| 745 |
+
"page_idx": 3
|
| 746 |
+
},
|
| 747 |
+
{
|
| 748 |
+
"type": "text",
|
| 749 |
+
"text": "3.3. Objective functions",
|
| 750 |
+
"text_level": 1,
|
| 751 |
+
"bbox": [
|
| 752 |
+
500,
|
| 753 |
+
847,
|
| 754 |
+
684,
|
| 755 |
+
863
|
| 756 |
+
],
|
| 757 |
+
"page_idx": 3
|
| 758 |
+
},
|
| 759 |
+
{
|
| 760 |
+
"type": "text",
|
| 761 |
+
"text": "Classification and regression loss. In the source domain training line, the classification and regression loss $\\mathcal{L}_{\\mathrm{GT}}$ be",
|
| 762 |
+
"bbox": [
|
| 763 |
+
498,
|
| 764 |
+
869,
|
| 765 |
+
890,
|
| 766 |
+
901
|
| 767 |
+
],
|
| 768 |
+
"page_idx": 3
|
| 769 |
+
},
|
| 770 |
+
{
|
| 771 |
+
"type": "page_number",
|
| 772 |
+
"text": "4",
|
| 773 |
+
"bbox": [
|
| 774 |
+
478,
|
| 775 |
+
924,
|
| 776 |
+
491,
|
| 777 |
+
936
|
| 778 |
+
],
|
| 779 |
+
"page_idx": 3
|
| 780 |
+
},
|
| 781 |
+
{
|
| 782 |
+
"type": "text",
|
| 783 |
+
"text": "tween the ground truth and the predicted results are applied to ensure the normal tracking ability of trackers. We adopt tracking loss consistent with the baseline trackers without modification.",
|
| 784 |
+
"bbox": [
|
| 785 |
+
75,
|
| 786 |
+
90,
|
| 787 |
+
468,
|
| 788 |
+
150
|
| 789 |
+
],
|
| 790 |
+
"page_idx": 4
|
| 791 |
+
},
|
| 792 |
+
{
|
| 793 |
+
"type": "text",
|
| 794 |
+
"text": "Domain adaptation loss. In adversarial learning, the least-square loss function [30] is introduced to train the generator $G$ , aiming at generating source domain-like features from target domain images to fool the discriminator $D$ while frozen. Here the generator $G$ can be regarded as the feature extractor along with the Transformer bridging layer. Considering both the template and search features, the adversarial loss is described as follows:",
|
| 795 |
+
"bbox": [
|
| 796 |
+
75,
|
| 797 |
+
151,
|
| 798 |
+
467,
|
| 799 |
+
271
|
| 800 |
+
],
|
| 801 |
+
"page_idx": 4
|
| 802 |
+
},
|
| 803 |
+
{
|
| 804 |
+
"type": "equation",
|
| 805 |
+
"text": "\n$$\n\\mathcal {L} _ {\\mathrm {a d v}} = \\left(\\mathrm {D} \\left(\\widehat {\\varphi \\left(\\mathbf {X} _ {\\mathrm {t}}\\right)}\\right) - \\ell_ {\\mathrm {s}}\\right) ^ {2} + \\left(\\mathrm {D} \\left(\\widehat {\\varphi \\left(\\mathbf {Z} _ {\\mathrm {t}}\\right)}\\right)\\right) - \\ell_ {\\mathrm {s}}\\left. \\right) ^ {2}, \\tag {7}\n$$\n",
|
| 806 |
+
"text_format": "latex",
|
| 807 |
+
"bbox": [
|
| 808 |
+
89,
|
| 809 |
+
279,
|
| 810 |
+
468,
|
| 811 |
+
301
|
| 812 |
+
],
|
| 813 |
+
"page_idx": 4
|
| 814 |
+
},
|
| 815 |
+
{
|
| 816 |
+
"type": "text",
|
| 817 |
+
"text": "where $s$ and $t$ refer to the source and the target domains, respectively. Besides, $\\ell_{s}$ denotes the label for the source domain, which has the same size as the output of D. In summary, the total training loss for the tracking network is defined as:",
|
| 818 |
+
"bbox": [
|
| 819 |
+
75,
|
| 820 |
+
311,
|
| 821 |
+
468,
|
| 822 |
+
385
|
| 823 |
+
],
|
| 824 |
+
"page_idx": 4
|
| 825 |
+
},
|
| 826 |
+
{
|
| 827 |
+
"type": "equation",
|
| 828 |
+
"text": "\n$$\n\\mathcal {L} _ {\\text {t o t a l}} = \\mathcal {L} _ {\\mathrm {G T}} + \\lambda \\mathcal {L} _ {\\mathrm {a d v}}, \\tag {8}\n$$\n",
|
| 829 |
+
"text_format": "latex",
|
| 830 |
+
"bbox": [
|
| 831 |
+
181,
|
| 832 |
+
398,
|
| 833 |
+
468,
|
| 834 |
+
414
|
| 835 |
+
],
|
| 836 |
+
"page_idx": 4
|
| 837 |
+
},
|
| 838 |
+
{
|
| 839 |
+
"type": "text",
|
| 840 |
+
"text": "where $\\lambda$ is a weight to balance the loss terms. We set $\\lambda$ as 0.01 in implementation.",
|
| 841 |
+
"bbox": [
|
| 842 |
+
75,
|
| 843 |
+
425,
|
| 844 |
+
468,
|
| 845 |
+
455
|
| 846 |
+
],
|
| 847 |
+
"page_idx": 4
|
| 848 |
+
},
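A minimal PyTorch sketch of Eqs. (7)-(8) follows, assuming the common least-squares GAN convention of labelling the source domain as 1; the helper names and the shape of the discriminator output are assumptions.

```python
import torch

LAMBDA = 0.01  # weight of the adversarial term, as set in the paper

def adversarial_loss(D, feat_search_t, feat_template_t):
    """Eq. 7: push target-domain (nighttime) template and search features to
    look like source-domain ones to the (frozen) discriminator D."""
    d_x, d_z = D(feat_search_t), D(feat_template_t)
    loss_x = ((d_x - torch.ones_like(d_x)) ** 2).mean()  # source label l_s = 1
    loss_z = ((d_z - torch.ones_like(d_z)) ** 2).mean()
    return loss_x + loss_z

def total_loss(loss_gt, D, feat_search_t, feat_template_t):
    """Eq. 8: tracking loss on labelled source data plus the weighted
    adversarial term computed on target-domain features."""
    return loss_gt + LAMBDA * adversarial_loss(D, feat_search_t, feat_template_t)
```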
|
| 849 |
+
{
|
| 850 |
+
"type": "text",
|
| 851 |
+
"text": "During the training process, the tracking network and discriminator $\\mathrm{D}$ are optimized alternatively. We define the loss function of $\\mathrm{D}$ as:",
|
| 852 |
+
"bbox": [
|
| 853 |
+
75,
|
| 854 |
+
455,
|
| 855 |
+
468,
|
| 856 |
+
500
|
| 857 |
+
],
|
| 858 |
+
"page_idx": 4
|
| 859 |
+
},
|
| 860 |
+
{
|
| 861 |
+
"type": "equation",
|
| 862 |
+
"text": "\n$$\nL _ {\\mathrm {D}} = \\sum_ {d = \\mathrm {s}, \\mathrm {t}} \\left(\\mathrm {D} \\left(\\widehat {\\varphi \\left(\\mathbf {X} _ {d}\\right)} - \\ell_ {d}\\right) ^ {2} + \\left(\\mathrm {D} \\left(\\widehat {\\varphi \\left(\\mathbf {Z} _ {d}\\right)}\\right) - \\ell_ {d}\\right) ^ {2} \\quad . \\right. \\tag {9}\n$$\n",
|
| 863 |
+
"text_format": "latex",
|
| 864 |
+
"bbox": [
|
| 865 |
+
84,
|
| 866 |
+
508,
|
| 867 |
+
468,
|
| 868 |
+
542
|
| 869 |
+
],
|
| 870 |
+
"page_idx": 4
|
| 871 |
+
},
|
| 872 |
+
{
|
| 873 |
+
"type": "text",
|
| 874 |
+
"text": "Trained with true domain labels of input features, D learns to discriminate feature domains efficiently.",
|
| 875 |
+
"bbox": [
|
| 876 |
+
75,
|
| 877 |
+
551,
|
| 878 |
+
468,
|
| 879 |
+
583
|
| 880 |
+
],
|
| 881 |
+
"page_idx": 4
|
| 882 |
+
},
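The alternating optimization can be sketched as below, with domain labels 1 (source) and 0 (target) assumed, and features detached for the discriminator update so that Eq. (9) only trains D; the function and variable names are illustrative.

```python
import torch

def discriminator_loss(D, feats_source, feats_target):
    """Eq. 9: least-squares loss training D with true domain labels over the
    template and search features of both domains."""
    pairs = [(f, 1.0) for f in feats_source] + [(f, 0.0) for f in feats_target]
    loss = 0.0
    for feat, label in pairs:
        out = D(feat.detach())            # never backprop into the tracker here
        loss = loss + ((out - label) ** 2).mean()
    return loss

def train_step(tracker_opt, disc_opt, D, loss_gt, feats_s, feats_t):
    """One alternation: update the tracking network first (Eq. 8), then the
    discriminator on detached features (Eq. 9)."""
    g_loss = loss_gt + 0.01 * sum(((D(f) - 1.0) ** 2).mean() for f in feats_t)
    tracker_opt.zero_grad()
    g_loss.backward()
    tracker_opt.step()

    d_loss = discriminator_loss(D, feats_s, feats_t)
    disc_opt.zero_grad()                  # also clears grads D received above
    d_loss.backward()
    disc_opt.step()
```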
|
| 883 |
+
{
|
| 884 |
+
"type": "text",
|
| 885 |
+
"text": "4. NAT2021 benchmark",
|
| 886 |
+
"text_level": 1,
|
| 887 |
+
"bbox": [
|
| 888 |
+
76,
|
| 889 |
+
595,
|
| 890 |
+
279,
|
| 891 |
+
612
|
| 892 |
+
],
|
| 893 |
+
"page_idx": 4
|
| 894 |
+
},
|
| 895 |
+
{
|
| 896 |
+
"type": "text",
|
| 897 |
+
"text": "The nighttime aerial tracking benchmark, namely NAT2021, is developed to give a comprehensive performance evaluation of nighttime aerial tracking and provide adequate unlabelled nighttime tracking videos for unsupervised training. Compared to the existing nighttime tracking benchmark [21] in literature, NAT2021 stands a two times larger test set, an unlabelled train set, and novel illumination-oriented attributes.",
|
| 898 |
+
"bbox": [
|
| 899 |
+
75,
|
| 900 |
+
622,
|
| 901 |
+
468,
|
| 902 |
+
742
|
| 903 |
+
],
|
| 904 |
+
"page_idx": 4
|
| 905 |
+
},
|
| 906 |
+
{
|
| 907 |
+
"type": "text",
|
| 908 |
+
"text": "4.1. Sequence collection",
|
| 909 |
+
"text_level": 1,
|
| 910 |
+
"bbox": [
|
| 911 |
+
76,
|
| 912 |
+
752,
|
| 913 |
+
263,
|
| 914 |
+
767
|
| 915 |
+
],
|
| 916 |
+
"page_idx": 4
|
| 917 |
+
},
|
| 918 |
+
{
|
| 919 |
+
"type": "text",
|
| 920 |
+
"text": "Images in NAT2021 are captured in diverse nighttime scenes (e.g., roads, urban landscapes, and campus) by a DJI Mavic Air 2 UAV $^{1}$ in a frame rate of 30 frames/s. Sequence categories consist of a wide variety of targets (e.g., cars, trucks, persons, groups, buses, buildings, and motorcycles) or activities (e.g., cycling, skating, running, and ball",
|
| 921 |
+
"bbox": [
|
| 922 |
+
75,
|
| 923 |
+
775,
|
| 924 |
+
468,
|
| 925 |
+
866
|
| 926 |
+
],
|
| 927 |
+
"page_idx": 4
|
| 928 |
+
},
|
| 929 |
+
{
|
| 930 |
+
"type": "image",
|
| 931 |
+
"img_path": "images/1e01a31ab9056e0c5ef8c4ff0e604108b51415b7751e4cf7d6042d4aefcff852.jpg",
|
| 932 |
+
"image_caption": [
|
| 933 |
+
"Figure 5. First frames of selected sequences from NAT2021-test. The green boxes mark the tracking objects, while the top-left corner of the images display sequence names."
|
| 934 |
+
],
|
| 935 |
+
"image_footnote": [],
|
| 936 |
+
"bbox": [
|
| 937 |
+
506,
|
| 938 |
+
90,
|
| 939 |
+
887,
|
| 940 |
+
238
|
| 941 |
+
],
|
| 942 |
+
"page_idx": 4
|
| 943 |
+
},
|
| 944 |
+
{
|
| 945 |
+
"type": "text",
|
| 946 |
+
"text": "games). Consequently, the test set contains 180 nighttime aerial tracking sequences with more than $140\\mathrm{k}$ frames in total, namely NAT2021-test. Figure 5 displays some first frames of selected sequences. In order to provide an evaluation of long-term tracking performance, we further build a long-term tracking subset namely NAT2021- $L$ -test consisting of 23 sequences that are longer than 1400 frames. Moreover, the training set involves 1400 unlabelled sequences with over $276\\mathrm{k}$ frames totally, which is adequate for the domain adaptive tracking task. A statistical summary of NAT2021 is presented in Tab. 1.",
|
| 947 |
+
"bbox": [
|
| 948 |
+
496,
|
| 949 |
+
306,
|
| 950 |
+
890,
|
| 951 |
+
472
|
| 952 |
+
],
|
| 953 |
+
"page_idx": 4
|
| 954 |
+
},
|
| 955 |
+
{
|
| 956 |
+
"type": "text",
|
| 957 |
+
"text": "Remark 2: Sequences in NAT2021 are recorded by ourselves using UAVs with permission. No personally identifiable information or offensive content is involved.",
|
| 958 |
+
"bbox": [
|
| 959 |
+
498,
|
| 960 |
+
473,
|
| 961 |
+
890,
|
| 962 |
+
517
|
| 963 |
+
],
|
| 964 |
+
"page_idx": 4
|
| 965 |
+
},
|
| 966 |
+
{
|
| 967 |
+
"type": "text",
|
| 968 |
+
"text": "4.2. Annotation",
|
| 969 |
+
"text_level": 1,
|
| 970 |
+
"bbox": [
|
| 971 |
+
500,
|
| 972 |
+
531,
|
| 973 |
+
624,
|
| 974 |
+
544
|
| 975 |
+
],
|
| 976 |
+
"page_idx": 4
|
| 977 |
+
},
|
| 978 |
+
{
|
| 979 |
+
"type": "text",
|
| 980 |
+
"text": "The frames in NAT2021-test and NAT2021- $L$ -test are all manually annotated by annotators familiar with object tracking. For accuracy, the annotation process is conducted on images enhanced by a low-light enhancement approach [24]. Afterward, visual inspection by experts and bounding box refinement by annotators are conducted iteratively for several rounds to ensure high-quality annotation.",
|
| 981 |
+
"bbox": [
|
| 982 |
+
496,
|
| 983 |
+
554,
|
| 984 |
+
890,
|
| 985 |
+
660
|
| 986 |
+
],
|
| 987 |
+
"page_idx": 4
|
| 988 |
+
},
|
| 989 |
+
{
|
| 990 |
+
"type": "text",
|
| 991 |
+
"text": "4.3. Attributes",
|
| 992 |
+
"text_level": 1,
|
| 993 |
+
"bbox": [
|
| 994 |
+
500,
|
| 995 |
+
672,
|
| 996 |
+
614,
|
| 997 |
+
686
|
| 998 |
+
],
|
| 999 |
+
"page_idx": 4
|
| 1000 |
+
},
|
| 1001 |
+
{
|
| 1002 |
+
"type": "text",
|
| 1003 |
+
"text": "To give an in-depth analysis of trackers, we annotate the test sequences into 12 different attributes, including aspect ratio change (ARC), background clutter (BC), camera motion (CM), fast motion (FM), partial occlusion (OCC), full",
|
| 1004 |
+
"bbox": [
|
| 1005 |
+
496,
|
| 1006 |
+
696,
|
| 1007 |
+
890,
|
| 1008 |
+
756
|
| 1009 |
+
],
|
| 1010 |
+
"page_idx": 4
|
| 1011 |
+
},
|
| 1012 |
+
{
|
| 1013 |
+
"type": "table",
|
| 1014 |
+
"img_path": "images/fe4c13f466865d54d1fac3494e1533ed22a2c25ef7f7b345c85773d50a43bf21.jpg",
|
| 1015 |
+
"table_caption": [
|
| 1016 |
+
"Table 1. Statistics of NAT2021. test: test set; L-test: long-term tracking test set; train: train set."
|
| 1017 |
+
],
|
| 1018 |
+
"table_footnote": [],
|
| 1019 |
+
"table_body": "<table><tr><td></td><td>NAT2021-test</td><td>NAT2021-L-test</td><td>NAT2021-train</td></tr><tr><td>Videos</td><td>180</td><td>23</td><td>1,400</td></tr><tr><td>Total frames</td><td>140,815</td><td>53,564</td><td>276,081</td></tr><tr><td>Min frames</td><td>81</td><td>1,425</td><td>30</td></tr><tr><td>Max frames</td><td>1,795</td><td>3,866</td><td>345</td></tr><tr><td>Avg. frames</td><td>782</td><td>2,329</td><td>197</td></tr><tr><td>Manual annotation</td><td>✓</td><td>✓</td><td></td></tr></table>",
|
| 1020 |
+
"bbox": [
|
| 1021 |
+
514,
|
| 1022 |
+
809,
|
| 1023 |
+
877,
|
| 1024 |
+
898
|
| 1025 |
+
],
|
| 1026 |
+
"page_idx": 4
|
| 1027 |
+
},
|
| 1028 |
+
{
|
| 1029 |
+
"type": "page_footnote",
|
| 1030 |
+
"text": "1More information of the UAV can be found at https://www.dji. com/cn/mavic-air-2.",
|
| 1031 |
+
"bbox": [
|
| 1032 |
+
76,
|
| 1033 |
+
875,
|
| 1034 |
+
467,
|
| 1035 |
+
898
|
| 1036 |
+
],
|
| 1037 |
+
"page_idx": 4
|
| 1038 |
+
},
|
| 1039 |
+
{
|
| 1040 |
+
"type": "page_number",
|
| 1041 |
+
"text": "5",
|
| 1042 |
+
"bbox": [
|
| 1043 |
+
478,
|
| 1044 |
+
924,
|
| 1045 |
+
488,
|
| 1046 |
+
936
|
| 1047 |
+
],
|
| 1048 |
+
"page_idx": 4
|
| 1049 |
+
},
|
| 1050 |
+
{
|
| 1051 |
+
"type": "image",
|
| 1052 |
+
"img_path": "images/3111310d9e70c0c1c5c84aba2aa21747a7bd8d97f453f4f018b79ba65091e957.jpg",
|
| 1053 |
+
"image_caption": [
|
| 1054 |
+
"Ambient intensity: 7",
|
| 1055 |
+
"Figure 6. Ambient intensity of some scenarios. With an average ambient intensity of less than 20, objects are hard to distinguish with naked eyes. Such sequences are annotated with the low ambient intensity attribute."
|
| 1056 |
+
],
|
| 1057 |
+
"image_footnote": [],
|
| 1058 |
+
"bbox": [
|
| 1059 |
+
84,
|
| 1060 |
+
89,
|
| 1061 |
+
178,
|
| 1062 |
+
162
|
| 1063 |
+
],
|
| 1064 |
+
"page_idx": 5
|
| 1065 |
+
},
|
| 1066 |
+
{
|
| 1067 |
+
"type": "image",
|
| 1068 |
+
"img_path": "images/75c3f480d5ce916ecd32c19cfab535d38408fb4e242723e8e32757026f49f151.jpg",
|
| 1069 |
+
"image_caption": [
|
| 1070 |
+
"Ambient intensity: 12"
|
| 1071 |
+
],
|
| 1072 |
+
"image_footnote": [],
|
| 1073 |
+
"bbox": [
|
| 1074 |
+
179,
|
| 1075 |
+
90,
|
| 1076 |
+
272,
|
| 1077 |
+
162
|
| 1078 |
+
],
|
| 1079 |
+
"page_idx": 5
|
| 1080 |
+
},
|
| 1081 |
+
{
|
| 1082 |
+
"type": "image",
|
| 1083 |
+
"img_path": "images/36e17b0cb098304362f29dca41c2157b1731ad096e123ca8a5f5598f41da08bf.jpg",
|
| 1084 |
+
"image_caption": [
|
| 1085 |
+
"Ambient intensity: 26"
|
| 1086 |
+
],
|
| 1087 |
+
"image_footnote": [],
|
| 1088 |
+
"bbox": [
|
| 1089 |
+
274,
|
| 1090 |
+
90,
|
| 1091 |
+
367,
|
| 1092 |
+
162
|
| 1093 |
+
],
|
| 1094 |
+
"page_idx": 5
|
| 1095 |
+
},
|
| 1096 |
+
{
|
| 1097 |
+
"type": "image",
|
| 1098 |
+
"img_path": "images/f5c3720bc654d481e51df88a8b94de1661311289866b4c914740340b56d78080.jpg",
|
| 1099 |
+
"image_caption": [
|
| 1100 |
+
"Ambient intensity: 36"
|
| 1101 |
+
],
|
| 1102 |
+
"image_footnote": [],
|
| 1103 |
+
"bbox": [
|
| 1104 |
+
370,
|
| 1105 |
+
90,
|
| 1106 |
+
462,
|
| 1107 |
+
162
|
| 1108 |
+
],
|
| 1109 |
+
"page_idx": 5
|
| 1110 |
+
},
|
| 1111 |
+
{
|
| 1112 |
+
"type": "text",
|
| 1113 |
+
"text": "occlusion (FOC), out-of-view (OV), scale variation (SV), similar object (SOB), viewpoint change (VC), illumination variation (IV), and low ambient intensity (LAI). In particular, to take a closer look at how illumination influences trackers, we rethink and redesign the illumination-related attributes. Concretely, the average pixel intensity of the local region centered at the object is computed and regarded as the illuminance intensity of the current frame. The average illuminance level of a sequence is considered the ambient intensity of the tracking scene. Sequences with different ambient intensities are displayed in Fig. 6, we observe that objects are hard to distinguish with naked eyes with an ambient intensity of less than 20. Therefore, such sequences are labelled with the LAI attribute.",
|
| 1114 |
+
"bbox": [
|
| 1115 |
+
75,
|
| 1116 |
+
258,
|
| 1117 |
+
467,
|
| 1118 |
+
469
|
| 1119 |
+
],
|
| 1120 |
+
"page_idx": 5
|
| 1121 |
+
},
|
| 1122 |
+
{
|
| 1123 |
+
"type": "text",
|
| 1124 |
+
"text": "Remark 3: In contrast to annotating the attribute of IV intuitively as previous tracking benchmarks do, this work judges IV according to the maximum difference of the illuminance intensity across a tracking sequence. More details of the attributes can be found in supplementary material.",
|
| 1125 |
+
"bbox": [
|
| 1126 |
+
75,
|
| 1127 |
+
472,
|
| 1128 |
+
467,
|
| 1129 |
+
547
|
| 1130 |
+
],
|
| 1131 |
+
"page_idx": 5
|
| 1132 |
+
},
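A minimal sketch of how these illumination attributes could be computed is given below. The LAI threshold of 20 follows the text; the context factor around the object and the IV threshold are placeholder assumptions.

```python
import numpy as np

def frame_illuminance(gray, box, context=2.0):
    """Average pixel intensity of a local region centred at the object.
    gray: (H, W) uint8 frame; box: (x, y, w, h) object bounding box."""
    x, y, w, h = box
    cx, cy = x + w / 2.0, y + h / 2.0
    hw, hh = context * w / 2.0, context * h / 2.0
    x0, x1 = int(max(cx - hw, 0)), int(min(cx + hw, gray.shape[1]))
    y0, y1 = int(max(cy - hh, 0)), int(min(cy + hh, gray.shape[0]))
    return float(gray[y0:y1, x0:x1].mean())

def sequence_attributes(illuminances, lai_thresh=20, iv_thresh=30):
    """LAI: average (ambient) intensity below 20, as in the paper.
    IV: maximum illuminance difference across the sequence above a
    threshold (the value 30 is a placeholder assumption)."""
    ambient = float(np.mean(illuminances))
    return {
        "ambient_intensity": ambient,
        "LAI": ambient < lai_thresh,
        "IV": (max(illuminances) - min(illuminances)) > iv_thresh,
    }
```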
|
| 1133 |
+
{
|
| 1134 |
+
"type": "text",
|
| 1135 |
+
"text": "Moreover, we evaluate current top-ranked trackers on the proposed benchmark (see Sec. 5.2), the results show that SOTA trackers can hardly yield satisfactory performance at a nighttime aerial view as in daytime benchmarks.",
|
| 1136 |
+
"bbox": [
|
| 1137 |
+
75,
|
| 1138 |
+
549,
|
| 1139 |
+
467,
|
| 1140 |
+
608
|
| 1141 |
+
],
|
| 1142 |
+
"page_idx": 5
|
| 1143 |
+
},
|
| 1144 |
+
{
|
| 1145 |
+
"type": "text",
|
| 1146 |
+
"text": "5. Experiments",
|
| 1147 |
+
"text_level": 1,
|
| 1148 |
+
"bbox": [
|
| 1149 |
+
76,
|
| 1150 |
+
625,
|
| 1151 |
+
207,
|
| 1152 |
+
641
|
| 1153 |
+
],
|
| 1154 |
+
"page_idx": 5
|
| 1155 |
+
},
|
| 1156 |
+
{
|
| 1157 |
+
"type": "text",
|
| 1158 |
+
"text": "5.1. Implementation details",
|
| 1159 |
+
"text_level": 1,
|
| 1160 |
+
"bbox": [
|
| 1161 |
+
76,
|
| 1162 |
+
650,
|
| 1163 |
+
290,
|
| 1164 |
+
666
|
| 1165 |
+
],
|
| 1166 |
+
"page_idx": 5
|
| 1167 |
+
},
|
| 1168 |
+
{
|
| 1169 |
+
"type": "text",
|
| 1170 |
+
"text": "We implement our UDAT framework using PyTorch on an NVIDIA RTX A6000 GPU. The discriminator is trained using the Adam [20] optimizer. The base learning rate is set to 0.005 and is decayed following the poly learning rate policy with a power of 0.8. The bridging layer adopts a base learning rate of 0.005 and is optimized with the baseline tracker. The whole training process lasts 20 epochs. The top-ranked trackers [8, 14] in the proposed benchmark are adopted as baselines. To achieve faster convergence, tracking models pre-trained on general datasets [10, 17, 28, 32, 34] are served as the baseline models. For fairness, tracking datasets [17, 32] that the pre-trained models learned on are adopted and no new datasets are introduced in the source domain. We adopt the one-pass evaluation (OPE) and rank performances using success rate, precision, and normalized",
|
| 1171 |
+
"bbox": [
|
| 1172 |
+
75,
|
| 1173 |
+
674,
|
| 1174 |
+
467,
|
| 1175 |
+
900
|
| 1176 |
+
],
|
| 1177 |
+
"page_idx": 5
|
| 1178 |
+
},
|
| 1179 |
+
{
|
| 1180 |
+
"type": "text",
|
| 1181 |
+
"text": "precision. Evaluation metric definitions and more experiments can be found in the supplementary material.",
|
| 1182 |
+
"bbox": [
|
| 1183 |
+
498,
|
| 1184 |
+
90,
|
| 1185 |
+
888,
|
| 1186 |
+
121
|
| 1187 |
+
],
|
| 1188 |
+
"page_idx": 5
|
| 1189 |
+
},
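For reference, the poly learning-rate policy mentioned above can be sketched as follows; the settings mirror the paper (base rate 0.005, power 0.8, 20 epochs), while attaching it to a concrete optimizer is left as an assumption.

```python
def poly_lr(base_lr, epoch, max_epochs, power=0.8):
    """Poly policy: lr = base_lr * (1 - epoch / max_epochs) ** power."""
    return base_lr * (1.0 - epoch / max_epochs) ** power

for epoch in range(20):
    lr = poly_lr(0.005, epoch, 20)
    # for group in optimizer.param_groups:   # hypothetical optimizer wiring
    #     group["lr"] = lr
```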
|
| 1190 |
+
{
|
| 1191 |
+
"type": "text",
|
| 1192 |
+
"text": "5.2. Evaluation results",
|
| 1193 |
+
"text_level": 1,
|
| 1194 |
+
"bbox": [
|
| 1195 |
+
500,
|
| 1196 |
+
132,
|
| 1197 |
+
673,
|
| 1198 |
+
146
|
| 1199 |
+
],
|
| 1200 |
+
"page_idx": 5
|
| 1201 |
+
},
|
| 1202 |
+
{
|
| 1203 |
+
"type": "text",
|
| 1204 |
+
"text": "To give an exhaustive analysis of trackers in nighttime aerial tracking and facilitate future research, 20 SOTA trackers [1,4,5,8,11,14,15,22,29,39,44,47,51,53,54,56] are evaluated on NAT2021-test, along with the proposed UDAT. For clarity, two trackers further trained by UDAT are named UDAT-BAN and UDAT-CAR, respectively. Moreover, UAVDark70 [21] contains 70 nighttime tracking sequences with 66k frames in total, which can also serve as an evaluation benchmark.",
|
| 1205 |
+
"bbox": [
|
| 1206 |
+
496,
|
| 1207 |
+
155,
|
| 1208 |
+
890,
|
| 1209 |
+
290
|
| 1210 |
+
],
|
| 1211 |
+
"page_idx": 5
|
| 1212 |
+
},
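As a reminder of what these OPE metrics measure (formal definitions are deferred to the supplementary material), a minimal NumPy sketch is given below; the (x, y, w, h) box format and the 20-pixel precision threshold are conventional assumptions.

```python
import numpy as np

def iou(a, b):
    """Per-frame IoU of (N, 4) box arrays in (x, y, w, h) format."""
    x0 = np.maximum(a[:, 0], b[:, 0])
    y0 = np.maximum(a[:, 1], b[:, 1])
    x1 = np.minimum(a[:, 0] + a[:, 2], b[:, 0] + b[:, 2])
    y1 = np.minimum(a[:, 1] + a[:, 3], b[:, 1] + b[:, 3])
    inter = np.clip(x1 - x0, 0, None) * np.clip(y1 - y0, 0, None)
    return inter / (a[:, 2] * a[:, 3] + b[:, 2] * b[:, 3] - inter)

def success_auc(pred, gt):
    """Success: fraction of frames whose overlap exceeds each threshold in
    [0, 1], averaged over thresholds (area under the success curve)."""
    overlaps = iou(pred, gt)
    return float(np.mean([(overlaps > t).mean() for t in np.linspace(0, 1, 21)]))

def precision_at(pred, gt, pixels=20):
    """Precision: fraction of frames whose predicted centre lies within
    `pixels` of the ground-truth centre."""
    cp = pred[:, :2] + pred[:, 2:] / 2.0
    cg = gt[:, :2] + gt[:, 2:] / 2.0
    return float((np.linalg.norm(cp - cg, axis=1) <= pixels).mean())
```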
|
| 1213 |
+
{
|
| 1214 |
+
"type": "text",
|
| 1215 |
+
"text": "5.2.1 Overall performance",
|
| 1216 |
+
"text_level": 1,
|
| 1217 |
+
"bbox": [
|
| 1218 |
+
498,
|
| 1219 |
+
311,
|
| 1220 |
+
699,
|
| 1221 |
+
325
|
| 1222 |
+
],
|
| 1223 |
+
"page_idx": 5
|
| 1224 |
+
},
|
| 1225 |
+
{
|
| 1226 |
+
"type": "text",
|
| 1227 |
+
"text": "NAT2021-test. As shown in Fig. 7 (a), the proposed UDAT-BAN and UDAT-CAR rank first two places with a large margin compared to their baselines. A performance comparison of UDAT and baseline trackers is reported in Tab. 2. Specifically, UDAT promotes SiamBAN over $7\\%$ on all three metrics. In success rate, UDAT-BAN (0.469) and UDAT-CAR (0.483) raise the original SiamBAN (0.437) and SiamCAR (0.453) by $7.32\\%$ and $6.62\\%$ , respectively.",
|
| 1228 |
+
"bbox": [
|
| 1229 |
+
496,
|
| 1230 |
+
335,
|
| 1231 |
+
890,
|
| 1232 |
+
455
|
| 1233 |
+
],
|
| 1234 |
+
"page_idx": 5
|
| 1235 |
+
},
|
| 1236 |
+
{
|
| 1237 |
+
"type": "text",
|
| 1238 |
+
"text": "UAVDark70. Results in Fig. 7 (b) demonstrate that the performance of existing trackers is still unsatisfactory. UDAT trackers raise the performance of their baselines by $\\sim 4\\%$ . Note that the data distribution in UAVDark70 is fairly different from that in NAT2021, while UDAT can still bring favorable performance gains, which demonstrate its generalization ability in variant nighttime conditions.",
|
| 1239 |
+
"bbox": [
|
| 1240 |
+
496,
|
| 1241 |
+
455,
|
| 1242 |
+
890,
|
| 1243 |
+
561
|
| 1244 |
+
],
|
| 1245 |
+
"page_idx": 5
|
| 1246 |
+
},
|
| 1247 |
+
{
|
| 1248 |
+
"type": "text",
|
| 1249 |
+
"text": "Gains brought by UDAT for different trackers on different benchmarks verify the effectiveness and transferability of the proposed domain adaptation framework.",
|
| 1250 |
+
"bbox": [
|
| 1251 |
+
496,
|
| 1252 |
+
563,
|
| 1253 |
+
888,
|
| 1254 |
+
608
|
| 1255 |
+
],
|
| 1256 |
+
"page_idx": 5
|
| 1257 |
+
},
|
| 1258 |
+
{
|
| 1259 |
+
"type": "text",
|
| 1260 |
+
"text": "5.2.2 Long-term tracking evaluation",
|
| 1261 |
+
"text_level": 1,
|
| 1262 |
+
"bbox": [
|
| 1263 |
+
498,
|
| 1264 |
+
628,
|
| 1265 |
+
767,
|
| 1266 |
+
643
|
| 1267 |
+
],
|
| 1268 |
+
"page_idx": 5
|
| 1269 |
+
},
|
| 1270 |
+
{
|
| 1271 |
+
"type": "text",
|
| 1272 |
+
"text": "As one of the most common scenes in aerial tracking, long-term tracking involves multiple challenging attributes. We further assess trackers on NAT2021-L-test. Top-10 performances are reported in Tab. 3. Results show that UDAT realizes competitive long-term tracking performances, considerably arousing the performance upon baseline trackers.",
|
| 1273 |
+
"bbox": [
|
| 1274 |
+
496,
|
| 1275 |
+
652,
|
| 1276 |
+
888,
|
| 1277 |
+
743
|
| 1278 |
+
],
|
| 1279 |
+
"page_idx": 5
|
| 1280 |
+
},
|
| 1281 |
+
{
|
| 1282 |
+
"type": "table",
|
| 1283 |
+
"img_path": "images/79deda2ffabd16dbd21d8d3ab51a51ba8c86543efbddf74814d962e0e24940ee.jpg",
|
| 1284 |
+
"table_caption": [
|
| 1285 |
+
"Table 2. Performance comparison of UDAT and baseline trackers. $\\Delta$ denotes gains of percentages brought by UDAT."
|
| 1286 |
+
],
|
| 1287 |
+
"table_footnote": [],
|
| 1288 |
+
"table_body": "<table><tr><td rowspan=\"2\">Benchmarks</td><td colspan=\"4\">NAT2021-test</td><td colspan=\"2\">UAVDark70</td></tr><tr><td>Prec.</td><td>Norm. Prec.</td><td>Succ.</td><td>Prec.</td><td>Norm. Prec.</td><td>Succ.</td></tr><tr><td>SiamCAR</td><td>0.663</td><td>0.542</td><td>0.453</td><td>0.669</td><td>0.580</td><td>0.491</td></tr><tr><td>UDAT-CAR</td><td>0.687</td><td>0.564</td><td>0.483</td><td>0.695</td><td>0.592</td><td>0.512</td></tr><tr><td>ΔCAR (%)</td><td>3.62</td><td>4.06</td><td>6.62</td><td>3.89</td><td>2.07</td><td>4.28</td></tr><tr><td>SiamBAN</td><td>0.647</td><td>0.509</td><td>0.437</td><td>0.677</td><td>0.570</td><td>0.489</td></tr><tr><td>UDAT-BAN</td><td>0.694</td><td>0.546</td><td>0.469</td><td>0.702</td><td>0.597</td><td>0.510</td></tr><tr><td>ΔBAN (%)</td><td>7.26</td><td>7.27</td><td>7.32</td><td>3.69</td><td>4.74</td><td>4.29</td></tr></table>",
|
| 1289 |
+
"bbox": [
|
| 1290 |
+
513,
|
| 1291 |
+
792,
|
| 1292 |
+
879,
|
| 1293 |
+
898
|
| 1294 |
+
],
|
| 1295 |
+
"page_idx": 5
|
| 1296 |
+
},
|
| 1297 |
+
{
|
| 1298 |
+
"type": "page_number",
|
| 1299 |
+
"text": "6",
|
| 1300 |
+
"bbox": [
|
| 1301 |
+
478,
|
| 1302 |
+
925,
|
| 1303 |
+
490,
|
| 1304 |
+
936
|
| 1305 |
+
],
|
| 1306 |
+
"page_idx": 5
|
| 1307 |
+
},
|
| 1308 |
+
{
|
| 1309 |
+
"type": "image",
|
| 1310 |
+
"img_path": "images/db05bb5b01e72b6603432d801ef44ec4e00bcc8dfc4250ea1d3b863364eda4fc.jpg",
|
| 1311 |
+
"image_caption": [
|
| 1312 |
+
"(a) Precision, normalized precision, and success plots on NAT2021-test."
|
| 1313 |
+
],
|
| 1314 |
+
"image_footnote": [],
|
| 1315 |
+
"bbox": [
|
| 1316 |
+
86,
|
| 1317 |
+
88,
|
| 1318 |
+
354,
|
| 1319 |
+
223
|
| 1320 |
+
],
|
| 1321 |
+
"page_idx": 6
|
| 1322 |
+
},
|
| 1323 |
+
{
|
| 1324 |
+
"type": "image",
|
| 1325 |
+
"img_path": "images/5c57e5e64f0bef07004650e0446994e257aa9bbbb700dd627a82b14c6c6bb19e.jpg",
|
| 1326 |
+
"image_caption": [],
|
| 1327 |
+
"image_footnote": [],
|
| 1328 |
+
"bbox": [
|
| 1329 |
+
354,
|
| 1330 |
+
88,
|
| 1331 |
+
589,
|
| 1332 |
+
223
|
| 1333 |
+
],
|
| 1334 |
+
"page_idx": 6
|
| 1335 |
+
},
|
| 1336 |
+
{
|
| 1337 |
+
"type": "image",
|
| 1338 |
+
"img_path": "images/6e89a6e220f4f96d57f8c4951da258371a285668896fb0e2b27fe0b7f0a8ae40.jpg",
|
| 1339 |
+
"image_caption": [],
|
| 1340 |
+
"image_footnote": [],
|
| 1341 |
+
"bbox": [
|
| 1342 |
+
591,
|
| 1343 |
+
88,
|
| 1344 |
+
887,
|
| 1345 |
+
223
|
| 1346 |
+
],
|
| 1347 |
+
"page_idx": 6
|
| 1348 |
+
},
|
| 1349 |
+
{
|
| 1350 |
+
"type": "image",
|
| 1351 |
+
"img_path": "images/10ab92d5b6d5c9b243fe982775d3b9607e4d84c435002bfafa0acc643e99472c.jpg",
|
| 1352 |
+
"image_caption": [
|
| 1353 |
+
"(b) Precision, normalized precision, and success plots on UAVDark70."
|
| 1354 |
+
],
|
| 1355 |
+
"image_footnote": [],
|
| 1356 |
+
"bbox": [
|
| 1357 |
+
86,
|
| 1358 |
+
239,
|
| 1359 |
+
354,
|
| 1360 |
+
375
|
| 1361 |
+
],
|
| 1362 |
+
"page_idx": 6
|
| 1363 |
+
},
|
| 1364 |
+
{
|
| 1365 |
+
"type": "image",
|
| 1366 |
+
"img_path": "images/cc7543a14b0bd04fcaed20090c57b29077633455ce0ee74f6851b634d975cc2a.jpg",
|
| 1367 |
+
"image_caption": [
|
| 1368 |
+
"Figure 7. Overall performance of SOTA trackers and UDAT on nighttime aerial tracking benchmarks. The results show that the proposed UDAT trackers realize top-ranked performance and improve baseline trackers favorably."
|
| 1369 |
+
],
|
| 1370 |
+
"image_footnote": [],
|
| 1371 |
+
"bbox": [
|
| 1372 |
+
354,
|
| 1373 |
+
239,
|
| 1374 |
+
620,
|
| 1375 |
+
375
|
| 1376 |
+
],
|
| 1377 |
+
"page_idx": 6
|
| 1378 |
+
},
|
| 1379 |
+
{
|
| 1380 |
+
"type": "image",
|
| 1381 |
+
"img_path": "images/0f9cc47fde20eec771dde98ef856744a5ee6b955e0d638a33b7643c0b245d979.jpg",
|
| 1382 |
+
"image_caption": [],
|
| 1383 |
+
"image_footnote": [],
|
| 1384 |
+
"bbox": [
|
| 1385 |
+
620,
|
| 1386 |
+
239,
|
| 1387 |
+
887,
|
| 1388 |
+
375
|
| 1389 |
+
],
|
| 1390 |
+
"page_idx": 6
|
| 1391 |
+
},
|
| 1392 |
+
{
|
| 1393 |
+
"type": "image",
|
| 1394 |
+
"img_path": "images/6d16acf63a6f8a9e99c867532af6f2087e21fab98050d0540ce14841a9f90f31.jpg",
|
| 1395 |
+
"image_caption": [],
|
| 1396 |
+
"image_footnote": [],
|
| 1397 |
+
"bbox": [
|
| 1398 |
+
86,
|
| 1399 |
+
428,
|
| 1400 |
+
282,
|
| 1401 |
+
521
|
| 1402 |
+
],
|
| 1403 |
+
"page_idx": 6
|
| 1404 |
+
},
|
| 1405 |
+
{
|
| 1406 |
+
"type": "image",
|
| 1407 |
+
"img_path": "images/dd2da88c5e2e5071b10bdf15a5230b7812d3727a6e0102daea2ce24d345473a7.jpg",
|
| 1408 |
+
"image_caption": [],
|
| 1409 |
+
"image_footnote": [],
|
| 1410 |
+
"bbox": [
|
| 1411 |
+
282,
|
| 1412 |
+
428,
|
| 1413 |
+
483,
|
| 1414 |
+
521
|
| 1415 |
+
],
|
| 1416 |
+
"page_idx": 6
|
| 1417 |
+
},
|
| 1418 |
+
{
|
| 1419 |
+
"type": "image",
|
| 1420 |
+
"img_path": "images/bc0e196fe28cb987b92eb798c6436355a522bf46b683239c392a2e501dba9101.jpg",
|
| 1421 |
+
"image_caption": [],
|
| 1422 |
+
"image_footnote": [],
|
| 1423 |
+
"bbox": [
|
| 1424 |
+
488,
|
| 1425 |
+
428,
|
| 1426 |
+
687,
|
| 1427 |
+
521
|
| 1428 |
+
],
|
| 1429 |
+
"page_idx": 6
|
| 1430 |
+
},
|
| 1431 |
+
{
|
| 1432 |
+
"type": "image",
|
| 1433 |
+
"img_path": "images/a3ce167f0c03366f406c8f1eb390481432f75d68353e1ce258e97b32c1c157e5.jpg",
|
| 1434 |
+
"image_caption": [],
|
| 1435 |
+
"image_footnote": [],
|
| 1436 |
+
"bbox": [
|
| 1437 |
+
689,
|
| 1438 |
+
428,
|
| 1439 |
+
888,
|
| 1440 |
+
521
|
| 1441 |
+
],
|
| 1442 |
+
"page_idx": 6
|
| 1443 |
+
},
|
| 1444 |
+
{
|
| 1445 |
+
"type": "image",
|
| 1446 |
+
"img_path": "images/ca3f2d4d32e2426ea6a04c8243128b1f24f8e9e7c69ea83b7510e32800f559be.jpg",
|
| 1447 |
+
"image_caption": [
|
| 1448 |
+
"(a) Illumination variation on NAT2021-test."
|
| 1449 |
+
],
|
| 1450 |
+
"image_footnote": [],
|
| 1451 |
+
"bbox": [
|
| 1452 |
+
86,
|
| 1453 |
+
537,
|
| 1454 |
+
284,
|
| 1455 |
+
631
|
| 1456 |
+
],
|
| 1457 |
+
"page_idx": 6
|
| 1458 |
+
},
|
| 1459 |
+
{
|
| 1460 |
+
"type": "image",
|
| 1461 |
+
"img_path": "images/6ac6b75d1ae66b58e774fbe550493dc19f339b94d692aea056c6e2b356f948ab.jpg",
|
| 1462 |
+
"image_caption": [
|
| 1463 |
+
"(c) Illumination variation on UAVDark70."
|
| 1464 |
+
],
|
| 1465 |
+
"image_footnote": [],
|
| 1466 |
+
"bbox": [
|
| 1467 |
+
284,
|
| 1468 |
+
537,
|
| 1469 |
+
483,
|
| 1470 |
+
631
|
| 1471 |
+
],
|
| 1472 |
+
"page_idx": 6
|
| 1473 |
+
},
|
| 1474 |
+
{
|
| 1475 |
+
"type": "image",
|
| 1476 |
+
"img_path": "images/8ca96d045082761501a190c438c3ce4c57ac451edf953922d49a0d8a603fd661.jpg",
|
| 1477 |
+
"image_caption": [
|
| 1478 |
+
"(b) Low ambient intensity on NAT2021-test.",
|
| 1479 |
+
"(d) Low ambient intensity on UAVDark70.",
|
| 1480 |
+
"Figure 8. Normalized precision plots and success plots of illumination-related attributes on NAT2021-test and UAVDark70."
|
| 1481 |
+
],
|
| 1482 |
+
"image_footnote": [],
|
| 1483 |
+
"bbox": [
|
| 1484 |
+
488,
|
| 1485 |
+
537,
|
| 1486 |
+
687,
|
| 1487 |
+
631
|
| 1488 |
+
],
|
| 1489 |
+
"page_idx": 6
|
| 1490 |
+
},
|
| 1491 |
+
{
|
| 1492 |
+
"type": "image",
|
| 1493 |
+
"img_path": "images/a1c7337154b35c1f651a856bf8bf174c6eef53d82a0816ab8623422a6898e0f6.jpg",
|
| 1494 |
+
"image_caption": [],
|
| 1495 |
+
"image_footnote": [],
|
| 1496 |
+
"bbox": [
|
| 1497 |
+
689,
|
| 1498 |
+
537,
|
| 1499 |
+
887,
|
| 1500 |
+
631
|
| 1501 |
+
],
|
| 1502 |
+
"page_idx": 6
|
| 1503 |
+
},
|
| 1504 |
+
{
|
| 1505 |
+
"type": "table",
|
| 1506 |
+
"img_path": "images/2d0c30906dbc786edb56ac6d4e73760ffcb0bf999717d8ee16e07f829d1df1c6.jpg",
|
| 1507 |
+
"table_caption": [
|
| 1508 |
+
"Table 3. Performance of top-10 trackers on NAT2021-L-test. $\\Delta$ represents the percentages of UDAT trackers exceeding the corresponding baselines. The top-2 performance is emphasized with bold font. UDAT trackers yield competitive long-term tracking performance."
|
| 1509 |
+
],
|
| 1510 |
+
"table_footnote": [],
|
| 1511 |
+
"table_body": "<table><tr><td>Trackers</td><td>HiFT [4]</td><td>SiamFC++ [47]</td><td>Ocean [54]</td><td>SiamRPN++ [22]</td><td>UpdateNet [51]</td><td>D3S [29]</td><td>SiamBAN [8]</td><td>SiamCAR [14]</td><td>UDAT-BAN</td><td>UDAT-CAR</td><td>ΔBAN(%)</td><td>ΔCAR(%)</td></tr><tr><td>Prec.</td><td>0.433</td><td>0.425</td><td>0.454</td><td>0.431</td><td>0.434</td><td>0.492</td><td>0.464</td><td>0.477</td><td>0.496</td><td>0.506</td><td>6.94</td><td>5.99</td></tr><tr><td>Norm. Prec.</td><td>0.316</td><td>0.344</td><td>0.370</td><td>0.342</td><td>0.314</td><td>0.364</td><td>0.366</td><td>0.375</td><td>0.406</td><td>0.413</td><td>11.01</td><td>9.96</td></tr><tr><td>Succ.</td><td>0.287</td><td>0.297</td><td>0.315</td><td>0.299</td><td>0.275</td><td>0.332</td><td>0.316</td><td>0.330</td><td>0.352</td><td>0.376</td><td>11.51</td><td>14.25</td></tr></table>",
|
| 1512 |
+
"bbox": [
|
| 1513 |
+
80,
|
| 1514 |
+
699,
|
| 1515 |
+
890,
|
| 1516 |
+
752
|
| 1517 |
+
],
|
| 1518 |
+
"page_idx": 6
|
| 1519 |
+
},
|
| 1520 |
+
{
|
| 1521 |
+
"type": "text",
|
| 1522 |
+
"text": "5.2.3 Illumination-oriented evaluation",
|
| 1523 |
+
"text_level": 1,
|
| 1524 |
+
"bbox": [
|
| 1525 |
+
76,
|
| 1526 |
+
767,
|
| 1527 |
+
357,
|
| 1528 |
+
781
|
| 1529 |
+
],
|
| 1530 |
+
"page_idx": 6
|
| 1531 |
+
},
|
| 1532 |
+
{
|
| 1533 |
+
"type": "text",
|
| 1534 |
+
"text": "Since the greatest difference between daytime and nighttime tracking is illumination intensity, we perform an in-depth illumination-oriented evaluation for a better analysis of illumination influence on trackers. The results are shown in Fig. 8. Note that we additionally annotate sequences in UAVDark70 with the proposed LAI attribute. The results show that existing trackers suffer from illumination-related",
|
| 1535 |
+
"bbox": [
|
| 1536 |
+
75,
|
| 1537 |
+
794,
|
| 1538 |
+
470,
|
| 1539 |
+
900
|
| 1540 |
+
],
|
| 1541 |
+
"page_idx": 6
|
| 1542 |
+
},
|
| 1543 |
+
{
|
| 1544 |
+
"type": "text",
|
| 1545 |
+
"text": "attributes. For the IV challenge, the best success rates of existing trackers are 0.408 on NAT2021-test and 0.468 on UAVDark70. Assisted by the proposed domain adaptive training, UDAT-CAR realizes a success rate of 0.442 and 0.485, respectively, which fairly improve the existing best performance. As for LAI, UDAT-BAN raises the normalized precision of its baseline SiamBAN by over $9\\%$ on both benchmarks. From the comparison, we can see that track-",
|
| 1546 |
+
"bbox": [
|
| 1547 |
+
496,
|
| 1548 |
+
767,
|
| 1549 |
+
893,
|
| 1550 |
+
888
|
| 1551 |
+
],
|
| 1552 |
+
"page_idx": 6
|
| 1553 |
+
},
|
| 1554 |
+
{
|
| 1555 |
+
"type": "page_number",
|
| 1556 |
+
"text": "7",
|
| 1557 |
+
"bbox": [
|
| 1558 |
+
478,
|
| 1559 |
+
924,
|
| 1560 |
+
490,
|
| 1561 |
+
935
|
| 1562 |
+
],
|
| 1563 |
+
"page_idx": 6
|
| 1564 |
+
},
|
| 1565 |
+
{
|
| 1566 |
+
"type": "text",
|
| 1567 |
+
"text": "ers' illumination-related performance remains a large room for improvement and the adoption of domain adaptation in adverse illumination scenes is effective and crucial.",
|
| 1568 |
+
"bbox": [
|
| 1569 |
+
76,
|
| 1570 |
+
90,
|
| 1571 |
+
468,
|
| 1572 |
+
136
|
| 1573 |
+
],
|
| 1574 |
+
"page_idx": 7
|
| 1575 |
+
},
|
| 1576 |
+
{
|
| 1577 |
+
"type": "text",
|
| 1578 |
+
"text": "5.2.4 Visualization",
|
| 1579 |
+
"text_level": 1,
|
| 1580 |
+
"bbox": [
|
| 1581 |
+
76,
|
| 1582 |
+
156,
|
| 1583 |
+
222,
|
| 1584 |
+
169
|
| 1585 |
+
],
|
| 1586 |
+
"page_idx": 7
|
| 1587 |
+
},
|
| 1588 |
+
{
|
| 1589 |
+
"type": "text",
|
| 1590 |
+
"text": "As shown in Fig. 9, we visualized some confidence maps of UDAT and its baseline using Grad-Cam [38]. The baseline model fails to concentrate on objects in adverse illuminance, while UDAT substantially enhances the baseline's nighttime perception ability, thus yielding satisfying nighttime tracking performance.",
|
| 1591 |
+
"bbox": [
|
| 1592 |
+
76,
|
| 1593 |
+
180,
|
| 1594 |
+
467,
|
| 1595 |
+
270
|
| 1596 |
+
],
|
| 1597 |
+
"page_idx": 7
|
| 1598 |
+
},
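For readers unfamiliar with Grad-CAM [38], a minimal sketch of the underlying computation is given below; the model interface (returning a confidence map) and the choice of target layer are assumptions, and this is not the authors' visualization code.

```python
import torch

def grad_cam(model, image, target_layer):
    """Weight each channel of the target layer's activation by the spatially
    averaged gradient of the peak confidence, then ReLU the weighted sum."""
    acts, grads = {}, {}
    h1 = target_layer.register_forward_hook(lambda m, i, o: acts.update(a=o))
    h2 = target_layer.register_full_backward_hook(
        lambda m, gi, go: grads.update(g=go[0]))

    score_map = model(image)       # e.g. the tracker's classification confidence
    score_map.max().backward()     # gradient of the strongest response
    h1.remove()
    h2.remove()

    weights = grads["g"].mean(dim=(2, 3), keepdim=True)   # GAP over space
    cam = torch.relu((weights * acts["a"]).sum(dim=1))    # (B, H, W)
    return cam / (cam.max() + 1e-6)                       # normalised for display
```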
|
| 1599 |
+
{
|
| 1600 |
+
"type": "text",
|
| 1601 |
+
"text": "5.2.5 Source domain evaluation",
|
| 1602 |
+
"text_level": 1,
|
| 1603 |
+
"bbox": [
|
| 1604 |
+
76,
|
| 1605 |
+
291,
|
| 1606 |
+
310,
|
| 1607 |
+
305
|
| 1608 |
+
],
|
| 1609 |
+
"page_idx": 7
|
| 1610 |
+
},
|
| 1611 |
+
{
|
| 1612 |
+
"type": "text",
|
| 1613 |
+
"text": "Apart from favorable performance at nighttime, we expect that trackers do not suffer degradation at the source domain during adaptation. Evaluation on a daytime tracking benchmark UAV123 [31] is shown in Tab. 4. The results show that UDAT brings slight performance fluctuation within $2\\%$ in success rate and $0.5\\%$ in precision.",
|
| 1614 |
+
"bbox": [
|
| 1615 |
+
76,
|
| 1616 |
+
316,
|
| 1617 |
+
467,
|
| 1618 |
+
405
|
| 1619 |
+
],
|
| 1620 |
+
"page_idx": 7
|
| 1621 |
+
},
|
| 1622 |
+
{
|
| 1623 |
+
"type": "text",
|
| 1624 |
+
"text": "5.3. Empirical study",
|
| 1625 |
+
"text_level": 1,
|
| 1626 |
+
"bbox": [
|
| 1627 |
+
76,
|
| 1628 |
+
416,
|
| 1629 |
+
235,
|
| 1630 |
+
431
|
| 1631 |
+
],
|
| 1632 |
+
"page_idx": 7
|
| 1633 |
+
},
|
| 1634 |
+
{
|
| 1635 |
+
"type": "text",
|
| 1636 |
+
"text": "To demonstrate the effectiveness of proposed modules, i.e., domain adaptive training (DA), object discovery preprocessing (OD), and bridging layer (BL), this subsection provides empirical studies of UDAT. Concretely, we first ablate BL and substitute OD with random cropping to adopt naive DA on the baseline tracker. The results on the second row of Tab. 5 show that DA slightly promotes nighttime tracking, with a slight upgrade in success rate. However, adopting random cropping as preprocessing leads to abundant meaningless training samples, the model therefore can hardly learn the data distribution on the target domain. In that case, further activation of BL only makes a limited difference. As shown in the fourth row of Tab. 5, when em",
|
| 1637 |
+
"bbox": [
|
| 1638 |
+
76,
|
| 1639 |
+
440,
|
| 1640 |
+
467,
|
| 1641 |
+
635
|
| 1642 |
+
],
|
| 1643 |
+
"page_idx": 7
|
| 1644 |
+
},
|
| 1645 |
+
{
|
| 1646 |
+
"type": "table",
|
| 1647 |
+
"img_path": "images/3df0ae4cb01fbb905770333e70248067bf49bfd5cb1c09d81fbf77bcc3be1765.jpg",
|
| 1648 |
+
"table_caption": [
|
| 1649 |
+
"Table 4. Evaluation on the source domain. The results show the adaptation only brings slight performance fluctuation on the source domain."
|
| 1650 |
+
],
|
| 1651 |
+
"table_footnote": [],
|
| 1652 |
+
"table_body": "<table><tr><td>Trackers</td><td>SiamBAN</td><td>UDAT-BAN</td><td>SiamCAR</td><td>UDAT-CAR</td></tr><tr><td>Succ.</td><td>0.603</td><td>0.5911.96%↓</td><td>0.601</td><td>0.5921.58%↓</td></tr><tr><td>Prec.</td><td>0.788</td><td>0.7840.52%↓</td><td>0.793</td><td>0.7930.04%↓</td></tr></table>",
|
| 1653 |
+
"bbox": [
|
| 1654 |
+
91,
|
| 1655 |
+
705,
|
| 1656 |
+
450,
|
| 1657 |
+
752
|
| 1658 |
+
],
|
| 1659 |
+
"page_idx": 7
|
| 1660 |
+
},
|
| 1661 |
+
{
|
| 1662 |
+
"type": "table",
|
| 1663 |
+
"img_path": "images/960782aa74b50379ff8bf011d21919f497ad9d6eed6e0eb0ee28a5e6192c1ec3.jpg",
|
| 1664 |
+
"table_caption": [
|
| 1665 |
+
"Table 5. Empirical Study of the proposed UDAT on NAT2021-test. DA, OD, and BL denote domain adaptive training, object discovery preprocessing, and bridging layer, respectively."
|
| 1666 |
+
],
|
| 1667 |
+
"table_footnote": [],
|
| 1668 |
+
"table_body": "<table><tr><td>DA</td><td>OD</td><td>BL</td><td>Prec.</td><td>Norm. Prec.</td><td>Succ.</td></tr><tr><td></td><td></td><td></td><td>0.663</td><td>0.542</td><td>0.453</td></tr><tr><td>✓</td><td></td><td></td><td>0.6620.19%↓</td><td>0.5400.33%↓</td><td>0.4591.33%↑</td></tr><tr><td>✓</td><td></td><td>✓</td><td>0.6640.16%↑</td><td>0.5471.04%↑</td><td>0.4642.45%↑</td></tr><tr><td>✓</td><td>✓</td><td></td><td>0.6761.95%↑</td><td>0.5491.42%↑</td><td>0.4673.24%↑</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>0.6873.62%↑</td><td>0.5644.17%↑</td><td>0.4836.82%↑</td></tr></table>",
|
| 1669 |
+
"bbox": [
|
| 1670 |
+
83,
|
| 1671 |
+
808,
|
| 1672 |
+
460,
|
| 1673 |
+
898
|
| 1674 |
+
],
|
| 1675 |
+
"page_idx": 7
|
| 1676 |
+
},
|
| 1677 |
+
{
|
| 1678 |
+
"type": "image",
|
| 1679 |
+
"img_path": "images/b480c1939e62cfce8c48b69faa2da02cc2909abc0d9e2c4db9c7c34a77e4190e.jpg",
|
| 1680 |
+
"image_caption": [],
|
| 1681 |
+
"image_footnote": [],
|
| 1682 |
+
"bbox": [
|
| 1683 |
+
501,
|
| 1684 |
+
89,
|
| 1685 |
+
629,
|
| 1686 |
+
145
|
| 1687 |
+
],
|
| 1688 |
+
"page_idx": 7
|
| 1689 |
+
},
|
| 1690 |
+
{
|
| 1691 |
+
"type": "image",
|
| 1692 |
+
"img_path": "images/62ff1e3cd46f817fdbae23aaa93a22b4e5eba782ced92b595eef028fd54d350d.jpg",
|
| 1693 |
+
"image_caption": [],
|
| 1694 |
+
"image_footnote": [],
|
| 1695 |
+
"bbox": [
|
| 1696 |
+
630,
|
| 1697 |
+
89,
|
| 1698 |
+
756,
|
| 1699 |
+
145
|
| 1700 |
+
],
|
| 1701 |
+
"page_idx": 7
|
| 1702 |
+
},
|
| 1703 |
+
{
|
| 1704 |
+
"type": "image",
|
| 1705 |
+
"img_path": "images/9af1456d2745d9fe3570dbdabb5351cd2ffa0678129274525bcee46add15ae4d.jpg",
|
| 1706 |
+
"image_caption": [],
|
| 1707 |
+
"image_footnote": [],
|
| 1708 |
+
"bbox": [
|
| 1709 |
+
756,
|
| 1710 |
+
90,
|
| 1711 |
+
885,
|
| 1712 |
+
145
|
| 1713 |
+
],
|
| 1714 |
+
"page_idx": 7
|
| 1715 |
+
},
|
| 1716 |
+
{
|
| 1717 |
+
"type": "image",
|
| 1718 |
+
"img_path": "images/99abc8e24b07d880464a0cc32d31eb3b43f368ca6f0514b525d6343e55c869ce.jpg",
|
| 1719 |
+
"image_caption": [],
|
| 1720 |
+
"image_footnote": [],
|
| 1721 |
+
"bbox": [
|
| 1722 |
+
503,
|
| 1723 |
+
147,
|
| 1724 |
+
629,
|
| 1725 |
+
203
|
| 1726 |
+
],
|
| 1727 |
+
"page_idx": 7
|
| 1728 |
+
},
|
| 1729 |
+
{
|
| 1730 |
+
"type": "image",
|
| 1731 |
+
"img_path": "images/ef51af6b4391dfc19df28e48c045c30462fab0ae77b8fdcf84b4fd0cc234c79f.jpg",
|
| 1732 |
+
"image_caption": [],
|
| 1733 |
+
"image_footnote": [],
|
| 1734 |
+
"bbox": [
|
| 1735 |
+
630,
|
| 1736 |
+
147,
|
| 1737 |
+
756,
|
| 1738 |
+
203
|
| 1739 |
+
],
|
| 1740 |
+
"page_idx": 7
|
| 1741 |
+
},
|
| 1742 |
+
{
|
| 1743 |
+
"type": "image",
|
| 1744 |
+
"img_path": "images/bf79c8c98fbe0ad4c390860c42460cd6caf7647d077b67d6ff923367f8d7c6aa.jpg",
|
| 1745 |
+
"image_caption": [],
|
| 1746 |
+
"image_footnote": [],
|
| 1747 |
+
"bbox": [
|
| 1748 |
+
756,
|
| 1749 |
+
147,
|
| 1750 |
+
885,
|
| 1751 |
+
203
|
| 1752 |
+
],
|
| 1753 |
+
"page_idx": 7
|
| 1754 |
+
},
|
| 1755 |
+
{
|
| 1756 |
+
"type": "image",
|
| 1757 |
+
"img_path": "images/4f4f12bd02f32baa44535dd837ed637002574721a7484bd691ef1866fff09014.jpg",
|
| 1758 |
+
"image_caption": [
|
| 1759 |
+
"Frames"
|
| 1760 |
+
],
|
| 1761 |
+
"image_footnote": [],
|
| 1762 |
+
"bbox": [
|
| 1763 |
+
501,
|
| 1764 |
+
205,
|
| 1765 |
+
629,
|
| 1766 |
+
260
|
| 1767 |
+
],
|
| 1768 |
+
"page_idx": 7
|
| 1769 |
+
},
|
| 1770 |
+
{
|
| 1771 |
+
"type": "image",
|
| 1772 |
+
"img_path": "images/eecf761355d65f7e7d5326cac98820a90ae1a41dd1164c0104a77e2b212aa341.jpg",
|
| 1773 |
+
"image_caption": [
|
| 1774 |
+
"SiamCAR"
|
| 1775 |
+
],
|
| 1776 |
+
"image_footnote": [],
|
| 1777 |
+
"bbox": [
|
| 1778 |
+
630,
|
| 1779 |
+
205,
|
| 1780 |
+
756,
|
| 1781 |
+
260
|
| 1782 |
+
],
|
| 1783 |
+
"page_idx": 7
|
| 1784 |
+
},
|
| 1785 |
+
{
|
| 1786 |
+
"type": "image",
|
| 1787 |
+
"img_path": "images/a894482d15d227ad576795835b215889dd670432d513eb889a31117a5ee72769.jpg",
|
| 1788 |
+
"image_caption": [
|
| 1789 |
+
"UDAT-CAR",
|
| 1790 |
+
"Figure 9. Visual comparison of confidence maps generated by the baseline and the proposed UDAT. Target objects are marked by green boxes. The baseline struggles to extract discriminable features in dim light. UDAT substantially raises the perception ability of baseline in adverse illuminance."
|
| 1791 |
+
],
|
| 1792 |
+
"image_footnote": [],
|
| 1793 |
+
"bbox": [
|
| 1794 |
+
756,
|
| 1795 |
+
205,
|
| 1796 |
+
885,
|
| 1797 |
+
260
|
| 1798 |
+
],
|
| 1799 |
+
"page_idx": 7
|
| 1800 |
+
},
|
| 1801 |
+
{
|
| 1802 |
+
"type": "text",
|
| 1803 |
+
"text": "ploying OD instead of random cropping, performance on the target domain obtains a $3.24\\%$ boost in success rate, which verifies the effectiveness of the proposed saliency detection-based data preprocessing. Further, BL doubles the promotion brought by OD, complete UDAT realizes a precision of 0.687 and a success rate of 0.483, achieving favorable nighttime tracking performance. The results verify that the proposed bridging layer fairly enables the tracker to generate discriminative features from nighttime images.",
|
| 1804 |
+
"bbox": [
|
| 1805 |
+
496,
|
| 1806 |
+
363,
|
| 1807 |
+
890,
|
| 1808 |
+
500
|
| 1809 |
+
],
|
| 1810 |
+
"page_idx": 7
|
| 1811 |
+
},
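To make the contrast with random cropping concrete, a hedged sketch of saliency-based patch discovery is given below; the thresholds and the upstream saliency model producing `saliency_map` are assumptions, and the paper's actual object discovery pipeline may differ.

```python
import cv2
import numpy as np

def discover_patches(saliency_map, thresh=0.5, min_area=100):
    """Binarise a saliency map and keep connected regions large enough to be
    plausible objects; their bounding boxes serve as training patches, unlike
    random crops, which mostly contain background."""
    mask = (saliency_map > thresh).astype(np.uint8)
    num, _labels, stats, _centroids = cv2.connectedComponentsWithStats(mask)
    boxes = []
    for i in range(1, num):                 # label 0 is the background
        x, y, w, h, area = stats[i]
        if area >= min_area:
            boxes.append((int(x), int(y), int(w), int(h)))
    return boxes  # crop the frame around these boxes to build training pairs
```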
|
| 1812 |
+
{
|
| 1813 |
+
"type": "text",
|
| 1814 |
+
"text": "6. Conclusion",
|
| 1815 |
+
"text_level": 1,
|
| 1816 |
+
"bbox": [
|
| 1817 |
+
500,
|
| 1818 |
+
513,
|
| 1819 |
+
617,
|
| 1820 |
+
527
|
| 1821 |
+
],
|
| 1822 |
+
"page_idx": 7
|
| 1823 |
+
},
|
| 1824 |
+
{
|
| 1825 |
+
"type": "text",
|
| 1826 |
+
"text": "In this work, a simple but effective unsupervised domain adaptive tracking framework, namely UDAT, is proposed for nighttime aerial tracking. In our UDAT, an object discovery strategy is introduced for unlabelled data preprocessing. The Transformer bridging layer is adopted to narrow the gap of image features between daytime and nighttime. Optimized through adversarial learning with a Transformer discriminator, the learned model substantially improves nighttime tracking performance upon SOTA approaches. We also construct NAT2021, a pioneering benchmark for unsupervised domain adaptive nighttime tracking. Detailed evaluation on nighttime tracking benchmarks shows the effectiveness and domain adaptability of UDAT. The limitation of this work lies in the absence of pseudo supervision in the target domain. Future work will focus on reliable pseudo supervision, with which we believe the performance of nighttime tracking can be further improved. To sum up, we are convinced that the UDAT framework along with the NAT2021 benchmark can facilitate research on visual tracking at nighttime and in other adverse conditions.",
|
| 1827 |
+
"bbox": [
|
| 1828 |
+
496,
|
| 1829 |
+
537,
|
| 1830 |
+
890,
|
| 1831 |
+
840
|
| 1832 |
+
],
|
| 1833 |
+
"page_idx": 7
|
| 1834 |
+
},
|
| 1835 |
+
{
|
| 1836 |
+
"type": "text",
|
| 1837 |
+
"text": "Acknowledgement: This work was supported in part by the National Natural Science Foundation of China under Grant 62173249 and in part by the Natural Science Foundation of Shanghai under Grant 20ZR1460100.",
|
| 1838 |
+
"bbox": [
|
| 1839 |
+
498,
|
| 1840 |
+
840,
|
| 1841 |
+
890,
|
| 1842 |
+
898
|
| 1843 |
+
],
|
| 1844 |
+
"page_idx": 7
|
| 1845 |
+
},
|
| 1846 |
+
{
|
| 1847 |
+
"type": "page_number",
|
| 1848 |
+
"text": "8",
|
| 1849 |
+
"bbox": [
|
| 1850 |
+
480,
|
| 1851 |
+
924,
|
| 1852 |
+
488,
|
| 1853 |
+
935
|
| 1854 |
+
],
|
| 1855 |
+
"page_idx": 7
|
| 1856 |
+
},
|
| 1857 |
+
{
|
| 1858 |
+
"type": "text",
|
| 1859 |
+
"text": "References",
|
| 1860 |
+
"text_level": 1,
|
| 1861 |
+
"bbox": [
|
| 1862 |
+
78,
|
| 1863 |
+
89,
|
| 1864 |
+
173,
|
| 1865 |
+
104
|
| 1866 |
+
],
|
| 1867 |
+
"page_idx": 8
|
| 1868 |
+
},
|
| 1869 |
+
{
|
| 1870 |
+
"type": "list",
|
| 1871 |
+
"sub_type": "ref_text",
|
| 1872 |
+
"list_items": [
|
| 1873 |
+
"[1] Luca Bertinetto, Jack Valmadre, João F. Henriques, Vedaldi Andrea, and Philip H. S. Torr. Fully-Convolutional Siamese Networks for Object Tracking. In ECCVW, pages 850–865, 2016. 2, 6",
|
| 1874 |
+
"[2] Rogerio Bonatti, Cherie Ho, Wenshan Wang, Sanjiban Choudhury, and Sebastian Scherer. Towards a Robust Aerial Cinematography Platform: Localizing and Tracking Moving Targets in Unstructured Environments. In IROS, pages 229-236, 2019. 1",
|
| 1875 |
+
"[3] Pau Panareda Busto and Juergen Gall. Open Set Domain Adaptation. In ICCV, pages 754-763, 2017. 2",
|
| 1876 |
+
"[4] Ziang Cao, Changhong Fu, Junjie Ye, Bowen Li, and Yiming Li. HiFT: Hierarchical Feature Transformer for Aerial Tracking. In ICCV, pages 15437-15446, 2021. 1, 2, 4, 6, 7",
|
| 1877 |
+
"[5] Ziang Cao, Changhong Fu, Junjie Ye, Bowen Li, and Yiming Li. SiamAPN++: Siamese Attentional Aggregation Network for Real-Time UAV Tracking. In IROS, pages 3086-3092, 2021. 6",
|
| 1878 |
+
"[6] Xin Chen, Bin Yan, Jiawen Zhu, Dong Wang, Xiaoyun Yang, and Huchuan Lu. Transformer Tracking. In CVPR, pages 8126-8135, 2021. 2",
|
| 1879 |
+
"[7] Yuhua Chen, Wen Li, Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Domain Adaptive Faster R-CNN for Object Detection in the Wild. In CVPR, pages 3339-3348, 2018. 2",
|
| 1880 |
+
"[8] Zedu Chen, Bineng Zhong, Guorong Li, Shengping Zhang, and Rongrong Ji. Siamese Box Adaptive Network for Visual Tracking. In CVPR, pages 6667-6676, 2020. 1, 2, 6, 7",
|
| 1881 |
+
"[9] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. ImageNet: A Large-Scale Hierarchical Image Database. In CVPR, pages 248–255, 2009. 1",
|
| 1882 |
+
"[10] Heng Fan, Hexin Bai, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Harshit, Mingzhen Huang, Juehuan Liu, Yong Xu, Chunyuan Liao, Lin Yuan, and Haibin Ling. LasOT: A High-quality Large-scale Single Object Tracking Benchmark. IJCV, 129:439-461, 2021. 1, 6",
|
| 1883 |
+
"[11] Changhong Fu, Ziang Cao, Yiming Li, Junjie Ye, and Chen Feng. Siamese Anchor Proposal Network for High-Speed Aerial Tracking. In ICRA, pages 510-516, 2021. 6",
|
| 1884 |
+
"[12] Hamed Kiani Galoogahi, Ashton Fagg, and Simon Lucey. Learning Background-Aware Correlation Filters for Visual Tracking. In ICCV, pages 1144-1152, 2017. 2",
|
| 1885 |
+
"[13] Yaroslav Ganin and Victor Lempitsky. Unsupervised Domain Adaptation by Backpropagation. In ICML, volume 37, pages 1180-1189, 2015. 4",
|
| 1886 |
+
"[14] Dongyan Guo, Jun Wang, Ying Cui, Zhenhua Wang, and Shengyong Chen. SiamCAR: Siamese Fully Convolutional Classification and Regression for Visual Tracking. In CVPR, pages 6268-6276, 2020. 1, 2, 6, 7",
|
| 1887 |
+
"[15] Qing Guo, Wei Feng, Ce Zhou, Rui Huang, Liang Wan, and Song Wang. Learning Dynamic Siamese Network for Visual Object Tracking. In ICCV, pages 1781-1789, 2017. 6",
|
| 1888 |
+
"[16] João F. Henriques, Rui Caseiro, Pedro Martins, and Jorge Batista. High-Speed Tracking with Kernelized Correlation Filters. IEEE TPAMI, 37(3):583-596, 2015. 2"
|
| 1889 |
+
],
|
| 1890 |
+
"bbox": [
|
| 1891 |
+
78,
|
| 1892 |
+
116,
|
| 1893 |
+
470,
|
| 1894 |
+
900
|
| 1895 |
+
],
|
| 1896 |
+
"page_idx": 8
|
| 1897 |
+
},
|
| 1898 |
+
{
|
| 1899 |
+
"type": "list",
|
| 1900 |
+
"sub_type": "ref_text",
|
| 1901 |
+
"list_items": [
|
| 1902 |
+
"[17] Lianghua Huang, Xin Zhao, and Kaiqi Huang. GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild. IEEE TPAMI, 43(5):1562–1577, 2021. 1, 6",
|
| 1903 |
+
"[18] Sheng-Wei Huang, Che-Tsung Lin, Shu-Ping Chen, Yen-Yi Wu, Po-Hao Hsu, and Shang-Hong Lai. AugGAN: Cross Domain Adaptation with GAN-based Data Augmentation. In ECCV, page 731-744, 2018. 2",
|
| 1904 |
+
"[19] Ziyuan Huang, Changhong Fu, Yiming Li, Fuling Lin, and Peng Lu. Learning Aberrance Repressed Correlation Filters for Real-Time UAV Tracking. In ICCV, pages 2891-2900, 2019. 2",
|
| 1905 |
+
"[20] Diederik P Kingma and Jimmy Ba. Adam: A Method for Stochastic Optimization. In ICLR, pages 1-11, 2015. 6",
|
| 1906 |
+
"[21] Bowen Li, Changhong Fu, Fangqiang Ding, Junjie Ye, and Fuling Lin. ADTrack: Target-Aware Dual Filter Learning for Real-Time Anti-Dark UAV Tracking. In ICRA, pages 496-502, 2021. 2, 5, 6",
|
| 1907 |
+
"[22] Bo Li, Wei Wu, Qiang Wang, Fangyi Zhang, Junliang Xing, and Junjie Yan. SiamRPN++: Evolution of Siamese Visual Tracking With Very Deep Networks. In CVPR, pages 4277-4286, 2019. 1, 2, 6, 7",
|
| 1908 |
+
"[23] Bo Li, Junjie Yan, Wei Wu, Zheng Zhu, and Xiaolin Hu. High Performance Visual Tracking with Siamese Region Proposal Network. In CVPR, pages 8971-8980, 2018. 2",
|
| 1909 |
+
"[24] Chongyi Li, Chunle Guo, and Change Loy Chen. Learning to Enhance Low-Light Image via Zero-Reference Deep Curve Estimation. IEEE TPAMI, pages 1–14, 2021. 3, 5",
|
| 1910 |
+
"[25] Rui Li, Minjian Pang, Cong Zhao, Guyue Zhou, and Lu Fang. Monocular Long-Term Target Following on UAVs. In CVPRW, pages 29-37, 2016. 1",
|
| 1911 |
+
"[26] Wen Li, Zheng Xu, Dong Xu, Dengxin Dai, and Luc Van Gool. Domain Generalization and Adaptation Using Low Rank Exemplar SVMs. IEEE TPAMI, 40(5):1114-1127, 2018. 2",
|
| 1912 |
+
"[27] Yiming Li, Changhong Fu, Fangqiang Ding, Ziyuan Huang, and Geng Lu. AutoTrack: Towards High-Performance Visual Tracking for UAV With Automatic Spatio-Temporal Regularization. In CVPR, pages 11920–11929, 2020. 2",
|
| 1913 |
+
"[28] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft COCO: Common Objects in Context. In ECCV, pages 740-755, 2014. 6",
|
| 1914 |
+
"[29] Alan Lukežić, Jiří Matas, and Matej Kristan. D3S – A Discriminative Single Shot Segmentation Tracker. In CVPR, pages 7131–7140, 2020. 6, 7",
|
| 1915 |
+
"[30] Xudong Mao, Qing Li, Haoran Xie, Raymond Y.K. Lau, Zhen Wang, and Stephen Paul Smolley. Least Squares Generative Adversarial Networks. In ICCV, pages 2813-2821, 2017. 5",
|
| 1916 |
+
"[31] Matthias Mueller, Neil Smith, and Bernard Ghanem. A Benchmark and Simulator for UAV Tracking. In ECCV, pages 445-461, 2016. 8",
|
| 1917 |
+
"[32] Esteban Real, Jonathon Shlens, Stefano Mazzocchi, Xin Pan, and Vincent Vanhoucke. YouTube-BoundingBoxes: A Large High-Precision Human-Annotated Data Set for Object Detection in Video. In CVPR, pages 7464-7473, 2017. 1, 6"
|
| 1918 |
+
],
|
| 1919 |
+
"bbox": [
|
| 1920 |
+
501,
|
| 1921 |
+
92,
|
| 1922 |
+
890,
|
| 1923 |
+
900
|
| 1924 |
+
],
|
| 1925 |
+
"page_idx": 8
|
| 1926 |
+
},
|
| 1927 |
+
{
|
| 1928 |
+
"type": "page_number",
|
| 1929 |
+
"text": "9",
|
| 1930 |
+
"bbox": [
|
| 1931 |
+
478,
|
| 1932 |
+
924,
|
| 1933 |
+
491,
|
| 1934 |
+
936
|
| 1935 |
+
],
|
| 1936 |
+
"page_idx": 8
|
| 1937 |
+
},
|
| 1938 |
+
{
|
| 1939 |
+
"type": "list",
|
| 1940 |
+
"sub_type": "ref_text",
|
| 1941 |
+
"list_items": [
|
| 1942 |
+
"[33] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks. IEEE TPAMI, 39(6):1137-1149, 2017. 2",
|
| 1943 |
+
"[34] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet Large Scale Visual Recognition Challenge. IJCV, 115(3):211-252, 2015. 6",
|
| 1944 |
+
"[35] Suman Saha, Anton Obukhov, Danda Pani Paudel, Menelaos Kanakis, Yuhua Chen, Stamatios Georgoulis, and Luc Van Gool. Learning To Relate Depth and Semantics for Unsupervised Domain Adaptation. In CVPR, pages 8197-8207, 2021. 2",
|
| 1945 |
+
"[36] Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Map-Guided Curriculum Domain Adaptation and Uncertainty-Aware Evaluation for Semantic Nighttime Image Segmentation. IEEE TPAMI, pages 1-15, 2020. 2",
|
| 1946 |
+
"[37] Yukihiro Sasagawa and Hajime Nagahara. YOLO in the Dark - Domain Adaptation Method for Merging Multiple Models. In ECCV, pages 345-359, 2020. 2",
|
| 1947 |
+
"[38] Ramprasaath R. Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. Grad-CAM: Visual Explanations from Deep Networks via Gradient-Based Localization. In ICCV, pages 618-626, 2017. 8",
|
| 1948 |
+
"[39] Ivan Sosnovik, Artem Moskalev, and Arnold W.M. Smeulders. Scale Equivalence Improves Siamese Tracking. In WACV, pages 2765-2774, January 2021. 6",
|
| 1949 |
+
"[40] Baochen Sun, Jiashi Feng, and Kate Saenko. Return of Frustratingly Easy Domain Adaptation. In AAAI, pages 2058-2065, 2016. 2",
|
| 1950 |
+
"[41] Ran Tao, Efstratos Gavves, and Arnold W. M. Smeulders. Siamese Instance Search for Tracking. In CVPR, pages 1420-1429, 2016. 2",
|
| 1951 |
+
"[42] Laurens Van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. JMLR, 9(11):2579-2605, 2008. 4",
|
| 1952 |
+
"[43] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention Is All You Need. In NeurIPS, pages 6000-6010, 2017. 2, 4",
|
| 1953 |
+
"[44] Ning Wang, Wengang Zhou, Yibing Song, Chao Ma, Wei Liu, and Houqiang Li. Unsupervised Deep Representation Learning for Real-Time Tracking. IJCV, 129(2):400-418, 2021. 6",
|
| 1954 |
+
"[45] Ning Wang, Wengang Zhou, Jie Wang, and Houqiang Li. Transformer Meets Tracker: Exploiting Temporal Context for Robust Visual Tracking. In CVPR, pages 1571-1580, 2021. 2",
|
| 1955 |
+
"[46] Xinyi Wu, Zhenyao Wu, Hao Guo, Lili Ju, and Song Wang. DANNet: A One-Stage Domain Adaptation Network for Unsupervised Nighttime Semantic Segmentation. In CVPR, pages 15769–15778, 2021. 2",
|
| 1956 |
+
"[47] Yinda Xu, Zeyu Wang, Zuoxin Li, Ye Yuan, and Gang Yu. SiamFC++: Towards Robust and Accurate Visual Tracking with Target Estimation Guidelines. In AAAI, pages 12549-12556, 2020. 2, 6, 7"
|
| 1957 |
+
],
|
| 1958 |
+
"bbox": [
|
| 1959 |
+
78,
|
| 1960 |
+
90,
|
| 1961 |
+
468,
|
| 1962 |
+
898
|
| 1963 |
+
],
|
| 1964 |
+
"page_idx": 9
|
| 1965 |
+
},
|
| 1966 |
+
{
|
| 1967 |
+
"type": "list",
|
| 1968 |
+
"sub_type": "ref_text",
|
| 1969 |
+
"list_items": [
|
| 1970 |
+
"[48] Junjie Ye, Changhong Fu, Ziang Cao, Shan An, Guangze Zheng, and Bowen Li. Tracker Meets Night: A Transformer Enhancer for UAV Tracking. IEEE RA-L, 7(2):3866-3873, 2022. 1, 2",
|
| 1971 |
+
"[49] Junjie Ye, Changhong Fu, Fuling Lin, Fangqiang Ding, Shan An, and Geng Lu. Multi-Regularized Correlation Filter for UAV Tracking and Self-Localization. IEEE TIE, 69(6):6004-6014, 2022. 1",
|
| 1972 |
+
"[50] Junjie Ye, Changhong Fu, Guangze Zheng, Ziang Cao, and Bowen Li. DarkLighter: Light Up the Darkness for UAV Tracking. In IROS, pages 3079-3085, 2021. 1, 2",
|
| 1973 |
+
"[51] Lichao Zhang, Abel Gonzalez-Garcia, Joost Van De Weijer, Martin Danelljan, and Fahad Shahbaz Khan. Learning the Model Update for Siamese Trackers. In ICCV, pages 4009-4018, 2019. 6, 7",
|
| 1974 |
+
"[52] Miao Zhang, Jie Liu, Yifei Wang, Yongri Piao, Shunyu Yao, Wei Ji, Jingjing Li, Huchuan Lu, and Zhongxuan Luo. Dynamic Context-Sensitive Filtering Network for Video Salient Object Detection. In ICCV, pages 1533-1543, 2021. 3",
|
| 1975 |
+
"[53] Zhipeng Zhang and Houwen Peng. Deeper and Wider Siamese Networks for Real-Time Visual Tracking. In CVPR, pages 4586-4595, 2019. 6",
|
| 1976 |
+
"[54] Zhipeng Zhang, Houwen Peng, Jianlong Fu, Bing Li, and Weiming Hu. Ocean: Object-Aware Anchor-Free Tracking. In ECCV, pages 771-787, 2020. 6, 7",
|
| 1977 |
+
"[55] Jilai Zheng, Chao Ma, Houwen Peng, and Xiaokang Yang. Learning to Track Objects from Unlabeled Videos. In ICCV, pages 13526-13535, 2021. 3",
|
| 1978 |
+
"[56] Zheng Zhu, Qiang Wang, Bo Li, Wei Wu, Junjie Yan, and Weiming Hu. Distractor-aware Siamese Networks for Visual Object Tracking. In ECCV, pages 103-119, 2018. 6"
|
| 1979 |
+
],
|
| 1980 |
+
"bbox": [
|
| 1981 |
+
501,
|
| 1982 |
+
92,
|
| 1983 |
+
890,
|
| 1984 |
+
531
|
| 1985 |
+
],
|
| 1986 |
+
"page_idx": 9
|
| 1987 |
+
},
|
| 1988 |
+
{
|
| 1989 |
+
"type": "page_number",
|
| 1990 |
+
"text": "10",
|
| 1991 |
+
"bbox": [
|
| 1992 |
+
477,
|
| 1993 |
+
924,
|
| 1994 |
+
495,
|
| 1995 |
+
936
|
| 1996 |
+
],
|
| 1997 |
+
"page_idx": 9
|
| 1998 |
+
}
|
| 1999 |
+
]
|
2203.10xxx/2203.10541/b63ce456-502b-4012-b6a3-0c6523da6183_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2203.10xxx/2203.10541/b63ce456-502b-4012-b6a3-0c6523da6183_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f97b02dd9957e2f9c94c8568c0392bd110278a3c36f14ec0aeeeef7eb4147d2c
|
| 3 |
+
size 4781854
|
2203.10xxx/2203.10541/full.md
ADDED
|
@@ -0,0 +1,402 @@
| 1 |
+
# Unsupervised Domain Adaptation for Nighttime Aerial Tracking
|
| 2 |
+
|
| 3 |
+
Junjie Ye†, Changhong Fu†,\*, Guangze Zheng†, Danda Pani Paudel‡, and Guang Chen†
|
| 4 |
+
†Tongji University, China   ‡ETH Zürich, Switzerland
|
| 5 |
+
|
| 6 |
+
{ye.jun.jie, changhongfu, mmlp, guangchen}@tongji.edu.cn, paudel@vision.ee.ethz.ch
|
| 7 |
+
|
| 8 |
+
# Abstract
|
| 9 |
+
|
| 10 |
+
Previous advances in object tracking mostly reported on favorable illumination circumstances while neglecting performance at nighttime, which significantly impeded the development of related aerial robot applications. This work instead develops a novel unsupervised domain adaptation framework for nighttime aerial tracking (named UDAT). Specifically, a unique object discovery approach is provided to generate training patches from raw nighttime tracking videos. To tackle the domain discrepancy, we employ a Transformer-based bridging layer post to the feature extractor to align image features from both domains. With a Transformer day/night feature discriminator, the daytime tracking model is adversarially trained to track at night. Moreover, we construct a pioneering benchmark namely NAT2021 for unsupervised domain adaptive nighttime tracking, which comprises a test set of 180 manually annotated tracking sequences and a train set of over 276k unlabelled nighttime tracking frames. Exhaustive experiments demonstrate the robustness and domain adaptability of the proposed framework in nighttime aerial tracking. The code and benchmark are available at https://github.com/vision4robotics/UDAT.
|
| 11 |
+
|
| 12 |
+
# 1. Introduction
|
| 13 |
+
|
| 14 |
+
Standing as one of the fundamental tasks in computer vision, object tracking has received widespread attention with multifarious aerial robot applications, e.g., unmanned aerial vehicle (UAV) self-localization [49], target following [25], and aerial cinematography [2]. Driven by large-scale datasets [10,17,32] with the supervision of meticulous manual annotations, emerging deep trackers [4, 8, 14, 22] keep setting state-of-the-arts (SOTAs) in recent years.
|
| 15 |
+
|
| 16 |
+
Despite these advances, both current benchmarks and approaches are designed for object tracking under favorable illumination conditions. In contrast to daytime, images captured at night have low contrast, brightness, and signal
|
| 17 |
+
|
| 18 |
+

|
| 19 |
+
|
| 20 |
+

|
| 21 |
+
|
| 22 |
+

|
| 23 |
+
|
| 24 |
+
|
| 25 |
+

|
| 26 |
+
|
| 27 |
+

|
| 28 |
+
|
| 29 |
+

|
| 30 |
+
|
| 31 |
+
|
| 32 |
+

|
| 33 |
+
|
| 34 |
+

|
| 35 |
+
|
| 36 |
+

|
| 37 |
+
|
| 38 |
+
|
| 39 |
+

|
| 40 |
+
(a) Qualitative comparison in typical night scenes.
|
| 41 |
+
(b) Overall performance comparison on NAT2021-test.
|
| 42 |
+
Figure 1. (a) Qualitative comparison of the proposed unsupervised domain adaptive trackers (i.e., UDAT-CAR and UDAT-BAN) and their baselines [8, 14]. (b) Overall performance of SOTA approaches on the constructed NAT2021-test benchmark. The proposed UDAT effectively adapts general trackers to nighttime aerial tracking scenes and yields favorable performance.
|
| 43 |
+
|
| 44 |
+
to-noise ratio (SNR). These differences cause the discrepancy in feature distribution of day/night images. Due to the cross-domain discrepancy, current SOTA trackers generalize badly to nighttime scenes [48, 50], which severely impedes the broadening of relevant aerial robot applications.
|
| 45 |
+
|
| 46 |
+
Regarding such domain gap and the performance drop, this work aims to address the cross-domain object tracking problem. In particular, we target adapting SOTA tracking models in daytime general conditions to nighttime aerial perspectives. One possible straightforward solution is to collect and annotate adequate target domain data for training. Nevertheless, such a non-trivial workload is expensive and time-consuming, since backbones' pre-training alone generally takes millions of high-quality images [9]. We
|
| 47 |
+
|
| 48 |
+
consequently consider the problem as an unsupervised domain adaptation task, where training data in the source domain is with well-annotated bounding boxes while that in the target domain has no manually annotated labels. Therefore, an unsupervised domain adaptive tracking framework, referred to as UDAT, is proposed for nighttime aerial tracking. To generate training patches of the target domain, we develop an object discovery strategy to explore potential objects in the unlabelled nighttime data. Besides, a bridging layer is proposed to bridge the gap of domain discrepancy for the extracted features.
|
| 49 |
+
|
| 50 |
+
Furthermore, the feature domain is distinguished by virtue of a discriminator during adversarial learning. Drawing lessons from the huge potential of the Transformer [43] in feature representation, both the bridging layer and the discriminator utilize a Transformer structure. Figure 1 exhibits some qualitative comparisons of trackers adopting UDAT and the corresponding baselines. UDAT raises baselines' nighttime aerial tracking performance substantially. Apart from methodology, we construct NAT2021, a benchmark comprising a test set of 180 fully annotated video sequences and a train set of over 276k unlabelled nighttime tracking frames, which serves as the first benchmark for unsupervised domain adaptive nighttime tracking. The main contributions of this work are fourfold:
|
| 51 |
+
|
| 52 |
+
- An unsupervised domain adaptive tracking framework, namely UDAT, is proposed for nighttime aerial tracking. To the best of our knowledge, the proposed UDAT is the first unsupervised adaptation framework for object tracking.
|
| 53 |
+
- A bridging layer and a day/night discriminator with Transformer structures are incorporated to align extracted features from different domains and narrow the domain gap between daytime and nighttime.
|
| 54 |
+
- A pioneering benchmark namely NAT2021, consisting of a fully annotated test set and an unlabelled train set, is constructed for domain adaptive nighttime tracking. An object discovery strategy is introduced for the unlabelled train set preprocessing.
|
| 55 |
+
- Extensive experiments on NAT2021-test and the recent public UAVDark70 [21] benchmark verify the effectiveness and domain adaptability of the proposed UDAT in nighttime aerial tracking.
|
| 56 |
+
|
| 57 |
+
# 2. Related work
|
| 58 |
+
|
| 59 |
+
# 2.1. Object tracking
|
| 60 |
+
|
| 61 |
+
Generally, recent object tracking approaches can be categorized as the discriminative correlation filter (DCF)-based approaches [12, 16, 19, 27] and the Siamese network-based
|
| 62 |
+
|
| 63 |
+
approaches [4, 8, 14, 22]. Due to the complicated online learning procedure, end-to-end training can be hardly realized on DCF-based trackers. Therefore, restricted to inferior handcrafted features or inappropriate deep feature extractors pre-trained for classification, DCF-based trackers lose their effectiveness in adverse conditions.
|
| 64 |
+
|
| 65 |
+
Benefiting from considerable training data and end-to-end learning, Siamese network-based trackers have achieved robust tracking performance. This line of approaches is pioneered by SINT [41] and SiamFC [1], which regard object tracking as a similarity learning problem and train Siamese networks with large-scale image pairs. Drawing lessons from object detection, B. Li et al. [23] introduce the region proposal network (RPN) [33] into the Siamese framework. SiamRPN++ [22] further adopts a deeper backbone and feature aggregation architecture to improve tracking accuracy. To alleviate increasing hyperparameters along with the introduction of RPN, the anchor-free approaches [8, 14, 47] adopt the per-pixel regression to directly predict four offsets on each pixel. Recently, Transformer [43] is incorporated into the Siamese framework [4,6,45] to model global information and further boost tracking performance.
|
| 66 |
+
|
| 67 |
+
Despite the great progress, object tracking in adverse conditions, for instance, nighttime aerial scenarios, lacks thorough study so far. In [21], a DCF framework integrated with a low-light enhancer is constructed while lacking transferability and being restricted to handcrafted features. Some studies [48, 50] design tracking-related low-light enhancers for data preprocessing in the tracking pipeline. However, such a paradigm suffers from weak collaboration with the tracking model and the cascade structure can hardly learn to narrow the domain gap at the feature level.
|
| 68 |
+
|
| 69 |
+
# 2.2. Domain adaptation
|
| 70 |
+
|
| 71 |
+
Towards narrowing the domain discrepancy and transferring knowledge from the source domain to the target domain, domain adaptation attracts considerable attention and is widely adopted in image classification [3, 26, 40]. Beyond classification, Y. Chen et al. [7] design a domain adaptive object detection framework and tackle the domain shift on both image-level and instance-level. In [18], an image transfer model is trained to perform day-to-night transformation for data augmentation before learning a detection model. Y. Sasagawa and H. Nagahara [37] propose to merge a low-light image enhancement model and an object detection model to realize nighttime object detection. For the task of semantic segmentation, C. Sakaridis et al. [36] formulate a curriculum framework to adapt semantic segmentation models from day to night through an intermediate twilight domain. X. Wu et al. [46] employ an adversarial learning manner to train a domain adaptation network for nighttime semantic segmentation. S. Saha et al. [35]
|
| 72 |
+
|
| 73 |
+

|
| 74 |
+
Figure 2. Illustration of the proposed unsupervised domain adaptation framework for nighttime aerial tracking. The object discovery module is employed to find potential objects in raw videos for training patch generation. Features extracted from different domains are aligned via the Transformer bridging layer. A Transformer day/night discriminator is trained to distinguish features between the source domain and the target domain.
|
| 75 |
+
|
| 76 |
+
mine cross-task relationships and build a multi-task learning framework for semantic segmentation and depth estimation in the unsupervised domain adaptation setting. Despite the rapid development in other vision tasks, domain adaptation for object tracking has not been investigated yet. Therefore, an effective unsupervised domain adaptation framework for object tracking is urgently needed.
|
| 77 |
+
|
| 78 |
+
# 3. Proposed method
|
| 79 |
+
|
| 80 |
+
The paradigm of the proposed UDAT framework is illustrated in Fig. 2. For data preprocessing of the unlabelled target domain, we employ a saliency detection-based strategy to locate potential objects and crop paired training patches. In the training pipeline, features generated by the feature extractor are modulated by the bridging layer. In this process, adversarial learning facilitates the reduction of feature distribution discrepancy between the source and target domains. Through this simple yet effective process, trackers achieve efficiency and robustness in night scenes comparable to daytime tracking.
|
| 81 |
+
|
| 82 |
+
# 3.1. Data preprocessing
|
| 83 |
+
|
| 84 |
+
Since deep trackers take training patches as input in each training step, we develop an object discovery strategy for data preprocessing on the unlabelled train set. Figure 3 illustrates the preprocessing pipeline. The object discovery strategy involves three stages, i.e., low-light enhancement, salient object detection, and dynamic programming. Given the low visibility of nighttime images, the original images are first lit up by a low-light enhancer [24]. Afterward, the enhanced images are fed into a video saliency detection model [52]. Candidate boxes are then obtained by building the minimum bounding rectangle of each detected salient region. To generate a box sequence that locates the same object across the timeline, motivated by [55], we
|
| 85 |
+
|
| 86 |
+

|
| 87 |
+
Figure 3. Illustration of object discovery, which contains low-light enhancement, salient object detection, and dynamic programming. The pink masks indicate detected salient regions, while the boxes are the circumscribed rectangles of these regions. Dynamic programming selects the red boxes and filters out the blue ones. The green box is obtained by linear interpolation between two adjacent frames. Note that the cropped patches are enhanced for visualization; the original patches are used in the actual training process.
|
| 88 |
+
|
| 89 |
+
adopt dynamic programming to filter noisy boxes. Assuming two boxes from the $j$ -th frame and the $k$ -th frame as $[x_{j,m}, y_{j,m}, w_{j,m}, h_{j,m}]$ and $[x_{k,n}, y_{k,n}, w_{k,n}, h_{k,n}]$ , where $m$ and $n$ indicate the box indexes, and $(x,y)$ , $w$ , $h$ denote the top-left coordinate, width, and height of the box, respectively, the normalized distance $D_{\mathrm{norm}}$ is obtained as:
|
| 90 |
+
|
| 91 |
+
$$
|
| 92 |
+
\begin{aligned} D_{\mathrm{norm}} = {} & \left(\frac{x_{j,m} - x_{k,n}}{w_{k,n}}\right)^{2} + \left(\frac{y_{j,m} - y_{k,n}}{h_{k,n}}\right)^{2} \\ & + \left(\log\left(\frac{w_{j,m}}{w_{k,n}}\right)\right)^{2} + \left(\log\left(\frac{h_{j,m}}{h_{k,n}}\right)\right)^{2}. \end{aligned} \tag{1}
|
| 93 |
+
$$
|
| 94 |
+
|
| 95 |
+
In dynamic programming, the normalized distance of candidate boxes between frames serves as a smooth reward, while adding a box in a frame to the box sequence means an incremental reward. For frames where none of the boxes is selected by dynamic programming, linear interpolation is adopted between the two closest frames to generate a new box. Ultimately, paired training patches are cropped from original images according to the obtained box sequence.
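To make Eq. (1) and the interpolation step concrete, below is a minimal Python sketch; the function names and the interpolation weighting are illustrative assumptions, not the authors' released code.

```python
import numpy as np

def normalized_distance(box_a, box_b):
    """Normalized distance between two candidate boxes, per Eq. (1).

    Each box is (x, y, w, h) with (x, y) the top-left corner. Lower values
    mean the boxes are closer in position and scale, which dynamic
    programming rewards as a smoother box trajectory across frames.
    """
    xa, ya, wa, ha = box_a  # box m from the j-th frame
    xb, yb, wb, hb = box_b  # box n from the k-th frame
    return ((xa - xb) / wb) ** 2 + ((ya - yb) / hb) ** 2 \
        + np.log(wa / wb) ** 2 + np.log(ha / hb) ** 2

def interpolate_box(box_prev, box_next, alpha):
    """Linearly interpolate a box for a frame where DP selected none;
    alpha in [0, 1] is the relative position between the two closest frames."""
    return tuple((1 - alpha) * p + alpha * n
                 for p, n in zip(box_prev, box_next))
```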
|
| 96 |
+
|
| 97 |
+
# 3.2. Network architecture
|
| 98 |
+
|
| 99 |
+
Feature extractor. Feature extraction of Siamese networks generally consists of two branches, i.e., the template branch and the search branch. Both branches generate feature maps from the template patch $\mathbf{Z}$ and the search patch $\mathbf{X}$ , namely $\varphi(\mathbf{Z})$ and $\varphi(\mathbf{X})$ , by adopting an identical backbone network $\varphi$ . Generally, trackers adopt the last block or blocks of features for subsequent classification and regression, which can be represented as follows:
|
| 100 |
+
|
| 101 |
+
$$
|
| 102 |
+
\begin{aligned} \varphi(\mathbf{X}) &= \operatorname{Concat}\left(\mathcal{F}_{N-i}(\mathbf{X}), \dots, \mathcal{F}_{N-1}(\mathbf{X}), \mathcal{F}_{N}(\mathbf{X})\right), \\ \varphi(\mathbf{Z}) &= \operatorname{Concat}\left(\mathcal{F}_{N-i}(\mathbf{Z}), \dots, \mathcal{F}_{N-1}(\mathbf{Z}), \mathcal{F}_{N}(\mathbf{Z})\right), \end{aligned} \tag{2}
|
| 103 |
+
$$
|
| 104 |
+
|
| 105 |
+
where $\mathcal{F}_{*}(\cdot)$ indicates features extracted from the $*$ -th block of a backbone with $N$ blocks in total, and Concat denotes channel-wise concatenation. Since both $\varphi(\mathbf{X})$ and $\varphi(\mathbf{Z})$ will pass through the following Transformer bridging layer and discriminator, we take the instance of $\varphi(\mathbf{X})$ in the following introduction for clarity.
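For illustration, Eq. (2) amounts to a single channel-wise concatenation; the following PyTorch sketch assumes the last $i+1$ blocks share spatial resolution, as in the modified ResNet backbones commonly used by Siamese trackers:

```python
import torch

def concat_block_features(block_feats, i=2):
    # block_feats: [F_1(.), ..., F_N(.)] from the backbone; keep the last
    # i+1 blocks and concatenate them along the channel dim (Eq. (2)).
    return torch.cat(block_feats[-(i + 1):], dim=1)
```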
|
| 106 |
+
|
| 107 |
+
Transformer bridging layer. Features extracted from daytime and nighttime images exhibit a huge gap; this domain discrepancy leads to inferior tracking performance at night. Before feeding the obtained features to the tracker head for object localization, we propose to bridge the gap between the feature distributions through a bridging layer. In consideration of the strong modeling capability of the Transformer [43] for long-range interdependencies, we design the bridging layer with a Transformer structure. Taking the search branch as an instance, positional encodings $\mathbf{P}$ are first added to the input feature $\varphi (\mathbf{X})\in \mathbb{R}^{N\times H\times W}$ . Next, the summation is flattened to $(\mathbf{P} + \varphi (\mathbf{X}))\in \mathbb{R}^{HW\times N}$ . Multi-head self-attention (MSA) is then conducted as:
|
| 108 |
+
|
| 109 |
+
$$
|
| 110 |
+
\begin{aligned} \widehat{\varphi(\mathbf{X})}' &= \operatorname{MSA}(\mathbf{P} + \varphi(\mathbf{X})) + \mathbf{P} + \varphi(\mathbf{X}), \\ \widehat{\varphi(\mathbf{X})} &= \operatorname{LN}\left(\operatorname{FFN}\left(\operatorname{Mod}\left(\operatorname{LN}\left(\widehat{\varphi(\mathbf{X})}'\right)\right)\right) + \widehat{\varphi(\mathbf{X})}'\right), \end{aligned} \tag{3}
|
| 111 |
+
$$
|
| 112 |
+
|
| 113 |
+
where $\widehat{\varphi(\mathbf{X})}'$ is an intermediate variable and LN indicates layer normalization. Moreover, FFN denotes the fully connected feed-forward network, which consists of two linear layers with a ReLU in between. Mod is a modulation layer from [4] that fully explores internal spatial information. The final output is flattened back to $N\times H\times W$. For each head of MSA, the attention function can be formulated as:
|
| 114 |
+
|
| 115 |
+
$$
|
| 116 |
+
\operatorname{Attention}(\mathbf{Q}, \mathbf{K}, \mathbf{V}) = \operatorname{Softmax}\left(\frac{\mathbf{Q}\mathbf{K}^{T}}{\sqrt{d_{k}}}\right)\mathbf{V}. \tag{4}
|
| 117 |
+
$$
|
| 118 |
+
|
| 119 |
+
In our case, the queries $\mathbf{Q}$, keys $\mathbf{K}$, and values $\mathbf{V}$ are the products of $(\mathbf{P} + \varphi(\mathbf{X}))$ and the corresponding projection matrices. By virtue of the superior information integration of self-attention, the proposed Transformer bridging layer can modulate the nighttime object features output by the backbone into features that yield effective similarity maps.
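A minimal PyTorch sketch of the bridging layer described by Eqs. (3)-(4) follows; the channel count, head count, FFN width, and the identity stand-in for the Mod layer of [4] are illustrative assumptions rather than the authors' implementation.

```python
import torch
import torch.nn as nn

class BridgingLayer(nn.Module):
    """Sketch of the Transformer bridging layer (Eqs. (3)-(4)).

    Operates on a single (N, H, W) feature map for clarity; sizes are
    illustrative, and Mod [4] is replaced by an identity for brevity.
    """
    def __init__(self, n=256, heads=8, ffn_dim=1024):
        super().__init__()
        self.msa = nn.MultiheadAttention(n, heads)   # realizes Eq. (4) per head
        self.ln1, self.ln2 = nn.LayerNorm(n), nn.LayerNorm(n)
        self.ffn = nn.Sequential(nn.Linear(n, ffn_dim), nn.ReLU(),
                                 nn.Linear(ffn_dim, n))
        self.mod = nn.Identity()                     # stand-in for Mod [4]

    def forward(self, feat, pos):
        # feat, pos: (N, H, W) feature map and positional encoding P
        n, h, w = feat.shape
        x = (feat + pos).flatten(1).permute(1, 0).unsqueeze(1)   # (HW, 1, N)
        x1 = self.msa(x, x, x)[0] + x                            # first line of Eq. (3)
        out = self.ln2(self.ffn(self.mod(self.ln1(x1))) + x1)    # second line of Eq. (3)
        return out.squeeze(1).permute(1, 0).reshape(n, h, w)     # back to N x H x W
```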
|
| 120 |
+
|
| 121 |
+

|
| 122 |
+
Figure 4. Feature visualization by t-SNE [42] of similar day/night scenes. Gold and purple indicate the source and target domains, respectively. The scattergrams from top to bottom depict day/night features from the feature extractor of the pre-trained tracker, the feature extractor of the domain-adaptive tracker, and the bridging layer. The proposed Transformer bridging layer effectively narrows the domain discrepancy.
|
| 123 |
+
|
| 124 |
+
Remark 1: Figure 4 displays the t-SNE [42] visualizations of features from the feature extractor of the baseline, the feature extractor of the domain-adaptive tracker, and the bridging layer. We can observe that features extracted by the backbones show a clear discrepancy, while those modulated by the bridging layer coincide in distribution.
|
| 125 |
+
|
| 126 |
+
Transformer discriminator. The proposed UDAT framework is trained in an adversarial learning manner. A day/night feature discriminator D is designed to facilitate aligning the source and target domain features, which consists of a gradient reverse layer (GRL) [13] and two Transformer layers. Given the modulated feature map $\widehat{\varphi(\mathbf{X})}$ , the softmax function is performed and followed by a GRL:
|
| 127 |
+
|
| 128 |
+
$$
|
| 129 |
+
\mathbf{F} = \operatorname{GRL}\left(\operatorname{Softmax}\left(\widehat{\varphi(\mathbf{X})}\right)\right). \tag{5}
|
| 130 |
+
$$
|
| 131 |
+
|
| 132 |
+
The intermediate feature $\mathbf{F} \in \mathbb{R}^{N \times H \times W}$ is then passed through a convolution layer with a kernel size of $4 \times 4$ and stride of 4 for patch embedding. $\mathbf{F}$ is then flattened to $\left(\frac{H}{4} \times \frac{W}{4}\right) \times N$ and concatenated with a classification token $\mathbf{c}$ as:
|
| 133 |
+
|
| 134 |
+
$$
|
| 135 |
+
\mathbf{F}' = \operatorname{Concat}\left(\mathbf{c}, \operatorname{Flat}(\operatorname{Conv}(\mathbf{F}))\right). \tag{6}
|
| 136 |
+
$$
|
| 137 |
+
|
| 138 |
+
Afterward, $\mathbf{F}'$ is input to two Transformer layers. Ultimately, the classification token $\mathbf{c}$ is regarded as the final prediction. In the adversarial learning process, the discriminator is optimized to correctly distinguish whether features come from the source domain or the target domain.
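The sketch below illustrates one way to realize this discriminator (GRL, the softmax and patch embedding of Eqs. (5)-(6), a classification token, and two Transformer layers); the channel dimension, the softmax axis, and the final linear head are assumptions made for illustration.

```python
import torch
import torch.nn as nn

class GradientReversal(torch.autograd.Function):
    """Gradient reverse layer (GRL) [13]: identity forward, negated gradient."""
    @staticmethod
    def forward(ctx, x):
        return x
    @staticmethod
    def backward(ctx, grad):
        return -grad

class TransformerDiscriminator(nn.Module):
    """Sketch of the day/night feature discriminator; sizes illustrative."""
    def __init__(self, n=256, heads=8):
        super().__init__()
        self.embed = nn.Conv2d(n, n, kernel_size=4, stride=4)  # 4x4, stride-4 patch embedding
        self.cls = nn.Parameter(torch.zeros(1, 1, n))          # classification token c
        layer = nn.TransformerEncoderLayer(d_model=n, nhead=heads)
        self.encoder = nn.TransformerEncoder(layer, num_layers=2)
        self.head = nn.Linear(n, 1)                            # domain score from c

    def forward(self, feat):                                   # feat: (B, N, H, W)
        f = GradientReversal.apply(feat.softmax(dim=1))        # Eq. (5); channel axis assumed
        f = self.embed(f).flatten(2).permute(0, 2, 1)          # (B, HW/16, N)
        f = torch.cat([self.cls.expand(f.size(0), -1, -1), f], dim=1)  # Eq. (6)
        f = self.encoder(f.permute(1, 0, 2))                   # two Transformer layers
        return self.head(f[0])                                 # prediction from token c
```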
|
| 139 |
+
|
| 140 |
+
Tracker head. After the Transformer bridging layer, cross-correlation operation is conducted on the modulated features $\widehat{\varphi(\mathbf{X})}$ and $\widehat{\varphi(\mathbf{Z})}$ to generate a similarity map. Finally, the tracker head performs the classification and regression process to predict the object position.
|
| 141 |
+
|
| 142 |
+
# 3.3. Objective functions
|
| 143 |
+
|
| 144 |
+
Classification and regression loss. In the source domain training line, the classification and regression loss $\mathcal{L}_{\mathrm{GT}}$ between
|
| 145 |
+
|
| 146 |
+
the ground truth and the predicted results is applied to ensure the normal tracking ability of trackers. We adopt a tracking loss consistent with the baseline trackers, without modification.
|
| 147 |
+
|
| 148 |
+
Domain adaptation loss. In adversarial learning, the least-square loss function [30] is introduced to train the generator $G$, which aims to generate source domain-like features from target domain images so as to fool the discriminator $D$ while it is frozen. Here the generator $G$ can be regarded as the feature extractor together with the Transformer bridging layer. Considering both the template and search features, the adversarial loss is described as follows:
|
| 149 |
+
|
| 150 |
+
$$
|
| 151 |
+
\mathcal{L}_{\mathrm{adv}} = \left(\mathrm{D}\left(\widehat{\varphi(\mathbf{X}_{\mathrm{t}})}\right) - \ell_{\mathrm{s}}\right)^{2} + \left(\mathrm{D}\left(\widehat{\varphi(\mathbf{Z}_{\mathrm{t}})}\right) - \ell_{\mathrm{s}}\right)^{2}, \tag{7}
|
| 152 |
+
$$
|
| 153 |
+
|
| 154 |
+
where $s$ and $t$ refer to the source and the target domains, respectively. Besides, $\ell_{s}$ denotes the label for the source domain, which has the same size as the output of D. In summary, the total training loss for the tracking network is defined as:
|
| 155 |
+
|
| 156 |
+
$$
|
| 157 |
+
\mathcal{L}_{\mathrm{total}} = \mathcal{L}_{\mathrm{GT}} + \lambda \mathcal{L}_{\mathrm{adv}}, \tag{8}
|
| 158 |
+
$$
|
| 159 |
+
|
| 160 |
+
where $\lambda$ is a weight to balance the loss terms. We set $\lambda$ as 0.01 in implementation.
|
| 161 |
+
|
| 162 |
+
During the training process, the tracking network and discriminator $\mathrm{D}$ are optimized alternatively. We define the loss function of $\mathrm{D}$ as:
|
| 163 |
+
|
| 164 |
+
$$
|
| 165 |
+
\mathcal{L}_{\mathrm{D}} = \sum_{d = \mathrm{s}, \mathrm{t}} \left[\left(\mathrm{D}\left(\widehat{\varphi(\mathbf{X}_{d})}\right) - \ell_{d}\right)^{2} + \left(\mathrm{D}\left(\widehat{\varphi(\mathbf{Z}_{d})}\right) - \ell_{d}\right)^{2}\right]. \tag{9}
|
| 166 |
+
$$
|
| 167 |
+
|
| 168 |
+
Trained with true domain labels of input features, D learns to discriminate feature domains efficiently.
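To make the alternating optimization concrete, here is a minimal PyTorch-style sketch of the least-squares losses in Eqs. (7) and (9); the scalar domain labels and the helper name `least_squares_adv_losses` are illustrative assumptions, not the authors' code.

```python
import torch

def least_squares_adv_losses(disc, feats_s, feats_t, label_s=1.0, label_t=0.0):
    """Sketch of the least-squares adversarial losses (Eqs. (7) and (9)).

    feats_s / feats_t are the bridged (template, search) features for the
    source and target domains; scalar labels stand in for the label maps.
    """
    z_s, x_s = feats_s
    z_t, x_t = feats_t
    # Eq. (7): the generator pushes target-domain features toward the
    # source label to fool the (frozen) discriminator.
    l_adv = (disc(x_t) - label_s).pow(2).mean() \
          + (disc(z_t) - label_s).pow(2).mean()
    # Eq. (9): the discriminator is trained with the true domain labels.
    l_d = sum((disc(f) - lab).pow(2).mean()
              for f, lab in [(x_s, label_s), (z_s, label_s),
                             (x_t, label_t), (z_t, label_t)])
    return l_adv, l_d
```

Per Eq. (8), `l_adv` would then be combined with the tracking loss as $\mathcal{L}_{\mathrm{total}} = \mathcal{L}_{\mathrm{GT}} + 0.01\,\mathcal{L}_{\mathrm{adv}}$, while `l_d` updates the discriminator in the alternating step.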
|
| 169 |
+
|
| 170 |
+
# 4. NAT2021 benchmark
|
| 171 |
+
|
| 172 |
+
The nighttime aerial tracking benchmark, namely NAT2021, is developed to give a comprehensive performance evaluation of nighttime aerial tracking and to provide adequate unlabelled nighttime tracking videos for unsupervised training. Compared to the existing nighttime tracking benchmark [21] in the literature, NAT2021 offers a test set twice as large, an unlabelled train set, and novel illumination-oriented attributes.
|
| 173 |
+
|
| 174 |
+
# 4.1. Sequence collection
|
| 175 |
+
|
| 176 |
+
Images in NAT2021 are captured in diverse nighttime scenes (e.g., roads, urban landscapes, and campuses) by a DJI Mavic Air 2 UAV $^{1}$ at a frame rate of 30 frames/s. Sequence categories cover a wide variety of targets (e.g., cars, trucks, persons, groups, buses, buildings, and motorcycles) and activities (e.g., cycling, skating, running, and ball
|
| 177 |
+
|
| 178 |
+

|
| 179 |
+
Figure 5. First frames of selected sequences from NAT2021-test. The green boxes mark the tracking objects, while the sequence names are shown in the top-left corner of the images.
|
| 180 |
+
|
| 181 |
+
games). Consequently, the test set, namely NAT2021-test, contains 180 nighttime aerial tracking sequences with more than $140\mathrm{k}$ frames in total. Figure 5 displays the first frames of some selected sequences. In order to provide an evaluation of long-term tracking performance, we further build a long-term tracking subset, namely NAT2021- $L$ -test, consisting of 23 sequences that are longer than 1,400 frames. Moreover, the train set involves 1,400 unlabelled sequences with over $276\mathrm{k}$ frames in total, which is adequate for the domain adaptive tracking task. A statistical summary of NAT2021 is presented in Tab. 1.
|
| 182 |
+
|
| 183 |
+
Remark 2: Sequences in NAT2021 are recorded by ourselves using UAVs with permission. No personally identifiable information or offensive content is involved.
|
| 184 |
+
|
| 185 |
+
# 4.2. Annotation
|
| 186 |
+
|
| 187 |
+
The frames in NAT2021-test and NAT2021- $L$ -test are all manually annotated by annotators familiar with object tracking. For accuracy, the annotation process is conducted on images enhanced by a low-light enhancement approach [24]. Afterward, visual inspection by experts and bounding box refinement by annotators are conducted iteratively for several rounds to ensure high-quality annotation.
|
| 188 |
+
|
| 189 |
+
# 4.3. Attributes
|
| 190 |
+
|
| 191 |
+
To give an in-depth analysis of trackers, we annotate the test sequences into 12 different attributes, including aspect ratio change (ARC), background clutter (BC), camera motion (CM), fast motion (FM), partial occlusion (OCC), full
|
| 192 |
+
|
| 193 |
+
Table 1. Statistics of NAT2021. test: test set; L-test: long-term tracking test set; train: train set.
|
| 194 |
+
|
| 195 |
+
<table><tr><td></td><td>NAT2021-test</td><td>NAT2021-L-test</td><td>NAT2021-train</td></tr><tr><td>Videos</td><td>180</td><td>23</td><td>1,400</td></tr><tr><td>Total frames</td><td>140,815</td><td>53,564</td><td>276,081</td></tr><tr><td>Min frames</td><td>81</td><td>1,425</td><td>30</td></tr><tr><td>Max frames</td><td>1,795</td><td>3,866</td><td>345</td></tr><tr><td>Avg. frames</td><td>782</td><td>2,329</td><td>197</td></tr><tr><td>Manual annotation</td><td>✓</td><td>✓</td><td></td></tr></table>
|
| 196 |
+
|
| 197 |
+

|
| 198 |
+
Ambient intensity: 7
|
| 199 |
+
Figure 6. Ambient intensity of some scenarios. With an average ambient intensity of less than 20, objects are hard to distinguish with the naked eye. Such sequences are annotated with the low ambient intensity attribute.
|
| 200 |
+
|
| 201 |
+

|
| 202 |
+
Ambient intensity: 12
|
| 203 |
+
|
| 204 |
+

|
| 205 |
+
Ambient intensity: 26
|
| 206 |
+
|
| 207 |
+

|
| 208 |
+
Ambient intensity: 36
|
| 209 |
+
|
| 210 |
+
occlusion (FOC), out-of-view (OV), scale variation (SV), similar object (SOB), viewpoint change (VC), illumination variation (IV), and low ambient intensity (LAI). In particular, to take a closer look at how illumination influences trackers, we rethink and redesign the illumination-related attributes. Concretely, the average pixel intensity of the local region centered at the object is computed and regarded as the illuminance intensity of the current frame. The average illuminance level of a sequence is considered the ambient intensity of the tracking scene. Sequences with different ambient intensities are displayed in Fig. 6; we observe that objects are hard to distinguish with the naked eye when the ambient intensity is less than 20. Therefore, such sequences are labelled with the LAI attribute.
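The following is a minimal sketch of this ambient-intensity measure; the size of the local region (`margin`) and its rectangular shape are illustrative assumptions, since the text does not specify them.

```python
import numpy as np

def ambient_intensity(frames_gray, boxes, margin=2.0):
    """Average illuminance of a sequence, as used for the LAI attribute.

    For each frame, the mean pixel intensity of a local region centered on
    the annotated object box (x, y, w, h) is the frame's illuminance; the
    sequence average is its ambient intensity.
    """
    levels = []
    for img, (x, y, w, h) in zip(frames_gray, boxes):
        cx, cy = x + w / 2, y + h / 2                 # object center
        rw, rh = margin * w / 2, margin * h / 2       # local region half-extent
        x0, x1 = int(max(cx - rw, 0)), int(min(cx + rw, img.shape[1]))
        y0, y1 = int(max(cy - rh, 0)), int(min(cy + rh, img.shape[0]))
        levels.append(img[y0:y1, x0:x1].mean())
    return float(np.mean(levels))

# A sequence is labelled LAI when its ambient intensity is below 20.
```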
|
| 211 |
+
|
| 212 |
+
Remark 3: In contrast to annotating the attribute of IV intuitively as previous tracking benchmarks do, this work judges IV according to the maximum difference of the illuminance intensity across a tracking sequence. More details of the attributes can be found in supplementary material.
|
| 213 |
+
|
| 214 |
+
Moreover, we evaluate current top-ranked trackers on the proposed benchmark (see Sec. 5.2); the results show that SOTA trackers can hardly match, from a nighttime aerial view, the satisfactory performance they achieve on daytime benchmarks.
|
| 215 |
+
|
| 216 |
+
# 5. Experiments
|
| 217 |
+
|
| 218 |
+
# 5.1. Implementation details
|
| 219 |
+
|
| 220 |
+
We implement our UDAT framework using PyTorch on an NVIDIA RTX A6000 GPU. The discriminator is trained using the Adam [20] optimizer. The base learning rate is set to 0.005 and is decayed following the poly learning rate policy with a power of 0.8. The bridging layer adopts a base learning rate of 0.005 and is optimized jointly with the baseline tracker. The whole training process lasts 20 epochs. The top-ranked trackers [8, 14] on the proposed benchmark are adopted as baselines. To achieve faster convergence, tracking models pre-trained on general datasets [10, 17, 28, 32, 34] serve as the baseline models. For fairness, only the tracking datasets [17, 32] on which the pre-trained models were trained are adopted, and no new datasets are introduced in the source domain. We adopt the one-pass evaluation (OPE) and rank performances using success rate, precision, and normalized
|
| 221 |
+
|
| 222 |
+
precision. Evaluation metric definitions and more experiments can be found in the supplementary material.
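For reference, the poly learning rate policy mentioned above is commonly defined as below; the exact formula is not spelled out in the text, so this sketch assumes the standard form.

```python
def poly_lr(base_lr, epoch, max_epochs=20, power=0.8):
    # Standard poly decay: lr shrinks from base_lr toward 0 over max_epochs.
    return base_lr * (1.0 - epoch / max_epochs) ** power

# With base_lr = 0.005 over 20 epochs, e.g. poly_lr(0.005, 10) ~= 0.0029.
```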
|
| 223 |
+
|
| 224 |
+
# 5.2. Evaluation results
|
| 225 |
+
|
| 226 |
+
To give an exhaustive analysis of trackers in nighttime aerial tracking and facilitate future research, 20 SOTA trackers [1,4,5,8,11,14,15,22,29,39,44,47,51,53,54,56] are evaluated on NAT2021-test, along with the proposed UDAT. For clarity, two trackers further trained by UDAT are named UDAT-BAN and UDAT-CAR, respectively. Moreover, UAVDark70 [21] contains 70 nighttime tracking sequences with 66k frames in total, which can also serve as an evaluation benchmark.
|
| 227 |
+
|
| 228 |
+
# 5.2.1 Overall performance
|
| 229 |
+
|
| 230 |
+
NAT2021-test. As shown in Fig. 7 (a), the proposed UDAT-BAN and UDAT-CAR rank in the first two places, ahead of their baselines by a large margin. A performance comparison of UDAT and the baseline trackers is reported in Tab. 2. Specifically, UDAT promotes SiamBAN by over $7\%$ on all three metrics. In success rate, UDAT-BAN (0.469) and UDAT-CAR (0.483) raise the original SiamBAN (0.437) and SiamCAR (0.453) by $7.32\%$ and $6.62\%$ , respectively.
|
| 231 |
+
|
| 232 |
+
UAVDark70. Results in Fig. 7 (b) demonstrate that the performance of existing trackers is still unsatisfactory. UDAT trackers raise the performance of their baselines by $\sim 4\%$ . Note that the data distribution in UAVDark70 is fairly different from that in NAT2021, yet UDAT still brings favorable performance gains, which demonstrates its generalization ability across varied nighttime conditions.
|
| 233 |
+
|
| 234 |
+
Gains brought by UDAT for different trackers on different benchmarks verify the effectiveness and transferability of the proposed domain adaptation framework.
|
| 235 |
+
|
| 236 |
+
# 5.2.2 Long-term tracking evaluation
|
| 237 |
+
|
| 238 |
+
As one of the most common scenarios in aerial tracking, long-term tracking involves multiple challenging attributes. We further assess trackers on NAT2021-L-test. The top-10 performances are reported in Tab. 3. The results show that UDAT realizes competitive long-term tracking performance, considerably improving upon the baseline trackers.
|
| 239 |
+
|
| 240 |
+
Table 2. Performance comparison of UDAT and baseline trackers. $\Delta$ denotes gains of percentages brought by UDAT.
|
| 241 |
+
|
| 242 |
+
<table><tr><td rowspan="2">Benchmarks</td><td colspan="4">NAT2021-test</td><td colspan="2">UAVDark70</td></tr><tr><td>Prec.</td><td>Norm. Prec.</td><td>Succ.</td><td>Prec.</td><td>Norm. Prec.</td><td>Succ.</td></tr><tr><td>SiamCAR</td><td>0.663</td><td>0.542</td><td>0.453</td><td>0.669</td><td>0.580</td><td>0.491</td></tr><tr><td>UDAT-CAR</td><td>0.687</td><td>0.564</td><td>0.483</td><td>0.695</td><td>0.592</td><td>0.512</td></tr><tr><td>ΔCAR (%)</td><td>3.62</td><td>4.06</td><td>6.62</td><td>3.89</td><td>2.07</td><td>4.28</td></tr><tr><td>SiamBAN</td><td>0.647</td><td>0.509</td><td>0.437</td><td>0.677</td><td>0.570</td><td>0.489</td></tr><tr><td>UDAT-BAN</td><td>0.694</td><td>0.546</td><td>0.469</td><td>0.702</td><td>0.597</td><td>0.510</td></tr><tr><td>ΔBAN (%)</td><td>7.26</td><td>7.27</td><td>7.32</td><td>3.69</td><td>4.74</td><td>4.29</td></tr></table>
|
| 243 |
+
|
| 244 |
+

|
| 245 |
+
(a) Precision, normalized precision, and success plots on NAT2021-test.
|
| 246 |
+
|
| 247 |
+

|
| 248 |
+
|
| 249 |
+

|
| 250 |
+
|
| 251 |
+

|
| 252 |
+
(b) Precision, normalized precision, and success plots on UAVDark70.
|
| 253 |
+
|
| 254 |
+

|
| 255 |
+
Figure 7. Overall performance of SOTA trackers and UDAT on nighttime aerial tracking benchmarks. The results show that the proposed UDAT trackers realize top-ranked performance and improve baseline trackers favorably.
|
| 256 |
+
|
| 257 |
+

|
| 258 |
+
|
| 259 |
+

|
| 260 |
+
|
| 261 |
+

|
| 262 |
+
|
| 263 |
+

|
| 264 |
+
|
| 265 |
+

|
| 266 |
+
|
| 267 |
+

|
| 268 |
+
(a) Illumination variation on NAT2021-test.
|
| 269 |
+
|
| 270 |
+

|
| 271 |
+
(c) Illumination variation on UAVDark70.
|
| 272 |
+
|
| 273 |
+

|
| 274 |
+
(b) Low ambient intensity on NAT2021-test.
|
| 275 |
+
(d) Low ambient intensity on UAVDark70.
|
| 276 |
+
Figure 8. Normalized precision plots and success plots of illumination-related attributes on NAT2021-test and UAVDark70.
|
| 277 |
+
|
| 278 |
+

|
| 279 |
+
|
| 280 |
+
Table 3. Performance of top-10 trackers on NAT2021-L-test. $\Delta$ represents the percentages of UDAT trackers exceeding the corresponding baselines. The top-2 performance is emphasized with bold font. UDAT trackers yield competitive long-term tracking performance.
|
| 281 |
+
|
| 282 |
+
<table><tr><td>Trackers</td><td>HiFT [4]</td><td>SiamFC++ [47]</td><td>Ocean [54]</td><td>SiamRPN++ [22]</td><td>UpdateNet [51]</td><td>D3S [29]</td><td>SiamBAN [8]</td><td>SiamCAR [14]</td><td>UDAT-BAN</td><td>UDAT-CAR</td><td>ΔBAN(%)</td><td>ΔCAR(%)</td></tr><tr><td>Prec.</td><td>0.433</td><td>0.425</td><td>0.454</td><td>0.431</td><td>0.434</td><td>0.492</td><td>0.464</td><td>0.477</td><td>0.496</td><td>0.506</td><td>6.94</td><td>5.99</td></tr><tr><td>Norm. Prec.</td><td>0.316</td><td>0.344</td><td>0.370</td><td>0.342</td><td>0.314</td><td>0.364</td><td>0.366</td><td>0.375</td><td>0.406</td><td>0.413</td><td>11.01</td><td>9.96</td></tr><tr><td>Succ.</td><td>0.287</td><td>0.297</td><td>0.315</td><td>0.299</td><td>0.275</td><td>0.332</td><td>0.316</td><td>0.330</td><td>0.352</td><td>0.376</td><td>11.51</td><td>14.25</td></tr></table>
|
| 283 |
+
|
| 284 |
+
# 5.2.3 Illumination-oriented evaluation
|
| 285 |
+
|
| 286 |
+
Since the greatest difference between daytime and nighttime tracking is illumination intensity, we perform an in-depth illumination-oriented evaluation for a better analysis of illumination influence on trackers. The results are shown in Fig. 8. Note that we additionally annotate sequences in UAVDark70 with the proposed LAI attribute. The results show that existing trackers suffer from illumination-related
|
| 287 |
+
|
| 288 |
+
attributes. For the IV challenge, the best success rates of existing trackers are 0.408 on NAT2021-test and 0.468 on UAVDark70. Assisted by the proposed domain adaptive training, UDAT-CAR realizes success rates of 0.442 and 0.485, respectively, clearly improving upon the existing best performance. As for LAI, UDAT-BAN raises the normalized precision of its baseline SiamBAN by over $9\%$ on both benchmarks. From the comparison, we can see that
|
| 289 |
+
|
| 290 |
+
trackers' illumination-related performance leaves large room for improvement and that adopting domain adaptation in adverse illumination scenes is effective and crucial.
|
| 291 |
+
|
| 292 |
+
# 5.2.4 Visualization
|
| 293 |
+
|
| 294 |
+
As shown in Fig. 9, we visualize some confidence maps of UDAT and its baseline using Grad-CAM [38]. The baseline model fails to concentrate on objects in adverse illuminance, while UDAT substantially enhances the baseline's nighttime perception ability, thus yielding satisfactory nighttime tracking performance.
|
| 295 |
+
|
| 296 |
+
# 5.2.5 Source domain evaluation
|
| 297 |
+
|
| 298 |
+
Apart from favorable performance at nighttime, we expect that trackers do not suffer degradation on the source domain during adaptation. Evaluation on the daytime tracking benchmark UAV123 [31] is shown in Tab. 4. The results show that UDAT brings only slight performance fluctuation, within $2\%$ in success rate and $0.5\%$ in precision.
|
| 299 |
+
|
| 300 |
+
# 5.3. Empirical study
|
| 301 |
+
|
| 302 |
+
To demonstrate the effectiveness of the proposed modules, i.e., domain adaptive training (DA), object discovery preprocessing (OD), and the bridging layer (BL), this subsection provides empirical studies of UDAT. Concretely, we first ablate BL and substitute OD with random cropping to apply naive DA to the baseline tracker. The results in the second row of Tab. 5 show that DA alone promotes nighttime tracking only slightly, with a small upgrade in success rate. However, adopting random cropping as preprocessing yields abundant meaningless training samples, so the model can hardly learn the data distribution of the target domain. In that case, further activating BL only makes a limited difference. As shown in the fourth row of Tab. 5, when
|
| 303 |
+
|
| 304 |
+
Table 4. Evaluation on the source domain. The results show the adaptation only brings slight performance fluctuation on the source domain.
|
| 305 |
+
|
| 306 |
+
<table><tr><td>Trackers</td><td>SiamBAN</td><td>UDAT-BAN</td><td>SiamCAR</td><td>UDAT-CAR</td></tr><tr><td>Succ.</td><td>0.603</td><td>0.591 (1.96%↓)</td><td>0.601</td><td>0.592 (1.58%↓)</td></tr><tr><td>Prec.</td><td>0.788</td><td>0.784 (0.52%↓)</td><td>0.793</td><td>0.793 (0.04%↓)</td></tr></table>
|
| 307 |
+
|
| 308 |
+
Table 5. Empirical Study of the proposed UDAT on NAT2021-test. DA, OD, and BL denote domain adaptive training, object discovery preprocessing, and bridging layer, respectively.
|
| 309 |
+
|
| 310 |
+
<table><tr><td>DA</td><td>OD</td><td>BL</td><td>Prec.</td><td>Norm. Prec.</td><td>Succ.</td></tr><tr><td></td><td></td><td></td><td>0.663</td><td>0.542</td><td>0.453</td></tr><tr><td>✓</td><td></td><td></td><td>0.662 (0.19%↓)</td><td>0.540 (0.33%↓)</td><td>0.459 (1.33%↑)</td></tr><tr><td>✓</td><td></td><td>✓</td><td>0.664 (0.16%↑)</td><td>0.547 (1.04%↑)</td><td>0.464 (2.45%↑)</td></tr><tr><td>✓</td><td>✓</td><td></td><td>0.676 (1.95%↑)</td><td>0.549 (1.42%↑)</td><td>0.467 (3.24%↑)</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>0.687 (3.62%↑)</td><td>0.564 (4.17%↑)</td><td>0.483 (6.82%↑)</td></tr></table>
|
| 311 |
+
|
| 312 |
+

|
| 313 |
+
|
| 314 |
+

|
| 315 |
+
|
| 316 |
+

|
| 317 |
+
|
| 318 |
+

|
| 319 |
+
|
| 320 |
+

|
| 321 |
+
|
| 322 |
+

|
| 323 |
+
|
| 324 |
+

|
| 325 |
+
Frames
|
| 326 |
+
|
| 327 |
+

|
| 328 |
+
SiamCAR
|
| 329 |
+
|
| 330 |
+

|
| 331 |
+
UDAT-CAR
|
| 332 |
+
Figure 9. Visual comparison of confidence maps generated by the baseline and the proposed UDAT. Target objects are marked by green boxes. The baseline struggles to extract discriminable features in dim light. UDAT substantially raises the perception ability of baseline in adverse illuminance.
|
| 333 |
+
|
| 334 |
+
employing OD instead of random cropping, performance on the target domain obtains a $3.24\%$ boost in success rate, which verifies the effectiveness of the proposed saliency detection-based data preprocessing. Further, BL doubles the improvement brought by OD; the complete UDAT realizes a precision of 0.687 and a success rate of 0.483, achieving favorable nighttime tracking performance. The results verify that the proposed bridging layer effectively enables the tracker to generate discriminative features from nighttime images.
|
| 335 |
+
|
| 336 |
+
# 6. Conclusion
|
| 337 |
+
|
| 338 |
+
In this work, a simple but effective unsupervised domain adaptive tracking framework, namely UDAT, is proposed for nighttime aerial tracking. In our UDAT, an object discovery strategy is introduced for unlabelled data preprocessing. The Transformer bridging layer is adopted to narrow the gap of image features between daytime and nighttime. Optimized through adversarial learning with a Transformer discriminator, the learned model substantially improves nighttime tracking performance over SOTA approaches. We also construct NAT2021, a pioneering benchmark for unsupervised domain adaptive nighttime tracking. Detailed evaluation on nighttime tracking benchmarks shows the effectiveness and domain adaptability of UDAT. The limitation of this work lies in the absence of pseudo supervision in the target domain. Future work will focus on reliable pseudo supervision, with which we believe the performance of nighttime tracking can be further improved. To sum up, we are convinced that the UDAT framework, along with the NAT2021 benchmark, can facilitate research on visual tracking at nighttime and in other adverse conditions.
|
| 339 |
+
|
| 340 |
+
Acknowledgement: This work was supported in part by the National Natural Science Foundation of China under Grant 62173249 and in part by the Natural Science Foundation of Shanghai under Grant 20ZR1460100.
|
| 341 |
+
|
| 342 |
+
# References
|
| 343 |
+
|
| 344 |
+
[1] Luca Bertinetto, Jack Valmadre, João F. Henriques, Andrea Vedaldi, and Philip H. S. Torr. Fully-Convolutional Siamese Networks for Object Tracking. In ECCVW, pages 850–865, 2016. 2, 6
|
| 345 |
+
[2] Rogerio Bonatti, Cherie Ho, Wenshan Wang, Sanjiban Choudhury, and Sebastian Scherer. Towards a Robust Aerial Cinematography Platform: Localizing and Tracking Moving Targets in Unstructured Environments. In IROS, pages 229-236, 2019. 1
|
| 346 |
+
[3] Pau Panareda Busto and Juergen Gall. Open Set Domain Adaptation. In ICCV, pages 754-763, 2017. 2
|
| 347 |
+
[4] Ziang Cao, Changhong Fu, Junjie Ye, Bowen Li, and Yiming Li. HiFT: Hierarchical Feature Transformer for Aerial Tracking. In ICCV, pages 15437-15446, 2021. 1, 2, 4, 6, 7
|
| 348 |
+
[5] Ziang Cao, Changhong Fu, Junjie Ye, Bowen Li, and Yiming Li. SiamAPN++: Siamese Attentional Aggregation Network for Real-Time UAV Tracking. In IROS, pages 3086-3092, 2021. 6
|
| 349 |
+
[6] Xin Chen, Bin Yan, Jiawen Zhu, Dong Wang, Xiaoyun Yang, and Huchuan Lu. Transformer Tracking. In CVPR, pages 8126-8135, 2021. 2
|
| 350 |
+
[7] Yuhua Chen, Wen Li, Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Domain Adaptive Faster R-CNN for Object Detection in the Wild. In CVPR, pages 3339-3348, 2018. 2
|
| 351 |
+
[8] Zedu Chen, Bineng Zhong, Guorong Li, Shengping Zhang, and Rongrong Ji. Siamese Box Adaptive Network for Visual Tracking. In CVPR, pages 6667-6676, 2020. 1, 2, 6, 7
|
| 352 |
+
[9] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. ImageNet: A Large-Scale Hierarchical Image Database. In CVPR, pages 248–255, 2009. 1
|
| 353 |
+
[10] Heng Fan, Hexin Bai, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Harshit, Mingzhen Huang, Juehuan Liu, Yong Xu, Chunyuan Liao, Lin Yuan, and Haibin Ling. LaSOT: A High-quality Large-scale Single Object Tracking Benchmark. IJCV, 129:439-461, 2021. 1, 6
|
| 354 |
+
[11] Changhong Fu, Ziang Cao, Yiming Li, Junjie Ye, and Chen Feng. Siamese Anchor Proposal Network for High-Speed Aerial Tracking. In ICRA, pages 510-516, 2021. 6
|
| 355 |
+
[12] Hamed Kiani Galoogahi, Ashton Fagg, and Simon Lucey. Learning Background-Aware Correlation Filters for Visual Tracking. In ICCV, pages 1144-1152, 2017. 2
|
| 356 |
+
[13] Yaroslav Ganin and Victor Lempitsky. Unsupervised Domain Adaptation by Backpropagation. In ICML, volume 37, pages 1180-1189, 2015. 4
|
| 357 |
+
[14] Dongyan Guo, Jun Wang, Ying Cui, Zhenhua Wang, and Shengyong Chen. SiamCAR: Siamese Fully Convolutional Classification and Regression for Visual Tracking. In CVPR, pages 6268-6276, 2020. 1, 2, 6, 7
|
| 358 |
+
[15] Qing Guo, Wei Feng, Ce Zhou, Rui Huang, Liang Wan, and Song Wang. Learning Dynamic Siamese Network for Visual Object Tracking. In ICCV, pages 1781-1789, 2017. 6
|
| 359 |
+
[16] João F. Henriques, Rui Caseiro, Pedro Martins, and Jorge Batista. High-Speed Tracking with Kernelized Correlation Filters. IEEE TPAMI, 37(3):583-596, 2015. 2
|
| 360 |
+
|
| 361 |
+
[17] Lianghua Huang, Xin Zhao, and Kaiqi Huang. GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild. IEEE TPAMI, 43(5):1562–1577, 2021. 1, 6
|
| 362 |
+
[18] Sheng-Wei Huang, Che-Tsung Lin, Shu-Ping Chen, Yen-Yi Wu, Po-Hao Hsu, and Shang-Hong Lai. AugGAN: Cross Domain Adaptation with GAN-based Data Augmentation. In ECCV, page 731-744, 2018. 2
|
| 363 |
+
[19] Ziyuan Huang, Changhong Fu, Yiming Li, Fuling Lin, and Peng Lu. Learning Aberrance Repressed Correlation Filters for Real-Time UAV Tracking. In ICCV, pages 2891-2900, 2019. 2
|
| 364 |
+
[20] Diederik P Kingma and Jimmy Ba. Adam: A Method for Stochastic Optimization. In ICLR, pages 1-11, 2015. 6
|
| 365 |
+
[21] Bowen Li, Changhong Fu, Fangqiang Ding, Junjie Ye, and Fuling Lin. ADTrack: Target-Aware Dual Filter Learning for Real-Time Anti-Dark UAV Tracking. In ICRA, pages 496-502, 2021. 2, 5, 6
|
| 366 |
+
[22] Bo Li, Wei Wu, Qiang Wang, Fangyi Zhang, Junliang Xing, and Junjie Yan. SiamRPN++: Evolution of Siamese Visual Tracking With Very Deep Networks. In CVPR, pages 4277-4286, 2019. 1, 2, 6, 7
|
| 367 |
+
[23] Bo Li, Junjie Yan, Wei Wu, Zheng Zhu, and Xiaolin Hu. High Performance Visual Tracking with Siamese Region Proposal Network. In CVPR, pages 8971-8980, 2018. 2
|
| 368 |
+
[24] Chongyi Li, Chunle Guo, and Chen Change Loy. Learning to Enhance Low-Light Image via Zero-Reference Deep Curve Estimation. IEEE TPAMI, pages 1–14, 2021. 3, 5
|
| 369 |
+
[25] Rui Li, Minjian Pang, Cong Zhao, Guyue Zhou, and Lu Fang. Monocular Long-Term Target Following on UAVs. In CVPRW, pages 29-37, 2016. 1
|
| 370 |
+
[26] Wen Li, Zheng Xu, Dong Xu, Dengxin Dai, and Luc Van Gool. Domain Generalization and Adaptation Using Low Rank Exemplar SVMs. IEEE TPAMI, 40(5):1114-1127, 2018. 2
|
| 371 |
+
[27] Yiming Li, Changhong Fu, Fangqiang Ding, Ziyuan Huang, and Geng Lu. AutoTrack: Towards High-Performance Visual Tracking for UAV With Automatic Spatio-Temporal Regularization. In CVPR, pages 11920–11929, 2020. 2
|
| 372 |
+
[28] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. Microsoft COCO: Common Objects in Context. In ECCV, pages 740-755, 2014. 6
|
| 373 |
+
[29] Alan Lukežić, Jiří Matas, and Matej Kristan. D3S – A Discriminative Single Shot Segmentation Tracker. In CVPR, pages 7131–7140, 2020. 6, 7
|
| 374 |
+
[30] Xudong Mao, Qing Li, Haoran Xie, Raymond Y.K. Lau, Zhen Wang, and Stephen Paul Smolley. Least Squares Generative Adversarial Networks. In ICCV, pages 2813-2821, 2017. 5
|
| 375 |
+
[31] Matthias Mueller, Neil Smith, and Bernard Ghanem. A Benchmark and Simulator for UAV Tracking. In ECCV, pages 445-461, 2016. 8
|
| 376 |
+
[32] Esteban Real, Jonathon Shlens, Stefano Mazzocchi, Xin Pan, and Vincent Vanhoucke. YouTube-BoundingBoxes: A Large High-Precision Human-Annotated Data Set for Object Detection in Video. In CVPR, pages 7464-7473, 2017. 1, 6
|
| 377 |
+
|
| 378 |
+
[33] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks. IEEE TPAMI, 39(6):1137-1149, 2017. 2
|
| 379 |
+
[34] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet Large Scale Visual Recognition Challenge. IJCV, 115(3):211-252, 2015. 6
|
| 380 |
+
[35] Suman Saha, Anton Obukhov, Danda Pani Paudel, Menelaos Kanakis, Yuhua Chen, Stamatios Georgoulis, and Luc Van Gool. Learning To Relate Depth and Semantics for Unsupervised Domain Adaptation. In CVPR, pages 8197-8207, 2021. 2
|
| 381 |
+
[36] Christos Sakaridis, Dengxin Dai, and Luc Van Gool. Map-Guided Curriculum Domain Adaptation and Uncertainty-Aware Evaluation for Semantic Nighttime Image Segmentation. IEEE TPAMI, pages 1-15, 2020. 2
|
| 382 |
+
[37] Yukihiro Sasagawa and Hajime Nagahara. YOLO in the Dark - Domain Adaptation Method for Merging Multiple Models. In ECCV, pages 345-359, 2020. 2
|
| 383 |
+
[38] Ramprasaath R. Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. Grad-CAM: Visual Explanations from Deep Networks via Gradient-Based Localization. In ICCV, pages 618-626, 2017. 8
|
| 384 |
+
[39] Ivan Sosnovik, Artem Moskalev, and Arnold W.M. Smeulders. Scale Equivalence Improves Siamese Tracking. In WACV, pages 2765-2774, January 2021. 6
|
| 385 |
+
[40] Baochen Sun, Jiashi Feng, and Kate Saenko. Return of Frustratingly Easy Domain Adaptation. In AAAI, pages 2058-2065, 2016. 2
|
| 386 |
+
[41] Ran Tao, Efstratos Gavves, and Arnold W. M. Smeulders. Siamese Instance Search for Tracking. In CVPR, pages 1420-1429, 2016. 2
|
| 387 |
+
[42] Laurens Van der Maaten and Geoffrey Hinton. Visualizing Data Using t-SNE. JMLR, 9(11):2579-2605, 2008. 4
|
| 388 |
+
[43] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention Is All You Need. In NeurIPS, pages 6000-6010, 2017. 2, 4
|
| 389 |
+
[44] Ning Wang, Wengang Zhou, Yibing Song, Chao Ma, Wei Liu, and Houqiang Li. Unsupervised Deep Representation Learning for Real-Time Tracking. IJCV, 129(2):400-418, 2021. 6
|
| 390 |
+
[45] Ning Wang, Wengang Zhou, Jie Wang, and Houqiang Li. Transformer Meets Tracker: Exploiting Temporal Context for Robust Visual Tracking. In CVPR, pages 1571-1580, 2021. 2
|
| 391 |
+
[46] Xinyi Wu, Zhenyao Wu, Hao Guo, Lili Ju, and Song Wang. DANNet: A One-Stage Domain Adaptation Network for Unsupervised Nighttime Semantic Segmentation. In CVPR, pages 15769–15778, 2021. 2
|
| 392 |
+
[47] Yinda Xu, Zeyu Wang, Zuoxin Li, Ye Yuan, and Gang Yu. SiamFC++: Towards Robust and Accurate Visual Tracking with Target Estimation Guidelines. In AAAI, pages 12549-12556, 2020. 2, 6, 7
|
| 393 |
+
|
| 394 |
+
[48] Junjie Ye, Changhong Fu, Ziang Cao, Shan An, Guangze Zheng, and Bowen Li. Tracker Meets Night: A Transformer Enhancer for UAV Tracking. IEEE RA-L, 7(2):3866-3873, 2022. 1, 2
|
| 395 |
+
[49] Junjie Ye, Changhong Fu, Fuling Lin, Fangqiang Ding, Shan An, and Geng Lu. Multi-Regularized Correlation Filter for UAV Tracking and Self-Localization. IEEE TIE, 69(6):6004-6014, 2022. 1
|
| 396 |
+
[50] Junjie Ye, Changhong Fu, Guangze Zheng, Ziang Cao, and Bowen Li. DarkLighter: Light Up the Darkness for UAV Tracking. In IROS, pages 3079-3085, 2021. 1, 2
|
| 397 |
+
[51] Lichao Zhang, Abel Gonzalez-Garcia, Joost Van De Weijer, Martin Danelljan, and Fahad Shahbaz Khan. Learning the Model Update for Siamese Trackers. In ICCV, pages 4009-4018, 2019. 6, 7
|
| 398 |
+
[52] Miao Zhang, Jie Liu, Yifei Wang, Yongri Piao, Shunyu Yao, Wei Ji, Jingjing Li, Huchuan Lu, and Zhongxuan Luo. Dynamic Context-Sensitive Filtering Network for Video Salient Object Detection. In ICCV, pages 1533-1543, 2021. 3
|
| 399 |
+
[53] Zhipeng Zhang and Houwen Peng. Deeper and Wider Siamese Networks for Real-Time Visual Tracking. In CVPR, pages 4586-4595, 2019. 6
|
| 400 |
+
[54] Zhipeng Zhang, Houwen Peng, Jianlong Fu, Bing Li, and Weiming Hu. Ocean: Object-Aware Anchor-Free Tracking. In ECCV, pages 771-787, 2020. 6, 7
|
| 401 |
+
[55] Jilai Zheng, Chao Ma, Houwen Peng, and Xiaokang Yang. Learning to Track Objects from Unlabeled Videos. In ICCV, pages 13526-13535, 2021. 3
|
| 402 |
+
[56] Zheng Zhu, Qiang Wang, Bo Li, Wei Wu, Junjie Yan, and Weiming Hu. Distractor-aware Siamese Networks for Visual Object Tracking. In ECCV, pages 103-119, 2018. 6
|
2203.10xxx/2203.10541/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5e7c2a256d4ba09590463e52fb03c36ef597c14411683eeecb592ca2b39d82ea
|
| 3 |
+
size 808337
|
2203.10xxx/2203.10541/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2203.10xxx/2203.10545/4847a6d1-0ac3-446d-a898-a4ec88a50a0e_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2203.10xxx/2203.10545/4847a6d1-0ac3-446d-a898-a4ec88a50a0e_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2203.10xxx/2203.10545/4847a6d1-0ac3-446d-a898-a4ec88a50a0e_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:588b1acf28cd092f2511147ef1058df23558a92659019726697743d89c5cf34b
|
| 3 |
+
size 1402779
|
2203.10xxx/2203.10545/full.md
ADDED
|
@@ -0,0 +1,435 @@
|
| 1 |
+
# Parallel Instance Query Network for Named Entity Recognition
|
| 2 |
+
|
| 3 |
+
Yongliang Shen $^{1*}$ , Xiaobin Wang $^{2}$ , Zeqi Tan $^{1}$ , Guangwei Xu $^{2}$ , Pengjun Xie $^{2}$ , Fei Huang $^{2}$ , Weiming Lu $^{1\dagger}$ , Yueting Zhuang $^{1}$
|
| 4 |
+
|
| 5 |
+
<sup>1</sup>College of Computer Science and Technology, Zhejiang University
|
| 6 |
+
|
| 7 |
+
$^{2}$ DAMO Academy, Alibaba Group
|
| 8 |
+
|
| 9 |
+
{syl,luwm}@zju.edu.cn
|
| 10 |
+
|
| 11 |
+
xuanjie.wxb@alibaba-inc.com
|
| 12 |
+
|
| 13 |
+
# Abstract
|
| 14 |
+
|
| 15 |
+
Named entity recognition (NER) is a fundamental task in natural language processing. Recent works treat named entity recognition as a reading comprehension task, constructing type-specific queries manually to extract entities. This paradigm suffers from three issues. First, type-specific queries can only extract one type of entities per inference, which is inefficient. Second, the extraction for different types of entities is isolated, ignoring the dependencies between them. Third, query construction relies on external knowledge and is difficult to apply to realistic scenarios with hundreds of entity types. To deal with them, we propose Parallel Instance Query Network (PIQN), which sets up global and learnable instance queries to extract entities from a sentence in a parallel manner. Each instance query predicts one entity, and by feeding all instance queries simultaneously, we can query all entities in parallel. Instead of being constructed from external knowledge, instance queries can learn their different query semantics during training. For training the model, we treat label assignment as a one-to-many Linear Assignment Problem (LAP) and dynamically assign gold entities to instance queries with minimal assignment cost. Experiments on both nested and flat NER datasets demonstrate that our proposed method outperforms previous state-of-the-art models<sup>1</sup>.
|
| 16 |
+
|
| 17 |
+
# 1 Introduction
|
| 18 |
+
|
| 19 |
+
Named Entity Recognition (NER) aims to identify text spans and classify them into specific entity types such as Person, Location, and Organization. It has been widely used in many downstream applications such as entity linking (Ganea and Hofmann, 2017; Le and Titov, 2018) and relation extraction (Li and Ji, 2014;
|
| 20 |
+
|
| 21 |
+

|
| 22 |
+
(a) Type-specific Query
|
| 23 |
+
|
| 24 |
+

|
| 25 |
+
(b) Instance Query
|
| 26 |
+
Figure 1: (a) For a sentence, type-specific queries can only extract entities of one type per inference, so the model needs to be run multiple times. (b) In contrast, instance-based queries can be input into the model simultaneously, and all entities can be extracted in parallel. Furthermore, the parallel manner can model the interactions between entities of different types.
|
| 27 |
+
|
| 28 |
+
Miwa and Bansal, 2016; Shen et al., 2021b). Traditional approaches for NER are based on sequence labeling, assigning a single tag to each word in a sentence. However, the words of nested entities carry more than one tag, so these methods cannot identify nested entities.
|
| 29 |
+
|
| 30 |
+
Recently, Ju et al. (2018); Straková et al. (2019); Wang et al. (2020a) redesign sequence labeling models to support nested structures using different strategies. Instead of labeling each word, Luan et al. (2019); Tan et al. (2020); Li et al. (2021); Shen et al. (2021a) perform a classification task on the text span, and Straková et al. (2019); Paolini et al. (2021); Yan et al. (2021); Tan et al. (2021) treat NER as a sequence generation or set prediction task and design encoder-decoder models to generate entities. More recently, Li et al. (2020b); Mengge et al. (2020); Zheng et al. (2021) reformulate the NER task as a machine reading task and achieve promising performance on both flat and nested datasets. As shown in Figure 1(a), they treat the sentence as context and construct type-specific queries from external knowledge to extract entities. For example, for the sentence "U.S. President Barack Obama and his wife spent eight years
|
| 31 |
+
|
| 32 |
+
in the White House", Li et al. (2020b) constructs the PER-specific query in natural language form - "Find person entity in the text, including a single individual or a group" to extract the PER entities, such as "U.S. President", "Barack Obama". However, since the queries are type-specific, only one type of entities can be extracted for each inference. This manner not only leads to inefficient prediction but also ignores the intrinsic connections between different types of entities, such as "U.S." and "U.S. President". In addition, type-specific queries rely on external knowledge for manual construction, which makes it difficult to fit realistic scenarios with hundreds of entity types.
|
| 33 |
+
|
| 34 |
+
In this paper, we propose the Parallel Instance Query Network (PIQN), where global and learnable instance queries replace type-specific ones to extract entities in parallel. As shown in Figure 1(b), each instance query predicts one entity, and multiple instance queries can be fed simultaneously to predict all entities. Different from previous methods, we do not need external knowledge to construct the query into natural language form. The instance query can learn different query semantics during training, such as position-related or type-related semantics. Since the semantics of instance queries are implicit, we cannot assign gold entities as their labels in advance. To tackle this, we treat label assignment as a one-to-many Linear Assignment Problem (LAP) (Burkard and Cela, 1999), and design a dynamic label assignment mechanism to assign gold entities for instance queries.
|
| 35 |
+
|
| 36 |
+
Our main contributions are as follows:
|
| 37 |
+
|
| 38 |
+
- Different from type-specific queries that require multiple rounds of querying, our model employs instance queries that can extract all entities in parallel. Furthermore, this parallel querying style can model the interactions between entities of different types.
|
| 39 |
+
- Instead of relying on external knowledge to construct queries in natural language form, instance queries learn their query semantics related to entity location and entity type during training.
|
| 40 |
+
- To train the model, we design a dynamic one-to-many label assignment mechanism, where the entities are dynamically assigned as labels for the instance queries during training. The one-to-many manner allows multiple queries
|
| 41 |
+
|
| 42 |
+
to predict the same entity, which can further improve the model performance.
|
| 43 |
+
|
| 44 |
+
- Experiments show that our model achieves state-of-the-art performance consistently on several nested and flat NER datasets.
|
| 45 |
+
|
| 46 |
+
# 2 Related Work
|
| 47 |
+
|
| 48 |
+
Traditional approaches for NER can be divided into three categories, including tagging-based, hypergraph-based and span-based approaches. The typical sequence labeling approach (Huang et al., 2015) predicts labels for each token, and struggles to address nested NER. Some works (Alex et al., 2007; Wang et al., 2020a) adapt the sequence labeling model to nested entity structures by designing a special tagging scheme. Different from decoding on the linear sequence, the hypergraph-based approaches (Lu and Roth, 2015; Muis and Lu, 2017; Katiyar and Cardie, 2018) construct hypergraphs based on the entity nesting structure and decode entities on the hypergraph. Span-based methods first extract spans by enumeration (Sohrab and Miwa, 2018; Luan et al., 2019) or boundary identification (Zheng et al., 2019; Tan et al., 2020), and then classify the spans. Based on these, Shen et al. (2021a) treats NER as a joint task of boundary regression and span classification and proposes a two-stage identifier that locates entities first and labels them later.
|
| 49 |
+
|
| 50 |
+
Three novel paradigms for NER have recently been proposed, reformulating named entity recognition as sequence generation, set prediction, and reading comprehension tasks, respectively. Yan et al. (2021) formulates NER as an entity span sequence generation problem and uses a BART (Lewis et al., 2020) model with the pointer mechanism to tackle NER tasks. Tan et al. (2021) formulates NER as an entity set prediction task. Different from Straková et al. (2019), they utilize a non-autoregressive decoder to predict the entity set. Li et al. (2020b); Mengge et al. (2020) reformulate the NER task as an MRC question answering task. They construct type-specific queries using semantic prior information for entity categories.
|
| 51 |
+
|
| 52 |
+
Different from Li et al. (2020b); Jiang et al. (2021), our method attempts to query at the entity level, where it adaptively learns query semantics for instance queries and extracts all types of entities in parallel. It is worth noting that Seq2Set (Tan et al., 2021) is quite different from ours: (1)
|
| 53 |
+
|
| 54 |
+
Seq2Set attempts to eliminate the incorrect bias introduced by specified entity decoding order in the seq2seq framework, and proposes an entity set predictor, while we follow the MRC paradigm and focus on extracting entities using instance queries. (2) Seq2Set is an encoder-decoder architecture, while our model throws away the decoder and keeps only the encoder as in Wang et al. (2022a), which speeds up inference and allows full interaction between query and context. (3) Seq2Set uses bipartite graph matching to compute the entity-set level loss, while we focus on the label assignment for each instance query and propose a one-to-many dynamic label assignment mechanism.
|
| 55 |
+
|
| 56 |
+
# 3 Method
|
| 57 |
+
|
| 58 |
+
In this section, we first introduce the task formulation in § 3.1, and then describe our method. As shown in Figure 2, our method consists of three components: the Encoder (§ 3.2), the Entity Prediction (§ 3.3) and the Dynamic Label Assignment (§ 3.4). The encoder encodes both the sentence and instance queries. Then for each instance query, we perform entity localization and entity classification using Entity Pointer and Entity Classifier respectively. For training the model, we introduce a dynamic label assignment mechanism to assign gold entities to the instance queries in § 3.4.
|
| 59 |
+
|
| 60 |
+
# 3.1 Task Formulation
|
| 61 |
+
|
| 62 |
+
We use $(X,Y)$ to denote a training sample, where $X$ is a sentence consisting of $N$ words labeled by a set of triples $Y = \{ < Y_{k}^{l}, Y_{k}^{r}, Y_{k}^{t} > \}_{k=0}^{G-1}$ . $Y_{k}^{l} \in [0,N-1]$ , $Y_{k}^{r} \in [0,N-1]$ and $Y_{k}^{t} \in \mathcal{E}$ are the indices for the left boundary, right boundary and entity type of the $k$ -th entity, where $\mathcal{E}$ is a finite set of entity types. In our approach, we set up $M$ $(M > G)$ global and learnable instance queries $I \in \mathbb{R}^{M \times h}$ , each of which (denoted as a vector of size $h$ ) extracts one entity from the sentence. They are randomly initialized and can learn the query semantics automatically during training. Thus we define the task as follows: given an input sentence $X$ , the aim is to extract the entities $Y$ based on the learnable instance queries $I$ .
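
For concreteness, the following is a minimal PyTorch sketch of this setup; the class and variable names are our own illustration rather than the paper's released code, and the sizes match the default setting reported later ($M = 60$, initialization $\mathcal{N}(0.0, 0.02)$ from § 4.2).

```python
# Illustrative sketch only; names (Entity, instance_queries) are hypothetical.
from dataclasses import dataclass

import torch

@dataclass(frozen=True)
class Entity:
    left: int     # Y^l_k: left boundary word index in [0, N-1]
    right: int    # Y^r_k: right boundary word index in [0, N-1]
    type_id: int  # Y^t_k: index into the entity type set E

# M global instance queries of hidden size h, shared across all sentences
# and trained end-to-end; each query is responsible for predicting one entity.
M, h = 60, 768
instance_queries = torch.nn.Parameter(torch.empty(M, h).normal_(mean=0.0, std=0.02))
```
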
|
| 63 |
+
|
| 64 |
+
# 3.2 Encoder
|
| 65 |
+
|
| 66 |
+
Model input consists of two sequences, the sentence $X$ of length $N$ and the instance queries $I$ of length $M$ . The encoder concatenates them into one sequence and encodes them simultaneously.
|
| 67 |
+
|
| 68 |
+
Input Embedding We calculate the token embeddings $E_{tok}$ , position embeddings $E_{pos}$ and type embeddings $E_{typ}$ of the input from two sequences as follows $(E_{tok}, E_{pos}, E_{typ} \in \mathbb{R}^{(N + M) \times h})$ :
|
| 69 |
+
|
| 70 |
+
$$
|
| 71 |
+
E_{tok} = \operatorname{Concat}(V, I)
|
| 72 |
+
$$
|
| 73 |
+
|
| 74 |
+
$$
|
| 75 |
+
E_{pos} = \operatorname{Concat}\left(P^{w}, P^{q}\right) \tag{1}
|
| 76 |
+
$$
|
| 77 |
+
|
| 78 |
+
$$
|
| 79 |
+
E_{typ} = \operatorname{Concat}\left([U^{w}]^{N}, [U^{q}]^{M}\right)
|
| 80 |
+
$$
|
| 81 |
+
|
| 82 |
+
where $V \in \mathbb{R}^{N \times h}$ are token embeddings of the word sequence, $I \in \mathbb{R}^{M \times h}$ are the vectors of instance queries, $P^w \in \mathbb{R}^{N \times h}$ and $P^q \in \mathbb{R}^{M \times h}$ are separate learnable position embeddings. $U^w$ and $U^q$ are type embeddings and $[\cdot]^{N}$ means repeating $N$ times. Then the input can be represented as $H^0 = E_{tok} + E_{pos} + E_{typ} \in \mathbb{R}^{(N + M) \times h}$.
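
As an illustration, a minimal PyTorch sketch of Eq. (1); the dimension values and variable names are assumptions for the example, not the paper's implementation.

```python
import torch
import torch.nn as nn

N, M, h = 32, 60, 768                  # hypothetical sentence/query/hidden sizes

V = torch.randn(N, h)                  # token embeddings of the word sequence
I = torch.randn(M, h)                  # instance query vectors
P_w = nn.Parameter(torch.randn(N, h))  # learnable word position embeddings P^w
P_q = nn.Parameter(torch.randn(M, h))  # learnable query position embeddings P^q
U = nn.Embedding(2, h)                 # type embeddings: id 0 = word (U^w), id 1 = query (U^q)

E_tok = torch.cat([V, I], dim=0)                         # (N+M, h)
E_pos = torch.cat([P_w, P_q], dim=0)                     # (N+M, h)
type_ids = torch.cat([torch.zeros(N, dtype=torch.long),
                      torch.ones(M, dtype=torch.long)])
E_typ = U(type_ids)                                      # [U^w]^N ; [U^q]^M

H0 = E_tok + E_pos + E_typ                               # H^0 in Eq. (1)
```
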
|
| 83 |
+
|
| 84 |
+
One-Way Self-Attention Normal self-attention would let the sentence attend to all instance queries; the randomly initialized instance queries could then affect the sentence encoding and corrupt the sentence semantics. To keep the sentence semantics isolated from the instance queries, we replace the self-attention in BERT (Devlin et al., 2019) with a one-way version:
|
| 85 |
+
|
| 86 |
+
$$
|
| 87 |
+
\mathrm{OW}\text{-}\mathrm{SA}(H) = \alpha H W_{v} \tag{2}
|
| 88 |
+
$$
|
| 89 |
+
|
| 90 |
+
$$
|
| 91 |
+
\alpha = \operatorname{softmax}\left(\frac{H W_{q} \left(H W_{k}\right)^{T}}{\sqrt{h}} + \mathcal{M}\right) \tag{3}
|
| 92 |
+
$$
|
| 93 |
+
|
| 94 |
+
where $W_{q}, W_{k}, W_{v} \in \mathbb{R}^{h \times h}$ are parameter matrices and $\mathcal{M} \in \{0, -\infty\}^{(N + M) \times (N + M)}$ is a mask matrix for the attention scores, whose elements are set to $0$ for kept units and $-\infty$ for removed ones. In our formulation, the upper-right sub-matrix of $\mathcal{M}$ is a full $-\infty$ matrix of size $N \times M$ and all other elements are zero, which prevents the sentence encoding from attending to the instance queries. In addition, the self-attention among instance queries can model the connections between them, and thus enhance their query semantics.
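
The mask $\mathcal{M}$ is straightforward to build in code. Below is a minimal single-head sketch of Eq. (2)-(3) with illustrative shapes; BERT's actual multi-head attention would apply the same additive mask in every head.

```python
import torch
import torch.nn.functional as F

def one_way_self_attention(H, W_q, W_k, W_v, n_words):
    """Eq. (2)-(3): word positions may not attend to instance queries."""
    total, hidden = H.shape
    mask = torch.zeros(total, total)
    mask[:n_words, n_words:] = float("-inf")  # upper-right (N x M) block
    scores = (H @ W_q) @ (H @ W_k).T / hidden ** 0.5 + mask
    alpha = F.softmax(scores, dim=-1)
    return alpha @ (H @ W_v)

# Toy usage with hypothetical sizes: 4 words, 3 queries, hidden size 8.
H = torch.randn(4 + 3, 8)
W_q, W_k, W_v = (torch.randn(8, 8) for _ in range(3))
out = one_way_self_attention(H, W_q, W_k, W_v, n_words=4)  # (7, 8)
```
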
|
| 95 |
+
|
| 96 |
+
After BERT encoding, we further encode the sequence at the word level with two bidirectional LSTM layers and $L$ extra transformer layers. Finally, we split $H \in \mathbb{R}^{(N + M) \times h}$ into two parts: the sentence encoding $H^w \in \mathbb{R}^{N \times h}$ and the instance query encoding $H^q \in \mathbb{R}^{M \times h}$ .
|
| 97 |
+
|
| 98 |
+
# 3.3 Entity Prediction
|
| 99 |
+
|
| 100 |
+
Each instance query can predict one entity from the sentence, and with $M$ instance queries, we can predict at most $M$ entities in parallel. Entity prediction
|
| 101 |
+
|
| 102 |
+

|
| 103 |
+
Figure 2: The overall architecture of the model.
|
| 104 |
+
|
| 105 |
+
can be viewed as a joint task of boundary prediction and category prediction, for which we design the Entity Pointer and the Entity Classifier, respectively.
|
| 106 |
+
|
| 107 |
+
Entity Pointer For the $i$ -th instance query $H_{i}^{q}$ , we first fuse the query with each word of the sentence via two linear layers. The fused representation of the $i$ -th instance query and the $j$ -th word is computed as:
|
| 108 |
+
|
| 109 |
+
$$
|
| 110 |
+
S_{ij}^{\delta} = \operatorname{ReLU}\left(H_{i}^{q} W_{\delta}^{q} + H_{j}^{w} W_{\delta}^{w}\right) \tag{4}
|
| 111 |
+
$$
|
| 112 |
+
|
| 113 |
+
where $\delta \in \{l,r\}$ denotes the left or right boundary and $W_{\delta}^{q},W_{\delta}^{w}\in \mathbb{R}^{h\times h}$ are trainable projection parameters. Then we calculate the probability that the $j$ -th word of the sentence is a left or right boundary:
|
| 114 |
+
|
| 115 |
+
$$
|
| 116 |
+
P_{ij}^{\delta} = \operatorname{sigmoid}\left(S_{ij}^{\delta} W_{\delta} + b_{\delta}\right) \tag{5}
|
| 117 |
+
$$
|
| 118 |
+
|
| 119 |
+
where $W_{\delta}\in \mathbb{R}^{h}$ and $b_{\delta}$ are learnable parameters.
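
A minimal PyTorch sketch of Eq. (4)-(5) for one boundary follows; the module and variable names are illustrative, and the real model would instantiate this twice, once for $\delta = l$ and once for $\delta = r$.

```python
import torch
import torch.nn as nn

class EntityPointer(nn.Module):
    """Sketch of Eq. (4)-(5) for a single boundary delta in {l, r}."""
    def __init__(self, h):
        super().__init__()
        self.proj_q = nn.Linear(h, h, bias=False)  # W^q_delta
        self.proj_w = nn.Linear(h, h, bias=False)  # W^w_delta
        self.score = nn.Linear(h, 1)               # W_delta and b_delta

    def forward(self, H_q, H_w):
        # H_q: (M, h) query encodings; H_w: (N, h) word encodings.
        S = torch.relu(self.proj_q(H_q)[:, None, :] + self.proj_w(H_w)[None, :, :])
        return torch.sigmoid(self.score(S)).squeeze(-1)  # P^delta: (M, N)
```
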
|
| 120 |
+
|
| 121 |
+
Entity Classifier Entity boundary information is useful for entity typing. We use $P_{i}^{\delta} = [P_{i0}^{\delta}, P_{i1}^{\delta}, \dots, P_{iN-1}^{\delta}], \delta \in \{l, r\}$ to weigh all words and then concatenate them with instance
|
| 122 |
+
|
| 123 |
+
queries. The boundary-aware representation of the $i$ -th instance query can be calculated as:
|
| 124 |
+
|
| 125 |
+
$$
|
| 126 |
+
S_{i}^{t} = \operatorname{ReLU}\left(\left[H_{i}^{q} W_{t}^{q}; P_{i}^{l} H^{w}; P_{i}^{r} H^{w}\right]\right) \tag{6}
|
| 127 |
+
$$
|
| 128 |
+
|
| 129 |
+
where $W_{t}^{q}\in \mathbb{R}^{h\times h}$ is a learnable parameter. Then we can get the probability of the entity queried by the $i$ -th instance query belonging to category $c$ :
|
| 130 |
+
|
| 131 |
+
$$
|
| 132 |
+
P_{ic}^{t} = \frac{\exp\left(S_{i}^{t} W_{t}^{c} + b_{t}^{c}\right)}{\sum_{c^{\prime} \in \mathcal{E}} \exp\left(S_{i}^{t} W_{t}^{c^{\prime}} + b_{t}^{c^{\prime}}\right)} \tag{7}
|
| 133 |
+
$$
|
| 134 |
+
|
| 135 |
+
where $W_{t}^{c^{\prime}}\in \mathbb{R}^{h}$ and $b_{t}^{c^{\prime}}$ are learnable parameters.
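
Continuing the sketch above (same illustrative names), Eq. (6)-(7) can be written as:

```python
import torch
import torch.nn as nn

class EntityClassifier(nn.Module):
    """Sketch of Eq. (6)-(7): boundary-aware entity typing."""
    def __init__(self, h, n_types):
        super().__init__()
        self.proj_q = nn.Linear(h, h, bias=False)  # W^q_t
        self.cls = nn.Linear(3 * h, n_types)       # W^c_t and b^c_t

    def forward(self, H_q, H_w, P_l, P_r):
        # H_q: (M, h), H_w: (N, h); P_l, P_r: (M, N) boundary probabilities.
        left_ctx = P_l @ H_w                       # boundary-weighted word context
        right_ctx = P_r @ H_w
        S_t = torch.relu(torch.cat([self.proj_q(H_q), left_ctx, right_ctx], dim=-1))
        return torch.softmax(self.cls(S_t), dim=-1)  # P^t: (M, |E|)
```
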
|
| 136 |
+
|
| 137 |
+
Finally, the entity predicted by the $i$ -th instance query is $\mathcal{T}_i = (\mathcal{T}_i^l,\mathcal{T}_i^r,\mathcal{T}_i^t)$ . $\mathcal{T}_i^l = \arg \max_j(P_{ij}^l)$ and $\mathcal{T}_i^r = \arg \max_j(P_{ij}^r)$ are the left and right boundary, $\mathcal{T}_i^t = \arg \max_c(P_{ic}^t)$ is the entity type. We perform entity localization and entity classification on all instance queries to extract entities in parallel. If multiple instance queries locate the same entity but predict different entity types, we keep only the prediction with the highest classification probability.
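
The argmax decoding and the duplicate-resolution step described above can be summarized in a few lines. This is only a sketch: the probability thresholds of Appendix B are omitted for brevity, and the handling of None-labeled queries is our assumption based on § 3.4.

```python
def decode_entities(P_l, P_r, P_t, none_type_id):
    """Per-query argmax decoding with duplicate resolution (sketch)."""
    best = {}  # (left, right) -> (type_id, classification probability)
    for i in range(P_t.size(0)):
        l, r = int(P_l[i].argmax()), int(P_r[i].argmax())
        t = int(P_t[i].argmax())
        if t == none_type_id:
            continue  # this query predicts no entity
        prob = float(P_t[i, t])
        # Keep only the highest-probability type for each located span.
        if (l, r) not in best or prob > best[(l, r)][1]:
            best[(l, r)] = (t, prob)
    return [(l, r, t) for (l, r), (t, _) in best.items()]
```
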
|
| 138 |
+
|
| 139 |
+
# 3.4 Dynamic Label Assignment for Training
|
| 140 |
+
|
| 141 |
+
Dynamic Label Assignment Since instance queries are implicit (not in natural language form), we cannot assign gold entities to them in advance. To tackle this, we dynamically assign labels for the instance queries during training. Specifically, we treat label assignment as a Linear Assignment Problem. Any entity can be assigned to any instance query, incurring some cost that may vary depending on the entity-query assignment. We define the cost of assigning the $k$ -th entity ( $Y_{k} = <Y_{k}^{l}, Y_{k}^{r}, Y_{k}^{t}>$ ) to the $i$ -th instance query as:
|
| 142 |
+
|
| 143 |
+
$$
|
| 144 |
+
\operatorname{Cost}_{ik} = -\left(P_{i Y_{k}^{t}}^{t} + P_{i Y_{k}^{l}}^{l} + P_{i Y_{k}^{r}}^{r}\right) \tag{8}
|
| 145 |
+
$$
|
| 146 |
+
|
| 147 |
+
where $Y_{k}^{t}, Y_{k}^{l}$ and $Y_{k}^{r}$ denote the indices for the entity type, left boundary and right boundary of the $k$ -th entity. It is required to allocate as many entities as possible by assigning at most one entity to each query and at most one query to each entity, in such a way that the total cost of the assignment is minimized. However, the one-to-one manner does not fully utilize instance queries, and many instance queries are not assigned to gold entities. Thus we extend the traditional LAP to a one-to-many one, where each entity can be assigned to multiple instance queries. The optimization objective of this one-to-many LAP is defined as:
|
| 148 |
+
|
| 149 |
+
$$
\begin{aligned}
\min \quad & \sum_{i = 0}^{M - 1} \sum_{k = 0}^{G - 1} A_{ik} \operatorname{Cost}_{ik} \\
\text{s.t.} \quad & \sum_{k} A_{ik} \leq 1, \quad \forall i \\
& \sum_{i} A_{ik} = q_{k}, \quad \forall k \\
& A_{ik} \in \{0, 1\}, \quad \forall i, k
\end{aligned} \tag{9}
$$
|
| 154 |
+
|
| 155 |
+
where $A \in \{0,1\}^{M \times G}$ is the assignment matrix, $G$ denotes the number of entities, and $A_{ik} = 1$ indicates that the $k$ -th entity is assigned to the $i$ -th instance query. $q_k$ denotes the assignable quantity of the $k$ -th gold entity and $Q = \sum_{k} q_k$ denotes the total assignable quantity over all entities. In our experiments, the assignable quantities of different entities are balanced.
|
| 156 |
+
|
| 157 |
+
We then use the Hungarian algorithm (Kuhn, 1955) to solve Equation 9, which yields the label assignment matrix with the minimum total cost. However, the number of instance queries is greater than the total assignable quantity of entity labels $(M > Q)$ , so some of them will not be assigned any entity label. We assign the None label to them by
|
| 158 |
+
|
| 159 |
+
extending a column for the assignment matrix. The new column vector $a$ is set as follows:
|
| 160 |
+
|
| 161 |
+
$$
|
| 162 |
+
a_{i} = \begin{cases} 0, & \sum_{k} A_{ik} = 1 \\ 1, & \sum_{k} A_{ik} = 0 \end{cases} \tag{10}
|
| 163 |
+
$$
|
| 164 |
+
|
| 165 |
+
Based on the new assignment matrix $\hat{A} \in \{0,1\}^{M \times (G + 1)}$ , we can further get the labels $\hat{Y} = Y[\pi^{*}]$ for the $M$ instance queries, where $\pi^{*} = \operatorname{argmax}_{dim = 1}(\hat{A})$ is the label index vector for the instance queries under the optimal assignment.
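
The one-to-many assignment reduces to a standard rectangular LAP by duplicating each entity's cost column $q_k$ times. A minimal NumPy/SciPy sketch follows; the function name is ours, and `scipy.optimize.linear_sum_assignment` is an off-the-shelf solver for the underlying one-to-one problem.

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

def assign_labels(cost, q, none_label=-1):
    """One-to-many LAP of Eq. (9) via column duplication (sketch).

    cost: (M, G) matrix of Cost_ik from Eq. (8); q: (G,) assignable
    quantities q_k. Unmatched queries get the None label, as in Eq. (10).
    """
    M, G = cost.shape
    expanded = np.repeat(cost, q, axis=1)         # (M, Q) with Q = sum(q)
    col_to_entity = np.repeat(np.arange(G), q)    # map duplicated columns back
    rows, cols = linear_sum_assignment(expanded)  # min-cost one-to-one matching
    labels = np.full(M, none_label)
    labels[rows] = col_to_entity[cols]            # pi* for the matched queries
    return labels

# Toy usage: 6 queries, 2 gold entities, each assignable to 2 queries.
rng = np.random.default_rng(0)
print(assign_labels(rng.random((6, 2)), q=np.array([2, 2])))
```
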
|
| 166 |
+
|
| 167 |
+
Training Objective We have computed the entity predictions for the $M$ instance queries in § 3.3 and obtained their labels $\hat{Y}$ with the minimum total assignment cost in § 3.4. To train the model, we define a boundary loss and a classification loss. For left and right boundary prediction, we use a binary cross-entropy loss:
|
| 168 |
+
|
| 169 |
+
$$
|
| 170 |
+
\mathcal{L}_{b} = - \sum_{\delta \in \{l, r\}} \sum_{i = 0}^{M - 1} \sum_{j = 0}^{N - 1} \Big( \mathbb{1}\big[\hat{Y}_{i}^{\delta} = j\big] \log P_{ij}^{\delta} + \mathbb{1}\big[\hat{Y}_{i}^{\delta} \neq j\big] \log\big(1 - P_{ij}^{\delta}\big) \Big) \tag{11}
|
| 171 |
+
$$
|
| 172 |
+
|
| 173 |
+
and for entity classification we use a cross-entropy loss:
|
| 174 |
+
|
| 175 |
+
$$
|
| 176 |
+
\mathcal{L}_{t} = - \sum_{i = 0}^{M - 1} \sum_{c \in \mathcal{E}} \mathbb{1}\big[\hat{Y}_{i}^{t} = c\big] \log P_{ic}^{t} \tag{12}
|
| 177 |
+
$$
|
| 178 |
+
|
| 179 |
+
where $\mathbb{1}[\omega]$ denotes the indicator function, which takes the value 1 when $\omega$ is true and 0 otherwise.
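
A PyTorch sketch of Eq. (11)-(12), assuming every query has already received a label from the dynamic assignment; in practice the boundary terms of None-labeled queries would need to be masked out, which is omitted here for brevity.

```python
import torch
import torch.nn.functional as F

def piqn_losses(P_l, P_r, P_t, y_l, y_r, y_t):
    """Sketch of the boundary loss (Eq. 11) and classification loss (Eq. 12).

    P_l, P_r: (M, N) boundary probabilities; P_t: (M, |E|) type probabilities;
    y_l, y_r, y_t: (M,) long tensors of labels from the dynamic assignment.
    """
    M, N = P_l.shape
    loss_b = sum(
        F.binary_cross_entropy(P, F.one_hot(y, num_classes=N).float(), reduction="sum")
        for P, y in ((P_l, y_l), (P_r, y_r))
    )
    # P_t is already softmax-normalized, so take its log and use NLL for Eq. (12).
    loss_t = F.nll_loss(torch.log(P_t), y_t, reduction="sum")
    return loss_b + loss_t
```
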
|
| 180 |
+
|
| 181 |
+
Following Al-Rfou et al. (2019) and Carion et al. (2020), we add an Entity Pointer and an Entity Classifier after each word-level transformer layer and compute the two losses at each layer. Thus, the total loss on the training set $D$ can be defined as:
|
| 182 |
+
|
| 183 |
+
$$
|
| 184 |
+
\mathcal{L} = \sum_{D} \sum_{\tau = 1}^{L} \left( \mathcal{L}_{t}^{\tau} + \mathcal{L}_{b}^{\tau} \right) \tag{13}
|
| 185 |
+
$$
|
| 186 |
+
|
| 187 |
+
where $\mathcal{L}_t^\tau, \mathcal{L}_b^\tau$ are the classification loss and the boundary loss at the $\tau$ -th layer. At inference time, we only perform entity prediction at the final layer.
|
| 188 |
+
|
| 189 |
+
# 4 Experiment Settings
|
| 190 |
+
|
| 191 |
+
# 4.1 Datasets
|
| 192 |
+
|
| 193 |
+
To provide empirical evidence for the effectiveness of the proposed model, we conduct our experiments
|
| 194 |
+
|
| 195 |
+
<table><tr><td rowspan="2">Model</td><td colspan="3">ACE04</td></tr><tr><td>Pr.</td><td>Rec.</td><td>F1</td></tr><tr><td>Li et al. (2020b)</td><td>85.05</td><td>86.32</td><td>85.98</td></tr><tr><td>Wang et al. (2020a)</td><td>86.08</td><td>86.48</td><td>86.28</td></tr><tr><td>Yu et al. (2020)</td><td>87.30</td><td>86.00</td><td>86.70</td></tr><tr><td>Yan et al. (2021)</td><td>87.27</td><td>86.41</td><td>86.84</td></tr><tr><td>Yang and Tu (2022)</td><td>86.60</td><td>87.28</td><td>86.94</td></tr><tr><td>Tan et al. (2021)</td><td>88.46</td><td>86.10</td><td>87.26</td></tr><tr><td>Shen et al. (2021a)</td><td>87.44</td><td>87.38</td><td>87.41</td></tr><tr><td>PIQN</td><td>88.48</td><td>87.81</td><td>88.14</td></tr><tr><td rowspan="2">Model</td><td colspan="3">ACE05</td></tr><tr><td>Pr.</td><td>Rec.</td><td>F1</td></tr><tr><td>Lin et al. (2019)</td><td>76.20</td><td>73.60</td><td>74.90</td></tr><tr><td>Luo and Zhao (2020)</td><td>75.00</td><td>75.20</td><td>75.10</td></tr><tr><td>Li et al. (2021)</td><td>-</td><td>-</td><td>83.00</td></tr><tr><td>Wang et al. (2020a)</td><td>83.95</td><td>85.39</td><td>84.66</td></tr><tr><td>Yan et al. (2021)</td><td>83.16</td><td>86.38</td><td>84.74</td></tr><tr><td>Yu et al. (2020)</td><td>85.20</td><td>85.60</td><td>85.40</td></tr><tr><td>Yang and Tu (2022)</td><td>84.61</td><td>86.43</td><td>85.53</td></tr><tr><td>Li et al. (2020b)</td><td>87.16</td><td>86.59</td><td>86.88</td></tr><tr><td>Shen et al. (2021a)</td><td>86.09</td><td>87.27</td><td>86.67</td></tr><tr><td>Tan et al. (2021)</td><td>87.48</td><td>86.63</td><td>87.05</td></tr><tr><td>PIQN</td><td>86.27</td><td>88.60</td><td>87.42</td></tr><tr><td rowspan="2">Model</td><td colspan="3">GENIA</td></tr><tr><td>Pr.</td><td>Rec.</td><td>F1</td></tr><tr><td>Lin et al. (2019)</td><td>75.80</td><td>73.90</td><td>74.80</td></tr><tr><td>Luo and Zhao (2020)</td><td>77.40</td><td>74.60</td><td>76.00</td></tr><tr><td>Wang et al. (2020b)</td><td>78.10</td><td>74.40</td><td>76.20</td></tr><tr><td>Yang and Tu (2022)</td><td>78.08</td><td>78.26</td><td>78.16</td></tr><tr><td>Li et al. (2020b)†</td><td>81.14</td><td>76.82</td><td>78.92</td></tr><tr><td>Wang et al. (2020a)</td><td>79.45</td><td>78.94</td><td>79.19</td></tr><tr><td>Yan et al. (2021)</td><td>78.87</td><td>79.6</td><td>79.23</td></tr><tr><td>Tan et al. (2021)</td><td>82.31</td><td>78.66</td><td>80.44</td></tr><tr><td>Yu et al. (2020)</td><td>81.80</td><td>79.30</td><td>80.50</td></tr><tr><td>Shen et al. (2021a)</td><td>80.19</td><td>80.89</td><td>80.54</td></tr><tr><td>PIQN</td><td>83.24</td><td>80.35</td><td>81.77</td></tr><tr><td rowspan="2">Model</td><td colspan="3">KBP17</td></tr><tr><td>Pr.</td><td>Rec.</td><td>F1</td></tr><tr><td>Li et al. (2017)</td><td>76.20</td><td>73.00</td><td>72.80</td></tr><tr><td>Lin et al. (2019)</td><td>77.70</td><td>71.80</td><td>74.60</td></tr><tr><td>Luo and Zhao (2020)</td><td>77.10</td><td>74.30</td><td>75.60</td></tr><tr><td>Li et al. (2020b)</td><td>80.97</td><td>81.12</td><td>80.97</td></tr><tr><td>Tan et al. (2021)</td><td>84.91</td><td>83.04</td><td>83.96</td></tr><tr><td>Shen et al. (2021a)</td><td>85.46</td><td>82.67</td><td>84.05</td></tr><tr><td>PIQN</td><td>85.67</td><td>83.37</td><td>84.50</td></tr><tr><td rowspan="2">Model</td><td colspan="3">NNE</td></tr><tr><td>Pr.</td><td>Rec.</td><td>F1</td></tr><tr><td>Li et al. (2020b)‡</td><td>53.13</td><td>56.67</td><td>54.84</td></tr><tr><td>Wang and Lu (2018)</td><td>77.40</td><td>70.10</td><td>73.60</td></tr><tr><td>Ringland et al. 
(2019)</td><td>91.80</td><td>91.00</td><td>91.40</td></tr><tr><td>Tan et al. (2021)‡</td><td>93.01</td><td>89.21</td><td>91.07</td></tr><tr><td>Shen et al. (2021a)‡</td><td>92.86</td><td>91.12</td><td>91.98</td></tr><tr><td>Wang et al. (2020a)†</td><td>92.64</td><td>93.53</td><td>93.08</td></tr><tr><td>PIQN</td><td>93.85</td><td>94.23</td><td>94.04</td></tr></table>
|
| 196 |
+
|
| 197 |
+
Table 1: Results for nested NER task. $\dagger$ means the reproduction on the same preprocessed dataset and $\ddagger$ means that we run the code on the unreported dataset.
|
| 198 |
+
|
| 199 |
+
on eight English datasets, including five nested NER datasets: ACE04 (Doddington et al., 2004), ACE05 (Walker et al., 2006), KBP17 (Ji et al., 2017), GENIA (Ohta et al., 2002), NNE (Ringland et al., 2019) and three flat NER datasets: FewNERD (Ding et al., 2021), CoNLL03 (Tjong Kim Sang and De Meulder, 2003), OntoNotes (Pradhan et al., 2013), as well as one Chinese flat NER dataset: MSRA (Levow, 2006). FewNERD and NNE are two datasets with large entity type inventories, containing 66 and 114 fine-grained entity types, respectively. Please refer to Appendix A for statistical information about the datasets.
|
| 200 |
+
|
| 201 |
+
# 4.2 Implementation Details
|
| 202 |
+
|
| 203 |
+
In our experiments, we use pretrained BERT (Devlin et al., 2019) in our encoder. For a fair comparison, we use bert-large on ACE04, ACE05, NNE, CoNLL03 and OntoNotes, bert-base on KBP17 and FewNERD, biobert-large (Chiu et al., 2016) on GENIA and chinese-bert-wwm (Cui et al., 2020) on Chinese MSRA. For all datasets, we train our model for 30-60 epochs and use the Adam Optimizer (Kingma and Ba, 2015) with a linear warmup-decay learning rate schedule. We initialize all instance queries using the normal distribution $\mathcal{N}(0.0,0.02)$ . See Appendix B for more detailed parameter settings and Appendix C for all baseline models.
|
| 204 |
+
|
| 205 |
+
# 4.3 Evaluation Metrics
|
| 206 |
+
|
| 207 |
+
We use strict evaluation metrics: an entity is considered correct only when both its boundary and its type are correct. We employ precision, recall and F1-score to evaluate the performance. We also report the F1-scores on the entity localization and entity classification subtasks in § 5.2 and Appendix D.2. We consider the localization correct when the left and right boundaries are both predicted correctly. Based on the accurately localized entities, we then evaluate the performance of entity classification.
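
For clarity, the strict metric amounts to exact-set matching of $(l, r, t)$ triples; a small self-contained sketch:

```python
def strict_prf(pred, gold):
    """Strict precision/recall/F1: a prediction counts only if its left
    boundary, right boundary and type all match a gold triple."""
    pred, gold = set(pred), set(gold)
    tp = len(pred & gold)
    p = tp / len(pred) if pred else 0.0
    r = tp / len(gold) if gold else 0.0
    f1 = 2 * p * r / (p + r) if p + r else 0.0
    return p, r, f1

# A boundary-correct but type-wrong span contributes nothing:
print(strict_prf([(0, 1, "PER"), (6, 10, "PER")],
                 [(0, 1, "PER"), (6, 10, "ORG")]))  # (0.5, 0.5, 0.5)
```
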
|
| 208 |
+
|
| 209 |
+
# 5 Results and Analysis
|
| 210 |
+
|
| 211 |
+
# 5.1 Performance
|
| 212 |
+
|
| 213 |
+
Overall Performance Table 1 illustrates the performance of the proposed model as well as baselines on the nested NER datasets. We observe significant performance boosts on the nested NER datasets over previous state-of-the-art models,
|
| 214 |
+
|
| 215 |
+
achieving F1-scores of $81.77\%$ , $88.14\%$ , $87.42\%$ , $84.50\%$ and $94.04\%$ on the GENIA, ACE04, ACE05, KBP17 and NNE datasets, with $+1.23\%$ , $+0.73\%$ , $+0.37\%$ , $+0.45\%$ and $+0.96\%$ improvements. Our model can also be applied to flat NER. As shown in Table 2, our model achieves state-of-the-art performance on the FewNERD and Chinese MSRA datasets with $+1.44\%$ and $+0.88\%$ improvements. On the CoNLL03 and OntoNotes datasets, our model also achieves comparable results. Compared with the type-specific query-based method (Li et al., 2020b), our model improves by $+2.85\%$ , $+2.16\%$ , $+0.54\%$ , $+3.53\%$ on the GENIA, ACE04, ACE05 and KBP17 datasets. We believe there are three reasons: (1) Rather than relying on external knowledge to inject semantics, instance queries can learn query semantics adaptively, avoiding the sensitivity to hand-constructed queries of varying quality. (2) Each query no longer predicts a group of entities of a specific type, but only one entity. This manner refines the query to the entity level with more precise query semantics. (3) Instance queries are fed into the model in parallel for encoding and prediction, and different instance queries can exploit the intrinsic connections between entities.
|
| 216 |
+
|
| 217 |
+
Inference Speed We compare the inference speed on ACE04 and NNE, as shown in Table 4. Compared to the type-specific query method (Li et al., 2020b), our model not only improves the performance but also gains a significant inference speedup. In particular, on the NNE dataset with 114 entity types, our model speeds up inference by $30.46 \times$ and improves performance by $+39.2\%$ . This is because Li et al. (2020b) requires one inference pass for each type-specific query, while our approach performs parallel inference over all instance queries and only needs to be run once. We also compare against previous state-of-the-art models (Tan et al., 2021; Shen et al., 2021a), and our method is still faster and performs better.
|
| 218 |
+
|
| 219 |
+
# 5.2 Ablation Study
|
| 220 |
+
|
| 221 |
+
In this section, we analyze the effects of different components in PIQN. As shown in Table 3, we have the following observations: (1) Compared to the static label assignment in order of occurrence, the dynamic label assignment shows significant improvement on localization, classification, and NER F1-score, which improves NER F1-score by $+5.71\%$ on ACE04 and $+8.84\%$ on GENIA. This shows that modeling label assignment as a LAP
|
| 222 |
+
|
| 223 |
+
<table><tr><td rowspan="2">Model</td><td colspan="3">FewNERD</td></tr><tr><td>Pr.</td><td>Rec.</td><td>F1</td></tr><tr><td>Ding et al. (2021)</td><td>65.56</td><td>68.78</td><td>67.13</td></tr><tr><td>Shen et al. (2021a)‡</td><td>64.69</td><td>70.87</td><td>67.64</td></tr><tr><td>Tan et al. (2021)‡</td><td>67.37</td><td>69.12</td><td>68.23</td></tr><tr><td>PIQN</td><td>70.16</td><td>69.18</td><td>69.67</td></tr><tr><td rowspan="2">Model</td><td colspan="3">English CoNLL03</td></tr><tr><td>Pr.</td><td>Rec.</td><td>F1</td></tr><tr><td>Peters et al. (2018)</td><td>-</td><td>-</td><td>92.22</td></tr><tr><td>Devlin et al. (2019)</td><td>-</td><td>-</td><td>92.80</td></tr><tr><td>Li et al. (2020b)*</td><td>92.47</td><td>93.27</td><td>92.87</td></tr><tr><td>Yu et al. (2020)*</td><td>92.85</td><td>92.15</td><td>92.50</td></tr><tr><td>Shen et al. (2021a)</td><td>92.13</td><td>93.73</td><td>92.94</td></tr><tr><td>PIQN</td><td>93.29</td><td>92.46</td><td>92.87</td></tr><tr><td rowspan="2">Model</td><td colspan="3">English OntoNotes</td></tr><tr><td>Pr.</td><td>Rec.</td><td>F1</td></tr><tr><td>Li et al. (2020b)*</td><td>91.34</td><td>88.39</td><td>89.84</td></tr><tr><td>Yu et al. (2020)*</td><td>89.74</td><td>89.92</td><td>89.83</td></tr><tr><td>Yan et al. (2021)</td><td>89.99</td><td>90.77</td><td>90.38</td></tr><tr><td>Xu et al. (2021)</td><td>90.14</td><td>91.58</td><td>90.85</td></tr><tr><td>PIQN</td><td>91.43</td><td>90.73</td><td>90.96</td></tr><tr><td rowspan="2">Model</td><td colspan="3">Chinese MSRA</td></tr><tr><td>Pr.</td><td>Rec.</td><td>F1</td></tr><tr><td>Devlin et al. (2019)</td><td>-</td><td>-</td><td>92.60</td></tr><tr><td>Li et al. (2020b)†</td><td>90.38</td><td>89.00</td><td>89.68</td></tr><tr><td>Shen et al. (2021a)‡</td><td>92.20</td><td>90.72</td><td>91.46</td></tr><tr><td>Tan et al. (2021)‡</td><td>93.21</td><td>91.97</td><td>92.58</td></tr><tr><td>PIQN</td><td>93.61</td><td>93.35</td><td>93.48</td></tr></table>
|
| 224 |
+
|
| 225 |
+
Table 2: Results for flat NER task. * means the result reproduced by (Yan et al., 2021), † means the reproduction on the same preprocessed dataset and ‡ means that we run the code on the unreported dataset.
|
| 226 |
+
|
| 227 |
+
enables the dynamic assignment of optimal labels to instance queries during training, eliminating the incorrect bias introduced by pre-specified labels. Furthermore, one-to-many label assignment is more effective than one-to-one, improving the F1-score by $+3.86\%$ on ACE04 and $+0.51\%$ on GENIA. (2) The one-way self-attention blocks the attention of the sentence encoding on instance queries, which improves the F1-score by $+0.98\%$ on ACE04 and $+0.57\%$ on GENIA. It illustrates the importance of keeping the semantics of the sentence independent of the queries. In contrast, semantic interactions between queries are effective, improving the F1-score by $+0.92\%$ on ACE04 and $+0.67\%$ on GENIA. The major reason is that entities in the same sentence are closely related, and the interaction between instance queries can capture the relations between them.
|
| 228 |
+
|
| 229 |
+
<table><tr><td rowspan="2">Model</td><td colspan="5">ACE04</td><td colspan="5">GENIA</td></tr><tr><td>Loc. F1</td><td>Cls. F1</td><td>Pr.</td><td>Rec.</td><td>F1</td><td>Loc. F1</td><td>Cls. F1</td><td>Pr.</td><td>Rec.</td><td>F1</td></tr><tr><td>Default</td><td>92.23</td><td>91.53</td><td>88.48</td><td>87.81</td><td>88.14</td><td>84.43</td><td>87.83</td><td>83.24</td><td>80.35</td><td>81.77</td></tr><tr><td>w/o Dynamic LA</td><td>88.22</td><td>88.29</td><td>80.95</td><td>83.99</td><td>82.43</td><td>77.01</td><td>81.90</td><td>73.56</td><td>72.30</td><td>72.93</td></tr><tr><td>w/o OvM LA</td><td>89.22</td><td>87.61</td><td>87.04</td><td>81.68</td><td>84.28</td><td>83.87</td><td>87.38</td><td>83.02</td><td>79.57</td><td>81.26</td></tr><tr><td>w/o One Way SA</td><td>91.90</td><td>90.62</td><td>87.56</td><td>86.75</td><td>87.16</td><td>84.11</td><td>87.21</td><td>82.94</td><td>79.53</td><td>81.20</td></tr><tr><td>w/o Query Interaction</td><td>91.84</td><td>90.42</td><td>88.21</td><td>86.26</td><td>87.22</td><td>83.87</td><td>87.05</td><td>83.15</td><td>79.15</td><td>81.10</td></tr></table>
|
| 230 |
+
|
| 231 |
+
Table 3: Ablation Study. (1) w/o Dynamic LA: replace dynamic label assignment to static label assignment, i.e., assign labels to instance queries in the order of the entities' occurrence in the sentence. (2) w/o OvM LA: replace the one-to-many label assignment to one-to-one, i.e., set the number of queries to which each entity can be assigned to be 1. (3) w/o One Way SA: encode sentences and instance queries using the original BERT. (4) w/o Query Interaction: eliminate interactions between instance queries by masking the attention weights between them.
|
| 232 |
+
|
| 233 |
+
<table><tr><td rowspan="2">Model</td><td colspan="2">ACE04</td><td colspan="2">NNE</td></tr><tr><td>Speedup</td><td>F1</td><td>Speedup</td><td>F1</td></tr><tr><td>Li et al. (2020b)</td><td>1.00×</td><td>85.98</td><td>1.00×</td><td>54.84</td></tr><tr><td>Tan et al. (2021)</td><td>1.40×</td><td>87.26</td><td>22.18×</td><td>91.07</td></tr><tr><td>Shen et al. (2021a)</td><td>0.96×</td><td>87.41</td><td>11.41×</td><td>91.98</td></tr><tr><td>PIQN</td><td>2.16×</td><td>88.14</td><td>30.46×</td><td>94.04</td></tr></table>
|
| 234 |
+
|
| 235 |
+
Table 4: Inference speed on ACE04 and NNE. All experiments are conducted on a single NVIDIA RTX A6000 graphics card with 48 GB of memory.
|
| 236 |
+
|
| 237 |
+
# 5.3 Analysis
|
| 238 |
+
|
| 239 |
+
To analyze the query semantics learned by the instance queries during training, we randomly select several instance queries and analyze the locations and types of the entities they predict.
|
| 240 |
+
|
| 241 |
+

|
| 242 |
+
Figure 3: Kernel density estimation of entity distribution at different locations.
|
| 243 |
+
|
| 244 |
+
Entity Location We normalize the predicted central locations of the entities and use kernel density estimation to draw the distribution of the predicted entity locations for different queries, as shown in Figure 3. We observe that different instance queries focus on entities at different positions, which means that the instance queries can learn the query semantics related to entity position.
|
| 245 |
+
|
| 246 |
+
For example, instance queries #28 and #39 prefer to predict entities at the beginning of sentences, while #11 and #53 prefer entities at the end.
|
| 247 |
+
|
| 248 |
+
Entity Type We count the co-occurrence of different instance queries and different entity types they predicted. To eliminate the imbalance of entity types, we normalize the co-occurrence matrix on the entity type axis. As shown in Figure 4, different instance queries have preferences for different entity types. For example, instance queries #11 and #13 prefer to predict PER entities, #30 and #43 prefer VEH entities, #25 and #49 prefer WEA entities, #12 prefers FAC entities, and #35 prefers LOC entities.
|
| 249 |
+
|
| 250 |
+

|
| 251 |
+
Figure 4: Co-occurrence statistics between instance queries and different entity types.
|
| 252 |
+
|
| 253 |
+
We also analyze the auxiliary loss, the dynamic label assignment mechanism, and the performance on entity localization and classification; please see Appendix D.
|
| 254 |
+
|
| 255 |
+
# 6 Case Study
|
| 256 |
+
|
| 257 |
+
Table 5 shows a case study of model predictions. Our model can recognize nested entities and
|
| 258 |
+
|
| 259 |
+
<table><tr><td>#</td><td>Sentence with Gold Entities</td><td>Prediction← Instance Query IDs</td></tr><tr><td>1</td><td>[0A number of powerful international companies and commercial agencies, such as [12Ito Bureau of [15Japan15]GPE15]ORG, [17Han Hua Group of [21South Korea22]GPE22]ORG, [24Jeffrey Group of [27the US28]GPE28]ORG, [30etc30]ORG]ORG. participated in this Urumchi Negotiation Meeting.</td><td>✓ (24, 28, ORG)← 0 23 33 45 51
|
| 260 |
+
✓ (27, 28, GPE)← 2 3 19 26 27 46 50
|
| 261 |
+
✓ (15, 15, GPE)← 9 11 14 42
|
| 262 |
+
✓ ... ...
|
| 263 |
+
✓ (0, 30, ORG)← 10 20 24 37 53 55
|
| 264 |
+
X (12, 30, ORG)← 16 22 47 57
|
| 265 |
+
None← 1 12 13 15 17 21 29 30 31 32 34 35 40 49 52 59</td></tr><tr><td>2</td><td>For example, as instant messaging migrates to cell phones or hand - held computer organizers, [17consumers17]PER won 't want to have to install multiple services on these devices, said [33Brian Park34]PER, [36senior product for [39Yahoo!40]ORG Communications Services42]PER.</td><td>X (39, 42, ORG)← 0 2 15 19 26 27 29 35 46 49 50
|
| 266 |
+
✓ (17, 17, PER)← 1 10 20 22 24 32 37 47 53 55 57
|
| 267 |
+
✓ (33, 34, PER)← 6 9 11 12 14 18 34 38 42 48 59
|
| 268 |
+
✓ (36, 42, PER)← 8 17 25 28 30 31 36 40 54 56 58
|
| 269 |
+
None← 3 4 5 7 13 16 21 23 33 39 41 43 44 45 51 52</td></tr><tr><td>3</td><td>[0Hector Rodriguez1]PER told the hearing of [6the Venezuelan consumer protection agency10]ORG that [12Bridgeton Firestone13]ORG knew about the tyre defects for many months and should be held responsible for the accidents.</td><td>✓ (0, 1, PER)← 1 10 20 24 32 37 47 53 55
|
| 270 |
+
✓ (12, 13, ORG)← 2 3 19 26 27 35 46 49 50
|
| 271 |
+
X (7, 8, PER)← 4 7 12 18 38 39 41 43 44
|
| 272 |
+
✓ (6, 10, ORG)← 5 6 9 11 14 21 48 57 59
|
| 273 |
+
X (7, 7, GPE)← 8 25 28 30 31 36 40 54 56 58
|
| 274 |
+
None← 0 13 15 16 17 22 23 29 33 34 42 45 51 52</td></tr></table>
|
| 275 |
+
|
| 276 |
+
Table 5: Cases Study. In the left column, the label in the lower right corner indicates the type of entity, and the superscripts indicate the positions of the left and right boundary words. In the right column, we show the correspondence between the instance queries and the predicted entities.
|
| 277 |
+
|
| 278 |
+
long entities well. In case 1, the entity of length 31 and the entities with a three-level nested structure are predicted accurately. Thanks to the one-to-many dynamic label assignment mechanism, each entity can be predicted by multiple instance queries, which guarantees a high coverage of entity prediction. However, the model's ability to understand sentences is still limited, mainly in the following ways: (1) Deficient understanding of special phrases. Yahoo! Communications Services in case 2 is misclassified as ORG, while in fact only Yahoo! is an ORG entity. (2) Over-focus on local semantics. In case 3, the model misclassifies Venezuelan consumer as PER, ignoring the full semantics of the long phrase the Venezuelan consumer protection agency, which should be ORG. (3) Insensitivity to morphological variation. The model confuses Venezuelan with Venezuela and misidentifies the former as GPE in case 3.
|
| 279 |
+
|
| 280 |
+
# 7 Conclusion
|
| 281 |
+
|
| 282 |
+
In this paper, we propose Parallel Instance Query Network for nested NER, where a collection of instance queries are fed into the model simultaneously and can predict all entities in parallel. The instance queries can automatically learn query semantics related to entity types or entity locations during training, avoiding manual constructions that rely on external knowledge. To train the model, we design a dynamic label assignment mechanism
|
| 283 |
+
|
| 284 |
+
to assign gold entities for these instance queries. Experiments on both nested and flat NER datasets demonstrate that the proposed model achieves state-of-the-art performance.
|
| 285 |
+
|
| 286 |
+
# Acknowledgments
|
| 287 |
+
|
| 288 |
+
This work is supported by the Key Research and Development Program of Zhejiang Province, China (No. 2021C01013), the National Key Research and Development Project of China (No. 2018AAA0101900), the Chinese Knowledge Center of Engineering Science and Technology (CK-CEST) and MOE Engineering Research Center of Digital Library.
|
| 289 |
+
|
| 290 |
+
# References
|
| 291 |
+
|
| 292 |
+
Rami Al-Rfou, Dokook Choe, Noah Constant, Mandy Guo, and Llion Jones. 2019. Character-level language modeling with deeper self-attention. Proceedings of the AAAI Conference on Artificial Intelligence, 33:3159-3166.
|
| 293 |
+
Beatrice Alex, Barry Haddow, and Claire Grover. 2007. Recognising nested named entities in biomedical text. In Biological, translational, and clinical language processing, pages 65-72, Prague, Czech Republic. Association for Computational Linguistics.
|
| 294 |
+
R. Burkard and E. Cela. 1999. Linear assignment problems and extensions. In Handbook of Combinatorial Optimization.
|
| 295 |
+
|
| 296 |
+
Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. 2020. End-to-end object detection with transformers. In ECCV 2020, pages 213-229, Cham. Springer International Publishing.
|
| 297 |
+
Billy Chiu, Gamal Crichton, Anna Korhonen, and Sampo Pyysalo. 2016. How to train good word embeddings for biomedical NLP. In Proceedings of the 15th Workshop on Biomedical Natural Language Processing, pages 166-174, Berlin, Germany. Association for Computational Linguistics.
|
| 298 |
+
Yiming Cui, Wanxiang Che, Ting Liu, Bing Qin, Shijin Wang, and Guoping Hu. 2020. Revisiting pretrained models for Chinese natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings, pages 657-668, Online. Association for Computational Linguistics.
|
| 299 |
+
Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics, pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.
|
| 300 |
+
Ning Ding, Guangwei Xu, Yulin Chen, Xiaobin Wang, Xu Han, Pengjun Xie, Haitao Zheng, and Zhiyuan Liu. 2021. Few-NERD: A few-shot named entity recognition dataset. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics, pages 3198-3213, Online. Association for Computational Linguistics.
|
| 301 |
+
George Doddington, Alexis Mitchell, Mark Przybocki, Lance Ramshaw, Stephanie Strassel, and Ralph Weischedel. 2004. The automatic content extraction (ACE) program - tasks, data, and evaluation. In Proceedings of the Fourth International Conference on Language Resources and Evaluation (LREC'04), Lisbon, Portugal. European Language Resources Association (ELRA).
|
| 302 |
+
Octavian-Eugen Ganea and Thomas Hofmann. 2017. Deep joint entity disambiguation with local neural attention. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 2619-2629, Copenhagen, Denmark. Association for Computational Linguistics.
|
| 303 |
+
Zhiheng Huang, Wei Xu, and Kai Yu. 2015. Bidirectional lstm-crf models for sequence tagging. arXiv preprint arXiv:1508.01991.
|
| 304 |
+
Heng Ji, Xiaoman Pan, Boliang Zhang, Joel Nothman, James Mayfield, Paul McNamee, and Cash Costello. 2017. Overview of TAC-KBP2017 13 languages entity discovery and linking. In Proceedings of the 2017 Text Analysis Conference, TAC 2017, Gaithersburg, Maryland, USA, November 13-14, 2017. NIST.
|
| 305 |
+
|
| 306 |
+
Xiaobo Jiang, Kun He, Jiajun He, and Guangyu Yan. 2021. A new entity extraction method based on machine reading comprehension.
|
| 307 |
+
Meizhi Ju, Makoto Miwa, and Sophia Ananiadou. 2018. A neural layered model for nested named entity recognition. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics, pages 1446-1459, New Orleans, Louisiana. Association for Computational Linguistics.
|
| 308 |
+
Arzoo Katiyar and Claire Cardie. 2018. Nested named entity recognition revisited. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics, pages 861-871, New Orleans, Louisiana. Association for Computational Linguistics.
|
| 309 |
+
Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In 3rd International Conference on Learning Representations, ICLR 2015.
|
| 310 |
+
Harold W. Kuhn. 1955. The Hungarian method for the assignment problem. Naval Research Logistics Quarterly, 2(1-2):83-97.
|
| 311 |
+
Phong Le and Ivan Titov. 2018. Improving entity linking by modeling latent relations between mentions. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics, pages 1595-1604, Melbourne, Australia. Association for Computational Linguistics.
|
| 312 |
+
Gina-Anne Levow. 2006. The third international Chinese language processing bakeoff: Word segmentation and named entity recognition. In Proceedings of the Fifth SIGHAN Workshop on Chinese Language Processing, pages 108-117, Sydney, Australia. Association for Computational Linguistics.
|
| 313 |
+
Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: Denoising sequence-to-sequence pretraining for natural language generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7871-7880, Online. Association for Computational Linguistics.
|
| 314 |
+
Fei Li, ZhiChao Lin, Meishan Zhang, and Donghong Ji. 2021. A span-based model for joint overlapped and discontinuous named entity recognition. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics, pages 4814-4828, Online. Association for Computational Linguistics.
|
| 315 |
+
Qi Li and Heng Ji. 2014. Incremental joint extraction of entity mentions and relations. In Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics, pages 402-412, Baltimore, Maryland. Association for Computational Linguistics.
|
| 316 |
+
|
| 317 |
+
Xiaonan Li, Hang Yan, Xipeng Qiu, and Xuanjing Huang. 2020a. FLAT: Chinese NER using flat-lattice transformer. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 6836-6842, Online. Association for Computational Linguistics.
|
| 318 |
+
Xiaoya Li, Jingrong Feng, Yuxian Meng, Qinghong Han, Fei Wu, and Jiwei Li. 2020b. A unified MRC framework for named entity recognition. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 5849-5859, Online. Association for Computational Linguistics.
|
| 319 |
+
Hongyu Lin, Yaojie Lu, Xianpei Han, and Le Sun. 2019. Sequence-to-nuggets: Nested entity mention detection via anchor-region networks. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 5182-5192, Florence, Italy. Association for Computational Linguistics.
|
| 320 |
+
Wei Lu and Dan Roth. 2015. Joint mention extraction and classification with mention hypergraphs. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 857-867, Lisbon, Portugal. Association for Computational Linguistics.
|
| 321 |
+
Yi Luan, Dave Wadden, Luheng He, Amy Shah, Mari Ostendorf, and Hannaneh Hajishirzi. 2019. A general framework for information extraction using dynamic span graphs. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics, pages 3036-3046, Minneapolis, Minnesota. Association for Computational Linguistics.
|
| 322 |
+
Ying Luo and Hai Zhao. 2020. Bipartite flat-graph network for nested named entity recognition. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 6408-6418, Online. Association for Computational Linguistics.
|
| 323 |
+
Yuxian Meng, Wei Wu, Fei Wang, Xiaoya Li, Ping Nie, Fan Yin, Muyu Li, Qinghong Han, Xiaofei Sun, and Jiwei Li. 2019. Glyce: Glyph-vectors for chinese character representations. In Advances in Neural Information Processing Systems. Curran Associates.
|
| 324 |
+
Xue Mengge, Bowen Yu, Zhenyu Zhang, Tingwen Liu, Yue Zhang, and Bin Wang. 2020. Coarse-to-Fine Pre-training for Named Entity Recognition. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 6345-6354, Online. Association for Computational Linguistics.
|
| 325 |
+
Makoto Miwa and Mohit Bansal. 2016. End-to-end relation extraction using LSTMs on sequences and tree structures. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics, pages 1105-1116, Berlin, Germany. Association for Computational Linguistics.
|
| 326 |
+
|
| 327 |
+
Aldrian Obaja Muis and Wei Lu. 2017. Labeling gaps between words: Recognizing overlapping mentions with mention separators. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 2608-2618, Copenhagen, Denmark. Association for Computational Linguistics.
|
| 328 |
+
Tomoko Ohta, Yuka Tateisi, and Jin-Dong Kim. 2002. The genia corpus: An annotated research abstract corpus in molecular biology domain. In Proceedings of the Second International Conference on Human Language Technology Research, page 82-86, San Francisco, USA. Morgan Kaufmann Publishers Inc.
|
| 329 |
+
Giovanni Paolini, Ben Athiwaratkun, Jason Krone, Jie Ma, Alessandro Achille, Rishita Anubhai, Cicero Nogueira dos Santos, Bing Xiang, and Stefano Soatto. 2021. Structured prediction as translation between augmented natural languages. In 9th International Conference on Learning Representations, ICLR 2021.
|
| 330 |
+
Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word representations. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics, pages 2227-2237, New Orleans, Louisiana. Association for Computational Linguistics.
|
| 331 |
+
Sameer Pradhan, Alessandro Moschitti, Nianwen Xue, Hwee Tou Ng, Anders Björkelund, Olga Uryupina, Yuchen Zhang, and Zhi Zhong. 2013. Towards robust linguistic analysis using OntoNotes. In Proceedings of the Seventeenth Conference on Computational Natural Language Learning, pages 143-152, Sofia, Bulgaria. Association for Computational Linguistics.
|
| 332 |
+
Nicky Ringland, Xiang Dai, Ben Hachey, Sarvnaz Karimi, Cecile Paris, and James R. Curran. 2019. NNE: A dataset for nested named entity recognition in English newswire. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 5176-5181, Florence, Italy. Association for Computational Linguistics.
|
| 333 |
+
Yongliang Shen, Xinyin Ma, Zeqi Tan, Shuai Zhang, Wen Wang, and Weiming Lu. 2021a. Locate and label: A two-stage identifier for nested named entity recognition. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics, pages 2782-2794, Online. Association for Computational Linguistics.
|
| 334 |
+
Yongliang Shen, Xinyin Ma, Yechun Tang, and Weiming Lu. 2021b. A trigger-sense memory flow framework for joint entity and relation extraction. In Proceedings of the Web Conference 2021, WWW '21, page 1704-1715, New York, NY, USA. ACM.
|
| 335 |
+
Mohammad Golam Sohrab and Makoto Miwa. 2018. Deep exhaustive model for nested named entity
|
| 336 |
+
|
| 337 |
+
recognition. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 2843-2849, Brussels, Belgium. Association for Computational Linguistics.
|
| 338 |
+
Jana Straková, Milan Straka, and Jan Hajic. 2019. Neural architectures for nested NER through linearization. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 5326-5331, Florence, Italy. Association for Computational Linguistics.
|
| 339 |
+
Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Hao Tian, Hua Wu, and Haifeng Wang. 2020. Ernie 2.0: A continual pre-training framework for language understanding. Proceedings of the AAAI Conference on Artificial Intelligence, 34(05):8968-8975.
|
| 340 |
+
Chuanqi Tan, Wei Qiu, Mosha Chen, Rui Wang, and Fei Huang. 2020. Boundary enhanced neural span classification for nested named entity recognition. Proceedings of the AAAI Conference on Artificial Intelligence, 34(05):9016-9023.
|
| 341 |
+
Zeqi Tan, Yongliang Shen, Shuai Zhang, Weiming Lu, and Yueting Zhuang. 2021. A sequence-to-set network for nested named entity recognition. In Proceedings of the 30th International Joint Conference on Artificial Intelligence, IJCAI-21.
|
| 342 |
+
Erik F. Tjong Kim Sang and Fien De Meulder. 2003. Introduction to the CoNLL-2003 shared task: Language-independent named entity recognition. In Proceedings of the Seventh Conference on Natural Language Learning at HLT-NAACL 2003, pages 142-147.
|
| 343 |
+
Christopher Walker, Stephanie Strassel, and Kazuaki Maeda. 2006. ACE 2005 multilingual training corpus. Linguistic Data Consortium, Philadelphia, 57.
|
| 344 |
+
Bailin Wang and Wei Lu. 2018. Neural segmental hypergraphs for overlapping mention recognition. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 204-214, Brussels, Belgium. Association for Computational Linguistics.
|
| 345 |
+
Jue Wang, Lidan Shou, Ke Chen, and Gang Chen. 2020a. Pyramid: A layered model for nested named entity recognition. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 5918-5928, Online. Association for Computational Linguistics.
|
| 346 |
+
Wen Wang, Yang Cao, Jing Zhang, and Dacheng Tao. 2022a. Fp-detr: Detection transformer advanced by fully pre-training. In 10th International Conference on Learning Representations, ICLR 2022.
|
| 347 |
+
Xinyu Wang, Yong Jiang, Nguyen Bach, Tao Wang, Zhongqiang Huang, Fei Huang, and Kewei Tu. 2021. Improving named entity recognition by external context retrieving and cooperative learning. In Proceedings of the 59th Annual Meeting of the Association
|
| 348 |
+
|
| 349 |
+
for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing, pages 1800-1812, Online. Association for Computational Linguistics.
|
| 350 |
+
Xinyu Wang, Yongliang Shen, Jiong Cai, Tao Wang, Xiaobin Wang, Pengjun Xie, Fei Huang, Weiming Lu, Yueting Zhuang, Kewei Tu, Wei Lu, and Yong Jiang. 2022b. DAMO-NLP at SemEval-2022 Task 11: A Knowledge-based System for Multilingual Named Entity Recognition.
|
| 351 |
+
Yu Wang, Yun Li, Hanghang Tong, and Ziye Zhu. 2020b. HIT: Nested named entity recognition via head-tail pair and token interaction. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 6027-6036, Online. Association for Computational Linguistics.
|
| 352 |
+
Lu Xu, Zhanming Jie, Wei Lu, and Lidong Bing. 2021. Better feature integration for named entity recognition. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 3457-3469, Online. Association for Computational Linguistics.
|
| 353 |
+
Hang Yan, Tao Gui, Junqi Dai, Qipeng Guo, Zheng Zhang, and Xipeng Qiu. 2021. A unified generative framework for various NER subtasks. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics, pages 5808-5822, Online. Association for Computational Linguistics.
|
| 354 |
+
Songlin Yang and Kewei Tu. 2022. Bottom-up constituency parsing and nested named entity recognition with pointer networks. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics. Association for Computational Linguistics.
|
| 355 |
+
Juntao Yu, Bernd Bohnet, and Massimo Poesio. 2020. Named entity recognition as dependency parsing. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 6470-6476, Online. Association for Computational Linguistics.
|
| 356 |
+
Changmeng Zheng, Yi Cai, Jingyun Xu, Ho-fung Leung, and Guandong Xu. 2019. A boundary-aware neural model for nested named entity recognition. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing, pages 357-366, Hong Kong, China. Association for Computational Linguistics.
|
| 357 |
+
Hengyi Zheng, Bin Qin, and Ming Xu. 2021. Chinese medical named entity recognition using crf-mt-adapt and ner-mrc. In 2th International Conference on Computing and Data Science, pages 362-365.
|
| 358 |
+
|
| 359 |
+
# A Datasets
|
| 360 |
+
|
| 361 |
+
GENIA (Ohta et al., 2002) is an English biology nested named entity dataset and contains 5 entity types, including DNA, RNA, protein, cell line, and cell type categories. Follow Yu et al. (2020), we use $90\% /10\%$ train/test split and evaluate the model on the last epoch.
|
| 362 |
+
|
| 363 |
+
ACE04 and ACE05 (Doddington et al., 2004; Walker et al., 2006) are two English nested datasets, each of them contains 7 entity categories. We follow the same setup as previous work Katiyar and Cardie (2018); Lin et al. (2019).
|
| 364 |
+
|
| 365 |
+
KBP17 (Ji et al., 2017) has 5 entity categories, including GPE, ORG, PER, LOC, and FAC. We follow Lin et al. (2019) to split all documents into 866/20/167 documents for train/dev/test set.
|
| 366 |
+
|
| 367 |
+
NNE (Ringland et al., 2019) is a English nested NER dataset with 114 fine-grained entity types. Follow Wang et al. (2020a), we keep the original dataset split and pre-processing.
|
| 368 |
+
|
| 369 |
+
FewNERD (Ding et al., 2021) is a large-scale English flat NER dataset with 66 fine-grained entity types. Follow Ding et al. (2021), we adopt a standard supervised setting.
|
| 370 |
+
|
| 371 |
+
CoNLL03 (Tjong Kim Sang and De Meulder, 2003) is an English dataset with 4 types of named entities: LOC, ORG, PER and MISC. Follow Yan et al. (2021); Yu et al. (2020), we train our model on the train and development sets.
|
| 372 |
+
|
| 373 |
+
OntoNotes (Pradhan et al., 2013) is an English dataset with 18 types of named entity, consisting of 11 types and 7 values. We use the same train, development, test splits as Li et al. (2020b).
|
| 374 |
+
|
| 375 |
+
Chinese MSRA (Levow, 2006) is a Chinese dataset with 3 named entity types, including ORG, PER, LOC. We keep the original dataset split and pre-processing.
|
| 376 |
+
|
| 377 |
+
In Table 6 and Table 7, we report the number of sentences, the number of sentences containing nested entities, the average sentence length, the total number of entities, the number of nested entities, the nesting ratio, the maximum and the average number of entities in a sentence on all datasets.
|
| 378 |
+
|
| 379 |
+
# B Implementation Details
|
| 380 |
+
|
| 381 |
+
In default setting, we set the number of instance queries $M = 60$ , and the total assignable quantity
|
| 382 |
+
|
| 383 |
+
$Q = M \times 0.75 = 45$ . To ensure that the assignable quantities of different entities are balanced, we randomly divide $Q$ to different entities and adjust each division to be larger than $Q / G$ , where $G$ is the number of the ground-truth entities. When the number of entities is more than the total assignable quantity, we specify $Q = G$ . We have also tried other configurations that will be discussed in Appendix D.3. We set $L$ word-level transformer layers after BERT and set auxiliary losses in each layer. In the default setting $L$ equals 5. We compare the effect of different auxiliary layers on the model performance, which will be discussed in Appendix D.1. Since the instance queries are randomly initialized and do not have query semantics at the initial stage of training, we first fix the parameters of BERT and train the model for 5 epochs, allowing the instance queries to initially learn the query semantics. When decoding entities, we filter out the predictions with localization probability and classification probability less than the threshold 0.6 and 0.8, respectively.
|
| 384 |
+
|
| 385 |
+
# C Baselines
|
| 386 |
+
|
| 387 |
+
We compare PIQN with the following baselines:
|
| 388 |
+
|
| 389 |
+
- ARN (Lin et al., 2019) designs a sequence-to-nuggets architecture for nested mention detection, which first identifies anchor words and then recognizes the mention boundaries.
|
| 390 |
+
- HIT (Wang et al., 2020b) designs a head-tail detector and a token interaction tagger, which can leverage the head-tail pair and token interaction to express the nested structure.
|
| 391 |
+
- Pyramid (Wang et al., 2020a) presents a layered neural model for nested entity recognition, consisting of a stack of inter-connected layers.
|
| 392 |
+
- Biaffine (Yu et al., 2020) formulates NER as a structured prediction task and adopts a dependency parsing approach for NER.
|
| 393 |
+
- BiFlaG (Luo and Zhao, 2020) designs a bipartite flat-graph network with two subgraph modules for outermost and inner entities.
|
| 394 |
+
- BERT-MRC (Li et al., 2020b) formulates the NER task as a question answering task. They construct type-specific queries using semantic prior information for entity categories.
|
| 395 |
+
|
| 396 |
+
<table><tr><td rowspan="2"></td><td colspan="3">ACE04</td><td colspan="3">ACE05</td><td colspan="3">KBP17</td><td colspan="2">GENIA</td><td colspan="2">NNE</td><td></td></tr><tr><td>Train</td><td>Dev</td><td>Test</td><td>Train</td><td>Dev</td><td>Test</td><td>Train</td><td>Dev</td><td>Test</td><td>Train</td><td>Test</td><td>Train</td><td>Dev</td><td>Test</td></tr><tr><td>#S</td><td>6200</td><td>745</td><td>812</td><td>7194</td><td>969</td><td>1047</td><td>10546</td><td>545</td><td>4267</td><td>16692</td><td>1854</td><td>43457</td><td>1989</td><td>3762</td></tr><tr><td>#NS</td><td>2712</td><td>294</td><td>388</td><td>2691</td><td>338</td><td>320</td><td>2809</td><td>182</td><td>1223</td><td>3522</td><td>446</td><td>28606</td><td>1292</td><td>2489</td></tr><tr><td>#E</td><td>22204</td><td>2514</td><td>3035</td><td>24441</td><td>3200</td><td>2993</td><td>31236</td><td>1879</td><td>12601</td><td>50509</td><td>5506</td><td>248136</td><td>10463</td><td>21196</td></tr><tr><td>#NE</td><td>10149</td><td>1092</td><td>1417</td><td>9389</td><td>1112</td><td>1118</td><td>8773</td><td>605</td><td>3707</td><td>9064</td><td>1199</td><td>206618</td><td>8487</td><td>17670</td></tr><tr><td>NR</td><td>45.71</td><td>46.69</td><td>45.61</td><td>38.41</td><td>34.75</td><td>37.35</td><td>28.09</td><td>32.20</td><td>29.42</td><td>17.95</td><td>21.78</td><td>83.27</td><td>81.11</td><td>83.36</td></tr><tr><td>AL</td><td>22.50</td><td>23.02</td><td>23.05</td><td>19.21</td><td>18.93</td><td>17.2</td><td>19.62</td><td>20.61</td><td>19.26</td><td>25.35</td><td>25.99</td><td>23.84</td><td>24.20</td><td>23.80</td></tr><tr><td>#ME</td><td>28</td><td>22</td><td>20</td><td>27</td><td>23</td><td>17</td><td>58</td><td>15</td><td>21</td><td>25</td><td>14</td><td>149</td><td>58</td><td>64</td></tr><tr><td>#AE</td><td>3.58</td><td>3.37</td><td>3.73</td><td>3.39</td><td>3.30</td><td>2.86</td><td>2.96</td><td>3.45</td><td>2.95</td><td>3.03</td><td>2.97</td><td>5.71</td><td>5.26</td><td>5.63</td></tr></table>
|
| 397 |
+
|
| 398 |
+
Table 6: Statistics of the nested datasets used in the experiments. #S: the number of sentences, #NS: the number of sentences containing nested entities, #E: the total number of entities, #NE: the number of nested entities, NR: the nesting ratio (%), AL: the average sentence length, #ME: the maximum number of entities in a sentence, #AE: the average number of entities in a sentence
|
| 399 |
+
|
| 400 |
+
<table><tr><td></td><td colspan="3">CoNLL03</td><td colspan="3">OntoNotes</td><td colspan="3">FewNERD</td><td colspan="3">Chinese MSRA</td></tr><tr><td></td><td>Train</td><td>Dev</td><td>Test</td><td>Train</td><td>Dev</td><td>Test</td><td>Train</td><td>Dev</td><td>Test</td><td>Train</td><td>Dev</td><td>Test</td></tr><tr><td>#S</td><td>14041</td><td>3250</td><td>3453</td><td>49706</td><td>13900</td><td>10348</td><td>131965</td><td>18824</td><td>37648</td><td>41728</td><td>4636</td><td>4365</td></tr><tr><td>#E</td><td>23499</td><td>5942</td><td>5648</td><td>128738</td><td>20354</td><td>12586</td><td>340247</td><td>48770</td><td>96902</td><td>70446</td><td>4257</td><td>6181</td></tr><tr><td>AL</td><td>14.50</td><td>15.80</td><td>13.45</td><td>24.94</td><td>20.11</td><td>19.74</td><td>24.49</td><td>24.61</td><td>24.47</td><td>46.87</td><td>46.17</td><td>39.54</td></tr><tr><td>#ME</td><td>20</td><td>20</td><td>31</td><td>32</td><td>71</td><td>21</td><td>50</td><td>35</td><td>49</td><td>125</td><td>18</td><td>461</td></tr><tr><td>#AE</td><td>1.67</td><td>1.83</td><td>1.64</td><td>2.59</td><td>1.46</td><td>1.22</td><td>2.58</td><td>2.59</td><td>2.57</td><td>1.69</td><td>0.92</td><td>1.42</td></tr></table>
|
| 401 |
+
|
| 402 |
+
Table 7: Statistics of the flat datasets used in the experiments. #S: the number of sentences, #E: the total number of entities, AL: the average sentence length, #ME: the maximum number of entities in a sentence, #AE: the average number of entities in a sentence
|
| 403 |
+
|
| 404 |
+
- BARTNER (Yan et al., 2021) formulates NER as an entity span sequence generation problem and uses a unified Seq2Seq model with the pointer mechanism to tackle flat, nested, and discontinuous NER tasks.
|
| 405 |
+
- Seq2Set (Tan et al., 2021) formulates NER as an entity set prediction task. Different from Straková et al. (2019), they utilize a non-autoregressive decoder to predict entity set.
|
| 406 |
+
- Locate&Label (Shen et al., 2021a) treats NER as a joint task of boundary regression and span classification and proposed a two-stage identifier of locating entities first and labeling them later.
|
| 407 |
+
|
| 408 |
+
For a fair comparison, we did not compare with Sun et al. (2020); Li et al. (2020a); Meng et al. (2019) on Chinese MSRA because they either used glyphs or an external lexicon or a larger pre-trained language model. In addition, some works (Wang et al., 2021, 2022b) used search engines to retrieve input-related contexts to introduce external information, and we did not compare with them as well.
|
| 409 |
+
|
| 410 |
+
# D Analysis
|
| 411 |
+
|
| 412 |
+
# D.1 Analysis of Auxiliary Loss
|
| 413 |
+
|
| 414 |
+
Many works (Al-Rfou et al., 2019; Carion et al., 2020) have demonstrated that the auxiliary loss in the middle layer introduces supervised signals in advance and can improve model performance. We compared the effect of the different number of auxiliary-loss layers on the model performance (F1-score on ACE04). Overall, the model performs better as the number of auxiliary-loss layers increases. The model achieves the best results when the number of layers equals 5.
|
| 415 |
+
|
| 416 |
+

|
| 417 |
+
Figure 5: Analysis of Auxiliary Loss
|
| 418 |
+
|
| 419 |
+
# D.2 Analysis of Two Subtasks
|
| 420 |
+
|
| 421 |
+
We compare the model performance on entity localization and entity classification subtasks on the ACE04 dataset, as shown in Table 8. Compared with the previous state-of-the-art models (Tan et al., 2021; Shen et al., 2021a), our model achieves better performance on both entity localization and entity classification subtasks. This illustrates that the instance queries can automatically learn their query semantics about location and type of entities, which is consistent with our analysis in 5.3.
|
| 422 |
+
|
| 423 |
+
<table><tr><td rowspan="2">Model</td><td colspan="3">Localization</td></tr><tr><td>Pr.</td><td>Rec.</td><td>F1</td></tr><tr><td>Tan et al. (2021)</td><td>92.75</td><td>90.24</td><td>91.48</td></tr><tr><td>Shen et al. (2021a)</td><td>92.28</td><td>90.97</td><td>91.62</td></tr><tr><td>PIQN</td><td>92.56</td><td>91.89</td><td>92.23</td></tr><tr><td rowspan="2">Model</td><td colspan="3">Classification</td></tr><tr><td>Pr.</td><td>Rec.</td><td>F1</td></tr><tr><td>Tan et al. (2021)</td><td>95.36</td><td>86.03</td><td>90.46</td></tr><tr><td>Shen et al. (2021a)</td><td>95.40</td><td>86.75</td><td>90.87</td></tr><tr><td>PIQN</td><td>95.59</td><td>87.81</td><td>91.53</td></tr></table>
|
| 424 |
+
|
| 425 |
+
# D.3 Analysis of Label Assignment
|
| 426 |
+
|
| 427 |
+
Table 8: Localization and Classification Performance on ACE04
|
| 428 |
+
|
| 429 |
+
<table><tr><td>(M,Q)</td><td>Loc. F1</td><td>Cls. F1</td><td>Pr.</td><td>Rec.</td><td>F1</td></tr><tr><td>(60,15)</td><td>91.05</td><td>90.15</td><td>87.57</td><td>85.67</td><td>86.61</td></tr><tr><td>(60,30)</td><td>91.76</td><td>90.37</td><td>88.23</td><td>86.16</td><td>87.18</td></tr><tr><td>(60,45)</td><td>92.23</td><td>91.53</td><td>88.48</td><td>87.81</td><td>88.14</td></tr><tr><td>(60,50)</td><td>92.01</td><td>90.81</td><td>87.38</td><td>87.12</td><td>87.25</td></tr><tr><td>(30,15)</td><td>91.26</td><td>89.66</td><td>88.61</td><td>84.88</td><td>86.70</td></tr><tr><td>(60,30)</td><td>91.76</td><td>90.37</td><td>88.23</td><td>86.16</td><td>87.18</td></tr><tr><td>(90,45)</td><td>91.88</td><td>90.56</td><td>88.23</td><td>86.46</td><td>87.34</td></tr><tr><td>(120,60)</td><td>91.75</td><td>90.45</td><td>87.19</td><td>86.56</td><td>86.87</td></tr></table>
|
| 430 |
+
|
| 431 |
+
Table 9: Analysis on Dynamic Label Assignment for different combinations of the number $M$ of instance queries and the total assignable quantity $Q$ of labels.
|
| 432 |
+
|
| 433 |
+
We analyze the impact of dynamic label assignment on model performance for different combinations of the number $M$ of instance queries and the total assignable quantity $Q$ of labels. From Table 9, we observe that (1) there is a tradeoff between $M$ and $Q$ , and the model achieves the best performance with a ratio of 4:3. With this setting, the ratio of positive to negative instances of instance queries is 3:1. (2) The number of instance queries and the total assignable quantity is not as large as possible, and an excessive number may de
|
| 434 |
+
|
| 435 |
+
grade the model performance. In our experiments $(M, Q) = (60, 45)$ is the best combination.
|
2203.10xxx/2203.10545/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:272bf079f297944a307429796f5311e44681300f9d37d833e14d054c60b451f0
|
| 3 |
+
size 969870
|
2203.10xxx/2203.10545/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2203.10xxx/2203.10552/0f90ec88-1526-4a07-920a-d6c1d8306159_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2203.10xxx/2203.10552/0f90ec88-1526-4a07-920a-d6c1d8306159_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2203.10xxx/2203.10552/0f90ec88-1526-4a07-920a-d6c1d8306159_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a6a5e12d4e98a726f31220c8ecee62b22c2ed56ee7c1cfd17005893e471f2eb1
|
| 3 |
+
size 958172
|
2203.10xxx/2203.10552/full.md
ADDED
|
@@ -0,0 +1,476 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# A Learning Convolutional Neural Network Approach for Network Robustness Prediction
|
| 2 |
+
|
| 3 |
+
Yang Lou, Ruizi Wu, Junli Li, Lin Wang, Xiang Li, and Guanrong Chen
|
| 4 |
+
|
| 5 |
+
Abstract—Network robustness is critical for various societal and industrial networks again malicious attacks. In particular, connectivity robustness and controllability robustness reflect how well a networked system can maintain its connectedness and controllability against destructive attacks, which can be quantified by a sequence of values that record the remaining connectivity and controllability of the network after a sequence of node- or edge-removal attacks. Traditionally, robustness is determined by attack simulations, which are computationally very time-consuming or even practically infeasible. In this paper, an improved method for network robustness prediction is developed based on learning feature representation using convolutional neural network (LFR-CNN). In this scheme, higher-dimensional network data are compressed to lower-dimensional representations, and then passed to a CNN to perform robustness prediction. Extensive experimental studies on both synthetic and real-world networks, both directed and undirected, demonstrate that 1) the proposed LFR-CNN performs better than other two state-of-the-art prediction methods, with significantly lower prediction errors; 2) LFR-CNN is insensitive to the variation of the network size, which significantly extends its applicability; 3) although LFR-CNN needs more time to perform feature learning, it can achieve accurate prediction faster than attack simulations; 4) LFR-CNN not only can accurately predict network robustness, but also provides a good indicator for connectivity robustness, better than the classical spectral measures.
|
| 6 |
+
|
| 7 |
+
Index Terms—Complex network, robustness, convolutional neural network, graph representation learning, prediction.
|
| 8 |
+
|
| 9 |
+
# I. INTRODUCTION
|
| 10 |
+
|
| 11 |
+
A COMPLEX network is a graph consisting of large numbers of nodes and edges with complicated connections. Many natural and engineering systems can be modeled as complex networks, and then studied using graph theory and network analysis tools. The study of complex networks attracts
|
| 12 |
+
|
| 13 |
+
Yang Lou is with the Department of Computing and Decision Sciences, Lingnan University, Hong Kong, China, and also with the Key Laboratory of System Control and Information Processing, Ministry of Education of China, Shanghai 200240, China (e-mail: felixLou@ieee.org).
|
| 14 |
+
|
| 15 |
+
Ruizi Wu and Junli Li are with the College of Computer Science, Sichuan Normal University, Chengdu 610066, China (e-mail: vridge@foxmail.com; lijunli@sicnu.edu.cn).
|
| 16 |
+
|
| 17 |
+
Lin Wang is with the Department of Automation, Shanghai Jiao Tong University, Shanghai 200240, China, and also with the Key Laboratory of System Control and Information Processing, Ministry of Education, Shanghai 200240, China (e-mail: wanglin@sjtu.edu.cn).
|
| 18 |
+
|
| 19 |
+
Xiang Li is with the Institute of Complex Networks and Intelligent Systems, Shanghai Research Institute for Intelligent Autonomous Systems, Tongji University, Shanghai 201210, and also with the Department of Control Science and Engineering, Tongji University, Shanghai 200240, China (e-mail: lix2021@tongji.edu.cn).
|
| 20 |
+
|
| 21 |
+
Guanrong Chen is with the Department of Electrical Engineering, City University of Hong Kong, Hong Kong, China (e-mail:eegchen@cityu.edu.hk).
|
| 22 |
+
|
| 23 |
+
(Yang Lou and Ruizi Wu contributed equally to this work)
|
| 24 |
+
|
| 25 |
+
(Corresponding author: Yang Lou and Lin Wang)
|
| 26 |
+
|
| 27 |
+
increasing interest from research communities in various scientific and technological fields, including computer science, systems engineering, applied mathematics, statistical physics, biological sciences, and social sciences [1]–[4].
|
| 28 |
+
|
| 29 |
+
In the pursuit of networked systems control for beneficial applications, the network controllability [5]–[20] is a fundamental issue, which refers to the ability of a network of interconnected dynamic systems in changing from any initial state to any desired state under feasible control input within finite time [18]. The network connectivity is fundamentally important for a network to function, affecting particularly the network controllability [18] and synchronizability [21]. It is easy to see that good controllability requires good connectivity, but good connectivity does not necessarily guarantee good controllability [22]. In fact, network connectivity and controllability have very different characteristics and measures: the former is guaranteed by a sufficient number of edges, while the later further requires a proper organization of the sufficient number of edges.
|
| 30 |
+
|
| 31 |
+
Today, malicious attacks and random failures widely exist in many engineering and technological facilities and processes, which degrade or even destroy certain network functions typically through destructing the network connectivity. Therefore, it is essential to strengthen the network connectivity against such attacks and failures [22]–[29]. In general, destructive attacks and failures take place in the forms of node- and edge-removals, which may cause significant degeneration of network connectivity and controllability. In such situations, the abilities of a network to maintain its connectivity and controllability against attacks or failures are of great concerns, which are referred to as the connectivity robustness and controllability robustness, respectively.
|
| 32 |
+
|
| 33 |
+
Connectivity robustness is commonly measured by using the change of the portion of nodes in the largest connected component (LCC) [25] that survives from a sequence of attacks. A network is deemed more robust against attacks if it can always maintain higher values of the fractions of LCC nodes throughout an attack process. The investigation and optimization of connectivity robustness using this measure emphasize on protecting the LCC. Given certain practical constraints, e.g., node degree preservation, connectivity robustness can be enhanced by edge rewiring, which actually imposes disturbances onto the network structure [28], [30]-[37]. After some edge rewiring operations, whether such disturbance enhances the robustness or not has to be evaluated, typically by using very time-consuming attack simulations. As a remedy, several easy-to-access indicators, e.g. assortativity [38] and spectral measures [39], are adopted for estimating the
|
| 34 |
+
|
| 35 |
+
network connectivity robustness. For example, it is found that onion-like structured heterogeneous networks with positive assortativity coefficients are robust against attacks [25], [30], [40], [41]. However, these measures have limited scopes of applications, and therefore the time-consuming attack simulation remains as the main approach today.
|
| 36 |
+
|
| 37 |
+
Controllability robustness is generally measured using the change of density of driver nodes, at which external control signals can be imposed as input. A network is deemed more robust against attacks, if it can maintain a lower density of driver nodes throughout an attack process. The studies and optimization of controllability robustness using this measure emphasize on maintaining a low demand of additional driver nodes. Although controllability robustness can be enhanced by edge rewiring as in connectivity robustness enhancement, their objective functions in optimization are very different. In fact, on top of the connectedness, the way the nodes are connected makes a huge impact on the controllability. For example, it is observed that a power-law degree distribution does not necessarily imply weak controllability robustness; while multichain [42] and multi-loop [43], [44] structures significantly strengthen the controllability robustness. It is empirically found that extreme homogeneity is necessary for the optimal topology that has the best controllability robustness against random node attacks [45]. Likewise, attack simulation is a main approach to measuring network controllability robustness today, which however is even more time-consuming than measuring the network connectivity discussed above.
|
| 38 |
+
|
| 39 |
+
For both connectivity and controllability robustness enhancements, deep neural networks [46]–[48] provide a useful tool for computation, optimization and analysis. Successful deep learning applications on complex networks include network robustness prediction using convolutional neural networks (CNNs) [22], [49]–[52], and critical node identification using deep reinforcement learning [27] and graph attention networks [29]. Main advantages of CNN-based approaches for robustness prediction include: 1) the method is straightforward, where the adjacency matrix of a complex network is treated as a gray-scale image, and then the classification (if any) and regression tasks are same as in image processing. 2) The performance of CNN-based approach is stable and reliable: all types of network adjacency matrices are acceptable as input, which is also shift-invariant [53], namely shuffling and transposing pixels of an image (while keeping the network topology unchanged) does not degrade the performance of the prediction [22], [52]. In addition, it has been experimentally demonstrated that CNN is tolerable to slightly changes of the network size.
|
| 40 |
+
|
| 41 |
+
However, the above CNN-based approaches cannot guarantee the prediction performance when the input size has significant changes (e.g., $\pm 20\%$ or more) from the training samples. In addition, since many complex networks are sparse, the gray-scale images converted from network adjacency matrices typically contain a large amount of useless information, where quite a lot of pixels can be removed or compressed.
|
| 42 |
+
|
| 43 |
+
To overcome the aforementioned issues, a learning feature representation-based CNN (LFR-CNN) approach is proposed in this paper for precise network robustness prediction. LFR-
|
| 44 |
+
|
| 45 |
+
CNN consists of an LFR module and a CNN. The LFR module performs feature extraction and dimensionality reduction, so that the size of input to the CNN for prediction can be significantly reduced, and simultaneously redundant information can be filtered out.
|
| 46 |
+
|
| 47 |
+
The following text is organized as follows. Section II reviews the measures of network connectivity and controllability robustness against destructive node-removal attacks. Section III introduces the details of the proposed LRF-CNN. Section IV presents experimental results with analysis and comparison. Section V concludes the investigation.
|
| 48 |
+
|
| 49 |
+
# II. ROBUSTNESS OF COMPLEX NETWORKS
|
| 50 |
+
|
| 51 |
+
The concepts and calculations of connectivity robustness and controllability robustness are introduced in this section, where connectivity robustness reflects how well a networked system can maintain its connectedness under a sequence of node-removal attacks, while controllability robustness reflects how well it can maintain its controllable state. In this paper, only node-removal attacks are investigated, while edge-removal attacks can be studied in a similar manner.
|
| 52 |
+
|
| 53 |
+
# A. Connectivity Robustness
|
| 54 |
+
|
| 55 |
+
An undirected network is connected if and only if for each pair of nodes there is a path between them. A directed network is weakly connected if it remains to be connected after all the directions are removed. Both connectedness and weak connectedness are employed as measures of the network connectivity in this paper, for undirected and directed networks respectively.
|
| 56 |
+
|
| 57 |
+
Under a sequence of node-removal attacks, connectivity robustness is evaluated using the fraction of nodes in LCC after each node-removal [25], as follows:
|
| 58 |
+
|
| 59 |
+
$$
|
| 60 |
+
p (i) = \frac {N _ {\mathrm {L C C}} (i)}{N - i}, \quad i = 0, 1, \dots , N - 1, \tag {1}
|
| 61 |
+
$$
|
| 62 |
+
|
| 63 |
+
where $p(i)$ is the fractions of nodes in LCC after a total number of $i$ nodes removed; $N_{\mathrm{LCC}}(i)$ is the number of nodes in LCC after a total number of $i$ nodes have been removed from the network; $N$ is the number of nodes in the network before being attacked. When these values are plotted versus the fraction of removed nodes, a curve is obtained, called the connectivity curve.
|
| 64 |
+
|
| 65 |
+
# B. Controllability Robustness
|
| 66 |
+
|
| 67 |
+
For a linear time-invariant networked system $\dot{\mathbf{x}} = A\mathbf{x} + B\mathbf{u}$ where $A$ and $B$ are constant matrices of compatible dimensions, and $\mathbf{x}$ and $\mathbf{u}$ are the state vector and control input, respectively. The system is state controllable if and only if the controllability matrix $[BABA^2 B\cdots A^{N - 1}B]$ has a full row-rank, where $N$ is the dimension of $A$ , which is also the size of the network in the present study. It is shown [5] that, for a directed network, identifying the set of the minimum number of driver nodes $N_{D}$ can be converted to searching for a maximum matching of the network: $N_{D} = \max \{1,N - |E^{*}|\}$ , where $|E^{*}|$ is the number of edges in the maximum matching $E^{*}$ . For an undirected network, the minimum number of needed
|
| 68 |
+
|
| 69 |
+
driver nodes can be calculated using the exact controllability formula [6]: $N_{D} = \max \{1, N - \mathrm{rank}(A)\}$ . Then, the network controllability robustness is calculated as follows:
|
| 70 |
+
|
| 71 |
+
$$
|
| 72 |
+
q (i) = \frac {N _ {D} (i)}{N - i}, \quad i = 0, 1, \dots , N - 1, \tag {2}
|
| 73 |
+
$$
|
| 74 |
+
|
| 75 |
+
where $N_{D}(i)$ is the number of driver nodes needed to retain the network controllability after a total of $i$ nodes have been removed, and $N$ is the network size. When these values are plotted versus the fraction of removed nodes, a curve is obtained, called the controllability curve.
|
| 76 |
+
|
| 77 |
+
# C. Error Measures
|
| 78 |
+
|
| 79 |
+
For either connectivity or controllability, consider three curves: $\mathbf{s}_t = [s_t(0),\dots ,s_t(N - 1)]$ denotes the true curve obtained by attack simulations, and $\mathbf{s}_1 = [s_1(0),\dots ,s_1(N - 1)]$ and $\mathbf{s}_2 = [s_2(0),\dots ,s_2(N - 1)]$ denote two predicted curves, respectively. The difference between the true curve and a predicted curve is calculated by $\xi_{\alpha} = |\mathbf{s}_t - \mathbf{s}_{\alpha}|,$ where $\xi_{\alpha} = [\xi_{\alpha}(0),\dots ,\xi_{\alpha}(N - 1)]$ is the sequence of errors between the two curves, where $\xi_{\alpha}(i) = |s_t(i) - s_\alpha (i)|$ for $\alpha = 1$ or 2, and $i = 0,1,\dots ,N - 1$
|
| 80 |
+
|
| 81 |
+
The prediction error $\bar{\xi}_{\alpha}$ is then calculated by
|
| 82 |
+
|
| 83 |
+
$$
|
| 84 |
+
\bar {\xi} _ {\alpha} = \frac {1}{N} \sum_ {i = 0} ^ {N - 1} \xi (i) _ {\alpha}. \tag {3}
|
| 85 |
+
$$
|
| 86 |
+
|
| 87 |
+
The vector $\xi_{\alpha}$ can be used to visualize the prediction errors throughout the attack process. The scalar $\bar{\xi}_{\alpha}$ measures the overall prediction error, i.e., $\bar{\xi}_1 < \bar{\xi}_2$ means that the predicted curve $\mathbf{s}_1$ obtains lower prediction error than $\mathbf{s}_2$ .
|
| 88 |
+
|
| 89 |
+
For notational convenience, the integer index sequence $i = 0,1,\dots ,N - 1$ , will be replaced by the fractional index sequence $\delta = 0,\frac{1}{N},\ldots ,\frac{N - 1}{N}$ , thereby equivalently replacing $n_D(i)$ , with $n_D(\delta)$ .
|
| 90 |
+
|
| 91 |
+
# III. PERFORMANCE PREDICTOR
|
| 92 |
+
|
| 93 |
+
This section briefly reviews the predictor for controllability robustness (PCR) [50], which employs a VGG-structured CNN [54] and PATCHY-SAN [55] consisting of an LFR-based 1D-CNN. Pros and cons of these two approaches are discussed. Then, a structural LFR-CNN is designed by incorporating the LFR module and a simplified VGG-structured CNN. LFR-CNN has a parameter magnitude significantly greater than PATCHY-SAN, but less than PCR.
|
| 94 |
+
|
| 95 |
+
# A. Convolutional Neural Network
|
| 96 |
+
|
| 97 |
+
PCR is a CNN-based framework for predicting the controllability robustness [50], which has also been applied to predict connectivity robustness [22]. The CNN structure of the PCR is shown in Fig. 1. Network adjacency matrices are converted to gray-scale images and then used directly as input to CNN. Both classification and regression tasks can be performed using such an image-processing mechanism. Due to a sufficiently large source of training data that can be generated using various synthetic network models, tens of millions of internal parameters in a CNN can be properly trained.
|
| 98 |
+
|
| 99 |
+

|
| 100 |
+
Fig. 1. CNN structure of PCR. The input is adjacency matrix; the output is an $N$ -vector. For $N = 1000$ , seven feature map (FM) groups are installed with $N_{i} = \lceil N / 2^{(i + 1)} \rceil$ , for $i = 1, 2, \ldots, 7$ . The concatenation layer reshapes the matrix to a vector, from FM 7 to FC 1, i.e., $\mathrm{FC1} = N_7 \times N_7 \times 512$ and $\mathrm{FC2} = 4096$ [50].
|
| 101 |
+
|
| 102 |
+
The mean-squared error between the predicted connectivity or controllability curve $\hat{v}$ and true curve $v$ is used as the loss function:
|
| 103 |
+
|
| 104 |
+
$$
|
| 105 |
+
\mathcal {L} = \frac {1}{N + 1} \sum_ {i = 0} ^ {N} \left| \left| \hat {v} (i) - v (i) \right| \right|, \tag {4}
|
| 106 |
+
$$
|
| 107 |
+
|
| 108 |
+
where $\hat{v}(i)$ represents the predicted connectivity or controllability value when a total proportion of $i/N$ nodes have been removed from the network; $v(i)$ represents the corresponding true value obtained by attack simulation; $||\cdot||$ is the Euclidean norm. The training process aims at adjusting the internal parameters [46], with the objective of minimizing $\mathcal{L}$ .
|
| 109 |
+
|
| 110 |
+
# B. PATCHY-SAN
|
| 111 |
+
|
| 112 |
+
Complex network data have distinguished continuous and discrete attributes that are different from general image data. A group of recurrent neural networks, namely the graph neural networks (GNNs) [56]–[58], are specifically designed for processing graph data. Specifically, lower-dimensional representations are generated from compacting higher-dimensional raw graph data, and then classification or/and regression tasks are performed by processing the lower-dimensional representation data. PATCHY-SAN [55], as a successful GNN technique, processes graph data with selecting, assembling, and normalizing (SAN) operations, detailed below.
|
| 113 |
+
|
| 114 |
+
1) Node Sequence Selection: A fixed-length sequence of $W$ nodes are selected from the $N$ nodes in the network. Nodes are arranged in descending order according to certain importance measure. Thus, for different networks, similar important nodes are arranged in similar ranks in the node sequence.
|
| 115 |
+
|
| 116 |
+
Node sequence selection is the process of sorting and identifying critical nodes. Each node is assigned a score via a labeling procedure, where node centrality measures such as degree and betweenness are used to describe the importance of a node. Then, all the nodes are sorted in descending order of the labeling scores; the first $W$ nodes are selected as the node sequence. A receptive fields of size $g$ will be created for each node in the selected sequence. Each receptive field is constructed by assembling and normalizing as introduced in the following. Note that if $N < W$ , all-zero receptive fields are added for padding.
|
| 117 |
+
|
| 118 |
+
2) Neighborhood Assembly: A set of neighboring nodes is collected for each node in the selected sequence. A breadth-first search is used to collect the neighborhood field, namely if
|
| 119 |
+
|
| 120 |
+
there is not enough neighboring nodes collected in the current depth, then search in the one-step further neighborhoods, and so on, until at lease $g$ neighboring nodes are collected, or no more neighboring node to explore.
|
| 121 |
+
|
| 122 |
+
3) Normalization: The extracted neighborhood data are ranked to create the normalized receptive fields. The normalization process also imposes an order on the neighboring field for each selected node such that the unordered neighboring field is mapped into an embedding vector space in a linear order. The orders of nodes are determined by a labeling procedure using node centrality measures. In the resultant normalized vector, the root node is assigned as the first element, followed by the second to the $g$ -th neighboring nodes. This normalization procedure leverages graph labeling on the neighboring nodes of the root node.
|
| 123 |
+
|
| 124 |
+
To this end, an $N$ -node network is represented by a $W$ -unit receptive field, where each receptive field is a $g \times h$ matrix, with $h$ representing the number of attributes used for the neighboring nodes. Since generally $W \leq N$ , $g \ll N$ , and $h \ll N$ , an $N^2$ adjacency matrix is mapped to a compressed representation of size $Wgh$ , which will be reshaped and then passed to a 1D-CNN for further processing in PATCHY-SAN.
|
| 125 |
+
|
| 126 |
+
Since this procedure generates learned feature representations for graph data, it is named an LFR module.
|
| 127 |
+
|
| 128 |
+
# C. LFR-CNN
|
| 129 |
+
|
| 130 |
+
PCR is straightforward and fast, while PATCHY-SAN extracts topological features first. The input of PCR is the raw adjacency matrix. Since many real-world networks are sparse, which have much fewer edges than the possible maximum number of edges, the input adjacency matrix contains a lot of meaningless information that can be removed or compressed. In contrast, PATCHY-SAN employs a shallow 1D-CNN structure. Empirically, if properly trained and used, deeper neural networks with more layers and parameters are prone to having better performances than those with fewer layers and parameters, especially for large-scale complex network data.
|
| 131 |
+
|
| 132 |
+
TABLEI COMPARISON OF PCR, PATCHY-SAN, LFR-CNN IN TERMS OF REPRESENTATION, REPRESENTATION SIZE, NUMBER OF LAYERS, AND MAGNITUDE OF NUMBER OF PARAMETERS.
|
| 133 |
+
|
| 134 |
+
<table><tr><td></td><td>Converted Representation</td><td>Size</td><td>Feature Maps</td><td>Parameters</td></tr><tr><td>PCR</td><td>Gray-Scale Image</td><td>N2</td><td>7(6)</td><td>2.4 × 107</td></tr><tr><td>PATCHY-SAN</td><td>LFR</td><td>Wgh</td><td>2</td><td>5.1 × 105</td></tr><tr><td>LFR-CNN</td><td>LFR</td><td>Wgh</td><td>3</td><td>6.0 × 106</td></tr></table>
|
| 135 |
+
|
| 136 |
+
Table I shows that PCR converts an $N^2$ adjacency matrix to an gray-scale image without compression, while for PATCHY-SAN an adjacency matrix is compressed to an LFR of size $Wgh$ . The core components of PCR and PATCHY-SAN are a 2D-CNN and a 1D-CNN, respectively. A CNN with 7 feature map (FM) groups (or 6-FM for small-sized networks) in PCR requires training a total number of $2.4 \times 10^7$ internal parameters, while the 1D-CNN in PATCHY-SAN requires training $5.1 \times 10^5$ parameters.
|
| 137 |
+
|
| 138 |
+
In this paper, an LFR-CNN is proposed by installing a 2D-CNN (similar to PCR, but with shallower structure) following the LFR module of PATCHY-SAN. Compared to PCR and PATCHY-SAN, LFR-CNN has the following advantages: 1) a 2D-CNN can be more powerful than the 1D-CNN in PATCHY-SAN. 2) With LFR, the required number of FMs in 2D-CNN can be significantly reduced, and more importantly the required number of FMs does not need to change for different network sizes. 3) LFR-CNN requires an intermediate number of training parameters, i.e., $6.0 \times 10^{6}$ . LFR-CNN achieves a balance in CNN structure and magnitude of number of parameters between PCR and PATCHY-SAN.
|
| 139 |
+
|
| 140 |
+

|
| 141 |
+
Fig. 2. General framework of PATCHY-SAN, LFR-CNN, and PCR: PATCHY-SAN and LFR-CNN share the common module of LFR performing the selection, assembly, and normalization (SAN) tasks. LFR-CNN and PCR share a similar VGG-structured 2D-CNN module.
|
| 142 |
+
|
| 143 |
+
The different structures of PCR, PATCHY-SAN, and LFR-CNN are shown in Fig. 2, where the LFR module consists of selection, assembly, and normalization operations. Given the same LFR as the input, a 2D-CNN can capture more feature details than a 1D-CNN, therefore is more suitable to be applied to large-scale complex network data. The proposed LFR-CNN naturally combines PATCHY-SAN and PRC by incorporating their advantages.
|
| 144 |
+
|
| 145 |
+
Similarly to PCR, a VGG-structured [54] CNN is installed in LFR-CNN. For network sizes around $N = 1000$ , PCR needs seven FM groups to perform prediction. When the network size is reduced (e.g., $N = 500$ ), the number of FMs can be reduced (e.g., 6 FMs). In contrast, since raw graph data are compressed by the LFR module, the CNN in LFR-CNN is not necessary to be adjusted if the network sizes are not significantly changed. Specifically, as shown in the experimental studies, LFR-CNN is able to process different network sizes $N \in [350, 1300]$ using the same 3-FM CNN.
|
| 146 |
+
|
| 147 |
+

|
| 148 |
+
Fig. 3. The simplified 2D-CNN structure with three feature map groups installed with $N_{i}^{r} = \lceil N^{r} / 2^{(i + 1)}\rceil$ , for $i = 1,2,3$ , where $N^r\times N^r$ is the size of the input reshaped representation. The concatenation layer reshapes the matrix to a vector from FM 3 to FC 1.
|
| 149 |
+
|
| 150 |
+
TABLE II PARAMETER SETTING OF THE 3-FM 2D-CNN INSTALLED IN LFR-CNN.
|
| 151 |
+
|
| 152 |
+
<table><tr><td>Group</td><td>Layer</td><td>Kernel Size</td><td>Stride</td><td>Output Channel</td></tr><tr><td rowspan="2">Group 1</td><td>Conv7-64</td><td>7 × 7</td><td>1</td><td>64</td></tr><tr><td>Max2</td><td>2 × 2</td><td>2</td><td>64</td></tr><tr><td rowspan="2">Group 2</td><td>Conv5-64</td><td>5 × 5</td><td>1</td><td>64</td></tr><tr><td>Max2</td><td>2 × 2</td><td>2</td><td>64</td></tr><tr><td rowspan="2">Group 3</td><td>Conv3-128</td><td>3 × 3</td><td>1</td><td>128</td></tr><tr><td>Max2</td><td>2 × 2</td><td>2</td><td>128</td></tr></table>
|
| 153 |
+
|
| 154 |
+
The detailed structure is illustrated in Fig. 3, and the parameters are summarized in Table II. Each group of FM1-FM3 contains a convolution layer, a ReLU performing the activation function $f(x) = \max(0, x)$ [59], and a max pooling layer. The output of each hidden layer is summed up, rectified by a ReLU, and then transmitted to the next layer. To that end, max pooling layers will reduce the data dimension as input to the next layer. Then, two fully-connected layers are installed to map feature representations and reshape the regression output. The same loss function as in PCR is employed, as shown in Eq. (4).
|
| 155 |
+
|
| 156 |
+
# IV. EXPERIMENTAL STUDIES
|
| 157 |
+
|
| 158 |
+
A total of 9 synthetic network models are simulated, including the Erdős-Rényi (ER) random-graph [60], Barabási-Albert (BA) scale-free [61], [62], generic scale-free (SF) [63], onion-like generic scale-free (OS) [25], Newman-Watts small-world (SW-NW) [64], Watts-Strogatz small-world (SW-WS) [65], $q$ -snapback (QS) [43], random triangle (RT) [44] and random hexagon (RH) [44] networks.
|
| 159 |
+
|
| 160 |
+
Specifically, a BA network is generated according to the preferential attachment scheme [61], while an SF network is generated according to a set of predefined weights $w_{i} = (i + \theta)^{-\sigma}$ , where $i = 1,2,\dots,N$ , $\sigma \in [0,1)$ and $\theta \ll N$ . Two nodes $i$ and $j$ are randomly picked with a probability proportional to their weights $w_{i}$ and $w_{j}$ , respectively. An OS network is generated based on an SF, with $2N$ rewiring operations towards assortativity maximization. The degree distributions of BA, SF, and OS all follow the power law.
|
| 161 |
+
|
| 162 |
+
Both SW-NW and SW-WS start from an $N$ -node loop having $K(= 2)$ connected nearest-neighbors. The difference is that additional edges are added without removing any existing edges in SW-NW [64]; while rewiring operations are performed in SW-WS [65].
|
| 163 |
+
|
| 164 |
+
QS consists of a backbone chain and multiple snapback edges [43]. RT and RH consist of random triangles and hexagons, respectively [44].
|
| 165 |
+
|
| 166 |
+
To exactly control the number of generated edges to be $M$ , uniformly-randomly adding or removing edges can be performed. A directed network can be converted to an undirected network by removing the direction. However, when converting an undirected network to be directed, it should follow some specific patterns, e.g., a directed backbone chain in QS and a directed loop in SW-NW and SW-WS should be ensured; while for some other edges, directions can be assigned randomly.
|
| 167 |
+
|
| 168 |
+
For each synthetic network, 1000 instances are randomly generated for training, thus there are $1000 \times 9 = 9000$
|
| 169 |
+
|
| 170 |
+
training samples in total. In addition, two different sets of $100 \times 9 = 900$ samples are used for cross validation and testing, respectively.
|
| 171 |
+
|
| 172 |
+
The size of each synthetic network is randomly determined in three different settings, namely, 1) set $N \in [350,650]$ (with an average $\bar{N} = 500.5$ ) for the experiments of predicting connectivity and controllability robustness in Subsections IV-A, IV-B, IV-C, IV-D, IV-G, and IV-H; 2) set $N \in [700,1300]$ (with an average $\bar{N} = 999.8$ ) for the scalability investigation in Subsection IV-E; 3) set $N \in [700,900]$ (with an average $\bar{N} = 800.0$ ) for the study of the influence of information loss on the three comparative approaches in Subsection IV-F.
|
| 173 |
+
|
| 174 |
+
The average degrees are also assigned randomly. The ranges are set differently for various network models. For SW $\langle k\rangle \in [2.5,5]$ , for RH, $\langle k\rangle \in [2,4]$ , for RT, $\langle k\rangle \in [1.5,3]$ , while for other models, $\langle k\rangle \in [3,6]$ . The overall average degree of the training network is 4.33, while that of the testing network is 4.36, with data obtained by performing posterior statistics.
|
| 175 |
+
|
| 176 |
+
The proposed LFR-CNN is compared with PATCHY-SAN [55] and PCR [22], [50] in predicting the connectivity and controllability robustness for both synthetic and real-world networks under various node-removal attacks, including random attack (RA), targeted betweenness-based (TB) attack, and targeted degree-based (TD) attack. For PCR, a 6-FM CNN is used for $N < 700$ and a 7-FM structure is used for $N \geq 700$ . For PATCHY-SAN and LFR-CNN, the structures remain the same for all networks with $N \in [350,1300]$ . For LFR, set the length of the selected node sequence to be $W = 500$ for $N < 700$ , and $W = 1000$ for $N \geq 700$ ; the receptive field size $g = 10$ ; the number of attributes $h = 2$ (the two default attributes are node degree and clustering coefficient).
|
| 177 |
+
|
| 178 |
+
All experiments are performed on a PC Intel (R) Core i7-8750H CPU @ 2.20GHz, which has memory (RAM) 16 GB with running Windows 10 Home 64-bit Operating System.
|
| 179 |
+
|
| 180 |
+
# A. Predicting Controllability Robustness for Directed Networks
|
| 181 |
+
|
| 182 |
+
Controllability robustness of directed networks under RA and TB is predicted using LFR-CNN, PCR, and PATCHY-SAN. The simulation results in terms of controllability curves are shown in Figs. 4 and 5, respectively. A network controllability curve is denoted by $q(\delta)$ , where $\delta$ represents the proportion of removed nodes. For each predictor, its predicted controllability curve and prediction error curve are plotted in the same color; 'SIM' denotes the controllability curve obtained by attack simulations. Each curve is averaged from 100 testing samples.
|
| 183 |
+
|
| 184 |
+
As shown in Figs. 4 and 5, PCR performs badly in prediction. This is due to the following two reasons: 1) both the training and testing data have a wide network size variation with $N \in [350,650]$ and $\langle k \rangle \in [1.5,6]$ ; and 2) there 9 synthetic network types trained and tested. As a result, PCR predicts the controllability curves almost in the same pattern for all different networks with different sizes and average degrees. In contrast, LFR-CNN and PATCHY-SAN, both contain an LFR module, are able to predict different controllability curves for different scenarios. In Figs. 4 (c), (d), (i), and Fig. 5 (i), it is
|
| 185 |
+
|
| 186 |
+

|
| 187 |
+
|
| 188 |
+

|
| 189 |
+
|
| 190 |
+

|
| 191 |
+
|
| 192 |
+

|
| 193 |
+
|
| 194 |
+

|
| 195 |
+
|
| 196 |
+

|
| 197 |
+
|
| 198 |
+

|
| 199 |
+
Fig. 4. [color online] Comparison of prediction results using LFR-CNN, PCR, and PATCHY-SAN, for controllability robustness of directed networks $(N\in [350,650])$ under RA.
|
| 200 |
+
|
| 201 |
+

|
| 202 |
+
|
| 203 |
+

|
| 204 |
+
|
| 205 |
+

|
| 206 |
+
|
| 207 |
+

|
| 208 |
+
|
| 209 |
+

|
| 210 |
+
|
| 211 |
+

|
| 212 |
+
|
| 213 |
+

|
| 214 |
+
|
| 215 |
+

|
| 216 |
+
|
| 217 |
+

|
| 218 |
+
Fig. 5. [color online] Comparison of prediction results using LFR-CNN, PCR, and PATCHY-SAN, for controllability robustness of directed networks $(N\in [350,650])$ under TB.
|
| 219 |
+
|
| 220 |
+

|
| 221 |
+
|
| 222 |
+

|
| 223 |
+
|
| 224 |
+
visible that the green curves (LFR-CNN predictions) are closer to the black dotted curves (true simulation results) than the red curves (PATCHY-SAN predictions), meaning that LFR-CNN performs clearly better than PATCHY-SAN in prediction.
|
| 225 |
+
|
| 226 |
+
Table III summarizes the overall prediction errors of the three predictors in different experiments, with Kruskal-Wallis H-test [66] results. The overall errors of the results in Fig. 4 are shown in Table III (I), which shows that 1) LFR-CNN performs significantly better than PCR for all networks; 2) LFR-CNN performs significantly better than PATCHY-SAN for ER, SF, OS, and RT, but significantly worse than PATCHY-SAN for SW-WS, QS, and RH. The overall errors of the results in Fig. 5 are shown in Table III (II), which shows that 1) LFR-CNN performs significantly better than PCR for all networks; 2) LFR-CNN performs significantly better than PATCHY-SAN for ER, SW-NW, SW-WS, RH, and RT, but significantly worse than PATCHY-SAN for BA. All in all, LFR-CNN outperforms PCR for all networks; LFR-CNN outperforms PATCHY-SAN in 9 comparisons, but is worse in 4 comparisons, while in the other 5 comparisons, two predictors have no significant differences.
|
| 227 |
+
|
| 228 |
+
B. Predicting Controllability Robustness for Real-world Networks
|
| 229 |
+
|
| 230 |
+

|
| 231 |
+
|
| 232 |
+

|
| 233 |
+
|
| 234 |
+

|
| 235 |
+
|
| 236 |
+

|
| 237 |
+
|
| 238 |
+

|
| 239 |
+
|
| 240 |
+

|
| 241 |
+
|
| 242 |
+

|
| 243 |
+
Fig. 6. [color online] Comparison of prediction results using LFR-CNN, PCR, and PATCHY-SAN, for controllability robustness of REDDIT-MULTI [67] real-world networks ( $N \in [419,570]$ ) under RA.
|
| 244 |
+
|
| 245 |
+

|
| 246 |
+
|
| 247 |
+

|
| 248 |
+
|
| 249 |
+
A total of 9 real-world network instances are randomly selected from the Reddit multiset data [67]. Three predictors are used to predict the controllability robustness of there real-world networks under RA. The basic information of these networks and the prediction errors obtained by the three
|
| 250 |
+
|
| 251 |
+
TABLE III COMPARISON OF AVERAGE PREDICTION ERRORS AMONG LFR-CNN, PCR AND PATCHY-SAN, WHERE $N\in [350,650]$ . THE SIGNS IN PARENTHESES DENOTE THE KRUSKAL-WALLIS H-TEST [66] RESULTS OF LFR-CNN VS PCR AND LFR-CNN VS PATCHY-SAN, RESPECTIVELY. A \*+ SIGN DENOTES THAT LFR-CNN SIGNIFICANTLY OUTPERFORMS THE OTHER METHOD BY OBTAINING LOWER ERRORS; A $\approx$ ' SIGN DENOTES NO SIGNIFICANT DIFFERENCE BETWEEN TWO METHODS; AND A $-$ ' SIGN DENOTES THAT LFR-CNN PERFORMS SIGNIFICANTLY WORSE THAN THE OTHER METHODS WITH GREATER ERRORS.
|
| 252 |
+
|
| 253 |
+
<table><tr><td colspan="2">Average Prediction Error ξ</td><td>ER</td><td>BA</td><td>SF</td><td>OS</td><td>SW-NW</td><td>SW-WS</td><td>QS</td><td>RH</td><td>RT</td></tr><tr><td>(I) Controllability</td><td rowspan="2">LFR-CNN</td><td>0.0450</td><td>0.0395</td><td>0.0601</td><td>0.0567</td><td>0.0480</td><td>0.0361</td><td>0.0375</td><td>0.0440</td><td>0.0474</td></tr><tr><td rowspan="3">Robustness of Directed Networks under RA</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td></tr><tr><td>PCR</td><td>0.1280</td><td>0.1509</td><td>0.2689</td><td>0.2541</td><td>0.1139</td><td>0.1358</td><td>0.1301</td><td>0.1331</td><td>0.1360</td></tr><tr><td>PATCHY-SAN</td><td>0.0313</td><td>0.0458</td><td>0.0732</td><td>0.0601</td><td>0.0450</td><td>0.0253</td><td>0.0272</td><td>0.0304</td><td>0.0541</td></tr><tr><td>(II) Controllability</td><td rowspan="2">LFR-CNN</td><td>0.02544</td><td>0.05219</td><td>0.04376</td><td>0.04650</td><td>0.02355</td><td>0.02445</td><td>0.02210</td><td>0.02134</td><td>0.03641</td></tr><tr><td rowspan="3">Robustness of Directed Networks under TB</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td></tr><tr><td>PCR</td><td>0.1369</td><td>0.1625</td><td>0.2704</td><td>0.2570</td><td>0.1374</td><td>0.1548</td><td>0.1384</td><td>0.1302</td><td>0.1300</td></tr><tr><td>PATCHY-SAN</td><td>0.0354</td><td>0.0351</td><td>0.0391</td><td>0.0388</td><td>0.0273</td><td>0.0333</td><td>0.0238</td><td>0.0258</td><td>0.0614</td></tr><tr><td>(III) Connectivity</td><td rowspan="2">LFR-CNN</td><td>0.0362</td><td>0.0665</td><td>0.0868</td><td>0.0908</td><td>0.0338</td><td>0.0365</td><td>0.0350</td><td>0.0406</td><td>0.0767</td></tr><tr><td rowspan="3">Robustness of Undirected Networks under RA</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td></tr><tr><td>PCR</td><td>0.0695</td><td>0.0767</td><td>0.1167</td><td>0.1219</td><td>0.0663</td><td>0.0863</td><td>0.0825</td><td>0.0728</td><td>0.0779</td></tr><tr><td>PATCHY-SAN</td><td>0.0639</td><td>0.0692</td><td>0.0835</td><td>0.0803</td><td>0.0703</td><td>0.0670</td><td>0.0663</td><td>0.0590</td><td>0.0635</td></tr><tr><td>(IV) Connectivity</td><td rowspan="2">LFR-CNN</td><td>0.0302</td><td>0.0334</td><td>0.0215</td><td>0.0262</td><td>0.0279</td><td>0.0265</td><td>0.0254</td><td>0.0345</td><td>0.0563</td></tr><tr><td rowspan="3">Robustness of Undirected Networks under TD</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td></tr><tr><td>PCR</td><td>0.1423</td><td>0.1680</td><td>0.2724</td><td>0.2792</td><td>0.1644</td><td>0.1520</td><td>0.1402</td><td>0.1351</td><td>0.1386</td></tr><tr><td>PATCHY-SAN</td><td>0.0404</td><td>0.0420</td><td>0.0230</td><td>0.0282</td><td>0.0501</td><td>0.0446</td><td>0.0439</td><td>0.0408</td><td>0.0460</td></tr></table>
|
| 254 |
+
|
| 255 |
+
TABLE IV BASIC INFORMATION OF REDDIT-MULTI REAL-WORLD NETWORKS. COMPARISON OF AVERAGE PREDICTION ERRORS AMONG LFR-CNN, PCR AND PATCHY-SAN, WHERE $N\in [419,570]$ . NUMBERS IN PARENTHESES DENOTE THE RANKS OF PREDICTORS IN ASCENDING ORDER OF PREDICTION ERRORS.
|
| 256 |
+
|
| 257 |
+
<table><tr><td></td><td>RW1</td><td>RW2</td><td>RW3</td><td>RW4</td><td>RW5</td><td>RW6</td><td>RW7</td><td>RW8</td><td>RW9</td></tr><tr><td>REDDIT-MULTI [67]</td><td>12K-16</td><td>12K-40</td><td>12K-41</td><td>12K-49</td><td>12K-81</td><td>12K-124</td><td>12K-129</td><td>5K-1</td><td>5K-2</td></tr><tr><td>N</td><td>499</td><td>510</td><td>538</td><td>551</td><td>499</td><td>522</td><td>570</td><td>419</td><td>428</td></tr><tr><td>⟨k⟩</td><td>6.31</td><td>8.93</td><td>6.84</td><td>7.15</td><td>4.95</td><td>7.56</td><td>5.75</td><td>47.07</td><td>35.01</td></tr><tr><td>LFR-CNN</td><td>0.1082 (2)</td><td>0.0667 (1)</td><td>0.1035 (1)</td><td>0.1014 (2)</td><td>0.0856 (1)</td><td>0.1041 (1)</td><td>0.0824 (1)</td><td>0.1168 (1)</td><td>0.0875 (1)</td></tr><tr><td>PCR</td><td>0.0969 (1)</td><td>0.0938 (2)</td><td>0.1104 (2)</td><td>0.0949 (1)</td><td>0.1532 (2)</td><td>0.1224 (2)</td><td>0.1378 (2)</td><td>0.1866 (2)</td><td>0.1718 (2)</td></tr><tr><td>PATCHY-SAN</td><td>0.1503 (3)</td><td>0.1211 (3)</td><td>0.1497 (3)</td><td>0.1531 (3)</td><td>0.1733 (3)</td><td>0.1563 (3)</td><td>0.1679 (3)</td><td>0.3611 (3)</td><td>0.2636 (3)</td></tr></table>
|
| 258 |
+
|
| 259 |
+
predictors are summarized in Table IV. Ranks of the predictors in ascending order of prediction error are attached in parentheses following the errors; the average ranks of LFR-CNN, PCR and PATCHY-SAN are 1.22, 1.78, and 3, respectively. This suggests that LFR-CNN and PCR have better generalizability than PATCHY-SAN for unknown real-world networks, although the overall prediction errors of all three predictors are greater than those for synthetic networks. The predicted controllability curves are shown in Fig. 6, which demonstrate that LFR-CNN predicts controllability curves closer to the simulation results than the other two predictors do.
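For clarity, the average prediction error $\xi$ reported throughout these tables can be read as the mean absolute deviation between a predicted robustness curve and the simulated ground-truth curve. The following is a minimal sketch under that assumption; the curve values are illustrative, not taken from the experiments.

```python
import numpy as np

def average_prediction_error(predicted: np.ndarray, simulated: np.ndarray) -> float:
    """Mean absolute deviation between a predicted robustness curve and
    the ground-truth curve obtained by attack simulation."""
    assert predicted.shape == simulated.shape
    return float(np.mean(np.abs(predicted - simulated)))

# Illustrative curves sampled at five node-removal fractions.
pred = np.array([0.20, 0.25, 0.33, 0.50, 0.90])
sim  = np.array([0.21, 0.27, 0.30, 0.55, 1.00])
print(average_prediction_error(pred, sim))  # ~0.042
```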
|
| 260 |
+
|
| 261 |
+
# C. Predicting Connectivity Robustness for Undirected Networks
|
| 262 |
+
|
| 263 |
+
CNN-based approaches are capable of dealing with all types of complex networks, including weighted and unweighted, directed and undirected, real-world and synthetic networks [52]. Here, for brevity, a comparison of connectivity robustness predictions is performed only on undirected networks. The predicted connectivity curves under RA are shown in Fig. 7, for which the overall prediction errors are summarized in Table III (III). Figure 7 shows that all three predictors perform fairly well in predicting the connectivity curves, denoted by $p(\delta)$ . Table III (III) shows that the prediction errors are mostly on the order of $10^{-2}$ . The predicted curves under TD are shown in Fig. 8, for which the
|
| 264 |
+
|
| 265 |
+

|
| 266 |
+
|
| 267 |
+

|
| 268 |
+
|
| 269 |
+

|
| 270 |
+
Fig. 7. [color online] Comparison of prediction results using LFR-CNN, PCR, and PATCHY-SAN, for connectivity robustness of undirected networks $(N\in [350,650])$ under RA.
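As a reference for how the simulated curves in Fig. 7 are typically produced, the sketch below computes a connectivity curve $p(\delta)$ under RA by repeatedly removing a uniformly random node and recording the relative size of the largest connected component. This assumes the standard definition of $p(\delta)$ and uses networkx; it is an illustration, not the authors' exact simulation code.

```python
import random
import networkx as nx

def connectivity_curve_under_RA(G: nx.Graph, seed: int = 0) -> list:
    """Remove nodes uniformly at random, one at a time, recording the
    fraction of the original nodes in the largest connected component."""
    rng = random.Random(seed)
    G = G.copy()
    n = G.number_of_nodes()
    order = list(G.nodes())
    rng.shuffle(order)
    curve = []
    for v in order[:-1]:  # stop before the graph becomes empty
        G.remove_node(v)
        giant = max(nx.connected_components(G), key=len)
        curve.append(len(giant) / n)
    return curve

curve = connectivity_curve_under_RA(nx.erdos_renyi_graph(500, 0.01, seed=1))
```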
|
| 271 |
+
|
| 272 |
+

|
| 273 |
+
|
| 274 |
+

|
| 275 |
+
|
| 276 |
+

|
| 277 |
+
|
| 278 |
+

|
| 279 |
+
|
| 280 |
+

|
| 281 |
+
|
| 282 |
+

|
| 283 |
+
|
| 284 |
+
TABLE V COMPARISON OF AVERAGE PREDICTION ERRORS OBTAINED USING DIFFERENT ATTRIBUTE COMBINATIONS IN LFR-CNN. THREE NODE ATTRIBUTES, INCLUDING DEGREE (deg), CLUSTERING COEFFICIENT (cc), AND BETWEENNESS (bet), FORM THREE PAIRWISE COMBINATIONS.
|
| 285 |
+
|
| 286 |
+
<table><tr><td colspan="2"></td><td>ER</td><td>BA</td><td>SF</td><td>OS</td><td>SW-NW</td><td>SW-WS</td><td>QS</td><td>RH</td><td>RT</td></tr><tr><td>(I) Controllability</td><td>deg & cc</td><td>0.0432</td><td>0.0357</td><td>0.0436</td><td>0.0372</td><td>0.0581</td><td>0.0322</td><td>0.0351</td><td>0.0399</td><td>0.0421</td></tr><tr><td rowspan="3">Robustness of Directed Networks under RA</td><td>(deg & bet)</td><td>(≈,+)</td><td>(+,+)</td><td>(≈,+)</td><td>(+,+)</td><td>(+,+)</td><td>(≈,+)</td><td>(≈,+)</td><td>(≈,+)</td><td>(+,+)</td></tr><tr><td>deg & bet</td><td>0.0384</td><td>0.0562</td><td>0.0472</td><td>0.0556</td><td>0.0439</td><td>0.0321</td><td>0.0337</td><td>0.0394</td><td>0.0515</td></tr><tr><td>bet & cc</td><td>0.0589</td><td>0.0865</td><td>0.1203</td><td>0.1179</td><td>0.0640</td><td>0.0543</td><td>0.0566</td><td>0.0571</td><td>0.0681</td></tr><tr><td>(II) Connectivity</td><td>deg & cc</td><td>0.0293</td><td>0.0490</td><td>0.0791</td><td>0.0769</td><td>0.0287</td><td>0.0288</td><td>0.0287</td><td>0.0340</td><td>0.0461</td></tr><tr><td rowspan="3">Robustness of Undirected Networks under RA</td><td>(deg & bet)</td><td>(+,+)</td><td>(≈,+)</td><td>(≈,+)</td><td>(+,+)</td><td>(+,+)</td><td>(+,+)</td><td>(+,+)</td><td>(+,+)</td><td>(≈,+)</td></tr><tr><td>deg & bet</td><td>0.0503</td><td>0.0494</td><td>0.0921</td><td>0.0937</td><td>0.0635</td><td>0.0562</td><td>0.0527</td><td>0.0508</td><td>0.0568</td></tr><tr><td>bet & cc</td><td>0.1291</td><td>0.1434</td><td>0.1628</td><td>0.1632</td><td>0.1331</td><td>0.1339</td><td>0.1298</td><td>0.1325</td><td>0.1454</td></tr></table>
|
| 287 |
+
|
| 288 |
+

|
| 289 |
+
|
| 290 |
+

|
| 291 |
+
|
| 292 |
+

|
| 293 |
+
# E. Scalability of Network Size
|
| 294 |
+
|
| 295 |
+

|
| 296 |
+
|
| 297 |
+

|
| 298 |
+
|
| 299 |
+

|
| 300 |
+
|
| 301 |
+

|
| 302 |
+
Fig. 8. [color online] Comparison of prediction results using LFR-CNN, PCR, and PATCHY-SAN, for connectivity robustness of undirected networks $(N\in [350,650])$ under TD.
|
| 303 |
+
|
| 304 |
+

|
| 305 |
+
|
| 306 |
+

|
| 307 |
+
|
| 308 |
+
overall errors are summarized in Table III (IV). It is clear that PCR does not perform well in this case.
|
| 309 |
+
|
| 310 |
+
The data summarized in Tables III (III) and (IV) suggest that LFR-CNN outperforms PCR in 16 out of 18 comparisons and PATCHY-SAN in 10 out of 18, while for the remaining cases, LFR-CNN performs statistically on par with PCR and PATCHY-SAN.
|
| 311 |
+
|
| 312 |
+
In a nutshell, LFR-CNN outperforms PCR in 34/36 cases, and outperforms PATCHY-SAN in 19/36 cases; PATCHY-SAN outperforms LFR-CNN in 4/36 cases, while PCR does not outperform LFR-CNN in any case; in the remaining cases, no significant differences are detected.
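The '+', '$\approx$', and '$-$' signs in Tables III and VI come from Kruskal-Wallis H-tests [66] on the per-instance error samples of two predictors. A minimal sketch with SciPy follows; the error samples here are synthetic placeholders, not the experimental data.

```python
import numpy as np
from scipy.stats import kruskal

rng = np.random.default_rng(0)
errors_lfr_cnn = rng.normal(0.045, 0.010, size=100)  # placeholder error samples
errors_pcr     = rng.normal(0.128, 0.020, size=100)

stat, p = kruskal(errors_lfr_cnn, errors_pcr)
if p >= 0.05:
    sign = '~'  # no significant difference
elif np.median(errors_lfr_cnn) < np.median(errors_pcr):
    sign = '+'  # LFR-CNN significantly better (lower errors)
else:
    sign = '-'  # LFR-CNN significantly worse
print(sign, p)
```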
|
| 313 |
+
|
| 314 |
+
# D. Node Attributes as Receptive Fields
|
| 315 |
+
|
| 316 |
+
In the normalization step of LFR, the attributes of the selected neighborhood nodes are embedded in a receptive field. Here, different combinations of node attributes including degree, clustering coefficient, and betweenness are compared.
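The three node attributes under comparison are all standard graph statistics; with networkx, for instance, they can be computed as in the sketch below (the graph is illustrative).

```python
import networkx as nx

G = nx.erdos_renyi_graph(200, 0.05, seed=42)

deg = dict(G.degree())              # degree
cc  = nx.clustering(G)              # clustering coefficient
bet = nx.betweenness_centrality(G)  # betweenness

# One pairwise combination, deg & cc, as per-node feature pairs:
features_deg_cc = {v: (deg[v], cc[v]) for v in G.nodes()}
```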
|
| 317 |
+
|
| 318 |
+
Table V shows the prediction errors for (I) controllability robustness and (II) connectivity robustness, among the three combinations. It is clear that the default setting using degree and clustering coefficient $(deg\&cc)$ outperforms the other two combinations.
|
| 319 |
+
|
| 320 |
+

|
| 321 |
+
|
| 322 |
+

|
| 323 |
+
|
| 324 |
+

|
| 325 |
+
|
| 326 |
+

|
| 327 |
+
|
| 328 |
+

|
| 329 |
+
|
| 330 |
+

|
| 331 |
+
|
| 332 |
+

|
| 333 |
+
Fig. 9. [color online] Comparison of prediction results using LFR-CNN, PCR, and PATCHY-SAN, for controllability robustness of directed networks $(N\in [700,1300])$ under RA.
|
| 334 |
+
|
| 335 |
+

|
| 336 |
+
|
| 337 |
+

|
| 338 |
+
|
| 339 |
+
To further verify the scalability, the proposed LFR-CNN is compared with PCR and PATCHY-SAN on predicting a set of networks of sizes $N \in [700, 1300]$ . Here, a 7-FM PCR is employed and $W = 1000$ is set for LFR-CNN and PATCHY-SAN.
|
| 340 |
+
|
| 341 |
+
The predicted controllability and connectivity curves under RA are shown in Figs. 9 and 10, respectively. It is visible that LFR-CNN and PATCHY-SAN perform better than PCR in controllability robustness prediction. As for connectivity robustness, LFR-CNN performs visibly better than PATCHY-SAN and PCR in Figs. 10 (c) and (d).
|
| 342 |
+
|
| 343 |
+
TABLE VI COMPARISON OF AVERAGE PREDICTION ERRORS AMONG LFR-CNN, PCR AND PATCHY-SAN, WHERE $N\in [700,1300]$ . THE SIGNS IN PARENTHESES DENOTE THE KRUSKAL-WALLIS H-TEST [66] RESULTS OF LFR-CNN VS PCR AND LFR-CNN VS PATCHY-SAN, RESPECTIVELY. A '+' SIGN DENOTES THAT LFR-CNN SIGNIFICANTLY OUTPERFORMS THE OTHER METHOD BY OBTAINING LOWER ERRORS; A '$\approx$' SIGN DENOTES NO SIGNIFICANT DIFFERENCE BETWEEN THE TWO METHODS; AND A '$-$' SIGN DENOTES THAT LFR-CNN PERFORMS SIGNIFICANTLY WORSE THAN THE OTHER METHOD, WITH GREATER ERRORS.
|
| 344 |
+
|
| 345 |
+
<table><tr><td colspan="2">Average Prediction Error ξ</td><td>ER</td><td>BA</td><td>SF</td><td>OS</td><td>SW-NW</td><td>SW-WS</td><td>QS</td><td>RH</td><td>RT</td></tr><tr><td>(I) Controllability</td><td rowspan="2">LFR-CNN</td><td>0.0191</td><td>0.0406</td><td>0.0356</td><td>0.0341</td><td>0.0151</td><td>0.0171</td><td>0.0162</td><td>0.0177</td><td>0.0316</td></tr><tr><td rowspan="3">Robustness of Directed Networks under RA</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td></tr><tr><td>PCR</td><td>0.1433</td><td>0.1408</td><td>0.2820</td><td>0.2706</td><td>0.1349</td><td>0.1282</td><td>0.1242</td><td>0.1395</td><td>0.1284</td></tr><tr><td>PATCHY-SAN</td><td>0.0374</td><td>0.0387</td><td>0.0420</td><td>0.0448</td><td>0.0259</td><td>0.0240</td><td>0.0375</td><td>0.0268</td><td>0.0499</td></tr><tr><td>(II) Connectivity</td><td rowspan="2">LFR-CNN</td><td>0.0266</td><td>0.0594</td><td>0.0705</td><td>0.0790</td><td>0.0239</td><td>0.0297</td><td>0.0271</td><td>0.0293</td><td>0.0424</td></tr><tr><td rowspan="3">Robustness of Undirected Networks under RA</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td><td>(+)</td></tr><tr><td>PCR</td><td>0.0654</td><td>0.0744</td><td>0.1321</td><td>0.1348</td><td>0.0784</td><td>0.0861</td><td>0.0833</td><td>0.0741</td><td>0.0809</td></tr><tr><td>PATCHY-SAN</td><td>0.0440</td><td>0.0757</td><td>0.0971</td><td>0.1070</td><td>0.0479</td><td>0.0444</td><td>0.0427</td><td>0.0357</td><td>0.0398</td></tr></table>
|
| 346 |
+
|
| 347 |
+

|
| 348 |
+
Fig. 10. [color online] Comparison of prediction results using LFR-CNN, PCR, and PATCHY-SAN, for connectivity robustness of undirected networks $(N\in [700,1300])$ under RA.
|
| 349 |
+
|
| 350 |
+
The overall prediction errors are shown in Table VI. LFR-CNN outperforms PCR for 17 out of 18 cases, and outperforms PATCHY-SAN for 13 out of 18 cases; while for the rest comparisons, LFR-CNN performs statistically equivalently to PCR or PATCHY-SAN in prediction.
|
| 351 |
+
|
| 352 |
+
# F. Network Size Variation
|
| 353 |
+
|
| 354 |
+
The core prediction component in LFR-CNN, PCR, and PATCHY-SAN is a 3-FM CNN, a 7-FM CNN, and a 1D-CNN, respectively. These CNN-based core components perform the regression task and predict the robustness performance for an input network. In PCR, the input data to CNN are adjacency matrices, while for LFR-CNN and PATCHY-SAN, the LFR module will convert the raw adjacency matrices to lower-dimensional representations before inputting them to the respective CNNs. Specifically, suppose that $H$ is the input size
|
| 355 |
+
|
| 356 |
+
TABLE VII COMPARISON OF AVERAGE PREDICTION ERRORS AMONG LFR-CNN, PCR AND PATCHY-SAN, WHERE $N = 800$
|
| 357 |
+
|
| 358 |
+
<table><tr><td>Average Prediction Error ξ̅</td><td>ER</td><td>SF</td><td>QS</td><td>SW-NW</td></tr><tr><td>LFR-CNN</td><td>0.0189 (≈,+ )</td><td>0.0750 (−,+ )</td><td>0.0162 (≈,+ )</td><td>0.0157 (≈,+ )</td></tr><tr><td>PCR</td><td>0.0166</td><td>0.0194</td><td>0.0145</td><td>0.0141</td></tr><tr><td>PATCHY-SAN</td><td>0.0253</td><td>0.1074</td><td>0.0208</td><td>0.0263</td></tr></table>
|
| 359 |
+
|
| 360 |
+
of the prediction component of LFR-CNN, PCR, or PATCHY-SAN, and that an input adjacency matrix has size $J \times J$ ( $J \neq H$ ). For PCR, upsampling or downsampling is necessary to resize the input, which may significantly modify the original adjacency information. In contrast, for LFR-CNN and PATCHY-SAN, the $J \times J$ matrix is represented by a sequence of $W$ receptive fields, namely, the information of the $W$ most important nodes is input; if $J > W$ , some less important information is discarded. Therefore, if a network size disagrees with the input size of a predictor, information loss is more severe in PCR than in LFR-CNN and PATCHY-SAN.
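To make the contrast concrete, the sketch below compares a PCR-style resampling of the adjacency matrix against a receptive-field-style selection of the $W$ most important nodes. Node importance is approximated here by degree, which is an illustrative assumption rather than the exact ranking used by LFR-CNN or PATCHY-SAN.

```python
import numpy as np

def resize_adjacency(A: np.ndarray, H: int) -> np.ndarray:
    """PCR-style: nearest-neighbour resampling of a J x J adjacency
    matrix to H x H, which can distort the adjacency information."""
    J = A.shape[0]
    idx = np.arange(H) * J // H
    return A[np.ix_(idx, idx)]

def keep_top_w_nodes(A: np.ndarray, W: int) -> np.ndarray:
    """LFR/PATCHY-SAN-style: keep only the W most 'important' nodes
    (degree as a stand-in importance score); when J > W, the least
    important nodes are discarded."""
    importance = A.sum(axis=0)
    keep = np.argsort(importance)[::-1][:W]
    return A[np.ix_(keep, keep)]
```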
|
| 361 |
+
|
| 362 |
+
Table VII shows the prediction errors when all the network sizes are equal to the input size of the CNNs, for both training and testing data, namely $H = J = W = 800$ , with $\langle k \rangle \in [1.5, 6]$ . In this setting, neither upsampling nor downsampling is required for PCR, and all three predictors perform quite well, with very low prediction errors. LFR-CNN outperforms PATCHY-SAN for all 4 networks, and PCR outperforms LFR-CNN for the SF network. Together with the poor PCR results in Tables III and VI, where network sizes vary, this suggests that PCR is fragile to network size variation, and it verifies that LFR makes the prediction performance more robust against such variation.
|
| 363 |
+
|
| 364 |
+
# G. Run Time Comparison
|
| 365 |
+
|
| 366 |
+
Table VIII shows the run time comparison of PCR, PATCHY-SAN, LFR-CNN, and attack simulation, for both controllability and connectivity robustness predictions. The network size is $N \in [350,650]$ ; the data are averaged over 100 independent runs. As shown in Table VIII, the simulation time for controllability robustness is longer than that for connectivity robustness, while for the three predictors the run times of the two tasks show no significant difference. It is also notable that PCR is significantly faster than attack simulation, PATCHY-SAN, and LFR-CNN. Running the LFR module is time-consuming,
|
| 367 |
+
|
| 368 |
+
TABLE VIII RUN TIME COMPARISON OF PCR, PATCHY-SAN, LFR-CNN, AND ATTACK SIMULATION (SIM).
|
| 369 |
+
|
| 370 |
+
<table><tr><td>Unit: Second</td><td colspan="2">Controllability Robustness</td><td colspan="2">Connectivity Robustness</td></tr><tr><td>SIM</td><td colspan="2">4.7902</td><td colspan="2">1.3704</td></tr><tr><td>PCR</td><td colspan="2">0.0463</td><td colspan="2">0.0477</td></tr><tr><td rowspan="2">PATCHY-SAN</td><td>LFR 1.1312</td><td>1D-CNN 0.0034</td><td>LFR 1.1302</td><td>1D-CNN 0.0035</td></tr><tr><td colspan="2">1.1346</td><td colspan="2">1.1337</td></tr><tr><td rowspan="2">LFR-CNN</td><td>LFR 1.1320</td><td>CNN 0.0051</td><td>LFR 1.1300</td><td>CNN 0.0049</td></tr><tr><td colspan="2">1.1371</td><td colspan="2">1.1349</td></tr></table>
|
| 382 |
+
|
| 383 |
+
while running the CNN component alone in either PATCHY-SAN or LFR-CNN is faster than running PCR, due to its simpler structure.
|
| 384 |
+
|
| 385 |
+
Overall, compared to attack simulation, LFR-CNN is able to predict relatively precise controllability and connectivity curves while saving about $76\%$ and $17\%$ of the computational time, respectively. In addition, the run time of attack simulation increases faster with $N$ than that of the CNN-based schemes: with $N \in [700, 1300]$ , the attack simulation for controllability robustness takes 41.62 seconds, while LFR-CNN takes only 3.67 seconds.
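The reported savings follow directly from the totals in Table VIII; a quick check:

```python
sim_ctrl, lfr_cnn_ctrl = 4.7902, 1.1371  # seconds, Table VIII totals
sim_conn, lfr_cnn_conn = 1.3704, 1.1349

print(1 - lfr_cnn_ctrl / sim_ctrl)  # ~0.763 -> about 76% time saved
print(1 - lfr_cnn_conn / sim_conn)  # ~0.172 -> about 17% time saved
```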
|
| 386 |
+
|
| 387 |
+
# H. Compared to Spectral Measures
|
| 388 |
+
|
| 389 |
+
Spectral measures are widely used for estimating the connectivity robustness of undirected networks [35]. Table IX shows the estimated connectivity robustness ranks of different networks, using the three CNN-based predictors and six spectral measures, including algebraic connectivity (AC), effective resistance (EF), natural connectivity (NC), spectral gap (SG), spectral radius (SR), and spanning tree count (ST). Undirected networks with $N \in [350,650]$ and $\langle k \rangle \in [1.5,6]$ are used for comparison. Prediction results are unified as predicted rank errors of network robustness, calculated by $\xi_r = |\hat{rl} - rl|$ , where $\hat{rl}$ represents a predicted rank-list and $rl$ is the true rank-list obtained by simulation. For example, given $\hat{rl} = [5,3,1,4,2]$ and $rl = [2,3,1,5,4]$ , the rank error is $\xi_r = |\hat{rl} - rl| = [3,0,0,1,2]$ and the average rank error is $\bar{\xi}_r = 1.2$ . As shown in Table IX, PATCHY-SAN and LFR-CNN obtain the two best average rank errors, while PCR does not perform well due to the larger variation of network size and average degree.
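The worked example above can be reproduced in a couple of lines:

```python
import numpy as np

def average_rank_error(predicted_rl, true_rl) -> float:
    """Mean of element-wise absolute differences between a predicted
    rank-list and the true rank-list obtained by simulation."""
    return float(np.mean(np.abs(np.asarray(predicted_rl) - np.asarray(true_rl))))

print(average_rank_error([5, 3, 1, 4, 2], [2, 3, 1, 5, 4]))  # 1.2
```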
|
| 390 |
+
|
| 391 |
+
# V. CONCLUSION
|
| 392 |
+
|
| 393 |
+
In this paper, a learning feature representation-based convolutional neural network, namely LFR-CNN, is developed for network robustness prediction, covering both connectivity robustness and controllability robustness. Conventionally, network robustness is evaluated by time-consuming attack simulations, which collect a sequence of network connectivity or controllability values measuring the remaining network after each of a sequence of destructive attacks (here, node-removal attacks). LFR-CNN is designed to strike a balance between PCR and PATCHY-SAN, in terms of both input size and internal parameters. The LFR module not only compresses the raw higher-dimensional adjacency matrix to a lower-dimensional representation, but
|
| 394 |
+
|
| 395 |
+
also extends the capability of LFR-CNN to process complex network data with wide-ranging variation in network size and average degree.
|
| 396 |
+
|
| 397 |
+
Extensive numerical experiments are performed using both synthetic and real-world networks, including directed and undirected networks, and the results are analyzed and compared, clearly revealing the pros and cons of several representative schemes and measures. Specifically, the good performance of LFR-CNN in predicting both connectivity robustness and controllability robustness is verified by comparison with two other state-of-the-art network robustness predictors, namely PCR and PATCHY-SAN. LFR-CNN is much less sensitive than PCR to network size variation. Although LFR-CNN requires a relatively long run time for feature learning, it still achieves accurate prediction faster than conventional attack simulations. Meanwhile, LFR-CNN not only accurately predicts the connectivity and controllability robustness curves of various complex networks under different types of attacks, but also serves as an indicator of connectivity robustness that is better than spectral measures.
|
| 398 |
+
|
| 399 |
+
Overall, the present study makes the investigation of network controllability and connectivity robustness more refined and complete. It should be noted, however, that the correlation between controllability robustness and spectral measures has not been investigated, which is left as a worthwhile but challenging topic for future research.
|
| 400 |
+
|
| 401 |
+
# REFERENCES
|
| 402 |
+
|
| 403 |
+
[1] A.-L. Barabási, Network Science. Cambridge University Press, 2016.
|
| 404 |
+
[2] M. E. Newman, Networks: An Introduction. Oxford University Press, 2010.
|
| 405 |
+
[3] G. Chen, X. Wang, and X. Li, Fundamentals of Complex Networks: Models, Structures and Dynamics, 2nd ed. John Wiley & Sons, 2014.
|
| 406 |
+
[4] G. Chen and Y. Lou, Naming Game: Models, Simulations and Analysis. Springer, 2019.
|
| 407 |
+
[5] Y.-Y. Liu, J.-J. Slotine, and A.-L. Barabási, "Controllability of complex networks," Nature, vol. 473, no. 7346, pp. 167-173, 2011.
|
| 408 |
+
[6] Z. Z. Yuan, C. Zhao, Z. R. Di, W.-X. Wang, and Y.-C. Lai, "Exact controllability of complex networks," Nature Communications, vol. 4, p. 2447, 2013.
|
| 409 |
+
[7] M. Pósfai, Y.-Y. Liu, J.-J. Slotine, and A.-L. Barabási, “Effect of correlations on network controllability,” Scientific Reports, vol. 3, p. 1067, 2013.
|
| 410 |
+
[8] G. Menichetti, L. Dall'Asta, and G. Bianconi, "Network controllability is determined by the density of low in-degree and out-degree nodes," Physical Review Letters, vol. 113, no. 7, p. 078701, 2014.
|
| 411 |
+
[9] Y. Pan and X. Li, "Structural controllability and controlling centrality of temporal networks," PLoS One, vol. 9, no. 4, p. e94998, 2014.
|
| 412 |
+
[10] A. E. Motter, “Networkcontrology,” Chaos: An Interdisciplinary Journal of Nonlinear Science, vol. 25, no. 9, p. 097621, 2015.
|
| 413 |
+
[11] L. Wang, X. Wang, G. Chen, and W. K. S. Tang, "Controllability of networked MIMO systems," Automatica, vol. 69, pp. 405-409, 2016.
|
| 414 |
+
[12] B. Hou, X. Li, and G. Chen, "Structural controllability of temporally switching networks," IEEE Transactions on Circuits and Systems I: Regular Papers, vol. 63, no. 10, pp. 1771-1781, 2016.
|
| 415 |
+
[13] Y.-Y. Liu and A.-L. Barabási, “Control principles of complex systems,” Review of Modern Physics, vol. 88, no. 3, p. 035006, 2016.
|
| 416 |
+
[14] L. Wang, X. Wang, and G. Chen, "Controllability of networked higher-dimensional systems with one-dimensional communication channels," Royal Society Philosophical Transactions A, vol. 375, no. 2088, p. 20160215, 2017.
|
| 417 |
+
[15] L.-Z. Wang, Y.-Z. Chen, W.-X. Wang, and Y.-C. Lai, "Physical controllability of complex networks," Scientific Reports, vol. 7, p. 40198, 2017.
|
| 418 |
+
[16] B. Hou, X. Li, and G. Chen, "The roles of input matrix and nodal dynamics in network controllability," IEEE Transactions on Control of Network Systems, vol. 5, no. 4, pp. 1764-1774, 2017.
|
| 419 |
+
|
| 420 |
+
TABLE IX PREDICTION RANK ERRORS OF THE SIX SPECTRAL MEASURES, PCR, PATCHY-SAN, AND LFR-CNN. BOLD NUMBERS INDICATE THE BEST PERFORMING PREDICTION MEASURES.
|
| 421 |
+
|
| 422 |
+
<table><tr><td>Average Rank Error</td><td>ER</td><td>BA</td><td>SF</td><td>OS</td><td>QS</td><td>SW-NW</td><td>SW-WS</td><td>RH</td><td>RT</td><td>Overall</td><td>Rank</td></tr><tr><td>AC</td><td>34.7</td><td>35.2</td><td>35.4</td><td>39.7</td><td>34.2</td><td>30.4</td><td>31.5</td><td>35.6</td><td>32.8</td><td>34.4</td><td>8</td></tr><tr><td>EF</td><td>30.6</td><td>37.3</td><td>35.5</td><td>39.7</td><td>32.8</td><td>33.6</td><td>34.4</td><td>32.2</td><td>36.0</td><td>34.7</td><td>9</td></tr><tr><td>NC</td><td>31.8</td><td>33.6</td><td>34.2</td><td>33.7</td><td>30.9</td><td>27.9</td><td>34.0</td><td>32.1</td><td>32.7</td><td>32.3</td><td>4</td></tr><tr><td>SG</td><td>31.3</td><td>33.2</td><td>31.0</td><td>34.2</td><td>34.0</td><td>29.1</td><td>32.6</td><td>33.0</td><td>35.8</td><td>32.7</td><td>6</td></tr><tr><td>SR</td><td>33.5</td><td>30.9</td><td>29.9</td><td>33.3</td><td>33.8</td><td>31.7</td><td>30.3</td><td>34.6</td><td>33.2</td><td>32.4</td><td>5</td></tr><tr><td>ST</td><td>37.6</td><td>32.1</td><td>32.4</td><td>30.7</td><td>34.0</td><td>27.2</td><td>33.0</td><td>32.0</td><td>29.0</td><td>32.0</td><td>3</td></tr><tr><td>PCR</td><td>33.4</td><td>35.1</td><td>35.5</td><td>33.5</td><td>37.9</td><td>31.0</td><td>34.3</td><td>32.8</td><td>33.2</td><td>34.1</td><td>7</td></tr><tr><td>PATCHY-SAN</td><td>35.4</td><td>28.7</td><td>30.6</td><td>31.1</td><td>32.5</td><td>28.4</td><td>30.1</td><td>30.3</td><td>29.6</td><td>30.7</td><td>1</td></tr><tr><td>LFR-CNN</td><td>33.5</td><td>36.7</td><td>31.7</td><td>31.6</td><td>30.3</td><td>28.3</td><td>29.2</td><td>30.6</td><td>29.4</td><td>31.3</td><td>2</td></tr></table>
|
| 423 |
+
|
| 424 |
+
[17] Y. Zhang and T. Zhou, "Controllability analysis for a networked dynamic system with autonomous subsystems," IEEE Transactions on Automatic Control, vol. 62, no. 7, pp. 3408-3415, 2016.
|
| 425 |
+
[18] L. Xiang, F. Chen, W. Ren, and G. Chen, “Advances in network controllability,” IEEE Circuits and Systems Magazine, vol. 19, no. 2, pp. 8-32, 2019.
|
| 426 |
+
[19] J.-N. Wu, X. Li, and G. Chen, "Controllability of deep-coupling dynamical networks," IEEE Transactions on Circuits and Systems I: Regular Papers, vol. 67, no. 12, pp. 5211-5222, 2020.
|
| 427 |
+
[20] B. Hou, "Relevance of network characteristics to controllability degree," IEEE Transactions on Automatic Control, 2020.
|
| 428 |
+
[21] D. Shi, G. Chen, W. W. K. Thong, and X. Yan, "Searching for optimal network topology with best possible synchronizability," IEEE Circuits and Systems Magazine, vol. 13, no. 1, pp. 66-75, 2013.
|
| 429 |
+
[22] Y. Lou, R. Wu, J. Li, L. Wang, and G. Chen, "A convolutional neural network approach to predicting network connectedness robustness," IEEE Transactions on Network Science and Engineering, vol. 8, no. 4, pp. 3209-3219, 2021.
|
| 430 |
+
[23] P. Holme, B. J. Kim, C. N. Yoon, and S. K. Han, "Attack vulnerability of complex networks," Physical Review E, vol. 65, no. 5, p. 056109, 2002.
|
| 431 |
+
[24] B. Shargel, H. Sayama, I. R. Epstein, and Y. Bar-Yam, “Optimization of robustness and connectivity in complex networks,” *Physical Review Letters*, vol. 90, no. 6, p. 068701, 2003.
|
| 432 |
+
[25] C. M. Schneider, A. A. Moreira, J. S. Andrade, S. Havlin, and H. J. Herrmann, “Mitigation of malicious attacks on networks,” Proceedings of the National Academy of Sciences, vol. 108, no. 10, pp. 3838–3841, 2011.
|
| 433 |
+
[26] A. Bashan, Y. Berezin, S. Buldyrev, and S. Havlin, “The extreme vulnerability of interdependent spatially embedded networks,” Nature Physics, vol. 9, pp. 667–672, 2013.
|
| 434 |
+
[27] C. Fan, L. Zeng, Y. Sun, and Y.-Y. Liu, “Finding key players in complex networks through deep reinforcement learning,” Nature Machine Intelligence, vol. 2, pp. 317-324, 2020.
|
| 435 |
+
[28] S. Wang, J. Liu, and Y. Jin, "A computationally efficient evolutionary algorithm for multiobjective network robustness optimization," IEEE Transactions on Evolutionary Computation, vol. 25, no. 3, pp. 419-432, 2021.
|
| 436 |
+
[29] M. Grassia, M. De Domenico, and G. Mangioni, "Machine learning dismantling and early-warning signals of disintegration in complex systems," Nature Communications, vol. 12, no. 5190, 2021.
|
| 437 |
+
[30] Z.-X. Wu and P. Holme, "Onion structure and network robustness," Physical Review E, vol. 84, no. 2, p. 026106, 2011.
|
| 438 |
+
[31] A. Zeng and W. Liu, “Enhancing network robustness against malicious attacks,” Physical Review E, vol. 85, no. 6, p. 066130, 2012.
|
| 439 |
+
[32] V. H. Louzada, F. Daolio, H. J. Herrmann, and M. Tomassini, “Smart rewiring for network robustness,” Journal of Complex Networks, vol. 1, no. 2, pp. 150–159, 2013.
|
| 440 |
+
[33] C. M. Schneider, N. Yazdani, N. A. Araújo, S. Havlin, and H. J. Herrmann, “Towards designing robust coupled networks,” Scientific Reports, vol. 3, no. 1, pp. 1-7, 2013.
|
| 441 |
+
[34] L. Bai, Y.-D. Xiao, L.-L. Hou, and S.-Y. Lao, "Smart rewiring: Improving network robustness faster," Chinese Physics Letters, vol. 32, no. 7, p. 078901, 2015.
|
| 442 |
+
[35] H. Chan and L. Akoglu, "Optimizing network robustness by edge rewiring: A general framework," Data Mining and Knowledge Discovery, vol. 30, no. 5, pp. 1395-1425, 2016.
|
| 443 |
+
|
| 444 |
+
[36] Y. Lou, S. Xie, and G. Chen, "Searching better rewiring strategies and objective functions for stronger controllability robustness," IEEE Transactions on Circuits and Systems II: Express Briefs, vol. 68, no. 6, pp. 2112-2116, 2021.
|
| 445 |
+
[37] S. Wang, J. Liu, and Y. Jin, "Surrogate-assisted robust optimization of large-scale networks based on graph embedding," IEEE Transactions on Evolutionary Computation, vol. 24, no. 4, pp. 735-749, 2020.
|
| 446 |
+
[38] M. E. Newman, “Mixing patterns in networks,” Physical Review E, vol. 67, no. 2, p. 026126, 2003.
|
| 447 |
+
[39] N. Perra and S. Fortunato, "Spectral centrality measures in complex networks," Physical Review E, vol. 78, no. 3, p. 036107, 2008.
|
| 448 |
+
[40] T. Tanizawa, S. Havlin, and H. E. Stanley, "Robustness of onionlike correlated networks against targeted attacks," *Physical Review E*, vol. 85, no. 4, p. 046109, 2012.
|
| 449 |
+
[41] Y. Hayashi and N. Uchiyama, "Onion-like networks are both robust and resilient," Scientific Reports, vol. 8, 2018.
|
| 450 |
+
[42] X.-Y. Yan, W.-X. Wang, G. Chen, and D.-H. Shi, "Multiplex congruence network of natural numbers," Scientific Reports, vol. 6, p. 23714, 2016.
|
| 451 |
+
[43] Y. Lou, L. Wang, and G. Chen, "Toward stronger robustness of network controllability: A snapback network model," IEEE Transactions on Circuits and Systems I: Regular Papers, vol. 65, no. 9, pp. 2983-2991, 2018.
|
| 452 |
+
[44] G. Chen, Y. Lou, and L. Wang, “A comparative study on controllability robustness of complex networks,” IEEE Transactions on Circuits and Systems II: Express Briefs, vol. 66, no. 5, pp. 828-832, 2019.
|
| 453 |
+
[45] Y. Lou, L. Wang, K.-F. Tsang, and G. Chen, "Towards optimal robustness of network controllability: An empirical necessary condition," IEEE Transactions on Circuits and Systems I: Regular Papers, 2020, doi:10.1109/TCSI.2020.2986215.
|
| 454 |
+
[46] H. Iiduka, "Appropriate learning rates of adaptive learning rate optimization algorithms for training deep neural networks," IEEE Transactions on Cybernetics, 2021, doi:10.1109/TCYB.2021.3107415 (online published).
|
| 455 |
+
[47] B. Xiao, Z. Yang, X. Qiu, J. Xiao, G. Wang, W. Zeng, W. Li, Y. Nian, and W. Chen, “PAM-DenseNet: A deep convolutional neural network for computer-aided COVID-19 diagnosis,” IEEE Transactions on Cybernetics, 2021, doi:10.1109/TCYB.2020.3042837 (online published).
|
| 456 |
+
[48] J. Sun, W. Zheng, Q. Zhang, and Z. Xu, "Graph neural network encoding for community detection in attribute networks," IEEE Transactions on Cybernetics, 2021, doi:10.1109/TCYB.2021.3051021 (online published).
|
| 457 |
+
[49] J. Schmidhuber, “Deep learning in neural networks: An overview,” Neural Networks, vol. 61, pp. 85–117, 2015.
|
| 458 |
+
[50] Y. Lou, Y. He, L. Wang, and G. Chen, "Predicting network controllability robustness: A convolutional neural network approach," IEEE Transactions on Cybernetics, 2020, doi:10.1109/TCYB.2020.3013251 (online published).
|
| 459 |
+
[51] A. Dhiman, P. Sun, and R. Kooij, "Using machine learning to quantify the robustness of network controllability," in International Conference on Machine Learning for Networking. Springer, 2021, pp. 19-39.
|
| 460 |
+
[52] Y. Lou, Y. He, L. Wang, K. F. Tsang, and G. Chen, "Knowledge-based prediction of network controllability robustness," IEEE Transactions on Neural Networks and Learning Systems, 2021, doi:10.1109/TNNLS.2021.3071367 (online published).
|
| 461 |
+
[53] R. Zhang, “Making convolutional networks shift-invariant again,” in International Conference on Machine Learning. PMLR, 2019, pp. 7324-7334.
|
| 462 |
+
|
| 463 |
+
[54] K. Simonyan and A. Zisserman, "Very deep convolutional networks for large-scale image recognition," arXiv Preprint: 1409.1556, 2014.
|
| 464 |
+
[55] M. Niepert, M. Ahmed, and K. Kutzkov, “Learning convolutional neural networks for graphs,” in International Conference on Machine Learning (ICML), 2016, pp. 2014–2023.
|
| 465 |
+
[56] T. N. Kipf and M. Welling, "Semi-supervised classification with graph convolutional networks," arXiv preprint arXiv:1609.02907, 2016.
|
| 466 |
+
[57] W. L. Hamilton, R. Ying, and J. Leskovec, "Inductive representation learning on large graphs," in International Conference on Neural Information Processing Systems, 2017, pp. 1025-1035.
|
| 467 |
+
[58] W. L. Hamilton, “Graph representation learning,” Synthesis Lectures on Artificial Intelligence and Machine Learning, vol. 14, no. 3, pp. 1–159, 2020.
|
| 468 |
+
[59] X. Glorot, A. Bordes, and Y. Bengio, "Deep sparse rectifier neural networks," in International Conference on Artificial Intelligence and Statistics, 2011, pp. 315-323.
|
| 469 |
+
[60] P. Erdős and A. Rényi, “On the strength of connectedness of a random graph,” Acta Mathematica Hungarica, vol. 12, no. 1-2, pp. 261-267, 1964.
|
| 470 |
+
[61] A.-L. Barabási and R. Albert, “Emergence of scaling in random networks,” Science, vol. 286, no. 5439, pp. 509–512, 1999.
|
| 471 |
+
[62] A.-L. Barabási, "Scale-free networks: A decade and beyond," Science, vol. 325, no. 5939, pp. 412-413, 2009.
|
| 472 |
+
[63] K.-I. Goh, B. Kahng, and D. Kim, "Universal behavior of load distribution in scale-free networks," Physical Review Letters, vol. 87, no. 27, p. 278701, 2001.
|
| 473 |
+
[64] M. E. Newman and D. J. Watts, “Renormalization group analysis of the small-world network model,” Physics Letters A, vol. 263, no. 4-6, pp. 341–346, 1999.
|
| 474 |
+
[65] D. J. Watts and S. H. Strogatz, "Collective dynamics of 'small-world' networks," Nature, vol. 393, no. 6684, pp. 440-442, 1998.
|
| 475 |
+
[66] W. H. Kruskal and W. A. Wallis, "Use of ranks in one-criterion variance analysis," Journal of the American Statistical Association, vol. 47, no. 260, pp. 583-621, 1952.
|
| 476 |
+
[67] P. Yanardag and S. Vishwanathan, "Deep graph kernels," in Proceedings of the ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD), 2015, pp. 1365-1374.
|
2203.10xxx/2203.10552/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1a347aafca575b044149438b6d9721a6cbaa65921723cb70e2cc1f47ad6720d3
|
| 3 |
+
size 1332157
|
2203.10xxx/2203.10552/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2203.10xxx/2203.10555/a9f52836-7f7e-414f-badd-670fdccd2125_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2203.10xxx/2203.10555/a9f52836-7f7e-414f-badd-670fdccd2125_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2203.10xxx/2203.10555/a9f52836-7f7e-414f-badd-670fdccd2125_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d9e4ee579b160709e23fc9e8a34762aaf42780d864c0f8731df8b3c39723a166
|
| 3 |
+
size 1131111
|
2203.10xxx/2203.10555/full.md
ADDED
|
@@ -0,0 +1,402 @@
|
| 1 |
+
# An Empirical Investigation on the Challenges Faced by Women in the Software Industry: A Case Study
|
| 2 |
+
|
| 3 |
+
Bianca Trinkenreich
|
| 4 |
+
|
| 5 |
+
Northern Arizona University
|
| 6 |
+
|
| 7 |
+
Flagstaff, AZ, USA
|
| 8 |
+
|
| 9 |
+
bianca_trinkenreich@nau.edu
|
| 10 |
+
|
| 11 |
+
Marco A. Gerosa
|
| 12 |
+
|
| 13 |
+
Northern Arizona University
|
| 14 |
+
|
| 15 |
+
Flagstaff, AZ, USA
|
| 16 |
+
|
| 17 |
+
marco.gerosa@nau.edu
|
| 18 |
+
|
| 19 |
+
Ricardo Britto
|
| 20 |
+
|
| 21 |
+
Ericsson
|
| 22 |
+
|
| 23 |
+
Blekinge Institute of Technology
|
| 24 |
+
|
| 25 |
+
Karlskrona, Sweden
|
| 26 |
+
|
| 27 |
+
ricardo.britto@ericsson.com
|
| 28 |
+
|
| 29 |
+
Igor Steinmacher
|
| 30 |
+
|
| 31 |
+
Univ. Tecnológica Federal do Paraná
|
| 32 |
+
|
| 33 |
+
Brazil
|
| 34 |
+
|
| 35 |
+
igorfs@utfpr.edu.br
|
| 36 |
+
|
| 37 |
+
# ABSTRACT
|
| 38 |
+
|
| 39 |
+
Context: Addressing women's under-representation in the software industry, a widely recognized concern, requires attracting as well as retaining more women. Hearing from women practitioners, particularly those positioned in multi-cultural settings, about their challenges, and adopting solutions grounded in their lived experience, can support the design of programs to resolve the under-representation issue. Goal: We investigated the challenges women face in global software development teams, particularly what motivates women to leave their company; how those challenges might break down according to demographics; and strategies to mitigate the identified challenges. Method: To achieve this goal, we conducted an exploratory case study in Ericsson, a global technology company. We surveyed 94 women and employed mixed methods to analyze the data. Results: Our findings reveal that women face socio-cultural challenges, including work-life balance issues, benevolent and hostile sexism, lack of recognition and peer parity, impostor syndrome, glass ceiling bias effects, the prove-it-again phenomenon, and the maternal wall. The participants of our research provided different suggestions to address or mitigate the reported challenges, including sabbatical policies, flexibility of location and time, parenthood support, soft skills training for managers, equality of payment and opportunities between genders, mentoring and role models to support career growth, directives to hire more women, inclusive groups and events, women's empowerment, and recognition of women's success. The framework of challenges and suggestions can inspire further initiatives both in academia and industry to onboard and retain women.
|
| 40 |
+
|
| 41 |
+
# KEYWORDS
|
| 42 |
+
|
| 43 |
+
women, diversity, gender, inclusion, software engineering
|
| 44 |
+
|
| 45 |
+
Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than ACM must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.
|
| 46 |
+
|
| 47 |
+
ICSE-SEIS'22, May 21-29, 2022, Pittsburgh, PA, USA
|
| 48 |
+
|
| 49 |
+
© 2022 Association for Computing Machinery.
|
| 50 |
+
|
| 51 |
+
ACM ISBN 978-1-4503-9227-3/22/05...$15.00
|
| 52 |
+
|
| 53 |
+
https://doi.org/10.1145/3510458.3513018
|
| 54 |
+
|
| 55 |
+
# ACM Reference Format:
|
| 56 |
+
|
| 57 |
+
Bianca Trinkenreich, Ricardo Britto, Marco A. Gerosa, and Igor Steinmacher. 2022. An Empirical Investigation on the Challenges Faced by Women in the Software Industry: A Case Study. In Software Engineering in Society (ICSE-SEIS'22), May 21–29, 2022, Pittsburgh, PA, USA. ACM, New York, NY, USA, 12 pages. https://doi.org/10.1145/3510458.3513018
|
| 58 |
+
|
| 59 |
+
# LAY ABSTRACT
|
| 60 |
+
|
| 61 |
+
Women represent less than $24\%$ of employees in the software development industry and experience various types of prejudice and bias. Even in companies that care about Diversity & Inclusion, "untying the mooring ropes" of socio-cultural problems is hard. Hearing from women, especially those working in a multi-cultural organization, about their challenges and adopting their suggestions can be vital to design programs and resolve the under-representation issue. In this work, we worked closely with a large software development organization that invests in and values diversity and inclusion. We listened to the challenges women face in the global software development teams of this company and to what these women suggest to reduce the problems and increase retention. Our research showed that women face work-life balance issues and encounter invisible barriers that prevent them from rising to top positions. They also suffer micro-aggressions and sexism, need to show competence constantly, are supervised in essential tasks, and receive less work after becoming mothers. Moreover, women miss having more female colleagues, and lack self-confidence and recognition. The women from the company suggested sabbatical policies, flexibility of location and time, parenthood support, soft skills training for managers, equality of opportunities, role models to support career growth, directives to hire more women, support groups and more interaction between women, inclusive groups and events, and women's empowerment by publishing their success stories in the media and recognizing their achievements. Our results have been shared with the company's Human Resources department and management, who considered the diagnosis helpful and will work on actions to mitigate the challenges that women still perceive.
|
| 62 |
+
|
| 63 |
+
# 1 INTRODUCTION
|
| 64 |
+
|
| 65 |
+
Diverse software teams are more likely to understand user needs, contributing to a better alignment between the delivered software
|
| 66 |
+
|
| 67 |
+
and its intended customers [51]. Diversity further positively affects productivity by bringing together different perspectives [73] and fosters innovation and problem-solving capacity, leading to a healthier work environment [19]. However, women are still underrepresented in the software industry [35]. Reducing the gender gap in the software industry requires not only attracting, but also retaining women.
|
| 68 |
+
|
| 69 |
+
Women<sup>1</sup> often face socio-cultural challenges in the software industry and could decide to leave their jobs (or even the software industry) if diversity is not a priority [42]. Gender-related incidents can be so severe that they motivate women to leave a project or their jobs [55, 72]. Kuechler et al. [40] suggest that women drop out because their jobs are not aligned to their motivations or due to the unappealing and hostile social dynamics in their daily work. Understanding the reasons behind the decision to step out of a project or role can help create strategies to increase retention.
|
| 70 |
+
|
| 71 |
+
Challenges faced by women have been largely investigated in the free and open source software (F/OSS) development context (e.g., [5, 12, 13, 36, 42, 53, 55, 56, 58, 67, 71, 72, 74]). Although there are a few studies focusing on software companies [14, 52, 54, 78], the results are still preliminary and more studies are needed to contribute to theory building. In this context, theory is built by aggregating results from case studies and related qualitative research by making comparisons, looking for similarities and differences within the collected data, and by examining future questions [41].
|
| 72 |
+
|
| 73 |
+
In this paper, we contribute to this scientific body of knowledge by reporting a case study in a multi-cultural global software development organization from a large company, namely Ericsson, which is a global private company that has more than 100,000 employees around the world, and is one of the leading providers of Information and Communication Technology (ICT). One of the Diversity & Inclusion business goals of the company is to reach at least $25\%$ proportion of women in every suborganization of the company. However, the software development organization still struggles to attract and retain women and has partnered with external researchers to understand the challenges and strategies from the point of view of the women themselves. As the phenomenon under study is complex because it involves different perspectives and disciplines, it is critical to consider how women perceive and give meaning to their social reality.
|
| 74 |
+
|
| 75 |
+
Therefore, our goal is to identify the challenges that women face in the company, according to them, and investigate the measures that women recommend to mitigate the identified challenges. To achieve our goal, we defined the following research questions:
|
| 76 |
+
|
| 77 |
+
RQ1: What challenges do women face in software teams?
|
| 78 |
+
|
| 79 |
+
RQ2: What are possible actions to mitigate the identified challenges, from the women's perspectives?
|
| 80 |
+
|
| 81 |
+
To answer our research questions, we collected data through an online questionnaire that included questions about the challenges that are currently being faced, reasons women would decide to leave, and suggestions to increase women's participation. The questionnaire was answered by 94 women from a software development
|
| 82 |
+
|
| 83 |
+
suborganization of Ericsson. The novelty of the work includes considering a multi-cultural global software development organization from a large IT company, breaking down the challenges according to demographics, identifying challenges that push women out of the company, and connecting challenges to potential strategies to attract and retain women from the point of view of the women themselves, who are on the front lines of the problem.
|
| 84 |
+
|
| 85 |
+
We introduce our research design in Section 2 and the study results in Section 3. Section 4 discusses the results and implications of our results, followed by related work in Section 5, limitations, and conclusions in Sections 6 and 7.
|
| 86 |
+
|
| 87 |
+
# 2 RESEARCH DESIGN
|
| 88 |
+
|
| 89 |
+
To answer our research questions, we conducted an exploratory case study [62] via a questionnaire administered to women from one of the software development suborganizations of Ericsson. In this section, we describe the case and the phases of planning, data collection, and analysis.
|
| 90 |
+
|
| 91 |
+
# 2.1 The Case and Unit of Analysis
|
| 92 |
+
|
| 93 |
+
The case and unit of analysis is one of Ericsson's software development organizations. Ericsson is a global and large company that develops telecommunications-related products. It has more than 100,000 employees who are geographically distributed in several countries, including India, Sweden, Canada, USA, Poland, Brazil, and Germany.
|
| 94 |
+
|
| 95 |
+
To diagnose how the company is doing in terms of gender diversity, every year, Ericsson<sup>2</sup> publishes a diversity report with the percentage of employees who identify themselves as women<sup>3</sup>. The company has a goal to achieve at least $25\%$ of women as employees in all suborganizations. This percentage was achieved for the whole company in 2020, but not consistently in every suborganization. One of the software development organizations still dominated by men (the name of which is omitted for confidentiality reasons) decided to conduct systematic research to understand the viewpoint of the women who currently work at the company, in order to plan informed and bottom-up actions to mitigate the reported challenges. The company is interested in understanding the challenges women report as well as their suggestions to improve the current situation, as a contemporary phenomenon within its real-life context [80]. This case study was collaboratively conducted by one researcher from the company and three outsiders.
|
| 96 |
+
|
| 97 |
+
# 2.2 Data Collection
|
| 98 |
+
|
| 99 |
+
We administered an online questionnaire<sup>4</sup> using the Qualtrics tool<sup>5</sup> to employees from the software development suborganization who self-identify as women. We opted for a questionnaire instead of interviews to increase participation and coverage of the research. The questionnaire was designed to understand the state of the problem, asking open questions about the challenges faced, the reasons that drove women they know to leave the company, and their suggestions to retain more women in the company. The demographic
|
| 100 |
+
|
| 101 |
+
questions were the last part of the questionnaire, and were chosen to help us investigate possible relations between groups. Although family is universal to all genders, women culturally face a greater pressure to balance work and family [70]. Considering women as the target population of the present study, besides country, age, and experience, we included demographics questions to understand possible intersections of the challenges and family status. All questions were optional to increase the response rate by making respondents more comfortable [57]. After proofreading and testing in multiple browsers and devices, we invited three participants to pilot the questionnaire so we could collect feedback and measure the time to answer. No modification of the questionnaire was necessary, and we discarded these initial answers.
|
| 102 |
+
|
| 103 |
+
The managers of the company software teams sent emails to the women on their teams with the questionnaire link. Every week, the first author of this paper followed up with the company's manager to check the number of answers and identify sites that needed additional encouragement.
|
| 104 |
+
|
| 105 |
+
The questionnaire was open for answers between June 11 and July 20, 2021, and received 94 non-blank answers. We report the demographics of the respondents in Table 1. Most of the respondents were residents of India $(64.9\%)$ , less than 35 years old $(50.0\%)$ , married $(62.8\%)$ , living with children $(52.1\%)$ , with more than 10 years of experience in the software industry $(45.7\%)$ and less than 5 years working in the company $(35.1\%)$ . A balanced proportion of respondents have and do not have children living with them. The sample mirrors the company's numbers. In 2020, the prevalent nationality $(55\%)$ of the employees from the studied suborganization was Indian. Although we do not have data about the employees' ages in each suborganization, $31\%$ of the overall number of employees are less than 35 years old. The other demographics are not measured by the company's annual report.
|
| 106 |
+
|
| 107 |
+
We filtered our data to consider only valid responses. We manually inspected the open text questions, looking for blank answers. Instead of removing the entire response when one of the questions was blank or did not report challenges or suggestions, we separated data into two datasets of 94 answers each: one dataset for challenges and one dataset for suggestions. Then, we named each respondent from S1 to S94, removed blanks for each dataset (3 removed from challenges and 15 for suggestions). Next, we removed the answers that were not informing any challenge or suggestion (27 removed from challenges who reported that they do not face any challenge and 9 from the suggestions who reported having no proposed solutions). The final step was to check potential duplicate participation, even though the tool used in our investigation (Qualtrics) has mechanisms to prevent multiples responses from the same participant. The final challenges' dataset had 64 answers, while the suggestions' dataset had 70 answers.
|
| 108 |
+
|
| 109 |
+
# 2.3 Data Analysis
|
| 110 |
+
|
| 111 |
+
To answer both RQ1 and RQ2, we analyzed the responses to the open questions about challenges, reasons for leaving, and suggestions to increase women's participation in the company. The first author qualitatively analyzed the answers to the open questions by inductively applying open coding [49] to organize what participants reported. We then organized our categories following concepts
|
| 112 |
+
|
| 113 |
+
Table 1: Personal characteristics of the respondents (n=94)
|
| 114 |
+
|
| 115 |
+
<table><tr><td>Demographics</td><td>#</td><td>%</td></tr><tr><td>Experience: ≤ 5 years in software industry</td><td>12</td><td>12.8%</td></tr><tr><td>Experience: > 5 & < 10 years in software industry</td><td>21</td><td>22.3%</td></tr><tr><td>Experience: ≥ 10 years in software industry</td><td>43</td><td>45.7%</td></tr><tr><td>Did not inform</td><td>18</td><td>19.1%</td></tr><tr><td>Tenure: ≤ 5 years in Ericsson</td><td>33</td><td>35.1%</td></tr><tr><td>Tenure: > 5 & < 10 years in Ericsson</td><td>26</td><td>27.7%</td></tr><tr><td>Tenure: ≥ 10 years in Ericsson</td><td>24</td><td>25.5%</td></tr><tr><td>Did not inform</td><td>11</td><td>11.7%</td></tr><tr><td>Age: Less than 35</td><td>47</td><td>50.0%</td></tr><tr><td>Age: 35 to 44</td><td>27</td><td>28.7%</td></tr><tr><td>Age: 45 to 54</td><td>10</td><td>10.6%</td></tr><tr><td>Did not inform</td><td>10</td><td>10.6%</td></tr><tr><td>Country: India</td><td>61</td><td>64.9%</td></tr><tr><td>Country: Brazil</td><td>11</td><td>11.7%</td></tr><tr><td>Country: Canada</td><td>8</td><td>8.5%</td></tr><tr><td>Country: Others</td><td>5</td><td>5.3%</td></tr><tr><td>Did not inform</td><td>9</td><td>9.6%</td></tr><tr><td>Marital Status: Married or domestic partnership</td><td>59</td><td>62.8%</td></tr><tr><td>Marital Status: Single or divorced</td><td>24</td><td>25.5%</td></tr><tr><td>Did not inform</td><td>11</td><td>11.7%</td></tr><tr><td>Have children living with: Yes</td><td>49</td><td>52.1%</td></tr><tr><td>Have children living with: No</td><td>36</td><td>38.3%</td></tr><tr><td>Did not inform</td><td>9</td><td>9.6%</td></tr></table>
|
| 116 |
+
|
| 117 |
+
from existent theories, such as sexism (hostile and benevolent) [27], impostor syndrome [16], maternal wall [75], prove-it again [7], glass ceiling [37, 64], work-life balance issues [28], lack of peer parity [22], and lack of recognition [4]. We built post-formed codes, having three of the authors conducting card sorting sessions [68], including discussing the codes and categorization until reaching consensus about the codes and the corresponding literature.
|
| 118 |
+
|
| 119 |
+
After completing the qualitative analysis, we checked the distribution of answers categorized in each challenge. From the 64 women who reported they face some type(s) of challenges, 34 reported challenges related to only one category, 25 to two different categories, 3 to three categories, and 2 women reported challenges related to four categories. We also used descriptive statistics to summarize the responses and their association with the demographics data [77].
|
| 120 |
+
|
| 121 |
+
To analyze how the challenges differ according to individual characteristics, we segmented our sample based on experience in the software industry (experienced: $\geq 10$ years of experience vs. less experienced: $< 10$ years), tenure (longer tenure: $\geq 10$ years in the company vs. shorter tenure: $< 10$ years), age (older: $\geq$ the median of 35 years vs. younger: $<$ the median), married or not, and living, or not, with children.
|
| 122 |
+
|
| 123 |
+
See supplementary material<sup>6</sup> for additional details, including the answers to the demographics, the open questions and the qualitative analysis codes.
|
| 124 |
+
|
| 125 |
+
Next, we calculated the odds ratio for each challenge and demographic split. We interpreted the results as follows (a small computation sketch follows the list):
|
| 126 |
+
|
| 127 |
+
- if Odds Ratio = 1, both groups are equally distributed for the reported challenge.
|
| 128 |
+
- if Odds Ratio $>1$ , the likelihood for the reported challenge is higher for the first group (in our case: experienced, older than 35, married and living with children).
|
| 129 |
+
- if Odds Ratio $< 1$ , the likelihood for the reported challenge is higher for the second group (in our case: novices, younger than 35 years, single or divorced, without children).
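A minimal sketch of the odds-ratio computation for one challenge and one demographic split is shown below; the counts are illustrative, not taken from our data.

```python
def odds_ratio(a: int, b: int, c: int, d: int) -> float:
    """2x2 table: a/b = group 1 reporting / not reporting the challenge;
    c/d = the same counts for group 2."""
    return (a / b) / (c / d)

# Illustrative counts: 12 of 30 women in group 1 report a challenge,
# vs. 8 of 40 women in group 2.
print(odds_ratio(12, 18, 8, 32))  # ~2.67 -> higher likelihood for group 1
```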
|
| 130 |
+
|
| 131 |
+
# 3 STUDY RESULTS
|
| 132 |
+
|
| 133 |
+
In this section, we present the results of our investigation, which are grouped by research question.
|
| 134 |
+
|
| 135 |
+
# 3.1 RQ1: What challenges do women face in software development teams?
|
| 136 |
+
|
| 137 |
+
We found eight challenge categories, as presented in Fig. 1. We marked with an asterisk (*) the challenges reported by at least one person as a reason to leave the company. Table 2 presents the number of participants whose responses fit in each category. In the following, we present more details about our findings organized by challenge category.
|
| 138 |
+
|
| 139 |
+
3.1.1 Work-Life Balance Issues. Our participants reported that before the COVID-19 pandemic the company was STRICT TO THE PHYSICAL LOCATION and many women left due to facing the "trailing spouse" effect [10, 31], moving to another city or country when their spouse was relocated.
|
| 140 |
+
|
| 141 |
+
After the COVID-19 pandemic and suddenly working from home, women reported facing PRESSURE TO WORK EXTRA HOURS, "having no boundary set for working hours" (S24), and having to either attend meetings in different time zones or to learn new knowledge for the work. They also mentioned the consequences for not giving in to this pressure: they would be excluded from decisions that are made during the meetings and are perceived by others as "lacking in teamwork" (S30).
|
| 142 |
+
|
| 143 |
+
When working extra and long hours, women feel stressed and have trouble disconnecting from work, "impacting other household chores and having hardly any time left to bring some peace to the mind" (S49). The LACK OF WELL-BEING SUPPORT causes high levels of stress, which would be one of the reasons to quit.
|
| 144 |
+
|
| 145 |
+
Besides the inflexible location, our participants also mentioned that before the COVID-19 they lacked flexible work hours (which improved during the pandemic) and paid sick leave (specific for one location, for which the local laws do not cover this). These points are really important for those who have parenting and caretaking responsibilities, though. Lack of daycare in the office was associated with LACK OF PARENTAL SUPPORT, which can cause women to leave the company.
|
| 146 |
+
|
| 147 |
+
3.1.2 Sexism. The ambivalent sexism theory [27] defines sexism as a multidimensional construct that includes two sets of attitudes: hostile and benevolent. Sexism has typically been conceptualized as a reflection of hostility toward women [27]. Hostile sexism is related to the classic definition of prejudice [3].
|
| 148 |
+
|
| 149 |
+
Our participants from our investigation reported microaggressions in which their "voices are suppressed per opposite gender" (S19). Besides not being heard during technical discussions, women receive various diminishing comments, such as that women cannot bring the same results as men and that "women come to work only for time pass or are not brilliant enough" (S85). Exposure to such diminishing comments can be a reason to leave.
|
| 150 |
+
|
| 151 |
+
BENEVOLENT SEXISM represents the subjectively positive feelings toward a gender that often bring some sexist antipathy [27]. The participants reported being "pampered, never been given a hard/straight feedback" (S57) and being included in initiatives only because they are women, not because of their skills and capacity.
|
| 152 |
+
|
| 153 |
+
3.1.3 Lack of Recognition. Feeling valued or appreciated is part of Maslow's hierarchy of human needs [4]. The participants mentioned "Not been recognized by her job" (S63) and that WOMEN'S RESULTS ARE USUALLY EVALUATED AS OK, NEVER AS EXCELLENT (S57), even when accomplishing exceptional work. No PRAISES from managers was considered one of the reasons to leave.
|
| 154 |
+
|
| 155 |
+
3.1.4 Lack of Peer Parity. Being surrounded by similar individuals to which to compare oneself, or identifying with at least one other peer in the team, is known as peer parity [22].
|
| 156 |
+
|
| 157 |
+
The participants mentioned an "[im]balance in men:women ratio" (S37) and two consequences: (i) impact on their social capital, as they considered it "HARD TO COLLABORATE SOCIALLY" (S55), "[be]cause men socialize in a different way than women do" (S12); and (ii) impact on developing their self-confidence due to LACK OF ROLE MODELS, as they "lack [...] strong women leaders as mentors" (S14).
|
| 158 |
+
|
| 159 |
+
3.1.5 Impostor Syndrome. Impostor Syndrome (also known as impostor phenomenon, fraud syndrome, perceived fraudulence, or impostor experience), describes an experience of individuals who, despite their objective successes, struggle to internalize their accomplishments, feel persistent self-doubt, and being exposed as a fraud or impostor [16].
|
| 160 |
+
|
| 161 |
+
Our participants mentioned as a challenge and reason to leave situations in which WOMEN PERSONALIZE FAILURES AND “FEEL ASHAMED AND INFERIOR more than men and they tend to escape it by leaving [the company], but always masked as personal reasons” (S6).
|
| 162 |
+
|
| 163 |
+
3.1.6 Glass Ceiling. describes a corporate world phenomenon in which minorities' access to the top-management positions is blocked by tradition or culture [37], as an invisible structural barrier that prevent minorities from career advancement [64].
|
| 164 |
+
|
| 165 |
+
Two reasons to leave reported by the participants included the perception of PAY INEQUALITY BETWEEN GENDERS and INFERIOR CAREER GROWTH OPPORTUNITIES for women. For the former, S30 stated that "women employees are paid less compared to male counterparts", while for the last, S69 mentioned that she "reach[ed] a stage where [they] have nowhere to [climb next] in the ladder" (S69). Still, women reported that they feel that they work harder to achieve the same positions as men, indicating a possible LACK OF TRANSPARENCY ABOUT THE LADDER CRITERIA, as their ambition is discouraged,
|
| 166 |
+
|
| 167 |
+
Table 2: Representative examples of answers to the challenges' open question, number and percentage of women whose answer was coded for each category. In parenthesis the number of women who reported that challenge as a reason to leave.
|
| 168 |
+
|
| 169 |
+
<table><tr><td>Challenge</td><td>Representative examples</td><td>#</td><td>%(n=64)</td></tr><tr><td>Work-Life Balance Issues</td><td>"To be successful, any professional is expected to work super-hard and go far and beyond, working overtime, constantly learn new things and this takes a lot of energy, but after my working hours end, my home, my second and more important work starts..." (S40)</td><td>35 (25)</td><td>54.7%</td></tr><tr><td>Sexism</td><td>"In technical discussions, people feel that women cannot do it better so they make comments which are makes you uncomfortable" (S23)</td><td>14 (3)</td><td>21.9%</td></tr><tr><td>Lack of Recognition</td><td>"Lack of praise" (S38). Not been recognized by the job" (S63)</td><td>2 (2)</td><td>3.1%</td></tr><tr><td>Lack of Peer Parity</td><td>"Working in a place where there is a dominant number of male colleagues, inclusion in all discussions might not be uniform across" (S43)</td><td>9</td><td>14.1%</td></tr><tr><td>Impostor Syndrome</td><td>"When it comes to failure, the failures are easily owned and personalised by a women and they on resign their own compared to men" (S57)</td><td>2 (1)</td><td>3.1%</td></tr><tr><td>Glass Ceiling</td><td>"Glass roof, only a few women as leaders" (S38). "When women try to achieve more things they are called ambitious in a negative way. Whereas men are expected to" (S11)</td><td>26 (19)</td><td>40.6%</td></tr><tr><td>Prove-it Again</td><td>"As a woman I need to work harder to achieve the same as a man. I need to show competence and to be 100% right all the time." (S25)</td><td>9</td><td>14.1%</td></tr><tr><td>Maternal Wall</td><td>"Some people treat you differently because you just had a kid, giving you less work [...] and framing you inside a box" (S79).</td><td>4 (1)</td><td>6.3%</td></tr></table>
|
| 170 |
+
|
| 171 |
+
The total per challenge is not the sum of the respondents since the participants often provided an answer that was categorized into more than one challenge.
|
| 172 |
+
|
| 173 |
+

|
| 174 |
+
Figure 1: The challenges reported by women who participated in our study. Those marked with an asterisk (*) were reported as a challenge that ultimately can lead women to leave the company.
|
| 175 |
+
|
| 176 |
+

|
| 177 |
+
|
| 178 |
+
while "corporate politics are played by men" (S21) and MEN LIFT ONLY THEIR COUNTERPARTS TO TOP LAYER.
|
| 179 |
+
|
| 180 |
+
3.1.7 Prove-it Again. Effect is a bias that occurs when a member of a group that does not align with stereotypes is measured at a stricter standard than those who do align with the stereotypes and, consequently, has to constantly provide more evidence to demonstrate competence [7].
|
| 181 |
+
|
| 182 |
+
The participants mentioned that women NEED TO SHOW COMPETENCE ALL THE TIME: "put extra effort to be heard when there is competition between men" (S84) and having "no room to slip [up]" (S41). Lastly, women feel they need to prove themselves WHEN RECEIVING AN IMPORTANT TASK, [AS] THEY ARE SUPERVISED by another person to guarantee they do it correctly (S65).
|
| 183 |
+
|
| 184 |
+
3.1.8 Maternal Wall. describes the experience of mothers whose coworkers perceive and judge them as having made one of two choices: either they continue to work and neglect their family, making the mother less likable, or the mother prioritizes family over work, making them less reliable in the workplace [75]. Our participants reported that women who are mothers receive [FEWER] RESPONSIBILITIES BECAUSE THEY HAVE KIDS, as they are believed to not be able to handle much work. One of them reported "surprising colleagues that [they] are able to handle it all" (S7). In addition, one of the reasons that cause women to leave is that "when returning from maternity leave, not enough support is provided or generally THE WOMAN IS ASKED TO STEP DOWN FROM THE ROLE" (S38).
|
| 185 |
+
|
| 186 |
+
3.1.9 A segmented look at the challenges perceived by women. In addition to the categorization described above, we took a deeper look into the results to understand the prevailing reports of challenges among our respondents and across different demographics. We avoid using the numerical prevalence of evidence to indicate the importance or criticality of any challenge. However, when presenting the results, we use supplementary and corroborative counting of the responses to triangulate the qualitative analysis [29]. The majority of respondents reported challenges related to WORK-LIFE BALANCE ISSUES (54.7%), GLASS CEILING (40.6%), and SEXISM (21.9%). Figure 2 illustrates those three categories of challenges for each demographic.
|
| 187 |
+
|
| 188 |
+
We used the most representative demographic subgroup (see Table 1) to present the percentages, which reflect the number of participants who mentioned any difficulties that were classified under each category of challenge. Most of the respondents who reported challenges related to WORK-LIFE BALANCE ISSUES are married and live in India, with proportions higher than $80\%$ . No demographic was so prevalent for glass ceiling and sexism, demonstrating that both challenges were reported by a broader subgroup.
|
| 189 |
+
|
| 190 |
+
We also calculated the odds ratio for each of the three most reported challenges, considering the demographics detailed in Section 2.3. Table 3 presents the results of the odds ratio for each category of challenge. According to our sample, women with more experience in the software industry have higher odds (3.62x) than those with less experience to report challenges related to WORK-LIFE BALANCE ISSUES. The odds that married women report challenges related to WORK-LIFE BALANCE ISSUES are $5.08\mathrm{x}$ higher than for single or divorced women.
|
| 191 |
+
|
| 192 |
+
When analyzing the intersectionality of the demographics, we learned that all of the 20 women who have more than 10 years of experience in the software industry and reported challenges that were categorized into WORK-LIFE BALANCE ISSUES are married or live in a domestic relationship with children. Regarding the GLASS CEILING, we also observed that more than the half of the women who reported this challenge have more than 10 years of experience in the software industry, are married or live in a domestic relationship, and live with children.
|
| 193 |
+
|
| 194 |
+
We found eight categories of challenges, with the most mentioned ones categorized into work-life balance issues, glass ceiling, and sexism. Women from our sample with more experience in the software industry and who were married were more likely to report challenges related to work-life balance issues.
|
| 195 |
+
|
| 196 |
+
# 3.2 RQ2: What are possible actions to mitigate the identified challenges, from the women's perspectives?
|
| 197 |
+
|
| 198 |
+
We now present the actions recommended by the participants to help mitigate the challenges and retain more women in the company. Our analysis revealed six categories that explain the actions suggested by women, which we present in Fig. 3.
|
| 199 |
+
|
| 200 |
+
Table 4 presents the number of participants whose responses fit in each category. Most of the respondents suggested the company HIRE MORE WOMEN (35.7%), SUPPORT WORK-LIFE BALANCE (30.0%), and EMBRACE EQUALITY (28.6%). We used the most representative demographic subgroup (see Table 1) to present the percentages, which reflect the number of participants who mentioned any difficulties that were classified under each category of suggestion. In the following, we present our findings organized by category.
|
| 201 |
+
|
| 202 |
+
3.2.1 Embrace Equality. Using the definition provided by UNESCO: "Gender equality exists when all genders enjoy the same status and have equal conditions, treatment, and opportunities for realizing their full potential, human rights and for contributing to and benefiting from economic, social, cultural and political development." [65].
|
| 203 |
+
|
| 204 |
+
In terms of cultural improvements, the participants suggested the company TRAIN ALL MANAGERS IN SOFT SKILLS TO HAVE MORE EMPATHY AND AVOID BURNOUT in their teams, so they can also RESPECT AND GIVE VOICE TO WOMEN.
|
| 205 |
+
|
| 206 |
+
In terms of process improvements, participants called for more equality in terms of payment, opportunities, and challenges. Providing "equal payment between genders" (S28), "equal opportunities without considering whether it is a women who is applying" (S88), and "giving [women] equal challenges like [those] given to the male employees which allow them to venture into more of learning and become confident" (S54).
|
| 207 |
+
|
| 208 |
+
Finally, our participants asked to "NOT ALLOCATE WOMEN ONLY IN PROCEDURAL TASKS, but include [them] in projects in which they feel [they are a] part [of the team], responsible and that can challenge their skills" (S62). This can give them equal opportunities and break the glass ceiling.
|
| 209 |
+
|
| 210 |
+
3.2.2 Support Women's Career Growth. To break the glass ceiling, the participants mentioned the need to ENCOURAGE WOMEN TO ADVANCE IN THEIR CAREER, "moving ahead in other streams apart from people management roles as well and HAVING MORE WOMEN IN TECHNICAL LEADERSHIP ROLES where very few women seem to step in" (S30).
|
| 211 |
+
|
| 212 |
+
Mentoring can "help younger talents to identify themselves and give them confidence and more prospects of continuing their career in the company" (S65). Prepare women to advance in career laddering by mentoring by OTHER WOMEN WHO ARE ROLE MODELS, which can happen during "programs for women in leadership roles", "showing them how to grow, by teaching the skills that will help them get recognized in the crowd and make her feel valued" (S42).
|
| 213 |
+
|
| 214 |
+
3.2.3 Hire More Women. To hire more women, our participants suggested changes to the job opportunities the company offers and to the recruitment process, and to invest in marketing Regarding
|
| 215 |
+
|
| 216 |
+
Table 3: Odds ratios per personal characteristic
|
| 217 |
+
|
| 218 |
+
<table><tr><td></td><td>More Experienced vs. Less Experienced</td><td>More Years in Company vs. Less Years in Company</td><td>Older vs. Younger</td><td>Married vs. Single</td><td>Child vs. no child</td></tr><tr><td>Work-Life Balance Issues</td><td>3.17**</td><td>1.09</td><td>2.51</td><td>5.08**</td><td>3.29</td></tr><tr><td>Glass Ceiling</td><td>0.48</td><td>1.29</td><td>0.38</td><td>0.76</td><td>1.46</td></tr><tr><td>Sexism</td><td>0.46</td><td>0.34</td><td>0.47</td><td>0.37</td><td>2.13</td></tr></table>
|
| 219 |
+
|
| 220 |
+
Significance codes: $^* p < 0.10$ $^{**}p < 0.05$ $^{**}p < 0.01$
|
| 221 |
+
Note: Odds ratio $> 1$ means that the first segment has greater chances of reporting the challenge than the second. Ratio $< 1$ means the opposite. The challenges were coded from the open question.
|
| 222 |
+
|
| 223 |
+

|
| 224 |
+
Figure 2: Subgroup analysis of the top three categories of challenges. The opacity of the icons represents the percentage of each category of challenge. Darker means a higher and lighter a lower percentage. The percentage below each challenge represents the number of respondents who reported that challenge. Some respondents provided answers about challenges that accounted for more than one category.
|
| 225 |
+
|
| 226 |
+

|
| 227 |
+
|
| 228 |
+

|
| 229 |
+
|
| 230 |
+
Table 4: Representative examples of answers to the suggestions' open question, number and percentage of women whose answer were coded for each category
|
| 231 |
+
|
| 232 |
+
<table><tr><td>Suggestion</td><td>Representative examples</td><td>#</td><td>% (n=70)</td></tr><tr><td>Embrace Equality</td><td>‘Speaking gender-diversity alone will not be suffice, it should also be reflected in the equal payouts of deserving female candidates” (S28)</td><td>20</td><td>28.6%</td></tr><tr><td>Support Women’s Career Growth</td><td>“Promoting women to senior jobs and leadership would help younger talents to identify themselves with the company, giving them confidence and more prospects of continuing their career in the company” (S65)</td><td>14</td><td>20.0%</td></tr><tr><td>Hire More Women</td><td>“Active search for female talents” (S39) Conduct women-only drives to hire fresh talent from girls colleges.” (S59) “Publicize openings in workshops that are focused on women in market” (S63)</td><td>25</td><td>35.7%</td></tr><tr><td>Promote Women’s Groups and Events Women</td><td>“Virtual meetings, debates..” (S66) “diversity events, so minorities to feel more valued.” (S40)</td><td>3</td><td>4.3%</td></tr><tr><td>Empower Women</td><td>“Be more active on social media and do external open talks with woman from Ericsson talking about their work.” (S10) “Deserving women should be recognized and rewarded” (S28)</td><td>7</td><td>10.0%</td></tr><tr><td>Support Work-Life Balance</td><td>“Flexible work hours, and to focus on ensuring work-life balance. Workaholics tend to breach the latter, not just their own, but the team’s too in a collaborative environment.” (S43) “Enable women taking a break from career due to motherhood to return to the workforce. That is where we loose them.” (S2)</td><td>21</td><td>30.0%</td></tr></table>
|
| 233 |
+
|
| 234 |
+
The total per suggestion of improvement is not the sum of the respondents since the participants often provided an answer that was categorized into more than one suggestion.
|
| 235 |
+
|
| 236 |
+
the job opportunities, they suggest the company MAKE THEM ATTRACTIVE TO WOMEN'S NEEDS, create more part-time positions, and reserve positions prioritized for internal candidates and women.
|
| 237 |
+
|
| 238 |
+
Regarding the action related to CHANGE TO THE RECRUITMENT PROCESS, besides HAVING MORE WOMEN AS RECRUITERS, the suggestions included transparency about the required skills, advertising
|
| 239 |
+
|
| 240 |
+

|
| 241 |
+
Figure 3: Strategies suggested by women for the company to increase women in software development teams
|
| 242 |
+
|
| 243 |
+
job openings to women's groups and events, and raising awareness about how the company supports women's growth. Finally, the suggestions included base-level actions like INVESTING IN PROGRAMS TO ATTRACT GIRLS TO STEM, and being "active in the social media that are accessed by the audience (e.g., Snap/Instagram)" (S90) so they would be "securing more talent women to join" (S38) and "from a fresher level" (S58).
|
| 244 |
+
|
| 245 |
+
3.2.4 Promote Women's Groups and Events. Considering that many teams still have only a few women, our participants mentioned that the lack of parity can be mitigated by having WOMEN'S SUPPORT GROUPS and events for "INTERACTION BETWEEN WOMEN FROM DIFFERENT DEPARTMENTS" (S81). The events were suggested to target not only women, but "to celebrate differences and diversity (gender, cultural, age), so that minorities feel more valued" (S40).
|
| 246 |
+
|
| 247 |
+
3.2.5 Empower Women. Empowerment is a strong strategy to fight the impostor syndrome [17] and one of the strategies to foster it is recognition [44]. The study participants recommended RECOGNITION AND ACHIEVEMENTS' REWARDS. Moreover, since women's empowerment refers to enhancement of their power and position in society, some participants suggested that "sharing the stories of [the company's] women can encourage and motivate women employees in achieving the same" (S49). Moreover, empowerment could be achieved by "PUBLISH WOMEN'S SUCCESS STORIES IN SOCIAL MEDIA" (S47) AND EXTERNAL EVENTS.
|
| 248 |
+
|
| 249 |
+
3.2.6 Support Work-Life Balance. Sabbaticals are paid leaves for personal and professional development reasons [50] to promote well-being, and are also beneficial for the company, as they increase future productivity [18]. The participants suggested to IMPLEMENT SABBATICAL POLICIES so they can "take a break from [their] career" (S2) to rest, dedicate some time to their family, and acquire new knowledge.
|
| 250 |
+
|
| 251 |
+
Another suggestion was to "DISCOURGE EXTRA HOURS, as the workaholics tend to breach the latter, not just their own, but the team's too in a collaborative environment" (S43).
|
| 252 |
+
|
| 253 |
+
Maternity leaves usually relate to each country's law. Some countries mandate a long maternity leave, while others mandate a shorter one [25]. The participants protested that "maternity leaves should not be considered an impediment for a woman to grow" (S54)
|
| 254 |
+
|
| 255 |
+
and asked for the company to ASSIST MATERNITY by implementing own rules to extend the paid leave to 1 year, even "beyond local laws of the country" (S22).
|
| 256 |
+
|
| 257 |
+
Participants called for actions to embrace equality and help reduce the sexist culture inside the company, and to mitigate the biases that create maternity wall and prove-it-again effects. Initiatives to support women's growth were suggested to start breaking the glass ceiling. Hiring more women and organizing women's groups could foster peer parity, and women's empowerment could reduce impostor syndrome and provide recognition. Sabbaticals and discouraging long work hours were mentioned as ways to improve the work-life balance.
|
| 258 |
+
|
| 259 |
+
# 4 DISCUSSION
|
| 260 |
+
|
| 261 |
+
In this section, we present a more in-depth discussion of our results in the context of the literature.
|
| 262 |
+
|
| 263 |
+
Work-Life Balance Issues faced by mothers. Work and family are the two most important domains in a person's life and their interface has been the object of study for researchers worldwide [70]. As women assume the role of working professional in addition to their traditional role of homemaker, they are under great pressure to balance their work and personal lives [70]. The societal role expectations, women's career ambitions, and the nature of the IT industry challenges the way women manage their professional and personal lives [30]. The COVID-19 pandemic and the need to work from home cast new light on these issues. While it brought more flexibility to many workers (which is the case of the company studied), it also brought new challenges [23, 60]. For a great share of the population, it became hard to separate personal and professional life. Women felt this more than men, given the aforementioned societal expectations[43].
|
| 264 |
+
|
| 265 |
+
Work-life balance is a challenge that happens in and beyond the IT industry. In Japan, work-life balance is a general challenge, and the low numbers of women in medicine reflect the societal belief that careers and motherhood do not mix [61]. In contrast, Scandinavia has similar numbers of men and women physicians, which has coincided with the emergence of progressive work-life policies,
|
| 266 |
+
|
| 267 |
+
the belief that women can combine motherhood and employment, and changing expectations of work-life balance. This shows that it is possible and that the mindset in the software industry needs to change.
|
| 268 |
+
|
| 269 |
+
The lack of parent support is one of the major challenges that Indian companies within the IT sector have been trying to navigate over the past decade to ensure a gender inclusive workplace [59]. This is reinforced by our work, in which women provided suggestions on how to IMPROVE PARENTAL POLICIES TO SUPPORT FAMILIES and mitigate this challenge, as we showed is Section 3.2. Sponsoring child care, preferentially in the office and specially for young children, providing adequate maternity leave beyond the relevant country's laws, and also providing more flexibility in work hours and location were some of the suggestions for the company to take in order to mitigate the challenges of work-life balance issues related to motherhood. This is harder to implement in many cases due to the countries' legislation and the local culture, which influence how organizational policies are defined.
|
| 270 |
+
|
| 271 |
+
Prepare women to break the glass ceiling. Analogous to the IT industry, women's barriers in the medical profession and their ability to rise to leadership positions are also influenced by social and cultural context [61]. Similar to software teams, where women are instrumental to reducing community smells [15], in international relations the collaboration between women delegates and women civil society groups positively impacts and brings more durable peace when negotiating peace agreements [39]. By analyzing the career trajectories of women executives across a variety of sectors, Glass and Cook [26] concluded that while attaining promotion to leadership is not easy, serving in a high position can be even more challenging. Although women can be more likely than men to be empowered to high-risk leadership positions, they often lack the support or authority to accomplish their strategic goals. As a result, women leaders often experience shorter tenures compared to their peers who are men [26].
|
| 272 |
+
|
| 273 |
+
The respondents provided suggestions to SUPPORT THEIR CAREER GROWTH to mitigate this challenge, including having women to inspire, encourage and mentor other women, as we showed is Section 3.2. Providing training for mentors on topics such as speaking up on behalf of women who are being disrespected in meetings, managing bias in the workplace, and raising awareness of microaggressions at work are some examples of what should be included as part of standard training and preparation for mentors [24]. The mentoring program can start by having a different woman leader each month discuss her career trajectory and the benefits and challenges of holding her job. Women could share their techniques for managing time, balancing family and career demands, making themselves heard by men, and highlighting how they learn new skills on a regular basis [79]. Besides joining ongoing support groups, women can be assigned to formal mentors for one-on-one regular meetings [38].
|
| 274 |
+
|
| 275 |
+
Combining synergistic suggestions. One option for companies looking to improve women's participation is to combine strategies that are synergistic. The company can start by implementing simple, but structured actions combining ideas from more than one strategy. For example, by publishing success stories of women on media, the company can EMPOWER WOMEN and also attract and HIIRE MORE WOMEN. Another action that combines synergistic strategies
|
| 276 |
+
|
| 277 |
+
is to ARRANGE WOMEN-ONLY GROUPS and analyze messages to implement feasible changes to problems that are being actively discussed and could potentially cause women to leave, as when women report facing hostile sexism. The literature reports that women experience computing environments differently due to sexism and racism, both historically and as part of the current culture [6, 45, 46, 69], potentially leading them to feel unwelcome and lacking of sense of belonging [63], and ultimately to leave [21].
|
| 278 |
+
|
| 279 |
+
Some problems come from beyond the company gates. Ever in a company like Ericsson, which cares about Diversity & Inclusion, "untying the mooring ropes" of socio-cultural problems is difficult. Historically, the social differences influenced by gender roles (i.e., the roles that men and women are expected to occupy based on their sex) may be amplified because of the gendered division of housework and child care tasks, especially for mothers of young children. Impostor syndrome [5, 42, 78], Sexism [12, 14, 53, 67], Lack of Peer Parity [13, 56], Prove-it-Again [36], Glass Ceiling [14] and Work-Life Balance issues [42, 43] were challenges reported by women from the present study and also reported by both women in other software development contexts and in F/OSS. Some problems surpass the organization and are related to the local culture of the employees and managers. There are problems that go beyond the company's gates and bump into the society, which many times contributes to this cultural legacy. One example is the "trailing spouse", when a person who follows his or her life partner to another city because of a work assignment [10, 31]. Moreover, during the COVID-19 pandemic, a longer "double-shift" of paid and unpaid work in a context of school closures and limited availability of care services have contributed to an overall increase in stress, anxiety around job insecurity, and difficulty in maintaining work-life balance among women with children [1]. However, there is also space for improvement in the organization, and Ericsson is committed to implementing the suggested changes to mitigate the challenges faced by its women employees.
|
| 280 |
+
|
| 281 |
+
# 4.1 Implications to the company
|
| 282 |
+
|
| 283 |
+
We presented the results to the managers of the studied suborganization and to the managers of Human Resources department. The feedback was very positive regarding the usefulness of the research. Managers considered the results helpful for the company to understand the current situation and to decide about actions can mitigate the challenges that women currently face and avoid them leaving the company. For the suggestions that are already in place, such as publishing successful stories of women and support groups, the company plans to expand and raise awareness to the employees. Sexism is considered an unacceptable behavior to Ericsson. The managers of the studied suborganization already started to have collective meetings with the team to spread the message and remind that sexism is not tolerated by the company. Following, the company is planning recurrent meetings to plan strategies that address the suggestions provided by the participants. In addition, Ericsson plans to raise awareness about the solutions that already exist (and maybe women are not aware of) and additional actions that can complement the suggestions to mitigate the reported challenges.
|
| 284 |
+
|
| 285 |
+
# 5 RELATED WORK
|
| 286 |
+
|
| 287 |
+
Although diversity is a multidimensional concept that refers to the variety of representations that exist within a group [2, 76], gender (with a focus on women), is the most explored aspect of diversity in software engineering literature [47, 66]. The prevalence of gender as the most studied diversity aspect can be explained by the fact that the technology professions are known to be male dominated, despite the fact that programming was originally seen as a female occupation [8, 9, 20, 34]. According to a longitudinal study that evaluated 50 years of data and the evolution of code contributions since 1970, Zacchiroli [81] showed that woman developers' contributions remain low when compared to those of men. Although the study found that men have always authored more open source code than women, the gap has begun to narrow and women are slowly gaining space.
|
| 288 |
+
|
| 289 |
+
The biases and challenges faced by women in software development teams have been investigated in different industry cases [14, 52, 54, 78]. The Finish women from Wolff et al. [78]'s study reported lack of self-efficacy, which is a possible predecessor of IMPOSTOR SYNDROME, also found in our study. Also similar to our results, the Brazilian women from Canedo et al. [14]'s study also reported HOSTILE SEXISM, including discrimination and bias, and BENEVOLENT SEXISM, when women do not receive the more complex tasks, and GLASS CEILING, as only few women perform a leadership role in their team. Another study with Brazilian women during the COVID-19 pandemic [43] revealed that women faced even more WORK-LIFE BALANCE ISSUES, lacking support with housework and child care responsibilities. As opposed to the GLASS CEILING reported by Canedo et al. [14]'s study and our results, the main challenge reported by the Zimbabwean women from [52]'s study was lack of digital exposure and career guidance from young age. So far, the studies point at generic aspects, focusing on understanding a broader, more comprehensive picture of the challenges faced by women. In our work, we evolve the findings by diagnosing an organization that is investing in programs to attract and retain women.
|
| 290 |
+
|
| 291 |
+
Women who contribute to F/OSS projects have reported some of the challenges also reported by women from the present study: work-life balance issues [42], impostor syndrome [5, 42], lack of peer parity [13, 56], prove-it-again [36] and sexism [12, 53, 67]. Non-inclusive communication is faced by F/OSS women in code reviews and mailing lists [5, 55, 58], and was reported as an aspect of the hostile sexism faced by women in the present study, which they described taking place during meetings. While in F/OSS women face bias against contributions when they explicitly identify as women[13, 71], in our study we found a lack of recognition. Stereotypes manifest the common expectations about members of certain social groups. Both the descriptive (how women are) and prescriptive (how women should be) gender stereotypes and the expectations they produce can compromise a woman's career progress [32, 33]. In F/OSS projects, women reported facing stereotyping that box them into specializations despite their manifest protest [74] and being treated by men as if they were their mothers, asking for advice about how to dress and behave but refusing to enter into a technical dialogue [53]. While women in our study reported also facing stereotypes, for them it was part of the MATERNITY WALL and receiving fewer responsibilities because they
|
| 292 |
+
|
| 293 |
+
had children. And whereas F/OSS women reported that they faced obstacles to finding a mentor, since upon discovering their mentee's gender, men mentors can treat the relationship as a dating opportunity [53], this was not reported by women from the present study.
|
| 294 |
+
|
| 295 |
+
# 6 THREATS TO VALIDITY
|
| 296 |
+
|
| 297 |
+
There are some limitations related to our research results.
|
| 298 |
+
|
| 299 |
+
Internal validity. The characteristics of our sample may have influenced our results. Although the company has office in several locations, a great part of the responses (42 out of 64) were from women who live in India. Thus, the challenges and suggestions can reflect some of the specific socio-cultural problems and aspirations for women from this country. The most prevalent nationality of employees overall from the studied suborganization is Indian $(55\%)$ .
|
| 300 |
+
|
| 301 |
+
External Validity. The results are valid for the studied sub-organization of Ericsson and additional research is necessary to investigate the challenges and suggestions in other contexts.
|
| 302 |
+
|
| 303 |
+
Survival bias. Our results reflect the opinion of current employees. Therefore, to increase women's participation by fully understanding the reasons they might leave, we acknowledge that additional research is necessary to understand the point of view of the women who left the company. To mitigate this threat, we asked the participants about reasons that prompted a woman they know to leave the company.
|
| 304 |
+
|
| 305 |
+
Recall bias. As our questions were open-ended, our results could be impacted by either salience bias, where respondents focus on definitions that are prominent or emotionally striking and not necessarily all the factors that matter; or by memory bias, where participants answered questions based on what they can first recall. However, topics that are relevant to the respondent often emerge from the spontaneous answers.
|
| 306 |
+
|
| 307 |
+
Data Consistency. Consistency refers to ensuring that the results consistently follow from the data and there is no inference that cannot be supported after data analysis [48]. The group of researchers performed the qualitative analysis of questionnaire's responses. We had weekly meetings to discuss and adjust codes and categories until reaching agreement. In the meetings, we also checked the consistency of our interpretations. All analysis was thoroughly grounded in the data collected and exhaustively discussed amongst the whole team. The team includes researchers with extensive experience in qualitative methods.
|
| 308 |
+
|
| 309 |
+
Theoretical saturation. A potential limitation in qualitative studies regards reaching theoretical saturation. From participants in this study with different backgrounds and perceptions about the studied phenomenon, we received 64 responses for the challenges question and 70 for the strategies. The participants were diverse in terms of experience, tenure, age, family status. Therefore, although theoretical saturation cannot be claimed, we believe that we obtained a consistent and comprehensive account of the phenomenon for the studied case. After analyzing the $40^{th}$ response of challenge and the $29^{th}$ response of suggestion we did not find any new categories, using the existing categories for the following 24 challenges and 41 suggestions.
|
| 310 |
+
|
| 311 |
+
# 7 CONCLUSION
|
| 312 |
+
|
| 313 |
+
This paper presents a case study aiming at understanding the challenges faced by women in a large software company and collecting strategies to increase the number of women. We found that even with the commitment with diversity and inclusion from Ericsson, women still perceive challenges and call for changes.
|
| 314 |
+
|
| 315 |
+
We also showed that the cultural structural sexism present in society is mirrored in the professional environment. There is still a long work ahead for Ericsson, for the software industry, and for us, as society, to create a more diverse and inclusive environment. We hope our results will enlighten actions towards reducing the perceived challenges and (more importantly, maybe) increasing awareness about the structural and cultural hurdles imposed on women that negatively influence diversity in the software industry.
|
| 316 |
+
|
| 317 |
+
# REFERENCES
|
| 318 |
+
|
| 319 |
+
[1] 2021. Global Gender Gap Report. Technical Report. World Economic Forum. https://www3.weforum.org/docs/WEF_GGGR_2021.pdf Accessed: 2021-10-18.
|
| 320 |
+
[2] Khaled Albusays, Pernille Bjorn, Laura Dabbish, Denae Ford, Emerson Murphy-Hill, Alexander Serebrenik, and Margaret-Anne Storey. 2021. The diversity crisis in software development. IEEE Software 38, 2 (2021), 19-25.
|
| 321 |
+
[3] Gordon Willard Allport, Kenneth Clark, and Thomas Pettigrew. 1954. The nature of prejudice. (1954).
|
| 322 |
+
[4] A Arlow, Jacob. 1955. Motivation and Personality: By AH Maslow. New York: Harper & Brothers, 1954. 411 pp. Psychoanalytic Quarterly 24 (1955), 447-448.
|
| 323 |
+
[5] Sogol Balali, Igor Steinmacher, Umayal Annamalai, Anita Sarma, and Marco Aurelio Gerosa. 2018. Newcomers' barriers... is that all? an analysis of mentors' and newcomers' barriers in OSS projects. Computer Supported Cooperative Work (CSCW) 27, 3-6 (2018), 679-714.
|
| 324 |
+
[6] Lecia J Barker, Charlie McDowell, and Kimberly Kalahar. 2009. Exploring factors that influence computer science introductory course students to persist in the major. ACM Sigcse Bulletin 41, 1 (2009), 153-157.
|
| 325 |
+
[7] Monica Biernat and Diane Kobrynowicz. 1997. Gender-and race-based standards of competence: lower minimum standards but higher ability standards for devalued groups. Journal of personality and social psychology 72, 3 (1997), 544.
|
| 326 |
+
[8] Pernille Bjørn and Maria Menendez-Blanco. 2019. FemTech: Broadening participation to digital technology development. In Proceedings of the 27th ACM International Conference on Multimedia. 510-511.
|
| 327 |
+
[9] Pernille Bjørn and Daniela K Rosner. 2021. Intertextual design: The hidden stories of Atari women. Human-Computer Interaction (2021), 1-26.
|
| 328 |
+
[10] Maria Brandén. 2014. Gender, gender ideology, and couples' migration decisions. Journal of Family Issues 35, 7 (2014), 950-971.
|
| 329 |
+
[11] Judith Butler. 1999. Gender is burning: Questions of. Feminist film theory: A reader (1999), 336.
|
| 330 |
+
[12] Dafne Calvo. 2021. The (in) visible barriers to free software: Inequalities in online communities in Spain. Studies in Communication Sciences 21, 1 (2021), 163-178.
|
| 331 |
+
[13] Edna Dias Canedo, Rodrigo Bonifacio, Mácio Vinicius Okimoto, Alexander Serebrenik, Gustavo Pinto, and Eduardo Monteiro. 2020. Work Practices and Perceptions from Women Core Developers in OSS Communities. In Proceedings of the 14th ACM/IEEE International Symposium on Empirical Software Engineering and Measurement (ESEM). 1-11.
|
| 332 |
+
[14] Edna Dias Canedo, Fabiana Freitas Mendes, Anderson Jefferson Cerqueira, Rodrigo Bonifacio, Mário Vinicius Okimoto, and Gustavo Pinto. 2021. Breaking one barrier at a time: how women developers cope in a men-dominated industry. In Software Engineering (SBES), 2021 Brazilian Symposium on. IEEE.
|
| 333 |
+
[15] Gemma Catolino, Fabio Palomba, Damian A Tamburri, Alexander Serebrenik, and Filomena Ferrucci. 2019. Gender diversity and women in software teams: How do they affect community smells?. In 2019 IEEE/ACM 41st International Conference on Software Engineering: Software Engineering in Society (ICSE-SEIS). IEEE, 11–20.
|
| 334 |
+
[16] Pauline Rose Clance and Suzanne Ament Imes. 1978. The imposter phenomenon in high achieving women: Dynamics and therapeutic intervention. *Psychotherapy: Theory, research & practice* 15, 3 (1978), 241.
|
| 335 |
+
[17] Pauline Rose Clance and Maureen Ann OToole. 1987. The imposter phenomenon: An internal barrier to empowerment and achievement. *Women & Therapy* 6, 3 (1987), 51-64.
|
| 336 |
+
[18] Oranit B Davidson, Dov Eden, Mina Westman, Yochi Cohen-Charash, Leslie B Hammer, Avraham N Kluger, Moshe Krausz, Christina Maslach, Michael O'Driscoll, Pamela L Perrewe, et al. 2010. Sabbatical leave: who gains and how much? Journal of Applied Psychology 95, 5 (2010), 953.
|
| 337 |
+
|
| 338 |
+
[19] Christopher P Earley and Elaine Mosakowski. 2000. Creating hybrid team cultures: An empirical test of transnational team functioning. Academy of Management journal 43, 1 (2000), 26-49.
|
| 339 |
+
[20] Nathan L Ensmenger. 2012. The computer boys take over: Computers, programmers, and the politics of technical expertise. Mit Press.
|
| 340 |
+
[21] Lorelle Espinosa. 2011. Pipelines and pathways: Women of color in undergraduate STEM majors and the college experiences that contribute to persistence. Harvard Educational Review 81, 2 (2011), 209-241.
|
| 341 |
+
[22] Denae Ford, Alisse Harkins, and Chris Parnin. 2017. Someone like me: How does peer parity influence participation of women on stack overflow?. In 2017 IEEE symposium on visual languages and human-centric computing (VL/HCC). IEEE, 239-243.
|
| 342 |
+
[23] Denae Ford, Margaret-Anne Storey, Thomas Zimmermann, Christian Bird, Sonia Jaffe, Chandra Maddila, Jenna L Butler, Brian Houck, and Nachiappan Nagappan. 2020. A tale of two cities: Software developers working from home during the Covid-19 pandemic. arXiv preprint arXiv:2008.11147 (2020).
|
| 343 |
+
[24] Katherine Giscombe. 2017. Creating effective formal mentoring programs for women of color. In Mentoring Diverse Leaders. Routledge, 145-158.
|
| 344 |
+
[25] Yehonatan Givati and Ugo Troiano. 2012. Law, economics, and culture: Theory of mandated benefits and evidence from maternity leave policies. The Journal of Law and Economics 55, 2 (2012), 339-364.
|
| 345 |
+
[26] Christy Glass and Alison Cook. 2016. Leading at the top: Understanding women's challenges above the glass ceiling. The Leadership Quarterly 27, 1 (2016), 51-63.
|
| 346 |
+
[27] Peter Glick and Susan T Fiske. 1996. The ambivalent sexism inventory: Differentiating hostile and benevolent sexism. Journal of personality and social psychology 70, 3 (1996), 491.
|
| 347 |
+
[28] David E Guest. 2002. Perspectives on the study of work-life balance. Social Science Information 41, 2 (2002), 255-279.
|
| 348 |
+
[29] David R Hannah and Brenda A Lautsch. 2011. Counting in qualitative research: Why to conduct it, when to avoid it, and when to closet it. Journal of Management Inquiry 20, 1 (2011), 14-22.
|
| 349 |
+
[30] Rana Haq, 2013. Intersectionality of gender and other forms of identity: Dilemmas and challenges facing women in India. Gender in Management: An International Journal (2013).
|
| 350 |
+
[31] Michael Harvey. 1998. Dual-career couples during international relocation: The trailing spouse. International Journal of Human Resource Management 9, 2 (1998), 309-331.
|
| 351 |
+
[32] Madeline E Heilman. 2001. Description and prescription: How gender stereotypes prevent women's ascent up the organizational ladder. Journal of social issues 57, 4 (2001), 657-674.
|
| 352 |
+
[33] Madeline E Heilman. 2012. Gender stereotypes and workplace bias. Research in organizational Behavior 32 (2012), 113-135.
|
| 353 |
+
[34] Mar Hicks. 2017. Programmed inequality: How Britain discarded women technologists and lost its edge in computing. MIT Press.
|
| 354 |
+
[35] Sonja M Hyrynsalmi. 2019. The underrepresentation of women in the software industry: thoughts from career-changing women. In 2019 IEEE/ACM 2nd International Workshop on Gender Equality in Software Engineering (GE). IEEE, 1-4.
|
| 355 |
+
[36] Nasif Imtiaz, Justin Middleton, Joymallya Chakraborty, Neill Robson, Gina Bai, and Emerson Murphy-Hill. 2019. Investigating the Effects of Gender Bias on GitHub. In 2019 IEEE/ACM 41st International Conference on Software Engineering (ICSE). IEEE, 700-711.
|
| 356 |
+
[37] Janet Cooper Jackson. 2001. Women middle managers' perception of the glass ceiling. *Women in management review* (2001).
|
| 357 |
+
[38] Omofolasade Kosoko-Lasaki, Roberta E Sonnino, and Mary Lou Voytko. 2006. Mentoring for women and underrepresented minority faculty and students: experience at two institutions of higher education. Journal of the national medical association 98, 9 (2006), 1449.
|
| 358 |
+
[39] Jana Krause, Werner Krause, and Piaa Bränfors. 2018. Women's participation in peace negotiations and the durability of peace. International Interactions 44, 6 (2018), 985-1016.
|
| 359 |
+
[40] Victor Kuechler, Claire Gilbertson, and Carlos Jensen. 2012. Gender differences in early free and open source software joining process. In IFIP International Conference on Open Source Systems. Springer, 78-93.
|
| 360 |
+
[41] William Lawrence Neuman. 2014. Social research methods: Qualitative and quantitative approaches.
|
| 361 |
+
[42] Amanda Lee and Jeffrey Carver. 2019. FLOSS Participants' Perceptions about Gender and Inclusiveness: A Survey. In 41st International Conference on Software Engineering.
|
| 362 |
+
[43] Leticia S Machado, Clara Caldeira, Marcelo Gattermann Perin, and Cleidson RB de Souza. 2020. Gendered experiences of software engineers during the COVID-19 crisis. IEEE Software 38, 2 (2020), 38–44.
|
| 363 |
+
[44] Sujata Manohar. 2001. Human rights for women's empowerment. Empowering the Indian Women, Publications Division, Ministry of Information and Broadcasting, Government of India, New Delhi (2001).
|
| 364 |
+
[45] Jane Margolis, Rachel Estrella, Joanna Goode, Jennifer Jellison Holme, and Kim Mao. 2017. Stuck in the shallow end: Education, race, and computing.
|
| 365 |
+
|
| 366 |
+
[46] Jane Margolis and Allan Fisher. 2002. Unlocking the clubhouse: Women in computing. MIT press.
|
| 367 |
+
[47] Alvaro Menezes and Rafael Prikladnicki. 2018. Diversity in software engineering. In Proceedings of the 11th International Workshop on Cooperative and Human Aspects of Software Engineering. 45-48.
|
| 368 |
+
[48] Sharan B Merriam and Elizabeth J Tisdell. 2015. Qualitative research: A guide to design and implementation. John Wiley & Sons.
|
| 369 |
+
[49] Matthew B Miles and A Michael Huberman. 1994. Qualitative data analysis: An expanded sourcebook. sage.
|
| 370 |
+
[50] Michael T Miller and Bai Kang. 1998. A Case Study of Post-Sabbatical Assessment Measures. Journal of Staff, Program & Organization Development 15, 1 (1998), 11-16.
|
| 371 |
+
[51] Michael J Muller and Sarah Kuhn. 1993. Participatory design. *Commun. ACM* 36, 6 (1993), 24-28.
|
| 372 |
+
[52] Samuel Musungwini, Tinashe Gwendoline Zhou, and Linnet Musungwini. 2020. Challenges facing women in ICT from a women perspective: A case study of the Zimbabwean Banking Sector and Telecommunications Industry. Journal of Systems Integration 11, 1 (2020), 21-33.
|
| 373 |
+
[53] Dawn Nafus. 2012. 'Patches don't have gender': What is not open in open source software. New Media & Society 14, 4 (2012), 669-683. https://doi.org/10.1177/146144811422887
|
| 374 |
+
[54] Barbara Orser, Allan Riding, and Joanne Stanley. 2012. Perceived career challenges and response strategies of women in the advanced technology sector. Entrepreneurship & Regional Development 24, 1-2 (2012), 73-93.
|
| 375 |
+
[55] Rajshakhar Paul, Amiangshu Bosu, and Kazi Zakia Sultana. 2019. Expressions of sentiments during code reviews: Male vs. female. In 2019 IEEE 26th International Conference on Software Analysis, Evolution and Reengineering (SANER). IEEE, 26-37.
|
| 376 |
+
[56] Whitney E Powell, D Scott Hunsinger, and B Dawn Medlin. 2010. Gender differences within the open source community: An exploratory study. Journal of Information Technology 21, 4 (2010), 29-37.
|
| 377 |
+
[57] Teade Punter, Marcus Ciolkowski, Bernd Freimut, and Isabel John. 2003. Conducting on-line surveys in software engineering. In 2003 Intel Symp on Emp Softw Eng. 80-88.
|
| 378 |
+
[58] Huilian Sophie Qiu, Yucen Lily Li, Susmita Padala, Anita Sarma, and Bogdan Vasilescu. 2019. The signals that potential contributors look for when choosing open-source projects. Proceedings of the ACM on Human-Computer Interaction 3, CSCW (2019), 1-29.
|
| 379 |
+
[59] Parvati Raghuram, Clem Herman, Esther Ruiz-Ben, and Gunjan Sondhi. 2017. Women and IT scorecard-India.
|
| 380 |
+
[60] Paul Ralph, Sebastian Baltes, Gianisa Adisaputri, Richard Torkar, Vladimir Kovalenko, Marcos Kalinowski, Nicole Novielli, Shin Yoo, Xavier Devroey, Xin Tan, et al. 2020. Pandemic programming. Empirical Software Engineering 25, 6 (2020), 4927-4961.
|
| 381 |
+
[61] Aditi Ramakrishnan, Dana Sambuco, and Reshma Jagsi. 2014. Women's participation in the medical profession: insights from experiences in Japan, Scandinavia, Russia, and Eastern Europe. Journal of Women's Health 23, 11 (2014), 927-934.
|
| 382 |
+
[62] P Runeson, M Höst, A Rainer, and B Regnell. 2012. Case study research in software engineering-guidelines and examples Wiley.
|
| 383 |
+
[63] Linda J Sax, Jennifer M Blaney, Kathleen J Lehman, Sarah L Rodriguez, Kari L George, and Christina Zavala. 2018. Sense of belonging in computing: The role of introductory courses for women and underrepresented minority students. Social Sciences 7, 8 (2018), 122.
|
| 384 |
+
[64] Sakshi Sharma and Parul Sehrawat. 2014. Glass ceiling for women: Does it exist in the modern India. Journal of Organization & Human Behaviour 3, 2-3 (2014), 9-15.
|
| 385 |
+
[65] Lucy Sharp. 2016. UNESCO's Priority Gender Equality Action Plan 2014-2021. Impact 2016, 1 (2016), 37-38.
|
| 386 |
+
[66] Karina Kohl Silveira and Rafael Prikladnicki. 2019. A systematic mapping study of diversity in software engineering: a perspective from the agile methodologies. In 2019 IEEE/ACM 12th International Workshop on Cooperative and Human Aspects of Software Engineering (CHASE). IEEE, 7-10.
|
| 387 |
+
[67] Vandana Singh, 2019. Women participation in open source software communities. In Proceedings of the 13th European Conference on Software Architecture-Volume 2, 94-99.
|
| 388 |
+
[68] Donna Spencer. 2009. Card sorting: Designing usable categories. Rosenfeld Media.
|
| 389 |
+
[69] Terrell L Strayhorn. 2012. Exploring the impact of Facebook and Myspace use on first-year students' sense of belonging and persistence decisions. Journal of College Student Development 53, 6 (2012), 783-796.
|
| 390 |
+
[70] Jane Sturges and David Guest. 2004. Working to live or living to work? Work/life balance early in the career. Human Resource Management Journal 14, 4 (2004), 5-20.
|
| 391 |
+
[71] Josh Terrell, Andrew Kofink, Justin Middleton, Clarissa Raineur, Emerson Murphy-Hill, Chris Parnin, and Jon Stallings. 2017. Gender differences and bias in open source: Pull request acceptance of women versus men. Peerj Computer Science 3 (2017), e111. https://doi.org/10.7287/peerj.preprints.1733v2
|
| 392 |
+
|
| 393 |
+
[72] Bogdan Vasilescu, Vladimir Filkov, and Alexander Serebrenik. 2015. Perceptions of diversity on git hub: A user survey. In 2015 IEEE/ACM 8th International Workshop on Cooperative and Human Aspects of Software Engineering. IEEE, 50-56.
|
| 394 |
+
[73] Bogdan Vasilescu, Daryl Posnett, Baishakhi Ray, Mark GJ van den Brand, Alexander Serebrenik, Premkumar Devanbu, and Vladimir Filkov. 2015. Gender and tenure diversity in GitHub teams. In Proceedings of the 33rd annual ACM conference on human factors in computing systems. 3789-3798.
|
| 395 |
+
[74] Balazs Vedres and Orsolya Vasarhelyi. 2019. Gendered behavior as a disadvantage in open source software development. EPJ Data Science 8, 1 (2019), 25.
|
| 396 |
+
[75] Joan C Williams and Stephanie Bornstein. 2007. Evolution of fred: Family responsibilities discrimination and developments in the law of stereotyping and implicit bias. *HastIngs* I59 (2007), 1311.
|
| 397 |
+
[76] Katherine Y Williams and AO Charles. 1998. 'Reilly. 1998. Demography and diversity in organizations: A review of 40 years of research. Research in organizational behavior 20, 20 (1998), 77-140.
|
| 398 |
+
[77] Claes Wohlin and Ayülke Aurum. 2015. Towards a decision-making structure for selecting a research design in empirical software engineering. Empirical Software Engineering 20, 6 (2015), 1427-1455.
|
| 399 |
+
[78] Annika Wolff, Antti Knutas, and Paula Savolainen. 2020. What prevents Finnish women from applying to software engineering roles? A preliminary analysis of survey data. In Proceedings of the ACM/IEEE 42nd International Conference on Software Engineering: Software Engineering Education and Training. 93-102.
|
| 400 |
+
[79] Joyce Yen, Kate Quinn, Sheila Edwards Lange, Eve Riskin, and Denice Denton. 2005. ADVANCE mentoring programs for women faculty in SEM at the University of Washington. In 2005 Annual Conference. 10-134.
|
| 401 |
+
[80] Robert K Yin. 2009. Case study research: Design and methods. Vol. 5. sage.
|
| 402 |
+
[81] Stefano Zacchiroli. 2020. Gender Differences in Public Code Contributions: a 50-year Perspective. IEEE Software (2020).
|
2203.10xxx/2203.10555/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f3baedc80b8c288f649bed5b8599b6470b34a4495bb09f09aeb7d5f3fab2c75d
|
| 3 |
+
size 659145
|
2203.10xxx/2203.10555/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2203.10xxx/2203.10576/3d1e200d-486c-45fe-a2fc-9629576fe0af_content_list.json
ADDED
|
@@ -0,0 +1,1761 @@
|
+[
+  {
+    "type": "text",
+    "text": "Multi-view Multi-behavior Contrastive Learning in Recommendation",
+    "text_level": 1,
+    "bbox": [218, 140, 782, 183],
+    "page_idx": 0
+  },
+  {
+    "type": "text",
+    "text": "Yiqing Wu$^{1,2,3}$, Ruobing Xie$^{3}$, Yongchun Zhu$^{1,2}$, Xiang Ao$^{1,2}$, Xin Chen$^{3}$, Xu Zhang$^{3}$, Fuzhen Zhuang$^{4,5}$, Leyu Lin$^{3}$, and Qing He$^{\\star 1,2}$",
+    "bbox": [217, 210, 785, 243],
+    "page_idx": 0
+  },
+  {
+    "type": "text",
+    "text": "<sup>1</sup> Key Lab of Intelligent Information Processing of Chinese Academy of Sciences (CAS), Institute of Computing Technology, CAS, Beijing 100190, China",
+    "bbox": [232, 253, 767, 282],
+    "page_idx": 0
+  },
+  {
+    "type": "text",
+    "text": "$^{2}$ University of Chinese Academy of Sciences, Beijing 100049, China",
+    "bbox": [274, 282, 728, 297],
+    "page_idx": 0
+  },
+  {
+    "type": "text",
+    "text": "<sup>3</sup> WeChat Search Application Department, Tencent, China",
+    "bbox": [303, 296, 697, 310],
+    "page_idx": 0
+  },
+  {
+    "type": "text",
+    "text": "$^{4}$ Institute of Artificial Intelligence, Beihang University, Beijing 100191, China",
+    "bbox": [240, 309, 761, 324],
+    "page_idx": 0
+  },
+  {
+    "type": "text",
+    "text": "$^{5}$ Xiamen Institute of Data Intelligence, Xiamen, China {wuyiqing20s,zhuyongchun18s,aoxiang,heqing}@ict.ac.cn,",
+    "bbox": [272, 323, 702, 352],
+    "page_idx": 0
+  },
+  {
+    "type": "text",
+    "text": "{ruobingxie, andrewxchen, xuonezhang, goshawklin}@tencent.com,",
+    "bbox": [271, 353, 730, 366],
+    "page_idx": 0
+  },
+  {
+    "type": "text",
+    "text": "zhuangfuzhen@buaa.edu.cn",
+    "bbox": [406, 367, 594, 378],
+    "page_idx": 0
+  },
+  {
+    "type": "text",
+    "text": "Abstract. Multi-behavior recommendation (MBR) aims to jointly consider multiple behaviors to improve the target behavior's performance. We argue that MBR models should: (1) model the coarse-grained commonalities between different behaviors of a user, (2) consider both individual sequence view and global graph view in multi-behavior modeling, and (3) capture the fine-grained differences between multiple behaviors of a user. In this work, we propose a novel Multi-behavior Multi-view Contrastive Learning Recommendation (MMCLR) framework, including three new CL tasks to solve the above challenges, respectively. The multi-behavior CL aims to make different single-behavior representations of the same user in each view similar. The multi-view CL attempts to bridge the gap between a user's sequence-view and graph-view representations. The behavior distinction CL focuses on modeling fine-grained differences of different behaviors. In experiments, we conduct extensive evaluations and ablation tests to verify the effectiveness of MMCLR and various CL tasks on two real-world datasets, achieving SOTA performance over existing baselines. Our code will be available at https://github.com/wyqing20/MMCLR",
+    "bbox": [259, 411, 738, 661],
+    "page_idx": 0
+  },
+  {
+    "type": "text",
+    "text": "Keywords: multi-behavior recommendation $\\cdot$ contrastive learning",
+    "bbox": [261, 674, 709, 688],
+    "page_idx": 0
+  },
+  {
+    "type": "text",
+    "text": "1 Introduction",
+    "text_level": 1,
+    "bbox": [215, 710, 375, 727],
+    "page_idx": 0
+  },
+  {
+    "type": "text",
+    "text": "Personalized recommendation aims to provide appropriate items for users according to their preferences. The core problem of personalized recommendation is how to accurately capture user preferences from user behaviors. In real-world scenarios, users usually have different types of behaviors to interact with recommender systems. For example, users can click, add to cart, purchase, and",
+    "bbox": [212, 742, 787, 818],
+    "page_idx": 0
+  },
+  {
+    "type": "aside_text",
+    "text": "arXiv:2203.10576v1 [cs.IR] 20 Mar 2022",
+    "bbox": [22, 268, 57, 705],
+    "page_idx": 0
+  },
+  {
+    "type": "page_footnote",
+    "text": "* indicates corresponding author. † indicates equal contributions.",
+    "bbox": [217, 825, 653, 839],
+    "page_idx": 0
+  },
+  {
+    "type": "text",
+    "text": "write reviews for items in E-commerce systems (e.g., Amazon, Taobao). Some conventional recommendation models [15] often rely on a single behavior for recommendation. However, it may suffer from severe data sparsity [14,11,38] and cold-start problems [10,26,37,39] in practical systems, especially for some high-cost and low-frequency behaviors. In this case, other behaviors (e.g., click and add to cart) can provide additional information for user understanding, which reflects users' diverse and multi-grained preferences from different aspects.",
+    "bbox": [212, 146, 787, 252],
+    "page_idx": 1
+  },
+  {
+    "type": "text",
+    "text": "Multi-behavior recommendation (MBR), which jointly considers different types of behaviors to learn user preferences better, has been widely explored and verified in practice [2,1,19]. ATRank [34] uses self-attention to model feature interactions between different behaviors of a user in sequence-based recommendation, focusing on the individual sequence view of a single user's historical behaviors. In contrast, MBGCN [9] considers different behaviors in graph-based recommendation, focusing on the global graph view of all users' interactions. However, there are still three challenges in MBR:",
+    "bbox": [212, 255, 787, 376],
+    "page_idx": 1
+  },
+  {
+    "type": "list",
+    "sub_type": "text",
+    "list_items": [
+      "(1) How to model the coarse-grained commonality between different behaviors of a user? All types of behaviors of a user reflect this user's preferences from certain aspects, and thus these behaviors naturally share some commonalities. Considering the commonalities between different behaviors could help to learn better user representations to fight against the data sparsity issues. However, it is challenging to extract informative commonalities between different behaviors for recommendation, which is often ignored in existing MBR models.",
+      "(2) How to jointly consider both individual and global views of multi-behavior modeling? Conventional MBR models are often implemented on either sequence-based or graph-based models separately based on different views. The sequence-based MBR focuses more on the individual view of a user's multiple sequential behaviors to learn user representations [34]. In contrast, the graph-based MBR often concentrates on the global view of all users' behaviors, with multiple behaviors regarded as edges [9]. Different views (individual/global) and modeling methods (sequence/graph-based) build up different sides of users, which are complementary to each other and are helpful in MBR.",
+      "(3) How to learn the fine-grained differences between multiple behaviors of a user? Besides the coarse-grained commonalities, users' multiple behaviors also have fine-grained differences. There are preference priorities even among the target and other behaviors (e.g., purchase $>$ click). In real-world E-commerce datasets, the average number of click is often more than 7 times that of the average number of purchase [9]. The large numbers of clicked but not purchased items, viewed as hard negative samples, may reflect essential latent disadvantages that prevent users from purchasing items. Existing works seldom consider the differences between multiple behaviors, and we attempt to encode this fine-grained information into users' multi-behavior representations.",
+      "Recently, contrastive learning (CL) has shown its magic in recommendation, which greatly alleviates the data sparsity and popularity bias issues [36]. We find that CL is naturally suitable for modeling coarse-grained commonalities and fine-grained differences between multi-behavior and multi-view user rep"
+    ],
+    "bbox": [214, 378, 787, 840],
+    "page_idx": 1
+  },
+  {
+    "type": "page_number",
+    "text": "2",
+    "bbox": [217, 114, 228, 126],
+    "page_idx": 1
+  },
+  {
+    "type": "header",
+    "text": "Authors Suppressed Due to Excessive Length",
+    "bbox": [271, 114, 573, 128],
+    "page_idx": 1
+  },
+  {
+    "type": "text",
+    "text": "resentations. To solve the above challenges, we propose a novel Multi-behavior Multi-view Contrastive Learning Recommendation (MMCLR) framework. Specifically, MMCLR contains a sequence module and a graph module to jointly capture multiple behaviors' relationships, learning multiple user representations from different views and behaviors. We design three contrastive learning tasks for existing challenges, including the multi-behavior CL, the multi-view CL, and the behavior distinction CL. (1) The multi-behavior CL is conducted between multiple behaviors in both sequence and graph views. It assumes that user representations learned from different behaviors of the same user should be closer to each other compared to other users' representations, which focuses on extracting the commonalities between different types of behaviors. (2) The multi-view CL is a harder CL conducted between user representations in two views. It highlights the commonalities between the local sequence-based and the global graph-based user representations after behavior-level aggregations, and thus improves both views' modeling qualities. (3) The behavior distinction CL, unlike the multi-behavior CL, concentrates on the fine-grained differences rather than the coarse-grained commonalities between different types of behaviors. It is specially designed to capture users' fine-grained preferences on the target behavior's prediction task (e.g., purchase). The combination of CL tasks can multiply the additional information brought by multiple behaviors in the target recommendation task. Through the MMCLR framework assisted with three types of auxiliary CL losses, MBR models can better understand the informative commonalities and differences between different user behaviors and modeling views, and thus improve the overall performances.",
+    "bbox": [217, 146, 785, 507],
+    "page_idx": 2
+  },
+  {
+    "type": "text",
+    "text": "In experiments, we evaluate MMCLR on real-world MBR datasets. The significant improvements over competitive baselines and ablation versions demonstrate the effectiveness of MMCLR and its different CL tasks and components. The contributions of this work are summarized as follows:",
+    "bbox": [215, 508, 784, 568],
+    "page_idx": 2
+  },
+  {
+    "type": "list",
+    "sub_type": "text",
+    "list_items": [
+      "- We systematically consider multiple contrastive learning tasks in MBR. To the best of our knowledge, this is the first attempt to bring in contrastive learning in multi-behavior recommendation.",
+      "- We propose a multi-behavior CL task and a multi-view CL task, which model the coarse-grained commonalities between different behaviors and (individual sequence/global graph) views for better representation learning.",
+      "- We also design a behavior distinction CL task, which creatively highlights the fine-grained differences and behavior priorities between multiple behaviors via a contrastive learning framework.",
+      "- MMCLR outperforms SOTA baselines on all datasets and metrics. All proposed CL tasks and the capability on cold-start scenarios are also verified."
+    ],
+    "bbox": [225, 569, 784, 734],
+    "page_idx": 2
+  },
+  {
+    "type": "text",
+    "text": "2 Related Work",
+    "text_level": 1,
+    "bbox": [215, 761, 387, 777],
+    "page_idx": 2
+  },
+  {
+    "type": "text",
+    "text": "Sequence-based & Graph-based Recommendation. Sequence-based recommendation mainly leverages users' sequential behavior to mine users' interests, which focuses on individual information. Recently, various deep neural net-",
+    "bbox": [215, 794, 785, 839],
+    "page_idx": 2
+  },
+  {
+    "type": "header",
+    "text": "Multi-view Multi-behavior Contrastive Learning in Recommendation",
+    "bbox": [272, 114, 730, 128],
+    "page_idx": 2
+  },
+  {
+    "type": "page_number",
+    "text": "3",
+    "bbox": [774, 116, 784, 126],
+    "page_idx": 2
+  },
+  {
+    "type": "text",
+    "text": "works have been employed for sequence-based recommendation, e.g., RNN [7], memory networks [3], attention mechanisms [23,35,15,30] and mixed models [29,20]. Graph-based recommendation aims to use high-order interaction information contained in the graph, which is able to model the global information of user preferences. Existing works have proved the effectiveness of GNNs in learning user and item representations [17,27]. In this work, we exploit both individual sequence view and global graph view in MBR.",
+    "bbox": [212, 146, 799, 251],
+    "page_idx": 3
+  },
+  {
+    "type": "text",
+    "text": "Multi-behavior Recommendation. Inspired by transfer learning [42,40,41], multi-behavior recommendation takes advantage of users' other behaviors to help predict the target behavior. Ajit et al. [14] take multi-behavior into consideration via a collective matrix factorization. Recent works often model MBR via sequence or graph-based models [19,25]. MRIG [16] builds sequential graphs on users' behavior sequences. MBGCN [9] learns user-item and item-item similarities on the designed user-item graph and different co-behavior graphs. Other works combine MBR with meta-learning [22] and external knowledge [21]. However, these methods do not make full use of the correlations between behaviors via CL. In this paper, we propose a universal framework that utilizes contrastive learning to model the relations of different behaviors.",
+    "bbox": [212, 252, 787, 417],
+    "page_idx": 3
+  },
+  {
+    "type": "text",
+    "text": "Self-supervised Learning. Self-supervised learning (SSL) aims at training a network by pretext tasks, which are designed according to the characteristics of raw data. Recently, self-supervised learning has shown its superior ability in CV [5,31], NLP [4], and Graph [12] fields. Some works also adopt self-supervised learning in recommender systems [36,28,18,24].",
+    "bbox": [212, 417, 787, 494],
+    "page_idx": 3
+  },
+  {
+    "type": "text",
+    "text": "However, most of them fall into single-behavior methods. In this paper, we focus on modeling the commonalities and differences between multiple behaviors and views of users with CL.",
+    "bbox": [212, 494, 787, 539],
+    "page_idx": 3
+  },
+  {
+    "type": "text",
+    "text": "3 Methodology",
+    "text_level": 1,
+    "bbox": [215, 561, 380, 580],
+    "page_idx": 3
+  },
+  {
+    "type": "text",
+    "text": "3.1 Preliminaries",
+    "text_level": 1,
+    "bbox": [215, 595, 372, 609],
+    "page_idx": 3
+  },
+  {
+    "type": "text",
+    "text": "MMCLR aims to make full use of multi-behavior and multi-view information to learn better representations for recommendation. We first give detailed definitions of the key notions in our multi-behavior recommendation as follows:",
+    "bbox": [212, 612, 785, 657],
+    "page_idx": 3
+  },
+  {
+    "type": "text",
+    "text": "Multi-behavior Modeling. In MBR, the most important and profitable behavior (e.g., purchase in E-commerce) is regarded as the target behavior, while it suffers from data sparsity issues. Specifically, we denote the user and item as $u \\in U$ and $v \\in V$ , where $U$ and $V$ are user set and item set. We suppose that users have $B$ types of behaviors $\\{b_1, \\dots, b_B\\}$ in a system, where $b_t$ is the target behavior. Multi-view Modeling. Users' multiple behaviors can be modeled with different views, highlighting different aspects of user preferences. In this work, we construct two views, including the sequence view and the graph view. For the sequence view, we represent the multi-behavioral sequence of user $u$ as $S_u = \\{s_u^{b_1}, s_u^{b_2}, \\dots, s_u^{b_B}\\}$ , where $s_u^b$ is the behavior sequence of user $u$ under behavior $b$ . For each behavior, we have the item sequence $s_u^b = \\{v_1, v_2, \\dots, v_{|s_u^b|}\\}$ . For the graph view, we build a global multi-relation user-item graph $G = (\\mathcal{V}, \\mathcal{E})$ ,",
+    "bbox": [212, 657, 787, 840],
+    "page_idx": 3
+  },
+  {
+    "type": "page_number",
+    "text": "4",
+    "bbox": [217, 114, 228, 126],
+    "page_idx": 3
+  },
+  {
+    "type": "header",
+    "text": "Authors Suppressed Due to Excessive Length",
+    "bbox": [271, 114, 573, 128],
+    "page_idx": 3
+  },
+  {
+    "type": "text",
+    "text": "where $\\mathcal{V}$ is the node set, and $\\mathcal{E}$ is the edge set. If user $u$ and item $v$ have an interaction under a certain behavior $b$ , there is an edge $e = (u,v,b) \\in \\mathcal{E}$ in graph $G$ . We use $\\pmb{u}_i^0$ and $\\pmb{v}_j^0$ to represent the corresponding raw feature of $u_i$ and $v_j$ .",
+    "bbox": [212, 146, 782, 193],
+    "page_idx": 4
+  },
+  {
+    "type": "text",
+    "text": "Problem definition. Given a user's multi-behavior sequences $S_{u}$ and the global multi-relation user-item graph $G$ , MMCLR should predict the most appropriate item $v$ that the user will interact under the target behavior $b_{t}$ .",
+    "bbox": [212, 193, 782, 239],
+    "page_idx": 4
+  },
+  {
+    "type": "text",
+    "text": "3.2 Framework of Multi-view Multi-behavior Recommendation",
+    "text_level": 1,
+    "bbox": [214, 243, 748, 258],
+    "page_idx": 4
+  },
+  {
+    "type": "text",
+    "text": "Overview. The model structure of MMCLR is illustrated in Fig.1. Our model mainly has three parts: multi-view encoder, multi-behavior fusion, and multi-view fusion. Three types of contrastive learning tasks are proposed to capture the multi-behavior and multi-view feature interactions. Specifically, for a user $u$ , the global user-item graph $G$ and the user's multi-behavior sequence $S_{u}$ are first fed to the sequence-view encoder and the graph-view encoder as inputs. In both sequence and graph encoders, we build $B$ user single-behavior representations according to each behavior, respectively. Second, these single-behavior representations under the same view are fused by the multi-behavior fusion module, with sequence/graph-based multi-behavior CL and behavior distinction CL tasks as auxiliary losses. Then, we combine the sequence-view and graph-view user representations by the multi-view fusion module with the multi-view CL, jointly considering individual and global preferences. Finally, the similarity between the fused user and item representations is viewed as the ranking score.",
+    "bbox": [212, 261, 784, 472],
+    "page_idx": 4
+  },
+  {
+    "type": "image",
+    "img_path": "images/36140ba8ec6e0344cb41dafe57bd50a1aacbf960de0fd37680a8a5e6b517789f.jpg",
+    "image_caption": [
+      "Fig. 1. Overall architecture of MMCLR with our proposed contrastive learning tasks."
+    ],
+    "image_footnote": [],
+    "bbox": [215, 503, 782, 672],
+    "page_idx": 4
+  },
+  {
+    "type": "text",
+    "text": "Multi-view Encoder. Conventional sequence-based recommendation models [34,15] often focus on the individual historical behaviors of a user, which aims to precisely capture the local sequential information of a user. In contrast, graph-based recommendation models [33,9] are often conducted on the whole user-item graph built by all users' behaviors, which can benefit from the global interactions. We argue that both individual sequence and global graph views are beneficial in multi-behavior recommendation.",
+    "bbox": [212, 734, 784, 839],
+    "page_idx": 4
+  },
+  {
+    "type": "header",
+    "text": "Multi-view Multi-behavior Contrastive Learning in Recommendation",
+    "bbox": [272, 114, 730, 128],
+    "page_idx": 4
+  },
+  {
+    "type": "page_number",
+    "text": "5",
+    "bbox": [774, 116, 784, 126],
+    "page_idx": 4
+  },
+  {
+    "type": "text",
+    "text": "Specifically, we implement an individual sequence-based encoder $\\mathrm{SeqEnc}(\\cdot)$ and a global graph-based encoder $\\mathrm{GraphEnc}(\\cdot)$ to learn users' and items' single-behavior representations separately. Formally, for the behavior $b$ :",
+    "bbox": [215, 146, 787, 191],
+    "page_idx": 5
+  },
+  {
+    "type": "equation",
+    "text": "\n$$\n\\boldsymbol{u}^{s,b} = \\operatorname{SeqEnc}^{b}\\left(\\boldsymbol{s}_{u}^{b}\\right), \\quad \\boldsymbol{u}^{g,b} = \\operatorname{GraphEnc}^{b}(G, u, b), \\tag{1}\n$$\n",
+    "text_format": "latex",
+    "bbox": [326, 200, 785, 218],
+    "page_idx": 5
+  },
+  {
+    "type": "text",
+    "text": "where $\\pmb{s}_u^b$ is the user's historical behavior sequence of $b$ , and $G$ is the global user-item graph. $\\pmb{u}^{s,b}$ and $\\pmb{u}^{g,b}$ indicate the user sequence-view and graph-view single-behavior representation of $b$ . Finally, we learn $2B$ single-behavior representations in two views for the next multi-behavior and multi-view fusions. Note that we can flexibly select appropriate sequence and graph models for $\\mathrm{SeqEnc}^b (\\cdot)$ and $\\mathrm{GraphEnc}^b (\\cdot)$ . Specifically, we adopt BERT4Rec and LightGCN as the sequence encoder and graph encoder. For LightGCN, we replace the original aggregator with a mean aggregator.",
+    "bbox": [212, 226, 787, 348],
+    "page_idx": 5
+  },
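
The per-behavior encoders of Eq. (1) can be sketched in a few lines. The following is an illustrative simplification, not the MMCLR code: a mean-pooled sequence encoder stands in for BERT4Rec, and a single LightGCN-style propagation uses the mean aggregator mentioned above; all class and argument names are hypothetical.

```python
import torch
import torch.nn as nn

class PerBehaviorEncoders(nn.Module):
    """Sketch of Eq. (1): u^{s,b} = SeqEnc^b(s_u^b), u^{g,b} = GraphEnc^b(G, u, b)."""
    def __init__(self, n_users, n_items, dim):
        super().__init__()
        self.item_emb = nn.Embedding(n_items, dim, padding_idx=0)  # raw item features v^0
        self.user_emb = nn.Embedding(n_users, dim)                 # raw user features u^0

    def seq_enc(self, seq):
        # seq: (batch, length) item ids of one behavior, 0 = padding.
        # Mean pooling over the sequence, standing in for a full BERT4Rec encoder.
        mask = (seq > 0).float().unsqueeze(-1)
        emb = self.item_emb(seq) * mask
        return emb.sum(1) / mask.sum(1).clamp(min=1.0)

    def graph_enc(self, users, adj_b):
        # adj_b: (n_users, n_items) row-normalized interaction matrix of behavior b,
        # so one propagation step is exactly a mean aggregation over item neighbors.
        item_side = adj_b @ self.item_emb.weight
        return item_side[users]
```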
+  {
+    "type": "text",
+    "text": "Multi-behavior Fusion. Single-behavior representations may suffer from data sparsity issues, especially for some high-cost and low-frequency target behaviors (e.g., purchase). In this case, other auxiliary behaviors (e.g., click, add to cart) could provide essential information to infer user preferences on the target behaviors. Hence, we build a multi-behavior fusion module to fuse user single-behavior representations in each view to get the integrated sequence-view representation $\\pmb{u}^s$ and the integrated graph-view representation $\\pmb{u}^g$ , which is noted as:",
+    "bbox": [214, 348, 787, 454],
+    "page_idx": 5
+  },
+  {
+    "type": "equation",
+    "text": "\n$$\n\\boldsymbol{u}^{s} = \\operatorname{MLP}^{s}\\left(\\boldsymbol{u}^{s,b_{1}} \\| \\cdots \\| \\boldsymbol{u}^{s,b_{B}}\\right), \\quad \\boldsymbol{u}^{g} = \\operatorname{MLP}^{g}\\left(\\boldsymbol{u}^{0} \\| \\boldsymbol{u}^{g,b_{1}} \\| \\cdots \\| \\boldsymbol{u}^{g,b_{B}}\\right). \\tag{2}\n$$\n",
+    "text_format": "latex",
+    "bbox": [236, 464, 785, 482],
+    "page_idx": 5
+  },
+  {
+    "type": "text",
+    "text": "$\\pmb{u}^{0}$ is the raw user embedding in the graph view. $\\mathrm{MLP}^s$ and $\\mathrm{MLP}^g$ are two-layer MLPs with ReLU as activation. We also build the graph-view item representation $\\pmb{v}^{g}$ similar to $\\pmb{u}^{g}$ , where $\\pmb{v}^{0}$ is also used as the raw item feature in Eq. (1). Multi-view Fusion. To take advantage of representations in both views, we apply a multi-view fusion to learn the final user and item representations, which contain both individual and global information. We formalize the integrated user representation $\\pmb{u}$ and item representation $\\pmb{v}$ as follows:",
+    "bbox": [214, 491, 787, 597],
+    "page_idx": 5
+  },
+  {
+    "type": "equation",
+    "text": "\n$$\n\\boldsymbol{u} = \\operatorname{MLP}^{U}\\left(\\boldsymbol{u}^{s} \\| \\boldsymbol{u}^{g}\\right), \\quad \\boldsymbol{v} = \\operatorname{MLP}^{V}\\left(\\boldsymbol{v}^{0} \\| \\boldsymbol{v}^{g}\\right). \\tag{3}\n$$\n",
+    "text_format": "latex",
+    "bbox": [351, 608, 785, 626],
+    "page_idx": 5
+  },
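
A minimal sketch of the fusion functions in Eqs. (2)-(3), assuming the per-behavior representations are already computed. The two-layer ReLU MLPs follow the description above; the module and argument names are hypothetical.

```python
import torch
import torch.nn as nn

def two_layer_mlp(in_dim, out_dim):
    # Two-layer MLP with ReLU activation, as described for Eqs. (2)-(3).
    return nn.Sequential(nn.Linear(in_dim, out_dim), nn.ReLU(), nn.Linear(out_dim, out_dim))

class MultiBehaviorMultiViewFusion(nn.Module):
    """Sketch of Eqs. (2)-(3): fuse across behaviors per view, then across views."""
    def __init__(self, dim, n_behaviors):
        super().__init__()
        self.mlp_s = two_layer_mlp(dim * n_behaviors, dim)        # u^s from u^{s,b_1} || ... || u^{s,b_B}
        self.mlp_g = two_layer_mlp(dim * (n_behaviors + 1), dim)  # u^g also concatenates the raw u^0
        self.mlp_u = two_layer_mlp(2 * dim, dim)                  # u = MLP^U(u^s || u^g)

    def forward(self, seq_reps, graph_reps, u0):
        # seq_reps, graph_reps: lists of (batch, dim) per-behavior tensors; u0: (batch, dim).
        u_s = self.mlp_s(torch.cat(seq_reps, dim=-1))
        u_g = self.mlp_g(torch.cat([u0] + graph_reps, dim=-1))
        u = self.mlp_u(torch.cat([u_s, u_g], dim=-1))
        return u_s, u_g, u
```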
+  {
+    "type": "text",
+    "text": "Following the classical ranking model [13], the inner product of $\\mathbf{u}$ and $\\mathbf{v}$ is used to calculate the ranking scores of user-item pairs, trained under $L_{o}$ as:",
+    "bbox": [214, 636, 787, 666],
+    "page_idx": 5
+  },
+  {
+    "type": "equation",
+    "text": "\n$$\nL_{o} = -\\sum_{(u, v_{i}) \\in S^{+}} \\sum_{(u, v_{j}) \\in S^{-}} \\log \\sigma\\left(\\boldsymbol{u}^{\\top} \\boldsymbol{v}_{i} - \\boldsymbol{u}^{\\top} \\boldsymbol{v}_{j}\\right), \\tag{4}\n$$\n",
+    "text_format": "latex",
+    "bbox": [336, 676, 785, 710],
+    "page_idx": 5
+  },
+  {
+    "type": "text",
+    "text": "where $(u,v_{i})\\in S^{+}$ indicates the positive set of the target behavior, and $(u,v_{j})\\in$ $S^{-}$ indicates the randomly-sampled negative set.",
+    "bbox": [214, 719, 787, 748],
+    "page_idx": 5
+  },
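
Eq. (4) is a standard BPR-style pairwise ranking loss; a small sketch with a hypothetical function name, assuming one sampled negative item per positive pair:

```python
import torch
import torch.nn.functional as F

def ranking_loss(u, v_pos, v_neg):
    """Sketch of Eq. (4): -sum log sigma(u^T v_i - u^T v_j).
    u, v_pos, v_neg: (batch, dim) fused user / positive-item / negative-item tensors."""
    pos_score = (u * v_pos).sum(-1)   # u^T v_i
    neg_score = (u * v_neg).sum(-1)   # u^T v_j
    return -F.logsigmoid(pos_score - neg_score).sum()
```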
+  {
+    "type": "text",
+    "text": "Multi-view Multi-behavior Contrastive Learning. The above architecture is a straightforward combination of multi-view multi-behavior representations. To better capture the coarse-grained commonalities and fine-grained differences between different behaviors and views, and thus learn better user representations, we design three types of CL tasks. Next, we introduce their details.",
+    "bbox": [212, 750, 787, 839],
+    "page_idx": 5
+  },
+  {
+    "type": "page_number",
+    "text": "6",
+    "bbox": [217, 114, 228, 126],
+    "page_idx": 5
+  },
+  {
+    "type": "header",
+    "text": "Authors Suppressed Due to Excessive Length",
+    "bbox": [271, 114, 573, 128],
+    "page_idx": 5
+  },
+  {
+    "type": "text",
+    "text": "3.3 Multi-behavior Contrastive Learning",
+    "text_level": 1,
+    "bbox": [215, 146, 565, 161],
+    "page_idx": 6
+  },
+  {
+    "type": "text",
+    "text": "A user's single-behavior representations reflect user preferences on the corresponding behaviors, which also share certain commonalities to reflect the user itself. We build two multi-behavior CL tasks in the sequence and graph views respectively as auxiliary losses to better use multi-behavior information.",
+    "bbox": [212, 162, 782, 223],
+    "page_idx": 6
+  },
+  {
+    "type": "text",
+    "text": "Sequential Multi-behavior CL. We adopt a sequential multi-behavior CL, which attempts to minimize the differences between different single-behavior representations of the same user and maximize the differences between different users. In this case, we naturally regard different single-behavior representations of a user as certain kinds of (behavior-level) user augmentations.",
+    "bbox": [212, 223, 782, 297],
+    "page_idx": 6
+  },
+  {
+    "type": "text",
+    "text": "Precisely, considering a mini-batch of $N$ users $\\{u_{1},\\dots ,u_{N}\\}$ , we randomly select two single-behavior representations $(\\pmb{u}_i^{s,b_1},\\pmb{u}_i^{s,b_2})$ of behavior $b_{1}$ and $b_{2}$ for each $u_{i}$ as the positive pair in CL. And we consider $(\\pmb{u}_i^{s,b_1},\\pmb{u}_j^{s,b_2})$ as the negative pair. Following [2], we also apply a projector $\\mathrm{MLP}_{p_1}(\\cdot)$ to map all user single-behavior representations into the same sequential semantic space. We have:",
+    "bbox": [212, 299, 784, 378],
+    "page_idx": 6
+  },
+  {
+    "type": "equation",
+    "text": "\n$$\n\\boldsymbol{u}_{i,p_{1}}^{s,b_{1}} = \\operatorname{MLP}_{p_{1}}\\left(\\boldsymbol{u}_{i}^{s,b_{1}}\\right), \\quad \\boldsymbol{u}_{j,p_{1}}^{s,b_{2}} = \\operatorname{MLP}_{p_{1}}\\left(\\boldsymbol{u}_{j}^{s,b_{2}}\\right). \\tag{5}\n$$\n",
+    "text_format": "latex",
+    "bbox": [334, 388, 784, 409],
+    "page_idx": 6
+  },
+  {
+    "type": "text",
+    "text": "The sequential multi-behavior CL loss $L_{SeqCL}$ is defined as follows:",
+    "bbox": [215, 417, 699, 434],
+    "page_idx": 6
+  },
+  {
+    "type": "equation",
+    "text": "\n$$\nL_{SeqCL} = -\\sum_{i=1}^{N} \\sum_{u_{j} \\neq u_{i}} f\\left(\\boldsymbol{u}_{i,p_{1}}^{s,b_{1}}, \\boldsymbol{u}_{i,p_{1}}^{s,b_{2}}, \\boldsymbol{u}_{j,p_{1}}^{s,b_{2}}\\right), \\tag{6}\n$$\n",
+    "text_format": "latex",
+    "bbox": [352, 448, 784, 489],
+    "page_idx": 6
+  },
+  {
+    "type": "equation",
+    "text": "\n$$\nf(\\boldsymbol{x}, \\boldsymbol{y}, \\boldsymbol{z}) = \\log\\left(\\sigma\\left(\\boldsymbol{x}^{\\top}\\boldsymbol{y} - \\boldsymbol{x}^{\\top}\\boldsymbol{z}\\right)\\right).\n$$\n",
+    "text_format": "latex",
+    "bbox": [416, 493, 651, 511],
+    "page_idx": 6
+  },
+  {
+    "type": "text",
+    "text": "$f(\\pmb{x},\\pmb{y},\\pmb{z})$ denotes our pair-wise distance function, and $\\sigma(\\cdot)$ is the sigmoid activation. Graphic Multi-behavior CL. Similar to the sequential multi-behavior CL, we also build a graphic multi-behavior CL for the graph-view representations. For $\\pmb{u}_i^{g,b_1}$ , we consider $\\pmb{u}_i^{g,b_2}$ as the positive sample and $\\pmb{u}_j^{g,b_2}$ as the negative sample in this CL. We also have $\\pmb{u}_{i,p_2}^{g,b_1} = \\mathrm{MLP}_{p_2}(\\pmb{u}_i^{g,b_1})$ and $\\pmb{u}_{j,p_2}^{g,b_2} = \\mathrm{MLP}_{p_2}(\\pmb{u}_j^{g,b_2})$ as Eq. (5). We define the graphic multi-behavior CL loss $L_{GraphCL}$ as follows:",
+    "bbox": [215, 522, 785, 618],
+    "page_idx": 6
+  },
+  {
+    "type": "equation",
+    "text": "\n$$\nL_{GraphCL} = -\\sum_{i=1}^{N} \\sum_{u_{j} \\neq u_{i}} f\\left(\\boldsymbol{u}_{i,p_{2}}^{g,b_{1}}, \\boldsymbol{u}_{i,p_{2}}^{g,b_{2}}, \\boldsymbol{u}_{j,p_{2}}^{g,b_{2}}\\right), \\tag{7}\n$$\n",
+    "text_format": "latex",
+    "bbox": [343, 630, 784, 672],
+    "page_idx": 6
+  },
+  {
+    "type": "text",
+    "text": "in which $f(\\pmb{x}, \\pmb{y}, \\pmb{z})$ is the same as Eq. (6). Through the sequential and graphic multi-behavior CL tasks, MMCLR can learn better and more robust single-behavior representations, which is fundamental to modeling users' diverse preferences. It functions well, especially when the target behaviors are sparse.",
+    "bbox": [212, 685, 782, 744],
+    "page_idx": 6
+  },
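
Eqs. (6)-(8) all instantiate the same in-batch pairwise form f(x, y, z) = log σ(xᵀy − xᵀz), only with different inputs (two behaviors of one view, or two views). A minimal sketch of that shared form, assuming one projector MLP per task as in Eq. (5); the names are hypothetical:

```python
import torch
import torch.nn.functional as F

def in_batch_pairwise_cl(x, y, projector):
    """Shared form of the SeqCL/GraphCL (and later ViewCL) losses, Eqs. (6)-(8).
    x, y: (N, dim) two representations of the same N users; (x_i, y_i) are the
    positive pairs, and every (x_i, y_j) with j != i is an in-batch negative pair."""
    xp, yp = projector(x), projector(y)                     # projector MLP of Eq. (5)
    scores = xp @ yp.t()                                    # (N, N) pairwise x_i^T y_j
    pos = scores.diag().unsqueeze(1)                        # x_i^T y_i per row
    diff = pos - scores                                     # x_i^T y_i - x_i^T y_j
    off_diag = ~torch.eye(x.size(0), dtype=torch.bool, device=x.device)
    return -F.logsigmoid(diff[off_diag]).sum()
```

For the sequential task, x and y would be the projected representations of two randomly chosen behaviors in the sequence view; the graphic task reuses the same function with graph-view inputs and its own projector.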
+  {
+    "type": "text",
+    "text": "3.4 Multi-view Contrastive Learning",
+    "text_level": 1,
+    "bbox": [215, 748, 532, 763],
+    "page_idx": 6
+  },
+  {
+    "type": "text",
+    "text": "The multi-view CL aims to highlight the relationships between the individual sequence and global graph views. It is natural that the sequence-view and graph-view user representations of the same user should be closer than others, since they reflect the same user's preferences (though learned from different information). Hence, we propose the multi-view CL task on the integrated sequence-view",
+    "bbox": [212, 763, 784, 840],
+    "page_idx": 6
+  },
+  {
+    "type": "header",
+    "text": "Multi-view Multi-behavior Contrastive Learning in Recommendation",
+    "bbox": [272, 114, 732, 128],
+    "page_idx": 6
+  },
+  {
+    "type": "page_number",
+    "text": "7",
+    "bbox": [774, 116, 784, 126],
+    "page_idx": 6
+  },
+  {
+    "type": "text",
+    "text": "and graph-view user representations in Eq. (2). We regard $(\\pmb{u}_i^s, \\pmb{u}_i^g)$ of the same user $u_i$ as the positive pair, considering $\\pmb{u}_i^s$ and $\\pmb{u}_i^g$ as different view-level user augmentations of $u_i$ , and regard $(\\pmb{u}_i^s, \\pmb{u}_j^g)$ and $(\\pmb{u}_i^g, \\pmb{u}_j^s)$ as the in-batch negative pairs of two views. After the projector, we have $\\pmb{u}_{i,p_3}^s = \\mathrm{MLP}_{p_3}(\\pmb{u}_i^s)$ and $\\pmb{u}_{j,p_3}^g = \\mathrm{MLP}_{p_3}(\\pmb{u}_j^g)$ . The multi-view CL loss $L_{ViewCL}$ is noted as follows:",
+    "bbox": [215, 146, 787, 227],
+    "page_idx": 7
+  },
+  {
+    "type": "equation",
+    "text": "\n$$\nL_{ViewCL} = -\\sum_{i=1}^{N} \\sum_{u_{j} \\neq u_{i}} f\\left(\\boldsymbol{u}_{i,p_{3}}^{s}, \\boldsymbol{u}_{i,p_{3}}^{g}, \\boldsymbol{u}_{j,p_{3}}^{g}\\right). \\tag{8}\n$$\n",
+    "text_format": "latex",
+    "bbox": [348, 232, 785, 273],
+    "page_idx": 7
+  },
+  {
+    "type": "text",
+    "text": "We are the first to propose the notion of multi-view CL. Through this CL, individual sequence and global graph views can cooperate well in MBR.",
+    "bbox": [214, 279, 784, 310],
+    "page_idx": 7
+  },
+  {
+    "type": "text",
+    "text": "3.5 Behavior Distinction Contrastive Learning",
+    "text_level": 1,
+    "bbox": [215, 311, 612, 327],
+    "page_idx": 7
+  },
+  {
+    "type": "text",
+    "text": "The above two CL tasks highlight the commonalities between a user's multiple behaviors and views compared to other users' representations. However, the fine-grained differences between different behaviors of a user are also essential. For example, in E-commerce, the low-frequency, high-cost purchase behaviors reflect the user's high-priority preferences, compared with other low-cost auxiliary behaviors like click and add to cart. To some extent, these auxiliary behaviors (viewed as positive pair instances in multi-behavior CL) could even be regarded as certain hard negative samples of the high-cost target behaviors [8]. Considering the fine-grained differences and behavior priorities can further improve the target behavior's (e.g., purchase) performances, especially when distinguishing \"the good but negative\" candidates (e.g., clicked but not purchased items), which are challenging interference terms in practical ranking systems. Hence, we propose a novel behavior distinction CL for the first time in MBR.",
+    "bbox": [212, 329, 787, 523],
+    "page_idx": 7
+  },
+  {
+    "type": "text",
+    "text": "Specifically, we define the behavior priority in MBR as follows: items of the target behavior $v_{i} >$ items of auxiliary behaviors $v_{j} >>$ other random in-batch items $v_{k}$ . In the target behavior prediction task, the integrated user representation $\\mathbf{u}$ should firstly be close to $\\mathbf{v}_{i}$ , then to the hard negative samples of auxiliary behaviors $\\mathbf{v}_{j}$ , and finally be distinct from the random negative items $\\mathbf{v}_{k}$ . Similarly, we conduct a projector $\\mathrm{MLP}_{p_4}$ to get $\\mathbf{u}_{p_4}, \\mathbf{v}_{i,p_4}, \\mathbf{v}_{j,p_4}$ , and $\\mathbf{v}_{k,p_4}$ , and then learn the item-aspect behavior distinction CL $L_{DisCL}$ as follows:",
+    "bbox": [215, 525, 787, 632],
+    "page_idx": 7
+  },
+  {
+    "type": "equation",
+    "text": "\n$$\nL_{DisCL} = -\\sum_{u} \\sum_{(v_{i}, v_{j})} \\sum_{v_{k}} \\left(f\\left(\\boldsymbol{u}_{p_{4}}, \\boldsymbol{v}_{i,p_{4}}, \\boldsymbol{v}_{j,p_{4}}\\right) + \\beta f\\left(\\boldsymbol{u}_{p_{4}}, \\boldsymbol{v}_{j,p_{4}}, \\boldsymbol{v}_{k,p_{4}}\\right)\\right). \\tag{9}\n$$\n",
+    "text_format": "latex",
+    "bbox": [259, 648, 785, 683],
+    "page_idx": 7
+  },
+  {
+    "type": "text",
+    "text": "$\\beta$ is a loss weight, and $v_{i}$ and $v_{j}$ are items of the target and auxiliary behaviors of $u$ , respectively.",
+    "bbox": [215, 689, 751, 704],
+    "page_idx": 7
+  },
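
A sketch of the behavior distinction loss in Eq. (9), reusing the pairwise function f from Eq. (6). It enforces the stated priority, target items > auxiliary-behavior items > random in-batch items; the beta default is an illustrative placeholder rather than a value from the paper, and the names are hypothetical:

```python
import torch
import torch.nn.functional as F

def f(x, y, z):
    # Pairwise distance function of Eq. (6): log sigma(x^T y - x^T z), per row.
    return F.logsigmoid((x * y).sum(-1) - (x * z).sum(-1))

def distinction_cl_loss(u, v_target, v_aux, v_rand, beta=0.5):
    """Sketch of Eq. (9). All inputs are (batch, dim) projected representations
    (after MLP_{p_4}); beta weights the auxiliary-vs-random term."""
    return -(f(u, v_target, v_aux) + beta * f(u, v_aux, v_rand)).sum()
```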
+  {
+    "type": "text",
+    "text": "The multi-behavior CL (i.e., Eq. (6, 7)) aims to narrow the distances between different behaviors of a user from the global perspective, thus distinguishing them from other items. In contrast, the behavior distinction CL aims to capture the fine-grained differences between different types of behaviors of a user, achieving deeper and more precise understandings of the user's target-behavior preferences.",
+    "bbox": [212, 705, 787, 780],
+    "page_idx": 7
+  },
+  {
+    "type": "text",
+    "text": "3.6 Optimization",
+    "text_level": 1,
+    "bbox": [215, 782, 372, 797],
+    "page_idx": 7
+  },
+  {
+    "type": "text",
+    "text": "Overall Loss. The overall loss $L$ is defined with hyper-parameters $\\lambda$ as:",
+    "bbox": [215, 797, 740, 814],
+    "page_idx": 7
+  },
+  {
+    "type": "equation",
+    "text": "\n$$\nL = \\lambda_{o} L_{o} + \\lambda_{1} L_{SeqCL} + \\lambda_{2} L_{GraphCL} + \\lambda_{3} L_{ViewCL} + \\lambda_{4} L_{DisCL}. \\tag{10}\n$$\n",
+    "text_format": "latex",
+    "bbox": [259, 823, 785, 839],
+    "page_idx": 7
+  },
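
The joint objective of Eq. (10) is a plain weighted sum of the five losses; a one-function sketch, where the lambda defaults are placeholders (the paper treats them as hyper-parameters):

```python
def overall_loss(l_o, l_seq, l_graph, l_view, l_dis,
                 lambdas=(1.0, 0.1, 0.1, 0.1, 0.1)):
    # Eq. (10): L = lambda_o*L_o + lambda_1*L_SeqCL + lambda_2*L_GraphCL
    #             + lambda_3*L_ViewCL + lambda_4*L_DisCL.
    lo, l1, l2, l3, l4 = lambdas
    return lo * l_o + l1 * l_seq + l2 * l_graph + l3 * l_view + l4 * l_dis
```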
|
| 974 |
+
{
|
| 975 |
+
"type": "page_number",
|
| 976 |
+
"text": "8",
|
| 977 |
+
"bbox": [
|
| 978 |
+
217,
|
| 979 |
+
114,
|
| 980 |
+
228,
|
| 981 |
+
126
|
| 982 |
+
],
|
| 983 |
+
"page_idx": 7
|
| 984 |
+
},
|
| 985 |
+
{
|
| 986 |
+
"type": "header",
|
| 987 |
+
"text": "Authors Suppressed Due to Excessive Length",
|
| 988 |
+
"bbox": [
|
| 989 |
+
271,
|
| 990 |
+
114,
|
| 991 |
+
573,
|
| 992 |
+
128
|
| 993 |
+
],
|
| 994 |
+
"page_idx": 7
|
| 995 |
+
},
|
| 996 |
+
{
|
| 997 |
+
"type": "text",
|
| 998 |
+
"text": "Model Analysis.",
|
| 999 |
+
"text_level": 1,
|
| 1000 |
+
"bbox": [
|
| 1001 |
+
215,
|
| 1002 |
+
146,
|
| 1003 |
+
352,
|
| 1004 |
+
160
|
| 1005 |
+
],
|
| 1006 |
+
"page_idx": 8
|
| 1007 |
+
},
|
| 1008 |
+
{
|
| 1009 |
+
"type": "text",
|
| 1010 |
+
"text": "For complexity, the graph and sequential encoders can run parallel, so the encoder complexity is decided by the more complex model. Hence, MMCLR does not produce extra encoding time. For contrastive tasks, the training complexity of the MLP layer is $O(|U|d^2)$ , and the complexity of CL is $O(|U|Nd)$ , where $|U|$ is the number of users and $N$ is the batch size. The complexity is equal with existing CL models [36,18] and can be computed in parallel with fusion operations. Moreover, the CL losses are only calculated in offline, which means our model has equal online serving complexity as others.",
|
| 1011 |
+
"bbox": [
|
| 1012 |
+
212,
|
| 1013 |
+
162,
|
| 1014 |
+
787,
|
| 1015 |
+
282
|
| 1016 |
+
],
|
| 1017 |
+
"page_idx": 8
|
| 1018 |
+
},
|
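As a concrete picture of the two-view architecture analyzed above, here is a minimal sketch of the concatenation-plus-MLP fusion (the fusion that MMR is later described as using in Sec. 4.2); the two encoders themselves are omitted, and the class name and layer sizes are illustrative. The per-user MLP cost is the stated $O(d^2)$.

```python
import torch
import torch.nn as nn

class ViewFusion(nn.Module):
    """Fuse sequence-view and graph-view user embeddings by concat + MLP."""
    def __init__(self, d=64):
        super().__init__()
        self.mlp = nn.Sequential(nn.Linear(2 * d, d), nn.ReLU(), nn.Linear(d, d))

    def forward(self, u_seq, u_graph):
        # u_seq / u_graph: (B, d) outputs of the sequential and graph encoders,
        # which can be computed in parallel as noted above
        return self.mlp(torch.cat([u_seq, u_graph], dim=-1))

fuse = ViewFusion(d=64)
u = fuse(torch.randn(8, 64), torch.randn(8, 64))  # (8, 64) fused embeddings
```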
| 1019 |
+
{
|
| 1020 |
+
"type": "text",
|
| 1021 |
+
"text": "4 Experiments",
|
| 1022 |
+
"text_level": 1,
|
| 1023 |
+
"bbox": [
|
| 1024 |
+
215,
|
| 1025 |
+
306,
|
| 1026 |
+
375,
|
| 1027 |
+
324
|
| 1028 |
+
],
|
| 1029 |
+
"page_idx": 8
|
| 1030 |
+
},
|
| 1031 |
+
{
|
| 1032 |
+
"type": "text",
|
| 1033 |
+
"text": "In this section, we aim at answering the following research questions: (RQ1) How does MMCLR perform compared with other SOTA baselines in MBR on various evaluation metrics? (RQ2) What are the effects of different contrastive learning tasks in our proposed MMCLR? (RQ3) How does MMCLR perform on cold-start scenarios compared to baselines and ablation versions? (RQ4) How do different hyper-parameters affect the final performance?",
|
| 1034 |
+
"bbox": [
|
| 1035 |
+
212,
|
| 1036 |
+
339,
|
| 1037 |
+
787,
|
| 1038 |
+
430
|
| 1039 |
+
],
|
| 1040 |
+
"page_idx": 8
|
| 1041 |
+
},
|
| 1042 |
+
{
|
| 1043 |
+
"type": "text",
|
| 1044 |
+
"text": "4.1 Datasets",
|
| 1045 |
+
"text_level": 1,
|
| 1046 |
+
"bbox": [
|
| 1047 |
+
215,
|
| 1048 |
+
433,
|
| 1049 |
+
334,
|
| 1050 |
+
446
|
| 1051 |
+
],
|
| 1052 |
+
"page_idx": 8
|
| 1053 |
+
},
|
| 1054 |
+
{
|
| 1055 |
+
"type": "text",
|
| 1056 |
+
"text": "We evaluate MMCLR on two real-world MBR datasets on E-commerce, including the Tmall and CIKM2019 EComm AI dataset. $T_{mall}^{6}$ : It is collected by Tmall, which is one of the largest E-commerce platforms in China. We process this dataset following [2]. After processing, our Tmall dataset contains 22,014 users and 27,155 items. We consider three behaviors (i.e., click, add-to-cart, purchase), collecting 83,778 purchase behaviors, 44,717 add-to-cart behaviors, and 485,483 click behaviors. CIKM2019 EComm AI: It is provided by the CIKM2019 EComm AI challenge. In this dataset, each instance is made up by an item, a user and a behavior label (i.e., click, add-to-cart, purchase). We process this dataset following [2] as well. Finally, this dataset includes 23,032 users, 25,054 items, 100,529 purchase behaviors, 38,347 add-to-cart behaviors, and 276,750 click behaviors.",
|
| 1057 |
+
"bbox": [
|
| 1058 |
+
212,
|
| 1059 |
+
449,
|
| 1060 |
+
787,
|
| 1061 |
+
628
|
| 1062 |
+
],
|
| 1063 |
+
"page_idx": 8
|
| 1064 |
+
},
|
| 1065 |
+
{
|
| 1066 |
+
"type": "text",
|
| 1067 |
+
"text": "4.2 Competitors",
|
| 1068 |
+
"text_level": 1,
|
| 1069 |
+
"bbox": [
|
| 1070 |
+
215,
|
| 1071 |
+
633,
|
| 1072 |
+
366,
|
| 1073 |
+
647
|
| 1074 |
+
],
|
| 1075 |
+
"page_idx": 8
|
| 1076 |
+
},
|
| 1077 |
+
{
|
| 1078 |
+
"type": "text",
|
| 1079 |
+
"text": "We compare MMCLR against several state-of-the-art baselines. For baselines not designed for MBR, we adopt our MMCLR's fusion function to jointly consider multi-behavior data. All baselines exploit data of multiple behaviors.",
|
| 1080 |
+
"bbox": [
|
| 1081 |
+
212,
|
| 1082 |
+
648,
|
| 1083 |
+
785,
|
| 1084 |
+
694
|
| 1085 |
+
],
|
| 1086 |
+
"page_idx": 8
|
| 1087 |
+
},
|
| 1088 |
+
{
|
| 1089 |
+
"type": "list",
|
| 1090 |
+
"sub_type": "text",
|
| 1091 |
+
"list_items": [
|
| 1092 |
+
"- BERT4Rec $^{MB}$ . BERT4Rec [15] is a self-attention-based sequential recommendation model. We conduct separate Transformer encoders on all behaviors, and fuse them via MMCLR's fusion function, denoted as BERT4Rec $^{MB}$ .",
|
| 1093 |
+
"- LightGCN $^{MB}$ . lightGCN [6] is a widely-used GNN model. Similarly, we construct multiple user-item graphs for all behaviors, encode them by it.",
|
| 1094 |
+
"- MRIG. MRIG [16] is one of the SOTA sequence-based models for MBR. It adopts user's individual behavior sequence to build a sequential graph, which regards two items having an edge if they are adjacent in a sequence."
|
| 1095 |
+
],
|
| 1096 |
+
"bbox": [
|
| 1097 |
+
223,
|
| 1098 |
+
694,
|
| 1099 |
+
784,
|
| 1100 |
+
815
|
| 1101 |
+
],
|
| 1102 |
+
"page_idx": 8
|
| 1103 |
+
},
|
| 1104 |
+
{
|
| 1105 |
+
"type": "header",
|
| 1106 |
+
"text": "Multi-view Multi-behavior Contrastive Learning in Recommendation",
|
| 1107 |
+
"bbox": [
|
| 1108 |
+
272,
|
| 1109 |
+
114,
|
| 1110 |
+
730,
|
| 1111 |
+
128
|
| 1112 |
+
],
|
| 1113 |
+
"page_idx": 8
|
| 1114 |
+
},
|
| 1115 |
+
{
|
| 1116 |
+
"type": "page_number",
|
| 1117 |
+
"text": "9",
|
| 1118 |
+
"bbox": [
|
| 1119 |
+
774,
|
| 1120 |
+
116,
|
| 1121 |
+
784,
|
| 1122 |
+
126
|
| 1123 |
+
],
|
| 1124 |
+
"page_idx": 8
|
| 1125 |
+
},
|
| 1126 |
+
{
|
| 1127 |
+
"type": "page_footnote",
|
| 1128 |
+
"text": "$^{6}$ https://tianchi.aliyun.com/competition/entrance/231721/introduction",
|
| 1129 |
+
"bbox": [
|
| 1130 |
+
217,
|
| 1131 |
+
824,
|
| 1132 |
+
700,
|
| 1133 |
+
840
|
| 1134 |
+
],
|
| 1135 |
+
"page_idx": 8
|
| 1136 |
+
},
|
| 1137 |
+
{
|
| 1138 |
+
"type": "list",
|
| 1139 |
+
"sub_type": "text",
|
| 1140 |
+
"list_items": [
|
| 1141 |
+
"- MBGCN. MBGCN [9] is a recent graph-based MBR model. It integrates multi-behavior information by user-item and item-item propagations.",
|
| 1142 |
+
"- MBGMN. MBGMN [22] is one of the SOTA graph-based models for MBR. MBGMN first models the behavior heterogeneity and interaction diversity jointly with the meta-learning paradigm.",
|
| 1143 |
+
"- MGNN. MGNN [32] is one of the SOTA multiplex-graph-based models for MBR. It builds users' multi-behavior to a multiplex-graph and learns shared graph embedding and behavior-specific embedding for recommendation."
|
| 1144 |
+
],
|
| 1145 |
+
"bbox": [
|
| 1146 |
+
223,
|
| 1147 |
+
145,
|
| 1148 |
+
784,
|
| 1149 |
+
266
|
| 1150 |
+
],
|
| 1151 |
+
"page_idx": 9
|
| 1152 |
+
},
|
| 1153 |
+
{
|
| 1154 |
+
"type": "text",
|
| 1155 |
+
"text": "We also compare with MMCLR's ablation versions for further comparisons:",
|
| 1156 |
+
"bbox": [
|
| 1157 |
+
215,
|
| 1158 |
+
268,
|
| 1159 |
+
759,
|
| 1160 |
+
282
|
| 1161 |
+
],
|
| 1162 |
+
"page_idx": 9
|
| 1163 |
+
},
|
| 1164 |
+
{
|
| 1165 |
+
"type": "list",
|
| 1166 |
+
"sub_type": "text",
|
| 1167 |
+
"list_items": [
|
| 1168 |
+
"- BERT4RecCL. We add the sequential multi-behavior CL $L_{SeqCL}$ to the BERT4RecMB, which is noted as BERT4RecCL",
|
| 1169 |
+
"- LightGCN $^{CL}$ . Similarly, We also add the graphic multi-behavior CL to the LightGCN $^{MB}$ , which is denoted as LightGCN $^{CL}$ .",
|
| 1170 |
+
"- MMR. MMR is an ablation version of MMCLR without all CL tasks. It can be viewed as a simple multi-view multi-behavior model, which combines BERT4Rec $^{MB}$ with LightGCN $^{MB}$ via embedding concatenation and MLP."
|
| 1171 |
+
],
|
| 1172 |
+
"bbox": [
|
| 1173 |
+
223,
|
| 1174 |
+
284,
|
| 1175 |
+
782,
|
| 1176 |
+
388
|
| 1177 |
+
],
|
| 1178 |
+
"page_idx": 9
|
| 1179 |
+
},
|
| 1180 |
+
{
|
| 1181 |
+
"type": "text",
|
| 1182 |
+
"text": "4.3 Experimental Settings",
|
| 1183 |
+
"text_level": 1,
|
| 1184 |
+
"bbox": [
|
| 1185 |
+
215,
|
| 1186 |
+
393,
|
| 1187 |
+
444,
|
| 1188 |
+
407
|
| 1189 |
+
],
|
| 1190 |
+
"page_idx": 9
|
| 1191 |
+
},
|
| 1192 |
+
{
|
| 1193 |
+
"type": "text",
|
| 1194 |
+
"text": "Parameter Settings. The embedding sizes of users and items are 64 and batch size is 256 for all methods. We optimize all models by Adam optimizer. For BERT4Rec, we stack two-layer transformers and each transformer with two attention heads. The depth of our graph encoder is set to 2. The learning rate and L2 normalization coefficient of MMCLR are set as $1e^{-3}$ and $1e^{-4}$ , respectively. The weights of supervised loss $L_{o}$ and four CL losses (i.e., $L_{SeqCL}$ , $L_{GraphCL}$ , $L_{ViewCL}$ , $L_{DisCL}$ ) are set as 1.0, 0.2, 0.2, 0.2, and 0.05, respectively. For all baselines, We conduct a grid search for parameter selections.",
|
| 1195 |
+
"bbox": [
|
| 1196 |
+
212,
|
| 1197 |
+
411,
|
| 1198 |
+
784,
|
| 1199 |
+
530
|
| 1200 |
+
],
|
| 1201 |
+
"page_idx": 9
|
| 1202 |
+
},
|
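For reference, the settings above collected in one place; the key names below are ours, the values are those reported in the text.

```python
# Hyper-parameters of MMCLR as reported in Sec. 4.3 (key names are ours).
config = {
    "embedding_dim": 64,
    "batch_size": 256,
    "optimizer": "Adam",
    "bert4rec_layers": 2,       # two stacked Transformer layers
    "attention_heads": 2,
    "graph_encoder_depth": 2,
    "learning_rate": 1e-3,
    "l2_coefficient": 1e-4,
    "loss_weights": {           # lambda_o and the four CL weights in Eq. (10)
        "L_o": 1.0, "L_SeqCL": 0.2, "L_GraphCL": 0.2,
        "L_ViewCL": 0.2, "L_DisCL": 0.05,
    },
}
```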
| 1203 |
+
{
|
| 1204 |
+
"type": "text",
|
| 1205 |
+
"text": "Evaluation Protocols. Following [28,36], We adopt the leave-one-out strategy to evaluate the models' performance; We also employ the top-K hit rate (HIT), top-K Normalized Discounted Cumulative Gain (NDCG), Mean Reciprocal Rank (MRR), and AUC (Area Under the Curve). For HIT and NDCG, we report top 5 and 10; For each ground truth, we randomly sample 99 items that user did not interact with under the target behavior as negative samples.",
|
| 1206 |
+
"bbox": [
|
| 1207 |
+
212,
|
| 1208 |
+
532,
|
| 1209 |
+
784,
|
| 1210 |
+
619
|
| 1211 |
+
],
|
| 1212 |
+
"page_idx": 9
|
| 1213 |
+
},
|
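Under this protocol, each test case ranks the ground-truth item against its 99 sampled negatives, so the per-user metrics reduce to functions of the ground truth's rank. A minimal sketch (AUC is omitted, and ties are counted pessimistically; these conventions are our assumptions):

```python
import math

def rank_metrics(gt_score, neg_scores, k=10):
    """HIT@k, NDCG@k, and MRR for one test case: the ground-truth item is
    ranked against 99 sampled negatives; `rank` is 0-based."""
    rank = sum(s >= gt_score for s in neg_scores)  # negatives scored above GT
    hit = 1.0 if rank < k else 0.0
    ndcg = 1.0 / math.log2(rank + 2) if rank < k else 0.0
    mrr = 1.0 / (rank + 1)
    return hit, ndcg, mrr

# e.g. GT scored 0.9 against 99 negatives, one of which outranks it
print(rank_metrics(0.9, [0.95, 0.3, 0.1] + [0.0] * 96, k=10))
```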
| 1214 |
+
{
|
| 1215 |
+
"type": "text",
|
| 1216 |
+
"text": "4.4 Results of Multi-behavior Recommendation (RQ1)",
|
| 1217 |
+
"text_level": 1,
|
| 1218 |
+
"bbox": [
|
| 1219 |
+
214,
|
| 1220 |
+
626,
|
| 1221 |
+
679,
|
| 1222 |
+
641
|
| 1223 |
+
],
|
| 1224 |
+
"page_idx": 9
|
| 1225 |
+
},
|
| 1226 |
+
{
|
| 1227 |
+
"type": "text",
|
| 1228 |
+
"text": "The main MBR results are shown in Table 1, from which we find that:",
|
| 1229 |
+
"bbox": [
|
| 1230 |
+
214,
|
| 1231 |
+
643,
|
| 1232 |
+
723,
|
| 1233 |
+
656
|
| 1234 |
+
],
|
| 1235 |
+
"page_idx": 9
|
| 1236 |
+
},
|
| 1237 |
+
{
|
| 1238 |
+
"type": "text",
|
| 1239 |
+
"text": "(1) MMCLR performs the best among all baselines and ablation versions of MMCLR on all metrics in two datasets. It achieves $4\\% \\sim 11.8\\%$ improvements over the best baselines on most metrics, with the significance level as $p < 0.05$ (paired t-test of MMCLR V.S. baselines). It indicates that MMCLR can well capture the commonalities and differences between different behaviors and views, and thus can better take advantage of all multi-view and multi-behavior information in MBR.(2) BERT4RecCL and LightGCNCL perform much better than their original models without CL. It verifies the importance of modeling relations between different types of behaviors when jointly learning user representations. It also implies that our multi-behavior CL can help to capture the behavior-level commonalities. Nevertheless, MMCLR still performs better than single-view models, which verifies the significance of jointly modeling multi-view",
|
| 1240 |
+
"bbox": [
|
| 1241 |
+
212,
|
| 1242 |
+
657,
|
| 1243 |
+
785,
|
| 1244 |
+
839
|
| 1245 |
+
],
|
| 1246 |
+
"page_idx": 9
|
| 1247 |
+
},
|
| 1248 |
+
{
|
| 1249 |
+
"type": "page_number",
|
| 1250 |
+
"text": "10",
|
| 1251 |
+
"bbox": [
|
| 1252 |
+
217,
|
| 1253 |
+
114,
|
| 1254 |
+
235,
|
| 1255 |
+
126
|
| 1256 |
+
],
|
| 1257 |
+
"page_idx": 9
|
| 1258 |
+
},
|
| 1259 |
+
{
|
| 1260 |
+
"type": "header",
|
| 1261 |
+
"text": "Authors Suppressed Due to Excessive Length",
|
| 1262 |
+
"bbox": [
|
| 1263 |
+
271,
|
| 1264 |
+
114,
|
| 1265 |
+
573,
|
| 1266 |
+
128
|
| 1267 |
+
],
|
| 1268 |
+
"page_idx": 9
|
| 1269 |
+
},
|
| 1270 |
+
{
|
| 1271 |
+
"type": "text",
|
| 1272 |
+
"text": "information.(3) We notice that MMR performs comparably with BERT4Rec $^{MB}$ . It reflects that the simple fusion of individual sequence-based and global graph-based models may not make full use of the multi-view information.",
|
| 1273 |
+
"bbox": [
|
| 1274 |
+
212,
|
| 1275 |
+
145,
|
| 1276 |
+
782,
|
| 1277 |
+
191
|
| 1278 |
+
],
|
| 1279 |
+
"page_idx": 10
|
| 1280 |
+
},
|
| 1281 |
+
{
|
| 1282 |
+
"type": "table",
|
| 1283 |
+
"img_path": "images/9f5a2849c1a2817800d359034c761c4c4dcc285460a97103b1ebbb0ae32dd853.jpg",
|
| 1284 |
+
"table_caption": [
|
| 1285 |
+
"Table 1. Results on multi-behavior recommendation. * indicates significance (p<0.05)."
|
| 1286 |
+
],
|
| 1287 |
+
"table_footnote": [],
|
| 1288 |
+
"table_body": "<table><tr><td>Database</td><td>Model</td><td>MRR</td><td>AUC</td><td>HIT@5</td><td>NDCG@5</td><td>HIT@10</td><td>NDCG@10</td></tr><tr><td rowspan=\"11\">Tmall</td><td>BERT4RecMB</td><td>0.1568</td><td>0.6671</td><td>0.2138</td><td>0.1448</td><td>0.3133</td><td>0.1769</td></tr><tr><td>LightGCNMB</td><td>0.1449</td><td>0.6542</td><td>0.1983</td><td>0.1318</td><td>0.3020</td><td>0.1651</td></tr><tr><td>MRIG</td><td>0.1545</td><td>0.6823</td><td>0.2084</td><td>0.1401</td><td>0.3207</td><td>0.1762</td></tr><tr><td>MBGCN</td><td>0.1534</td><td>0.6912</td><td>0.2100</td><td>0.1396</td><td>0.3208</td><td>0.1751</td></tr><tr><td>MBGMN</td><td>0.1673</td><td>0.6808</td><td>0.2273</td><td>0.1559</td><td>0.3308</td><td>0.1892</td></tr><tr><td>MGNN</td><td>0.1782</td><td>0.6955</td><td>0.2332</td><td>0.1651</td><td>0.3389</td><td>0.1991</td></tr><tr><td>LightGCNCL</td><td>0.1609</td><td>0.6863</td><td>0.2201</td><td>0.1483</td><td>0.3293</td><td>0.1835</td></tr><tr><td>BERT4RecCL</td><td>0.1754</td><td>0.6971</td><td>0.2385</td><td>0.1641</td><td>0.3467</td><td>0.1990</td></tr><tr><td>MMR</td><td>0.1576</td><td>0.6606</td><td>0.2152</td><td>0.1466</td><td>0.3108</td><td>0.1773</td></tr><tr><td>MMCLR</td><td>0.1861*</td><td>0.7237*</td><td>0.2608*</td><td>0.1770*</td><td>0.3751*</td><td>0.2138*</td></tr><tr><td>Improvement</td><td>4.4%</td><td>4.1%</td><td>11.8%</td><td>7.3%</td><td>10.7%</td><td>7.4%</td></tr><tr><td rowspan=\"11\">CIKM</td><td>BERT4RecMB</td><td>0.1792</td><td>0.6990</td><td>0.2451</td><td>0.1687</td><td>0.3552</td><td>0.2042</td></tr><tr><td>LightGCNMB</td><td>0.1705</td><td>0.6979</td><td>0.2332</td><td>0.1584</td><td>0.3466</td><td>0.1949</td></tr><tr><td>MRIG</td><td>0.1795</td><td>0.7026</td><td>0.2489</td><td>0.1696</td><td>0.3649</td><td>0.2068</td></tr><tr><td>MBGCN</td><td>0.1850</td><td>0.6897</td><td>0.2479</td><td>0.1751</td><td>0.3492</td><td>0.2077</td></tr><tr><td>MBGMN</td><td>0.1887</td><td>0.7035</td><td>0.2575</td><td>0.1795</td><td>0.3648</td><td>0.2140</td></tr><tr><td>MGNN</td><td>0.1973</td><td>0.7116</td><td>0.2616</td><td>0.1866</td><td>0.3718</td><td>0.2222</td></tr><tr><td>LightGCNCL</td><td>0.1746</td><td>0.7031</td><td>0.2398</td><td>0.1633</td><td>0.3530</td><td>0.1998</td></tr><tr><td>BERT4RecCL</td><td>0.1984</td><td>0.7282</td><td>0.2728</td><td>0.1912</td><td>0.3929</td><td>0.2281</td></tr><tr><td>MMR</td><td>0.1788</td><td>0.6941</td><td>0.2506</td><td>0.1700</td><td>0.3627</td><td>0.2061</td></tr><tr><td>MMCLR</td><td>0.2046*</td><td>0.7313*</td><td>0.2878*</td><td>0.1981*</td><td>0.4049*</td><td>0.2358*</td></tr><tr><td>Improvement</td><td>3.7%</td><td>2.9%</td><td>10.0%</td><td>6.2%</td><td>8.9%</td><td>6.1%</td></tr></table>",
|
| 1289 |
+
"bbox": [
|
| 1290 |
+
218,
|
| 1291 |
+
244,
|
| 1292 |
+
784,
|
| 1293 |
+
618
|
| 1294 |
+
],
|
| 1295 |
+
"page_idx": 10
|
| 1296 |
+
},
|
| 1297 |
+
{
|
| 1298 |
+
"type": "text",
|
| 1299 |
+
"text": "4.5 Ablation Study (RQ2)",
|
| 1300 |
+
"text_level": 1,
|
| 1301 |
+
"bbox": [
|
| 1302 |
+
215,
|
| 1303 |
+
655,
|
| 1304 |
+
444,
|
| 1305 |
+
671
|
| 1306 |
+
],
|
| 1307 |
+
"page_idx": 10
|
| 1308 |
+
},
|
| 1309 |
+
{
|
| 1310 |
+
"type": "text",
|
| 1311 |
+
"text": "In this section, we aim to prove that MMCLR can solve the three challenges mentioned in the introduction section via three CL tasks. We build seven ablation versions of MMCLR, which are different combinations of CL tasks and the multi-view fusion, to show the effectiveness of different components. Specifically, we regard the basic sequence-based model of MMCLR with multi-behavior information as seq (i.e., BERT4Rec $^{MB}$ ), and the basic graph-based model of enhanced LightGCN with multi-behavior information as graph (i.e., LightGCN $^{MB}$ ). We set seq+graph as the simple multi-view fusion version (i.e., MMR). Moreover, we represent the multi-behavior CL, multi-view CL, and behavior distinction CL as BCL, VCL, and DCL, respectively. The final MMCLR is noted as seq+graph +BCL+VCL+DCL. From Table 2, we can observe that:",
|
| 1312 |
+
"bbox": [
|
| 1313 |
+
212,
|
| 1314 |
+
672,
|
| 1315 |
+
787,
|
| 1316 |
+
840
|
| 1317 |
+
],
|
| 1318 |
+
"page_idx": 10
|
| 1319 |
+
},
|
| 1320 |
+
{
|
| 1321 |
+
"type": "header",
|
| 1322 |
+
"text": "Multi-view Multi-behavior Contrastive Learning in Recommendation",
|
| 1323 |
+
"bbox": [
|
| 1324 |
+
272,
|
| 1325 |
+
114,
|
| 1326 |
+
730,
|
| 1327 |
+
128
|
| 1328 |
+
],
|
| 1329 |
+
"page_idx": 10
|
| 1330 |
+
},
|
| 1331 |
+
{
|
| 1332 |
+
"type": "page_number",
|
| 1333 |
+
"text": "11",
|
| 1334 |
+
"bbox": [
|
| 1335 |
+
767,
|
| 1336 |
+
114,
|
| 1337 |
+
782,
|
| 1338 |
+
126
|
| 1339 |
+
],
|
| 1340 |
+
"page_idx": 10
|
| 1341 |
+
},
|
| 1342 |
+
{
|
| 1343 |
+
"type": "text",
|
| 1344 |
+
"text": "(1) Comparing ablation versions with and without BCL, we find that both sequential and graphic multi-behavior CL tasks are beneficial. BCL tasks even function well on the seq+graph model. The improvements of BCL are impressive, which have over $2\\%$ improvements in most metrics. It is because that multiple behaviors produced by the same user should reflect related preferences of the user. Modeling the coarse-grained commonalities of different behaviors helps to learn better representations to fight against the data sparsity issues. Moreover, through BCL, we can learn better user representations that are more precise and distinguishable from other users'. It reconfirms the effectiveness of the multi-behavior CL in modeling such coarse-grained commonality.(2) Comparing models with and without VCL, we know that the multi-view CL is also essential in multi-view fusion (getting nearly $1\\%$ improvements on most metrics). We also implement a simple fusion model with seq and graph models, whose improvements over single-view models are marginal. The multi-view CL smartly aligns sequence-view and graph-view representations via the CL-based learning, which well captures useful information from both individual and global aspects. These improvements verify the significance of multi-view CL.(3) Comparing with the last two versions, we can observe that the behavior distinction CL further improves the performances on all metrics. The $0.6 - 1.4\\%$ improvements are significant. It verifies that jointly considering both coarse-grained commonalities and fine-grained differences are essential in MMCLR.",
|
| 1345 |
+
"bbox": [
|
| 1346 |
+
212,
|
| 1347 |
+
146,
|
| 1348 |
+
787,
|
| 1349 |
+
465
|
| 1350 |
+
],
|
| 1351 |
+
"page_idx": 11
|
| 1352 |
+
},
|
| 1353 |
+
{
|
| 1354 |
+
"type": "table",
|
| 1355 |
+
"img_path": "images/c11eae1c1918a4d3d9dad26a66b02637bb9ca260e141de5d0246280e4f4cea10.jpg",
|
| 1356 |
+
"table_caption": [
|
| 1357 |
+
"Table 2. Ablation tests on CL tasks and multi-view fusion in MMCLR."
|
| 1358 |
+
],
|
| 1359 |
+
"table_footnote": [],
|
| 1360 |
+
"table_body": "<table><tr><td>Ablation</td><td>HIT@5</td><td>NDCG@5</td><td>HIT@10</td><td>NDCG@10</td></tr><tr><td>seq</td><td>0.2138</td><td>0.1448</td><td>0.3133</td><td>0.1769</td></tr><tr><td>graph</td><td>0.2108</td><td>0.1442</td><td>0.3136</td><td>0.1773</td></tr><tr><td>seq+graph</td><td>0.2152</td><td>0.1466</td><td>0.3108</td><td>0.1773</td></tr><tr><td>seq+BCL</td><td>0.2385</td><td>0.1641</td><td>0.3467</td><td>0.1990</td></tr><tr><td>graph+BCL</td><td>0.2380</td><td>0.1620</td><td>0.3456</td><td>0.1966</td></tr><tr><td>seq+graph+BCL</td><td>0.2418</td><td>0.1632</td><td>0.3527</td><td>0.1988</td></tr><tr><td>seq+graph+BCL+VCL</td><td>0.2521</td><td>0.1722</td><td>0.3614</td><td>0.2074</td></tr><tr><td>MMCLR (final)</td><td>0.2608*</td><td>0.1770*</td><td>0.3751*</td><td>0.2138*</td></tr></table>",
|
| 1361 |
+
"bbox": [
|
| 1362 |
+
282,
|
| 1363 |
+
500,
|
| 1364 |
+
720,
|
| 1365 |
+
643
|
| 1366 |
+
],
|
| 1367 |
+
"page_idx": 11
|
| 1368 |
+
},
|
| 1369 |
+
{
|
| 1370 |
+
"type": "text",
|
| 1371 |
+
"text": "4.6 Results on Cold-start Scenarios (RQ3)",
|
| 1372 |
+
"text_level": 1,
|
| 1373 |
+
"bbox": [
|
| 1374 |
+
215,
|
| 1375 |
+
672,
|
| 1376 |
+
578,
|
| 1377 |
+
686
|
| 1378 |
+
],
|
| 1379 |
+
"page_idx": 11
|
| 1380 |
+
},
|
| 1381 |
+
{
|
| 1382 |
+
"type": "text",
|
| 1383 |
+
"text": "Real-world multi-behavior recommendation systems usually suffer from cold-start issues (e.g., cold-start users that have few historical behaviors), especially for the high-cost purchase behaviors in MBR of E-commerce. Hence, we further conduct an evaluation on the cold-start (user) scenario to verify the effectiveness of MMCLR on more challenging tasks. Without loss of generality, we regard all users that have less than 3 target behaviors in the train set as our cold-start users and select these cold-start users' test instances in the overall Tmall dataset as the test set of the cold-start scenario. To comprehensively display the effectiveness of MMCLR and its multiple CL tasks on the cold-start scenario, we draw three figures in Fig. 2 from different aspects. Precisely, we can observe",
|
| 1384 |
+
"bbox": [
|
| 1385 |
+
212,
|
| 1386 |
+
688,
|
| 1387 |
+
785,
|
| 1388 |
+
840
|
| 1389 |
+
],
|
| 1390 |
+
"page_idx": 11
|
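The cold-start split described above is straightforward to reproduce; a sketch, where `train_interactions` is an assumed list of (user, item, behavior) triples and purchase is the target behavior:

```python
from collections import Counter

def cold_start_users(train_interactions, target="purchase", threshold=3):
    """Users with fewer than `threshold` target behaviors in the train set."""
    target_counts = Counter(u for u, _, b in train_interactions if b == target)
    all_users = {u for u, _, _ in train_interactions}
    return {u for u in all_users if target_counts[u] < threshold}
```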
| 1391 |
+
},
|
| 1392 |
+
{
|
| 1393 |
+
"type": "page_number",
|
| 1394 |
+
"text": "12",
|
| 1395 |
+
"bbox": [
|
| 1396 |
+
217,
|
| 1397 |
+
114,
|
| 1398 |
+
235,
|
| 1399 |
+
126
|
| 1400 |
+
],
|
| 1401 |
+
"page_idx": 11
|
| 1402 |
+
},
|
| 1403 |
+
{
|
| 1404 |
+
"type": "header",
|
| 1405 |
+
"text": "Authors Suppressed Due to Excessive Length",
|
| 1406 |
+
"bbox": [
|
| 1407 |
+
271,
|
| 1408 |
+
114,
|
| 1409 |
+
573,
|
| 1410 |
+
128
|
| 1411 |
+
],
|
| 1412 |
+
"page_idx": 11
|
| 1413 |
+
},
|
| 1414 |
+
{
|
| 1415 |
+
"type": "text",
|
| 1416 |
+
"text": "that: (1) Fig. 2(a) shows different models' NDCG performances in both overall",
|
| 1417 |
+
"bbox": [
|
| 1418 |
+
215,
|
| 1419 |
+
146,
|
| 1420 |
+
784,
|
| 1421 |
+
161
|
| 1422 |
+
],
|
| 1423 |
+
"page_idx": 12
|
| 1424 |
+
},
|
| 1425 |
+
{
|
| 1426 |
+
"type": "image",
|
| 1427 |
+
"img_path": "images/e6496e2d8a75d28bfb847a574905087979e603f721ce62444143c56e21e0a8cd.jpg",
|
| 1428 |
+
"image_caption": [
|
| 1429 |
+
"Fig. 2. Results of different models and ablation versions on the overall and cold-start scenarios. (a) NDCG@10 on the overall and cold-start datasets. (b) MMCLR's relative improvements of NDCG@10 on different baselines. (c) Different MMCLR's ablation versions' relative improvements of NDCG@10 on the baseline MRIG."
|
| 1430 |
+
],
|
| 1431 |
+
"image_footnote": [],
|
| 1432 |
+
"bbox": [
|
| 1433 |
+
215,
|
| 1434 |
+
195,
|
| 1435 |
+
401,
|
| 1436 |
+
323
|
| 1437 |
+
],
|
| 1438 |
+
"page_idx": 12
|
| 1439 |
+
},
|
| 1440 |
+
{
|
| 1441 |
+
"type": "image",
|
| 1442 |
+
"img_path": "images/59b6d5fbec06016b318ddb5d01f3229542410d76c6c7a2bcbe8fa9572c5775d2.jpg",
|
| 1443 |
+
"image_caption": [],
|
| 1444 |
+
"image_footnote": [],
|
| 1445 |
+
"bbox": [
|
| 1446 |
+
405,
|
| 1447 |
+
195,
|
| 1448 |
+
591,
|
| 1449 |
+
321
|
| 1450 |
+
],
|
| 1451 |
+
"page_idx": 12
|
| 1452 |
+
},
|
| 1453 |
+
{
|
| 1454 |
+
"type": "image",
|
| 1455 |
+
"img_path": "images/f56b3052a5aa1ccee9d02ffddc34678fc24cad732e012324285cd6aa7b4cdc70.jpg",
|
| 1456 |
+
"image_caption": [],
|
| 1457 |
+
"image_footnote": [],
|
| 1458 |
+
"bbox": [
|
| 1459 |
+
596,
|
| 1460 |
+
196,
|
| 1461 |
+
779,
|
| 1462 |
+
321
|
| 1463 |
+
],
|
| 1464 |
+
"page_idx": 12
|
| 1465 |
+
},
|
| 1466 |
+
{
|
| 1467 |
+
"type": "text",
|
| 1468 |
+
"text": "and cold-start users. We can know that: (a) All models perform better on the overall users than the cold-start users. (b) Results on both overall and cold-start users have consistent improvements from graph+BCL to MMCLR.(2) Fig. 2(b) shows MMCLR's relative improvements on other models. We find that: (a) Comparing with different models and ablation versions (except MMCLR w/o DCL), MMCLR has higher improvements on cold-start scenarios (e.g., nearly $35\\%$ astonishing improvements on MRIG). It is because that MMCLR can make full use of the multi-behavior and multi-view information via CL tasks, which can alleviate the data sparsity in cold-start users. (b) We notice that DCL brings in a slight improvement on cold-start users. It is natural since cold-start users usually have very few target behaviors, and rely more on auxiliary behaviors via the commonality-led CL tasks as supplements.(3) Fig. 2(c) gives the relative improvements of different MMCLR's ablation versions on MRIG. We observe that: (a) Both sequential and graphic multi-behavior CL, multi-view CL, and behavior distinction CL has improvements on cold-start scenarios. (b) Relatively, the multi-behavior CL contributes more on the overall dataset, while the multiview CL focuses more on the cold-start users. It may be because that a different view can bring in more information for cold-start users thanks to the global graph view and its multi-view CL task.",
|
| 1469 |
+
"bbox": [
|
| 1470 |
+
212,
|
| 1471 |
+
412,
|
| 1472 |
+
787,
|
| 1473 |
+
700
|
| 1474 |
+
],
|
| 1475 |
+
"page_idx": 12
|
| 1476 |
+
},
|
| 1477 |
+
{
|
| 1478 |
+
"type": "text",
|
| 1479 |
+
"text": "4.7 Parameter Analyses (RQ4)",
|
| 1480 |
+
"text_level": 1,
|
| 1481 |
+
"bbox": [
|
| 1482 |
+
215,
|
| 1483 |
+
702,
|
| 1484 |
+
483,
|
| 1485 |
+
717
|
| 1486 |
+
],
|
| 1487 |
+
"page_idx": 12
|
| 1488 |
+
},
|
| 1489 |
+
{
|
| 1490 |
+
"type": "text",
|
| 1491 |
+
"text": "Loss Weight. We start the experiment with different main-task loss weights on the Tmall dataset to explore its influence. We change the weight of supervised $L_{o}$ among $\\{0.2, 1, 2, 4, 8\\}$ . From Fig. 3(a) we can find that: (1) Both HIT@10 and NDCG@10 first increase and then decrease from 0.2 to 8, and MMCLR achieves the best results when $\\lambda_{o} = 1.0$ (here CL loss weights are 0.2, 0.2, 0.2, and 0.05). It indicates that the supervised loss is the fundamental of model training, and a proper loss weight helps to balance the supervised and self-supervised learning. (2) MMCLR consistently outperforms baselines with different weights.",
|
| 1492 |
+
"bbox": [
|
| 1493 |
+
212,
|
| 1494 |
+
719,
|
| 1495 |
+
787,
|
| 1496 |
+
840
|
| 1497 |
+
],
|
| 1498 |
+
"page_idx": 12
|
| 1499 |
+
},
|
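A sketch of the sweep itself; `train_and_eval` is a placeholder for the full training and evaluation routine, and the fixed CL weights follow Sec. 4.3.

```python
def sweep_supervised_weight(train_and_eval):
    """Vary lambda_o over the grid from the text, keeping CL weights fixed."""
    results = {}
    for lam_o in (0.2, 1, 2, 4, 8):
        weights = (lam_o, 0.2, 0.2, 0.2, 0.05)  # (lambda_o, SeqCL, GraphCL, ViewCL, DisCL)
        results[lam_o] = train_and_eval(loss_weights=weights)  # e.g. HIT@10, NDCG@10
    return results
```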
| 1500 |
+
{
|
| 1501 |
+
"type": "header",
|
| 1502 |
+
"text": "Multi-view Multi-behavior Contrastive Learning in Recommendation",
|
| 1503 |
+
"bbox": [
|
| 1504 |
+
272,
|
| 1505 |
+
114,
|
| 1506 |
+
730,
|
| 1507 |
+
128
|
| 1508 |
+
],
|
| 1509 |
+
"page_idx": 12
|
| 1510 |
+
},
|
| 1511 |
+
{
|
| 1512 |
+
"type": "page_number",
|
| 1513 |
+
"text": "13",
|
| 1514 |
+
"bbox": [
|
| 1515 |
+
767,
|
| 1516 |
+
114,
|
| 1517 |
+
784,
|
| 1518 |
+
126
|
| 1519 |
+
],
|
| 1520 |
+
"page_idx": 12
|
| 1521 |
+
},
|
| 1522 |
+
{
|
| 1523 |
+
"type": "text",
|
| 1524 |
+
"text": "It shows the effectiveness and robustness of our model with different loss weights. Embedding Dimension. We also test different input embedding dimensions on",
|
| 1525 |
+
"bbox": [
|
| 1526 |
+
215,
|
| 1527 |
+
146,
|
| 1528 |
+
785,
|
| 1529 |
+
176
|
| 1530 |
+
],
|
| 1531 |
+
"page_idx": 13
|
| 1532 |
+
},
|
| 1533 |
+
{
|
| 1534 |
+
"type": "image",
|
| 1535 |
+
"img_path": "images/d7b6523f2ab9bb019a5765e631bb04de5e9d22a2d738985e324b9c7d9c4e7dbc.jpg",
|
| 1536 |
+
"image_caption": [
|
| 1537 |
+
"Fig. 3. Parameter analyses on (a) loss weights, and (b) embedding dimensions."
|
| 1538 |
+
],
|
| 1539 |
+
"image_footnote": [],
|
| 1540 |
+
"bbox": [
|
| 1541 |
+
274,
|
| 1542 |
+
207,
|
| 1543 |
+
738,
|
| 1544 |
+
330
|
| 1545 |
+
],
|
| 1546 |
+
"page_idx": 13
|
| 1547 |
+
},
|
| 1548 |
+
{
|
| 1549 |
+
"type": "text",
|
| 1550 |
+
"text": "the Tmall dataset. We vary the embedding dimensions in $\\{16, 32, 64, 128, 256\\}$ , and keep other optimal hyper-parameters unchanged. The results of different dimensions are shown in Fig. 3. We observe that the model achieves better performance with bigger dimension, while dimension from 16 to 128. It shows that enough embedding dimension helps to increase model capacity. In contrast, the model with 256 dimensions has worse performance than 128 dimensions. The performance may be suffered from overfitting. It also suggests that too large an embedding dimension is not necessary.",
|
| 1551 |
+
"bbox": [
|
| 1552 |
+
212,
|
| 1553 |
+
378,
|
| 1554 |
+
787,
|
| 1555 |
+
500
|
| 1556 |
+
],
|
| 1557 |
+
"page_idx": 13
|
| 1558 |
+
},
|
| 1559 |
+
{
|
| 1560 |
+
"type": "text",
|
| 1561 |
+
"text": "5 Conclusion",
|
| 1562 |
+
"text_level": 1,
|
| 1563 |
+
"bbox": [
|
| 1564 |
+
215,
|
| 1565 |
+
518,
|
| 1566 |
+
359,
|
| 1567 |
+
535
|
| 1568 |
+
],
|
| 1569 |
+
"page_idx": 13
|
| 1570 |
+
},
|
| 1571 |
+
{
|
| 1572 |
+
"type": "text",
|
| 1573 |
+
"text": "In this work, We study the multi-behavior recommendation problem. Specifically, to alleviate the sparsity problem of target behaviors existing in recommender systems, we propose a novel MMCLR framework to jointly consider the commonalities and differences between different behaviors and views in MBR via three CL tasks. Extensive experimental results verify the effectiveness of our MMCLR and its CL tasks. The performance of MMCLR on cold-start users further demonstrates the superiority of MMCLR on the cold-start problem.",
|
| 1574 |
+
"bbox": [
|
| 1575 |
+
212,
|
| 1576 |
+
547,
|
| 1577 |
+
787,
|
| 1578 |
+
654
|
| 1579 |
+
],
|
| 1580 |
+
"page_idx": 13
|
| 1581 |
+
},
|
| 1582 |
+
{
|
| 1583 |
+
"type": "text",
|
| 1584 |
+
"text": "6 Acknowledgments",
|
| 1585 |
+
"text_level": 1,
|
| 1586 |
+
"bbox": [
|
| 1587 |
+
215,
|
| 1588 |
+
674,
|
| 1589 |
+
426,
|
| 1590 |
+
691
|
| 1591 |
+
],
|
| 1592 |
+
"page_idx": 13
|
| 1593 |
+
},
|
| 1594 |
+
{
|
| 1595 |
+
"type": "text",
|
| 1596 |
+
"text": "The research work supported by the National Natural Science Foundation of China under Grant No.61976204, U1811461, U1836206. Xiang Ao is also supported by the Project of Youth Innovation Promotion Association CAS, Beijing Nova Program Z201100006820062.",
|
| 1597 |
+
"bbox": [
|
| 1598 |
+
212,
|
| 1599 |
+
702,
|
| 1600 |
+
787,
|
| 1601 |
+
763
|
| 1602 |
+
],
|
| 1603 |
+
"page_idx": 13
|
| 1604 |
+
},
|
| 1605 |
+
{
|
| 1606 |
+
"type": "text",
|
| 1607 |
+
"text": "References",
|
| 1608 |
+
"text_level": 1,
|
| 1609 |
+
"bbox": [
|
| 1610 |
+
215,
|
| 1611 |
+
782,
|
| 1612 |
+
323,
|
| 1613 |
+
799
|
| 1614 |
+
],
|
| 1615 |
+
"page_idx": 13
|
| 1616 |
+
},
|
| 1617 |
+
{
|
| 1618 |
+
"type": "ref_text",
|
| 1619 |
+
"text": "1. Chen, C., Ma, W., Zhang, M., Wang, Z., He, X., Wang, C., Liu, Y., Ma, S.: Graph heterogeneous multi-relational recommendation. In: Proceedings of AAAI (2021)",
|
| 1620 |
+
"bbox": [
|
| 1621 |
+
222,
|
| 1622 |
+
811,
|
| 1623 |
+
785,
|
| 1624 |
+
840
|
| 1625 |
+
],
|
| 1626 |
+
"page_idx": 13
|
| 1627 |
+
},
|
| 1628 |
+
{
|
| 1629 |
+
"type": "page_number",
|
| 1630 |
+
"text": "14",
|
| 1631 |
+
"bbox": [
|
| 1632 |
+
217,
|
| 1633 |
+
114,
|
| 1634 |
+
235,
|
| 1635 |
+
126
|
| 1636 |
+
],
|
| 1637 |
+
"page_idx": 13
|
| 1638 |
+
},
|
| 1639 |
+
{
|
| 1640 |
+
"type": "header",
|
| 1641 |
+
"text": "Authors Suppressed Due to Excessive Length",
|
| 1642 |
+
"bbox": [
|
| 1643 |
+
271,
|
| 1644 |
+
114,
|
| 1645 |
+
573,
|
| 1646 |
+
128
|
| 1647 |
+
],
|
| 1648 |
+
"page_idx": 13
|
| 1649 |
+
},
|
| 1650 |
+
{
|
| 1651 |
+
"type": "list",
|
| 1652 |
+
"sub_type": "ref_text",
|
| 1653 |
+
"list_items": [
|
| 1654 |
+
"2. Chen, C., Zhang, M., Zhang, Y., Ma, W., Liu, Y., Ma, S.: Efficient heterogeneous collaborative filtering without negative sampling for recommendation. In: Proceedings of AAAI (2020)",
|
| 1655 |
+
"3. Chen, X., Xu, H., Zhang, Y., Tang, J., Cao, Y., Qin, Z., Zha, H.: Sequential recommendation with user memory networks. In: Proceedings of WSDM (2018)",
|
| 1656 |
+
"4. Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint (2018)",
|
| 1657 |
+
"5. Doersch, C., Gupta, A., Efros, A.A.: Unsupervised visual representation learning by context prediction. In: Proceedings of ICCV (2015)",
|
| 1658 |
+
"6. He, X., Deng, K., Wang, X., Li, Y., Zhang, Y., Wang, M.: Lightgcn: Simplifying and powering graph convolution network for recommendation. In: Proceedings of SIGIR (2020)",
|
| 1659 |
+
"7. Hidasi, B., Karatzoglou, A., Baltrunas, L., Tikk, D.: Session-based recommendations with recurrent neural networks. In: ICLR (2016)",
|
| 1660 |
+
"8. Huang, J.T., Sharma, A., Sun, S., Xia, L., Zhang, D., Pronin, P., Padmanabhan, J., Ottaviano, G., Yang, L.: Embedding-based retrieval in facebook search. In: Proceedings of KDD (2020)",
|
| 1661 |
+
"9. Jin, B., Gao, C., He, X., Jin, D., Li, Y.: Multi-behavior recommendation with graph convolutional networks. In: Proceedings of SIGIR (2020)",
|
| 1662 |
+
"10. Pan, F., Li, S., Ao, X., Tang, P., He, Q.: Warm up cold-start advertisements: Improving ctr predictions via learning to learn id embeddings. In: Proceedings of SIGIR. pp. 695-704 (2019)",
|
| 1663 |
+
"11. Pan, W., Xiang, E., Liu, N., Yang, Q.: Transfer learning in collaborative filtering for sparsity reduction. In: Proceedings of AAAI. vol. 24 (2010)",
|
| 1664 |
+
"12. Perozzi, B., Al-Rfou, R., Skiena, S.: Deepwalk: Online learning of social representations. In: Proceedings of KDD (2014)",
|
| 1665 |
+
"13. Rendle, S., Freudenthaler, C., Gantner, Z., Schmidt-Thieme, L.: Bpr: Bayesian personalized ranking from implicit feedback. arXiv preprint (2012)",
|
| 1666 |
+
"14. Singh, A.P., Gordon, G.J.: Relational learning via collective matrix factorization. In: Proceedings of KDD. pp. 650-658 (2008)",
|
| 1667 |
+
"15. Sun, F., Liu, J., Wu, J., Pei, C., Lin, X., Ou, W., Jiang, P.: Bert4rec: Sequential recommendation with bidirectional encoder representations from transformer. In: Proceedings of CIKM (2019)",
|
| 1668 |
+
"16. Wang, W., Zhang, W., Liu, S., Liu, Q., Zhang, B., Lin, L., Zha, H.: Beyond clicks: Modeling multi-relational item graph for session-based target behavior prediction. In: Proceedings of WWW (2020)",
|
| 1669 |
+
"17. Wang, X., He, X., Wang, M., Feng, F., Chua, T.S.: Neural graph collaborative filtering. In: Proceedings of SIGIR (2019)",
|
| 1670 |
+
"18. Wu, J., Wang, X., Feng, F., He, X., Chen, L., Lian, J., Xie, X.: Self-supervised graph learning for recommendation. In: Proceedings of SIGIR (2021)",
|
| 1671 |
+
"19. Xi, D., Chen, Z., Yan, P., Zhang, Y., Zhu, Y., Zhuang, F., Chen, Y.: Modeling the sequential dependence among audience multi-step conversions with multi-task learning in targeted display advertising. In: Proceedings of KDD (2021)",
|
| 1672 |
+
"20. Xi, D., Zhuang, F., Song, B., Zhu, Y., Chen, S., Hong, D., Chen, T., Gu, X., He, Q.: Neural hierarchical factorization machines for user's event sequence analysis. In: Proceedings of SIGIR. pp. 1893-1896 (2020)",
|
| 1673 |
+
"21. Xia, L., Huang, C., Xu, Y., Dai, P., Zhang, X., Yang, H., Pei, J., Bo, L.: Knowledge-enhanced hierarchical graph transformer network for multi-behavior recommendation. In: Proceedings of AAAI (2021)",
|
| 1674 |
+
"22. Xia, L., Xu, Y., Huang, C., Dai, P., Bo, L.: Graph meta network for multi-behavior recommendation. In: Proceedings of SIGIR (2021)"
|
| 1675 |
+
],
|
| 1676 |
+
"bbox": [
|
| 1677 |
+
218,
|
| 1678 |
+
146,
|
| 1679 |
+
787,
|
| 1680 |
+
839
|
| 1681 |
+
],
|
| 1682 |
+
"page_idx": 14
|
| 1683 |
+
},
|
| 1684 |
+
{
|
| 1685 |
+
"type": "header",
|
| 1686 |
+
"text": "Multi-view Multi-behavior Contrastive Learning in Recommendation",
|
| 1687 |
+
"bbox": [
|
| 1688 |
+
272,
|
| 1689 |
+
114,
|
| 1690 |
+
730,
|
| 1691 |
+
128
|
| 1692 |
+
],
|
| 1693 |
+
"page_idx": 14
|
| 1694 |
+
},
|
| 1695 |
+
{
|
| 1696 |
+
"type": "page_number",
|
| 1697 |
+
"text": "15",
|
| 1698 |
+
"bbox": [
|
| 1699 |
+
767,
|
| 1700 |
+
116,
|
| 1701 |
+
784,
|
| 1702 |
+
126
|
| 1703 |
+
],
|
| 1704 |
+
"page_idx": 14
|
| 1705 |
+
},
|
| 1706 |
+
{
|
| 1707 |
+
"type": "list",
|
| 1708 |
+
"sub_type": "ref_text",
|
| 1709 |
+
"list_items": [
|
| 1710 |
+
"23. Xiao, C., Xie, R., Yao, Y., Liu, Z., Sun, M., Zhang, X., Lin, L.: Uprec: User-aware pre-training for recommender systems. arXiv preprint (2021)",
|
| 1711 |
+
"24. Xie, R., Liu, Q., Wang, L., Liu, S., Zhang, B., Lin, L.: Contrastive cross-domain recommendation in matching (2021)",
|
| 1712 |
+
"25. Xie, R., Liu, Y., Zhang, S., Wang, R., Xia, F., Lin, L.: Personalized approximate pareto-efficient recommendation. In: Proceedings of the Web Conference 2021. pp. 3839-3849 (2021)",
|
| 1713 |
+
"26. Xie, R., Qiu, Z., Rao, J., Liu, Y., Zhang, B., Lin, L.: Internal and contextual attention network for cold-start multi-channel matching in recommendation. In: Proceedings of IJCAI. pp. 2732-2738 (2020)",
|
| 1714 |
+
"27. Xie, R., Wang, Y., Wang, R., Lu, Y., Zou, Y., Xia, F., Lin, L.: Long short-term temporal meta-learning in online recommendation. In: Proceedings of WSDM (2022)",
|
| 1715 |
+
"28. Xie, X., Sun, F., Liu, Z., Wu, S., Gao, J., Ding, B., Cui, B.: Contrastive learning for sequential recommendation. arXiv preprint (2020)",
|
| 1716 |
+
"29. Ying, H., Zhuang, F., Zhang, F., Liu, Y., Xu, G., Xie, X., Xiong, H., Wu, J.: Sequential recommender system based on hierarchical attention network. In: Proceedings of IJCAI (2018)",
|
| 1717 |
+
"30. Zeng, Z., Xiao, C., Yao, Y., Xie, R., Liu, Z., Lin, F., Lin, L., Sun, M.: Knowledge transfer via pre-training for recommendation: A review and prospect. Frontiers in big Data (2021)",
|
| 1718 |
+
"31. Zhang, R., Isola, P., Efros, A.A.: Colorful image colorization. In: Proceedings of ECCV (2016)",
|
| 1719 |
+
"32. Zhang, W., Mao, J., Cao, Y., Xu, C.: Multiplex graph neural networks for multi-behavior recommendation. In: Proceedings of CIKM (2020)",
|
| 1720 |
+
"33. Zheng, Y., Gao, C., He, X., Li, Y., Jin, D.: Price-aware recommendation with graph convolutional networks. In: Proceedings of ICDE (2020)",
|
| 1721 |
+
"34. Zhou, C., Bai, J., Song, J., Liu, X., Zhao, Z., Chen, X., Gao, J.: Atrank: An attention-based user behavior modeling framework for recommendation. In: Proceedings of AAAI (2018)",
|
| 1722 |
+
"35. Zhou, G., Zhu, X., Song, C., Fan, Y., Zhu, H., Ma, X., Yan, Y., Jin, J., Li, H., Gai, K.: Deep interest network for click-through rate prediction. In: Proceedings of KDD (2018)",
|
| 1723 |
+
"36. Zhou, K., Wang, H., Zhao, W.X., Zhu, Y., Wang, S., Zhang, F., Wang, Z., Wen, J.R.: S3-rec: Self-supervised learning for sequential recommendation with mutual information maximization. In: Proceedings of CIKM (2020)",
|
| 1724 |
+
"37. Zhu, Y., Ge, K., Zhuang, F., Xie, R., Xi, D., Zhang, X., Lin, L., He, Q.: Transfermeta framework for cross-domain recommendation to cold-start users. In: Proceedings of SIGIR (2021)",
|
| 1725 |
+
"38. Zhu, Y., Tang, Z., Liu, Y., Zhuang, F., Xie, R., Zhang, X., Lin, L., He, Q.: Personalized transfer of user preferences for cross-domain recommendation. In: Proceedings of WSDM (2021)",
|
| 1726 |
+
"39. Zhu, Y., Xie, R., Zhuang, F., Ge, K., Sun, Y., Zhang, X., Lin, L., Cao, J.: Learning to warm up cold item embeddings for cold-start recommendation with meta scaling and shifting networks. In: Proceedings of SIGIR (2021)",
|
| 1727 |
+
"40. Zhu, Y., Zhuang, F., Wang, J., Chen, J., Shi, Z., Wu, W., He, Q.: Multi-representation adaptation network for cross-domain image classification. Neural Networks 119, 214-221 (2019)",
|
| 1728 |
+
"41. Zhu, Y., Zhuang, F., Wang, J., Ke, G., Chen, J., Bian, J., Xiong, H., He, Q.: Deep subdomain adaptation network for image classification. TNNLS (2020)",
|
| 1729 |
+
"42. Zhuang, F., Qi, Z., Duan, K., Xi, D., Zhu, Y., Zhu, H., Xiong, H., He, Q.: A comprehensive survey on transfer learning. Proceedings of the IEEE (2020)"
|
| 1730 |
+
],
|
| 1731 |
+
"bbox": [
|
| 1732 |
+
215,
|
| 1733 |
+
147,
|
| 1734 |
+
784,
|
| 1735 |
+
833
|
| 1736 |
+
],
|
| 1737 |
+
"page_idx": 15
|
| 1738 |
+
},
|
| 1739 |
+
{
|
| 1740 |
+
"type": "page_number",
|
| 1741 |
+
"text": "16",
|
| 1742 |
+
"bbox": [
|
| 1743 |
+
217,
|
| 1744 |
+
114,
|
| 1745 |
+
235,
|
| 1746 |
+
126
|
| 1747 |
+
],
|
| 1748 |
+
"page_idx": 15
|
| 1749 |
+
},
|
| 1750 |
+
{
|
| 1751 |
+
"type": "header",
|
| 1752 |
+
"text": "Authors Suppressed Due to Excessive Length",
|
| 1753 |
+
"bbox": [
|
| 1754 |
+
271,
|
| 1755 |
+
114,
|
| 1756 |
+
573,
|
| 1757 |
+
128
|
| 1758 |
+
],
|
| 1759 |
+
"page_idx": 15
|
| 1760 |
+
}
|
| 1761 |
+
]
|