Add Batch c8a9e06a-c346-4ddd-a162-a51aa5721d31
This view is limited to 50 files because it contains too many changes. See raw diff.
- .gitattributes +64 -0
- 2202.03xxx/2202.03026/c915b884-4a03-41af-9dad-6b6303b5a5dd_content_list.json +0 -0
- 2202.03xxx/2202.03026/c915b884-4a03-41af-9dad-6b6303b5a5dd_model.json +0 -0
- 2202.03xxx/2202.03026/c915b884-4a03-41af-9dad-6b6303b5a5dd_origin.pdf +3 -0
- 2202.03xxx/2202.03026/full.md +500 -0
- 2202.03xxx/2202.03026/images.zip +3 -0
- 2202.03xxx/2202.03026/layout.json +0 -0
- 2202.03xxx/2202.03028/44aa698c-cf31-45b6-996a-147e65cbcb97_content_list.json +0 -0
- 2202.03xxx/2202.03028/44aa698c-cf31-45b6-996a-147e65cbcb97_model.json +0 -0
- 2202.03xxx/2202.03028/44aa698c-cf31-45b6-996a-147e65cbcb97_origin.pdf +3 -0
- 2202.03xxx/2202.03028/full.md +465 -0
- 2202.03xxx/2202.03028/images.zip +3 -0
- 2202.03xxx/2202.03028/layout.json +0 -0
- 2202.03xxx/2202.03036/8d79809e-01a0-4bda-9822-cf005dbb67bd_content_list.json +0 -0
- 2202.03xxx/2202.03036/8d79809e-01a0-4bda-9822-cf005dbb67bd_model.json +0 -0
- 2202.03xxx/2202.03036/8d79809e-01a0-4bda-9822-cf005dbb67bd_origin.pdf +3 -0
- 2202.03xxx/2202.03036/full.md +640 -0
- 2202.03xxx/2202.03036/images.zip +3 -0
- 2202.03xxx/2202.03036/layout.json +0 -0
- 2202.03xxx/2202.03047/35db8a53-2285-4000-8359-95bda668e6ac_content_list.json +1733 -0
- 2202.03xxx/2202.03047/35db8a53-2285-4000-8359-95bda668e6ac_model.json +2424 -0
- 2202.03xxx/2202.03047/35db8a53-2285-4000-8359-95bda668e6ac_origin.pdf +3 -0
- 2202.03xxx/2202.03047/full.md +288 -0
- 2202.03xxx/2202.03047/images.zip +3 -0
- 2202.03xxx/2202.03047/layout.json +0 -0
- 2202.03xxx/2202.03052/7c482f11-4fe8-48dc-95df-1f574b21fa82_content_list.json +0 -0
- 2202.03xxx/2202.03052/7c482f11-4fe8-48dc-95df-1f574b21fa82_model.json +0 -0
- 2202.03xxx/2202.03052/7c482f11-4fe8-48dc-95df-1f574b21fa82_origin.pdf +3 -0
- 2202.03xxx/2202.03052/full.md +580 -0
- 2202.03xxx/2202.03052/images.zip +3 -0
- 2202.03xxx/2202.03052/layout.json +0 -0
- 2202.03xxx/2202.03086/a8d50bac-a469-485a-80c5-e14a27594346_content_list.json +0 -0
- 2202.03xxx/2202.03086/a8d50bac-a469-485a-80c5-e14a27594346_model.json +0 -0
- 2202.03xxx/2202.03086/a8d50bac-a469-485a-80c5-e14a27594346_origin.pdf +3 -0
- 2202.03xxx/2202.03086/full.md +0 -0
- 2202.03xxx/2202.03086/images.zip +3 -0
- 2202.03xxx/2202.03086/layout.json +0 -0
- 2202.03xxx/2202.03091/bd9d16b8-1b86-4a7c-a8fd-9c6b49e0a193_content_list.json +0 -0
- 2202.03xxx/2202.03091/bd9d16b8-1b86-4a7c-a8fd-9c6b49e0a193_model.json +0 -0
- 2202.03xxx/2202.03091/bd9d16b8-1b86-4a7c-a8fd-9c6b49e0a193_origin.pdf +3 -0
- 2202.03xxx/2202.03091/full.md +437 -0
- 2202.03xxx/2202.03091/images.zip +3 -0
- 2202.03xxx/2202.03091/layout.json +0 -0
- 2202.03xxx/2202.03104/5ca1dd62-6f9a-4416-b862-99d48cad9bd8_content_list.json +1941 -0
- 2202.03xxx/2202.03104/5ca1dd62-6f9a-4416-b862-99d48cad9bd8_model.json +0 -0
- 2202.03xxx/2202.03104/5ca1dd62-6f9a-4416-b862-99d48cad9bd8_origin.pdf +3 -0
- 2202.03xxx/2202.03104/full.md +402 -0
- 2202.03xxx/2202.03104/images.zip +3 -0
- 2202.03xxx/2202.03104/layout.json +0 -0
- 2202.03xxx/2202.03107/a94ab9aa-3075-44f9-9854-b8ff37a3aba4_content_list.json +1215 -0
.gitattributes
CHANGED
@@ -8119,3 +8119,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 2202.05xxx/2202.05607/9a583e1d-23c5-4f01-974f-e517affa8b0c_origin.pdf filter=lfs diff=lfs merge=lfs -text
 2202.05xxx/2202.05613/0dc5c0cf-5e6f-4016-a626-cbcbb04dc557_origin.pdf filter=lfs diff=lfs merge=lfs -text
 2202.05xxx/2202.05656/bd5bcdea-9e8c-418b-a9a0-1f2c4511619a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03026/c915b884-4a03-41af-9dad-6b6303b5a5dd_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03028/44aa698c-cf31-45b6-996a-147e65cbcb97_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03036/8d79809e-01a0-4bda-9822-cf005dbb67bd_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03047/35db8a53-2285-4000-8359-95bda668e6ac_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03052/7c482f11-4fe8-48dc-95df-1f574b21fa82_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03086/a8d50bac-a469-485a-80c5-e14a27594346_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03091/bd9d16b8-1b86-4a7c-a8fd-9c6b49e0a193_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03104/5ca1dd62-6f9a-4416-b862-99d48cad9bd8_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03107/a94ab9aa-3075-44f9-9854-b8ff37a3aba4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03131/bdc0ceca-7155-4428-bff9-af12627b2a18_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03169/061992c0-37da-43eb-a321-b882ed008807_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03218/83afd948-1e3c-4270-bc36-8a56a86b194e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03278/1e27d05c-4158-42da-bf9a-ea4fdcd7c7f9_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03286/b75f3c46-e646-472e-a38b-82e370806e0c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03299/6ea767e7-16df-4d56-8d81-96acb8a72183_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03326/b9240c5a-07f4-4c0b-ab68-fc0aacedf2c9_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03335/2d507ab0-e187-4a14-97cc-352bd0513ebd_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03338/857a978c-a9d7-4c7a-a8f3-3967377987c6_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03342/24b5e3b8-7a3d-47d0-a028-0c68e8159bbf_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03347/70c654aa-065c-45da-8953-8796a1a018ee_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03373/4ef65edc-a8cf-4ea3-8dba-aa918451a0c4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03376/2139904b-1629-4a3d-a7f4-1eb3e92e732e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03377/9a7feea3-c860-4964-a23a-de7c72a387cc_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03382/6fec954c-22cc-4e82-be58-c3077a6ff7b8_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03390/586a3c32-5f5d-41e8-ac31-02a03979846c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03392/aa008220-3ed4-47a2-9ae9-cd9a2513629d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03590/d9b86647-ceb9-4541-aafb-792d4254c5f6_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03599/958d589b-9e4f-4c96-9355-0ec0bf62f942_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03613/6f6f8bb0-a9e8-45b0-b14e-bed46b9688b0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03629/af65c9a9-3a3a-4b96-86ed-e7b677412ec7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03630/9f7f8912-9ea3-4da8-a127-f311539a4365_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03631/414db517-230b-46e9-9b37-75dc4bad4324_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03643/249d43e7-57a2-4067-9dd7-385271d9b241_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03666/46b23c6b-1d48-4bea-8bd1-61e3bc163ce5_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03670/d9840fd8-cef7-42f6-9171-4386b07067a8_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03673/29f5d881-5fda-4e89-a0ab-9006b0caf354_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03680/ded7a4e1-1774-46db-8d68-1d4b88047d60_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03705/73d49bf5-2feb-48c6-9408-3552c45a1d76_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03737/32f75eaf-2e59-43d0-b6bf-c6f532939763_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03762/d7c0a289-2e09-4627-870f-98514396c281_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03772/2dfea703-3d9e-48d4-be65-51ccc587ca6f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03807/4e665d62-1abe-4f8a-b969-49270ceb4e47_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03822/532f7e64-5a4f-422f-ab94-900dc3bde2ef_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03825/cef55a12-21b4-4a70-80b5-888ed01105ba_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03829/bc86309b-84a9-499d-bd2f-6e4769a82d4b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03836/e048e0cc-a6e4-44c1-b4d6-876973f7aa3c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03851/01de9084-e513-4356-b824-05f7f1b1d523_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03857/df0a6ce3-8995-43a7-b46a-221d41bbbafd_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03947/1e31a1c6-9665-4d11-8790-12e7de1a6bd2_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.03xxx/2202.03958/4b8d93c0-2178-4eb1-a727-2d693e0a4e73_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.04xxx/2202.04074/21c70517-8433-47cd-83b8-0e85b9d8382f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.04xxx/2202.04075/ed440f37-d028-4343-8a2e-87f8fb3d6aa4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.04xxx/2202.04121/357c2e4c-4c2b-49e7-b326-b10ff6490c9b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.04xxx/2202.04129/a48ef8f0-62bb-4037-bd43-74e3af6e5e15_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.04xxx/2202.04141/85dbe885-2bb9-4f13-9e9a-927b35998cc9_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.04xxx/2202.04173/d17ce198-28d6-4543-b9bf-7e8826416bd5_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.04xxx/2202.04175/f94d9a3d-d53b-4af2-adc8-78e504aa65e6_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.04xxx/2202.04187/7f013753-f5d2-4df0-9530-e9219c77d8da_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.04xxx/2202.04757/4ed360f3-8019-4ba1-b0f9-1414b36dc1fa_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.04xxx/2202.04770/1cedd3f0-044b-483a-9320-3405dac4dc47_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.05xxx/2202.05145/6caeeb80-d3a9-4a0a-b2f7-09f4edfe745e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.05xxx/2202.05146/637c471c-e0aa-400c-967e-126f59e423f7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.05xxx/2202.05679/57f2869f-03fe-4463-9808-96425847bc7f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.07xxx/2203.07814/d7795d68-0200-452e-9f54-cf5731f27dc2_origin.pdf filter=lfs diff=lfs merge=lfs -text
2202.03xxx/2202.03026/c915b884-4a03-41af-9dad-6b6303b5a5dd_content_list.json
ADDED
The diff for this file is too large to render. See raw diff.
2202.03xxx/2202.03026/c915b884-4a03-41af-9dad-6b6303b5a5dd_model.json
ADDED
The diff for this file is too large to render. See raw diff.
2202.03xxx/2202.03026/c915b884-4a03-41af-9dad-6b6303b5a5dd_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:26bfab0ac4611ca976d57c3a2b355f85a9c610e75bbfcbea6f1bbe78de39eb1e
+size 11860943
2202.03xxx/2202.03026/full.md
ADDED
@@ -0,0 +1,500 @@
# Context Autoencoder for Self-Supervised Representation Learning

Xiaokang Chen $^{1}$ · Mingyu Ding $^{2,3}$ · Xiaodi Wang $^{4}$ · Ying Xin $^{4}$ · Shentong Mo $^{4}$ · Yunhao Wang $^{4}$ · Shumin Han $^{4}$ · Ping Luo $^{2}$ · Gang Zeng $^{1}$ · Jingdong Wang $^{4}$

Received: date / Accepted: date

Abstract We present a novel masked image modeling (MIM) approach, context autoencoder (CAE), for self-supervised representation pretraining. We pretrain an encoder by making predictions in the encoded representation space. The pretraining consists of two tasks: masked representation prediction - predict the representations for the masked patches, and masked patch reconstruction - reconstruct the masked patches. The network is an encoder-regressor-decoder architecture: the encoder takes the visible patches as input; the regressor predicts the representations of the masked patches, which are expected to be aligned with the representations computed from the encoder, using the representations of visible patches and the positions of visible and masked patches; the decoder reconstructs the masked patches from the predicted encoded representations. The CAE design encourages the separation of learning the encoder (representation) from completing the pretraining tasks (masked representation prediction and masked patch reconstruction), and making predictions in the encoded representation space empirically shows the benefit to representation learning. We demonstrate the effectiveness of our CAE through superior transfer performance in downstream tasks: semantic segmentation, object detection and instance segmentation, and classification. The code will be available at https://github.com/Atten4Vis/CAE.

Keywords Self-Supervised Representation Learning, Masked Image Modeling, Context Autoencoder

$^{1}$ Peking University
$^{2}$ University of Hong Kong
$^{3}$ UC Berkeley
$^{4}$ Baidu
wangjingdong@outlook.com


Fig. 1: The pipeline of context autoencoder. Our approach (a) feeds visible patches into the encoder and extracts their representations $\mathbf{Z}_v$ and then (b) completes the pretext tasks: predict the representations $\mathbf{Z}_m$ of the masked patches from the visible patches in the encoded representation space through the latent contextual regressor and prediction alignment, and reconstruct the masked patches from the predicted representations $\mathbf{Z}_m$ of the masked patches. The pretrained encoder in (a) is applied to downstream tasks by simply replacing the pretext task part (b) with the downstream task part. // means stop gradient.
# 1 Introduction

We study the masked image modeling (MIM) task for self-supervised representation learning. It aims to learn an encoder through masking some patches of the input image and making predictions for the masked patches from the visible patches. It is expected that the resulting encoder, pretrained by solving the MIM task, is able to extract patch representations that take on semantics and transfer to downstream tasks.

The typical MIM methods, such as BEiT [4], the method studied in the ViT paper [26], and iBoT [104], use a single ViT architecture to solve the pretraining task, i.e., reconstructing the patch tokens or the pixel colors. These methods mix the two tasks: learning the encoder (representation) and reconstructing the masked patches. The subsequent method, masked autoencoder (MAE) [38], adopts an encoder-decoder architecture, only partially decoupling the two tasks. As a result, the representation quality is limited. Most previous methods, except iBoT [104], lack an explicit modeling between the encoded representations of visible patches and masked patches.

We present a context autoencoder (CAE) approach, illustrated in Figure 1, for improving the encoding quality. We pretrain the encoder through making predictions for the masked patches in the encoded representation space. The pretraining task is a combination of masked representation prediction and masked patch reconstruction. The pretraining network is an encoder-regressor-decoder architecture. The encoder takes only the visible patches as input and learns the representations only for the visible patches. The regressor predicts the masked patch representations, which are expected to be aligned with the representations of the masked patches computed from the encoder, from the visible patch representations. The decoder reconstructs the masked patches from the predicted masked patch representations without receiving the representations of the visible patches.

The prediction in the encoded representation space from the visible patches to the masked patches generates a plausible semantic guess for the masked patches, which lies in the same semantic space as the visible patches. We assume that the prediction is easier if the encoded representations take on higher semantics, and that accurate prediction encourages the encoded representations to take on a larger extent of semantics; this is empirically validated by the experiments.

The CAE design also encourages the separation of learning the encoder and completing the pretraining tasks: the responsibility of representation learning is mainly taken by the encoder, and the encoder is only for representation learning. The reasons include: the encoder in the top stream in Figure 1 operates only on visible patches, focusing only on learning semantic representations; the regression is done in the encoded representation space, as a mapping between the representations of the visible patches and the masked patches; the decoder operates only on the predicted representations of the masked patches.

We present the empirical performance of our approach on downstream tasks: semantic segmentation, object detection and instance segmentation, and classification. The results show that our approach outperforms supervised pretraining, contrastive self-supervised pretraining, and other MIM methods.
# 2 Related Work

Self-supervised representation learning has been widely studied in computer vision, including: context prediction [24,75], clustering-based methods [88,93,8,1,105,45,9,36], contrastive self-supervised learning [55,65,41,80], instance discrimination [28,27], image discretization [34,35], masked image modeling [59,31,74], and information maximization [30,97,5]. The following mainly reviews closely-related methods.

Autoencoding. Traditionally, autoencoders were used for dimensionality reduction or feature learning [53,32,43,42,70,78,51]. The denoising autoencoder (DAE) is an autoencoder that receives a corrupted data point as input and is trained to estimate the original, uncorrupted data point as its output. Variants or modifications of the DAE were adopted for self-supervised representation learning, e.g., corruption by masking pixels [79,66,15], removing color channels [100], shuffling image patches [64], denoising pixel-level noise [2], and so on.

Contrastive self-supervised learning. Contrastive self-supervised learning, referring in this paper to the self-supervised approaches that compare random views with a contrastive loss or simply an MSE loss (which are related, as shown in [33]), has been popular for self-supervised representation learning [18,39,73,21,37,11,20,10,85,67]. The basic idea is to maximize the similarity between views augmented from the same image and optionally minimize the similarity between views augmented from different images. Random cropping is an important augmentation scheme, and thus typical contrastive self-supervised learning methods (e.g., MoCo v3) tend to learn knowledge mainly from the central regions of the original images. Some dense variants [82,90] eliminate this tendency to a limited degree by considering an extra contrastive loss over dense patches.

Masked image modeling. Motivated by BERT for masked language modeling [23], the method studied in [26] and BEiT [4] use the ViT structure to solve the masked image modeling task, e.g., estimate the pixels or the discrete tokens. The follow-up work, iBOT [104], combines the MIM method (BEiT) and a contrastive self-supervised approach (DINO [11]). But they do not explicitly have an encoder for representation learning or a decoder for pretraining task completion, and the ViT structure is essentially a mixture of encoder and decoder, limiting the representation learning quality.

Several subsequent MIM methods are developed to improve the encoder quality, such as designing pretraining architectures: Masked Autoencoder (MAE) [38], SplitMask [29], and Simple MIM (SimMIM) [91]; and adopting new reconstruction targets: Masked Feature Prediction (MaskFeat) [83], Perceptual Codebook for BEiT (PeCo) [25], and data2vec [3]. The technical report of our approach was initially published as an arXiv paper [19], and was concurrent to data2vec [3], MAE [38], and other methods, such as [29,91]. After that, MIM methods have developed rapidly, e.g., extended to the frequency/semantic domain [87,61,84,58], combined with contrastive self-supervised learning [72,49,94,47], efficient pretraining [101,46,13], mask strategy design [50,54,57], scalability of MIM [92], and interpretation of MIM [89,56,52].

The core idea of our approach is making predictions in the encoded representation space. We jointly solve two pretraining tasks: masked representation prediction - predict the representations for the masked patches, where the representations lie in the representation space output from the encoder, and masked patch reconstruction - reconstruct the masked patches.

Our approach is clearly different from MAE [38] (Figure 2 (top)). Our approach introduces an extra pretraining task, masked representation prediction, and encourages the separation of two roles: learning the encoder and completing pretraining tasks; in contrast, MAE partially mixes the two roles and has no explicit prediction of masked patch representations.

On the other hand, our approach differs from data2vec [3] and iBoT [104] (Figure 2 (bottom)). Similar to BEiT, in data2vec and iBoT there is no explicit module separation between learning the encoder and estimating the masked patch representations, and the target representations are formed from the full view (as the teacher) with the same network as the student network that processes the masked view and predicts the masked patch representations (except a centering process in iBoT for the teacher, following DINO). In contrast, our approach is simple: form the target representations merely from the output of the encoder. The encoder-regressor design is straightforward and explainable: the regressor predicts the representations of masked patches to match the representations computed directly from the encoder.
# 3 Approach

# 3.1 Architecture

Our context autoencoder (CAE) is a masked image modeling approach. The network shown in Figure 1 is an encoder-regressor-decoder architecture. The key is to make predictions from visible patches to masked patches in the encoded representation space. The pretraining tasks include: masked representation prediction and masked patch reconstruction.


Fig. 2: The pipeline of MAE (top), and the MIM part of iBoT (bottom). The centering module is not depicted in the bottom stream. The pretrained encoder in (a) is applied to downstream tasks by simply replacing the pretext task part (b) with the downstream task part. // means stop gradient.

We randomly split an image into two sets of patches: visible patches $\mathbf{X}_v$ and masked patches $\mathbf{X}_m$. The encoder takes the visible patches as input; the regressor predicts the representations of the masked patches, which are expected to be aligned with the representations computed from the encoder, from the representations of the visible patches conditioned on the positions of the masked patches; the decoder reconstructs the masked patches from the predicted encoded representations.

Encoder. The encoder $\mathcal{F}$ maps the visible patches $\mathbf{X}_v$ to the latent representations $\mathbf{Z}_v$. It only handles the visible patches. We use the ViT to form our encoder. It first embeds the visible patches by linear projection as patch embeddings and adds the positional embeddings $\mathbf{P}_v$. Then it sends the combined embeddings into a sequence of transformer blocks that are based on self-attention, generating $\mathbf{Z}_v$.

Regressor. The latent contextual regressor $\mathcal{H}$ predicts the latent representations $\mathbf{Z}_m$ for the masked patches from the latent representations $\mathbf{Z}_v$ of the visible patches output from the encoder, conditioned on the positions of the masked patches. We form the latent contextual regressor $\mathcal{H}$ using a series of transformer blocks that are based on cross-attention.

The initial queries $\mathbf{Q}_m$, called mask queries, are mask tokens that are learned as model parameters and are the same for all the masked patches. The keys and the values are the same before linear projection and consist of the visible patch representations $\mathbf{Z}_v$ and the output of the previous cross-attention layer (the mask queries for the first cross-attention layer). The corresponding positional embeddings of the masked patches are considered when computing the cross-attention weights between the queries and the keys. In this process, the latent representations $\mathbf{Z}_v$ of the visible patches are not updated.

Decoder. The decoder $\mathcal{G}$ maps the latent representations $\mathbf{Z}_m$ of the masked patches to some form of masked patches, $\mathbf{Y}_m$. The decoder, similar to the encoder, is a stack of transformer blocks that are based on self-attention, followed by a linear layer predicting the targets. The decoder only receives the latent representations of the masked patches (the output of the latent contextual regressor) and the positional embeddings of the masked patches as input, without directly using the information of the visible patches.
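To make the encoder-regressor-decoder structure concrete, the following is a minimal PyTorch-style sketch of the forward pass. It is illustrative only and not the released implementation: module names, depths, and the way positional embeddings enter the regressor are simplifying assumptions (here the masked-patch positional embeddings are simply added to the mask queries, and only the visible-patch representations serve as keys/values).

```python
import torch
import torch.nn as nn


class RegressorBlock(nn.Module):
    """Cross-attention block: mask queries attend to the visible-patch representations."""

    def __init__(self, dim, heads):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(dim, heads, batch_first=True)

    def forward(self, queries, z_v):
        # z_v serves as keys/values and is not updated, matching the description above.
        out, _ = self.attn(self.norm(queries), z_v, z_v)
        return queries + out


class CAE(nn.Module):
    def __init__(self, dim=768, heads=12, enc_depth=12, reg_depth=4, dec_depth=4, vocab_size=8192):
        super().__init__()
        make_block = lambda: nn.TransformerEncoderLayer(dim, heads, 4 * dim, batch_first=True)
        self.encoder = nn.ModuleList(make_block() for _ in range(enc_depth))
        self.regressor = nn.ModuleList(RegressorBlock(dim, heads) for _ in range(reg_depth))
        self.decoder = nn.ModuleList(make_block() for _ in range(dec_depth))
        self.mask_query = nn.Parameter(torch.zeros(1, 1, dim))  # shared mask token
        self.head = nn.Linear(dim, vocab_size)                  # predicts discrete tokens

    def encode(self, patch_embeddings):
        x = patch_embeddings
        for blk in self.encoder:
            x = blk(x)
        return x

    def forward(self, emb_visible, pos_visible, pos_masked):
        # Encoder: visible patches only.
        z_v = self.encode(emb_visible + pos_visible)

        # Regressor: predict masked-patch representations from z_v,
        # conditioned on the masked-patch positions.
        q = self.mask_query.expand(z_v.size(0), pos_masked.size(1), -1) + pos_masked
        for blk in self.regressor:
            q = blk(q, z_v)
        z_m = q

        # Decoder: reconstruct the targets from the predicted representations only.
        y = z_m + pos_masked
        for blk in self.decoder:
            y = blk(y)
        return z_m, self.head(y)


# The alignment targets \bar{Z}_m would be produced by running the masked patches through
# the same encoder with gradients stopped, e.g. inside `with torch.no_grad(): model.encode(...)`.
```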
# 3.2 Objective Function

Masking. Following BEiT [4], we adopt the random block-wise masking strategy (illustrated in Figure 3) to split the input image into two sets of patches, visible and masked patches. For each image, 98 of 196 $(14 \times 14)$ patches are masked.
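As a rough illustration of block-wise masking, the sketch below repeatedly masks rectangular blocks of patches until about half of the $14 \times 14$ grid is covered. The block-size and aspect-ratio ranges are assumptions for illustration, not the exact BEiT hyperparameters, and the final count may slightly exceed 98.

```python
import random
import numpy as np


def blockwise_mask(grid=14, num_mask=98, min_block=16, max_block=48, max_aspect=3.0):
    """Mask rectangular blocks until at least `num_mask` of grid*grid patches are masked."""
    mask = np.zeros((grid, grid), dtype=bool)
    while mask.sum() < num_mask:
        area = random.randint(min_block, max_block)
        aspect = random.uniform(1.0 / max_aspect, max_aspect)
        h = int(round((area * aspect) ** 0.5))
        w = int(round((area / aspect) ** 0.5))
        if not (0 < h <= grid and 0 < w <= grid):
            continue
        top = random.randint(0, grid - h)
        left = random.randint(0, grid - w)
        mask[top:top + h, left:left + w] = True
    return mask  # True marks a masked patch


if __name__ == "__main__":
    m = blockwise_mask()
    print(m.sum(), "of", m.size, "patches masked")
```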
Targets. The targets $\bar{\mathbf{Z}}_m$ for the representations of the masked patches are formed as follows. We feed the masked patches $\mathbf{X}_m$ into the encoder, which is the same as the one for encoding visible patches, and generate the representations $\bar{\mathbf{Z}}_m$ of the masked patches as the representation targets.

The targets $\bar{\mathbf{Y}}_m$ for the patch reconstruction are formed by the discrete tokenizer, e.g., the tokenizer trained with d-VAE on ImageNet-1K without using the labels, or the DALL-E tokenizer (trained with d-VAE on 400M images) [69] used in BEiT [4]. The input image is fed into the tokenizer, assigning a discrete token to each patch for forming the reconstruction targets $\bar{\mathbf{Y}}_m$.
Loss function. The loss function consists of a reconstruction loss, $\ell_y(\mathbf{Y}_m,\bar{\mathbf{Y}}_m)$, and an alignment loss, $\ell_z(\mathbf{Z}_m,\bar{\mathbf{Z}}_m)$, corresponding to masked patch reconstruction and masked representation prediction, respectively. The whole loss is a weighted sum:

$$
\ell_y(\mathbf{Y}_m, \bar{\mathbf{Y}}_m) + \lambda\, \ell_z(\mathbf{Z}_m, \mathrm{sg}[\bar{\mathbf{Z}}_m]). \qquad (1)
$$

We use the MSE loss for $\ell_z(\mathbf{Z}_m,\bar{\mathbf{Z}}_m)$ and the cross-entropy loss for $\ell_y(\mathbf{Y}_m,\bar{\mathbf{Y}}_m)$. $\mathrm{sg}[\cdot]$ stands for stop gradient. $\lambda$ is 2 in our experiments.
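A minimal sketch of Eq. (1), assuming the decoder predicts discrete-token logits and the regressor/encoder outputs have already been restricted to the masked patches (tensor names are illustrative):

```python
import torch.nn.functional as F


def cae_loss(logits_m, tokens_m, z_m, z_m_target, lam=2.0):
    """Eq. (1): cross-entropy reconstruction loss + lambda * MSE alignment loss."""
    # logits_m: (B, M, vocab) decoder outputs; tokens_m: (B, M) tokenizer targets
    # z_m: (B, M, D) regressor outputs; z_m_target: (B, M, D) encoder outputs on masked patches
    rec = F.cross_entropy(logits_m.flatten(0, 1), tokens_m.flatten())
    align = F.mse_loss(z_m, z_m_target.detach())  # detach plays the role of sg[.]
    return rec + lam * align
```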

Fig. 3: Illustration of random block-wise sampling (1st and 3rd images) and random cropping (2nd and 4th images). The colored regions are masked regions. The boxes correspond to cropped regions. Random block-wise sampling is used in our approach. Random cropping is a key data-augmentation scheme for contrastive self-supervised pretraining.
# 4 Discussions

# 4.1 Analysis

Predictions are made in the encoded representation space. Our CAE attempts to make predictions in the encoded representation space: predict the representations for the masked patches from the encoded representations of the visible patches. In other words, it is expected that the output representations of the latent contextual regressor also lie in the encoded representation space, which is ensured by prediction alignment. This encourages the learned representations to take on a large extent of semantics for prediction from visible patches to masked patches, benefiting the representation learning of the encoder.

We empirically verify that the predicted representations lie in the encoded representation space through image reconstruction. We train the CAE using the pixel colors as the prediction targets, for two cases: with and without the alignment, i.e., masked representation prediction. For reconstruction, we feed all the patches of an image (without masking, all the image patches are visible) from the ImageNet validation set into the pretrained encoder, then skip the latent contextual regressor and directly send all the encoded patch representations to the pretrained decoder for reconstructing the whole image.

Figure 4 provides reconstruction results for several examples randomly sampled from the ImageNet-1K validation set. One can see that our approach can successfully reconstruct the images, implying that the input and output representations of the latent contextual regressor are in the same space. In contrast, without the alignment, the reconstructed images are noisy, indicating that the input and output representations of the latent contextual regressor are in different spaces. The results suggest that the explicit prediction alignment is critical for ensuring that predictions are made in the encoded representation space.


Fig. 4: Illustrating that predictions are made in the representation space. We reconstruct the image by feeding the full image (1st, 4th, and 7th) into the pretrained CAE encoder and then the pretrained CAE decoder, outputting the reconstructed image (2nd, 5th, and 8th). It can be seen that the image can be reconstructed with the semantics kept when skipping the latent contextual regressor, verifying that the input and the predicted representations lie in the same space. We also show the reconstructed images (3rd, 6th, and 9th) from the encoder and the decoder pretrained without the alignment constraint. We can see that those images are meaningless, indicating that the alignment constraint is critical for ensuring that predictions are made in the representation space.

Representation alignment in CAE and contrastive self-supervised learning. Representation alignment is also used in contrastive self-supervised learning methods, such as MoCo, BYOL, and SimCLR, and in methods mixing contrastive self-supervised learning and masked image modeling, such as iBOT and MST. The alignment loss could be the MSE loss or the contrastive loss, which CAE may also take advantage of.

In the CAE, the alignment is imposed over the representations $\mathbf{Z}_m = \mathcal{H}(\mathcal{F}(\mathbf{X}_v))$ - predicted from the representations $\mathcal{F}(\mathbf{X}_v)$ of visible patches through the regressor $\mathcal{H}$ - and the representations $\bar{\mathbf{Z}}_m = \mathcal{F}(\mathbf{X}_m)$ - computed from the encoder $\mathcal{F}$. Both $\mathbf{Z}_m$ and $\bar{\mathbf{Z}}_m$ are about the masked patches, and lie in the representation space output from the encoder.

Differently, the alignment in most contrastive self-supervised learning methods is imposed over the representations $\{\mathcal{P}(\mathcal{F}(\mathbf{V}_1)),\mathcal{P}(\mathcal{F}(\mathbf{V}_2)),\dots ,\mathcal{P}(\mathcal{F}(\mathbf{V}_N))\}$, where $\mathcal{P}$ is a projector, and some views may be processed with the EMA version of the encoder and the projector. The $N$ representations to be aligned are about different views $\{\mathbf{V}_1,\mathbf{V}_2,\dots ,\mathbf{V}_N\}$ (in iBoT and MST, the views are masked views and full views), and are not directly output from the encoder. It is not quite clear how the projector works; it is reported in [68] that the projector is a part-to-whole process mapping the object part representation to the whole object representation for contrastive self-supervised learning.
# 4.2 Connection

Relation to autoencoder. The original autoencoder [53,32,43] consists of an encoder and a decoder. The encoder maps the input into a latent representation, and the decoder reconstructs the input from the latent representation. The denoising autoencoder (DAE) [79], a variant of the autoencoder, corrupts the input by adding noise and still reconstructs the non-corrupted input.

Our CAE is similar to the original autoencoder and also contains an encoder and a decoder. Different from the autoencoder, where the encoder and the decoder process the whole image, our encoder takes a portion of the patches as input and our decoder takes the estimated latent representations of the other portion of the patches as input. Importantly, the CAE makes predictions in the latent space from the visible patches to the masked patches.

Relation to BEiT, iBoT and MAE. The CAE encoder processes the visible patches to extract their representations, without making predictions for masked patches. Masked representation prediction is made through the regressor and the prediction alignment, ensuring that the output of the regressor lies in the same representation space as the encoder output. The decoder only processes the predicted representations of masked patches. Our approach encourages that the encoder takes the responsibility of, and is only for, representation learning.

In contrast, BEiT [4] and the MIM part of iBOT do not separate the representation extraction role and the task completion role, and use a single network, with both the visible and masked patches as the input, simultaneously for the two roles. In MAE [38], the so-called decoder may play a partial role in representation learning, as the representations of the visible patches are also updated in the MAE decoder. Unlike CAE, MAE, iBoT, and BEiT do not explicitly predict the representations of masked patches from the representations of visible patches (that lie in the encoded representation space).

When the pretrained encoder is applied to downstream tasks, one often replaces the pretext task completion part with the downstream task layer, e.g., a segmentation layer or a detection layer. The separation of representation learning (encoding) and pretext task completion helps downstream task applications take good advantage of representation pretraining.


(a)


(b)


(c)


(d)

Fig. 5: The computational graphs for (a) a context autoencoder (CAE), (b) BEiT [4], (c) a denoising autoencoder (DAE), and (d) MAE [38] and the one stream in SplitMask [29]. The parts in cornflower blue are for the loss function. (a) The encoder $\mathcal{F}$ receives visible patches $\mathbf{X}_v$ and outputs their latent representations $\mathbf{Z}_v$. The latent contextual regressor $\mathcal{H}$ predicts the latent representations $\mathbf{Z}_m$ for masked patches from $\mathbf{Z}_v$. The decoder predicts the targets $\mathbf{Y}_m$ for masked patches from $\mathbf{Z}_m$. $\ell_z$ and $\ell_y$ are the loss functions. During training, the gradient is stopped for $\bar{\mathbf{Z}}_m$. See the details in Section 3. (b) The input includes both visible patches $\mathbf{X}_v$ and mask queries $\mathbf{Q}_m$ representing masked patches, and the representations for them are updated within the function $\mathcal{R}$. (c) The function $\mathcal{N}$ is a noise function generating the noisy version $\hat{\mathbf{X}}$ from the input $\mathbf{X}$. $\mathcal{F}$ and $\mathcal{G}$ are the normal encoder and decoder, respectively. (d) The two functions, $\mathcal{F}'$ and $\mathcal{R}$, are both based on self-attention. $\mathcal{F}'$ (called encoder in MAE) only processes the visible patches $\mathbf{X}_v$, and $\mathcal{R}$ (called decoder in MAE) processes both the latent representations $\mathbf{Z}_v$ of the visible patches and the mask queries $\mathbf{Q}_m$, and updates them simultaneously. For simplicity, the positional embeddings are not included in the computational graphs. (a) CAE and (c) DAE perform the encoding and MIM task completion roles explicitly and separately; (b) BEiT and (d) MAE perform the encoding and MIM task completion roles implicitly and simultaneously.

We provide the computational graphs for CAE, BEiT [4], the denoising autoencoder, the Masked Autoencoder [38], and SplitMask [29] (one stream) in Figure 5. Compared to our CAE, the main issue of MAE is that the so-called decoder $\mathcal{R}$ might also have the encoding role, i.e., learning semantic representations of the visible patches.

Comparison to contrastive self-supervised learning. Typical contrastive self-supervised learning methods, e.g., SimCLR [18] and MoCo [39,21], pretrain the networks by solving the pretext task of maximizing the similarities between augmented views (e.g., random crops) from the same image and minimizing the similarities between augmented views from different images.

It is shown in [18] that random cropping plays an important role in view augmentation for contrastive self-supervised learning. Through analyzing random crops (illustrated in Figure 3), we observe that the center pixels in the original image space have a large chance of belonging to random crops. We suspect that the global representation, learned by contrastive self-supervised learning for a random crop possibly with other augmentation schemes, tends to focus mainly on the center pixels in the original image, so that the representations of different crops from the same image can be similar. Figure 6 (the second row) shows that, for the typical contrastive self-supervised learning approach MoCo v3, the center region of the original image is highly attended. The part in random crops corresponding to the center of the original image is still attended, as shown in Figure 8.

In contrast, our CAE method (and other MIM methods) randomly samples the patches from the augmented views to form the visible and masked patches. Every patch can be randomly masked for the augmented views and, accordingly, the original image. Thus, the CAE encoder needs to learn good representations for all the patches to make good predictions for the masked patches from the visible patches. Figure 6 (the third row) illustrates that almost all the patches in the original images are considered by our CAE encoder.

Considering that the instances of the 1000 categories in ImageNet-1K are located mainly around the center of the original images [71], typical contrastive self-supervised learning methods, e.g., MoCo v3, learn knowledge mainly about the 1000 categories, which is similar to supervised pretraining. But our CAE and other MIM methods are able to learn more knowledge beyond the 1000 categories from the non-center image regions. This indicates that the CAE has the potential to perform better on downstream tasks.
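The attention-map visualization in Figure 6 can be sketched as follows: average the last-layer attention from the class token to the patch tokens over the 12 heads, then keep the smallest set of patches whose cumulative attention reaches 50% of the mass (the contour in the figure). Capturing the attention tensor from a ViT block via a forward hook is assumed; the helper below only performs the averaging and thresholding.

```python
import torch


def cls_attention_mask(attn, keep=0.5, grid=14):
    """attn: (heads, 1+N, 1+N) last-block attention with the class token at index 0."""
    cls_to_patch = attn[:, 0, 1:].mean(dim=0)                # average over heads -> (N,)
    weights, order = cls_to_patch.sort(descending=True)
    cum = torch.cumsum(weights / weights.sum(), dim=0)
    num_kept = int((cum < keep).sum().item()) + 1            # smallest set reaching `keep`
    mask = torch.zeros_like(cls_to_patch, dtype=torch.bool)
    mask[order[:num_kept]] = True
    return mask.reshape(grid, grid)                          # True = inside the contour
```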
# 4.3 Interpretation

Intuitive interpretation for CAE. Humans are able to hallucinate what appears in the masked regions and how it appears according to the visible regions. We speculate that humans do this possibly in a way similar to the following example: given that only the region of the dog's head is visible and the remaining parts are missing, one can (a) recognize the visible region to be about a dog, (b) predict the regions where the other parts of the dog appear, and (c) guess what the other parts look like.

Our CAE encoder is in some sense like the human recognition step (a). It understands the content by mapping the visible patches into latent representations that lie in the subspace corresponding to the category dog$^2$. The latent contextual regressor is like step (b). It produces a plausible hypothesis for the masked patches, and describes the regions corresponding to the other parts of the dog using latent representations. The CAE decoder is like step (c), mapping the latent representations to the targets. It should be noted that the latent representations might contain other information besides the semantic information, e.g., the part information and the information for making predictions.

We adopt t-SNE [77] to visualize the high-dimensional patch representations output from our CAE encoder on ADE20K [103] in Figure 7. ADE20K has a total of 150 categories. For each patch in the image, we set its label to be the category that more than half of the pixels belong to. We collect up to 1000 patches for each category from 500 sampled images. As shown in the figure, the latent representations of CAE are clustered to some degree for different categories (though not perfectly, as our CAE is pretrained on ImageNet-1K). Similar observations can be found for other MIM methods.


Fig. 6: Illustrating the attention map averaged over 12 attention heads between the class token and the patch tokens in the last layer of the ViT encoder pretrained on ImageNet-1K. The region inside the blue contour is obtained by thresholding the attention weights to keep $50\%$ of the mass. The four rows are: (1) input image, (2) MoCo v3, a typical contrastive self-supervised learning method, (3) MAE, and (4) our CAE. One can see that MoCo v3 tends to focus mainly on the center regions and little on other patches, and our CAE tends to consider almost all the patches.


Fig. 7: t-SNE visualization (one color for one category) of representations extracted from the images in ADE20K. Left: ViT pretrained with our CAE; Right: ViT with random weights.
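A sketch of the t-SNE visualization in Figure 7, assuming the patch features and their per-patch majority labels have already been collected into arrays (the collection step and the 1000-patches-per-category cap are omitted):

```python
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE


def plot_patch_tsne(features, labels, out_path="patch_tsne.png"):
    """features: (num_patches, dim) encoder outputs; labels: (num_patches,) category ids."""
    emb = TSNE(n_components=2, init="pca", perplexity=30).fit_transform(features)
    plt.figure(figsize=(6, 6))
    plt.scatter(emb[:, 0], emb[:, 1], c=labels, s=2, cmap="tab20")
    plt.axis("off")
    plt.savefig(out_path, dpi=200)
```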
Probabilistic interpretation for CAE. The MIM problem can be formulated in probabilistic form, maximizing the probability of the predictions $\mathbf{Y}_m$ of the masked patches given the conditions: the visible patches $\mathbf{X}_v$, the positions $\mathbf{P}_v$ of the visible patches, and the positions $\mathbf{P}_m$ of the masked patches: $P(\mathbf{Y}_m\mid \mathbf{X}_v,\mathbf{P}_v,\mathbf{P}_m)$. It can be solved by introducing latent representations $\mathbf{Z}_m$ and $\mathbf{Z}_v$, with the assumption that $\mathbf{Z}_v$ and $\mathbf{P}_m$ ($\mathbf{Y}_m$ and $\mathbf{P}_v$) are conditionally independent (the probabilistic graphical model is given in Figure 9):

$$
\begin{aligned}
& p(\mathbf{Y}_m \mid \mathbf{X}_v, \mathbf{P}_v, \mathbf{P}_m) \qquad (2) \\
&= p(\mathbf{Z}_v \mid \mathbf{X}_v, \mathbf{P}_v, \mathbf{P}_m)\, p(\mathbf{Z}_m \mid \mathbf{Z}_v, \mathbf{P}_v, \mathbf{P}_m)\, p(\mathbf{Y}_m \mid \mathbf{Z}_m, \mathbf{P}_v, \mathbf{P}_m) \qquad (3) \\
&= p(\mathbf{Z}_v \mid \mathbf{X}_v, \mathbf{P}_v)\, p(\mathbf{Z}_m \mid \mathbf{Z}_v, \mathbf{P}_v, \mathbf{P}_m)\, p(\mathbf{Y}_m \mid \mathbf{Z}_m, \mathbf{P}_m). \qquad (4)
\end{aligned}
$$

Here, the equation from (2) to (3) is obtained from the probabilistic graphical model of CAE shown in Figure 9, and the removal of the condition $\mathbf{P}_m$ (from $p(\mathbf{Z}_v\mid \mathbf{X}_v,\mathbf{P}_v,\mathbf{P}_m)$ to $p(\mathbf{Z}_v\mid \mathbf{X}_v,\mathbf{P}_v)$) and of the condition $\mathbf{P}_v$ (from $p(\mathbf{Y}_m\mid \mathbf{Z}_m,\mathbf{P}_v,\mathbf{P}_m)$ to $p(\mathbf{Y}_m\mid \mathbf{Z}_m,\mathbf{P}_m)$) from (3) to (4) is based on the conditional independence assumption. The three terms in (4) correspond to the three parts of our CAE: the encoder, the latent contextual regressor, and the decoder, respectively.

Similarly, the latent representation alignment constraint can be written as a conditional probability, $P(\mathbf{Z}_m\mid \bar{\mathbf{Z}}_m)$, where $\bar{\mathbf{Z}}_m$ is the masked patch representations computed from the encoder.


Fig. 8: The attention maps over two sets of randomly cropped images (the 1st and 5th rows) for MoCo v3 (the 2nd and 6th rows), MAE (the 3rd and 7th rows), and our CAE (the 4th and 8th rows) pretrained on ImageNet-1K. The contrastive self-supervised learning method, MoCo v3, tends to focus mainly on the object region and little on other regions. In contrast, the MIM-based models, CAE and MAE, tend to consider almost all the patches. The attention maps over the original images are shown in Figure 6.


Fig. 9: The probabilistic graphical model of CAE. The other conditions of $\mathbf{Z}_v$, $\mathbf{Z}_m$, and $\mathbf{Y}_m$ - the positions $\mathbf{P}_v$ and $\mathbf{P}_m$ of the visible and masked patches - are not plotted for simplicity.

Intuitive interpretation for the contrastive self-supervised learning. We consider the case in ImageNet-1K where the object mainly lies in the center of an image$^3$. There are $N$ randomly sampled crops from an image, and each crop $\mathbf{I}_n$ contains a part of the center object, $\mathbf{O}_n$. To maximize the similarity between two crops $\mathbf{I}_m$ and $\mathbf{I}_n$, the pretraining might contain the processes: select the regions $\mathbf{O}_m$ and $\mathbf{O}_n$ from the two crops $\mathbf{I}_m$ and $\mathbf{I}_n$, extract their features $\mathbf{f}_{om}$ and $\mathbf{f}_{on}$, and predict the feature of the object, $\mathbf{f}_o$, from the part features $\mathbf{f}_{om}$ and $\mathbf{f}_{on}$. In this way, the features of the crops from the same image could be similar. Among the $N$ random crops, most crops contain a part of the object in the center, and the few crops that do not contain a part of the center object could be viewed as noise when optimizing the contrastive loss.

After being pretrained on ImageNet-1K (where the object mainly lies in the center), the encoder is able to learn the knowledge of the 1000 classes and localize the region containing the object belonging to the 1000 classes. It is not necessary that the object lies in the center of the test image, which is verified in Figure 8. This further verifies that MoCo v3 (contrastive self-supervised pretraining) pretrained on ImageNet-1K tends to attend to the object region, corresponding to the center region of the original image, as shown in Figure 6.
# 5 Experiments

# 5.1 Implementation

We study the standard ViT small, base and large architectures: ViT-S (12 transformer blocks with dimension 384), ViT-B (12 transformer blocks with dimension 768), and ViT-L (24 transformer blocks with dimension 1024). The latent contextual regressor consists of 4 transformer blocks based on cross-attention (using self-attention over the mask tokens and the encoded visible patch representations is an alternative, but it comes with a slightly higher computation cost and a little lower performance). The decoder consists of 4 transformer blocks based on self-attention, plus an extra linear projection for making predictions.
# 5.2 Training Details

Pretraining. The pretraining settings are almost the same as BEiT [4]. We train the CAE on ImageNet-1K. We partition the $224 \times 224$ image into $14 \times 14$ patches with the patch size being $16 \times 16$. We use standard random cropping and horizontal flipping for data augmentation. We use AdamW [63] for optimization and train the CAE for $300/800/1600$ epochs with the batch size being 2048. We set the learning rate as 1.5e-3 with cosine learning rate decay. The weight decay is set as 0.05. The warmup epochs for $300/800/1600$ epochs of pretraining are $10/20/40$, respectively. We employ a drop path [44] rate of 0.1 and a dropout rate of 0.
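For reference, the pretraining hyperparameters above can be collected into a single configuration (a plain dictionary; wiring it into a training loop is left out):

```python
# Pretraining hyperparameters from the paragraph above (values taken from the text).
pretrain_config = dict(
    dataset="ImageNet-1K",
    image_size=224,
    patch_size=16,
    num_patches=196,          # 14 x 14
    num_masked=98,
    optimizer="AdamW",
    base_lr=1.5e-3,
    lr_schedule="cosine",
    weight_decay=0.05,
    batch_size=2048,
    epochs=(300, 800, 1600),
    warmup_epochs=(10, 20, 40),
    drop_path=0.1,
    dropout=0.0,
    augmentation=("random_crop", "horizontal_flip"),
)
```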
Linear probing. We use the LARS [95] optimizer with momentum 0.9. The model is trained for 90 epochs. The batch size is 16384, the warmup is 10 epochs, and the learning rate is 6.4. Following [38], we adopt an extra BatchNorm layer [48] without affine transformation (affine=False) before the linear classifier. We do not use mixup [99], cutmix [96], drop path [44], or color jittering, and we set the weight decay as zero.

Attentive probing. The parameters of the encoder are fixed during attentive probing. A cross-attention module, a BatchNorm layer (affine=False), and a linear classifier are appended after the encoder. The extra class token representation in the cross-attention is learned as model parameters. The keys and the values are the patch representations output from the encoder. There is no MLP or skip connection in the extra cross-attention module. We use the SGD optimizer with momentum 0.9 and train the model for 90 epochs. The batch size is 8192, the warmup is 10 epochs, and the learning rate is 0.4. As in linear probing, we do not use mixup [99], cutmix [96], drop path, or color jittering, and we set the weight decay as zero.
Fine-tuning on ImageNet. We follow the fine-tuning protocol in BEiT and use layer-wise learning rate decay, weight decay, and AdamW. The batch size is 4096, the warmup lasts 5 epochs, and the weight decay is 0.05. For ViT-S, we train 200 epochs with learning rate 1.6e-2 and layer-wise decay rate 0.75. For ViT-B, we train 100 epochs with learning rate 8e-3 and layer-wise decay rate 0.65. For ViT-L, we train 50 epochs with learning rate 2e-3 and layer-wise decay rate 0.75.
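A common way to implement layer-wise learning rate decay is to build per-parameter groups whose learning rate shrinks geometrically toward the input. The sketch below assumes a timm-style ViT with `patch_embed`, `pos_embed`, `cls_token`, and `blocks` attributes; it is illustrative, not the exact BEiT/CAE fine-tuning code.

```python
def layerwise_lr_groups(vit, base_lr=8e-3, decay=0.65):
    """Per-parameter groups for layer-wise lr decay (ViT-B settings from the text)."""
    num_layers = len(vit.blocks) + 1                  # embeddings count as layer 0

    def layer_id(name):
        if name.startswith(("patch_embed", "pos_embed", "cls_token")):
            return 0
        if name.startswith("blocks."):
            return int(name.split(".")[1]) + 1
        return num_layers                             # final norm / head get the full lr

    groups = []
    for name, param in vit.named_parameters():
        if not param.requires_grad:
            continue
        scale = decay ** (num_layers - layer_id(name))
        groups.append({"params": [param], "lr": base_lr * scale})
    return groups


# usage sketch: optimizer = torch.optim.AdamW(layerwise_lr_groups(model), weight_decay=0.05)
```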
| 236 |
+
Semantic segmentation on ADE20K. We use AdamW as the optimizer. The input resolution is $512 \times 512$ . The batch size is 16. For the ViT-B, the layer-wise decay rate is 0.65 and the drop path rate is 0.1. We search from four learning rates, 1e-4, 2e-4, 3e-4 and 4e-4, for all the results in Table 2. For the ViT-L, the layer-wise decay rate is 0.95 and the drop path rate is 0.15. We search from three learning rates for all the methods, 3e-5, 4e-5, and 5e-5. We conduct fine-tuning for 160K steps. We do not use multi-scale testing.
|
| 237 |
+
|
| 238 |
+
Object detection and instance segmentation on COCO. We utilize multi-scale training and resize the image with the size of the short side between 480 and 800 and the long side no larger than 1333. The batch size is 32. For the ViT-S, the learning rate is 3e-4, the layer-wise decay rate is 0.75, and the drop path rate is 0.1. For the ViT-B, the learning rate is 3e-4, the layer-wise decay rate is 0.75, and the drop path rate is 0.2. For the ViT-L, the learning rate is 2e-4, the layer-wise decay rate is 0.8, and the drop path rate is 0.2. We train the network with the $1 \times$ schedule: 12 epochs with the learning rate decayed by $10 \times$ at epochs 9 and 11. We do not use multi-scale testing. The Mask R-CNN implementation follows MMDetection [14].

|
| 241 |
+
|
| 242 |
+

|
| 243 |
+
Fig. 10: Illustrating the cross-attention unit in attentive probing. The attention map (bottom) is the average of cross-attention maps over 12 heads between the extra class token and the patches. One can see that the attended region lies mainly in the object, which helps image classification.
|
| 244 |
+
|
| 245 |
+

|
| 246 |
+
|
| 247 |
+

|
| 248 |
+
|
| 249 |
+

|
| 250 |
+
|
| 251 |
+

|
| 252 |
+
|
| 253 |
+

|
| 254 |
+
|
| 255 |
+

|
| 256 |
+
|
| 257 |
+

|
| 258 |
+
|
| 259 |
+

|
| 260 |
+
|
| 261 |
+
# 5.3 Pretraining Evaluation
|
| 262 |
+
|
| 263 |
+
Linear probing. Linear probing is widely used as a proxy of pretraining quality evaluation for self-supervised representation learning. It learns a linear classifier over the image-level representation output from the pretrained encoder by using the labels of the images, and then tests the performance on the validation set.
|
| 264 |
+
|
| 265 |
+
Attentive probing. The output of an encoder pretrained with MIM methods consists of representations for all the patches. It is not suitable to linearly probe the representation average-pooled from the patch representations, because the image label in ImageNet-1K only corresponds to a portion of the patches. It is also not suitable to use the default class token within the encoder, because the default class token serves the role of aggregating the patch representations for better patch representation extraction and is not dedicated to the portion of patches corresponding to the image label.
To use the image-level label as a proxy for evaluating the pretraining quality of an encoder pretrained with MIM methods, we need to attend to the patches that are related to the label. We introduce a simple modification: a cross-attention unit with an extra class token (different from the class token in the encoder) as the query and the output patch representations of the encoder as the keys and the values, followed by a linear classifier. The introduced cross-attention unit is able to focus mainly on the patches belonging to the 1000 classes in ImageNet-1K and remove the interference of other patches. Figure 10 illustrates the effect of the cross-attention unit, showing that the extra cross-attention unit can, to some degree, attend to the regions that are related to the 1000 ImageNet-1K classes.
Results. Table 1 shows the results with three schemes, linear probing (LIN), attentive probing (ATT), and finetuning (FT) for representative contrastive self-supervised pretraining (MoCo v3 and DINO) and MIM (BEiT and MAE) methods, as well as our approach with the targets formed with the DALL-E tokenizer (trained on 400M images) and the d-VAE tokenizer (trained on ImageNet-1K without using the labels), denoted as CAE* and CAE, respectively. The models of MAE with 300 epochs and BEiT are pretrained by us using the official implementations, and other models are officially released models.
|
| 272 |
+
|
| 273 |
+
We highlight a few observations. The fine-tuning performance of these methods is very similar, with only minor differences, consistent with the observation in [104]. We think the reason is that self-supervised pretraining and fine-tuning are conducted on the same dataset and no extra knowledge is introduced for image classification. The minor differences might come from the optimization aspect: different initializations (provided by the pretrained models) for fine-tuning.
In terms of linear probing, the scores of the contrastive self-supervised learning methods, MoCo v3 and DINO, are higher than those of the MIM methods. This is as expected because contrastive self-supervised learning focuses mainly on learning the representations for the 1000 classes (see the discussion in Section 4). The pretraining is relatively easier than for existing MIM methods, as contrastive self-supervised learning mainly cares about the 1000 classes and MIM methods may care about the classes beyond the 1000 classes.
Table 1: Pretraining quality evaluation in terms of finetuning (FT), linear probing (LIN), and attentive probing (ATT). $\ddagger$ means the number of effective epochs in [104] as they adopt multi-crop augmentation (equivalently take a larger number of epochs compared to one-crop augmentation). We report the top-1 accuracy (in the column ATT) of the supervised training approach DeiT [76] to show how far the ATT score is from supervised training. The scores for other models and our models are based on our implementations if not specified. Except that * denotes using the DALL-E tokenizer, CAE adopts the d-VAE tokenizer trained on ImageNet-1K only.
|
| 278 |
+
|
| 279 |
+
<table><tr><td>Method</td><td>#Epochs</td><td>#Crops</td><td>FT</td><td>LIN</td><td>ATT</td></tr><tr><td colspan="6">Methods using ViT-S:</td></tr><tr><td>DeiT</td><td>300</td><td>-</td><td>-</td><td>-</td><td>79.9</td></tr><tr><td>MoCo v3</td><td>600‡</td><td>2</td><td>81.7</td><td>73.1</td><td>73.8</td></tr><tr><td>BEiT</td><td>300</td><td>1</td><td>81.7</td><td>15.7</td><td>23.6</td></tr><tr><td>CAE*</td><td>300</td><td>1</td><td>82.0</td><td>51.8</td><td>65.0</td></tr><tr><td colspan="6">Methods using ViT-B:</td></tr><tr><td>DeiT</td><td>300</td><td>-</td><td>-</td><td>-</td><td>81.8</td></tr><tr><td>MoCo v3</td><td>600‡</td><td>2</td><td>83.0</td><td>76.2</td><td>77.0</td></tr><tr><td>DINO</td><td>1600‡</td><td>12</td><td>83.3</td><td>77.3</td><td>77.8</td></tr><tr><td>BEiT</td><td>300</td><td>1</td><td>83.0</td><td>37.6</td><td>49.4</td></tr><tr><td>MAE</td><td>300</td><td>1</td><td>82.9</td><td>61.5</td><td>71.1</td></tr><tr><td>MAE</td><td>1600</td><td>1</td><td>83.6</td><td>67.8</td><td>74.2</td></tr><tr><td>SimMIM</td><td>800</td><td>1</td><td>83.8</td><td>56.7</td><td>-</td></tr><tr><td>iBOT</td><td>1600‡</td><td>12</td><td>83.8</td><td>79.5</td><td>79.8</td></tr><tr><td>CAE*</td><td>300</td><td>1</td><td>83.6</td><td>64.1</td><td>73.8</td></tr><tr><td>CAE*</td><td>800</td><td>1</td><td>83.8</td><td>68.6</td><td>75.9</td></tr><tr><td>CAE*</td><td>1600</td><td>1</td><td>83.9</td><td>70.4</td><td>77.1</td></tr><tr><td>CAE</td><td>1600</td><td>1</td><td>83.9</td><td>71.4</td><td>77.4</td></tr><tr><td colspan="6">Methods using ViT-L:</td></tr><tr><td>MoCo v3†</td><td>600‡</td><td>2</td><td>84.1</td><td>-</td><td>-</td></tr><tr><td>BEiT†</td><td>1600</td><td>1</td><td>85.2</td><td>-</td><td>-</td></tr><tr><td>MAE</td><td>1600</td><td>1</td><td>86.0</td><td>76.0</td><td>78.8</td></tr><tr><td>CAE*</td><td>1600</td><td>1</td><td>86.3</td><td>78.1</td><td>81.2</td></tr><tr><td>CAE</td><td>1600</td><td>1</td><td>86.3</td><td>77.9</td><td>81.2</td></tr></table>
For the MIM methods, the scores of attentive probing are much larger than linear probing. This validates our analysis: the MIM methods extract the representations for all the patches, and the classification task needs to attend the corresponding portion of patches.
|
| 284 |
+
|
| 285 |
+
The LIN and ATT scores are similar for contrastive self-supervised pretraining on ViT-B, e.g., (76.2 vs 77.0) for MoCo v3 and (77.3 vs 77.8) for DINO. This means that the extra cross-attention in attentive probing does not make a big difference, which is further evidence for our analysis in Section 4 that these methods already focus mainly on the region where the instance of the 1000 categories lies.
Table 2: Semantic segmentation on ADE20K. All the results are based on the same implementation for semantic segmentation. #Epochs refers to the number of pretraining epochs. $\ddagger$ means the number of effective epochs in [104] as the method uses multi-crop pretraining augmentation (See Table 1). SplitMask [29] is pretrained on ADE20K for 21000 epochs. $\dagger$ : these results are from [38].
|
| 288 |
+
|
| 289 |
+
<table><tr><td>Method</td><td>#Epochs</td><td>mIoU</td></tr><tr><td colspan="3">Methods using ViT-B:</td></tr><tr><td>SplitMask</td><td>-</td><td>45.7</td></tr><tr><td>BEiT</td><td>300</td><td>45.5</td></tr><tr><td>BEiT</td><td>800</td><td>46.5</td></tr><tr><td>mc-BEiT</td><td>800</td><td>47.0</td></tr><tr><td>DeiT</td><td>300</td><td>47.0</td></tr><tr><td>MoCo v3</td><td>600‡</td><td>47.2</td></tr><tr><td>DINO</td><td>1600‡</td><td>47.2</td></tr><tr><td>MAE</td><td>300</td><td>45.8</td></tr><tr><td>MAE</td><td>1600</td><td>48.1</td></tr><tr><td>Ge2-AE</td><td>800</td><td>48.9</td></tr><tr><td>A2MIM</td><td>800</td><td>49.0</td></tr><tr><td>iBOT</td><td>1600‡</td><td>50.0</td></tr><tr><td>CAE*</td><td>300</td><td>48.3</td></tr><tr><td>CAE*</td><td>800</td><td>49.7</td></tr><tr><td>CAE*</td><td>1600</td><td>50.2</td></tr><tr><td>CAE</td><td>1600</td><td>50.1</td></tr><tr><td colspan="3">Methods using ViT-L:</td></tr><tr><td>MoCo v3†</td><td>600‡</td><td>49.1</td></tr><tr><td>BEiT†</td><td>1600</td><td>53.3</td></tr><tr><td>MAE</td><td>1600</td><td>53.6</td></tr><tr><td>CAE*</td><td>1600</td><td>54.7</td></tr><tr><td>CAE</td><td>1600</td><td>54.6</td></tr></table>
|
| 290 |
+
|
| 291 |
+
# 5.4 Downstream Tasks
|
| 292 |
+
|
| 293 |
+
Semantic segmentation on ADE20K [103]. We follow the implementation of [4] and use UperNet [86]. The CAE with the tokenizer learned over ImageNet-1K performs almost the same as the CAE with the tokenizer learned over the 400M images provided by DALL-E (CAE*), implying that whether the tokenizer is trained on ImageNet-1K (without using the labels) or on a larger dataset does not affect the pretraining quality and, accordingly, the downstream task performance.
Table 2 shows that using the ViT-B, our CAE* with 300 training epochs performs better than DeiT, MoCo v3, DINO, MAE (1600 epochs) and BEiT. Our CAE* (1600 epochs) further improves the segmentation scores and outperforms MAE (1600 epochs), MoCo v3 and DeiT by 2.1, 3.0 and 3.2, respectively. Using ViT-L, our CAE* (1600 epochs) outperforms BEiT (1600 epochs) and MAE (1600 epochs) by 1.4 and 1.1, respectively.
|
| 296 |
+
|
| 297 |
+
Table 3: Object detection and instance segmentation on COCO. Mask R-CNN is adopted and trained with the $1 \times$ schedule. All the results are based on the same implementation for object detection and instance segmentation. #Epochs refers to the number of pretraining epochs on ImageNet-1K. $\ddagger$ means the number of effective epochs in [104] (See Table 1).
|
| 298 |
+
|
| 299 |
+
<table><tr><td rowspan="2">Method</td><td rowspan="2">#Epochs</td><td rowspan="2">Supervised</td><td rowspan="2">Self-supervised</td><td colspan="3">Object detection</td><td colspan="3">Instance segmentation</td></tr><tr><td>APb</td><td>APb50</td><td>APb75</td><td>APm</td><td>APm50</td><td>APm75</td></tr><tr><td colspan="10">Methods using ViT-S:</td></tr><tr><td>DeiT</td><td>300</td><td>✓</td><td>×</td><td>43.1</td><td>65.2</td><td>46.6</td><td>38.4</td><td>61.8</td><td>40.6</td></tr><tr><td>MoCo v3</td><td>600‡</td><td>×</td><td>✓</td><td>43.3</td><td>64.9</td><td>46.8</td><td>38.8</td><td>61.6</td><td>41.1</td></tr><tr><td>BEiT</td><td>300</td><td>×</td><td>✓</td><td>35.6</td><td>56.7</td><td>38.3</td><td>32.6</td><td>53.3</td><td>34.2</td></tr><tr><td>CAE*</td><td>300</td><td>×</td><td>✓</td><td>44.1</td><td>64.6</td><td>48.2</td><td>39.2</td><td>61.4</td><td>42.2</td></tr><tr><td colspan="10">Methods using ViT-B:</td></tr><tr><td>DeiT</td><td>300</td><td>✓</td><td>×</td><td>46.9</td><td>68.9</td><td>51.0</td><td>41.5</td><td>65.5</td><td>44.4</td></tr><tr><td>MoCo v3</td><td>600‡</td><td>×</td><td>✓</td><td>45.5</td><td>67.1</td><td>49.4</td><td>40.5</td><td>63.7</td><td>43.4</td></tr><tr><td>DINO</td><td>1600‡</td><td>×</td><td>✓</td><td>46.8</td><td>68.6</td><td>50.9</td><td>41.5</td><td>65.3</td><td>44.5</td></tr><tr><td>BEiT</td><td>300</td><td>×</td><td>✓</td><td>39.5</td><td>60.6</td><td>43.0</td><td>35.9</td><td>57.7</td><td>38.5</td></tr><tr><td>BEiT</td><td>800</td><td>×</td><td>✓</td><td>42.1</td><td>63.3</td><td>46.0</td><td>37.8</td><td>60.1</td><td>40.6</td></tr><tr><td>MAE</td><td>300</td><td>×</td><td>✓</td><td>45.4</td><td>66.4</td><td>49.6</td><td>40.6</td><td>63.4</td><td>43.7</td></tr><tr><td>MAE</td><td>1600</td><td>×</td><td>✓</td><td>48.4</td><td>69.4</td><td>53.1</td><td>42.6</td><td>66.1</td><td>45.9</td></tr><tr><td>iBOT</td><td>1600‡</td><td>×</td><td>✓</td><td>48.2</td><td>69.7</td><td>52.8</td><td>42.7</td><td>66.5</td><td>46.0</td></tr><tr><td>CAE*</td><td>300</td><td>×</td><td>✓</td><td>48.4</td><td>69.2</td><td>52.9</td><td>42.6</td><td>66.1</td><td>45.8</td></tr><tr><td>CAE*</td><td>800</td><td>×</td><td>✓</td><td>49.8</td><td>70.7</td><td>54.6</td><td>43.9</td><td>67.8</td><td>47.4</td></tr><tr><td>CAE*</td><td>1600</td><td>×</td><td>✓</td><td>50.0</td><td>70.9</td><td>54.8</td><td>44.0</td><td>67.9</td><td>47.6</td></tr><tr><td>CAE</td><td>1600</td><td>×</td><td>✓</td><td>50.2</td><td>71.0</td><td>54.9</td><td>44.2</td><td>68.3</td><td>47.9</td></tr><tr><td colspan="10">Methods using ViT-L:</td></tr><tr><td>MAE</td><td>1600</td><td>×</td><td>✓</td><td>54.0</td><td>74.3</td><td>59.5</td><td>47.1</td><td>71.5</td><td>51.0</td></tr><tr><td>CAE*</td><td>1600</td><td>×</td><td>✓</td><td>54.5</td><td>75.2</td><td>60.1</td><td>47.6</td><td>72.2</td><td>51.9</td></tr><tr><td>CAE</td><td>1600</td><td>×</td><td>✓</td><td>54.6</td><td>75.2</td><td>59.9</td><td>47.6</td><td>72.0</td><td>51.9</td></tr></table>
|
| 300 |
+
|
| 301 |
+
The superior results over the supervised and contrastive self-supervised pretraining methods, DeiT, MoCo v3 and DINO, stem from the fact that our approach captures knowledge beyond the 1000 classes in ImageNet-1K. The superior results over BEiT and MAE stem from the fact that our CAE makes predictions in the encoded representation space and that representation learning and pretext task completion are separated.
Object detection and instance segmentation on COCO [60]. We adopt the Mask R-CNN approach [40] that produces bounding boxes and instance masks simultaneously, with the ViT as the backbone. The results are given in Table 3. We report the box AP for object detection and the mask AP for instance segmentation. The observations are consistent with those for semantic segmentation in Table 2. Our CAE* (300 epochs, ViT-B) is superior to all the other models, except being slightly lower than MAE (1600 epochs). Our approach (1600 epochs) outperforms MAE (1600 epochs), MoCo v3 and DeiT by 1.6, 4.5 and 3.1, respectively. Using ViT-L, our CAE achieves 54.6 box AP and outperforms MAE by 0.6.
We also report the results of object detection and instance segmentation on COCO with the Cascaded Mask R-CNN framework [7] in Table 6. Results show that our CAE performs better than other methods.
|
| 306 |
+
|
| 307 |
+
In addition, we conduct experiments on the scaling ability of CAE on the detection task. The detection model is built upon ViT-Huge [26], DINO [98], and Group DETR [16] (see [17] for more details). The ViT-Huge is pretrained on ImageNet-22K [22] using CAE. We are the first to obtain 64.6 mAP on COCO test-dev, which outperforms previous methods with larger models and more training data (e.g., BEIT-3 [81] (63.7 mAP) and SwinV2-G [62] (63.1 mAP)).
|
| 308 |
+
|
| 309 |
+
Classification. We conduct fine-tuning experiments on three datasets: Food-101 [6], Clipart [12], and Sketch [12]. Results in Table 4 show that the proposed method outperforms the previous supervised method (DeiT) and self-supervised methods (DINO, MAE).
|
| 310 |
+
|
| 311 |
+
# 5.5 Ablation Studies
|
| 312 |
+
|
| 313 |
+
Decoder and alignment. The CAE architecture contains several components for pretraining the encoder: the regressor and the alignment constraint for masked representation prediction, and the decoder with a linear layer for masked patch reconstruction. We observe that if the pretraining task of masked patch reconstruction is not included, the training collapses, leading to a trivial solution. We thus study the effect of the decoder (when the decoder is removed,
Table 4: Top-1 classification accuracy on the Food-101, Clipart and Sketch datasets. The backbone is ViT-B.
|
| 316 |
+
|
| 317 |
+
<table><tr><td>Method</td><td>Supervised</td><td>Self-supervised</td><td>Food-101</td><td>Clipart</td><td>Sketch</td></tr><tr><td>Random Init.</td><td>×</td><td>×</td><td>82.77</td><td>52.90</td><td>46.42</td></tr><tr><td>DeiT</td><td>✓</td><td>×</td><td>91.81</td><td>81.18</td><td>73.45</td></tr><tr><td>DINO</td><td>×</td><td>✓</td><td>91.67</td><td>80.72</td><td>73.13</td></tr><tr><td>MAE</td><td>×</td><td>✓</td><td>93.19</td><td>80.63</td><td>73.87</td></tr><tr><td>CAE*</td><td>×</td><td>✓</td><td>93.32</td><td>81.84</td><td>74.65</td></tr></table>
|
| 318 |
+
|
| 319 |
+
Table 5: Ablation studies for the decoder and the alignment constraint in our CAE. All the models are pretrained on ImageNet-1K with 300 epochs.
|
| 320 |
+
|
| 321 |
+
<table><tr><td>Decoder</td><td>Alignment</td><td>LIN</td><td>ATT</td><td>FT</td><td>ADE Seg.</td><td>COCO Det.</td><td>#Params</td><td>Training Time</td></tr><tr><td>×</td><td>×</td><td>60.3</td><td>71.2</td><td>82.9</td><td>47.0</td><td>46.9</td><td>120.32 M</td><td>1×</td></tr><tr><td>✓</td><td>×</td><td>63.1</td><td>72.7</td><td>83.4</td><td>47.1</td><td>47.2</td><td>148.68 M</td><td>1.14×</td></tr><tr><td>×</td><td>✓</td><td>62.0</td><td>71.5</td><td>83.4</td><td>47.1</td><td>47.2</td><td>120.32 M</td><td>1.12×</td></tr><tr><td>✓</td><td>✓</td><td>64.1</td><td>73.8</td><td>83.6</td><td>48.3</td><td>48.4</td><td>148.68 M</td><td>1.24×</td></tr></table>
|
| 322 |
+
|
| 323 |
+
Table 6: The results of object detection and instance segmentation on COCO with the Cascaded Mask-RCNN framework (1× schedule). ViT-B is used for all experiments. All the detection results are from our implementation.
|
| 324 |
+
|
| 325 |
+
<table><tr><td>Method</td><td>#Epochs</td><td>APb</td><td>APm</td></tr><tr><td>MAE [38]</td><td>1600</td><td>51.3</td><td>44.3</td></tr><tr><td>mc-BEiT [58]</td><td>800</td><td>50.1</td><td>43.1</td></tr><tr><td>iBOT [104]</td><td>1600</td><td>51.2</td><td>44.2</td></tr><tr><td>CAE*</td><td>300</td><td>51.6</td><td>44.6</td></tr><tr><td>CAE*</td><td>800</td><td>52.8</td><td>45.5</td></tr><tr><td>CAE*</td><td>1600</td><td>52.9</td><td>45.5</td></tr></table>
Table 7: The effect of mask ratios. The backbone is ViT-B. Models are trained for 300 epochs.
|
| 328 |
+
|
| 329 |
+
<table><tr><td>Mask Ratio</td><td>LIN</td><td>ATT</td><td>ADE Seg</td></tr><tr><td>40%</td><td>63.1</td><td>73.0</td><td>47.2</td></tr><tr><td>50%</td><td>64.1</td><td>73.8</td><td>48.3</td></tr><tr><td>60%</td><td>64.8</td><td>74.2</td><td>48.1</td></tr></table>
|
| 330 |
+
|
| 331 |
+
we use a linear layer to predict the targets), which is helpful for target reconstruction, and the alignment, which is helpful for representation prediction.
|
| 332 |
+
|
| 333 |
+
Table 5 shows the ablation results. We report the scores for linear probing, attentive probing, fine-tuning and downstream tasks: semantic segmentation on the ADE20K dataset and object detection on COCO with the DALL-E tokenizer as the target. One can see that the downstream task performance is almost the same when only the decoder is added and that the performance increases when the decoder and the alignment are both added. This also verifies that the alignment is
|
| 334 |
+
|
| 335 |
+
Table 8: The effect of reconstruction targets on the performance of CAE. The backbone is ViT-B. Models are trained for 1600 epochs.
|
| 336 |
+
|
| 337 |
+
<table><tr><td>Targets</td><td>LIN</td><td>ATT</td><td>ADE Seg</td></tr><tr><td>DALL-E tokenizer</td><td>70.4</td><td>77.1</td><td>50.2</td></tr><tr><td>d-VAE tokenizer</td><td>71.4</td><td>77.4</td><td>50.1</td></tr><tr><td>RGB pixel value</td><td>72.4</td><td>77.0</td><td>50.4</td></tr></table>
|
| 338 |
+
|
| 339 |
+
important for ensuring that the predicted representations of masked patches lie in the encoded representation space and thus the predictions are made in the encoded representation space, and accordingly improving the representation quality. Without the decoder, the performance drops. This is because the reconstruction from the semantic representation to the low-level targets cannot be done through a single linear layer, and no decoder will deteriorate the semantic quality of the encoder. The additional computational cost, i.e. the number of parameters and training time, brought by the decoder and alignment is relatively small, e.g., increasing the number of parameters to $1.23 \times$ and training time to $1.24 \times$ .
|
| 340 |
+
|
| 341 |
+
Mask ratio. We also conduct experiments with different mask ratios including $40\%$ , $50\%$ , and $60\%$ . Results are listed in Table 7. We find that ratio $50\%$ gets better results than ratio $40\%$ . Adopting a higher mask ratio $(60\%)$ could further improve the performance of linear probing and attentive probing, while the semantic segmentation performance is reduced by $0.2\%$ . We choose $50\%$ in our work unless specified.
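
For illustration, masking at a given ratio can be implemented by selecting a random subset of the 14x14 patch positions, as sketched below; the paper's actual sampling strategy (e.g., block-wise masking) is not reproduced here, and the function name is a placeholder.

```python
import torch

# Minimal sketch: choose a random subset of the 196 patch positions to mask.
def random_mask(batch_size: int, num_patches: int = 196, ratio: float = 0.5):
    num_masked = int(num_patches * ratio)
    noise = torch.rand(batch_size, num_patches)
    ids = noise.argsort(dim=1)                      # random permutation per sample
    mask = torch.zeros(batch_size, num_patches, dtype=torch.bool)
    rows = torch.arange(batch_size).unsqueeze(1)
    mask[rows, ids[:, :num_masked]] = True          # True = masked patch
    return mask
```
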
Layers in the regressor and decoder. For the number of layers in the latent contextual regressor and the decoder, we tried four choices: 1, 2, 4, and 5 layers. The results for linear probing are 58.7, 62.1, 64.1, and 64.2, and the results for attentive probing are 67.5, 71.1, 73.8, and 73.7. We empirically observed that using 4 layers outperforms the other choices overall.
Loss tradeoff parameter. There is a tradeoff variable $\lambda$ in the loss function given in Equation 1. We did not do an extensive study and only tried three choices, $\lambda = 1$, $\lambda = 1.5$ and $\lambda = 2$. The linear probing results are 63.4, 63.7 and 64.1, respectively. The choice $\lambda = 1$ also works well, only slightly worse than $\lambda = 2$, which is adopted in our experiments.
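
Equation 1 itself is given earlier in the paper and is not repeated here; purely as a reading aid, and assuming it combines the two pretraining objectives as a weighted sum with $\lambda$ on the alignment term, the tradeoff can be written as

$$\mathcal{L} = \ell_{\mathrm{rec}}(\hat{y}_m, y_m) + \lambda\, \ell_{\mathrm{align}}(\hat{z}_m, z_m),$$

where $\hat{y}_m$ and $y_m$ denote the predicted and target values (e.g., tokenizer tokens) of the masked patches, and $\hat{z}_m$ and $z_m$ denote the predicted and encoder-produced representations of the masked patches; these symbols are introduced here only for illustration.
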
Reconstruction targets. To study the impact of different pretraining targets on model performance, we conduct additional experiments with RGB pixel values as the target. Compared with the DALL-E tokenizer and the d-VAE tokenizer trained on ImageNet-1K, the RGB pixel target yields better linear probing and segmentation results but is inferior in attentive probing, as shown in Table 8. Pretraining with these three targets obtains similar performance, illustrating that CAE does not rely on a specific pretraining target.
# 6 Conclusion
|
| 352 |
+
|
| 353 |
+
The core design of our CAE architecture for masked image modeling is that predictions are made from visible patches to masked patches in the encoded representation space. We adopt two pretraining tasks: masked representation prediction and masked patch reconstruction. Experiments demonstrate the effectiveness of the CAE design. In addition, we also point out that the advantage of MIM methods over typical contrastive self-supervised pretraining and supervised pretraining on ImageNet-1K is that MIM learns the representations for all the patches, while typical contrastive self-supervised pretraining (e.g., MoCo and SimCLR) and supervised pretraining tend to learn semantics mainly from center patches of the original images and little from non-center patches.
|
| 354 |
+
|
| 355 |
+
Possible extensions, as mentioned in the arXiv version [19], include: investigating the possibility of keeping only the masked representation prediction task, without masked patch reconstruction; pretraining a depth-wise convolution network with masked convolution; and pretraining with CLIP targets [102].
Potential limitations. The proposed method may face challenges when dealing with large and contiguous masked regions in an image, e.g., when the whole object region is almost masked. Obtaining plausible and high-quality reconstruction for large areas can be particularly difficult, as the model has to infer the missing information based on limited available context. This is a common limitation of Masked Image Modeling methods, and our proposed method is not exempt from it.
# Acknowledgments
|
| 362 |
+
|
| 363 |
+
We would like to acknowledge Hangbo Bao, Xinlei Chen, Li Dong, Qi Han, Zhuowen Tu, Saining Xie, and Furu Wei for the helpful discussions.
|
| 364 |
+
|
| 365 |
+
# Declarations
|
| 366 |
+
|
| 367 |
+
# - Funding
This work is partially supported by the National Key Research and Development Program of China (2020YFB1708002), National Natural Science Foundation of China (61632003, 61375022, 61403005), Grant SCITLAB-20017 of Intelligent Terminal Key Laboratory of SiChuan Province, Beijing Advanced Innovation Center for Intelligent Robots and Systems (2018IRS11), and PEK-SenseTime Joint Laboratory of Machine Vision. Ping Luo is supported by the General Research Fund of HK No.27208720, No.17212120, and No.17200622.
|
| 370 |
+
|
| 371 |
+
# - Code availability
|
| 372 |
+
|
| 373 |
+
Our code will be available at https://github.com/Atten4Vis/CAE.
|
| 374 |
+
|
| 375 |
+
# - Availability of data and materials
|
| 376 |
+
|
| 377 |
+
The datasets used in this paper are publicly available. ImageNet: https://www.image-net.org/, ADE20K: https://groups.csail.mit.edu/vision/datasets/ADE20K/,
COCO: https://cocodataset.org/,
|
| 380 |
+
|
| 381 |
+
Food-101: https://data.vision.ee.ethz.ch/cvl/datasets-extra/food-101/,
|
| 382 |
+
|
| 383 |
+
Clipart: http://projects.csail.mit.edu/cmplaces/download.html,
|
| 384 |
+
|
| 385 |
+
Sketch: http://projects.csail.mit.edu/cmplaces/download.html.
|
| 386 |
+
|
| 387 |
+
# References
|
| 388 |
+
|
| 389 |
+
1. Yuki Markus Asano, Christian Rupprecht, and Andrea Vedaldi. Self-labelling via simultaneous clustering and representation learning. arXiv preprint arXiv:1911.05371, 2019.
|
| 390 |
+
2. Sara Atito, Muhammad Awais, and Josef Kittler. Sit: Self-supervised vision transformer. arXiv preprint arXiv:2104.03602, 2021.
|
| 391 |
+
3. Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, and Michael Auli. data2vec: A general framework for self-supervised learning in speech, vision and linguistics. Technical report, 2022.
|
| 392 |
+
|
| 393 |
+
4. Hangbo Bao, Li Dong, and Furu Wei. BEiT: BERT pre-training of image transformers. arXiv:2106.08254, 2021.
|
| 394 |
+
5. Adrien Bardes, Jean Ponce, and Yann LeCun. Vicreg: Variance-invariance-covariance regularization for self-supervised learning. arXiv preprint arXiv:2105.04906, 2021.
|
| 395 |
+
6. Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101 - mining discriminative components with random forests. In ECCV, 2014.
|
| 396 |
+
7. Zhaowei Cai and Nuno Vasconcelos. Cascade r-cnn: High quality object detection and instance segmentation. TPAMI, 43:1483-1498, 2021.
|
| 397 |
+
8. Mathilde Caron, Piotr Bojanowski, Armand Joulin, and Matthijs Douze. Deep clustering for unsupervised learning of visual features. In ECCV, pages 132-149, 2018.
|
| 398 |
+
9. Mathilde Caron, Piotr Bojanowski, Julien Mairal, and Armand Joulin. Unsupervised pre-training of image features on non-curated data. In ICCV, pages 2959-2968, 2019.
|
| 399 |
+
10. Mathilde Caron, Ishan Misra, Julien Mairal, Priya Goyal, Piotr Bojanowski, and Armand Joulin. Unsupervised learning of visual features by contrasting cluster assignments. arXiv preprint arXiv:2006.09882, 2020.
|
| 400 |
+
11. Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. CoRR, abs/2104.14294, 2021.
|
| 401 |
+
12. Lluis Castrejon, Yusuf Aytar, Carl Vondrick, Hamed Piri siavash, and Antonio Torralba. Learning aligned cross-modal representations from weakly aligned data. In CVPR, pages 2940-2949, 2016.
|
| 402 |
+
13. Jun Chen, Ming Hu, Boyang Li, and Mohamed Elhoseiny. Efficient self-supervised vision pretraining with local masked reconstruction. arXiv preprint arXiv:2206.00790, 2022.
|
| 403 |
+
14. Kai Chen, Jiaqi Wang, Jiangmiao Pang, Yuhang Cao, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jiarui Xu, Zheng Zhang, Dazhi Cheng, Chenchen Zhu, Tianheng Cheng, Qijie Zhao, Buyu Li, Xin Lu, Rui Zhu, Yue Wu, Jifeng Dai, Jingdong Wang, Jianping Shi, Wanli Ouyang, Chen Change Loy, and Dahua Lin. MMDetection: Open mmlab detection toolbox and benchmark. arXiv preprint arXiv:1906.07155, 2019.
|
| 404 |
+
15. Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, and Ilya Sutskever. Generative pretraining from pixels. In ICML, pages 1691-1703. PMLR, 2020.
|
| 405 |
+
16. Qiang Chen, Xiaokang Chen, Jian Wang, Haocheng Feng, Junyu Han, Errui Ding, Gang Zeng, and Jingdong Wang. Group detr: Fast detr training with group-wise one-to-many assignment. 2022.
|
| 406 |
+
17. Qiang Chen, Jian Wang, Chuchu Han, Shan Zhang, Zexian Li, Xiaokang Chen, Jiahui Chen, Xiaodi Wang, Shuming Han, Gang Zhang, Haocheng Feng, Kun Yao, Junyu Han, Errui Ding, and Jingdong Wang. Group DETR v2: Strong object detector with encoder-decoder pretraining. CoRR, abs/2211.03594, 2022.
|
| 407 |
+
18. Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey E. Hinton. A simple framework for contrastive learning of visual representations. In ICML, volume 119 of Proceedings of Machine Learning Research, pages 1597-1607. PMLR, 2020.
|
| 408 |
+
19. Xiaokang Chen, Mingyu Ding, Xiaodi Wang, Ying Xin, Shentong Mo, Yunhao Wang, Shumin Han, Ping Luo, Gang Zeng, and Jingdong Wang. Context autoencoder for self-supervised representation learning. CoRR, abs/2202.03026, 2022.
|
| 409 |
+
|
| 410 |
+
20. Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In $CVPR$ , pages 15750-15758, 2021.
|
| 411 |
+
21. Xinlei Chen, Saining Xie, and Kaiming He. An empirical study of training self-supervised vision transformers. CoRR, abs/2104.02057, 2021.
|
| 412 |
+
22. Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In CVPR, pages 248-255. IEEE, 2009.
|
| 413 |
+
23. Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: pre-training of deep bidirectional transformers for language understanding. In Jill Burstein, Christy Doran, and Thamar Solorio, editors, NAACL-HLT, pages 4171-4186. Association for Computational Linguistics, 2019.
|
| 414 |
+
24. Carl Doersch, Abhinav Gupta, and Alexei A. Efros. Unsupervised visual representation learning by context prediction. In ICCV, 2015.
|
| 415 |
+
25. Xiaoyi Dong, Jianmin Bao, Ting Zhang, Dongdong Chen, Weiming Zhang, Lu Yuan, Dong Chen, Fang Wen, and Nenghai Yu. Peco: Perceptual codebook for bert pre-training of vision transformers. arXiv preprint arXiv:2111.12710, 2021.
|
| 416 |
+
26. Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR. OpenReview.net, 2021.
|
| 417 |
+
27. Alexey Dosovitskiy, Philipp Fischer, Jost Tobias Springenberg, Martin Riedmiller, and Thomas Brox. Discriminative unsupervised feature learning with exemplar convolutional neural networks. TPAMI, 38(9):1734-1747, 2015.
|
| 418 |
+
28. Alexey Dosovitskiy, Jost Tobias Springenberg, Martin Riedmiller, and Thomas Brox. Discriminative unsupervised feature learning with convolutional neural networks. NeurIPS, 27:766-774, 2014.
|
| 419 |
+
29. Alaaeldin El-Nouby, Gautier Izacard, Hugo Touvron, Ivan Laptev, Hervé Jegou, and Edouard Grave. Are large-scale datasets necessary for self-supervised pretraining? arXiv preprint arXiv:2112.10740, 2021.
|
| 420 |
+
30. Aleksandr Ermolov, Aliaksandr Siarohin, Enver Sangineto, and Nicu Sebe. Whitening for self-supervised representation learning. In ICML, pages 3015-3024. PMLR, 2021.
|
| 421 |
+
31. Yuxin Fang, Li Dong, Hangbo Bao, Xinggang Wang, and Furu Wei. Corrupted image modeling for self-supervised visual pre-training. arXiv preprint arXiv:2202.03382, 2022.
|
| 422 |
+
32. Patrick Gallinari, Yann Lecun, Sylvie Thiria, and F Fogelman Soulie. Mémoires associatives distribuées: une comparaison (distributed associative memories: a comparison). In Proceedings of COGNITIVA 87, Paris, La Vellette, May 1987. Cesta-Afcet, 1987.
|
| 423 |
+
33. Quentin Garrido, Yubei Chen, Adrien Bardes, Laurent Najman, and Yann LeCun. On the duality between contrastive and non-contrastive self-supervised learning. CoRR, abs/2206.02574, 2022.
|
| 424 |
+
34. Spyros Gidaris, Andrei Bursuc, Nikos Komodakis, Patrick Pérez, and Matthieu Cord. Learning representations by predicting bags of visual words. In CVPR, pages 6928-6938, 2020.
|
| 425 |
+
35. Spyros Gidaris, Andrei Bursuc, Gilles Puy, Nikos Komodakis, Matthieu Cord, and Patrick Pérez. Online bag-of-visual-words generation for unsupervised representation learning. arXiv preprint arXiv:2012.11552, 2020.
|
| 426 |
+
|
| 427 |
+
36. Priya Goyal, Mathilde Caron, Benjamin Lefaudeaux, Min Xu, Pengchao Wang, Vivek Pai, Mannat Singh, Vitaliy Liptchinsky, Ishan Misra, Armand Joulin, et al. Self-supervised pretraining of visual features in the wild. arXiv preprint arXiv:2103.01988, 2021.
|
| 428 |
+
37. Jean-Bastien Grill, Florian Strub, Florent Altché, Corentin Tallec, Pierre H Richemond, Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Daniel Guo, Mohammad Gheshlaghi Azar, et al. Bootstrap your own latent: A new approach to self-supervised learning. arXiv preprint arXiv:2006.07733, 2020.
|
| 429 |
+
38. Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, and Ross Girshick. Masked autoencoders are scalable vision learners. In CVPR, 2022.
|
| 430 |
+
39. Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross B. Girshick. Momentum contrast for unsupervised visual representation learning. In CVPR, pages 9726-9735. Computer Vision Foundation / IEEE, 2020.
|
| 431 |
+
40. Kaiming He, Georgia Gkioxari, Piotr Dolkar, and Ross Girshick. Mask r-cnn. In ICCV, pages 2961-2969, 2017.
|
| 432 |
+
41. Olivier Henaff. Data-efficient image recognition with contrastive predictive coding. In ICML, pages 4182-4192. PMLR, 2020.
|
| 433 |
+
42. Geoffrey E Hinton and Ruslan R Salakhutdinov. Reducing the dimensionality of data with neural networks. science, 313(5786):504-507, 2006.
|
| 434 |
+
43. Geoffrey E Hinton and Richard S Zemel. Autoencoders, minimum description length, and helmholtz free energy. NeurIPS, 6:3-10, 1994.
|
| 435 |
+
44. Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q Weinberger. Deep networks with stochastic depth. In ECCV, pages 646-661. Springer, 2016.
|
| 436 |
+
45. Jiabo Huang, Qi Dong, Shaogang Gong, and Xiatian Zhu. Unsupervised deep learning by neighbourhood discovery. In ICML, pages 2849-2858. PMLR, 2019.
|
| 437 |
+
46. Lang Huang, Shan You, Mingkai Zheng, Fei Wang, Chen Qian, and Toshihiko Yamasaki. Green hierarchical vision transformer for masked image modeling. arXiv preprint arXiv:2205.13515, 2022.
|
| 438 |
+
47. Zhicheng Huang, Xiaojie Jin, Chengze Lu, Qibin Hou, Ming-Ming Cheng, Dongmei Fu, Xiaohui Shen, and Ji-ashi Feng. Contrastive masked autoencoders are stronger vision learners. arXiv preprint arXiv:2207.13532, 2022.
|
| 439 |
+
48. Sergey Ioffe and Christian Szegedy. Batch normalization: Accelerating deep network training by reducing internal covariate shift. In ICML, 2015.
|
| 440 |
+
49. Li Jing, Jiachen Zhu, and Yann LeCun. Masked siamese convnets. arXiv preprint arXiv:2206.07700, 2022.
|
| 441 |
+
50. Ioannis Kakogeorgiou, Spyros Gidaris, Bill Psomas, Yannis Avrithis, Andrei Bursuc, Konstantinos Karantzalos, and Nikos Komodakis. What to hide from your students: Attention-guided masked image modeling. In ECCV, 2022.
|
| 442 |
+
51. Diederik P Kingma and Max Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013.
|
| 443 |
+
52. Xiangwen Kong and Xiangyu Zhang. Understanding masked image modeling via learning occlusion invariant feature. arXiv preprint arXiv:2208.04164, 2022.
|
| 444 |
+
53. Y. LeCun. Mod'èles connexionistes de l'apprentissage. PhD thesis, Université e de Paris VI, 1987.
|
| 445 |
+
54. Gang Li, Heliang Zheng, Daqing Liu, Bing Su, and Changwen Zheng. Semmae: Semantic-guided masking for learning masked autoencoders. arXiv preprint arXiv:2206.10207, 2022.
|
| 446 |
+
55. Junnan Li, Pan Zhou, Caiming Xiong, and Steven CH Hoi. Prototypical contrastive learning of unsupervised representations. arXiv preprint arXiv:2005.04966, 2020.
|
| 447 |
+
|
| 448 |
+
56. Siyuan Li, Di Wu, Fang Wu, Zelin Zang, Kai Wang, Lei Shang, Baigui Sun, Hao Li, Stan Li, et al. Architecture-agnostic masked image modeling-from vit back to cnn. arXiv preprint arXiv:2205.13943, 2022.
|
| 449 |
+
57. Xiang Li, Wenhai Wang, Lingfeng Yang, and Jian Yang. Uniform masking: Enabling mae pre-training for pyramid-based vision transformers with locality. arXiv preprint arXiv:2205.10063, 2022.
|
| 450 |
+
58. Xiaotong Li, Yixiao Ge, Kun Yi, Zixuan Hu, Ying Shan, and Ling-Yu Duan. mc-beit: Multi-choice discretization for image bert pre-training. In ECCV, 2022.
|
| 451 |
+
59. Zhaowen Li, Zhiyang Chen, Fan Yang, Wei Li, Yousong Zhu, Chaoyang Zhao, Rui Deng, Liwei Wu, Rui Zhao, Ming Tang, et al. Mst: Masked self-supervised transformer for visual representation. NeurIPS, 34:13165-13176, 2021.
|
| 452 |
+
60. Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In ECCV, pages 740-755. Springer, 2014.
|
| 453 |
+
61. Hao Liu, Xinghua Jiang, Xin Li, Antai Guo, Deqiang Jiang, and Bo Ren. The devil is in the frequency: Geminated gestalt autoencoder for self-supervised visual pretraining. arXiv preprint arXiv:2204.08227, 2022.
|
| 454 |
+
62. Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, and Baining Guo. Swin transformer v2: Scaling up capacity and resolution. Cornell University - arXiv, 2021.
|
| 455 |
+
63. Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017.
|
| 456 |
+
64. Mehdi Noroozi and Paolo Favaro. Unsupervised learning of visual representations by solving jigsaw puzzles. In ECCV, pages 69-84. Springer, 2016.
|
| 457 |
+
65. Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018.
|
| 458 |
+
66. Deepak Pathak, Philipp Krahenbuhl, Jeff Donahue, Trevor Darrell, and Alexei A Efros. Context encoders: Feature learning by inpainting. In CVPR, pages 2536-2544, 2016.
|
| 459 |
+
67. Xiangyu Peng, Kai Wang, Zheng Zhu, and Yang You. Crafting better contrastive views for siamese representation learning. In CVPR, 2022.
|
| 460 |
+
68. Jiyang Qi, Jie Zhu, Mingyu Ding, Xiaokang Chen, Ping Luo, Leye Wang, Xinggang Wang, Wenyu Liu, and Jingdong Wang. Understanding self-supervised pretraining with part-aware representation learning. Tech. Report, 2023.
|
| 461 |
+
69. Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. In Marina Meila and Tong Zhang, editors, ICML, volume 139, pages 8821-8831. PMLR, 2021.
|
| 462 |
+
70. Marc Ranzato, Christopher Poultney, Sumit Chopra, Yann LeCun, et al. Efficient learning of sparse representations with an energy-based model. NeurIPS, 19:1137, 2007.
|
| 463 |
+
71. Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. IJCV, 115(3):211-252, 2015.
|
| 464 |
+
72. Chenxin Tao, Xizhou Zhu, Gao Huang, Yu Qiao, Xiaogang Wang, and Jifeng Dai. Siamese image modeling
|
| 465 |
+
|
| 466 |
+
for self-supervised vision representation learning. arXiv preprint arXiv:2206.01204, 2022.
|
| 467 |
+
73. Yonglong Tian, Chen Sun, Ben Poole, Dilip Krishnan, Cordelia Schmid, and Phillip Isola. What makes for good views for contrastive learning? NeurIPS, 33:6827-6839, 2020.
|
| 468 |
+
74. Yunjie Tian, Lingxi Xie, Jiemin Fang, Mengnan Shi, Junran Peng, Xiaopeng Zhang, Jianbin Jiao, Qi Tian, and Qixiang Ye. Beyond masking: Demystifying token-based pre-training for vision transformers. arXiv preprint arXiv:2203.14313, 2022.
|
| 469 |
+
75. Yunjie Tian, Lingxi Xie, Xiaopeng Zhang, Jiemin Fang, Haohang Xu, Wei Huang, Jianbin Jiao, Qi Tian, and Qixiang Ye. Semantic-aware generation for self-supervised visual representation learning. arXiv preprint arXiv:2111.13163, 2021.
|
| 470 |
+
76. Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, and Hervé Jégou. Training data-efficient image transformers & distillation through attention. arXiv preprint arXiv:2012.12877, 2020.
|
| 471 |
+
77. Laurens Van der Maaten and Geoffrey Hinton. Visualizing data using t-sne. Journal of machine learning research, 9(11), 2008.
|
| 472 |
+
78. Pascal Vincent, Hugo Larochelle, Yoshua Bengio, and Pierre-Antoine Manzagol. Extracting and composing robust features with denoising autoencoders. In ICML, pages 1096-1103, 2008.
|
| 473 |
+
79. Pascal Vincent, Hugo Larochelle, Isabelle Lajoie, Yoshua Bengio, and Pierre-Antoine Manzagol. Stacked denoising autoencoders: Learning useful representations in a deep network with a local denoising criterion. J. Mach. Learn. Res., 11:3371-3408, 2010.
|
| 474 |
+
80. Luya Wang, Feng Liang, Yangguang Li, Wanli Ouyang, Honggang Zhang, and Jing Shao. Repre: Improving self-supervised vision transformer with reconstructive pre-training. arXiv preprint arXiv:2201.06857, 2022.
|
| 475 |
+
81. Wenhui Wang, Hangbo Bao, Li Dong, Johan Bjorck, Zhiliang Peng, Qiang Liu, Kriti Aggarwal, Owais Khan, Saksham Singhal, Subhojit Som, and Furu Wei. Image as a foreign language: Beit pretraining for all vision and vision-language tasks. 2023.
|
| 476 |
+
82. Xinlong Wang, Rufeng Zhang, Chunhua Shen, Tao Kong, and Lei Li. Dense contrastive learning for self-supervised visual pre-training. In CVPR, pages 3024-3033, 2021.
|
| 477 |
+
83. Chen Wei, Haoqi Fan, Saining Xie, Chao-Yuan Wu, Alan Yuille, and Christoph Feichtenhofer. Masked feature prediction for self-supervised visual pre-training. arXiv preprint arXiv:2112.09133, 2021.
|
| 478 |
+
84. Longhui Wei, Lingxi Xie, Wengang Zhou, Houqiang Li, and Qi Tian. Mvp: Multimodality-guided visual pretraining. In ECCV, 2022.
|
| 479 |
+
85. Zhirong Wu, Yuanjun Xiong, Stella X Yu, and Dahua Lin. Unsupervised feature learning via non-parametric instance discrimination. In CVPR, pages 3733-3742, 2018.
|
| 480 |
+
86. Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, and Jian Sun. Unified perceptual parsing for scene understanding. In ECCV, pages 418-434, 2018.
|
| 481 |
+
87. Jiahao Xie, Wei Li, Xiaohang Zhan, Ziwei Liu, Yew Soon Ong, and Chen Change Loy. Masked frequency modeling for self-supervised visual pre-training. arXiv preprint arXiv:2206.07706, 2022.
|
| 482 |
+
88. Junyuan Xie, Ross Girshick, and Ali Farhadi. Unsupervised deep embedding for clustering analysis. In ICML, pages 478-487. PMLR, 2016.
|
| 483 |
+
|
| 484 |
+
89. Zhenda Xie, Zigang Geng, Jingcheng Hu, Zheng Zhang, Han Hu, and Yue Cao. Revealing the dark secrets of masked image modeling. arXiv preprint arXiv:2205.13543, 2022.
|
| 485 |
+
90. Zhenda Xie, Yutong Lin, Zheng Zhang, Yue Cao, Stephen Lin, and Han Hu. Propagate yourself: Exploring pixel-level consistency for unsupervised visual representation learning. In CVPR, pages 16684-16693, 2021.
|
| 486 |
+
91. Zhenda Xie, Zheng Zhang, Yue Cao, Yutong Lin, Jianmin Bao, Zhuliang Yao, Qi Dai, and Han Hu. Simmim: A simple framework for masked image modeling. arXiv preprint arXiv:2111.09886, 2021.
|
| 487 |
+
92. Zhenda Xie, Zheng Zhang, Yue Cao, Yutong Lin, Yixuan Wei, Qi Dai, and Han Hu. On data scaling in masked image modeling. arXiv preprint arXiv:2206.04664, 2022.
|
| 488 |
+
93. Jianwei Yang, Devi Parikh, and Dhruv Batra. Joint unsupervised learning of deep representations and image clusters. In CVPR, pages 5147-5156, 2016.
|
| 489 |
+
94. Kun Yi, Yixiao Ge, Xiaotong Li, Shusheng Yang, Dian Li, Jianping Wu, Ying Shan, and Xiaohu Qie. Masked image modeling with denoising contrast. arXiv preprint arXiv:2205.09616, 2022.
|
| 490 |
+
95. Yang You, Igor Gitman, and Boris Ginsburg. Large batch training of convolutional networks. arXiv preprint arXiv:1708.03888, 2017.
|
| 491 |
+
96. Sangdoo Yun, Dongyoon Han, Seong Joon Oh, Sanghyuk Chun, Junsuk Choe, and Youngjoon Yoo. Cutmix: Regularization strategy to train strong classifiers with localizable features. In ICCV, pages 6023-6032, 2019.
|
| 492 |
+
97. Jure Zbontar, Li Jing, Ishan Misra, Yann LeCun, and Stephane Deny. Barlow twins: Self-supervised learning via redundancy reduction. arXiv preprint arXiv:2103.03230, 2021.
|
| 493 |
+
98. Hao Zhang, Feng Li, Shilong Liu, Lei Zhang, Hang Su, Jun Zhu, Lionel M. Ni, and Heung-Yeung Shum. Dino: Detr with improved denoising anchor boxes for end-to-end object detection. 2023.
|
| 494 |
+
99. Hongyi Zhang, Moustapha Cisse, Yann N. Dauphin, and David Lopez-Paz. mixup: Beyond empirical risk minimization. In ICLR, 2017.
|
| 495 |
+
100. Richard Zhang, Phillip Isola, and Alexei A Efros. Colorful image colorization. In ECCV, pages 649-666. Springer, 2016.
101. Xiaosong Zhang, Yunjie Tian, Wei Huang, Qixiang Ye, Qi Dai, Lingxi Xie, and Qi Tian. Hivit: Hierarchical vision transformer meets masked image modeling. arXiv preprint arXiv:2205.14949, 2022.
102. Xinyu Zhang, Jiahui Chen, Junkun Yuan, Qiang Chen, Jian Wang, Xiaodi Wang, Shumin Han, Xiaokang Chen, Jimin Pi, Kun Yao, Junyu Han, Errui Ding, and Jingdong Wang. CAE v2: Context autoencoder with CLIP target. CoRR, abs/2211.09799, 2022.
103. Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Scene parsing through ade20k dataset. In CVPR, pages 633-641, 2017.
104. Jinghao Zhou, Chen Wei, Huiyu Wang, Wei Shen, Cihang Xie, Alan Yuille, and Tao Kong. Ibot: Image bert pre-training with online tokenizer. arXiv preprint arXiv:2111.07832, 2021.
105. Chengxu Zhuang, Alex Lin Zhai, and Daniel Yamins. Local aggregation for unsupervised learning of visual embeddings. In ICCV, pages 6002-6012, 2019.
2202.03xxx/2202.03026/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5025226af11e00f2526e41431a07c203889d63a0efe168eef564893f89bcb3c0
size 1119874

2202.03xxx/2202.03026/layout.json
ADDED
The diff for this file is too large to render. See raw diff

2202.03xxx/2202.03028/44aa698c-cf31-45b6-996a-147e65cbcb97_content_list.json
ADDED
The diff for this file is too large to render. See raw diff

2202.03xxx/2202.03028/44aa698c-cf31-45b6-996a-147e65cbcb97_model.json
ADDED
The diff for this file is too large to render. See raw diff

2202.03xxx/2202.03028/44aa698c-cf31-45b6-996a-147e65cbcb97_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0ccd4e9da97269931d49879485783470b56e0b25423ce409180a24e49551b8c7
size 559018

2202.03xxx/2202.03028/full.md
ADDED
@@ -0,0 +1,465 @@
# QUARK: A Framework for Quantum Computing Application Benchmarking
|
| 2 |
+
|
| 3 |
+
Jernej Rudi Finžgar\*†§, Philipp Ross\*§, Leonhard Holscher*, Johannes Klepsch*, Andre Luckow\*‡ *BMW Group, Munich, Germany
|
| 4 |
+
|
| 5 |
+
$^{\dagger}$ Technical University Munich, Germany
|
| 6 |
+
|
| 7 |
+
$^{\ddagger}$ Ludwig Maximilian University Munich, Germany
|
| 8 |
+
|
| 9 |
+
§ Authors contributed equally
|
| 10 |
+
|
| 11 |
+
Abstract—Quantum computing (QC) is anticipated to provide a speedup over classical approaches for specific problems in optimization, simulation, and machine learning. With the advances in quantum computing toward practical applications, the need to analyze and compare different quantum solutions is increasing. While different low-level benchmarks exist, they often do not provide sufficient insights into real-world application-level performance. We propose an application-centric benchmark method and the Quantum computing Application benchmark (QUARK) framework to foster the investigation and creation of application benchmarks for QC. This paper establishes three significant contributions: (1) it makes a case for application-level benchmarks and provides an in-depth "pen and paper" benchmark formulation of two reference problems: robot path and vehicle option optimization from the industrial domain; (2) it proposes the open-source QUARK framework for designing, implementing, executing, and analyzing benchmarks; (3) it provides multiple reference implementations for these two reference problems based on different known, and where needed, extended, classical and quantum algorithmic approaches and analyzes their performance on different types of infrastructures.
|
| 12 |
+
|
| 13 |
+
Index Terms—quantum computing, benchmark, optimization
|
| 14 |
+
|
| 15 |
+
# I. INTRODUCTION
|
| 16 |
+
|
| 17 |
+
Motivation: Quantum computing (QC) is transitioning from research to industrialization. It promises to improve optimization, machine learning, and simulation problems significantly, overcoming the limitations of existing high-performance computing systems [1]. Applications for these problem domains can be found in academia and industry [2]. For example, the automotive industry's complex design, manufacturing, logistics, and financial challenges are promising candidates for quantum-based optimization and machine learning approaches. Quantum chemistry simulations promise to enhance the material research process, e.g., for battery cell chemistry.
|
| 18 |
+
|
| 19 |
+
Impressive progress has been made, as visible, e.g., in several quantum advantage demonstrations [3], [4]. However, it is currently unclear what hardware technology and algorithms will deliver a practical quantum advantage, i.e., a quantum system that provides better solution quality, time-to-solution, energy usage, or cost than a classical system. The evaluation of quantum systems is becoming increasingly important to assess scientific and technical progress addressing the needs of end users, funding agencies, and investors. Benchmarks are critical for this purpose and will help to guide application, algorithm, and hardware development, and build communities [5], [6].
|
| 20 |
+
|
| 21 |
+
State-of-the-art and limitations: Current quantum computing benchmarks often focus on low-level hardware performance, targeting hardware providers [7], [8] and providing valuable metrics to assess technological maturity and roadmaps. Unfortunately, the results often do not translate to real-world application performance. Higher-level benchmarks, e.g., Lubinski [9] and Martiel [10], consider a set of algorithms and circuits. While these approaches provide important insights, they do not investigate end-to-end application performance and thus, foster holistic advances on all levels required for real-world applications.
|
| 22 |
+
|
| 23 |
+
Key insights, contributions, and artifacts: In this work, we make three significant contributions:
|
| 24 |
+
|
| 25 |
+
(1) We propose an application-centric approach for developing benchmarks. By using a "pencil and paper" approach (as popularized by the NAS parallel benchmark (NPB) [11]), we allow for multiple problem formulations, e.g., quantum annealing, gate-based, hybrid and classical formulations. Considering the maturity of quantum hardware and programming systems, we think this approach is best-suited, facilitating innovations and optimizations on all levels, e.g., hardware, control system, operating system and middleware, algorithm, and application level. Specifically, we provide a formulation of two reference problems from the industrial domain, robot path and vehicle option optimization (see section III);
|
| 26 |
+
(2) We introduce the open-source Quantum computing Application benchmark (QUARK) [12] framework for designing, implementing, executing, and analyzing benchmarks (see section IV). QUARK addresses critical requirements of application benchmarks, such as the need to abstract realistic workloads and datasets into benchmarks, support multiple implementations, and reproducibly capture all results;
|
| 27 |
+
(3) We demonstrate QUARK's capabilities by implementing benchmarks for the two reference problems (see section V). For these problems, we develop and characterize several classical and quantum algorithms (e.g., a novel QUBO formulation of the partial MAX-SAT problem) and benchmark these on different infrastructures (e.g., D-Wave and simulation).
|
| 28 |
+
Limitations: It is challenging to develop representative application benchmarks for quantum computing, as it is unclear which algorithm, qubit modality, and hardware will deliver a quantum advantage. As current quantum systems provide no real practical advantage, the utility of application-level benchmarks is still limited. Further, transferring benchmark results to other applications is often challenging. Finally, in its present form QUARK is limited to optimization problems and thus does not cover all quantum application domains.
|
| 31 |
+
|
| 32 |
+
# II. BACKGROUND AND RELATED WORK
|
| 33 |
+
|
| 34 |
+
# A. Quantum Computing Infrastructure
|
| 35 |
+
|
| 36 |
+
Various hardware realizations of quantum computers have been proposed and are in development. These quantum hardware systems typically possess different characteristics, e.g., gate fidelities, coherence times, and clock speeds. Currently, superconducting and ion-trapped qubits are the most widely used modalities. Both modalities are available from different vendors, e.g., superconducting systems from IBM [13], Google [14], and Rigetti [15] and ion-trap-based systems from IonQ [16] and Honeywell [17]. Non-gate-based systems for quantum annealing from D-Wave are also broadly available on the D-Wave [18] and AWS clouds. Finally, approaches such as neutral atom [19], and topological quantum computation [20] could become prominent in the future.
Additionally, classical simulation of quantum systems is crucial for designing quantum algorithms and verifying results obtained on quantum devices. Hence, it is necessary to understand the trade-offs and scales of different simulation approaches (see [21] for an overview).
# B. Benchmarks
Benchmarks are standardized workloads, i.e., sets of inputs (program and data), that are used to compare computer systems [22], [23] and have been instrumental in many areas of computer science and engineering. In general, two types of benchmarks exist: (i) specification-based benchmarks that provide a "pen and paper" description of a problem; and (ii) reference implementations. Both approaches have trade-offs: specification-based benchmarks are flexible, allowing for innovation. Results of these benchmarks are, however, difficult to compare. Reference implementations limit the design space and allow for more controlled yet expensive experiments.
Benchmarks arise on different levels: System-level benchmarks focus on the lower-level hardware and system aspects (e.g., gate fidelity) and, thus, are difficult to map to application performance. Algorithmic-level benchmarks evaluate specific, significant subroutines. Application-level benchmarks are more holistic and consider the entire stack comprising hardware, operating system, middleware, classical resources, and the interplay between the individual components. Nevertheless, transferring insights and results between different applications is often difficult as only a narrow set of these interactions can be covered. Table I summarizes classical and quantum benchmarks for different levels.
1) Classical Benchmarks: Important benchmarks relevant to quantum computing have emerged in HPC and in many application domains. For example, the High-Performance Linpack (HPL) [31] is used to create the Top500 supercomputing list. The NPB [11] originated in the domain of aerodynamics simulations and is a "paper and pencil" benchmark comprising five parallel kernels and three application benchmarks (e.g., LU matrix decomposition).

TABLE I: IMPORTANT BENCHMARKS FOR THE DIFFERENT LAYERS: FROM SYSTEM-LEVEL TO APPLICATION-LEVEL BENCHMARKS.

<table><tr><td>Level</td><td>Classical</td><td>Quantum</td></tr><tr><td>Application</td><td>ImageNet [24], MLPerf [26], Chook [27]</td><td>QScore [10], SupermarQ [28], Fermi-Hubbard Model [29], [30]</td></tr><tr><td>Algorithm</td><td>Linpack [31], SPEC [32], TSPLib95 [33], SAT competition [34]</td><td>VQE [35], QAOA [36], Annealing [37], [38]</td></tr><tr><td>System</td><td>SPEC HPC, ACCEL [39], MPI [40], OMP [32]</td><td>QV [7], Volumetric benchmarking [41], randomized gate benchmarking [7], Arline compiler benchmark [42], CLOPS [8], QASM-Bench [43]</td></tr></table>
Various benchmarks have been proposed and advanced with the emergence of data and machine learning workloads and applications. Application-centric benchmarks, such as ImageNet [24] for computer vision and Glue [25] for natural language processing, were instrumental in advancing the state of machine learning, by providing labeled, standardized datasets that enabled comparisons.
There exist several benchmarks for common optimization tasks, such as the Boolean satisfiability problem (SAT) [34], scheduling [44], [45], and the traveling salesperson problem (TSP) [33].
2) Quantum System Benchmarks: Quantum system benchmarks focus on low-level aspects of quantum devices. One prominent example is the quantum volume (QV) benchmark [7], defined as the largest executable circuit with equal width (number of qubits) and depth (number of circuit layers). Thus, the QV provides valuable information (e.g., gate fidelities, coherence times) needed to assess quantum hardware quality and to validate roadmaps.
Blume-Kohout et al. [41] extend the QV beyond square circuits, allowing rectangular circuits with different numbers of qubits and layers. Further, the authors propose, in addition to randomized circuits used by QV, the use of other circuit types, e.g., Grover iterations and Hamiltonian simulations, and additional quality metrics.
The circuit layer operations per second (CLOPS) metric focuses on the execution speed [8]. The benchmark is based on parametrized circuits, i.e., circuits which are static and are configured with parameters at runtime. Parameterized circuits are used in quantum algorithms for machine learning, optimization, and chemistry, particularly in the NISQ-era. The metric considers the circuit execution time, including, e.g., preparation overheads.
QASMBench [43] is a benchmark suite providing different small- to large-scale quantum circuits with an emphasis on evaluating these circuits on quantum hardware. While the benchmark includes application-relevant circuits (e.g., for the quantum approximate optimization algorithm (QAOA)), the authors focus on evaluating quantum hardware performance using these circuits (e.g., gate fidelity).
3) Quantum Application Benchmarks: We differentiate between characterizations, i.e., activities that focus on describing and understanding systems, and benchmarks, i.e., standardized workloads that allow a comparison between systems.
Characterizations: D-Wave devices have been thoroughly investigated regarding their performance, tunability, and limitations for different applications from science to finance and industry. Grant et al. [46] utilize a portfolio optimization use case to analyze the effects of different control parameters of quantum annealers. In particular, they monitored how the solution quality changes with different embeddings, annealing times, and spin reversal routines. Perdomo et al. [37] investigate the combinational circuit fault diagnosis (CCFD) industrial optimization problem, focusing on the scalability of annealing approaches.
Various characterizations of gate-based systems and applications exist. Willsch et al. [36] investigate the performance of QAOA and annealing and their ability to discover the optimal solution for artificial Max-Cut and 2-SAT problems. Performance aspects, e.g., the time-to-solution, are not investigated.
**Benchmarks:** While previous examples focus on specific application scenarios, Mills et al. [47] emphasize the need for more holistic benchmarks. To this end, the authors propose three circuit designs: shallow, square, and deep circuits. The proposed approach is similar to the volumetric benchmark approach proposed in [41]. While these circuit types can be mapped to more concrete applications on a high level, it is difficult to predict performance on concrete applications (e.g., for specific problem types and sizes).
Martiel et al. [10] propose an application-centric optimization benchmark called Q-Score. The Q-Score is based on solving the Max-Cut problem using QAOA on standardized Erdős-Rényi graphs of different sizes. As Q-Score only encapsulates a single problem, its practical value is limited.
Lubinski et al. [9] propose application-oriented benchmarks to assess gate-based quantum systems using a volumetric framework. Currently, the framework comprises 11 different algorithms. While most of these algorithms provide important building blocks for quantum applications, the analysis is not conducted in the context of industry applications. Important application domains, such as optimization and machine learning, are not addressed. The framework relies on a normalized fidelity metric, comparing the output distributions of the optimal solution and experiment.
Tomesh et al. [28] propose the SupermarQ benchmark suite comprising eight benchmark kernels (e.g., GHZ, Flip Code, and QAOA) for gate-based devices. It focuses on synthetic problems, e.g., QAOA-based Max-Cut optimization on a random graph, making it difficult to generalize results to real-world problems.
Discussion: Most approaches focus on specific applications and systems, investigating different configurations to improve understanding. Further, they often rely on generic quality metrics, which are difficult to map to real-world application performance. Finally, they often lack an end-to-end perspective and ignore hidden costs, e.g., the time required to move data between classical and quantum interfaces. The current state reflects the maturity of the quantum ecosystem, which is yet to deliver a practical advantage. The standardization of metrics, datasets, benchmarking methods, and reproducibility will become increasingly important considering the rapid progress toward real-world applications.
# III. APPLICATIONS AND WORKLOADS
Solving optimization problems has been a major driving force behind the development of quantum computing, as even marginal improvements over existing methods can lead to significant economic impact. This section describes and rigorously formulates two representative industrial applications from the optimization domain: robot path planning and vehicle options planning. The results of both benchmark problems provide immediate utility for assessing a proposed solution's feasibility and business impact. Further, various ways exist to scale and adapt the problems to the evolving technological landscape.
# A. Robot Path Planning
Application: Robots are a crucial enabler for automation in industrial manufacturing, driving quality, efficiency, and scale improvements. However, the deployment of robot systems comprising software and hardware is challenging. One particular example is planning paths for complex multi-robot systems [48]. Robots must follow a pre-defined path to execute multiple tasks in such systems.
An example is the polyvinyl chloride (PVC) sealing process, in which spaces on the vehicle body, e. g., between joint sheets, are sealed using PVC, a thermoplastic material, to increase waterproofness and prevent corrosion. The real-world system is highly complex - for example, each robot has multiple tools and configuration settings, like the number and type of nozzles used by each robot. Multiple robots (up to four) work in parallel during this process. Thus, spatial constraints to avoid collisions must be enforced. The objective is to find the shortest valid path that fulfills the following requirements: (1) all seams need to be sealed; (2) the robot must always start and end at a particular home position; and (3) no collision between the different robots may occur.
Due to the limitations of current quantum computing hardware, we make several simplifications. First, we only consider single robot systems, i.e., no collisions must be avoided. Second, we simplify the dataset and aggregate data across some dimensions, e.g., different available tools and configuration parameters. Third, we only consider two different tools and configuration settings. Further, we decrease the problem size to allow execution on current quantum hardware. For this purpose, we remove seams from the real-world problem graph deterministically to ensure reproducibility.
Problem Class: The problem is a variant of the NP-hard TSP. It is specified using a weighted graph, encoding the distances between all possible node pairs. The goal is to find a combination of nodes representing the shortest path and, thus, the shortest time.
While robot path planning shares some similarities with the TSP, there are some key differences: (1) There are two nodes per seam, but only one of these nodes needs to be visited to seal that seam; (2) there are numerous tools and configuration settings in which a node can be visited; (3) the costs from one node to the other with a specific tool/configuration setting are not symmetric; and (4) the graph is not fully connected as not all moves are possible.
Mathematical Model: We define $x_{snct}^{(i)}$ as a binary variable, which we set to 1 if the robot is at the node $(s,n,c,t)$ at time-step $i$ , where $s$ denotes the seam number, $n$ the node number, $c$ the configuration and $t$ the tool setting. Overall there are $N_{\mathrm{seams}} + 1$ time-steps as we need to visit all seams plus the special home position for a path to be valid. The cost function comprises the following components:
$$
f_{\text{dist.}}(\mathbf{x}) = \sum_{i=1}^{N_{\text{seams}}+1} \sum_{(s,n,c,t)} \sum_{(s',n',c',t')} d_{snct}^{s'n'c't'}\, x_{snct}^{(i)}\, x_{s'n'c't'}^{(i+1)}, \tag{1a}
$$

$$
f_{\text{time}}(\mathbf{x}) = \sum_{i=1}^{N_{\text{seams}}+1} \left[ \sum_{(s,n,c,t)} x_{snct}^{(i)} - 1 \right]^{2}, \tag{1b}
$$

$$
f_{\text{comp.}}(\mathbf{x}) = \sum_{s=1}^{N_{\text{seams}}+1} \left[ \sum_{i=1}^{N_{\text{seams}}+1} \sum_{(n,c,t)} x_{snct}^{(i)} - 1 \right]^{2}, \tag{1c}
$$
where we have collected all $x_{snct}^{(i)}$ into a vector $\mathbf{x}$ . For simplicity, the home position is included in $(s,n,c,t)$ . The total distance covered by the robot is $f_{\mathrm{dist.}}$ , with $d_{snct}^{s'n'c't'}$ , representing the distance between $x_{snct}$ and $x_{s'n'c't'}$ . Additionally, we defined two constraint terms: $f_{\mathrm{time}}$ and $f_{\mathrm{comp.}}$ . The constraint term $f_{\mathrm{time}}$ ensures that only a single node is visited per time-step, while $f_{\mathrm{comp.}}$ ensures that every task is performed exactly once, i.e., every seam is sealed and the home position is visited.
The total cost function is given by
$$
f(\mathbf{x}) = f_{\text{dist.}}(\mathbf{x}) + \lambda \left[ f_{\text{comp.}}(\mathbf{x}) + f_{\text{time}}(\mathbf{x}) \right], \tag{2}
$$
where $\lambda$ is the Lagrange parameter determining the magnitude of the constraint terms. The resulting QUBO instance can be optimized using quantum approaches such as quantum annealing, QAOA, or classical algorithms. In a post-processing step, we reorder the solution so that the robot starts at the home position.
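To make the mapping concrete, the following minimal sketch assembles the QUBO of Eq. (2) for a toy instance. It is our own illustrative code, not QUARK's implementation: the problem dimensions, the placeholder `dist` function, and the simple index bookkeeping are all assumptions.

```python
from collections import defaultdict
from itertools import product

# Toy dimensions (assumed): one seam, two nodes per seam, two configs, two tools
N_SEAMS, N_CONFIGS, N_TOOLS = 1, 2, 2
N_STEPS = N_SEAMS + 1  # all seams plus the home position, Eq. (3b)

# Enumerate the states (s, n, c, t); seam index N_SEAMS with a single node models the home position
states = [(s, n, c, t) for s, n, c, t in
          product(range(N_SEAMS), range(2), range(N_CONFIGS), range(N_TOOLS))]
states += [(N_SEAMS, 0, c, t) for c, t in product(range(N_CONFIGS), range(N_TOOLS))]
index = {(i, st): k for k, (i, st) in enumerate(product(range(N_STEPS), states))}

def dist(a, b):
    """Placeholder for d_{snct}^{s'n'c't'}; a real benchmark would read this from data."""
    return 0.0 if a == b else 1.0

LAMBDA = 10.0           # Lagrange parameter of Eq. (2)
Q = defaultdict(float)  # QUBO as a dictionary of (row, col) -> coefficient

# f_dist (Eq. 1a): distance between nodes visited at consecutive time steps
for i in range(N_STEPS - 1):
    for a in states:
        for b in states:
            Q[(index[(i, a)], index[(i + 1, b)])] += dist(a, b)

# f_time (Eq. 1b): exactly one node per time step, (sum_x - 1)^2 expanded
for i in range(N_STEPS):
    idxs = [index[(i, a)] for a in states]
    for k, ia in enumerate(idxs):
        Q[(ia, ia)] += -LAMBDA
        for ib in idxs[k + 1:]:
            Q[(ia, ib)] += 2 * LAMBDA

# f_comp (Eq. 1c): every seam (and the home position) is visited exactly once
for s in range(N_SEAMS + 1):
    idxs = [index[(i, a)] for i in range(N_STEPS) for a in states if a[0] == s]
    for k, ia in enumerate(idxs):
        Q[(ia, ia)] += -LAMBDA
        for ib in idxs[k + 1:]:
            Q[(ia, ib)] += 2 * LAMBDA

print(f"{len(index)} binary variables, {len(Q)} non-zero QUBO entries")
```

The resulting dictionary can be handed directly to a QUBO or annealing solver; constant offsets from expanding the squared penalty terms are irrelevant for the minimization and are omitted.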
Using
$$
N_{\text{qubits}} = \left( 2_{\text{(nodes per seam)}} \cdot N_{\text{seams}} + 1_{\text{(home position)}} \right) \cdot N_{\text{configs}} \cdot N_{\text{tools}} \cdot N_{\text{time-steps}}, \tag{3a}
$$

$$
N_{\text{time-steps}} = N_{\text{seams}} + 1_{\text{(home position)}}, \tag{3b}
$$
we can compute the number of qubits $N_{\mathrm{qubits}}$ needed to encode the optimization objective Eq. (2) on a quantum device (see Table II).
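The resource estimate in Eq. (3) is straightforward to evaluate; the short helper below is an illustrative sketch (names are ours) that reproduces the rows of Table II.

```python
def n_qubits(n_seams: int, n_tools: int, n_configs: int) -> int:
    """Evaluate Eq. (3): number of qubits needed for the robot path QUBO."""
    n_time_steps = n_seams + 1           # Eq. (3b): all seams plus the home position
    n_nodes = 2 * n_seams + 1            # two nodes per seam plus the home position
    return n_nodes * n_configs * n_tools * n_time_steps  # Eq. (3a)

# Reproduces the rows of Table II
print(n_qubits(1, 2, 2))    # 24
print(n_qubits(2, 2, 2))    # 60
print(n_qubits(70, 4, 4))   # 160176
```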
TABLE II: RESOURCE ESTIMATION FOR THE ROBOT PATH OPTIMIZATION, DISPLAYING THE NUMBER OF QUBITS REQUIRED FOR PROBLEM INSTANCES OF INCREASING COMPLEXITY.

<table><tr><td>Nseams</td><td>Ntools</td><td>Nconfigs</td><td>Ntime-steps</td><td>Nqubits</td></tr><tr><td>1</td><td>2</td><td>2</td><td>2</td><td>24</td></tr><tr><td>2</td><td>2</td><td>2</td><td>3</td><td>60</td></tr><tr><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td></tr><tr><td>70</td><td>4</td><td>4</td><td>71</td><td>160176</td></tr></table>

# B. Vehicle Options

Application: Before a new vehicle model can be deployed for production, several tests have to be carried out on pre-series vehicles to ensure the feasibility and the functionality of specific component configurations. The manufacturer wants to save resources and produce as few pre-series vehicles as possible while still performing all desired tests. Further, not all feature configurations can realistically be implemented in all vehicles, leading to constraints that the produced vehicles must satisfy.
Problem Class: The vehicle options optimization problem belongs to the family of SAT problems, which are hard to solve. SAT problems ask whether a configuration of Boolean variables exists, such that a given Boolean formula evaluates to 1. $^{1}$ SAT problems are NP-complete and not only lie at the center of contemporary theoretical computer science research but also appear in a wide range of fields, such as artificial intelligence [49], circuit design [50], and computational biology [51]. Additionally, an extension of SAT problems to maximum satisfiability (MAX-SAT) is frequently used to search for the configuration that maximizes the number of satisfied clauses. Due to its theoretical importance and applicability, the study of (MAX-)SAT is an active area of research [34], [52].
Mathematical Model: Consider the set of $N_{\mathrm{v}}$ test vehicles $\{\mathbf{v}^{(1)}, \dots, \mathbf{v}^{(N_{\mathrm{v}})}\}$ , where each vehicle is exactly defined by its configuration of $N_{\mathrm{f}}$ features. That is, for each $i$ , $\mathbf{v}^{(i)} \in \{0, 1\}^{N_{\mathrm{f}}}$ is a binary vector of dimension $N_{\mathrm{f}}$ , where its $j$ th component $v_{j}^{(i)}$ encodes the information whether feature $j$ is absent ( $v_{j}^{(i)} = 0$ ) or present ( $v_{j}^{(i)} = 1$ ) in this particular vehicle.
In a realistic setting, not all $2^{N_{\mathrm{f}}}$ possible configurations are feasible (e.g., a vehicle cannot simultaneously have a V4 and V8 engine), leading to the introduction of $N_{\mathrm{h}}$ constraints $\phi_{k}$ . Each constraint can be specified as a Boolean expression involving some subset of features. For example, the condition that vehicle $i$ must contain at least one of the features 1 or 2, and not include feature 3 can be formulated as follows:
$$
\phi_{\text{example}}\left(\mathbf{v}^{(i)}\right) = \left(v_{1}^{(i)} \vee v_{2}^{(i)}\right) \wedge \overline{v}_{3}^{(i)}.
$$
Since all of the $N_{\mathrm{v}}$ vehicles have to satisfy each of the $N_{\mathrm{h}}$ constraints, we require
$$
\bigwedge_{j=1}^{N_{\mathrm{h}}} \bigwedge_{i=1}^{N_{\mathrm{v}}} \phi_{j}\left(\mathbf{v}^{(i)}\right) = 1. \tag{4}
$$
Additionally, we want to perform $N_{\mathrm{s}}$ different tests on the vehicles. We model this by introducing a collection of $N_{\mathrm{s}}$ test requirements $\theta_{k}$; we demand each of the $\theta_{k}$ to be satisfied by at least one of the $N_{\mathrm{v}}$ vehicles:
$$
\bigwedge_{k=1}^{N_{\mathrm{s}}} \bigvee_{i=1}^{N_{\mathrm{v}}} \theta_{k}\left(\mathbf{v}^{(i)}\right) = 1. \tag{5}
$$
Combining the buildability constraints and the test requirements, we can state the full mathematical formulation of the vehicle options problem as:
$$
\left[ \bigwedge_{j=1}^{N_{\mathrm{h}}} \bigwedge_{i=1}^{N_{\mathrm{v}}} \phi_{j}\left(\mathbf{v}^{(i)}\right) \right] \wedge \left[ \bigwedge_{k=1}^{N_{\mathrm{s}}} \bigvee_{i=1}^{N_{\mathrm{v}}} \theta_{k}\left(\mathbf{v}^{(i)}\right) \right] = 1. \tag{6}
$$
In practice, a related question is asked: given that a certain number of vehicles can be produced, what is the configuration of features of the produced vehicles that maximizes the number of tests that can be performed on them? Due to the limited capabilities of current quantum devices, we limit ourselves to finding the optimal configuration of features for a single vehicle. This approach can be interpreted as a single step of the optimization procedure for multiple vehicles. After one finds the vehicle that satisfies the most tests, the satisfied tests can be removed from consideration, and the next vehicle is chosen by maximizing the number of remaining tests it satisfies.
Thus, the optimal configuration is defined as:
$$
\mathbf{v}^{*} = \underset{\mathbf{v} \in \Phi}{\arg\max} \left( \sum_{k=1}^{N_{\mathrm{s}}} \theta_{k}(\mathbf{v}) \right), \tag{7}
$$
where $\Phi = \left\{\mathbf{v} \mid \bigwedge_{k=1}^{N_{\mathrm{h}}} \phi_k(\mathbf{v}) = 1\right\}$ is the set of the configurations that satisfy all buildability constraints<sup>2</sup>. This problem formulation is an instance of MAX-SAT,<sup>3</sup> with the buildability constraints and test requirements corresponding to hard and soft constraints from the MAX-SAT literature [53].
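The sequential procedure sketched above, pick the best single vehicle, drop the tests it already covers, and repeat, can be expressed as a simple outer loop around any single-vehicle solver. The sketch below is purely illustrative; `solve_single_vehicle` is a hypothetical stand-in for an arbitrary solver (e.g., annealing on the QUBO of Eq. (10) or an exact MAX-SAT solver), and all names are our own assumptions.

```python
from typing import Callable, List, Tuple

Vehicle = Tuple[int, ...]                 # binary feature vector v
Predicate = Callable[[Vehicle], bool]     # a buildability constraint phi or test requirement theta

def plan_test_vehicles(hard: List[Predicate],
                       tests: List[Predicate],
                       solve_single_vehicle: Callable[[List[Predicate], List[Predicate]], Vehicle],
                       max_vehicles: int) -> List[Vehicle]:
    """Greedy outer loop: repeatedly solve the single-vehicle problem of Eq. (7)
    for the still-uncovered tests, then remove the tests covered by the chosen vehicle."""
    remaining = list(tests)
    fleet: List[Vehicle] = []
    for _ in range(max_vehicles):
        if not remaining:
            break
        v = solve_single_vehicle(hard, remaining)       # e.g., annealing on the QUBO of Eq. (10)
        fleet.append(v)
        remaining = [t for t in remaining if not t(v)]  # drop tests this vehicle already satisfies
    return fleet
```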
For simplicity, we limit ourselves to MAX-3SAT (i.e., MAX-SAT where all clauses are length 3) instances in conjunctive normal form (CNF), since any MAX-SAT instance can efficiently be brought into this form [54], [55].
We transform our problem into a suitable form to utilize quantum devices. In our case, this amounts to rewriting the given MAX-3SAT instance as a QUBO problem. We extend the QUBO formulation by Dinneen [56] to be able to prioritize satisfying hard over soft constraints.
Consider a clause
$$
\xi_{i} = \left(x_{i1} \vee x_{i2} \vee x_{i3}\right), \quad x_{ij} \in \left\{v_{1}, \dots, v_{m}, \bar{v}_{1}, \dots, \bar{v}_{m}\right\}.
$$
Using the fact that we can represent the negation of binary variables as $\bar{v} \Leftrightarrow (1 - v)$ , we can equivalently state the clause $\xi_{i}$ as a cubic polynomial in the binary variables $x_{ij}$ :
$$
\xi_{i} = x_{i1} + x_{i2} + x_{i3} - x_{i1}x_{i2} - x_{i1}x_{i3} - x_{i2}x_{i3} + x_{i1}x_{i2}x_{i3}. \tag{8}
$$
<sup>2</sup> In principle, a set of weights $\{w_{k}\}$ could be used to modify the objective function to $\sum_{k=1}^{N_{\mathrm{s}}} w_{k}\theta_{k}(\mathbf{v})$, yielding a weighted (partial) MAX-SAT instance. This corresponds to prioritizing some tests.
<sup>3</sup> In the literature, Partial MAX-SAT is sometimes used to describe such problems.
By introducing an ancillary binary variable $z_{i}$ , we can reduce the degree of the polynomial on the r.h.s. of Eq. (8) as
$$
x_{i1} x_{i2} x_{i3} = \max_{z_{i} \in \{0,1\}} z_{i}\left(x_{i1} + x_{i2} + x_{i3} - 2\right). \tag{9}
$$
Since $x = x^{2}$ for binary variables, we can write each clause as a purely quadratic polynomial by combining Eqs. (8) and (9).
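The correctness of this reduction is easy to check by brute force; the following self-contained snippet (illustrative only) verifies that, after maximizing over the ancilla, the quadratic form of Eqs. (8) and (9) reproduces the clause value $x_{i1} \vee x_{i2} \vee x_{i3}$ for all eight assignments.

```python
from itertools import product

def clause_or(x1, x2, x3):
    """Truth value of the 3-literal clause (x1 or x2 or x3)."""
    return int(x1 or x2 or x3)

def clause_quadratic(x1, x2, x3):
    """Eq. (8) with the cubic term replaced by the ancilla maximization of Eq. (9)."""
    cubic = max(z * (x1 + x2 + x3 - 2) for z in (0, 1))
    return x1 + x2 + x3 - x1 * x2 - x1 * x3 - x2 * x3 + cubic

assert all(clause_or(*x) == clause_quadratic(*x) for x in product((0, 1), repeat=3))
print("Eqs. (8) and (9) reproduce the OR clause for all assignments")
```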
Let us denote with $\widetilde{\phi}_j(\mathbf{v},\mathbf{z}_h)$ and $\widetilde{\theta}_k(\mathbf{v},\mathbf{z}_s)$ quadratic polynomials corresponding to the hard and soft constraints transformed in this manner. Here $\mathbf{z}_h$ and $\mathbf{z}_s$ are binary vectors of dimensions $N_{\mathrm{h}}$ and $N_{\mathrm{s}}$ , with their components being the ancillary variables introduced to reduce the degree of the hard and soft constraints, respectively. The vehicle options MAX-3SAT problem can then be formulated as finding the maximum of the following QUBO problem:
$$
C_{\text{MAX-SAT}}\left(\mathbf{v}, \mathbf{z}_{h}, \mathbf{z}_{s}\right) = \lambda \sum_{j=1}^{N_{\mathrm{h}}} \widetilde{\phi}_{j}\left(\mathbf{v}, \mathbf{z}_{h}\right) + \sum_{k=1}^{N_{\mathrm{s}}} \widetilde{\theta}_{k}\left(\mathbf{v}, \mathbf{z}_{s}\right), \tag{10}
$$
where $\lambda$ is a hyperparameter. If we set $\lambda$ to be the number of soft constraints $N_{\mathrm{s}}$, it is never favorable to violate a hard constraint in order to satisfy a soft constraint.<sup>4</sup> In that case
$$
\mathbf{v}_{\text{opt}} := \arg\max_{\mathbf{v}} \left[ \max_{\mathbf{z}_{h}, \mathbf{z}_{s}} C_{\text{MAX-SAT}}\left(\mathbf{v}, \mathbf{z}_{h}, \mathbf{z}_{s}\right) \right] \tag{11}
$$
is guaranteed to be the optimal configuration for the given instance. Equivalently, we can minimize $-C_{\mathrm{MAX-SAT}}$ using (quantum) annealing approaches to obtain $\mathbf{v}_{\mathrm{opt}}$. Note that this approach uses $N_{\mathrm{f}} + N_{\mathrm{h}} + N_{\mathrm{s}}$ binary variables, and therefore qubits, to encode the vehicle options problem.
While the procedure presented above works for the MAX-3SAT problem, we also include a direct QUBO formulation for MAX-SAT instances with arbitrary (even varying) clause lengths in QUARK. The formulation relies on mapping the SAT problem to the maximum independent set problem and is an extension of the encoding introduced by Choi [57].
# IV. QUARK BENCHMARKING FRAMEWORK
The QUARK framework aims to facilitate the development of application-level benchmarks. The framework simplifies the end-to-end process of designing, implementing, conducting, and communicating application benchmarks. As applications are highly diverse, it is essential to provide a flexible framework that focuses on investigating system performance in terms of application-level quality metrics (e.g., the path length for TSP applications), bridging the gap between existing system benchmarks and applications. The framework addresses essential benchmarking requirements, allowing for rapid development and refinement of application benchmarks. It provides reproducibility, verifiability, high usability, and customizability. It ensures that benchmark results can be easily collected and distributed. Furthermore, it is vendor-agnostic, ensuring the neutrality of the system.

Fig. 1. Architecture of QUARK: The framework follows the separation of concerns design principle encapsulating application- and problem-specific aspects, mappings to mathematical formulations, solvers, and hardware.
# A. Architecture
The framework is written in Python and designed to be modular and extensible, facilitating new application and problem types, algorithms, and devices. Figure 1 shows the architecture of the QUARK framework. The framework comprises five components: The Benchmark Manager is responsible for orchestrating the overall execution of the benchmark. The Application, Mapping, Solver, and Device components encapsulate different aspects of a benchmark. Each component provides an abstract base class that can be extended for the concrete realizations of a functionality. The modular approach accommodates changes and extensions to benchmark implementations with minimal effort.
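To illustrate the separation of concerns described here, the following sketch shows what such abstract base classes and an orchestration step could look like. All class and method names are our own assumptions for illustration and do not reproduce QUARK's actual interfaces.

```python
from abc import ABC, abstractmethod

class Application(ABC):
    """Workload definition: problem generation, validation, and evaluation."""
    @abstractmethod
    def generate_problem(self, config: dict): ...
    @abstractmethod
    def validate(self, solution) -> bool: ...
    @abstractmethod
    def evaluate(self, solution) -> float: ...

class Mapping(ABC):
    """Translation between the application data and a solver formulation (e.g., QUBO)."""
    @abstractmethod
    def map(self, problem): ...
    @abstractmethod
    def reverse_map(self, raw_solution): ...

class Solver(ABC):
    @abstractmethod
    def run(self, mapped_problem, device, params: dict): ...

class Device(ABC):
    @abstractmethod
    def submit(self, task): ...

def run_benchmark(app: Application, mapping: Mapping, solver: Solver,
                  device: Device, config: dict) -> dict:
    """Single orchestration step in the spirit of the Benchmark Manager."""
    problem = app.generate_problem(config)
    mapped = mapping.map(problem)
    raw = solver.run(mapped, device, config)
    solution = mapping.reverse_map(raw)
    return {"valid": app.validate(solution), "quality": app.evaluate(solution)}
```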
Application: The application component defines the workload, comprising a dataset of increasing complexity, a validation, and an evaluation function. Generally, the framework can integrate any dataset, e.g., real-world, synthetic and established benchmark data (e.g., TSPLib95 [33]). The application module can be configured using a shared, framework-wide configuration management system, e.g., different problem sizes can be generated depending on the configuration, accommodating the limitations of current quantum hardware and simulation devices. The validation function checks whether the provided solution is valid. For example, the function determines whether a valid path comprising a visit of all seams was generated for the robot path problem. The validation function assumes that the result can be validated using a classical system, which is the case for most problems. The task of the evaluation function is to compute and return a metric that aids the quantitative comparison of the discovered solution. The benchmark developer can utilize particular quality scores for this purpose.
Mapping: The task of the mapping module is to translate the application's data and problem specification into a mathematical formulation suitable for a solver. For example, quantum-based solvers for combinatorial optimization problems usually require the problem to be specified in a QUBO or Ising formulation [58]. The mapping is highly application-specific, requiring domain-specific knowledge. To implement the mapping, developers can utilize higher-level abstractions, e.g., PyQubo [59], or re-use available formulations in libraries, such as Ocean [60] and Qiskit Optimization [61].

<sup>4</sup> If one considers the weighted extension, then we have to set $\lambda \geq \sum_{k=1}^{N_{\mathrm{s}}} w_{k}$.
Solver: The solver is responsible for finding feasible and high-quality solutions to the formulated problem, i.e., of the defined objective function. Various algorithms for solving QUBO problems exist, e.g., quantum annealing as provided by D-Wave machines, QAOA [62] and VQE [63] for NISQ devices, and Grover Adaptive Search for fault-tolerant hardware [64]. Quantum SDKs like Qiskit [65] and PennyLane [66] provide circuit templates or higher abstractions for solving QUBO and Ising problem formulations.
Device: Several quantum devices (e.g., IonQ, Rigetti, IBM, Google), simulators (e.g., Amazon Braket's SV1, Qiskit's QASM, and PennyLane's lightning simulator), and services (e.g., Amazon Braket and Azure Quantum) exist. Each environment has its characteristics and API. Adapting applications and benchmarks to this heterogeneous landscape is challenging, requiring the manual customization of API (e.g., for job submission) and translation between data formats (e.g., different QUBO/Ising matrix representations).
The device class abstracts details of the physical device, such as submitting a task to the quantum system. QUARK currently supports different simulators, e.g., Amazon Braket, Qulacs, and Qiskit, and quantum hardware, i.e., annealing, gate-based superconducting and ion-trap based quantum computers via Amazon's Braket service. It can easily be extended to additional simulators and quantum hardware systems.
Benchmark Manager: The benchmark manager is the main component of QUARK orchestrating the overall benchmarking process. The benchmarking process is highly customizable, i.e., every module is configurable using a central configuration file. Custom parameter settings can be added for all components, allowing a straightforward evaluation of different parameters. This configuration system ensures that benchmarks and parameters can easily be standardized. Based on the configuration, the benchmark manager will create an experimental plan considering all combinations of configurations, e.g., different problem sizes, solver, and hardware combinations. It will then instantiate the respective framework components representing the application, the mapping to the algorithmic formulation, solver, and device.
After executing the benchmarks, QUARK collects the generated data and executes the validation and evaluation functions. Data is processed according to the tidy specification [67] and stored with its metadata, such as the used configuration, to ensure reproducibility. Further, the framework creates various analysis plots automatically. The well-defined data model can also accommodate manual data analytics, e.g., for profiling.
Figure 2 illustrates an example of concrete instances of the abstract components. For example, the robot path planning application generates a synthetic application graph mimicking real-world data and stores it as a NetworkX graph object. The current implementation provides different mapping options, e.g., a custom or a predefined (e.g., from Qiskit) QUBO mapping. The QUBO formulation is then used to solve the problem using quantum annealing, QAOA, or classical methods like simulated annealing. The device abstraction provides the means to execute application tasks.

Fig. 2. Example of how an application can be combined with different mappings, solvers and devices.
# B. Key Metrics
Defining relevant metrics is one of the key challenges when creating benchmarks. QUARK supports a set of well-defined metrics that in particular attempt to balance the trade-offs between the time-to-solution $TTS$ , the validity $V$ , and the quality $Q$ of a solution. $V$ indicates whether a solution found by the solver is valid (e.g. if it conforms to all constraints). Both $V$ and $Q$ are application-specific and can be customized.
$TTS$ is defined as the end-to-end time required to obtain a solution. It is decomposed into several components:
$$
TTS = T_{\text{mapping}} + T_{\text{solver}} + T_{\text{reverseMap}} + T_{\text{processSolution}} + T_{\text{validation}} + T_{\text{evaluation}}. \tag{12}
$$
Here, $T_{\text{solver}}$ denotes the runtime for the solver with a given configuration. $T_{\text{mapping}}$ gives the time required to map an application formulation to a representation required by the solver, e.g., the time required to convert a graph into a QUBO instance. $T_{\text{reverseMap}}$ and $T_{\text{processSolution}}$ are the execution times of two intermediate steps, needed to convert the solution to a representation that can be used for validation and evaluation. We store the time elapsed during validation and evaluation as $T_{\text{validation}}$ and $T_{\text{evaluation}}$ , respectively.
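A simple way to collect the components of Eq. (12) is to time each stage separately and store the breakdown alongside the result. The sketch below is illustrative only (function names and the argument layout are assumptions), not QUARK's implementation.

```python
import time

def timed(fn, *args):
    """Run fn(*args) and return (result, elapsed wall-clock seconds)."""
    start = time.perf_counter()
    result = fn(*args)
    return result, time.perf_counter() - start

def run_with_tts(problem, mapper, solver, reverse_map, post_process, validate, evaluate):
    """Execute one benchmark run and record the TTS breakdown of Eq. (12)."""
    t = {}
    mapped, t["T_mapping"] = timed(mapper, problem)
    raw, t["T_solver"] = timed(solver, mapped)
    unmapped, t["T_reverseMap"] = timed(reverse_map, raw)
    solution, t["T_processSolution"] = timed(post_process, unmapped)
    valid, t["T_validation"] = timed(validate, solution)
    quality, t["T_evaluation"] = timed(evaluate, solution)
    t["TTS"] = sum(t.values())  # end-to-end time as the sum of all components
    return solution, valid, quality, t
```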
# V. PERFORMANCE CHARACTERIZATION
We demonstrate the capabilities of QUARK by applying it to the applications presented in Section III. We present some initial results for these applications. The intention of these results is not to highlight the best approach to solve a given problem but to showcase the flexibility of QUARK, and the value of providing real-world applications.
# A. Experimental Setup
All non-quantum operations were executed on an NVIDIA DGX A100 device (Dual AMD Rome 7742, 2 TB memory, 8x NVIDIA A100 40 GB). We only use the GPU for selected experiments. Every experiment configuration is repeated at least five times to compute a variability measure. Problem sizes are chosen according to the current capabilities of quantum devices. While we have conducted some micro-experiments to identify suitable configurations of hyperparameters, we focused on understanding out-of-the-box performance rather than deeply profiling a single configuration.
We investigate different classical solvers and D-Wave quantum annealers for all applications. For TSP, we also perform classical simulations of QAOA. We assess quantum annealing on the two D-Wave machines available on Amazon Braket: the D-Wave Advantage 4.1 with 5760 qubits and the D-Wave 2000Q_6 with 2048 qubits. It is insightful to compare quantum annealing to its classical counterpart, simulated annealing – we use the implementation from the Neal library [68]. For all annealing methods, we used 500 reads. Although a QUBO formulation is typically not the most efficient mathematical representation for simulated annealing, this approach aids a direct comparison between quantum and simulated annealing.
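For reference, running the simulated annealing baseline on a QUBO dictionary with the Neal library takes only a few lines. The snippet below is a generic usage sketch with a toy QUBO, assuming the dwave-neal package is installed; it is not tied to our benchmark instances.

```python
import neal  # provided by the dwave-neal package

# Toy QUBO standing in for one of the generated problem instances (assumed values)
Q = {(0, 0): -1.0, (1, 1): -1.0, (0, 1): 2.0}

sampler = neal.SimulatedAnnealingSampler()
sampleset = sampler.sample_qubo(Q, num_reads=500)  # 500 reads, as in our experiments

best = sampleset.first  # lowest-energy sample found
print(best.sample, best.energy)
```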
We investigate the solution validity $V$, quality $Q$, and time-to-solution $TTS$. In all experiments, $TTS$ is mainly determined by $T_{\mathrm{solver}}$. The other components of $TTS$ do not significantly change for different problem sizes. For example, when annealing the robot path problem, $T_{\mathrm{solver}}$ accounts for more than $99\%$ of the overall $TTS$. For quantum annealing, $T_{\mathrm{solver}}$ also includes the embedding time, which significantly impacts $TTS$ for larger problem instances (e.g., for both TSP and PVC, the embedding time accounts for around $80\%$ of the overall $TTS$ for the largest problem size). The error bars (where visible) display the minima and maxima across different solver runs.
# B. Robot Path Planning (PVC Sealing)
Figure 3 summarizes the results for $TTS$ , the path length $Q$ , and for the ratio of valid solutions $V$ . A path is valid if it starts from the home position and visits all seams. In addition to simulated annealing, we implemented three other classical algorithms as baselines: greedy, reversed greedy, and random. The greedy and reversed greedy algorithms make the best and worst possible local move at each step, respectively. The random solver makes a random choice at every time step to decide which node to visit next.
While quantum annealing outperforms the reverse greedy and random algorithms, it performs worse than simulated annealing - particularly striking is the difference between the ratios of valid solutions. As both simulated and quantum annealing use the same problem formulation, this suggests that the capabilities of available quantum devices, rather than the problem formulation and encoding, are the limiting factors.
Another limitation of current D-Wave devices is that no embedding can be found for larger problem sizes beyond a few seams. On the D-Wave 2000Q we can only solve instances with two seams, while on the larger Advantage 4.1, problems with up to three seams can be solved. It is possible, however, that a QUBO formulation tailored to the particular architecture of D-Wave devices would perform significantly better, both in terms of the solution quality and the time-to-solution.
Fig. 3. Robot Path Optimization - Annealing Results for different numbers of seams. Simulated annealing achieves the best solution quality (bottom panel; lower is better) on average, while both D-Wave devices struggle to find valid solutions (middle panel).

Traveling Salesperson: The TSP problem represents a simplification of the PVC sealing problem. In the following, we use TSP to establish a baseline for the PVC experiments. By integrating TSPLib95 [33] into QUARK, we can benchmark quantum TSP solutions against state-of-the-art solutions.
Fig. 4 illustrates the performance obtained using the dsj1000 TSPLib95 dataset, which we reproducibly simplified by removing nodes until reaching the desired problem size. The QUBO formulation for this problem is constructed from the graph using the Ocean library [60], and requires $N_{\mathrm{nodes}}^2$ qubits. We compare quantum and simulated annealing to different classical algorithms: NetworkX's greedy algorithm [69], the described reversed greedy and random algorithms.
On average, the greedy solver returns the shortest paths. However, for up to eight nodes, we always find at least one annealing run with a better solution (e.g., all three annealing options for six nodes). Thus, it is important to not only consider the average performance.
In summary, simulated annealing exhibits a better performance than quantum annealing. However, while for PVC sealing, simulated annealing outperforms the greedy algorithm, the opposite is true for the TSP. The reason is that the greedy algorithm for PVC sealing never changes its tool and config setting during a tour, as it is never locally optimal to do so.
As for PVC sealing, above a certain problem size, finding an embedding for the quantum annealers is impossible. On the D-Wave 2000Q we can solve problems with at most eight nodes, while on the larger D-Wave Advantage 4.1 instances with up to 14 nodes are feasible. Furthermore, starting at eight nodes, we observe a drop in the ratio of valid solutions for both quantum annealers. Finally, for more than ten nodes, no valid solutions could be found with the D-Wave Advantage 4.1.

Fig. 4. TSP - Annealing Results for different numbers of nodes. While on average, the greedy solver achieves a better solution quality (bottom panel; lower is better), we find, up to eight nodes, at least one annealing run with a better solution. Starting at 8 nodes, we observe a drop in the rate of valid solutions (middle panel) for both quantum annealers.
Variational Algorithms: QAOA [62] is a variational quantum algorithm suitable for NISQ devices. It provides approximate solutions to combinatorial optimization problems. To assess the performance of QAOA, we implemented it with different libraries (e.g., Amazon Braket [70], PennyLane [66], and Qiskit [65]). Here, we present the results of a PennyLane-based QAOA implementation evaluated using QUARK on different CPU and GPU-based simulators. For example, PennyLane's lightning.qubit is a CPU-based simulator with built-in parallelization; lightning.gpu utilizes the cuStateVec library to offload computations to the GPU (currently, only one GPU is supported). We evaluated the performance on TSP instances up to five nodes. For all experiments, 60 iterations were performed, using the adjoint differentiation method [71], and the momentum optimizer (stepsize: 0.001, momentum: 0.9).
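The following sketch outlines a QAOA setup of the kind described, using PennyLane's lightning.qubit device, adjoint differentiation, and the momentum optimizer with the stated hyperparameters. The cost Hamiltonian is a toy Ising instance chosen for illustration, not the TSP QUBO used in our experiments, and whether the adjoint method supports Hamiltonian observables depends on the installed PennyLane version.

```python
import pennylane as qml
from pennylane import numpy as np

n_qubits, p_layers = 4, 1
# Toy Ising coefficients standing in for a QUBO-derived cost Hamiltonian (assumed values)
couplings = {(0, 1): 1.0, (1, 2): -0.5, (2, 3): 0.8}
fields = {0: 0.3, 3: -0.2}
cost_h = qml.Hamiltonian(
    list(couplings.values()) + list(fields.values()),
    [qml.PauliZ(i) @ qml.PauliZ(j) for (i, j) in couplings] + [qml.PauliZ(i) for i in fields],
)

dev = qml.device("lightning.qubit", wires=n_qubits)  # or "lightning.gpu"

# Fall back to diff_method="best" if adjoint does not support Hamiltonian observables
@qml.qnode(dev, diff_method="adjoint")
def cost(params):
    for w in range(n_qubits):
        qml.Hadamard(wires=w)          # uniform superposition as the initial state
    for gamma, beta in params:
        for (i, j), jij in couplings.items():   # cost unitary exp(-i*gamma*H_C)
            qml.IsingZZ(2 * gamma * jij, wires=[i, j])
        for i, hi in fields.items():
            qml.RZ(2 * gamma * hi, wires=i)
        for w in range(n_qubits):               # transverse-field mixer
            qml.RX(2 * beta, wires=w)
    return qml.expval(cost_h)

opt = qml.MomentumOptimizer(stepsize=0.001, momentum=0.9)
params = np.array(np.random.uniform(0, np.pi, (p_layers, 2)), requires_grad=True)
for _ in range(60):  # 60 iterations, as in our experiments
    params = opt.step(cost, params)
print(cost(params))
```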
The top panel in Fig. 5 shows that the CPU-based simulator has a better $TTS$ than the GPU simulator, particularly for small instances. The runtimes on CPU and GPU are comparable for five-node instances (corresponding to 25 qubits). To investigate the lack of performance gains using the GPU simulator, we conducted a micro experiment evaluating a single circuit execution of QAOA, averaged over ten runs (Fig. 6). We compare our QAOA circuit to PennyLane's StronglyEntanglingLayers benchmark [72]. The experiment confirmed that, for the problem sizes considered, our QAOA circuit does not benefit from GPU acceleration, as the overheads (e.g., data transfer, synchronization) negate a potential computational advantage. Further, the CPU simulator heavily utilizes the parallelism available on the machine's 64 CPU cores.

Fig. 5. TSP - QAOA Results for different numbers of nodes and CPU and GPU simulators. The trend of the $TTS$ (upper panel) indicates that GPU simulators could outperform CPU simulators at larger system sizes, while CPU-based simulations are more efficient at smaller system sizes. Validity (middle panel) improves slightly with a higher number of QAOA layers $p$ , while the quality curves (bottom panel; lower is better) collapse, as the algorithm cannot differentiate between the different valid solutions.

Fig. 6. QAOA Results - Runtime for a single circuit execution of QAOA and PennyLane's StronglyEntanglingLayers benchmark [72] (Ref). Both scenarios use 25 qubits. While the execution of Pennylane's reference circuit is significantly faster on a GPU than on a CPU, the converse holds for a QAOA circuit.
We expect that the GPU support will improve significantly in the future (see PennyLane's lightning roadmap [72]).
We define the validity $V$ as the probability of obtaining a valid bitstring (i.e., one that corresponds to an actual tour) among 50 measurements of the output state, averaged over five runs of the algorithm. As shown in the middle panel of Fig. 5, QAOA is able to identify valid solutions. However, $V$ decreases with increasing problem size, i.e., the number of nodes. The data also indicates that this issue could be remedied by deeper QAOA circuits; in our experiments, $V$ increased slightly for five layers.
The bottom panel of Fig. 5 displays the quality metric $Q$, defined as the expectation value of the path cost. $Q$ collapses to the mean tour length for both one and five layers. This could indicate that the Lagrange parameter value (here set to twice the average tour length) is too large. Our extensive hyperparameter search indicated a trade-off between $V$ and $Q$ and that smaller Lagrange parameter values lead to a decrease in $V$.
Beyond tuning hyperparameters, potential improvements to the performance of QAOA on TSP could come from improving the TSP formulation. The current formulation requires $N_{\mathrm{nodes}}^2$ qubits and in turn leads to numerous spurious states which do not correspond to a valid tour. As getting the algorithm to converge to valid solutions was rather challenging due to the constrained nature of the problem, we will investigate other initialization schemes (e.g., warm-starting [73] or Dicke state initialization [74]) and constrained mixers [75] to decrease the reliance on the Lagrange parameter for constraint enforcement.
# C. Vehicle Options
We evaluate the vehicle options application using randomly generated MAX-3SAT instances for a range of total feature (variable) numbers $N_{\mathrm{f}}$ up to 110, which is the largest problem size we can encode on a quantum annealer. For each $N_{\mathrm{f}}$, we generate ten different MAX-3SAT instances with $N_{\mathrm{h}} = 2N_{\mathrm{f}}$ hard constraints and $N_{\mathrm{s}} = \lceil 4.2N_{\mathrm{f}} \rceil$ soft constraints. Additionally, we ensure that no variable appears more than once within each clause.
We utilize the QUBO formulation presented in Section III-B (using $\lambda = N_{\mathrm{s}}$) to solve these problems using two different quantum annealing devices and a classical simulated annealing algorithm. With the given problem specifics, the number of qubits needed to encode the generated instances scales linearly as $\lceil 7.2N_{\mathrm{f}} \rceil$. We benchmarked the annealing-based approaches against the dedicated classical MAX-SAT solver RC2 [76]. We perform three solver runs for each problem instance, resulting in 30 runs per solver for each $N_{\mathrm{f}}$.
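For orientation, solving a (weighted) partial MAX-SAT instance with the RC2 solver from the PySAT package looks as follows; the clauses below are a toy example rather than one of our generated instances.

```python
from pysat.formula import WCNF
from pysat.examples.rc2 import RC2

# Toy weighted partial MAX-SAT instance: positive/negative integers encode literals v_i / not v_i
wcnf = WCNF()
wcnf.append([1, 2, -3])            # hard (buildability) clause: v1 or v2 or not v3
wcnf.append([-1, 3, 4])            # another hard clause
wcnf.append([2, 3, -4], weight=1)  # soft (test) clauses carry a weight
wcnf.append([-2, -3, 1], weight=1)

with RC2(wcnf) as solver:
    model = solver.compute()       # optimal assignment, or None if the hard clauses are unsatisfiable
    print(model, "unsatisfied soft-clause weight:", solver.cost)
```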
In Fig. 7 we display the $TTS$ , the ratio and average quality of valid solutions returned by each solver. The quality metric is defined as the ratio of satisfied soft constraints and is only displayed for valid solutions, i.e., solutions that satisfy all hard constraints. These results reveal several features of the solvers we analyzed. Firstly, we observed that annealing-based approaches do not consistently return valid solutions – this would suggest that increasing the $\lambda$ parameter (see Eq. (10)) is required. However, the ratio of satisfied soft constraints roughly coincides with that expected from random assignments, which is $87.5\%$ . This suggests that the annealing methods completely disregard soft constraints – increasing $\lambda$ would only exacerbate this problem. This issue often arises in constrained QUBO formulations as QUBOs are inherently unconstrained. Hence, one has to carefully balance enforcing constraints and optimizing the objective [77].
Fig. 7. Vehicle Options - Annealing Results. With increasing instance complexity, the classical solver's $TTS$ (upper panel) increases exponentially, while the scaling appears to be subexponential for annealing approaches. However, the classical solver outperforms annealing methods regarding the ratio of valid solutions (middle panel) and the quality of returned solutions (bottom panel). For larger problem instances, annealing algorithms only sporadically return valid solutions, showing a noticeable decline in performance.

Secondly, there is a big difference in solution quality between the RC2 classical solver and the annealing-based approaches. However, the $TTS$ of RC2 increases roughly exponentially, especially for larger problem sizes ($N_{\mathrm{f}} \geq 40$). While more efficient approximate classical algorithms exist [78], this gives hope that quantum annealing could become a viable alternative with improved encoding and devices. Such improvements could come from tuning hyperparameters (e.g., $\lambda$) of the QUBO mappings presented within our framework or from finding more efficient encodings that potentially better suit the topology of current annealing devices [79], [80].
While one (on average) expects a monotonic decrease in performance of annealing algorithms with increasing problem sizes [81], [82], this is not strictly the case in our study (see middle panel of Figure 7). This behavior can be explained by the fact that we generate a limited number of instances at each $N_{\mathrm{f}}$ . These instances can, in principle, be of varying complexity, which in turn leads to the varying performance of the solvers – the trend towards worsening efficacy as the problem sizes increase is, however, evident. Varying instance complexity manifests itself in fluctuating solution validity for annealing-based approaches and in the variation of $TTS$ for the classical solver (note error bars in Fig 7's top panel).
Finally, we can observe that the quantum and simulated annealing approaches yield comparable results. Moreover, it is interesting to note that quantum annealing provided valid solutions for some problem sizes where no valid solution was found with simulated annealing. The fact that, at least for this use case, quantum annealing seems to have started catching up with its classical counterpart portends optimism as quantum annealing devices are improved.
# VI. CONCLUSION AND FUTURE WORK
Benchmarks are instrumental for measuring progress, encouraging new and innovative solutions, accelerating adoption, establishing best practices, and predicting the viability of algorithms and hardware solutions. In this paper, we make a case for application-centric benchmarks to connect progress in the QC hardware realm to real-world application performance. For this purpose, we propose a "pen and paper" benchmark approach to address the uncertainty concerning practical quantum advantages. QUARK automates and standardizes critical parts of a benchmarking system, ensuring reproducibility and verifiability. The modular architecture enables benchmark developers to investigate and automate large-scale benchmark scenarios across diverse infrastructures. We envision that a wide variety of community-driven benchmarks will guide the progress toward a practical advantage for industrial quantum applications. QUARK-based benchmarks will help quantum application developers to identify performance bottlenecks, compare different algorithms, hardware and software configurations, and estimate resource requirements.
We demonstrate the benchmark development lifecycle from specification and implementation to execution with QUARK, using two significant and representative industrial applications: robot path and vehicle option optimization. Our results provide valuable insights into the current state of quantum computing. Unsurprisingly, classical solvers outperform quantum algorithms in that they more reliably return valid and higher quality solutions. However, the roughly exponential scaling of the $TTS$ for the classical solver in the vehicle options problem emphasizes the opportunity for a potential quantum computing advantage. While our results show limitations of current quantum approaches, we believe QUARK will be valuable for advancing application benchmarks.
Future Work: We will evolve QUARK by adding new problem classes (e.g., machine learning and chemistry) and frameworks (e.g., Amazon Braket Hybrid Jobs). Particularly, we will add the functionality of comprehensively analyzing hybrid algorithms, facilitating the in-depth characterization of all classical and quantum components. Further, we will enrich the data and metrics, e.g., by supporting lower-level metrics like gate fidelities to understand the system's behavior better.
We will evolve the presented reference implementations into standardized benchmarks. Standardizing all aspects of benchmarks is crucial to advance their uptake, utility, and impact. In addition to technical aspects, engaging interested parties from the technology industry, application users, and academia in a community-driven process is crucial.
# ACKNOWLEDGMENT
We thank S. Benesch, Y. van Dijk, M. Erdmann, C. Mendl, L. Müller and C. Riofrio for valuable feedback. Additionally, we thank AWS, specifically K. Brubaker, H. Katzgraber, H. Montagu, M. Resende, and M. Schuetz for the TSP QUBO formulation. PR and JK are partly funded by the German Ministry for Education and Research (BMBF) (Project: QAI2-Q-KIS/#13N15587).
# REFERENCES
[1] M. Biondi, A. Heid et al., "Quantum computing: An emerging ecosystem and industry use cases," McKinsey & Company, https://www.mckinsey.com/business-functions/mckinsey-digital/our-insights/quantum-computing-use-cases-are-getting-real-what-you-need-to-know, 2021.
[2] A. Bayerstadler, G. Becquin et al., "Industry quantum computing applications," EPJ Quantum Technology, vol. 8, no. 1, p. 25, Nov. 2021. [Online]. Available: https://doi.org/10.1140/epjqt/s40507-021-00114-x
[3] F. Arute, K. Arya et al., "Quantum supremacy using a programmable superconducting processor," Nature, vol. 574, no. 7779, pp. 505-510, Oct. 2019. [Online]. Available: https://doi.org/10.1038/s41586-019-1666-5
[4] L. S. Madsen, F. Laudenbach et al., "Quantum computational advantage with a programmable photonic processor," Nature, vol. 606, no. 7912, pp. 75-81, 2022. [Online]. Available: https://doi.org/10.1038/s41586-022-04725-x
[5] S. Sim, S. Easterbrook, and R. Holt, "Using benchmarking to advance research: a challenge to software engineering," in 25th International Conference on Software Engineering, 2003. Proceedings., 2003, pp. 74-83.
[6] M. Langione, J.-F. Bobier et al., "The race to quantum advantage depends on benchmarking," https://www.bcg.com/publications/2022/value-of-quantum-computing-benchmarks, 2022.
[7] A. W. Cross, L. S. Bishop et al., "Validating quantum computers using randomized model circuits," Phys. Rev. A, vol. 100, p. 032328, Sep 2019. [Online]. Available: https://link.aps.org/doi/10.1103/PhysRevA.100.032328
[8] A. Wack, H. Paik et al., "Quality, speed, and scale: three key attributes to measure the performance of near-term quantum computers," 2021.
[9] T. Lubinski, S. Johri et al., "Application-oriented performance benchmarks for quantum computing," 2021.
[10] S. Martiel, T. Ayral, and C. Allouche, "Benchmarking quantum coprocessors in an application-centric, hardware-agnostic and scalable way," 2021.
[11] D. Bailey, E. Barszcz et al., "The NAS parallel benchmarks," The International Journal of Supercomputing Applications, vol. 5, no. 3, pp. 63-73, 1991. [Online]. Available: https://doi.org/10.1177/109434209100500306
[12] (2022) QUARK: A framework for quantum computing application benchmarking. [Online]. Available: https://github.com/BMW-Group-Quantum/QUARK
[13] IBM. (2022) IBM Quantum. [Online]. Available: https://quantum-computing.ibm.com/
[14] Google. (2022) Quantum computer datasheet. [Online]. Available: https://quantumai.google/software/datasheet/weber.pdf
[15] Rigetti. (2022) Rigetti website. [Online]. Available: https://www.rigetti.com/get-quantum
[16] IonQ. (2022) IonQ website. [Online]. Available: https://ionq.com/
[17] Honeywell. (2022) Honeywell. [Online]. Available: https://www.honeywell.com/us/en/company/quantum
[18] D-Wave. (2022) D-Wave Leap. [Online]. Available: https://cloud.dwavesys.com/
[19] S. Ebadi, T. T. Wang et al., "Quantum phases of matter on a 256-atom programmable quantum simulator," Nature, vol. 595, no. 7866, pp. 227-232, Jul. 2021. [Online]. Available: https://doi.org/10.1038/s41586-021-03582-4
[20] V. Lahtinen and J. K. Pachos, "A Short Introduction to Topological Quantum Computation," SciPost Phys., vol. 3, p. 021, 2017. [Online]. Available: https://scipost.org/10.21468/SciPostPhys.3.3.021
[21] (2022) List of QC simulators. [Online]. Available: https://www.quantiki.org/wiki/list-qc-simulators
[22] D. Ferrari, Computer Systems Performance Evaluation. Prentice-Hall, 1978. [Online]. Available: https://books.google.de/books?id=geBQAAAAMAAJ
[23] R. Jain, The Art of Computer Systems Performance Analysis - Techniques for Experimental Design, Measurement, Simulation, and Modeling, ser. Wiley professional computing. Wiley, 1991.
[24] O. Russakovsky, J. Deng et al., "ImageNet large scale visual recognition challenge," 2015.
[25] A. Wang, A. Singh et al., "GLUE: A multi-task benchmark and analysis platform for natural language understanding," 2019.
[26] P. Mattson, C. Cheng et al., "MLPerf Training Benchmark," arXiv e-prints, p. arXiv:1910.01500, Oct. 2019.
[27] D. Perera, I. Akpabio et al., "Chook - a comprehensive suite for generating binary optimization problems with planted solutions," 2021.
[28] T. Tomesh, P. Gokhale et al., "SupermarQ: A scalable quantum benchmark suite," 2022. [Online]. Available: https://arxiv.org/abs/2202.11045
[29] B. T. Gard and A. M. Meier, "A classically efficient quantum scalable Fermi-Hubbard benchmark," 2021.
[30] P.-L. Dallaire-Demers, M. Stechly et al., "An application benchmark for fermionic quantum simulations," 2020.
[31] J. J. Dongarra, P. Luszczek, and A. Petitet, "The LINPACK benchmark: Past, present, and future," 2002.
[32] SPEC. (2022) Standard Performance Evaluation Corporation (SPEC): Benchmarks. [Online]. Available: https://www.spec.org/benchmarks.html
[33] G. Reinelt, "TSPLIB — A Traveling Salesman Problem Library," INFORMS Journal on Computing, vol. 3, no. 4, pp. 376-384, November 1991. [Online]. Available: https://ideas.repec.org/a/inm/orijoc/v3y1991i4p376-384.html
[34] (2022) International SAT competition. [Online]. Available: http://www.satcompetition.org/
[35] A. J. McCaskey, Z. P. Parks et al., "Quantum chemistry as a benchmark for near-term quantum computers," 2019.
[36] M. Willsch, D. Willsch et al., "Benchmarking the quantum approximate optimization algorithm," Quantum Information Processing, vol. 19, 06 2020.
[37] A. Perdomo-Ortiz, A. Feldman et al., "Readiness of quantum optimization machines for industrial applications," Phys. Rev. Applied, vol. 12, p. 014004, Jul 2019. [Online]. Available: https://link.aps.org/doi/10.1103/PhysRevApplied.12.014004
|
| 419 |
+
[38] S. Yarkoni, A. Alekseyenko et al., "Multi-car paint shop optimization with quantum annealing," in 2021 IEEE International Conference on Quantum Computing and Engineering (QCE), 2021, pp. 35-41.
|
| 420 |
+
[39] G. Juckeland, W. Brantley et al., "Spec accel: A standard application suite for measuring hardware accelerator performance," 11 2014.
|
| 421 |
+
[40] M. Müller, M. van Waveren et al., "Spec mpi2007-an application benchmark suite for parallel systems using mpi," Concurrency and Computation: Practice and Experience, vol. 22, pp. 191-205, 02 2010.
|
| 422 |
+
[41] R. Blume-Kohout and K. C. Young, “A volumetric framework for quantum computer benchmarks,” Quantum, vol. 4, p. 362, Nov. 2020. [Online]. Available: https://doi.org/10.22331/q-2020-11-15-362
|
| 423 |
+
[42] (2021) Standard performance evaluation corporation (spec): Benchmarks. [Online]. Available: https://github.com/ArlineQ/arline_benchmarks
|
| 424 |
+
[43] A. Li, S. Stein et al., "Qasmbench: A low-level qasm benchmark suite for nisq evaluation and simulation," 2020. [Online]. Available: https://arxiv.org/abs/2005.13018
|
| 425 |
+
[44] Schedulingbenchmarks.org. (2022) Shift scheduling benchmark data sets. [Online]. Available: http://www.schedulingbenchmarks.org/other.html
|
| 426 |
+
[45] J. Wilkes, "Google cluster-usage traces v3," Google Inc., Mountain View, CA, USA, Technical Report, Apr. 2020, posted at https://github.com/google/cluster-data/blob/master/ClusterData2019.md.
|
| 427 |
+
[46] E. Grant, T. S. Humble, and B. Stump, "Benchmarking quantum annealing controls with portfolio optimization," Physical Review Applied, vol. 15, no. 1, 1 2021.
|
| 428 |
+
[47] D. Mills, S. Sivarajah et al., "Application-motivated, holistic benchmarking of a full quantum computing stack," Quantum, vol. 5, p. 415, Mar 2021. [Online]. Available: http://dx.doi.org/10.22331/q-2021-03-22-415
|
| 429 |
+
[48] M. Muradi and R. Wanka, "Sample-based motion planning for multirobot systems," in 2020 6th International Conference on Control, Automation and Robotics (ICCAR), 2020, pp. 130-138.
|
| 430 |
+
[49] N. Narodytska, A. Ignatiev et al., “Learning optimal decision trees with SAT.” International Joint Conferences on Artificial Intelligence Organization, Jul. 2018. [Online]. Available: https://doi.org/10.24963/ijcai.2018/189
|
| 431 |
+
[50] T. Hong, Y. Li et al., “Qed: Quick error detection tests for effective post-silicon validation,” in 2010 IEEE International Test Conference, 2010, pp. 1–10.
|
| 432 |
+
[51] I. Lynce and J. Marques-Silva, “SAT in bioinformatics: Making the case with haplotype inference.” Springer Berlin Heidelberg, 2006, pp. 136–141.
|
| 433 |
+
|
| 434 |
+
[52] C.-M. Li and F. Manya, Eds., Theory and Applications of Satisfiability Testing - SAT 2021. Springer International Publishing, 2021. [Online]. Available: https://doi.org/10.1007/978-3-030-80223-3
|
| 435 |
+
[53] L. C. Min and M. Felip, "Maxsat, hard and soft constraints," Frontiers in Artificial Intelligence and Applications, vol. 185, no. Handbook of Satisfiability, p. 613-631, 2009. [Online]. Available: https://doi.org/10.3233/978-1-58603-929-5-613
|
| 436 |
+
[54] S. Skiena, The algorithm design manual. London: Springer, 2008.
|
| 437 |
+
[55] G. S. Tseitin, “On the complexity of derivation in propositional calculus,” 1983.
|
| 438 |
+
[56] M. J. Dinneen. (2016) Maximum 3-sat as qubo. [Online]. Available: https://canvas.auckland.ac.nz/courses/14782/files/574983/download? verifier=1xqRikUjTEBwm8PnObD8YVmKdeEhZ9Ui8axW8HwP& wrap=1
|
| 439 |
+
[57] V. Choi, "Different Adiabatic Quantum Optimization Algorithms for the NP-Complete Exact Cover and 3SAT Problems," Proceedings of the National Academy of Sciences, vol. 108, no. 7, pp. E19-E20, Feb. 2011, arXiv: 1010.1221. [Online]. Available: http://arxiv.org/abs/1010.1221
|
| 440 |
+
[58] F. W. Glover and G. A. Kochenberger, “A tutorial on formulating QUBO models,” CoRR, vol. abs/1811.11538, 2018. [Online]. Available: http://arxiv.org/abs/1811.11538
|
| 441 |
+
[59] M. Zaman, K. Tanahashi, and S. Tanaka, "Pyqubo: Python library for qubo creation," IEEE Transactions on Computers, 2021.
|
| 442 |
+
[60] D.-W. Systems, “D-wave ocean software documentation,” https://docs.ocean.dwavesys.com/en/latest/index.html, last accessed: Nov. 2021.
|
| 443 |
+
[61] Q. O. Developers. (2022) Qiskit optimization. [Online]. Available: https://github.com/Qiskit/qiskit-optimization
|
| 444 |
+
[62] E. Farhi, J. Goldstone, and S. Gutmann, “A quantum approximate optimization algorithm,” 2014.
|
| 445 |
+
[63] A. Peruzzo, J. McClean et al., "A variational eigenvalue solver on a photonic quantum processor," Nature Communications, vol. 5, no. 1, Jul 2014. [Online]. Available: http://dx.doi.org/10.1038/ncomms5213
|
| 446 |
+
[64] D. Bulger, W. P. Baritompa, and G. R. Wood, "Implementing Pure Adaptive Search with Grover's Quantum Algorithm," Journal of Optimization Theory and Applications, vol. 116, no. 3, pp. 517-529, Mar. 2003. [Online]. Available: https://doi.org/10.1023/A:1023061218864
|
| 447 |
+
[65] G. Aleksandrowicz, T. Alexander et al., “Qiskit: An open-source framework for quantum computing,” 2019.
|
| 448 |
+
[66] V. Bergholm, J. Izaac et al., "Pennylane: Automatic differentiation of hybrid quantum-classical computations," 2020.
|
| 449 |
+
[67] H. Wickham et al., “Tidy data,” Journal of Statistical Software, vol. 59, no. 10, pp. 1-23, 2014.
|
| 450 |
+
[68] D-Wave Systems, “dwave-neal,” D-Wave Systems, 2021. [Online]. Available: https://docs.ocean.dwavesys.com/projects/neal/en/latest/reference/sampler.html
|
| 451 |
+
[69] N. Developers, https://networkx.org/documentation/stable/reference/ algorithms/generated/networkx.algorithms.approximation.traveling salesman.greedy_tsp.html, last accessed: Nov. 2021.
|
| 452 |
+
[70] (2022) Amazon braket: Accelerate quantum computing research. [Online]. Available: https://aws.amazon.com/braket/
|
| 453 |
+
[71] T. Jones and J. Gacon, "Efficient calculation of gradients in classical simulations of variational quantum algorithms," 2020. [Online]. Available: https://arxiv.org/abs/2009.02823
|
| 454 |
+
[72] (2022) Lightning-fast simulations with pennylane and the nvidia cuquantum sdk. [Online]. Available: https://pennylane.ai/blog/2022/07/lightning-fast-simulations-with-pennylane-and-the-nvidia-cuquantum-sdk/
|
| 455 |
+
[73] D. J. Egger, J. Mareček, and S. Woerner, "Warm-starting quantum optimization," Quantum, vol. 5, p. 479, jun 2021. [Online]. Available: https://doi.org/10.22331%2Fq-2021-06-17-479
|
| 456 |
+
[74] K. Mesman, Z. Al-Ars, and M. Möller, "Qpack: Quantum approximate optimization algorithms as universal benchmark for quantum computers," 2021. [Online]. Available: https://arxiv.org/abs/2103.17193
|
| 457 |
+
[75] S. Hadfield, Z. Wang et al., “From the quantum approximate optimization algorithm to a quantum alternating operator ansatz,” Algorithms, vol. 12, no. 2, 2019.
|
| 458 |
+
[76] A. Ignatiev, A. Morgado, and J. Marques-Silva, "Rc2: an efficient maxsat solver," Journal on Satisfiability, Boolean Modeling and Computation, vol. 11, pp. 53-64, 09 2019.
|
| 459 |
+
[77] A. Lucas, “Ising formulations of many np problems,” Frontiers in Physics, vol. 2, 2014. [Online]. Available: http://dx.doi.org/10.3389/fphy.2014.00005
|
| 460 |
+
|
| 461 |
+
[78] S. Joshi, P. Kumar et al., "Approximation strategies for incomplete MaxSAT," in Lecture Notes in Computer Science. Springer International Publishing, 2018, pp. 219-228. [Online]. Available: https://doi.org/10.1007/978-3-319-98334-9_15
|
| 462 |
+
[79] Z. Bian, F. Chudak et al., "Solving SAT (and MaxSAT) with a quantum annealer: Foundations, encodings, and preliminary results," Information and Computation, vol. 275, p. 104609, Dec. 2020. [Online]. Available: https://doi.org/10.1016/j.ic.2020.104609
|
| 463 |
+
[80] N. Chancellor, S. Zohren et al., "A direct mapping of max k-SAT and high order parity checks to a chimera graph," Scientific Reports, vol. 6, no. 1, Nov. 2016. [Online]. Available: https://doi.org/10.1038/srep37107
|
| 464 |
+
[81] M. Žnidarič and M. Horvat, "Exponential complexity of an adiabatic algorithm for an NP-complete problem," Physical Review A, vol. 73, no. 2, Feb. 2006. [Online]. Available: https://doi.org/10.1103/physreva.73.022329
|
| 465 |
+
[82] M. Azinović, D. Herr et al., "Assessment of quantum annealing for the construction of satisfiability filters," SciPost Physics, vol. 2, no. 2, Apr. 2017. [Online]. Available: https://doi.org/10.21468/scipostphys.2.2.013
|
2202.03xxx/2202.03028/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a741764f90789fe9f819166c1f8c10cd178fde71216fb6e3f90479ee497dac4e
|
| 3 |
+
size 382074
|
2202.03xxx/2202.03028/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.03xxx/2202.03036/8d79809e-01a0-4bda-9822-cf005dbb67bd_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.03xxx/2202.03036/8d79809e-01a0-4bda-9822-cf005dbb67bd_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.03xxx/2202.03036/8d79809e-01a0-4bda-9822-cf005dbb67bd_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:70d622952e918dfc404c7b0b873f2d9589c083e648d1dede8bb4f17fb83eb65f
|
| 3 |
+
size 612269
|
2202.03xxx/2202.03036/full.md
ADDED
|
@@ -0,0 +1,640 @@
| 1 |
+
# Structure-Aware Transformer for Graph Representation Learning
|
| 2 |
+
|
| 3 |
+
Dexiong Chen $^{*12}$ Leslie O'Bray $^{*12}$ Karsten Borgwardt $^{12}$
|
| 4 |
+
|
| 5 |
+
# Abstract
|
| 6 |
+
|
| 7 |
+
The Transformer architecture has gained growing attention in graph representation learning recently, as it naturally overcomes several limitations of graph neural networks (GNNs) by avoiding their strict structural inductive biases and instead only encoding the graph structure via positional encoding. Here, we show that the node representations generated by the Transformer with positional encoding do not necessarily capture structural similarity between them. To address this issue, we propose the Structure-Aware Transformer, a class of simple and flexible graph Transformers built upon a new self-attention mechanism. This new self-attention incorporates structural information into the original self-attention by extracting a subgraph representation rooted at each node before computing the attention. We propose several methods for automatically generating the subgraph representation and show theoretically that the resulting representations are at least as expressive as the subgraph representations. Empirically, our method achieves state-of-the-art performance on five graph prediction benchmarks. Our structure-aware framework can leverage any existing GNN to extract the subgraph representation, and we show that it systematically improves performance relative to the base GNN model, successfully combining the advantages of GNNs and Transformers. Our code is available at https://github.com/BorgwardtLab/SAT.
|
| 8 |
+
|
| 9 |
+
# 1. Introduction
|
| 10 |
+
|
| 11 |
+
Graph neural networks (GNNs) have been established as powerful and flexible tools for graph representation learning,
|
| 12 |
+
|
| 13 |
+
*Equal contribution $^{1}$ Department of Biosystems Science and Engineering, ETH Zürich, Switzerland $^{2}$ SIB Swiss Institute of Bioinformatics, Switzerland. Correspondence to: Dexiong Chen <dexiong.chen@bsse.ethz.ch>, Leslie O'Bray <leslie.obray@bsse.ethz.ch>.
|
| 14 |
+
|
| 15 |
+
Proceedings of the $39^{th}$ International Conference on Machine Learning, Baltimore, Maryland, USA, PMLR 162, 2022. Copyright 2022 by the author(s).
|
| 16 |
+
|
| 17 |
+
with successful applications in drug discovery (Gaudelet et al., 2021), protein design (Ingraham et al., 2019), social network analysis (Fan et al., 2019), and so on. A large class of GNNs build multilayer models, where each layer operates on the previous layer to generate new representations using a message-passing mechanism (Gilmer et al., 2017) to aggregate local neighborhood information.
|
| 18 |
+
|
| 19 |
+
While many different message-passing strategies have been proposed, some critical limitations have been uncovered in this class of GNNs. These include the limited expressiveness of GNNs (Xu et al., 2019; Morris et al., 2019), as well as known problems such as over-smoothing (Li et al., 2018; 2019; Chen et al., 2020; Oono & Suzuki, 2020) and over-squashing (Alon & Yahav, 2021). Over-smoothing manifests as all node representations converging to a constant after sufficiently many layers, while over-squashing occurs when messages from distant nodes are not effectively propagated through certain "bottlenecks" in a graph, since too many messages get compressed into a single fixed-length vector. Designing new architectures beyond neighborhood aggregation is thus essential to solve these problems.
|
| 20 |
+
|
| 21 |
+
Transformers (Vaswani et al., 2017), which have proved to be successful in natural language understanding (Vaswani et al., 2017), computer vision (Dosovitskiy et al., 2020), and biological sequence modeling (Rives et al., 2021), offer the potential to address these issues. Rather than only aggregating local neighborhood information in the message-passing mechanism, the Transformer architecture is able to capture interaction information between any node pair via a single self-attention layer. Moreover, in contrast to GNNs, the Transformer avoids introducing any structural inductive bias at intermediate layers, addressing the expressivity limitation of GNNs. Instead, it encodes structural or positional information about nodes only into input node features, albeit limiting how much information it can learn from the graph structure. Integrating information about the graph structure into the Transformer architecture has thus gained growing attention in the graph representation learning field. However, most existing approaches only encode positional relationships between nodes, rather than explicitly encoding the structural relationships. As a result, they may not identify structural similarities between nodes and could fail to model the structural interaction between nodes (see Figure 1). This could explain why their performance
|
| 22 |
+
|
| 23 |
+

|
| 24 |
+
Figure 1: Position-aware vs. structure-aware: Using a positional encoding based on shortest paths in $G_{1}$ and $G_{2}$ respectively (assuming all edges have equal weight), node $u$ and $v$ would receive identical encodings since their shortest paths to all other nodes are the same in both graphs. However, their structures are different, with $v$ forming a triangle with its red neighbors.
|
| 25 |
+
|
| 26 |
+

|
| 27 |
+
|
| 28 |
+
was dominated by sparse GNNs in several tasks (Dwivedi et al., 2022).
|
| 29 |
+
|
| 30 |
+
Contribution In this work, we address the critical question of how to encode structural information into a Transformer architecture. Our principal contribution is to introduce a flexible structure-aware self-attention mechanism that explicitly considers the graph structure and thus captures structural interaction between nodes. The resulting class of Transformers, which we call the Structure-Aware Transformer (SAT), can provide structure-aware representations of graphs, in contrast to most existing position-aware Transformers for graph-structured data. Specifically:
|
| 31 |
+
|
| 32 |
+
- We reformulate the self-attention mechanism in Vaswani et al. (2017) as a kernel smoother and extend the original exponential kernel on node features to also account for local structures, by extracting a subgraph representation centered around each node.
|
| 33 |
+
- We propose several methods for automatically generating the subgraph representations, enabling the resulting kernel smoother to simultaneously capture structural and attributed similarities between nodes. The resulting representations are theoretically guaranteed to be at least as expressive as the subgraph representations.
|
| 34 |
+
- We demonstrate the effectiveness of SAT models on five graph and node property prediction benchmarks by showing it achieves better performance than state-of-the-art GNNs and Transformers. Furthermore, we show how SAT can easily leverage any GNN to compute the node representations which incorporate subgraph information and outperform the base GNN, making it an effortless enhancer of any existing GNN.
|
| 35 |
+
- Finally, we show that we can attribute the performance gains to the structure-aware aspect of our architecture, and showcase how SAT is more interpretable than the classic Transformer with an absolute encoding.
|
| 36 |
+
|
| 37 |
+
We will present the related work and relevant background in Sections 2 and 3 before presenting our method in Section 4 and our experimental findings in Section 5.
|
| 38 |
+
|
| 39 |
+
# 2. Related Work
|
| 40 |
+
|
| 41 |
+
We present here the work most related to ours, namely the work stemming from message passing GNNs, positional representations on graphs, and graph Transformers.
|
| 42 |
+
|
| 43 |
+
Message passing graph neural networks Message passing graph neural networks have recently been one of the leading methods for graph representation learning. An early seminal example is the GCN (Kipf & Welling, 2017), which was based on performing convolutions on the graph. Gilmer et al. (2017) reformulated the early GNNs into a framework of message passing GNNs, which has since then become the predominant framework of GNNs in use today, with extensive examples (Hamilton et al., 2017; Xu et al., 2019; Corso et al., 2020; Hu et al., 2020b; Velicković et al., 2018; Li et al., 2020a; Yang et al., 2022). However, as mentioned above, they suffer from problems of limited expressiveness, over-smoothing, and over-squashing.
|
| 44 |
+
|
| 45 |
+
Absolute encoding Because of the limited expressiveness of GNNs, there has been some recent research into the use of absolute encoding (Shaw et al., 2018), which consists of adding or concatenating positional or structural representations to the input node features. While it is often called an absolute positional encoding, we refer to it more generally as an absolute encoding to include both positional and structural encoding, which are both important in graph modeling. Absolute encoding primarily considers position or location relationships between nodes. Examples of position-based methods include the Laplacian positional encoding (Dwivedi & Bresson, 2021; Kreuzer et al., 2021), Weisfeiler-Lehman-based positional encoding (Zhang et al., 2020), and random walk positional encoding (RWPE) (Li et al., 2020b; Dwivedi et al., 2022), while distance-based methods include distances to a predefined set of nodes (You et al., 2019) and shortest path distances between pairs of nodes (Zhang et al., 2020; Li et al., 2020b). Dwivedi et al. (2022) extend these ideas by using a trainable absolute encoding.
|
| 46 |
+
|
| 47 |
+
Graph Transformers While the absolute encoding methods listed above can be used with message passing GNNs, they also play a crucial role in the (graph) Transformer architecture. Graph Transformer (Dwivedi & Bresson, 2021) provided an early example of how to generalize the Transformer architecture to graphs, using Laplacian eigenvectors as an absolute encoding and computing attention on the immediate neighborhood of each node, rather than on the full graph. SAN (Kreuzer et al., 2021) also used the Laplacian eigenvectors for computing an absolute encoding, but computed attention on the full graph, while distinguishing between true and created edges. Many graph Transformer methods also use a relative encoding (Shaw et al., 2018) in
|
| 48 |
+
|
| 49 |
+
addition to absolute encoding. This strategy incorporates representations of the relative position or distances between nodes on the graph directly into the self-attention mechanism, as opposed to the absolute encoding which is only applied once to the input node features. Mialon et al. (2021) propose a relative encoding by means of kernels on graphs to bias the self-attention calculation, which is then able to incorporate positional information into Transformers via the choice of kernel function. Other recent work seeks to incorporate structural information into the graph Transformer, for example by encoding some carefully selected graph theoretic properties such as centrality measures and shortest path distances as positional representations (Ying et al., 2021) or by using GNNs to integrate the graph structure (Rong et al., 2020; Jain et al., 2021; Mialon et al., 2021; Shi et al., 2021).
|
| 50 |
+
|
| 51 |
+
In this work, we combine the best of both worlds from message passing GNNs and from the Transformer architecture. We incorporate both an absolute as well as a novel relative encoding that explicitly incorporates the graph structure, thereby designing a Transformer architecture that takes both local and global information into account.
|
| 52 |
+
|
| 53 |
+
# 3. Background
|
| 54 |
+
|
| 55 |
+
In the following, we refer to a graph as $G = (V, E, \mathbf{X})$, where the node attributes of node $u \in V$ are denoted by $x_u \in \mathcal{X} \subset \mathbb{R}^d$ and the attributes of all nodes are stored in $\mathbf{X} \in \mathbb{R}^{n \times d}$ for a graph with $n$ nodes.
|
| 56 |
+
|
| 57 |
+
# 3.1. Transformers on Graphs
|
| 58 |
+
|
| 59 |
+
While GNNs use the graph structure explicitly, Transformers remove that explicit structure, and instead infer relations between nodes by leveraging the node attributes. In this sense, the Transformer (Vaswani et al., 2017) ignores the graph structure and rather considers the graph as a (multi-) set of nodes, and uses the self-attention mechanism to infer the similarity between nodes. The Transformer itself is composed of two main blocks: a self-attention module followed by a feed-forward neural network. In the self-attention module, the input node features $\mathbf{X}$ are first projected to query $(\mathbf{Q})$ , key $(\mathbf{K})$ and value $(\mathbf{V})$ matrices through a linear projection such that $\mathbf{Q} = \mathbf{X}\mathbf{W}_{\mathbf{Q}}$ , $\mathbf{K} = \mathbf{X}\mathbf{W}_{\mathbf{K}}$ and $\mathbf{V} = \mathbf{X}\mathbf{W}_{\mathbf{V}}$ respectively. We can compute the self-attention via
|
| 60 |
+
|
| 61 |
+
$$
|
| 62 |
+
\operatorname{Attn}(\mathbf{X}) := \operatorname{softmax}\left(\frac{\mathbf{Q}\mathbf{K}^{T}}{\sqrt{d_{out}}}\right)\mathbf{V} \in \mathbb{R}^{n \times d_{out}}, \tag{1}
|
| 63 |
+
$$
|
| 64 |
+
|
| 65 |
+
where $d_{out}$ refers to the dimension of $\mathbf{Q}$, and $\mathbf{W}_{\mathbf{Q}}, \mathbf{W}_{\mathbf{K}}, \mathbf{W}_{\mathbf{V}}$ are trainable parameters. It is common to use multi-head attention, which concatenates multiple instances of Eq. (1) and has been shown to be effective in practice (Vaswani et al., 2017). Then, the output of the self-attention is followed by a skip-connection and a feed-forward network (FFN), which jointly compose a Transformer layer, as shown below:
|
| 68 |
+
|
| 69 |
+
$$
|
| 70 |
+
\begin{aligned} \mathbf{X}' &= \mathbf{X} + \operatorname{Attn}(\mathbf{X}), \\ \mathbf{X}'' &= \operatorname{FFN}(\mathbf{X}') := \operatorname{ReLU}(\mathbf{X}' W_1) W_2. \end{aligned} \tag{2}
|
| 71 |
+
$$
|
| 72 |
+
|
| 73 |
+
Multiple layers can be stacked to form a Transformer model, which ultimately provides node-level representations of the graph. As the self-attention is equivariant to permutations of the input nodes, the Transformer will always generate the same representations for nodes with the same attributes regardless of their locations and surrounding structures in the graph. It is thus necessary to incorporate such information into the Transformer, generally via absolute encoding.
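As a concrete illustration, the following is a minimal PyTorch sketch of Eqs. (1) and (2) for a single graph with $n$ nodes; the class name `VanillaTransformerLayer` and all variable names are ours for illustration and do not refer to any released implementation.

```python
import math
import torch
import torch.nn as nn

class VanillaTransformerLayer(nn.Module):
    """Minimal sketch of Eqs. (1)-(2): full self-attention over the node set,
    a skip-connection, and a feed-forward network; no graph structure is used."""

    def __init__(self, d):
        super().__init__()
        self.W_Q = nn.Linear(d, d, bias=False)
        self.W_K = nn.Linear(d, d, bias=False)
        self.W_V = nn.Linear(d, d, bias=False)
        self.ffn = nn.Sequential(nn.Linear(d, d), nn.ReLU(), nn.Linear(d, d))

    def forward(self, X):                              # X: (n, d) node features of one graph
        Q, K, V = self.W_Q(X), self.W_K(X), self.W_V(X)
        A = torch.softmax(Q @ K.t() / math.sqrt(Q.size(-1)), dim=-1)  # Eq. (1)
        X = X + A @ V                                   # skip-connection, Eq. (2)
        return self.ffn(X)                              # FFN, Eq. (2)

layer = VanillaTransformerLayer(d=16)
out = layer(torch.randn(10, 16))                        # (10, 16)
```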
|
| 74 |
+
|
| 75 |
+
Absolute encoding Absolute encoding refers to adding or concatenating the positional or structural representations of the graph to the input node features before the main Transformer model, such as the Laplacian positional encoding (Dwivedi & Bresson, 2021) or RWPE (Dwivedi et al., 2022). The main shortcoming of these encoding methods is that they generally do not provide a measure of the structural similarity between nodes and their neighborhoods.
|
| 76 |
+
|
| 77 |
+
Self-attention as kernel smoothing As noticed by Mialon et al. (2021), the self-attention in Eq. (1) can be rewritten as a kernel smoother
|
| 78 |
+
|
| 79 |
+
$$
|
| 80 |
+
\operatorname{Attn}(x_v) = \sum_{u \in V} \frac{\kappa_{\exp}(x_v, x_u)}{\sum_{w \in V} \kappa_{\exp}(x_v, x_w)} f(x_u), \quad \forall v \in V, \tag{3}
|
| 81 |
+
$$
|
| 82 |
+
|
| 83 |
+
where $f(x) = \mathbf{W}_{\mathbf{V}}x$ is the linear value function and $\kappa_{\mathrm{exp}}$ is a (non-symmetric) exponential kernel on $\mathbb{R}^d\times \mathbb{R}^d$ parameterized by $\mathbf{W}_{\mathbf{Q}}$ and $\mathbf{W}_{\mathbf{K}}$ :
|
| 84 |
+
|
| 85 |
+
$$
|
| 86 |
+
\kappa_{\exp}(x, x') := \exp\left(\langle \mathbf{W}_{\mathbf{Q}} x, \mathbf{W}_{\mathbf{K}} x' \rangle / \sqrt{d_{out}}\right), \tag{4}
|
| 87 |
+
$$
|
| 88 |
+
|
| 89 |
+
where $\langle \cdot, \cdot \rangle$ is the dot product on $\mathbb{R}^d$ . With this form, Mialon et al. (2021) propose a relative positional encoding strategy via the product of this kernel and a diffusion kernel on the graph, which consequently captures the positional similarity between nodes. However, this method is only position-aware, in contrast to our structure-aware encoding that will be presented in Section 4.
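The equivalence between Eq. (1) and the kernel-smoother form of Eqs. (3) and (4) can be checked numerically on toy data. The snippet below is such a sanity check under our own naming; the small weight scale only keeps the exponentials numerically tame.

```python
import math
import torch

# Toy check that Eq. (1) and the kernel-smoother form of Eqs. (3)-(4) coincide.
n, d, d_out = 5, 8, 8
X = torch.randn(n, d)
W_Q, W_K, W_V = (0.1 * torch.randn(d, d_out) for _ in range(3))

# Eq. (1): softmax attention over all node pairs
A = torch.softmax((X @ W_Q) @ (X @ W_K).t() / math.sqrt(d_out), dim=-1)
out_attn = A @ (X @ W_V)

# Eqs. (3)-(4): kernel smoother with the exponential kernel kappa_exp
def kappa_exp(x, y):
    return torch.exp((W_Q.t() @ x) @ (W_K.t() @ y) / math.sqrt(d_out))

out_kernel = torch.stack([
    sum(kappa_exp(X[v], X[u]) * (W_V.t() @ X[u]) for u in range(n))
    / sum(kappa_exp(X[v], X[w]) for w in range(n))
    for v in range(n)
])
print(torch.allclose(out_attn, out_kernel, atol=1e-5))  # expected: True
```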
|
| 90 |
+
|
| 91 |
+
# 4. Structure-Aware Transformer
|
| 92 |
+
|
| 93 |
+
In this section, we will describe how to encode the graph structure into the self-attention mechanism and provide a class of Transformer models based on this framework.
|
| 94 |
+
|
| 95 |
+
# 4.1. Structure-Aware Self-Attention
|
| 96 |
+
|
| 97 |
+
As presented above, self-attention in the Transformer can be rewritten as a kernel smoother where the kernel is a trainable exponential kernel defined on node features, and which only
|
| 98 |
+
|
| 99 |
+

|
| 100 |
+
Figure 2: Overview of an example SAT layer that uses the $k$-subgraph GNN extractor as its structure extractor. The structure extractor generates structure-aware node representations, which are used to compute the query (Q) and key (K) matrices in the Transformer layer. Structure-aware node representations are generated by the $k$-subgraph GNN extractor by first extracting the $k$-hop subgraph centered at each node (here, $k = 1$) and then using a GNN on each subgraph to generate node representations from the full subgraph information. While the structure extractor can use any class of subgraphs, the one illustrated here, defined on the class of $k$-hop subgraphs, offers a reasonable trade-off between computation and expressiveness.
|
| 101 |
+
|
| 102 |
+
captures attributed similarity between a pair of nodes. The problem with this kernel smoother is that it cannot filter out nodes that are structurally different from the node of interest when they have the same or similar node features. In order to also incorporate the structural similarity between nodes, we consider a more generalized kernel that additionally accounts for the local substructures around each node. By introducing a set of subgraphs centered at each node, we define our structure-aware attention as:
|
| 103 |
+
|
| 104 |
+
$$
|
| 105 |
+
\operatorname{SA\text{-}attn}(v) := \sum_{u \in V} \frac{\kappa_{\mathrm{graph}}\left(S_G(v), S_G(u)\right)}{\sum_{w \in V} \kappa_{\mathrm{graph}}\left(S_G(v), S_G(w)\right)} f(x_u), \tag{5}
|
| 106 |
+
$$
|
| 107 |
+
|
| 108 |
+
where $S_G(v)$ denotes a subgraph in $G$ centered at a node $v$ associated with node features $\mathbf{X}$ and $\kappa_{\mathrm{graph}}$ can be any kernel that compares a pair of subgraphs. This new self-attention function not only takes the attributed similarity into account but also the structural similarity between subgraphs. It thus generates more expressive node representations than the original self-attention, as we will show in Section 4.4. Moreover, this self-attention is no longer equivariant to any permutation of nodes but only to nodes whose features and subgraphs coincide, which is a desirable property.
|
| 109 |
+
|
| 110 |
+
In the rest of the paper, we will consider the following form of $\kappa_{\mathrm{graph}}$ that already includes a large class of expressive and computationally tractable models:
|
| 111 |
+
|
| 112 |
+
$$
|
| 113 |
+
\kappa_{\mathrm{graph}}\left(S_G(v), S_G(u)\right) = \kappa_{\exp}\left(\varphi(v, G), \varphi(u, G)\right), \tag{6}
|
| 114 |
+
$$
|
| 115 |
+
|
| 116 |
+
where $\varphi(u, G)$ is a structure extractor that extracts vector representations of some subgraph centered at $u$ with node features $\mathbf{X}$ . We provide several alternatives of the structure extractor below. It is worth noting that our structure-aware self-attention is flexible enough to be combined with any model that generates representations of subgraphs, including GNNs and (differentiable) graph kernels. For notational
|
| 117 |
+
|
| 118 |
+
simplicity, we assume there are no edge attributes, but our method can easily incorporate edge attributes as long as the structure extractor can accommodate them. The edge attributes are consequently not considered in the self-attention computation, but are incorporated into the structure-aware node representations. In the structure extractors presented in this paper, this means that edge attributes were included whenever the base GNN was able to handle edge attributes.
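As a rough sketch (ours, not the reference implementation), the structure-aware attention of Eqs. (5) and (6) differs from the vanilla case only in where the query/key similarity comes from: it is computed on the subgraph representations $\varphi(u, G)$, while the value function still acts on the node features. Here `H` is assumed to hold one subgraph representation per node, produced by any structure extractor.

```python
import math
import torch

def structure_aware_attention(X, H, W_Q, W_K, W_V):
    """Sketch of Eqs. (5)-(6): the query/key similarity is computed on the
    subgraph representations H (one row per node, H[u] = phi(u, G)),
    while the value function f(x_u) = W_V^T x_u still uses the node features X."""
    scores = (H @ W_Q) @ (H @ W_K).t() / math.sqrt(W_Q.size(1))
    A = torch.softmax(scores, dim=-1)        # row v holds the kernel weights of Eq. (5)
    return A @ (X @ W_V)

# toy usage: H would normally come from a structure extractor such as those below
n, d, d_out = 6, 16, 16
X, H = torch.randn(n, d), torch.randn(n, d)  # H is a placeholder here
W_Q, W_K, W_V = (torch.randn(d, d_out) for _ in range(3))
out = structure_aware_attention(X, H, W_Q, W_K, W_V)   # (n, d_out)
```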
|
| 119 |
+
|
| 120 |
+
$k$ -subtree GNN extractor A straightforward way to extract local structural information at node $u$ is to apply any existing GNN model to the input graph with node features $\mathbf{X}$ and take the output node representation at $u$ as the subgraph representation at $u$ . More formally, if we denote by $\mathrm{GNN}_G^{(k)}$ an arbitrary GNN model with $k$ layers applied to $G$ with node features $\mathbf{X}$ , then
|
| 121 |
+
|
| 122 |
+
$$
|
| 123 |
+
\varphi(u, G) = \operatorname{GNN}_{G}^{(k)}(u). \tag{7}
|
| 124 |
+
$$
|
| 125 |
+
|
| 126 |
+
This extractor is able to represent the $k$ -subtree structure rooted at $u$ (Xu et al., 2019). While this class of structure extractors is fast to compute and can flexibly leverage any existing GNN, they cannot be more expressive than the Weisfeiler-Lehman test due to the expressiveness limitation of message passing GNNs (Xu et al., 2019). In practice, a small value of $k$ already leads to good performance, while not suffering from over-smoothing or over-squashing.
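A minimal sketch of the $k$-subtree extractor, assuming a dense adjacency matrix with self-loops and using a simple mean-aggregation network as a stand-in for an arbitrary message-passing GNN; the class name is ours.

```python
import torch
import torch.nn as nn

class KSubtreeExtractor(nn.Module):
    """Sketch of Eq. (7): run a k-layer message-passing GNN on the whole graph
    and read out the embedding at each node, which summarizes its k-hop subtree.
    The 'GNN' here is a simple mean-aggregation network over a dense adjacency
    matrix; any off-the-shelf GNN (GCN, GIN, PNA, ...) could be used instead."""

    def __init__(self, d, k):
        super().__init__()
        self.layers = nn.ModuleList([nn.Linear(d, d) for _ in range(k)])

    def forward(self, X, adj):                        # X: (n, d), adj: (n, n) with self-loops
        deg = adj.sum(dim=-1, keepdim=True).clamp(min=1)
        H = X
        for lin in self.layers:
            H = torch.relu(lin(adj @ H / deg))        # aggregate neighbors, then transform
        return H                                      # H[u] plays the role of phi(u, G)

extractor = KSubtreeExtractor(d=16, k=3)
adj = ((torch.rand(8, 8) < 0.3) | torch.eye(8, dtype=torch.bool)).float()
H = extractor(torch.randn(8, 16), adj)                # (8, 16)
```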
|
| 127 |
+
|
| 128 |
+
$k$ -subgraph GNN extractor A more expressive extractor is to use a GNN to directly compute the representation of the entire $k$ -hop subgraph centered at $u$ rather than just the node representation $u$ . Recent work has explored the idea of using subgraphs rather than subtrees around a node in GNNs, with positive experimental results (Zhang & Li, 2021; Wijesinghe & Wang, 2022), as well as being strictly
|
| 129 |
+
|
| 130 |
+
more powerful than the 1-WL test (Zhang & Li, 2021). We follow the same setup as is done in Zhang & Li (2021), and adapt our GNN extractor to utilize the entire $k$ -hop subgraph. The $k$ -subgraph GNN extractor aggregates the updated node representations of all nodes within the $k$ -hop neighborhood using a pooling function such as summation. Formally, if we denote by $\mathcal{N}_k(u)$ the $k$ -hop neighborhood of node $u$ including itself, the representation of a node $u$ is:
|
| 131 |
+
|
| 132 |
+
$$
|
| 133 |
+
\varphi(u, G) = \sum_{v \in \mathcal{N}_k(u)} \operatorname{GNN}_{G}^{(k)}(v). \tag{8}
|
| 134 |
+
$$
|
| 135 |
+
|
| 136 |
+
We observe that prior to the pooling function, the $k$ -subgraph GNN extractor is equivalent to using the $k$ -subtree GNN extractor within each $k$ -hop subgraph. So as to capture the attributed similarity as well as structural similarity, we augment the node representation from $k$ -subgraph GNN extractor with the original node features via concatenation. While this extractor provides more expressive subgraph representations than the $k$ -subtree extractor, it requires enumerating all $k$ -hop subgraphs, and consequently does not scale as well as the $k$ -subtree extractor to large datasets.
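The following sketch illustrates the $k$-subgraph extractor under the same dense-adjacency assumption: for each node, a $k$-hop neighborhood is extracted, an arbitrary per-node GNN (such as the $k$-subtree sketch above) is run on that subgraph, and its node embeddings are sum-pooled. The helper names (`k_hop_mask`, `k_subgraph_extractor`, `toy_gnn`) are ours for illustration.

```python
import torch

def k_hop_mask(adj, k):
    """Boolean (n, n) matrix whose row u marks the k-hop neighborhood N_k(u),
    including u itself; adj is a dense (n, n) adjacency without self-loops."""
    n = adj.size(0)
    reach = torch.eye(n, dtype=torch.bool)
    frontier = torch.eye(n, dtype=torch.bool)
    A = adj.bool()
    for _ in range(k):
        frontier = (frontier.float() @ A.float()).bool() & ~reach
        reach = reach | frontier
    return reach

def k_subgraph_extractor(gnn, X, adj, k):
    """Sketch of Eq. (8): for each node u, run `gnn` on the k-hop subgraph centred
    at u and sum-pool the resulting node embeddings; `gnn` is any callable taking
    (X_sub, adj_sub) and returning per-node embeddings."""
    masks = k_hop_mask(adj, k)
    reps = []
    for u in range(X.size(0)):
        idx = masks[u].nonzero(as_tuple=True)[0]
        X_sub, adj_sub = X[idx], adj[idx][:, idx] + torch.eye(len(idx))
        reps.append(gnn(X_sub, adj_sub).sum(dim=0))          # pooling over N_k(u)
    H = torch.stack(reps)
    return torch.cat([H, X], dim=-1)   # concatenate the original features (cf. Section 4.1)

# toy usage with a trivial one-hop mean-aggregation 'GNN'
toy_gnn = lambda X_sub, A_sub: (A_sub @ X_sub) / A_sub.sum(dim=-1, keepdim=True).clamp(min=1)
adj = (torch.rand(6, 6) < 0.4).float()
adj = ((adj + adj.t()) > 0).float().fill_diagonal_(0)
H = k_subgraph_extractor(toy_gnn, torch.randn(6, 4), adj, k=2)   # (6, 8)
```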
|
| 137 |
+
|
| 138 |
+
Other structure extractors Finally, we present a list of other potential structure extractors for different purposes. One possible choice is to directly learn a number of "hidden graphs" as the "anchor subgraphs" to represent subgraphs for better model interpretability, by using the concepts introduced in Nikolentzos & Vazirgiannis (2020). While Nikolentzos & Vazirgiannis (2020) obtain a vector representation of the input graph by counting the number of matching walks between the whole graph and each of the hidden graphs, one could extend this to the node level by comparing the hidden graphs to the $k$ -hop subgraph centered around each node. The adjacency matrix of the hidden graphs is a trainable parameter in the network, thereby enabling end-to-end training to identify which subgraph structures are predictive. Then, for a trained model, visualizing the learned hidden graphs provides useful insights about the structural motifs in the dataset.
|
| 139 |
+
|
| 140 |
+
Furthermore, more domain-specific GNNs could also be used to extract potentially more expressive subgraph representations. For instance, Bodnar et al. (2021) recently proposed a new kind of message passing scheme operating on regular cell complexes which benefits from provably stronger expressivity for molecules. Our self-attention mechanism can fully benefit from the development of more domain-specific and expressive GNNs.
|
| 141 |
+
|
| 142 |
+
Finally, another possible structure extractor is to use a nonparametric graph kernel (e.g. a Weisfeiler-Lehman graph kernel) on the $k$ -hop subgraphs centered around each node. This provides a flexible way to combine graph kernels and deep learning, which might offer new theoretical insights
|
| 143 |
+
|
| 144 |
+
into the link between the self-attention and kernel methods.
|
| 145 |
+
|
| 146 |
+
# 4.2. Structure-Aware Transformer
|
| 147 |
+
|
| 148 |
+
Having defined our structure-aware self-attention function, the other components of the Structure-Aware Transformer follow the Transformer architecture as described in Section 3.1; see Figure 2 for a visual overview. Specifically, the self-attention function is followed by a skip-connection, a FFN and two normalization layers before and after the FFN. In addition, we also include the degree factor in the skip-connection, which was found useful for reducing the overwhelming influence of highly connected graph components (Mialon et al., 2021), i.e.,
|
| 149 |
+
|
| 150 |
+
$$
|
| 151 |
+
x_v' = x_v + \frac{1}{\sqrt{d_v}} \operatorname{SA\text{-}attn}(v), \tag{9}
|
| 152 |
+
$$
|
| 153 |
+
|
| 154 |
+
where $d_v$ denotes the degree of node $v$ . After a Transformer layer, we obtain a new graph with the same structure but different node features $G' = (V, E, \mathbf{X}')$ , where $\mathbf{X}'$ corresponds to the output of the Transformer layer.
|
| 155 |
+
|
| 156 |
+
Finally, for graph property prediction, there are various ways to aggregate node-level representations into a graph representation, such as by taking the average or sum. Alternatively, one can use the embedding of a virtual [CLS] node (Jain et al., 2021) that is attached to the input graph without any connectivity to other nodes. We compare these approaches in Section 5.
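Putting the pieces together, one SAT layer can be sketched as follows in PyTorch; `extractor` stands for any structure extractor $\varphi$, the module names are illustrative, and this is not the authors' released code.

```python
import math
import torch
import torch.nn as nn

class SATLayer(nn.Module):
    """Sketch of one SAT layer: structure-aware attention, a degree-scaled
    skip-connection as in Eq. (9), then normalization and an FFN block."""

    def __init__(self, d, extractor):
        super().__init__()
        self.extractor = extractor                     # any phi(u, G), one vector per node
        self.W_Q = nn.Linear(d, d, bias=False)
        self.W_K = nn.Linear(d, d, bias=False)
        self.W_V = nn.Linear(d, d, bias=False)
        self.norm1, self.norm2 = nn.LayerNorm(d), nn.LayerNorm(d)
        self.ffn = nn.Sequential(nn.Linear(d, d), nn.ReLU(), nn.Linear(d, d))

    def forward(self, X, adj):                          # X: (n, d), adj: (n, n)
        H = self.extractor(X, adj)                      # structure-aware node representations
        scores = self.W_Q(H) @ self.W_K(H).t() / math.sqrt(X.size(-1))
        attn = torch.softmax(scores, dim=-1) @ self.W_V(X)
        deg = adj.sum(dim=-1, keepdim=True).clamp(min=1)
        X = self.norm1(X + attn / deg.sqrt())           # degree-scaled skip, Eq. (9)
        return self.norm2(X + self.ffn(X))

# toy usage with a one-hop mean-aggregation extractor standing in for a GNN
mean_hop = lambda X, adj: (adj @ X) / adj.sum(dim=-1, keepdim=True).clamp(min=1)
layer = SATLayer(d=16, extractor=mean_hop)
adj = (torch.rand(8, 8) < 0.3).float()
out = layer(torch.randn(8, 16), adj)                    # (8, 16)
```

For graph-level prediction, the node outputs of the last layer can then be averaged, summed, or read off a virtual [CLS] node, as compared in Section 5.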
|
| 157 |
+
|
| 158 |
+
# 4.3. Combination with Absolute Encoding
|
| 159 |
+
|
| 160 |
+
While the self-attention in Eq. (5) is structure-aware, most absolute encoding techniques are only position-aware and could therefore provide complementary information. Indeed, we find that the combination leads to further performance improvements, which we show in Section 5. We choose to use the RWPE (Dwivedi et al., 2022), though any other absolute positional representations, including learnable ones, can also be used.
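For reference, a common way to compute RWPE uses the diagonal of powers of the random-walk matrix (the probability that a length-$i$ walk returns to its start node); the sketch below assumes a dense adjacency matrix and follows that definition, with the function name being ours.

```python
import torch

def rwpe(adj, num_steps):
    """Sketch of a random-walk positional encoding: for each node u, the
    probability of a length-i random walk returning to u, for i = 1..num_steps.
    adj is a dense (n, n) adjacency matrix without self-loops."""
    deg = adj.sum(dim=-1, keepdim=True).clamp(min=1)
    M = adj / deg                                    # row-stochastic random-walk matrix
    P, cols = torch.eye(adj.size(0)), []
    for _ in range(num_steps):
        P = P @ M
        cols.append(P.diagonal())                    # return probabilities at step i
    return torch.stack(cols, dim=-1)                 # (n, num_steps), concatenated to X

pe = rwpe((torch.rand(8, 8) < 0.3).float(), num_steps=4)   # (8, 4)
```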
|
| 161 |
+
|
| 162 |
+
We further argue that only using an absolute positional encoding with the Transformer would impose too relaxed a structural inductive bias, which is not guaranteed to generate similar node representations even when two nodes have similar local structures. This is due to the fact that distance- or Laplacian-based positional representations generally serve as structural or positional signatures but do not provide a measure of structural similarity between nodes, especially in the inductive case where two nodes come from different graphs. This is also empirically confirmed in Section 5 by their relatively worse performance when our structural encoding is not used. In contrast, the subgraph representations used in the structure-aware attention can be tailored to measure the structural similarity between nodes, and thus generate similar node-level representations if they possess similar
|
| 163 |
+
|
| 164 |
+
attributes and surrounding structures. We can formally state this in the following theorem:
|
| 165 |
+
|
| 166 |
+
Theorem 1. Assume that $f$ is a Lipschitz mapping with the Lipschitz constant denoted by $\operatorname{Lip}(f)$ and the structure extractor $\varphi$ is bounded by a constant $C_{\varphi}$ on the space of subgraphs. For any pair of nodes $v$ and $v'$ in two graphs $G = (V, E, \mathbf{X})$ and $G' = (V', E', \mathbf{X}')$ with the same number of nodes $|V| = |V'|$ , the distance between their representations after the structure-aware attention is bounded by:
|
| 167 |
+
|
| 168 |
+
$$
|
| 169 |
+
\begin{aligned} \left\| \operatorname{SA\text{-}attn}(v) - \operatorname{SA\text{-}attn}(v') \right\| \leq\; & C_1 \left[ \left\| h_v - h'_{v'} \right\| + D\left(\mathbf{H}, \mathbf{H}'\right) \right] \\ & + C_2\, D\left(\mathbf{X}, \mathbf{X}'\right), \end{aligned} \tag{10}
|
| 170 |
+
$$
|
| 171 |
+
|
| 172 |
+
where $C_1, C_2 > 0$ are constants depending on $|V|$ , $\operatorname{Lip}(f)$ , $C_{\varphi}$ and spectral norms of the parameters in SA-attn, whose expressions are given in the Appendix, and $h_w \coloneqq \varphi(w, G)$ denotes the subgraph representation at node $w$ for any $w \in V$ and $h_{w'}' \coloneqq \varphi(w', G')$ similarly, and $\mathbf{H} = (h_w)_{w \in V}$ and $\mathbf{H}' = (h_{w'}')_{w' \in V'}$ denote the multiset of subgraph representations in $G$ and $G'$ respectively. Denoting by $\Pi(V, V')$ the set of permutations from $V$ to $V'$ , $D$ is an optimal matching metric between two multisets of representations with the same cardinality, defined as
|
| 173 |
+
|
| 174 |
+
$$
|
| 175 |
+
D(\mathbf{X},\mathbf{X}^{\prime}):= \inf_{\pi \in \Pi (V,V^{\prime})}\sup_{w\in V}\| x_{w} - x^{\prime}_{\pi (w)}\| .
|
| 176 |
+
$$
|
| 177 |
+
|
| 178 |
+
The proof is provided in the Appendix. The metric $D$ is an optimal matching metric between two multisets which measures how different they are. This theorem shows that two node representations from the SA-attn are similar if the graphs that they belong to have similar multisets of node features and subgraph representations overall, and at the same time, the subgraph representations at these two nodes are similar. In particular, if two nodes belong to the same graph, i.e. $G = G'$ , then the second and last terms on the right side of Eq. (10) are equal to zero and the distance between their representations is thus constrained by the distance between their corresponding subgraph representations. However, for Transformers with absolute positional encoding, the distance between two node representations is not constrained by their structural similarity, as the distance between two positional representations does not necessarily characterize how structurally similar two nodes are. Despite stronger inductive biases, we will show that our model is still sufficiently expressive in the next section.
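The definition of $D$ can be illustrated with a brute-force computation over all node permutations; this is exponential in the number of nodes and meant only to make the definition concrete on tiny examples (the function name is ours).

```python
import itertools
import torch

def matching_metric(X, X2):
    """Brute-force sketch of the optimal matching metric D in Theorem 1:
    the smallest, over all node permutations, of the largest row-wise distance
    between the two (equal-size) multisets of representations."""
    n = X.size(0)
    best = float("inf")
    for perm in itertools.permutations(range(n)):
        worst = max(torch.norm(X[w] - X2[p]).item() for w, p in enumerate(perm))
        best = min(best, worst)
    return best

# toy check: permuting the rows of X leaves D(X, X_permuted) = 0
X = torch.randn(4, 3)
print(matching_metric(X, X[torch.tensor([2, 0, 3, 1])]))   # 0.0 (up to float error)
```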
|
| 179 |
+
|
| 180 |
+
# 4.4. Expressivity Analysis
|
| 181 |
+
|
| 182 |
+
The expressive power of graph Transformers compared to classic GNNs has hardly been studied, since the soft structural inductive bias introduced in absolute encoding is generally hard to characterize. Thanks to the unique design of our SAT, which relies on a subgraph structure extractor, it
|
| 183 |
+
|
| 184 |
+
Table 1: Comparison of SAT to SOTA methods on graph regression and classification tasks. ZINC results use edge weights where applicable, otherwise without edge weights. $\star$ indicates we obtained the results ourselves by adapting the code provided by the original paper. $\uparrow$ means that higher is better for the performance metric; $\downarrow$ indicates lower is better.
|
| 185 |
+
|
| 186 |
+
<table><tr><td></td><td>ZINC ↓</td><td>CLUSTER ↑</td><td>PATTERN ↑</td></tr><tr><td># GRAPHS</td><td>12,000</td><td>12,000</td><td>14,000</td></tr><tr><td>AVG. # NODES</td><td>23.2</td><td>117.2</td><td>118.9</td></tr><tr><td>AVG. # EDGES</td><td>49.8</td><td>4,303.9</td><td>6,098.9</td></tr><tr><td>METRIC</td><td>MAE</td><td>ACCURACY</td><td>ACCURACY</td></tr><tr><td>GIN</td><td>0.387±0.015</td><td>64.716±1.553</td><td>85.590±0.011</td></tr><tr><td>GAT</td><td>0.384±0.007</td><td>70.587±0.447</td><td>78.271±0.186</td></tr><tr><td>PNA</td><td>0.188±0.004</td><td>67.077±0.977*</td><td>86.567±0.075</td></tr><tr><td>TRANSFORMER+RWPE</td><td>0.310±0.005</td><td>29.622±0.176</td><td>86.183±0.019</td></tr><tr><td>GRAPH TRANSFORMER</td><td>0.226±0.014</td><td>73.169±0.622</td><td>84.808±0.068</td></tr><tr><td>SAN</td><td>0.139±0.006</td><td>76.691±0.650</td><td>86.581±0.037</td></tr><tr><td>GRAPHORMER</td><td>0.122±0.006</td><td>-</td><td>-</td></tr><tr><td>K-SUBTREE SAT</td><td>0.102±0.005</td><td>77.751±0.121</td><td>86.865±0.043</td></tr><tr><td>K-SUBGRAPH SAT</td><td>0.094±0.008</td><td>77.856±0.104</td><td>86.848±0.037</td></tr></table>
|
| 187 |
+
|
| 188 |
+
becomes possible to study the expressiveness of the output representations. More specifically, we formally show that the node representation from a structure-aware attention layer is at least as expressive as its subgraph representation given by the structure extractor, following the injectivity of the attention function with respect to the query:
|
| 189 |
+
|
| 190 |
+
Theorem 2. Assume that the space of node attributes $\mathcal{X}$ is countable. For any pair of nodes $v$ and $v'$ in two graphs $G = (V, E, \mathbf{X})$ and $G' = (V', E', \mathbf{X}')$, assume that there exists a node $u_1$ in $V$ such that $x_{u_1} \neq x_w$ for any $w \in V$ and a node $u_2$ in $V$ such that its subgraph representation $\varphi(u_2, G) \neq \varphi(w, G)$ for any $w \in V$. Then, there exists a set of parameters and a mapping $f: \mathcal{X} \to \mathbb{R}^{d_{out}}$ such that their representations after the structure-aware attention are different, i.e. SA-attn(v) $\neq$ SA-attn(v'), if their subgraph representations are different, i.e. $\varphi(v, G) \neq \varphi(v', G')$.
|
| 191 |
+
|
| 192 |
+
Note that the assumptions made in the theorem are mild as one can always add some absolute encoding or random noise to make the attributes of one node different from all other nodes, and similarly for subgraph representations. The countable assumption on $\mathcal{X}$ is generally adopted for expressivity analysis of GNNs (e.g. Xu et al. (2019)). We assume $f$ to be any mapping rather than just a linear function as in the definition of the self-attention function since it can be practically approximated by a FFN in multi-layer Transformers through the universal approximation theorem (Hornik, 1991). Theorem 2 suggests that if the structure extractor is sufficiently expressive, the resulting SAT model can also be at least equally expressive. Furthermore, more expressive extractors could lead to more expressively powerful SAT models and thus better prediction performance, which is also empirically confirmed in Section 5.
|
| 193 |
+
|
| 194 |
+
Table 2: Comparison of SAT to SOTA methods on OGB datasets.
|
| 195 |
+
|
| 196 |
+
<table><tr><td></td><td>OGBG-PPA ↑</td><td>OGBG-CODE2 ↑</td></tr><tr><td># GRAPHS</td><td>158,100</td><td>452,741</td></tr><tr><td>AVG. # NODES</td><td>243.4</td><td>125.2</td></tr><tr><td>AVG. # EDGES</td><td>2,266.1</td><td>124.2</td></tr><tr><td>METRIC</td><td>ACCURACY</td><td>F1 SCORE</td></tr><tr><td>GCN</td><td>0.6839±0.0084</td><td>0.1507±0.0018</td></tr><tr><td>GCN-VIRTUAL NODE</td><td>0.6857±0.0061</td><td>0.1595±0.0018</td></tr><tr><td>GIN</td><td>0.6892±0.0100</td><td>0.1495±0.0023</td></tr><tr><td>GIN-VIRTUAL NODE</td><td>0.7037±0.0107</td><td>0.1581±0.0026</td></tr><tr><td>DEEPERGCN</td><td>0.7712±0.0071</td><td>-</td></tr><tr><td>EXPC</td><td>0.7976±0.0072</td><td>-</td></tr><tr><td>TRANSFORMER</td><td>0.6454±0.0033</td><td>0.1670±0.0015</td></tr><tr><td>GRAPHTRANS</td><td>-</td><td>0.1830±0.0024</td></tr><tr><td>K-SUBTREE SAT</td><td>0.7522±0.0056</td><td>0.1937±0.0028</td></tr></table>
|
| 197 |
+
|
| 198 |
+
# 5. Experiments
|
| 199 |
+
|
| 200 |
+
In this section, we evaluate SAT models versus several SOTA methods for graph representation learning, including GNNs and Transformers, on five graph and node prediction tasks, as well as analyze the different components of our architecture to identify what drives the performance. In summary, we discovered the following aspects about SAT:
|
| 201 |
+
|
| 202 |
+
- The structure-aware framework achieves SOTA performance on graph and node classification tasks, outperforming SOTA graph Transformers and sparse GNNs.
|
| 203 |
+
- Both instances of the SAT, namely $k$ -subtree and $k$ -subgraph SAT, always improve upon the base GNN it is built upon, highlighting the improved expressiveness of our structure-aware approach.
|
| 204 |
+
- We show that incorporating the structure via our structure-aware attention brings a notable improvement relative to the vanilla Transformer with RWPE that just uses node attribute similarity instead of also incorporating structural similarity. We also show that a small value of $k$ already leads to good performance, while not suffering from over-smoothing or over-squashing.
|
| 205 |
+
- We show that choosing a proper absolute positional encoding and a readout method improves performance, but to a much lesser extent than incorporating the structure into the approach.
|
| 206 |
+
|
| 207 |
+
Furthermore, we note that SAT achieves SOTA performance while only considering a small hyperparameter search space. Performance could likely be further improved with more hyperparameter tuning.
|
| 208 |
+
|
| 209 |
+
# 5.1. Datasets and Experimental Setup
|
| 210 |
+
|
| 211 |
+
We assess the performance of our method with five medium to large benchmark datasets for node and graph property prediction, including ZINC (Dwivedi et al., 2020),
|
| 212 |
+
|
| 213 |
+
CLUSTER (Dwivedi et al., 2020), PATTERN (Dwivedi et al., 2020), OGBG-PPA (Hu et al., 2020a) and OGBG-CODE2 (Hu et al., 2020a).
|
| 214 |
+
|
| 215 |
+
We compare our method to the following GNNs: GCN (Kipf & Welling, 2017), GraphSAGE (Hamilton et al., 2017), GAT (Velicković et al., 2018), GIN (Xu et al., 2019), PNA (Corso et al., 2020), DeeperGCN (Li et al., 2020a), and ExpC (Yang et al., 2022). Our comparison partners also include several recently proposed Transformers on graphs, including the original Transformer with RWPE (Dwivedi et al., 2022), Graph Transformer (Dwivedi & Bresson, 2021), SAN (Kreuzer et al., 2021), Graphormer (Ying et al., 2021) and GraphTrans (Jain et al., 2021), a model that uses the vanilla Transformer on top of a GNN.
|
| 216 |
+
|
| 217 |
+
All results for the comparison methods are either taken from the original paper or from Dwivedi et al. (2020) if not available. We consider $k$ -subtree and $k$ -subgraph SAT equipped with different GNN extractors, including GCN, GIN, GraphSAGE and PNA. For OGBG-PPA and OGBG-CODE2, we do not run experiments for $k$ -subgraph SAT models due to large memory requirements. Full details on the datasets, experimental setup, and hyperparameters are provided in the Appendix.
|
| 218 |
+
|
| 219 |
+
# 5.2. Comparison to State-of-the-Art Methods
|
| 220 |
+
|
| 221 |
+
We show the performance of SATs compared to other GNNs and Transformers in Tables 1 and 2. SAT models consistently outperform SOTA methods on these datasets, showing their ability to combine the benefits of both GNNs and Transformers. In particular, for the CODE2 dataset, our SAT models outperform SOTA methods by a large margin despite a relatively small number of parameters and minimal hyperparameter tuning, which would place them first on the OGB leaderboard.
|
| 222 |
+
|
| 223 |
+
# 5.3. SAT Models vs. Sparse GNNs
|
| 224 |
+
|
| 225 |
+
Table 3 summarizes the performance of SAT relative to the sparse GNN it uses to extract the subgraph representations, across different GNNs. We observe that both variations of SAT consistently bring large performance gains over their base GNN counterpart, making SAT a systematic enhancer of any GNN model. Furthermore, PNA, the most expressive GNN we considered, consistently has the best performance when used with SAT, empirically validating our theoretical finding in Section 4.4. $k$-subgraph SAT also outperforms or matches $k$-subtree SAT in almost all cases, showing its superior expressiveness.
|
| 226 |
+
|
| 227 |
+
# 5.4. Hyperparameter Studies
|
| 228 |
+
|
| 229 |
+
While Table 3 showcases the added value of SAT relative to sparse GNNs, we now dissect the components of SAT on the ZINC dataset to identify which aspects of the architecture bring the biggest performance gains.
|
| 230 |
+
|
| 231 |
+
Table 3: Since SAT uses a GNN to extract structures, we compare the performance of the original sparse GNN ("base GNN") to the SAT model that uses it. Across different choices of GNNs, we observe that both $k$ -subtree and $k$ -subgraph SAT always outperform the original sparse GNN they use. The evaluation metrics are the same as in Table 1.
|
| 232 |
+
|
| 233 |
+
<table><tr><td rowspan="2" colspan="2"></td><td colspan="2">ZINC↓</td><td>CLUSTER↑</td><td>PATTERN↑</td></tr><tr><td>W/ EDGE ATTR.</td><td>W/O EDGE ATTR.</td><td>ALL</td><td>ALL</td></tr><tr><td rowspan="3">GCN</td><td>BASE GNN</td><td>0.192±0.015</td><td>0.367±0.011</td><td>68.498±0.976</td><td>71.892±0.334</td></tr><tr><td>K-SUBTREE SAT</td><td>0.127±0.010</td><td>0.174±0.009</td><td>77.247±0.094</td><td>86.749±0.065</td></tr><tr><td>K-SUBGRAPH SAT</td><td>0.114±0.005</td><td>0.184±0.002</td><td>77.682±0.098</td><td>86.816±0.028</td></tr><tr><td rowspan="3">GIN</td><td>BASE GNN</td><td>0.209±0.009</td><td>0.387±0.015</td><td>64.716±1.553</td><td>85.590±0.011</td></tr><tr><td>K-SUBTREE SAT</td><td>0.115±0.005</td><td>0.166±0.007</td><td>77.255±0.085</td><td>86.759±0.022</td></tr><tr><td>K-SUBGRAPH SAT</td><td>0.095±0.002</td><td>0.162±0.013</td><td>77.502±0.282</td><td>86.746±0.014</td></tr><tr><td rowspan="3">GRAPHSAGE</td><td>BASE GNN</td><td>-</td><td>0.398±0.002</td><td>63.844±0.110</td><td>50.516±0.001</td></tr><tr><td>K-SUBTREE SAT</td><td>-</td><td>0.164±0.004</td><td>77.592±0.074</td><td>86.818±0.043</td></tr><tr><td>K-SUBGRAPH SAT</td><td>-</td><td>0.168±0.005</td><td>77.657±0.185</td><td>86.838±0.010</td></tr><tr><td rowspan="3">PNA</td><td>BASE GNN</td><td>0.188±0.004</td><td>0.320±0.032</td><td>67.077±0.977</td><td>86.567±0.075</td></tr><tr><td>K-SUBTREE SAT</td><td>0.102±0.005</td><td>0.147±0.001</td><td>77.751±0.121</td><td>86.865±0.043</td></tr><tr><td>K-SUBGRAPH SAT</td><td>0.094±0.008</td><td>0.131±0.002</td><td>77.856±0.104</td><td>86.848±0.037</td></tr></table>
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
Effect of $k$ in SAT The key contribution of SAT is its ability to explicitly incorporate structural information into the self-attention. Here, we seek to demonstrate that this information provides crucial predictive signal, and study how the choice of $k$ affects the results. Figure 3a shows how the test MAE is impacted by varying $k$ for $k$ -subtree and $k$ -subgraph extractors using PNA on the ZINC dataset. All models use the RWPE. $k = 0$ corresponds to the vanilla Transformer that only uses the absolute positional encoding, i.e. does not use structure. We find that incorporating structural information leads to a substantial improvement in performance, with optimal performance around $k = 3$ for both $k$ -subtree and $k$ -subgraph extractors. As $k$ increases beyond $k = 4$ , the performance of $k$ -subtree extractors deteriorates, which is consistent with the observation that GNNs tend to work best with shallow architectures (Kipf & Welling, 2017). We observe that $k$ -subgraph extraction does not suffer as much from this issue, underscoring a further aspect of its usefulness. On the other hand, $k$ -subtree extractors are more computationally efficient and scale to the larger OGB datasets.
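To make the role of $k$ concrete, the following is a minimal sketch (not the authors' implementation) of $k$ -subtree structure-aware attention: queries and keys come from node embeddings produced by $k$ rounds of neighborhood aggregation, while values remain plain node features. The mean-aggregation extractor, weight names and shapes below are illustrative assumptions.

```python
import torch
import torch.nn.functional as F

def k_subtree_extractor(x, adj, layer_weights):
    """k rounds of mean neighborhood aggregation; k = len(layer_weights)."""
    h = x
    deg = adj.sum(dim=-1, keepdim=True).clamp(min=1)
    for w in layer_weights:                       # each w: (d, d)
        h = F.relu((adj @ h) / deg @ w + h)       # aggregate neighbors, combine with h
    return h

def structure_aware_attention(x, adj, layer_weights, W_q, W_k, W_v):
    h = k_subtree_extractor(x, adj, layer_weights)   # structure-aware embeddings
    q, k, v = h @ W_q, h @ W_k, x @ W_v              # values stay attribute-only
    scores = q @ k.transpose(-1, -2) / k.shape[-1] ** 0.5
    return F.softmax(scores, dim=-1) @ v

# k = 3 corresponds to a three-layer extractor; passing an empty list (k = 0) reduces
# the queries/keys to raw node features, i.e. a vanilla (structure-agnostic) Transformer.
n, d = 8, 16
x = torch.randn(n, d)
adj = (torch.rand(n, n) > 0.7).float()
params = [torch.randn(d, d) * 0.1 for _ in range(3)]
out = structure_aware_attention(x, adj, params,
                                torch.randn(d, d), torch.randn(d, d), torch.randn(d, d))
```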
|
| 238 |
+
|
| 239 |
+
Effect of absolute encoding We assess here whether the absolute encoding brings complementary information to SAT. In Figure 3b, we conduct an ablation study showing the results of SAT with and without absolute positional encoding, including RWPE and the Laplacian PE (Dwivedi et al., 2020). Our SAT with a positional encoding outperforms its counterpart without one, confirming the complementary nature of the two encodings. However, we also note that the performance gain brought by the absolute encoding is far smaller than the gain obtained by using our structure-aware attention, as shown in Figure 3a (comparing $k = 0$ to $k > 0$ ), emphasizing that the structure-aware attention is the more important aspect of the model.
|
| 240 |
+
|
| 241 |
+
Comparison of readout methods Finally, we compare the performance of SAT models using different readout methods for aggregating node-level representations on the ZINC dataset in Figure 3c, including the CLS pooling discussed in Section 4.2. Unlike the remarkable influence of the readout method in GNNs (Xu et al., 2019), we observe very little impact in SAT models.
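For reference, here is a hedged sketch of the three readout options compared above; the [CLS] variant assumes a virtual [CLS] node has been appended to the node set and uses its final embedding as the graph representation (names and shapes are illustrative, not taken from the released code).

```python
import torch

def readout(node_embeddings: torch.Tensor, method: str = "mean") -> torch.Tensor:
    """node_embeddings: (n_nodes, d); returns a single (d,) graph representation."""
    if method == "mean":
        return node_embeddings.mean(dim=0)
    if method == "sum":
        return node_embeddings.sum(dim=0)
    if method == "cls":
        return node_embeddings[0]   # assumes row 0 is the appended [CLS] node
    raise ValueError(f"unknown readout method: {method}")
```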
|
| 242 |
+
|
| 243 |
+
# 5.5. Model Interpretation
|
| 244 |
+
|
| 245 |
+
In addition to the performance improvement, we show that SAT offers better model interpretability than the classic Transformer with only absolute positional encoding. We train a SAT model and a Transformer, each with a CLS readout, on the Mutagenicity dataset, and visualize the attention scores between the [CLS] node and the other nodes learned by SAT and the Transformer in Figure 4. The salient difference between the two models is that SAT has structure-aware node embeddings, and thus we can attribute the following interpretability gains to that. While both models manage to identify some chemical motifs known for mutagenicity, such as $\mathrm{NO}_2$ and $\mathrm{NH}_2$ , the attention scores learned by SAT are sparser and more informative, meaning that SAT puts more attention weight on these known mutagenic motifs than the Transformer with RWPE. The vanilla Transformer even fails to put attention on some important atoms, such as the H atoms in the $\mathrm{NH}_2$ group. The only H atoms highlighted by SAT are those in the $\mathrm{NH}_2$ group, suggesting that SAT indeed takes the structure into account. The stronger focus on these discriminative motifs makes the SAT model less influenced by other chemical patterns that commonly occur in the dataset, such as benzene, and thus leads to overall improved performance. More results are provided in the Appendix.
|
| 246 |
+
|
| 247 |
+

|
| 248 |
+
(a) Effect of $k$
|
| 249 |
+
|
| 250 |
+

|
| 251 |
+
(b) Effect of absolute encoding
|
| 252 |
+
|
| 253 |
+

|
| 254 |
+
(c) Effect of readout method
|
| 255 |
+
|
| 256 |
+

|
| 257 |
+
Figure 3: We provide an analysis of the different drivers of performance in SAT on the ZINC dataset (lower is better). In Figure 3a, we show how changing the size of $k$ affects performance ( $k = 0$ is equivalent to a vanilla Transformer that is not structure-aware). Figure 3b shows the effect of different absolute encoding methods, and Figure 3c shows the effect of different readout methods.
|
| 258 |
+
|
| 259 |
+

|
| 260 |
+
Figure 4: Attention visualization of SAT and the Transformer. The center column shows the attention weights of the [CLS] node learned by our SAT model and the right column shows the attention weights learned by the classic Transformer with the random walk positional encoding (RWPE).
|
| 261 |
+
|
| 262 |
+

|
| 263 |
+
|
| 264 |
+
# 6. Discussion
|
| 265 |
+
|
| 266 |
+
We introduced the SAT model, which successfully incorporates structural information into the Transformer architecture and overcomes the limitations of the absolute encoding. In addition to SOTA empirical performance with minimal hyperparameter tuning, SAT also provides better interpretability than the Transformer.
|
| 267 |
+
|
| 268 |
+
Limitations As mentioned above, $k$ -subgraph SAT has higher memory requirements than $k$ -subtree SAT, which can limit its applicability when high-memory GPUs are not available. The main limitation of SAT, however, is that it suffers from the same drawback as the Transformer, namely the quadratic complexity of the self-attention computation.
|
| 269 |
+
|
| 270 |
+
Future work Because SAT can be combined with any GNN, a natural extension of our work is to combine SAT with structure extractors that have been shown to be strictly more expressive than the 1-WL test, such as the recent topological GNN introduced by Horn et al. (2021). Additionally, the SAT framework is flexible and can incorporate any structure extractor that produces structure-aware node representations; it could even be extended beyond GNNs, for example to differentiable graph kernels.
|
| 273 |
+
|
| 274 |
+
Another important area for future work is reducing the high memory cost and time complexity of the self-attention computation, as is being done in recent efforts to develop so-called linear transformers, which have linear complexity in both time and space (Tay et al., 2020; Wang et al., 2020; Qin et al., 2022).
|
| 275 |
+
|
| 276 |
+
# Acknowledgements
|
| 277 |
+
|
| 278 |
+
This work was supported in part by the Alfred Krupp Prize for Young University Teachers of the Alfred Krupp von Bohlen und Halbach-Stiftung (K.B.). The authors would also like to thank Dr. Bastian Rieck and Dr. Carlos Oliver for their insightful feedback on the manuscript, which greatly improved it.
|
| 279 |
+
|
| 280 |
+
# References
|
| 281 |
+
|
| 282 |
+
Abbe, E. Community detection and stochastic block models: Recent developments. Journal of Machine Learning Research (JMLR), 18(177):1-86, 2018. (Cited on 17)
|
| 283 |
+
Alon, U. and Yahav, E. On the bottleneck of graph neural networks and its practical implications. In International Conference on Learning Representations (ICLR), 2021. (Cited on 1)
|
| 284 |
+
Alsentzer, E., Finlayson, S. G., Li, M. M., and Zitnik, M. Subgraph neural networks. In Proceedings of Neural Information Processing Systems (NeurIPS), 2020. (Cited on 21)
|
| 287 |
+
Bodnar, C., Frasca, F., Otter, N., Wang, Y. G., Liò, P., Montufar, G. F., and Bronstein, M. Weisfeiler and Lehman go cellular: CW networks. In Advances in Neural Information Processing Systems (NeurIPS), 2021. (Cited on 5)
|
| 288 |
+
Chen, D., Lin, Y., Li, W., Li, P., Zhou, J., and Sun, X. Measuring and relieving the over-smoothing problem for graph neural networks from the topological view. In Proceedings of the AAAI Conference on Artificial Intelligence, 2020. (Cited on 1)
|
| 289 |
+
Corso, G., Cavalleri, L., Beaini, D., Lio, P., and Velicković, P. Principal neighbourhood aggregation for graph nets. In Advances in Neural Information Processing Systems (NeurIPS), 2020. (Cited on 2, 7)
|
| 290 |
+
Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations (ICLR), 2020. (Cited on 1)
|
| 291 |
+
Dwivedi, V. P. and Bresson, X. A generalization of transformer networks to graphs. In AAAI Workshop on Deep Learning on Graphs: Methods and Applications, 2021. (Cited on 2, 3, 7)
|
| 292 |
+
Dwivedi, V. P., Joshi, C. K., Laurent, T., Bengio, Y., and Bresson, X. Benchmarking graph neural networks. arXiv preprint arXiv:2003.00982, 2020. (Cited on 7, 8, 16, 17)
|
| 293 |
+
Dwivedi, V. P., Liu, A. T., Laurent, T., Bengio, Y., and Bresson, X. Graph neural networks with learnable structural and positional representations. In International Conference on Learning Representations, 2022. (Cited on 2, 3, 5, 7, 17)
|
| 294 |
+
Fan, W., Ma, Y., Li, Q., He, Y., Zhao, E., Tang, J., and Yin, D. Graph neural networks for social recommendation. In The World Wide Web Conference, 2019. (Cited on 1)
|
| 295 |
+
Gao, B. and Pavel, L. On the properties of the softmax function with application in game theory and reinforcement learning. arXiv preprint arXiv:1704.00805, 2017. (Cited on 14)
|
| 296 |
+
Gao, H. and Ji, S. Graph u-nets. In International Conference on Machine Learning, pp. 2083-2092, 2019. (Cited on 21)
|
| 297 |
+
Gaudelet, T., Day, B., Jamasb, A. R., Soman, J., Regep, C., Liu, G., Hayter, J. B., Vickers, R., Roberts, C., Tang, J., et al. Utilizing graph machine learning within drug discovery and development. Briefings in Bioinformatics, 22(6):bbab159, 2021. (Cited on 1)
|
| 300 |
+
Gilmer, J., Schoenholz, S. S., Riley, P. F., Vinyals, O., and Dahl, G. E. Neural message passing for quantum chemistry. In International Conference on Machine Learning (ICML), 2017. (Cited on 1, 2)
|
| 301 |
+
Hamilton, W. L., Ying, R., and Leskovec, J. Inductive representation learning on large graphs. In Advances in Neural Information Processing Systems (NeurIPS), 2017. (Cited on 2, 7)
|
| 302 |
+
Horn, M., De Brouwer, E., Moor, M., Moreau, Y., Rieck, B., and Borgwardt, K. Topological graph neural networks. 2021. (Cited on 9)
|
| 303 |
+
Hornik, K. Approximation capabilities of multilayer feedforward networks. Neural networks, 4(2):251-257, 1991. (Cited on 6, 15)
|
| 304 |
+
Hu, W., Fey, M., Zitnik, M., Dong, Y., Ren, H., Liu, B., Catasta, M., and Leskovec, J. Open graph benchmark: Datasets for machine learning on graphs. In Advances in Neural Information Processing Systems (NeurIPS), 2020a. (Cited on 7, 16, 17)
|
| 305 |
+
Hu, W., Liu, B., Gomes, J., Zitnik, M., Liang, P., Pande, V., and Leskovec, J. Strategies for pre-training graph neural networks. In International Conference on Learning Representations (ICLR), 2020b. (Cited on 2)
|
| 306 |
+
Ingraham, J., Garg, V., Barzilay, R., and Jaakkola, T. Generative models for graph-based protein design. In Advances in Neural Information Processing Systems (NeurIPS), 2019. (Cited on 1)
|
| 307 |
+
Irwin, J. J., Sterling, T., Mysinger, M. M., Bolstad, E. S., and Coleman, R. G. Zinc: A free tool to discover chemistry for biology. Journal of Chemical Information and Modeling, 52(7):1757-1768, 2012. (Cited on 16)
|
| 308 |
+
Jain, P., Wu, Z., Wright, M., Mirhoseini, A., Gonzalez, J. E., and Stoica, I. Representing long-range context for graph neural networks with global attention. In Advances in Neural Information Processing Systems (NeurIPS), 2021. (Cited on 3, 5, 7)
|
| 309 |
+
Kersting, K., Kriege, N. M., Morris, C., Mutzel, P., and Neumann, M. Benchmark data sets for graph kernels, 2016. http://graphkernels.cs.tu-dortmund.de. (Cited on 20)
|
| 310 |
+
Kipf, T. N. and Welling, M. Semi-supervised classification with graph convolutional networks. In International Conference on Learning Representations (ICLR), 2017. (Cited on 2, 7, 8)
|
| 311 |
+
|
| 312 |
+
Kreuzer, D., Beaini, D., Hamilton, W. L., Létourneau, V., and Tossou, P. Rethinking graph transformers with spectral attention. In Advances in Neural Information Processing Systems (NeurIPS), 2021. (Cited on 2, 7)
|
| 313 |
+
Li, G., Müller, M., Thabet, A., and Ghanem, B. DeepGCNs: Can GCNs go as deep as CNNs? In Proceedings of the International Conference on Computer Vision (ICCV), 2019. (Cited on 1)
|
| 314 |
+
Li, G., Xiong, C., Thabet, A., and Ghanem, B. DeeperGCN: All you need to train deeper GCNs, 2020a. (Cited on 2, 7)
|
| 315 |
+
Li, P., Wang, Y., Wang, H., and Leskovec, J. Distance encoding: Design provably more powerful neural networks for graph representation learning. In Advances in Neural Information Processing Systems (NeurIPS), 2020b. (Cited on 2)
|
| 316 |
+
Li, Q., Han, Z., and Wu, X. Deeper insights into graph convolutional networks for semi-supervised learning. In Proceedings of the AAAI Conference on Artificial Intelligence, 2018. (Cited on 1)
|
| 317 |
+
Loshchilov, I. and Hutter, F. Sgdr: Stochastic gradient descent with warm restarts. In International Conference on Learning Representations (ICLR), 2016. (Cited on 17)
|
| 318 |
+
Loshchilov, I. and Hutter, F. Decoupled weight decay regularization. In International Conference on Learning Representations (ICLR), 2018. (Cited on 17)
|
| 319 |
+
Mesquita, D., Souza, A. H., and Kaski, S. Rethinking pooling in graph neural networks. In Advances in Neural Information Processing Systems (NeurIPS), 2020. (Cited on 21)
|
| 320 |
+
Mialon, G., Chen, D., Selosse, M., and Mairal, J. Graphit: Encoding graph structure in transformers, 2021. (Cited on 3, 5)
|
| 321 |
+
Micchelli, C. A., Xu, Y., and Zhang, H. Universal kernels. Journal of Machine Learning Research (JMLR), 7(12), 2006. (Cited on 16)
|
| 322 |
+
Morris, C., Ritzert, M., Fey, M., Hamilton, W. L., Lenssen, J. E., Rattan, G., and Grohe, M. Weisfeiler and Leman go neural: Higher-order graph neural networks. In Proceedings of the AAAI Conference on Artificial Intelligence, 2019. (Cited on 1)
|
| 323 |
+
Nikolentzos, G. and Vazirgiannis, M. Random walk graph neural networks. In Advances in Neural Information Processing Systems (NeurIPS), 2020. (Cited on 5)
|
| 324 |
+
Oono, K. and Suzuki, T. Graph neural networks exponentially lose expressive power for node classification. In International Conference on Learning Representations (ICLR), 2020. (Cited on 1)
|
| 325 |
+
|
| 326 |
+
Qin, Z., Sun, W., Deng, H., Li, D., Wei, Y., Lv, B., Yan, J., Kong, L., and Zhong, Y. cosformer: Rethinking softmax in attention. In International Conference on Learning Representations, 2022. (Cited on 9)
|
| 327 |
+
Rives, A., Meier, J., Sercu, T., Goyal, S., Lin, Z., Liu, J., Guo, D., Ott, M., Zitnick, C. L., Ma, J., et al. Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences. Proceedings of the National Academy of Sciences, 118(15), 2021. (Cited on 1)
|
| 328 |
+
Rong, Y., Bian, Y., Xu, T., Xie, W., Wei, Y., Huang, W., and Huang, J. Self-supervised graph transformer on large-scale molecular data. In Advances in Neural Information Processing Systems (NeurIPS), 2020. (Cited on 3)
|
| 329 |
+
Shaw, P., Uszkoreit, J., and Vaswani, A. Self-attention with relative position representations. In Proceedings of the North American Chapter of the Association for Computational Linguistics (NAACL), 2018. (Cited on 2)
|
| 330 |
+
Shi, Y., Huang, Z., Feng, S., Zhong, H., Wang, W., and Sun, Y. Masked label prediction: Unified message passing model for semi-supervised classification. In Zhou, Z.-H. (ed.), Proceedings of the Thirtieth International Joint Conference on Artificial Intelligence (IJCAI-21), pp. 1548-1554. International Joint Conferences on Artificial Intelligence Organization, 8 2021. (Cited on 3)
|
| 331 |
+
Tay, Y., Dehghani, M., Bahri, D., and Metzler, D. Efficient transformers: A survey. arXiv preprint arXiv:2009.06732, 2020. (Cited on 9)
|
| 332 |
+
Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L., and Polosukhin, I. Attention is all you need. In Advances in Neural Information Processing Systems (NeurIPS), 2017. (Cited on 1, 2, 3, 17)
|
| 333 |
+
Veličković, P., Cucurull, G., Casanova, A., Romero, A., Lio, P., and Bengio, Y. Graph Attention Networks. In International Conference on Learning Representations (ICLR), 2018. (Cited on 2, 7)
|
| 334 |
+
Wang, S., Li, B. Z., Khabsa, M., Fang, H., and Ma, H. Linformer: Self-attention with linear complexity, 2020. (Cited on 9)
|
| 335 |
+
Wijesinghe, A. and Wang, Q. A new perspective on "how graph neural networks go beyond weisfeiler-lehman?" In International Conference on Learning Representations, 2022. (Cited on 4)
|
| 336 |
+
Xu, K., Hu, W., Leskovec, J., and Jegelka, S. How powerful are graph neural networks? In International Conference on Learning Representations (ICLR), 2019. (Cited on 1, 2, 4, 6, 7, 8, 13, 15)
|
| 337 |
+
|
| 338 |
+
Yang, M., Wang, R., Shen, Y., Qi, H., and Yin, B. Breaking the expression bottleneck of graph neural networks. IEEE Transactions on Knowledge and Data Engineering, pp. 1-1, 2022. doi: 10.1109/TKDE.2022.3168070. (Cited on 2, 7)
|
| 339 |
+
Ying, C., Cai, T., Luo, S., Zheng, S., Ke, G., He, D., Shen, Y., and Liu, T.-Y. Do transformers really perform badly for graph representation? In Advances in Neural Information Processing Systems (NeurIPS), 2021. (Cited on 3, 7)
|
| 340 |
+
Ying, Z., You, J., Morris, C., Ren, X., Hamilton, W., and Leskovec, J. Hierarchical graph representation learning with differentiable pooling. Advances in neural information processing systems, 31, 2018. (Cited on 21)
|
| 341 |
+
You, J., Ying, R., and Leskovec, J. Position-aware graph neural networks. In International Conference on Machine Learning (ICML), 2019. (Cited on 2)
|
| 342 |
+
Zhang, J., Zhang, H., Xia, C., and Sun, L. Graph-bert: Only attention is needed for learning graph representations. arXiv preprint arXiv:2001.05140, 2020. (Cited on 2)
|
| 343 |
+
Zhang, M. and Li, P. Nested graph neural networks. In Proceedings of the 35th Conference on Neural Information Processing Systems (NeurIPS), 2021. (Cited on 4, 5)
|
| 344 |
+
|
| 345 |
+
# Appendix
|
| 346 |
+
|
| 347 |
+
This appendix provides both theoretical and experimental materials and is organized as follows: Section A provides a more detailed background on graph neural networks. Section B presents proofs of Theorem 1 and 2. Section C provides experimental details and additional results. Section D provides details on the model interpretation and additional visualization results.
|
| 348 |
+
|
| 349 |
+
# A. Background on Graph Neural Networks
|
| 350 |
+
|
| 351 |
+
The overarching idea of a graph neural network is to iteratively update a node's embedding by incorporating information sent from its neighbors. Xu et al. (2019) provide a general framework for this process by decomposing the different architectures into AGGREGATE, COMBINE and READOUT steps. The various flavors of GNNs can typically be understood as variations within these three functions. For a given layer $l$ , the AGGREGATE step aggregates (e.g. using the sum or mean) the representations of the neighbors of a given node, which is then combined with the node's own representation from the previous layer in the COMBINE step. This is followed by a non-linear function, such as ReLU, and the updated node representations are passed to the next layer. These two steps are repeated for as many layers as there are in the network. It is worth noting that each application of these two steps only grows the local substructure captured by a node's representation by one hop, so capturing interactions between a given node and all other nodes would require a very deep network (the depth should not be smaller than the diameter of the graph). At the end of the network, the READOUT function provides a pooling operation that converts the representations to the appropriate output-level granularity (e.g. node-level or graph-level). Both the AGGREGATE and READOUT steps must be invariant to node permutations.
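As a concrete (if simplified) illustration of the AGGREGATE / COMBINE / READOUT decomposition above, the snippet below implements one sum-aggregation layer and a sum readout in plain NumPy; it is a sketch of the general framework, not any particular GNN used in the paper.

```python
import numpy as np

def gnn_layer(h, neighbors, W_self, W_neigh):
    """One layer: h is (n, d), neighbors[v] lists the neighbor indices of node v."""
    out = np.zeros_like(h)
    for v, nbrs in enumerate(neighbors):
        agg = h[nbrs].sum(axis=0) if nbrs else np.zeros(h.shape[1])   # AGGREGATE
        out[v] = np.maximum(0.0, h[v] @ W_self + agg @ W_neigh)       # COMBINE + ReLU
    return out

def graph_readout(h):
    return h.sum(axis=0)   # permutation-invariant graph-level READOUT

# toy usage: a path graph on three nodes
h = np.random.randn(3, 4)
neighbors = [[1], [0, 2], [1]]
W_self, W_neigh = np.eye(4), 0.5 * np.eye(4)
h = gnn_layer(h, neighbors, W_self, W_neigh)
graph_vec = graph_readout(h)
```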
|
| 352 |
+
|
| 353 |
+
# B. Theoretical Analysis
|
| 354 |
+
|
| 355 |
+
# B.1. Controllability of the Representations from the Structure-Aware Attention
|
| 356 |
+
|
| 357 |
+
Theorem 1. Assume that $f$ is a Lipschitz mapping with the Lipschitz constant denoted by $\operatorname{Lip}(f)$ and the structure extractor $\varphi$ is bounded by a constant $C_{\varphi}$ on the space of subgraphs. For any pair of nodes $v$ and $v'$ in two graphs $G = (V, E, \mathbf{X})$ and $G' = (V', E', \mathbf{X}')$ with the same number of nodes $|V| = |V'| = n$ , the distance between their representations after the structure-aware attention is bounded by:
|
| 358 |
+
|
| 359 |
+
$$
|
| 360 |
+
\left\| \mathrm{SA\text{-}attn}(v) - \mathrm{SA\text{-}attn}(v^{\prime}) \right\| \leq C_{1} \left[ \left\| h_{v} - h_{v^{\prime}}^{\prime} \right\| + D\left(\mathbf{H}, \mathbf{H}^{\prime}\right) \right] + C_{2}\, D\left(\mathbf{X}, \mathbf{X}^{\prime}\right), \tag{11}
|
| 361 |
+
$$
|
| 362 |
+
|
| 363 |
+
where $h_w \coloneqq \varphi(w, G)$ denotes the subgraph representation at node $w$ for any $w \in V$ and $h_{w'}' \coloneqq \varphi(w', G')$ similarly, and $\mathbf{H} = (h_w)_{w \in V}$ and $\mathbf{H}' = (h_{w'}')_{w' \in V'}$ denote the multiset of subgraph representations in $G$ and $G'$ respectively. Denoting by $\Pi(V, V')$ the set of permutations between $V$ and $V'$ , $D$ is a matching metric between two multisets of representations with the same cardinality, defined as
|
| 364 |
+
|
| 365 |
+
$$
|
| 366 |
+
D (\mathbf {X}, \mathbf {X} ^ {\prime}) := \inf _ {\pi \in \Pi (V, V ^ {\prime})} \sup _ {w \in V} \| x _ {w} - x _ {\pi (w)} ^ {\prime} \|.
|
| 367 |
+
$$
|
| 368 |
+
|
| 369 |
+
$C_1$ and $C_2$ are constants given by:
|
| 370 |
+
|
| 371 |
+
$$
|
| 372 |
+
C_{1} = \sqrt{\frac{2}{d_{out}}}\, n\, \mathrm{Lip}(f)\, C_{\varphi} \|\mathbf{W}_{\mathbf{Q}}\|_{\infty} \|\mathbf{W}_{\mathbf{K}}\|_{\infty}, \qquad C_{2} = \mathrm{Lip}(f).
|
| 373 |
+
$$
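As a small aid to intuition, the brute-force snippet below evaluates the matching metric $D$ exactly as defined above, by enumerating all node permutations; it is purely illustrative (exponential in $n$) and not part of the original analysis.

```python
import itertools
import numpy as np

def matching_metric(X, X_prime):
    """D(X, X'): minimise over permutations the largest row-wise distance."""
    n = len(X)
    best = float("inf")
    for perm in itertools.permutations(range(n)):
        worst = max(np.linalg.norm(X[w] - X_prime[perm[w]]) for w in range(n))
        best = min(best, worst)
    return best

# example with two small multisets of node features
X = np.array([[0.0, 0.0], [1.0, 0.0]])
X_prime = np.array([[1.1, 0.0], [0.0, 0.2]])
print(matching_metric(X, X_prime))   # best matching pairs rows 0<->1 and 1<->0
```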
|
| 374 |
+
|
| 375 |
+
Proof. Let us denote by
|
| 376 |
+
|
| 377 |
+
$$
|
| 378 |
+
z _ {v} = \left(\left\langle \mathbf {W} _ {\mathbf {Q}} h _ {v}, \mathbf {W} _ {\mathbf {K}} h _ {w} \right\rangle\right) _ {w \in V} \in \mathbb {R} ^ {n},
|
| 379 |
+
$$
|
| 380 |
+
|
| 381 |
+
$$
|
| 382 |
+
z _ {v ^ {\prime}} ^ {\prime} = \left(\left\langle \mathbf {W} _ {\mathbf {Q}} h _ {v ^ {\prime}} ^ {\prime}, \mathbf {W} _ {\mathbf {K}} h _ {w ^ {\prime}} ^ {\prime} \right\rangle\right) _ {w ^ {\prime} \in V ^ {\prime}} \in \mathbb {R} ^ {n},
|
| 383 |
+
$$
|
| 384 |
+
|
| 385 |
+
and by $\mathrm{softmax}(z) \in \mathbb{R}^n$ for any $z \in \mathbb{R}^n$ with its $i$ -th coefficient
|
| 386 |
+
|
| 387 |
+
$$
|
| 388 |
+
\mathrm {s o f t m a x} (z) _ {i} = \frac {\exp (z _ {i} / \sqrt {d _ {o u t}})}{\sum_ {j = 1} ^ {n} \exp (z _ {j} / \sqrt {d _ {o u t}})}.
|
| 389 |
+
$$
|
| 390 |
+
|
| 391 |
+
Then, we have
|
| 392 |
+
|
| 393 |
+
$$
|
| 394 |
+
\begin{array}{l} \| \mathrm {S A - A t t n} (v) - \mathrm {S A - A t t n} \left(v ^ {\prime}\right) \| \\ = \left\| \sum_ {w \in V} \operatorname {s o f t m a x} (z _ {v}) _ {w} f (x _ {w}) - \sum_ {w ^ {\prime} \in V ^ {\prime}} \operatorname {s o f t m a x} (z _ {v ^ {\prime}} ^ {\prime}) _ {w ^ {\prime}} f (x _ {w ^ {\prime}} ^ {\prime}) \right\| \\ = \left\| \sum_ {w \in V} \left(\operatorname {s o f t m a x} \left(z _ {v}\right) _ {w} - \operatorname {s o f t m a x} \left(z _ {v ^ {\prime}} ^ {\prime}\right) _ {\pi (w)}\right) f \left(x _ {w}\right) + \sum_ {w \in V} \operatorname {s o f t m a x} \left(z _ {v ^ {\prime}} ^ {\prime}\right) _ {\pi (w)} f \left(x _ {w}\right) - \sum_ {w ^ {\prime} \in V ^ {\prime}} \operatorname {s o f t m a x} \left(z _ {v ^ {\prime}} ^ {\prime}\right) _ {w ^ {\prime}} \left(f \left(x _ {w ^ {\prime}} ^ {\prime}\right)\right) \right\| \\ \leq \left\| \sum_ {w \in V} \left(\operatorname {s o f t m a x} \left(z _ {v}\right) _ {w} - \operatorname {s o f t m a x} \left(z _ {v ^ {\prime}} ^ {\prime}\right) _ {\pi (w)}\right) f \left(x _ {w}\right) \right\| + \left\| \sum_ {w ^ {\prime} \in V ^ {\prime}} \operatorname {s o f t m a x} \left(z _ {v ^ {\prime}} ^ {\prime}\right) _ {w ^ {\prime}} \left(f \left(x _ {\pi^ {- 1} \left(w ^ {\prime}\right)}\right) - f \left(x _ {w ^ {\prime}} ^ {\prime}\right)\right) \right\| \\ \end{array}
|
| 395 |
+
$$
|
| 396 |
+
|
| 397 |
+
where $\pi : V \to V'$ is an arbitrary permutation and we used the triangle inequality. Now we need to bound the two terms respectively. We first bound the second term:
|
| 398 |
+
|
| 399 |
+
$$
|
| 400 |
+
\begin{array}{l} \left\| \sum_ {w ^ {\prime} \in V ^ {\prime}} \operatorname {s o f t m a x} \left(z _ {v ^ {\prime}} ^ {\prime}\right) _ {w ^ {\prime}} \left(f \left(x _ {\pi^ {- 1} \left(w ^ {\prime}\right)}\right) - f \left(x _ {w ^ {\prime}} ^ {\prime}\right)\right) \right\| \leq \sum_ {w ^ {\prime} \in V ^ {\prime}} \operatorname {s o f t m a x} \left(z _ {v ^ {\prime}} ^ {\prime}\right) _ {w ^ {\prime}} \left\| f \left(x _ {\pi^ {- 1} \left(w ^ {\prime}\right)}\right) - f \left(x _ {w ^ {\prime}} ^ {\prime}\right) \right\| \\ \leq \sum_ {w ^ {\prime} \in V ^ {\prime}} \operatorname {s o f t m a x} \left(z _ {v ^ {\prime}} ^ {\prime}\right) _ {w ^ {\prime}} \operatorname {L i p} (f) \| x _ {\pi^ {- 1} \left(w ^ {\prime}\right)} - x _ {w ^ {\prime}} ^ {\prime} \| \\ = \operatorname {L i p} (f) \sum_ {w ^ {\prime} \in V ^ {\prime}} \operatorname {s o f t m a x} \left(z _ {v ^ {\prime}} ^ {\prime}\right) _ {w ^ {\prime}} \| x _ {\pi^ {- 1} \left(w ^ {\prime}\right)} - x _ {w ^ {\prime}} ^ {\prime} \| \\ \leq \operatorname {L i p} (f) \sup _ {w ^ {\prime} \in V ^ {\prime}} \| x _ {\pi^ {- 1} \left(w ^ {\prime}\right)} - x _ {w ^ {\prime}} ^ {\prime} \| \\ = \operatorname {L i p} (f) \sup _ {w \in V} \| x _ {w} - x _ {\pi (w)} ^ {\prime} \| \\ \end{array}
|
| 401 |
+
$$
|
| 402 |
+
|
| 403 |
+
where the first inequality is a triangle inequality, the second inequality uses the Lipschitzness of $f$ . And for the first term, we can upper-bound it by
|
| 404 |
+
|
| 405 |
+
$$
|
| 406 |
+
\begin{array}{l} \left\| \sum_ {w \in V} (\operatorname {s o f t m a x} (z _ {v}) _ {w} - \operatorname {s o f t m a x} (z _ {v ^ {\prime}} ^ {\prime}) _ {\pi (w)}) f (x _ {w}) \right\| \\ \leq \| \operatorname {s o f t m a x} \left(z _ {v}\right) - \operatorname {s o f t m a x} \left(\left(z _ {v ^ {\prime}} ^ {\prime}\right) _ {\pi}\right) \| \sqrt {\sum_ {w \in V} \| f \left(x _ {w}\right) \| ^ {2}} \\ \leq \frac {1}{\sqrt {d _ {o u t}}} \| z _ {v} - (z _ {v ^ {\prime}} ^ {\prime}) _ {\pi} \| \sqrt {n} \mathrm {L i p} (f), \\ \end{array}
|
| 407 |
+
$$
|
| 408 |
+
|
| 409 |
+
where by abuse of notation, $(z)_{\pi} \in \mathbb{R}^n$ denotes the vector whose $w$ -th entry is $z_{\pi(w)}$ for any $z \in \mathbb{R}^n$ . The first inequality comes from a simple matrix norm inequality, and the second inequality uses the fact that the softmax function is $1/\sqrt{d_{out}}$ -Lipschitz (see e.g. Gao & Pavel, 2017). Then, we have
|
| 410 |
+
|
| 411 |
+
$$
|
| 412 |
+
\begin{array}{l} \| z _ {v} - (z _ {v ^ {\prime}} ^ {\prime}) _ {\pi}) \| ^ {2} = \sum_ {w \in V} \left(\langle \mathbf {W _ {Q}} h _ {v}, \mathbf {W _ {K}} h _ {w} \rangle - \langle \mathbf {W _ {Q}} h _ {v ^ {\prime}} ^ {\prime}, \mathbf {W _ {K}} h _ {\pi (w)} ^ {\prime} \rangle\right) ^ {2} \\ = \sum_ {w \in V} \left(\langle \mathbf {W _ {Q}} h _ {v}, \mathbf {W _ {K}} (h _ {w} - h _ {\pi (w)} ^ {\prime}) \rangle + \langle \mathbf {W _ {Q}} (h _ {v} - h _ {v ^ {\prime}} ^ {\prime}), \mathbf {W _ {K}} h _ {\pi (w)} ^ {\prime} \rangle\right) ^ {2} \\ \leq 2 \sum_ {w \in V} \left(\langle \mathbf {W _ {Q}} h _ {v}, \mathbf {W _ {K}} (h _ {w} - h _ {\pi (w)} ^ {\prime}) \rangle^ {2} + \langle \mathbf {W _ {Q}} (h _ {v} - h _ {v ^ {\prime}} ^ {\prime}), \mathbf {W _ {K}} h _ {\pi (w)} ^ {\prime} \rangle^ {2}\right) \\ \leq 2 \sum_ {w \in V} \left(\| \mathbf {W _ {Q}} h _ {v} \| ^ {2} \| \mathbf {W _ {K}} (h _ {w} - h _ {\pi (w)} ^ {\prime}) \| ^ {2} + \| \mathbf {W _ {Q}} (h _ {v} - h _ {v ^ {\prime}} ^ {\prime}) \| ^ {2} \| \mathbf {W _ {K}} h _ {\pi (w)} ^ {\prime} \| ^ {2}\right) \\ \leq 2 \sum_ {w \in V} \left(C _ {\varphi} ^ {2} \| \mathbf {W _ {Q}} \| _ {\infty} ^ {2} \| \mathbf {W _ {K}} \| _ {\infty} ^ {2} \| h _ {w} - h _ {\pi (w)} ^ {\prime} \| ^ {2} + \| \mathbf {W _ {Q}} \| _ {\infty} ^ {2} \| h _ {v} - h _ {v ^ {\prime}} ^ {\prime} \| ^ {2} C _ {\varphi} ^ {2} \| \mathbf {W _ {K}} \| _ {\infty} ^ {2}\right) \\ \leq 2 n C _ {\varphi} ^ {2} \| \mathbf {W} _ {\mathbf {Q}} \| _ {\infty} ^ {2} \| \mathbf {W} _ {\mathbf {K}} \| _ {\infty} ^ {2} \left(\| h _ {v} - h _ {v ^ {\prime}} ^ {\prime} \| ^ {2} + \sup _ {w \in V} \| h _ {w} - h _ {\pi (w)} ^ {\prime} \| ^ {2}\right), \\ \end{array}
|
| 413 |
+
$$
|
| 414 |
+
|
| 415 |
+
where the first inequality comes from $(a + b)^2 \leq 2(a^2 + b^2)$ , the second one uses the Cauchy-Schwarz inequality and the third one uses the definition of spectral norm and the bound of the structure extractor function. Then, we obtain the following inequality
|
| 416 |
+
|
| 417 |
+
$$
|
| 418 |
+
\begin{array}{l} \left\| \sum_ {w \in V} \left(\operatorname {s o f t m a x} \left(z _ {v}\right) _ {w} - \operatorname {s o f t m a x} \left(z _ {v ^ {\prime}} ^ {\prime}\right) _ {\pi (w)}\right) f \left(x _ {w}\right) \right\| \\ \leq \sqrt {\frac {2}{d _ {o u t}}} n \mathrm {L i p} (f) C _ {\varphi} \| \mathbf {W} _ {\mathbf {Q}} \| _ {\infty} \| \mathbf {W} _ {\mathbf {K}} \| _ {\infty} \left(\| h _ {v} - h _ {v ^ {\prime}} ^ {\prime} \| + \sup _ {w \in V} \| h _ {w} - h _ {\pi (w)} ^ {\prime} \|\right) \\ \end{array}
|
| 419 |
+
$$
|
| 420 |
+
|
| 421 |
+
By combining the upper bounds of the first and the second term, we obtain an upper bound for the distance between the structure-aware attention representations:
|
| 422 |
+
|
| 423 |
+
$$
|
| 424 |
+
\| \mathrm {S A - a t t n} (v) - \mathrm {S A - a t t n} (v ^ {\prime}) \| \leq C _ {1} \left(\| h _ {v} - h _ {v ^ {\prime}} ^ {\prime} \| + \sup _ {w \in V} \| h _ {w} - h _ {\pi (w)} ^ {\prime} \|\right) + C _ {2} \sup _ {w \in V} \| x _ {w} - x _ {\pi (w)} ^ {\prime} \|,
|
| 425 |
+
$$
|
| 426 |
+
|
| 427 |
+
for any permutation $\pi \in \Pi (V,V^{\prime})$ , where
|
| 428 |
+
|
| 429 |
+
$$
|
| 430 |
+
C _ {1} = \sqrt {\frac {2}{d _ {o u t}}} n \operatorname {L i p} (f) C _ {\varphi} \| \mathbf {W} _ {\mathbf {Q}} \| _ {\infty} \| \mathbf {W} _ {\mathbf {K}} \| _ {\infty}
|
| 431 |
+
$$
|
| 432 |
+
|
| 433 |
+
$$
|
| 434 |
+
C _ {2} = \operatorname {L i p} (f).
|
| 435 |
+
$$
|
| 436 |
+
|
| 437 |
+
Finally, by taking the infimum over the set of permutations, we obtain the inequality in the theorem.
|
| 438 |
+
|
| 439 |
+

|
| 440 |
+
|
| 441 |
+
# B.2. Expressivity Analysis
|
| 442 |
+
|
| 443 |
+
Here, we assume that $f$ can be any continuous mapping; in practice, it is approximated by an MLP network by virtue of the universal approximation theorem (Hornik, 1991).
|
| 444 |
+
|
| 445 |
+
Theorem 2. Assume that the space of node attributes $\mathcal{X}$ is countable. For any pair of nodes $v$ and $v'$ in two graphs $G = (V, E, \mathbf{X})$ and $G' = (V', E', \mathbf{X}')$ , assume that there exists a node $u_1$ in $V$ such that $x_{u_1} \neq x_w$ for any $w \in V \setminus \{u_1\}$ , and a node $u_2$ in $V$ such that its subgraph representation $\varphi(u_2, G) \neq \varphi(w, G)$ for any $w \in V \setminus \{u_2\}$ . Then, there exist attention parameters and a mapping $f: \mathcal{X} \to \mathbb{R}^{d_{out}}$ such that the representations of $v$ and $v'$ after the structure-aware attention are different, i.e. $\mathrm{SA\text{-}attn}(v) \neq \mathrm{SA\text{-}attn}(v')$ , if their subgraph representations are different, i.e. $\varphi(v, G) \neq \varphi(v', G')$ .
|
| 446 |
+
|
| 447 |
+
Proof. This theorem amounts to showing the injectivity of the original dot-product attention with respect to the query, that is to show
|
| 448 |
+
|
| 449 |
+
$$
|
| 450 |
+
\operatorname {A t t n} (h _ {v}, x _ {v}, G) = \sum_ {u \in V} \frac {\kappa_ {\exp} (h _ {v} , h _ {u})}{\sum_ {w \in V} \kappa_ {\exp} (h _ {v} , h _ {w})} f (x _ {u})
|
| 451 |
+
$$
|
| 452 |
+
|
| 453 |
+
is injective in $h_v$ , where
|
| 454 |
+
|
| 455 |
+
$$
|
| 456 |
+
\kappa_ {\exp} (h, h ^ {\prime}) := \exp \left(\left\langle \mathbf {W} _ {\mathbf {Q}} h + b _ {Q}, \mathbf {W} _ {\mathbf {K}} h ^ {\prime} + b _ {K} \right\rangle / \sqrt {d _ {o u t}}\right). \tag {12}
|
| 457 |
+
$$
|
| 458 |
+
|
| 459 |
+
Here we consider the offset terms that were omitted in Eq. (1). Let us prove the contrapositive of the theorem. We assume that $\mathrm{Attn}(h_v, x_v, G) = \mathrm{Attn}(h_{v'}', x_{v'}', G')$ for any set of parameters and any mapping $f$ and want to show that $h_v = h_{v'}'$ .
|
| 460 |
+
|
| 461 |
+
Without loss of generality, we assume that $G$ and $G'$ have the same number of nodes, that is $|V| = |V'| = n$ . Otherwise, one can easily add some virtual isolated nodes to the smaller graph. Now if we take $\mathbf{W}_{\mathbf{Q}} = \mathbf{W}_{\mathbf{K}} = 0$ , all the softmax coefficients will be identical and we have
|
| 462 |
+
|
| 463 |
+
$$
|
| 464 |
+
\sum_ {w \in V} f (x _ {w}) = \sum_ {w ^ {\prime} \in V ^ {\prime}} f (x _ {w ^ {\prime}} ^ {\prime}).
|
| 465 |
+
$$
|
| 466 |
+
|
| 467 |
+
Since this equality holds for any mapping $f$ , Lemma 5 of Xu et al. (2019) implies that the multisets $\mathbf{X}$ and $\mathbf{X}'$ are identical.
|
| 468 |
+
|
| 469 |
+
As a consequence, we can re-enumerate the nodes of the two graphs by a common index set $V$ (by abuse of notation, we keep using $V$ here) such that $x_{u} = x_{u}^{\prime}$ for any $u \in V$ . Then, we can rewrite the equality $\operatorname{Attn}(h_v,x_v,G) = \operatorname{Attn}(h_{v'}',x_{v'}',G')$ as
|
| 470 |
+
|
| 471 |
+
$$
|
| 472 |
+
\sum_ {u \in V} \left(\frac {\kappa_ {\mathrm {e x p}} (h _ {v} , h _ {u})}{\sum_ {w \in V} \kappa_ {\mathrm {e x p}} (h _ {v} , h _ {w})} - \frac {\kappa_ {\mathrm {e x p}} (h _ {v ^ {\prime}} ^ {\prime} , h _ {u} ^ {\prime})}{\sum_ {w \in V} \kappa_ {\mathrm {e x p}} (h _ {v ^ {\prime}} ^ {\prime} , h _ {w} ^ {\prime})}\right) f (x _ {u}) = 0.
|
| 473 |
+
$$
|
| 474 |
+
|
| 475 |
+
Now since there exists a node $u_{1}$ in $V$ whose attributes are different from those of all other nodes, i.e. $x_{u_1} \neq x_w$ for any $w \in V \setminus \{u_1\}$ , we can find a mapping $f$ such that $f(x_{u_1})$ is not in the span of $(f(x_w))_{w \in V, w \neq u_1}$ . Then, by this linear independence we have
|
| 476 |
+
|
| 477 |
+
$$
|
| 478 |
+
\frac {\kappa_ {\mathrm {e x p}} (h _ {v} , h _ {u _ {1}})}{\sum_ {w \in V} \kappa_ {\mathrm {e x p}} (h _ {v} , h _ {w})} = \frac {\kappa_ {\mathrm {e x p}} (h _ {v ^ {\prime}} ^ {\prime} , h _ {u _ {1}} ^ {\prime})}{\sum_ {w \in V} \kappa_ {\mathrm {e x p}} (h _ {v ^ {\prime}} ^ {\prime} , h _ {w} ^ {\prime})},
|
| 479 |
+
$$
|
| 480 |
+
|
| 481 |
+
for any $\mathbf{W}_{\mathbf{Q}},\mathbf{W}_{\mathbf{K}},b_{Q}$ and $b_{K}$ .
|
| 482 |
+
|
| 483 |
+
On the one hand, if we take $\mathbf{W}_{\mathbf{Q}} = 0$ , we have for any $\mathbf{W}_{\mathbf{K}}$ , $b_{Q}$ and $b_{K}$ that
|
| 484 |
+
|
| 485 |
+
$$
|
| 486 |
+
\frac {\exp \left(\langle b _ {Q} , \mathbf {W} _ {\mathbf {K}} h _ {u _ {1}} + b _ {K} \rangle / \sqrt {d _ {o u t}}\right)}{\sum_ {w \in V} \exp \left(\langle b _ {Q} , \mathbf {W} _ {\mathbf {K}} h _ {w} + b _ {K} \rangle / \sqrt {d _ {o u t}}\right)} = \frac {\exp \left(\langle b _ {Q} , \mathbf {W} _ {\mathbf {K}} h _ {u _ {1}} ^ {\prime} + b _ {K} \rangle / \sqrt {d _ {o u t}}\right)}{\sum_ {w \in V} \exp \left(\langle b _ {Q} , \mathbf {W} _ {\mathbf {K}} h _ {w} ^ {\prime} + b _ {K} \rangle / \sqrt {d _ {o u t}}\right)}.
|
| 487 |
+
$$
|
| 488 |
+
|
| 489 |
+
On the other hand if we take $b_{Q} = 0$ we have for any $\mathbf{W}_{\mathbf{Q}}$ , $\mathbf{W}_{\mathbf{K}}$ and $b_{K}$ that
|
| 490 |
+
|
| 491 |
+
$$
|
| 492 |
+
\begin{array}{l} \frac {\exp \left(\langle \mathbf {W _ {Q}} h _ {v} , \mathbf {W _ {K}} h _ {u _ {1}} + b _ {K} \rangle / \sqrt {d _ {o u t}}\right)}{\sum_ {w \in V} \exp \left(\langle \mathbf {W _ {Q}} h _ {v} , \mathbf {W _ {K}} h _ {w} + b _ {K} \rangle / \sqrt {d _ {o u t}}\right)} = \frac {\exp \left(\langle \mathbf {W _ {Q}} h _ {v ^ {\prime}} ^ {\prime} , \mathbf {W _ {K}} h _ {u _ {1}} ^ {\prime} + b _ {K} \rangle / \sqrt {d _ {o u t}}\right)}{\sum_ {w \in V} \exp \left(\langle \mathbf {W _ {Q}} h _ {v ^ {\prime}} ^ {\prime} , \mathbf {W _ {K}} h _ {w} ^ {\prime} + b _ {K} \rangle / \sqrt {d _ {o u t}}\right)} \\ = \frac {\exp \left(\langle \mathbf {W _ {Q}} h _ {v ^ {\prime}} ^ {\prime} , \mathbf {W _ {K}} h _ {u _ {1}} + b _ {K} \rangle / \sqrt {d _ {o u t}}\right)}{\sum_ {w \in V} \exp \left(\langle \mathbf {W _ {Q}} h _ {v ^ {\prime}} ^ {\prime} , \mathbf {W _ {K}} h _ {w} + b _ {K} \rangle / \sqrt {d _ {o u t}}\right)}, \\ \end{array}
|
| 493 |
+
$$
|
| 494 |
+
|
| 495 |
+
where the second equality is obtained by replacing $b_{Q}$ with $\mathbf{W}_{\mathbf{Q}} h_{v'}'$ in the above equality. Then, we can rewrite the above equality as below:
|
| 496 |
+
|
| 497 |
+
$$
|
| 498 |
+
\sum_ {w \in V} \exp \left(\frac {\langle \mathbf {W _ {Q}} h _ {v} , \mathbf {W _ {K}} (h _ {w} - h _ {u _ {1}}) \rangle}{\sqrt {d _ {o u t}}}\right) = \sum_ {w \in V} \exp \left(\frac {\langle \mathbf {W _ {Q}} h _ {v ^ {\prime}} ^ {\prime} , \mathbf {W _ {K}} (h _ {w} - h _ {u _ {1}}) \rangle}{\sqrt {d _ {o u t}}}\right).
|
| 499 |
+
$$
|
| 500 |
+
|
| 501 |
+
If we denote by $\phi : \mathbb{R}^{d_{out}} \to \mathcal{H}$ the feature mapping associated with the dot product kernel $\kappa_{\mathrm{exp}}(t, t') = \exp(\langle t, t' \rangle / \sqrt{d_{out}})$ and $\mathcal{H}$ the corresponding reproducing kernel Hilbert space, we then have for any $\mathbf{W}_{\mathbf{Q}}$ and $\mathbf{W}_{\mathbf{K}}$ that
|
| 502 |
+
|
| 503 |
+
$$
|
| 504 |
+
\left\langle \phi (\mathbf {W} _ {\mathbf {Q}} h _ {v}) - \phi (\mathbf {W} _ {\mathbf {Q}} h _ {v ^ {\prime}} ^ {\prime}), \sum_ {w \in V} \phi (\mathbf {W} _ {\mathbf {K}} (h _ {w} - h _ {u _ {1}})) \right\rangle_ {\mathcal {H}} = 0.
|
| 505 |
+
$$
|
| 506 |
+
|
| 507 |
+
Since by assumption there exists a $u_{2} \in V$ such that $h_{u_{2}} - h_{u_{1}} \neq 0$ and $\kappa_{\mathrm{exp}}$ is a universal kernel (Micchelli et al., 2006), the span of $\{\phi(\mathbf{W}_{\mathbf{K}}(h_{u_{2}} - h_{u_{1}}))\}_{\mathbf{W}_{\mathbf{K}}}$ is dense in $\mathcal{H}$ , and we obtain $\phi(\mathbf{W}_{\mathbf{Q}} h_{v}) = \phi(\mathbf{W}_{\mathbf{Q}} h_{v^{\prime}}^{\prime})$ . We can then conclude, by the injectivity of $\phi$ , that
|
| 508 |
+
|
| 509 |
+
$$
|
| 510 |
+
\mathbf {W} _ {\mathbf {Q}} h _ {v} = \mathbf {W} _ {\mathbf {Q}} h _ {v ^ {\prime}} ^ {\prime},
|
| 511 |
+
$$
|
| 512 |
+
|
| 513 |
+
for any $\mathbf{W}_{\mathbf{Q}}$ , and thus $h_v = h_{v'}'$ . Now by taking $h_v = \varphi(v, G)$ and $h_{v'}' = \varphi(v', G')$ , we obtain the theorem.
|
| 514 |
+
|
| 515 |
+
# C. Experimental Details and Additional Results
|
| 516 |
+
|
| 517 |
+
In this section, we provide implementation details and additional experimental results.
|
| 518 |
+
|
| 519 |
+
# C.1. Computation Details
|
| 520 |
+
|
| 521 |
+
All experiments were performed on a shared GPU cluster equipped with GTX1080, GTX1080TI, GTX2080TI and TITAN RTX. About 20 of these GPUs were used simultaneously, and the total computational cost of this research project was about 1k GPU hours.
|
| 522 |
+
|
| 523 |
+
# C.2. Datasets Description
|
| 524 |
+
|
| 525 |
+
We provide details of the datasets used in our experiments, including ZINC (Irwin et al., 2012), CLUSTER (Dwivedi et al., 2020), PATTERN (Dwivedi et al., 2020), OGBG-PPA (Hu et al., 2020a) and OGBG-CODE2 (Hu et al., 2020a). For each dataset, we follow their respective training protocols and use the standard train/validation/test splits and evaluation metrics.
|
| 526 |
+
|
| 527 |
+
ZINC. The ZINC dataset is a graph regression dataset comprised of molecules, where the task is to predict constrained solubility. As in Dwivedi et al. (2020), we use the subset of 12K molecules and follow the same splits.
|
| 528 |
+
|
| 529 |
+
Table 4: Hyperparameters for SAT models trained on different datasets. RWPE- $p$ indicates using $p$ steps in the random walk positional encoding, which results in a $p$ -dimensional vector as the positional representation for each node.
|
| 530 |
+
|
| 531 |
+
<table><tr><td>Hyperparameter</td><td>ZINC</td><td>CLUSTER</td><td>PATTERN</td><td>OGBG-PPA</td><td>OGBG-CODE2</td></tr><tr><td>#Layers</td><td>6</td><td>16</td><td>6</td><td>3</td><td>4</td></tr><tr><td>Hidden dimensions</td><td>64</td><td>48</td><td>64</td><td>128</td><td>256</td></tr><tr><td>FFN hidden dimensions</td><td colspan="5">2×Hidden dimensions</td></tr><tr><td>#Attention heads</td><td>8</td><td>8</td><td>8</td><td>8</td><td>{4,8}</td></tr><tr><td>Dropout</td><td colspan="5">{0.0,0.1,0.2,0.3,0.4}</td></tr><tr><td>Size of subgraphs k</td><td colspan="5">{1,2,3,4}</td></tr><tr><td>Readout method</td><td>mean</td><td>None</td><td>None</td><td>mean</td><td>mean</td></tr><tr><td>Absolute PE</td><td>RWPE-20</td><td>RWPE-3</td><td>RWPE-7</td><td>None</td><td>None</td></tr><tr><td>Learning rate</td><td>0.001</td><td>0.0005</td><td>0.0003</td><td>0.0003</td><td>0.0001</td></tr><tr><td>Batch size</td><td>128</td><td>32</td><td>32</td><td>32</td><td>32</td></tr><tr><td>#Epochs</td><td>2000</td><td>200</td><td>200</td><td>200</td><td>30</td></tr><tr><td>Warm-up steps</td><td>5000</td><td>5000</td><td>5000</td><td>10 epochs</td><td>2 epochs</td></tr><tr><td>Weight decay</td><td>1e-5</td><td>1e-4</td><td>1e-4</td><td>1e-4</td><td>1e-6</td></tr></table>
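As a rough illustration of what RWPE- $p$ computes (following the random-walk positional encoding of Dwivedi et al. (2022): each node's encoding collects the return probabilities after $1, \dots, p$ random-walk steps), here is a short NumPy sketch; the exact normalization used in the original implementation may differ.

```python
import numpy as np

def rwpe(adj, p):
    """Random-walk PE: node i gets [(RW)_ii, (RW^2)_ii, ..., (RW^p)_ii]."""
    deg = adj.sum(axis=1)
    rw = adj / np.maximum(deg, 1.0)[:, None]   # row-normalised random-walk matrix
    power = np.eye(adj.shape[0])
    pe = []
    for _ in range(p):
        power = power @ rw
        pe.append(np.diag(power))
    return np.stack(pe, axis=1)                # shape: (n_nodes, p)

# e.g. RWPE-20 as used for ZINC in Table 4 would correspond to rwpe(adj, 20)
```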
|
| 532 |
+
|
| 533 |
+
PATTERN and CLUSTER. PATTERN and CLUSTER (Dwivedi et al., 2020) are synthetic datasets that were created using Stochastic Block Models (Abbe, 2018). The goal for both datasets is node classification, with PATTERN focused on detecting a given pattern in the graphs, and CLUSTER focused on identifying communities within the graphs. For PATTERN, the binary class label corresponds to whether a node is part of the predefined pattern or not; for CLUSTER, the multi-class label indicates membership in a community. We use the same splits as Dwivedi et al. (2020).
|
| 534 |
+
|
| 535 |
+
OGBG-PPA. PPA (Hu et al., 2020a) is comprised of protein-protein association networks where the goal is to correctly classify the network into one of 37 classes representing the category of species the network is from. Nodes represent proteins and edges represent associations between proteins. Edge attributes represent information relative to the association, such as co-expression. We use the standard splits provided by Hu et al. (2020a).
|
| 536 |
+
|
| 537 |
+
OGBG-CODE2. CODE2 (Hu et al., 2020a) is a dataset containing source code from the Python programming language. It is made up of Abstract Syntax Trees where the task is to correctly classify the sub-tokens that comprise the method name. We use the standard splits provided by Hu et al. (2020a).
|
| 538 |
+
|
| 539 |
+
# C.3. Hyperparameter Choices and Reproducibility
|
| 540 |
+
|
| 541 |
+
Hyperparameter choice. In general, we perform a very limited hyperparameter search to produce the results in Table 1 and Table 2. The hyperparameters for training SAT models on different datasets are summarized in Table 4, where only the dropout rate and the size of the subgraph $k$ are tuned ( $k \in \{1, 2, 3, 4\}$ ). We use fixed RWPE (Dwivedi et al., 2022) with SAT on ZINC, PATTERN and CLUSTER. In all experiments, we use the validation set to select the dropout rate and the size of the subtree or subgraph $k \in \{1, 2, 3, 4\}$ . All other hyperparameters are fixed for simplicity, including setting the readout method to mean pooling. We did not use RWPE on OGBG-PPA and OGBG-CODE2 as we observed very little performance improvement. Note that we only use $k = 1$ for the $k$ -subgraph SAT models on CLUSTER and PATTERN due to their large memory requirements, which already leads to a performance boost compared to $k$ -subtree SAT using a larger $k$ . Reported results are averaged over 4 seeds on ZINC, PATTERN and CLUSTER, as is done in Dwivedi et al. (2020), and over 10 seeds on OGBG-PPA and OGBG-CODE2.
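The search described above is small enough to write out explicitly; below is a hedged sketch of it, where `train_and_validate` is a hypothetical stand-in for the actual training loop that returns the validation metric.

```python
import random

def train_and_validate(k, dropout):
    # placeholder for training a SAT model and returning its validation score
    return random.random()

best = None
for k in (1, 2, 3, 4):
    for dropout in (0.0, 0.1, 0.2, 0.3, 0.4):
        score = train_and_validate(k=k, dropout=dropout)
        if best is None or score > best[0]:
            best = (score, k, dropout)
print("selected (score, k, dropout):", best)
```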
|
| 542 |
+
|
| 543 |
+
**Optimization.** All our models are trained with the AdamW optimizer (Loshchilov & Hutter, 2018) with a standard warm-up strategy suggested for Transformers in Vaswani et al. (2017). We use either the L1 loss or the cross-entropy loss depending on whether the task is regression or classification. The learning rate scheduler proposed in the Transformer is used on the ZINC, PATTERN and CLUSTER datasets and a cosine scheduler (Loshchilov & Hutter, 2016) is used on the larger OGBG-PPA and OGBG-CODE2 datasets.
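Below is a hedged sketch of this optimization setup (AdamW plus a Vaswani-style linear warm-up followed by inverse square-root decay); the model, learning rate and warm-up length are placeholders, with the values actually used listed in Table 4.

```python
import torch

model = torch.nn.Linear(16, 1)   # stand-in for a SAT model
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3, weight_decay=1e-5)
warmup_steps = 5000

def lr_factor(step: int) -> float:
    step = max(step, 1)
    return min(step / warmup_steps, (warmup_steps / step) ** 0.5)

scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_factor)
# inside the training loop: loss.backward(); optimizer.step(); scheduler.step()
```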
|
| 544 |
+
|
| 545 |
+
Table 5: Number of parameters and training time per epoch for $k$ -subtree SAT models using the hyperparameters in Table 4. Various GNNs are used as the base GNN in SAT.
|
| 546 |
+
|
| 547 |
+
<table><tr><td></td><td>ZINC</td><td>CLUSTER</td><td>PATTERN</td><td>OGBG-PPA</td><td>OGBG-CODE2</td></tr><tr><td>Base GNN</td><td colspan="5">#Parameters</td></tr><tr><td>GCN</td><td>421k</td><td>571k</td><td>380k</td><td>766k</td><td>14,030k</td></tr><tr><td>GIN</td><td>495k</td><td>684k</td><td>455k</td><td>866k</td><td>14,554k</td></tr><tr><td>PNA</td><td>523k</td><td>741k</td><td>493k</td><td>1,088k</td><td>15,734k</td></tr><tr><td>Base GNN</td><td colspan="5">GPU time on a single TITAN RTX/epoch</td></tr><tr><td>GCN</td><td>6s</td><td>142s</td><td>40s</td><td>308s</td><td>40min</td></tr><tr><td>GIN</td><td>6s</td><td>144s</td><td>62s</td><td>310s</td><td>40min</td></tr><tr><td>PNA</td><td>9s</td><td>178s</td><td>90s</td><td>660s</td><td>55min</td></tr></table>
|
| 548 |
+
|
| 549 |
+
Table 6: Test MAE for SAT models using different structure extractors and readout methods on the ZINC dataset.
|
| 550 |
+
|
| 551 |
+
<table><tr><td colspan="2"></td><td colspan="3">W/O EDGE ATTRIBUTES</td><td colspan="3">W/ EDGE ATTRIBUTES</td></tr><tr><td></td><td>BASE GNN</td><td>MEAN</td><td>SUM</td><td>CLS</td><td>MEAN</td><td>SUM</td><td>CLS</td></tr><tr><td rowspan="4">K-SUBTREE SAT</td><td>GCN</td><td>0.174±0.009</td><td>0.170±0.010</td><td>0.167±0.005</td><td>0.127±0.010</td><td>0.117±0.008</td><td>0.115±0.007</td></tr><tr><td>GIN</td><td>0.166±0.007</td><td>0.162±0.010</td><td>0.157±0.002</td><td>0.115±0.005</td><td>0.112±0.008</td><td>0.104±0.003</td></tr><tr><td>GRAPHSAGE</td><td>0.164±0.004</td><td>0.165±0.008</td><td>0.156±0.005</td><td>-</td><td>-</td><td>-</td></tr><tr><td>PNA</td><td>0.147±0.001</td><td>0.142±0.008</td><td>0.135±0.004</td><td>0.102±0.005</td><td>0.102±0.003</td><td>0.098±0.008</td></tr><tr><td rowspan="4">K-SUBGRAPH SAT</td><td>GCN</td><td>0.184±0.002</td><td>0.186±0.007</td><td>0.184±0.007</td><td>0.114±0.005</td><td>0.103±0.002</td><td>0.103±0.008</td></tr><tr><td>GIN</td><td>0.162±0.013</td><td>0.158±0.007</td><td>0.162±0.005</td><td>0.095±0.002</td><td>0.097±0.002</td><td>0.098±0.010</td></tr><tr><td>GRAPHSAGE</td><td>0.168±0.005</td><td>0.165±0.005</td><td>0.169±0.005</td><td>-</td><td>-</td><td>-</td></tr><tr><td>PNA</td><td>0.131±0.002</td><td>0.129±0.003</td><td>0.128±0.004</td><td>0.094±0.008</td><td>0.089±0.002</td><td>0.093±0.009</td></tr></table>
|
| 552 |
+
|
| 553 |
+
Number of parameters and computation time. In Table 5, we report the number of parameters and the training time per epoch for SAT with $k$ -subtree GNN extractors using the hyperparameters selected from Table 4. Note that the number of parameters used by our SAT models on the OGB datasets is smaller than for most of the state-of-the-art methods.
|
| 554 |
+
|
| 555 |
+
# C.4. Additional Results
|
| 556 |
+
|
| 557 |
+
We provide additional experimental results on ZINC, OGBG-PPA and OGBG-CODE2.
|
| 558 |
+
|
| 559 |
+
# C.4.1. ADDITIONAL RESULTS ON ZINC
|
| 560 |
+
|
| 561 |
+
We report a more thorough comparison of SAT instances using different structure extractors and different readout methods in Table 6. We find that SAT models with PNA consistently outperform other GNNs. Additionally, the readout methods have very little impact on the prediction performance.
|
| 562 |
+
|
| 563 |
+
# C.4.2. ADDITIONAL RESULTS ON OGBG-PPA
|
| 564 |
+
|
| 565 |
+
Table 7 summarizes the results for $k$ -subtree SAT with different GNNs compared to state-of-the-art methods on OGBG-PPA. All the results are computed from 10 runs using different random seeds.
|
| 566 |
+
|
| 567 |
+
# C.4.3. ADDITIONAL RESULTS ON OGBG-CODE2
|
| 568 |
+
|
| 569 |
+
Table 8 summarizes the results for $k$ -subtree SAT with different GNNs compared to state-of-the-art methods on OGBG-CODE2. All the results are computed from 10 runs using different random seeds.
|
| 570 |
+
|
| 571 |
+
Table 7: Comparison of SAT and SOTA methods on the OGBG-PPA dataset. All results are computed from 10 different runs.
|
| 572 |
+
|
| 573 |
+
<table><tr><td colspan="3">OGBG-PPA</td></tr><tr><td>METHOD</td><td>TEST ACCURACY</td><td>VALIDATION ACCURACY</td></tr><tr><td>GCN</td><td>0.6839±0.0084</td><td>0.6497±0.0034</td></tr><tr><td>GCN-VIRTUAL NODE</td><td>0.6857±0.0061</td><td>0.6511±0.0048</td></tr><tr><td>GIN</td><td>0.6892±0.0100</td><td>0.6562±0.0107</td></tr><tr><td>GIN-VIRTUAL NODE</td><td>0.7037±0.0107</td><td>0.6678±0.0105</td></tr><tr><td>TRANSFORMER</td><td>0.6454±0.0033</td><td>0.6221±0.0039</td></tr><tr><td>K-SUBTREE SAT-GCN</td><td>0.7483±0.0048</td><td>0.7072±0.0030</td></tr><tr><td>K-SUBTREE SAT-GIN</td><td>0.7306±0.0076</td><td>0.6928±0.0058</td></tr><tr><td>K-SUBTREE SAT-PNA</td><td>0.7522±0.0056</td><td>0.7025±0.0064</td></tr></table>
|
| 574 |
+
|
| 575 |
+
Table 8: Comparison of SAT and SOTA methods on the OGBG-CODE2 dataset. All results are computed from 10 different runs.
|
| 576 |
+
|
| 577 |
+
<table><tr><td rowspan="2">METHOD</td><td colspan="2">OGBG-CODE2</td></tr><tr><td>TEST F1 SCORE</td><td>VALIDATION F1 SCORE</td></tr><tr><td>GCN</td><td>0.1507±0.0018</td><td>0.1399±0.0017</td></tr><tr><td>GCN-VIRTUAL NODE</td><td>0.1581±0.0026</td><td>0.1461±0.0013</td></tr><tr><td>GIN</td><td>0.1495±0.0023</td><td>0.1376±0.0016</td></tr><tr><td>GIN-VIRTUAL NODE</td><td>0.1581±0.0026</td><td>0.1439±0.0020</td></tr><tr><td>TRANSFORMER</td><td>0.1670±0.0015</td><td>0.1546±0.0018</td></tr><tr><td>GRAPHTRANS</td><td>0.1830±0.0024</td><td>0.1661±0.0012</td></tr><tr><td>K-SUBTREE SAT-GCN</td><td>0.1934±0.0020</td><td>0.1777±0.0011</td></tr><tr><td>K-SUBTREE SAT-GIN</td><td>0.1910±0.0023</td><td>0.1748±0.0016</td></tr><tr><td>K-SUBTREE SAT-PNA</td><td>0.1937±0.0028</td><td>0.1773±0.0023</td></tr></table>
|
| 578 |
+
|
| 579 |
+
# D. Model Interpretation
|
| 580 |
+
|
| 581 |
+
In this section, we provide implementation details about the model visualization.
|
| 582 |
+
|
| 583 |
+
# D.1. Dataset and Training Details
|
| 584 |
+
|
| 585 |
+
We use the Mutagenicity dataset (Kersting et al., 2016), consisting of 4337 molecular graphs labeled based on their mutagenic effect. We randomly split the dataset into train/val/test sets in a stratified way with a proportion of $80/10/10$ . We first train a two-layer vanilla Transformer model using RWPE. The hidden dimension and the number of heads are fixed to 64 and 8 respectively. The CLS pooling described in Section 4.2 is chosen as the readout method for visualization purposes. We also train a $k$ -subtree SAT using exactly the same hyperparameter setting, except that it does not use any absolute positional encoding. $k$ is fixed to 2. For both models, we use the AdamW optimizer and the optimization strategy described in Section C.3. We train for enough epochs that both models converge. While the classic Transformer with RWPE achieves a test accuracy of $78\%$ , the $k$ -subtree SAT achieves an $82\%$ test accuracy.
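The stratified 80/10/10 split can be obtained, for example, with two calls to scikit-learn's `train_test_split`; the sketch below uses placeholder graphs and labels rather than the actual Mutagenicity data.

```python
from sklearn.model_selection import train_test_split

graph_ids = list(range(4337))           # stand-ins for the Mutagenicity graphs
labels = [i % 2 for i in graph_ids]     # stand-in binary mutagenicity labels

train_ids, rest_ids, train_y, rest_y = train_test_split(
    graph_ids, labels, test_size=0.2, stratify=labels, random_state=0)
val_ids, test_ids, val_y, test_y = train_test_split(
    rest_ids, rest_y, test_size=0.5, stratify=rest_y, random_state=0)
```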
|
| 586 |
+
|
| 587 |
+
# D.2. Additional Results
|
| 588 |
+
|
| 589 |
+
Visualization of attention scores. Here, we provide additional visualization examples of the attention scores of the [CLS] node from the Mutagenicity dataset, learned by SAT and a vanilla Transformer. Figure 5 provides several examples of learned attention weights. SAT generally learns sparser and more informative weights, even for very large graphs, as shown in the left panel of the middle row.
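For reference, here is a minimal sketch (an assumed interface, not the authors' visualization code) of how the [CLS] attention weights shown in Figure 5 can be read out of a single attention head: compute the softmax-normalized attention row of the [CLS] token.

```python
import torch
import torch.nn.functional as F

def cls_attention_scores(h, W_q, W_k, cls_index=0):
    """h: (n_nodes, d) final-layer embeddings including the [CLS] node."""
    q = h[cls_index] @ W_q            # query of the [CLS] node, shape (d_out,)
    k = h @ W_k                       # keys of all nodes, shape (n_nodes, d_out)
    scores = (k @ q) / k.shape[-1] ** 0.5
    return F.softmax(scores, dim=0)   # one weight per node, sums to 1
```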
|
| 590 |
+
|
| 591 |
+

|
| 592 |
+
|
| 593 |
+

|
| 594 |
+
|
| 595 |
+

|
| 596 |
+
|
| 597 |
+

|
| 598 |
+
|
| 599 |
+

|
| 600 |
+
|
| 601 |
+

|
| 602 |
+
|
| 603 |
+

|
| 604 |
+
|
| 605 |
+

|
| 606 |
+
|
| 607 |
+

|
| 608 |
+
|
| 609 |
+

|
| 610 |
+
Figure 5: Attention visualization of SAT and the Transformer. The middle column shows the attention weights of the [CLS] node learned by our SAT model and the right column shows the attention weights learned by the classic Transformer with RWPE.
|
| 611 |
+
|
| 612 |
+

|
| 613 |
+
|
| 614 |
+

|
| 615 |
+
|
| 616 |
+

|
| 617 |
+
|
| 618 |
+

|
| 619 |
+
|
| 620 |
+

|
| 621 |
+
|
| 622 |
+

|
| 623 |
+
|
| 624 |
+

|
| 625 |
+
|
| 626 |
+

|
| 627 |
+
|
| 628 |
+

|
| 629 |
+
|
| 630 |
+
# E. Relationship to Subgraph Neural Networks and Graph Pooling
|
| 631 |
+
|
| 632 |
+
In the following we clarify the relationship (and differences) of SAT to Subgraph Neural Networks (Alsentzer et al., 2020) as well as to the general topic of graph pooling.
|
| 633 |
+
|
| 634 |
+
# E.1. Differences to Subgraph Neural Networks
|
| 635 |
+
|
| 636 |
+
Subgraph Neural Networks (SNN) (Alsentzer et al., 2020) explore explicitly incorporating position, neighborhood and structural information for the purpose of solving the problem of subgraph prediction. SNN generates representations at the level of subgraphs (rather than nodes). SAT, on the other hand, is motivated by modeling the structural interaction (through the dot-product attention) between nodes in the Transformer architecture by generating node representations that are structure-aware. This structure-aware aspect is achieved via a structure extractor, which can be any function that extracts local structural information for a given node and does not necessarily need to explicitly extract subgraphs. For example, the $k$ -subtree GNN extractor does not explicitly extract the subgraph, but rather only uses the node representation generated from a GNN. This aspect also makes the $k$ -subtree SAT very scalable. In contrast to SNN, the input to SAT is not a set of subgraphs but rather the original graph, and the structure-aware node representations are computed as the query and key for the dot-product attention at each layer.
|
| 637 |
+
|
| 638 |
+
# E.2. Relationship to Graph Pooling
|
| 639 |
+
|
| 640 |
+
In GNNs, structural information is traditionally incorporated into node embeddings via the neighborhood aggregation process. A complementary way to incorporate structural information is through a process called local pooling, which is typically based on graph clustering (Ying et al., 2018). Local pooling coarsens the adjacency matrix at layer $l$ of the network by, for example, applying a clustering algorithm to group nodes and then replacing the adjacency matrix with one derived from the cluster assignment matrix, in which all nodes within a cluster are connected by an edge. An alternative approach to the pooling layer is based on sampling nodes (Gao & Ji, 2019). While Mesquita et al. (2020) found that these local pooling operations do not currently improve performance relative to simpler operations, local pooling could in principle be incorporated into SAT's existing $k$-subtree and $k$-subgraph GNN extractors, since it is simply another layer in a GNN.
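For concreteness, a minimal sketch of cluster-based coarsening in the DiffPool style is shown below; the soft assignment matrix `s` is assumed to come from some clustering or GNN step, and this illustrates local pooling in general rather than a component of SAT.

```python
import torch

def coarsen(x: torch.Tensor, adj: torch.Tensor, s: torch.Tensor):
    """Pool node features and adjacency with a (soft) cluster assignment.

    x:   (num_nodes, dim)          node features at layer l
    adj: (num_nodes, num_nodes)    adjacency at layer l
    s:   (num_nodes, num_clusters) assignment matrix, rows summing to 1
    Returns pooled features (num_clusters, dim) and a coarsened adjacency
    (num_clusters, num_clusters), following the DiffPool formulation.
    """
    x_pooled = s.T @ x          # aggregate node features into cluster features
    adj_pooled = s.T @ adj @ s  # connectivity between clusters
    return x_pooled, adj_pooled
```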
|
2202.03xxx/2202.03036/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:07c799ebf4ae4ffd74a40d4ebf48c7358fa32a21e0a31d1043e6009d260732f8
|
| 3 |
+
size 1124940
|
2202.03xxx/2202.03036/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.03xxx/2202.03047/35db8a53-2285-4000-8359-95bda668e6ac_content_list.json
ADDED
|
@@ -0,0 +1,1733 @@
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "DATA SET CREATION AND EMPIRICAL ANALYSIS FOR DETECTING SIGNS OF DEPRESSION FROM SOCIAL MEDIA POSTINGS",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
158,
|
| 8 |
+
119,
|
| 9 |
+
836,
|
| 10 |
+
191
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "A PREPRINT",
|
| 17 |
+
"bbox": [
|
| 18 |
+
452,
|
| 19 |
+
223,
|
| 20 |
+
542,
|
| 21 |
+
236
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "Kayalvizhi S \nDepartment of Computer Science \nSSN College of Engineering \nkayalvizhis@ssn.edu.in",
|
| 28 |
+
"bbox": [
|
| 29 |
+
218,
|
| 30 |
+
263,
|
| 31 |
+
442,
|
| 32 |
+
320
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "Thenmozhi D \nDepartment of Computer Science \nSSN College of Engineering \ntheni_d@ssn.edu.in",
|
| 39 |
+
"bbox": [
|
| 40 |
+
553,
|
| 41 |
+
263,
|
| 42 |
+
776,
|
| 43 |
+
320
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "February 8, 2022",
|
| 50 |
+
"bbox": [
|
| 51 |
+
439,
|
| 52 |
+
356,
|
| 53 |
+
555,
|
| 54 |
+
369
|
| 55 |
+
],
|
| 56 |
+
"page_idx": 0
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"text": "ABSTRACT",
|
| 61 |
+
"text_level": 1,
|
| 62 |
+
"bbox": [
|
| 63 |
+
447,
|
| 64 |
+
388,
|
| 65 |
+
545,
|
| 66 |
+
402
|
| 67 |
+
],
|
| 68 |
+
"page_idx": 0
|
| 69 |
+
},
|
| 70 |
+
{
|
| 71 |
+
"type": "text",
|
| 72 |
+
"text": "Depression is a common mental illness that has to be detected and treated at an early stage to avoid serious consequences. There are many methods and modalities for detecting depression that involves physical examination of the individual. However, diagnosing mental health using their social media data is more effective as it avoids such physical examinations. Also, people express their emotions well in social media, it is desirable to diagnose their mental health using social media data. Though there are many existing systems that detects mental illness of a person by analysing their social media data, detecting the level of depression is also important for further treatment. Thus, in this research, we developed a gold standard data set that detects the levels of depression as 'not depressed', 'moderately depressed' and 'severely depressed' from the social media postings. Traditional learning algorithms were employed on this data set and an empirical analysis was presented in this paper. Data augmentation technique was applied to overcome the data imbalance. Among the several variations that are implemented, the model with Word2Vec vectorizer and Random Forest classifier on augmented data outperforms the other variations with a score of 0.877 for both accuracy and F1 measure.",
|
| 73 |
+
"bbox": [
|
| 74 |
+
169,
|
| 75 |
+
416,
|
| 76 |
+
826,
|
| 77 |
+
609
|
| 78 |
+
],
|
| 79 |
+
"page_idx": 0
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"type": "text",
|
| 83 |
+
"text": "Keywords Depression $\\cdot$ Data set $\\cdot$ Data augmentation $\\cdot$ Levels of depression $\\cdot$ Random Forest",
|
| 84 |
+
"bbox": [
|
| 85 |
+
109,
|
| 86 |
+
623,
|
| 87 |
+
723,
|
| 88 |
+
638
|
| 89 |
+
],
|
| 90 |
+
"page_idx": 0
|
| 91 |
+
},
|
| 92 |
+
{
|
| 93 |
+
"type": "text",
|
| 94 |
+
"text": "1 Introduction",
|
| 95 |
+
"text_level": 1,
|
| 96 |
+
"bbox": [
|
| 97 |
+
112,
|
| 98 |
+
657,
|
| 99 |
+
253,
|
| 100 |
+
672
|
| 101 |
+
],
|
| 102 |
+
"page_idx": 0
|
| 103 |
+
},
|
| 104 |
+
{
|
| 105 |
+
"type": "text",
|
| 106 |
+
"text": "Depression (major depressive disorder) is a common and serious medical illness that negatively affects the way one feels, thinks and acts ame. The rate of depression is rapidly increasing day by day. According to Global Health Data Exchange (GHDx), depression has affected 280 million people worldwide who. Detecting depression is important since it has to be observed and treated at an early stage to avoid severe consequences<sup>1</sup>. The depression was generally diagnosed by different methods modalities clinical interviews Al Hanai et al. [2018]Dibeklioglu et al. [2015], analysing the behaviourAlghowinem et al. [2016], monitoring facial and speech modulationsNasir et al. [2016], physical exams with Depression scales Havigerova et al. [2019]Stankevich et al. [2019], videos and audios Morales and Levitan [2016], etc. All these methods of diagnosing involves more involvement of an individual or discussion about their feeling in person.",
|
| 107 |
+
"bbox": [
|
| 108 |
+
109,
|
| 109 |
+
686,
|
| 110 |
+
883,
|
| 111 |
+
813
|
| 112 |
+
],
|
| 113 |
+
"page_idx": 0
|
| 114 |
+
},
|
| 115 |
+
{
|
| 116 |
+
"type": "text",
|
| 117 |
+
"text": "On the other hand, social media is highly emerging into our lives with a considerable rate of increase in social media users according to the statistics of statista sta. Slowly, the social media became a comfortable virtual platform to express our feelings. And so, social media platform can be considered as a source to analyse people's thoughts and so can also be used for analysing mental health of an individual. Thus, we aim to use social media texts for analysing the mental health of a person.",
|
| 118 |
+
"bbox": [
|
| 119 |
+
109,
|
| 120 |
+
818,
|
| 121 |
+
883,
|
| 122 |
+
888
|
| 123 |
+
],
|
| 124 |
+
"page_idx": 0
|
| 125 |
+
},
|
| 126 |
+
{
|
| 127 |
+
"type": "aside_text",
|
| 128 |
+
"text": "arXiv:2202.03047v1 [cs.AI] 7 Feb 2022",
|
| 129 |
+
"bbox": [
|
| 130 |
+
22,
|
| 131 |
+
277,
|
| 132 |
+
55,
|
| 133 |
+
699
|
| 134 |
+
],
|
| 135 |
+
"page_idx": 0
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"type": "page_footnote",
|
| 139 |
+
"text": "<sup>1</sup>https://www.healthline.com/health/depression/effects-on-body",
|
| 140 |
+
"bbox": [
|
| 141 |
+
135,
|
| 142 |
+
897,
|
| 143 |
+
509,
|
| 144 |
+
911
|
| 145 |
+
],
|
| 146 |
+
"page_idx": 0
|
| 147 |
+
},
|
| 148 |
+
{
|
| 149 |
+
"type": "text",
|
| 150 |
+
"text": "The existing works collect social media texts from open source platforms like Reddit Wolohan et al. [2018], FacebookEichstaedt et al. [2018], Twitter Reece et al. [2017]Tsugawa et al. [2015]Deshpande and Rao [2017]Lin et al. [2020], Live journals Nguyen et al. [2014], blog postsTyshchenko [2018], Instagram Reece and Danforth [2017] etc. and used them to detect depression.",
|
| 151 |
+
"bbox": [
|
| 152 |
+
109,
|
| 153 |
+
90,
|
| 154 |
+
883,
|
| 155 |
+
145
|
| 156 |
+
],
|
| 157 |
+
"page_idx": 1
|
| 158 |
+
},
|
| 159 |
+
{
|
| 160 |
+
"type": "text",
|
| 161 |
+
"text": "Research gaps:",
|
| 162 |
+
"text_level": 1,
|
| 163 |
+
"bbox": [
|
| 164 |
+
112,
|
| 165 |
+
147,
|
| 166 |
+
222,
|
| 167 |
+
160
|
| 168 |
+
],
|
| 169 |
+
"page_idx": 1
|
| 170 |
+
},
|
| 171 |
+
{
|
| 172 |
+
"type": "text",
|
| 173 |
+
"text": "All these research works concentrate on diagnosing depression from the social media texts. Although detecting depression has its own significance, detecting the level of depression also has its equal importance for further treatment. Generally, depression is classified into three stages namely mild, moderate and severe typ. Each stage has its own symptoms and effects and so detecting the level of depression is also a crucial one. Thus, we propose a data set to detect the level of depression in addition to detection of depression from the social media texts. The data set is made available to the public in a CodaLab competition repository $^{2}$ . This paper explains the process of data set creation that detects the levels of depression along with some baseline models.",
|
| 174 |
+
"bbox": [
|
| 175 |
+
109,
|
| 176 |
+
160,
|
| 177 |
+
883,
|
| 178 |
+
256
|
| 179 |
+
],
|
| 180 |
+
"page_idx": 1
|
| 181 |
+
},
|
| 182 |
+
{
|
| 183 |
+
"type": "text",
|
| 184 |
+
"text": "Our contributions in this research include:",
|
| 185 |
+
"text_level": 1,
|
| 186 |
+
"bbox": [
|
| 187 |
+
112,
|
| 188 |
+
257,
|
| 189 |
+
413,
|
| 190 |
+
270
|
| 191 |
+
],
|
| 192 |
+
"page_idx": 1
|
| 193 |
+
},
|
| 194 |
+
{
|
| 195 |
+
"type": "list",
|
| 196 |
+
"sub_type": "text",
|
| 197 |
+
"list_items": [
|
| 198 |
+
"1. Creating a new bench mark data set to detect the sign of depression from social media data at postings level.",
|
| 199 |
+
"2. Developing base line models with traditional learning classifiers.",
|
| 200 |
+
"3. Analysing the impact of data augmentation"
|
| 201 |
+
],
|
| 202 |
+
"bbox": [
|
| 203 |
+
150,
|
| 204 |
+
282,
|
| 205 |
+
877,
|
| 206 |
+
335
|
| 207 |
+
],
|
| 208 |
+
"page_idx": 1
|
| 209 |
+
},
|
| 210 |
+
{
|
| 211 |
+
"type": "text",
|
| 212 |
+
"text": "2 Related Work",
|
| 213 |
+
"text_level": 1,
|
| 214 |
+
"bbox": [
|
| 215 |
+
112,
|
| 216 |
+
354,
|
| 217 |
+
264,
|
| 218 |
+
371
|
| 219 |
+
],
|
| 220 |
+
"page_idx": 1
|
| 221 |
+
},
|
| 222 |
+
{
|
| 223 |
+
"type": "text",
|
| 224 |
+
"text": "The aim of our research work is to create a data set that identifies the sign of depression and detect the level of depression and thus, the existing works are analysed in terms of data collection, modalities and methodologies of detecting depression.",
|
| 225 |
+
"bbox": [
|
| 226 |
+
109,
|
| 227 |
+
386,
|
| 228 |
+
883,
|
| 229 |
+
429
|
| 230 |
+
],
|
| 231 |
+
"page_idx": 1
|
| 232 |
+
},
|
| 233 |
+
{
|
| 234 |
+
"type": "text",
|
| 235 |
+
"text": "2.1 Modalities and methodologies of depression detection:",
|
| 236 |
+
"text_level": 1,
|
| 237 |
+
"bbox": [
|
| 238 |
+
112,
|
| 239 |
+
445,
|
| 240 |
+
529,
|
| 241 |
+
460
|
| 242 |
+
],
|
| 243 |
+
"page_idx": 1
|
| 244 |
+
},
|
| 245 |
+
{
|
| 246 |
+
"type": "text",
|
| 247 |
+
"text": "For detecting depression, the data was collected by various methods like clinical interviews Al Hanai et al. [2018]Dibeklioglu et al. [2015], analysing the behaviourAlghowinem et al. [2016], monitoring facial and speech modulationsNasir et al. [2016], physical exams with Depression scales Havigerova et al. [2019]Stankevich et al. [2019], videos and audios Morales and Levitan [2016], etc. Since, the social media users are rapidly increasing day by day, social media data can also be considered as a main source for detecting the mental health. This key idea gave rise to the most utilized data set E-Risk@CLEF-2017 pilot task data set Losada et al. [2017] that was collected from Reddit. In addition to this data set, many other data sets such as DAIC corpus Al Hanai et al. [2018], AVEC Morales and Levitan [2016], etc. also evolved that detects depression from the social media data. Though few benchmark data set exists to detect depression, more researchers tend to collect data from social media and create their own data sets.",
|
| 248 |
+
"bbox": [
|
| 249 |
+
109,
|
| 250 |
+
470,
|
| 251 |
+
883,
|
| 252 |
+
595
|
| 253 |
+
],
|
| 254 |
+
"page_idx": 1
|
| 255 |
+
},
|
| 256 |
+
{
|
| 257 |
+
"type": "text",
|
| 258 |
+
"text": "2.2 Data collection from social media:",
|
| 259 |
+
"text_level": 1,
|
| 260 |
+
"bbox": [
|
| 261 |
+
112,
|
| 262 |
+
612,
|
| 263 |
+
390,
|
| 264 |
+
626
|
| 265 |
+
],
|
| 266 |
+
"page_idx": 1
|
| 267 |
+
},
|
| 268 |
+
{
|
| 269 |
+
"type": "text",
|
| 270 |
+
"text": "The social media texts were collected from open source platforms like Reddit Wolohan et al. [2018]Tadesse et al. [2019], FacebookEichstaedt et al. [2018], Twitter Reece et al. [2017]Tsugawa et al. [2015]Deshpande and Rao [2017]Lin et al. [2020], Live journals Nguyen et al. [2014], blog postsTyshchenko [2018], Instagram Reece and Danforth [2017] etc. The data from twitter was collected using API's and annotated into depressed and not depressed classes based on key words like \"depressed, hopeless and suicide\" Deshpande and Rao [2017], using a questionnaire Tsugawa et al. [2015], survey Reece et al. [2017], etc. The data was also scrapped from groups of live journals Nguyen et al. [2014], blog postsTyshchenko [2018] and manually annotated into depressed and not depressed.",
|
| 271 |
+
"bbox": [
|
| 272 |
+
109,
|
| 273 |
+
637,
|
| 274 |
+
883,
|
| 275 |
+
736
|
| 276 |
+
],
|
| 277 |
+
"page_idx": 1
|
| 278 |
+
},
|
| 279 |
+
{
|
| 280 |
+
"type": "text",
|
| 281 |
+
"text": "Among these social media platforms, Reddit possess large amount text discussion than the other platforms and so Reddit has become widely used platform to collect social media text data recently.",
|
| 282 |
+
"bbox": [
|
| 283 |
+
109,
|
| 284 |
+
741,
|
| 285 |
+
883,
|
| 286 |
+
770
|
| 287 |
+
],
|
| 288 |
+
"page_idx": 1
|
| 289 |
+
},
|
| 290 |
+
{
|
| 291 |
+
"type": "text",
|
| 292 |
+
"text": "The data were collected from these platforms using Application Programming Interface (API) using hashtags, groups, communities, etc. The data from reddit was collected from Subreddits like \"r/depression help, r/aww, r/AskReddit, r/news, r/Showerthoughts, r/pics, r/gaming, r/depression, r/videos r todaylearned r/funny\" and annotated manually by two annotators into depressed and not depressed class Wolohan et al. [2018]. The data was also from subreddits like \"r/anxiety, r/depression and r/depression_help\" and annotated into a data set Pirina and Cöltekin [2018]. A data set was created with classes depression, suicide.watch, opiates and controlled which was collected using subreddits such as \"r/suicidewatch, r/depression\", opioid related forums and other general forums Yao et al. [2020]. A survey was also done based on the studies of depression and anxiety from the Reddit data Boettcher et al. [2021].",
|
| 293 |
+
"bbox": [
|
| 294 |
+
109,
|
| 295 |
+
775,
|
| 296 |
+
883,
|
| 297 |
+
887
|
| 298 |
+
],
|
| 299 |
+
"page_idx": 1
|
| 300 |
+
},
|
| 301 |
+
{
|
| 302 |
+
"type": "header",
|
| 303 |
+
"text": "Data set creation and Empirical analysis for detecting signs of depression",
|
| 304 |
+
"bbox": [
|
| 305 |
+
256,
|
| 306 |
+
42,
|
| 307 |
+
740,
|
| 308 |
+
56
|
| 309 |
+
],
|
| 310 |
+
"page_idx": 1
|
| 311 |
+
},
|
| 312 |
+
{
|
| 313 |
+
"type": "header",
|
| 314 |
+
"text": "A PREPRINT",
|
| 315 |
+
"bbox": [
|
| 316 |
+
802,
|
| 317 |
+
44,
|
| 318 |
+
880,
|
| 319 |
+
55
|
| 320 |
+
],
|
| 321 |
+
"page_idx": 1
|
| 322 |
+
},
|
| 323 |
+
{
|
| 324 |
+
"type": "page_footnote",
|
| 325 |
+
"text": "<sup>2</sup>https://competitions.codalab.org/competitions/36410",
|
| 326 |
+
"bbox": [
|
| 327 |
+
133,
|
| 328 |
+
896,
|
| 329 |
+
535,
|
| 330 |
+
911
|
| 331 |
+
],
|
| 332 |
+
"page_idx": 1
|
| 333 |
+
},
|
| 334 |
+
{
|
| 335 |
+
"type": "page_number",
|
| 336 |
+
"text": "2",
|
| 337 |
+
"bbox": [
|
| 338 |
+
493,
|
| 339 |
+
935,
|
| 340 |
+
504,
|
| 341 |
+
946
|
| 342 |
+
],
|
| 343 |
+
"page_idx": 1
|
| 344 |
+
},
|
| 345 |
+
{
|
| 346 |
+
"type": "table",
|
| 347 |
+
"img_path": "images/bc4c5d02115a17ef53c7196d7f53ffeb34b0afd447ed73491e3f87f50d0777bf.jpg",
|
| 348 |
+
"table_caption": [
|
| 349 |
+
"Table 1: Comparison of existing data sets"
|
| 350 |
+
],
|
| 351 |
+
"table_footnote": [],
|
| 352 |
+
"table_body": "<table><tr><td>Existing system</td><td>Social Media Platform</td><td>Class Labels</td></tr><tr><td>Eichstaedt et.al Eichstaedt et al. [2018]</td><td>Facebook</td><td>Depressed and not depressed</td></tr><tr><td>Nguyen et.al Nguyen et al. [2014]</td><td>Live journal</td><td>Depressed and control</td></tr><tr><td>Tyshchenko et. al Tyshchenko [2018]</td><td>Blog post</td><td>Clinical and Control</td></tr><tr><td>Deshpande et.al Deshpande and Rao [2017]</td><td>Twitter</td><td>Neutral and negative</td></tr><tr><td>Lin et.al Lin et al. [2020]</td><td>Twitter</td><td>Depressed and not depressed</td></tr><tr><td>Reece et.al Reece et al. [2017]</td><td>Twitter</td><td>PTSD and Depression</td></tr><tr><td>Tsugawa et.al Tsugawa et al. [2015]</td><td>Twitter</td><td>Depressed and not depressed</td></tr><tr><td>Losada et.al Losada et al. [2017]</td><td>Reddit</td><td>Depression and Not depression</td></tr><tr><td>Wolohan et.al Wolohan et al. [2018]</td><td>Reddit</td><td>Depressed and not depressed</td></tr><tr><td>Tadesse et.al Tadesse et al. [2019]</td><td>Reddit</td><td>Depression indicative and standard</td></tr><tr><td>Pirina et.al Pirina and Çöltekin [2018]</td><td>Reddit</td><td>positive and negative</td></tr><tr><td>Yao et.al Yao et al. [2020]</td><td>Reddit</td><td>Depression, Suicide watch, Control and Opiates</td></tr><tr><td>Proposed Data set</td><td>Reddit</td><td>Not depressed, moderately depressed & severely depressed</td></tr></table>",
|
| 353 |
+
"bbox": [
|
| 354 |
+
122,
|
| 355 |
+
111,
|
| 356 |
+
875,
|
| 357 |
+
422
|
| 358 |
+
],
|
| 359 |
+
"page_idx": 2
|
| 360 |
+
},
|
| 361 |
+
{
|
| 362 |
+
"type": "text",
|
| 363 |
+
"text": "From the Table 1, it is clear that all these research works have collected the social media data only to detect the presence of depression. Although, diagnosing depression is important, detecting the level of depression is more crucial for further treatment. And thus, we propose a data set that detects the level of depression.",
|
| 364 |
+
"bbox": [
|
| 365 |
+
111,
|
| 366 |
+
452,
|
| 367 |
+
883,
|
| 368 |
+
494
|
| 369 |
+
],
|
| 370 |
+
"page_idx": 2
|
| 371 |
+
},
|
| 372 |
+
{
|
| 373 |
+
"type": "text",
|
| 374 |
+
"text": "3 Proposed Work",
|
| 375 |
+
"text_level": 1,
|
| 376 |
+
"bbox": [
|
| 377 |
+
112,
|
| 378 |
+
520,
|
| 379 |
+
277,
|
| 380 |
+
537
|
| 381 |
+
],
|
| 382 |
+
"page_idx": 2
|
| 383 |
+
},
|
| 384 |
+
{
|
| 385 |
+
"type": "text",
|
| 386 |
+
"text": "We propose to develop a gold standard data set that detects the levels of depression as not depressed, moderately depressed and severely depressed. Initially, the data set was created by collecting the data from the social media platform, Reddit. For collecting the data from archives of Reddit, two way communication is needed, which requires app authentication. After getting proper authentication, the subreddits from which the data must be collected are chosen and the data was extracted. After extracting the data, the data is pre-processed and exported in the required format which forms the data set. The data were then annotated into levels of depression by domain experts following the annotation guidelines. After annotation, the inter-rater agreement is calculated to analyze the quality of data and annotation. Then, the corpus is formed using the mutually annotated instances. Baseline models were also employed on the corpus to analyze the performance. To overcome the data imbalance problem, data augmentation technique was applied and their impact on performance was also analyzed.",
|
| 387 |
+
"bbox": [
|
| 388 |
+
111,
|
| 389 |
+
554,
|
| 390 |
+
883,
|
| 391 |
+
693
|
| 392 |
+
],
|
| 393 |
+
"page_idx": 2
|
| 394 |
+
},
|
| 395 |
+
{
|
| 396 |
+
"type": "text",
|
| 397 |
+
"text": "3.1 Data set creation:",
|
| 398 |
+
"text_level": 1,
|
| 399 |
+
"bbox": [
|
| 400 |
+
112,
|
| 401 |
+
715,
|
| 402 |
+
277,
|
| 403 |
+
729
|
| 404 |
+
],
|
| 405 |
+
"page_idx": 2
|
| 406 |
+
},
|
| 407 |
+
{
|
| 408 |
+
"type": "text",
|
| 409 |
+
"text": "For creating the data set, a suitable social media platform is chosen initially and data is scraped using suitable methods. After scraping the data, the data is processed and dumped in a suitable format.",
|
| 410 |
+
"bbox": [
|
| 411 |
+
111,
|
| 412 |
+
743,
|
| 413 |
+
883,
|
| 414 |
+
772
|
| 415 |
+
],
|
| 416 |
+
"page_idx": 2
|
| 417 |
+
},
|
| 418 |
+
{
|
| 419 |
+
"type": "text",
|
| 420 |
+
"text": "3.1.1 Data collection:",
|
| 421 |
+
"text_level": 1,
|
| 422 |
+
"bbox": [
|
| 423 |
+
112,
|
| 424 |
+
792,
|
| 425 |
+
276,
|
| 426 |
+
806
|
| 427 |
+
],
|
| 428 |
+
"page_idx": 2
|
| 429 |
+
},
|
| 430 |
+
{
|
| 431 |
+
"type": "text",
|
| 432 |
+
"text": "For creating the data set, the data was collected from Reddit<sup>3</sup>, an open source social media platform since it has more textual data when compared to other social media platforms. This data will be of postings format which includes only one or more statements of an individual. The postings data are scraped from the Reddit archives using the API \"pushshift\".",
|
| 433 |
+
"bbox": [
|
| 434 |
+
111,
|
| 435 |
+
816,
|
| 436 |
+
883,
|
| 437 |
+
875
|
| 438 |
+
],
|
| 439 |
+
"page_idx": 2
|
| 440 |
+
},
|
| 441 |
+
{
|
| 442 |
+
"type": "header",
|
| 443 |
+
"text": "Data set creation and Empirical analysis for detecting signs of depression",
|
| 444 |
+
"bbox": [
|
| 445 |
+
256,
|
| 446 |
+
42,
|
| 447 |
+
738,
|
| 448 |
+
56
|
| 449 |
+
],
|
| 450 |
+
"page_idx": 2
|
| 451 |
+
},
|
| 452 |
+
{
|
| 453 |
+
"type": "header",
|
| 454 |
+
"text": "A PREPRINT",
|
| 455 |
+
"bbox": [
|
| 456 |
+
802,
|
| 457 |
+
44,
|
| 458 |
+
880,
|
| 459 |
+
55
|
| 460 |
+
],
|
| 461 |
+
"page_idx": 2
|
| 462 |
+
},
|
| 463 |
+
{
|
| 464 |
+
"type": "page_footnote",
|
| 465 |
+
"text": "<sup>3</sup>https://www.reddit.com",
|
| 466 |
+
"bbox": [
|
| 467 |
+
133,
|
| 468 |
+
893,
|
| 469 |
+
313,
|
| 470 |
+
909
|
| 471 |
+
],
|
| 472 |
+
"page_idx": 2
|
| 473 |
+
},
|
| 474 |
+
{
|
| 475 |
+
"type": "page_number",
|
| 476 |
+
"text": "3",
|
| 477 |
+
"bbox": [
|
| 478 |
+
493,
|
| 479 |
+
935,
|
| 480 |
+
504,
|
| 481 |
+
946
|
| 482 |
+
],
|
| 483 |
+
"page_idx": 2
|
| 484 |
+
},
|
| 485 |
+
{
|
| 486 |
+
"type": "text",
|
| 487 |
+
"text": "3.1.2 App authentication:",
|
| 488 |
+
"text_level": 1,
|
| 489 |
+
"bbox": [
|
| 490 |
+
112,
|
| 491 |
+
90,
|
| 492 |
+
305,
|
| 493 |
+
106
|
| 494 |
+
],
|
| 495 |
+
"page_idx": 3
|
| 496 |
+
},
|
| 497 |
+
{
|
| 498 |
+
"type": "text",
|
| 499 |
+
"text": "For scraping the data from Reddit achieves, Python Reddit API Wrapper(PRAW) is used. The data can be only scraped after getting authentication from the Reddit platform. This authentication process involves creation of an application in their domain, for which a unique client secret key and client id will be assigned. Thus, PRAW allows a two way communication only with these credentials of user_agent (application name), client_id and client_secret to get data from Reddit.",
|
| 500 |
+
"bbox": [
|
| 501 |
+
109,
|
| 502 |
+
114,
|
| 503 |
+
883,
|
| 504 |
+
184
|
| 505 |
+
],
|
| 506 |
+
"page_idx": 3
|
| 507 |
+
},
|
| 508 |
+
{
|
| 509 |
+
"type": "text",
|
| 510 |
+
"text": "3.1.3 Subreddit selection",
|
| 511 |
+
"text_level": 1,
|
| 512 |
+
"bbox": [
|
| 513 |
+
112,
|
| 514 |
+
202,
|
| 515 |
+
302,
|
| 516 |
+
215
|
| 517 |
+
],
|
| 518 |
+
"page_idx": 3
|
| 519 |
+
},
|
| 520 |
+
{
|
| 521 |
+
"type": "text",
|
| 522 |
+
"text": "Reddit is a collection of million groups or forums called subreddits. For collecting the confessions or discussion of people about their mental health, data was scraped from the archives of subreddits groups like \"r/Mental Health, r/depression, r/loneliness, r/stress, r/anxiety\".",
|
| 523 |
+
"bbox": [
|
| 524 |
+
109,
|
| 525 |
+
226,
|
| 526 |
+
883,
|
| 527 |
+
268
|
| 528 |
+
],
|
| 529 |
+
"page_idx": 3
|
| 530 |
+
},
|
| 531 |
+
{
|
| 532 |
+
"type": "text",
|
| 533 |
+
"text": "3.1.4 Data extraction:",
|
| 534 |
+
"text_level": 1,
|
| 535 |
+
"bbox": [
|
| 536 |
+
112,
|
| 537 |
+
286,
|
| 538 |
+
279,
|
| 539 |
+
300
|
| 540 |
+
],
|
| 541 |
+
"page_idx": 3
|
| 542 |
+
},
|
| 543 |
+
{
|
| 544 |
+
"type": "text",
|
| 545 |
+
"text": "For each posting, the details such as post ID, title, URL, publish date, name of the subreddit, score of the post and total number of comments can be collected using PRAW. Among these data, PostID, title, text, URL, date and subreddit name are all collected in dictionary format.",
|
| 546 |
+
"bbox": [
|
| 547 |
+
109,
|
| 548 |
+
310,
|
| 549 |
+
883,
|
| 550 |
+
354
|
| 551 |
+
],
|
| 552 |
+
"page_idx": 3
|
| 553 |
+
},
|
| 554 |
+
{
|
| 555 |
+
"type": "text",
|
| 556 |
+
"text": "3.1.5 Data pre-processing and exporting:",
|
| 557 |
+
"text_level": 1,
|
| 558 |
+
"bbox": [
|
| 559 |
+
112,
|
| 560 |
+
369,
|
| 561 |
+
413,
|
| 562 |
+
386
|
| 563 |
+
],
|
| 564 |
+
"page_idx": 3
|
| 565 |
+
},
|
| 566 |
+
{
|
| 567 |
+
"type": "text",
|
| 568 |
+
"text": "After collecting these data, the text and title part are pre-processed by removing the non-ASCII characters and emoticons to get a clean data set. The processed data is exported into a Comma Separated Values (.csv) format file with the five columns. The sample of the collected postings is shown in Table 2.",
|
| 569 |
+
"bbox": [
|
| 570 |
+
109,
|
| 571 |
+
393,
|
| 572 |
+
883,
|
| 573 |
+
436
|
| 574 |
+
],
|
| 575 |
+
"page_idx": 3
|
| 576 |
+
},
|
| 577 |
+
{
|
| 578 |
+
"type": "table",
|
| 579 |
+
"img_path": "images/9314ff605d2098f5f2c105dc4ac2280cfd7090aadd0f570faaf6d2ae9cfd64d4.jpg",
|
| 580 |
+
"table_caption": [
|
| 581 |
+
"Table 2: Sample Postings data"
|
| 582 |
+
],
|
| 583 |
+
"table_footnote": [],
|
| 584 |
+
"table_body": "<table><tr><td>Post ID</td><td>Title</td><td>Text</td><td>Url</td><td>Publish date</td><td>Subreddit</td></tr><tr><td>g69ppt</td><td>Don’t want to get of bed</td><td>I’m done with me crying all day and thinking to myself that I can’t do a thing and I don’t what to get out of bed at all</td><td>https://www.reddit.com/r/depression/comments/g69ppt/dont_want_to_get_of.bed/4</td><td>2020-04-23 02:51:32</td><td>depression</td></tr><tr><td>gb9zei</td><td>Today is a day where I feel emptier than on other days.</td><td>It’s like I am alone with all my problems. I am sad about the fact I can’t trust anyone and nobody could help me because I feel like nobody understand how I feel. Depression is holding me tight today..</td><td>https://www.reddit.com/r/depression/comments/gb9zei/today_is_a_day_where_i Feel_emptier _than_on_other/5</td><td>2020-05-01 08:10:06</td><td>depression</td></tr></table>",
|
| 585 |
+
"bbox": [
|
| 586 |
+
112,
|
| 587 |
+
474,
|
| 588 |
+
870,
|
| 589 |
+
772
|
| 590 |
+
],
|
| 591 |
+
"page_idx": 3
|
| 592 |
+
},
|
| 593 |
+
{
|
| 594 |
+
"type": "text",
|
| 595 |
+
"text": "3.2 Data Annotation",
|
| 596 |
+
"text_level": 1,
|
| 597 |
+
"bbox": [
|
| 598 |
+
112,
|
| 599 |
+
801,
|
| 600 |
+
272,
|
| 601 |
+
814
|
| 602 |
+
],
|
| 603 |
+
"page_idx": 3
|
| 604 |
+
},
|
| 605 |
+
{
|
| 606 |
+
"type": "text",
|
| 607 |
+
"text": "After collecting the data, the data were annotated according to the signs of depression. Although all the postings were collected from subreddits that exhibit the characteristics of mental illness, there is a possibility of postings that do not confess or discuss depression. Thus, the collected postings data were annotated by two domain experts into three labels that denote the level of signs of depression namely \"Not depressed, Moderate and Severe\". Framing the annotation guidelines for postings data is difficult since the mental health of an individual has to be analyzed using his/her single postings. For annotating the data into three classes, the guidelines were formatted as follows:",
|
| 608 |
+
"bbox": [
|
| 609 |
+
109,
|
| 610 |
+
827,
|
| 611 |
+
883,
|
| 612 |
+
910
|
| 613 |
+
],
|
| 614 |
+
"page_idx": 3
|
| 615 |
+
},
|
| 616 |
+
{
|
| 617 |
+
"type": "header",
|
| 618 |
+
"text": "Data set creation and Empirical analysis for detecting signs of depression",
|
| 619 |
+
"bbox": [
|
| 620 |
+
256,
|
| 621 |
+
42,
|
| 622 |
+
740,
|
| 623 |
+
56
|
| 624 |
+
],
|
| 625 |
+
"page_idx": 3
|
| 626 |
+
},
|
| 627 |
+
{
|
| 628 |
+
"type": "header",
|
| 629 |
+
"text": "A PREPRINT",
|
| 630 |
+
"bbox": [
|
| 631 |
+
802,
|
| 632 |
+
44,
|
| 633 |
+
880,
|
| 634 |
+
55
|
| 635 |
+
],
|
| 636 |
+
"page_idx": 3
|
| 637 |
+
},
|
| 638 |
+
{
|
| 639 |
+
"type": "page_number",
|
| 640 |
+
"text": "4",
|
| 641 |
+
"bbox": [
|
| 642 |
+
491,
|
| 643 |
+
935,
|
| 644 |
+
504,
|
| 645 |
+
946
|
| 646 |
+
],
|
| 647 |
+
"page_idx": 3
|
| 648 |
+
},
|
| 649 |
+
{
|
| 650 |
+
"type": "text",
|
| 651 |
+
"text": "3.2.1 Label 1 - Not depressed :",
|
| 652 |
+
"text_level": 1,
|
| 653 |
+
"bbox": [
|
| 654 |
+
112,
|
| 655 |
+
90,
|
| 656 |
+
339,
|
| 657 |
+
104
|
| 658 |
+
],
|
| 659 |
+
"page_idx": 4
|
| 660 |
+
},
|
| 661 |
+
{
|
| 662 |
+
"type": "text",
|
| 663 |
+
"text": "The postings data will be annotated as \"Not Depressed\", if the postings data reflect one of the following mannerism:",
|
| 664 |
+
"bbox": [
|
| 665 |
+
111,
|
| 666 |
+
116,
|
| 667 |
+
874,
|
| 668 |
+
131
|
| 669 |
+
],
|
| 670 |
+
"page_idx": 4
|
| 671 |
+
},
|
| 672 |
+
{
|
| 673 |
+
"type": "list",
|
| 674 |
+
"sub_type": "text",
|
| 675 |
+
"list_items": [
|
| 676 |
+
"- If the statements have only one or two lines about irrelevant topics.",
|
| 677 |
+
"- If the statements reflect momentary feelings of present situation.",
|
| 678 |
+
"- If the statements are about asking questions about any or medication",
|
| 679 |
+
"- If the statement is about ask/seek help for friend's difficulties."
|
| 680 |
+
],
|
| 681 |
+
"bbox": [
|
| 682 |
+
156,
|
| 683 |
+
143,
|
| 684 |
+
620,
|
| 685 |
+
220
|
| 686 |
+
],
|
| 687 |
+
"page_idx": 4
|
| 688 |
+
},
|
| 689 |
+
{
|
| 690 |
+
"type": "text",
|
| 691 |
+
"text": "Example 1:",
|
| 692 |
+
"text_level": 1,
|
| 693 |
+
"bbox": [
|
| 694 |
+
138,
|
| 695 |
+
244,
|
| 696 |
+
220,
|
| 697 |
+
258
|
| 698 |
+
],
|
| 699 |
+
"page_idx": 4
|
| 700 |
+
},
|
| 701 |
+
{
|
| 702 |
+
"type": "text",
|
| 703 |
+
"text": "The holidays are the most difficult.",
|
| 704 |
+
"bbox": [
|
| 705 |
+
138,
|
| 706 |
+
273,
|
| 707 |
+
369,
|
| 708 |
+
286
|
| 709 |
+
],
|
| 710 |
+
"page_idx": 4
|
| 711 |
+
},
|
| 712 |
+
{
|
| 713 |
+
"type": "text",
|
| 714 |
+
"text": "Not a big reddit poster, but I felt like this has been past due for myself. The holidays honestly are so hard for me to get through. I've spent the last 6 years of major holidays alone. Mostly because of my retail job, I never get enough time off around the holidays to go home and spend it with family, nor have they been able to visit me. My condolences to anyone else spending this time of year alone no matter what the circumstances may be. I moved to a new state 9 months ago and it's been a tough struggle meeting new friends as I didn't know anyone here before I moved. Now it's new years and all of my \"friends\" I've made while here yet again flaked on me (was actually excited to have plans for the first time I remember in a while), which I recently found out has been a common occurrence of them just getting together without me. (Which I'm used to at this point, it is what it is). It just sucks knowing you're always the last choice in anyone's lives. And that my depression may be the cause of my 'boringness'/lack of interest my friends have towards me. Any tips on making friends for someone struggling mentally? I'm just tired of this constant weight of loneliness bearing down on me. I seriously can't remember the last time someone went out of their way to invite me to something. It seems like I'm always asking to tag along, and then I'm just a burden at that point, which is why I'm starting to lose all hope.",
|
| 715 |
+
"bbox": [
|
| 716 |
+
135,
|
| 717 |
+
286,
|
| 718 |
+
859,
|
| 719 |
+
467
|
| 720 |
+
],
|
| 721 |
+
"page_idx": 4
|
| 722 |
+
},
|
| 723 |
+
{
|
| 724 |
+
"type": "text",
|
| 725 |
+
"text": "Whoever takes the time to read this, thank you.",
|
| 726 |
+
"bbox": [
|
| 727 |
+
138,
|
| 728 |
+
479,
|
| 729 |
+
447,
|
| 730 |
+
494
|
| 731 |
+
],
|
| 732 |
+
"page_idx": 4
|
| 733 |
+
},
|
| 734 |
+
{
|
| 735 |
+
"type": "text",
|
| 736 |
+
"text": "3.2.2 Label 2 - Moderately depressed :",
|
| 737 |
+
"text_level": 1,
|
| 738 |
+
"bbox": [
|
| 739 |
+
112,
|
| 740 |
+
526,
|
| 741 |
+
393,
|
| 742 |
+
540
|
| 743 |
+
],
|
| 744 |
+
"page_idx": 4
|
| 745 |
+
},
|
| 746 |
+
{
|
| 747 |
+
"type": "text",
|
| 748 |
+
"text": "The postings data will be annotated as \"moderately depressed\", if the postings falls under these conditions:",
|
| 749 |
+
"bbox": [
|
| 750 |
+
111,
|
| 751 |
+
551,
|
| 752 |
+
812,
|
| 753 |
+
566
|
| 754 |
+
],
|
| 755 |
+
"page_idx": 4
|
| 756 |
+
},
|
| 757 |
+
{
|
| 758 |
+
"type": "list",
|
| 759 |
+
"sub_type": "text",
|
| 760 |
+
"list_items": [
|
| 761 |
+
"- If the statements reflect change in feelings (feeling low for some time and feeling better for some time).",
|
| 762 |
+
"- If the statement shows that they aren't feeling completely immersed in any situations",
|
| 763 |
+
"- If the statements show that they have hope for life."
|
| 764 |
+
],
|
| 765 |
+
"bbox": [
|
| 766 |
+
156,
|
| 767 |
+
578,
|
| 768 |
+
849,
|
| 769 |
+
635
|
| 770 |
+
],
|
| 771 |
+
"page_idx": 4
|
| 772 |
+
},
|
| 773 |
+
{
|
| 774 |
+
"type": "text",
|
| 775 |
+
"text": "Example 1:",
|
| 776 |
+
"text_level": 1,
|
| 777 |
+
"bbox": [
|
| 778 |
+
138,
|
| 779 |
+
659,
|
| 780 |
+
223,
|
| 781 |
+
672
|
| 782 |
+
],
|
| 783 |
+
"page_idx": 4
|
| 784 |
+
},
|
| 785 |
+
{
|
| 786 |
+
"type": "text",
|
| 787 |
+
"text": "If I disappeared today, would it really matter?",
|
| 788 |
+
"bbox": [
|
| 789 |
+
138,
|
| 790 |
+
686,
|
| 791 |
+
439,
|
| 792 |
+
700
|
| 793 |
+
],
|
| 794 |
+
"page_idx": 4
|
| 795 |
+
},
|
| 796 |
+
{
|
| 797 |
+
"type": "text",
|
| 798 |
+
"text": "I'm just too tired to go on, but at the same time I'm too tired to end it. I always thought about this but with the quarantine I just realised it is true. My friends never felt close to me, just like the only two relationships I have ever been in. They never cared about me, to the point where I even asked for help and they just turned a blind eye. And my family isn't any better. I don't know what to do, and I believe it won't matter if I do something or not. I'm sorry if my English isn't good, it isn't my first language.",
|
| 799 |
+
"bbox": [
|
| 800 |
+
135,
|
| 801 |
+
700,
|
| 802 |
+
857,
|
| 803 |
+
771
|
| 804 |
+
],
|
| 805 |
+
"page_idx": 4
|
| 806 |
+
},
|
| 807 |
+
{
|
| 808 |
+
"type": "text",
|
| 809 |
+
"text": "3.2.3 Label - 3 : Severely depressed :",
|
| 810 |
+
"text_level": 1,
|
| 811 |
+
"bbox": [
|
| 812 |
+
112,
|
| 813 |
+
813,
|
| 814 |
+
382,
|
| 815 |
+
828
|
| 816 |
+
],
|
| 817 |
+
"page_idx": 4
|
| 818 |
+
},
|
| 819 |
+
{
|
| 820 |
+
"type": "text",
|
| 821 |
+
"text": "The data will be annotated as \"Severely depressed\", if the postings have one of the following scenarios:",
|
| 822 |
+
"bbox": [
|
| 823 |
+
111,
|
| 824 |
+
838,
|
| 825 |
+
789,
|
| 826 |
+
853
|
| 827 |
+
],
|
| 828 |
+
"page_idx": 4
|
| 829 |
+
},
|
| 830 |
+
{
|
| 831 |
+
"type": "list",
|
| 832 |
+
"sub_type": "text",
|
| 833 |
+
"list_items": [
|
| 834 |
+
"- If the statements express more than one disorder conditions.",
|
| 835 |
+
"- If the statements explain about history of suicide attempts."
|
| 836 |
+
],
|
| 837 |
+
"bbox": [
|
| 838 |
+
156,
|
| 839 |
+
864,
|
| 840 |
+
566,
|
| 841 |
+
901
|
| 842 |
+
],
|
| 843 |
+
"page_idx": 4
|
| 844 |
+
},
|
| 845 |
+
{
|
| 846 |
+
"type": "header",
|
| 847 |
+
"text": "Data set creation and Empirical analysis for detecting signs of depression",
|
| 848 |
+
"bbox": [
|
| 849 |
+
256,
|
| 850 |
+
42,
|
| 851 |
+
740,
|
| 852 |
+
56
|
| 853 |
+
],
|
| 854 |
+
"page_idx": 4
|
| 855 |
+
},
|
| 856 |
+
{
|
| 857 |
+
"type": "header",
|
| 858 |
+
"text": "A PREPRINT",
|
| 859 |
+
"bbox": [
|
| 860 |
+
802,
|
| 861 |
+
44,
|
| 862 |
+
880,
|
| 863 |
+
55
|
| 864 |
+
],
|
| 865 |
+
"page_idx": 4
|
| 866 |
+
},
|
| 867 |
+
{
|
| 868 |
+
"type": "page_number",
|
| 869 |
+
"text": "5",
|
| 870 |
+
"bbox": [
|
| 871 |
+
493,
|
| 872 |
+
935,
|
| 873 |
+
503,
|
| 874 |
+
946
|
| 875 |
+
],
|
| 876 |
+
"page_idx": 4
|
| 877 |
+
},
|
| 878 |
+
{
|
| 879 |
+
"type": "table",
|
| 880 |
+
"img_path": "images/24e537bbf77be712ecd315f2155db3bc1ef1ce53176b567e33fdbd5e93334422.jpg",
|
| 881 |
+
"table_caption": [
|
| 882 |
+
"Table 3: Landis & Koch measurement table of inter rater agreement"
|
| 883 |
+
],
|
| 884 |
+
"table_footnote": [],
|
| 885 |
+
"table_body": "<table><tr><td>Kappa value (κ)</td><td>Strength of agreement</td></tr><tr><td>< 0</td><td>Poor</td></tr><tr><td>0.01 - 0.20</td><td>Slight</td></tr><tr><td>0.21 - 0.40</td><td>Fair</td></tr><tr><td>0.41 - 0.60</td><td>Moderate</td></tr><tr><td>0.61 - 0.80</td><td>Substantial</td></tr><tr><td>0.81 - 0.99</td><td>Almost perfect agreement</td></tr></table>",
|
| 886 |
+
"bbox": [
|
| 887 |
+
330,
|
| 888 |
+
112,
|
| 889 |
+
663,
|
| 890 |
+
256
|
| 891 |
+
],
|
| 892 |
+
"page_idx": 5
|
| 893 |
+
},
|
| 894 |
+
{
|
| 895 |
+
"type": "text",
|
| 896 |
+
"text": "Example 1:",
|
| 897 |
+
"text_level": 1,
|
| 898 |
+
"bbox": [
|
| 899 |
+
138,
|
| 900 |
+
297,
|
| 901 |
+
222,
|
| 902 |
+
313
|
| 903 |
+
],
|
| 904 |
+
"page_idx": 5
|
| 905 |
+
},
|
| 906 |
+
{
|
| 907 |
+
"type": "text",
|
| 908 |
+
"text": "Getting depressed again?",
|
| 909 |
+
"bbox": [
|
| 910 |
+
138,
|
| 911 |
+
325,
|
| 912 |
+
307,
|
| 913 |
+
340
|
| 914 |
+
],
|
| 915 |
+
"page_idx": 5
|
| 916 |
+
},
|
| 917 |
+
{
|
| 918 |
+
"type": "text",
|
| 919 |
+
"text": "So I'm 22F and I have taken antidepressants the last time 4 years ago. I've had ups and downs when I got off and with 19 I was having a rough time for two months - started drinking and smoking weed a lot. Kinda managed to get back on track then and haven't been feeling too bad until now. Lately I've been feeling kinda blue and started making mistakes or have to go through stuff multiple times to do it correctly or to be able to remember it. Currently I'm having a week off and have to go back to work on monday. I just don't know I feel like I'm getting worse and want to sleep most of the time and at first I thought it's because I'm used to working a lot, but when I think about having to go back soon I feel like throwing up and at the same time doing nothing also doesn't sit well with me. I guess I'm kinda scared at the moment because I don't want to feel like I was feeling years ago and I still don't feel comfortable with my own mind and don't trust myself that I'm strong enough to pull through if depression hits me again.",
|
| 920 |
+
"bbox": [
|
| 921 |
+
135,
|
| 922 |
+
339,
|
| 923 |
+
859,
|
| 924 |
+
479
|
| 925 |
+
],
|
| 926 |
+
"page_idx": 5
|
| 927 |
+
},
|
| 928 |
+
{
|
| 929 |
+
"type": "text",
|
| 930 |
+
"text": "3.3 Inter-rater agreement",
|
| 931 |
+
"text_level": 1,
|
| 932 |
+
"bbox": [
|
| 933 |
+
112,
|
| 934 |
+
520,
|
| 935 |
+
307,
|
| 936 |
+
535
|
| 937 |
+
],
|
| 938 |
+
"page_idx": 5
|
| 939 |
+
},
|
| 940 |
+
{
|
| 941 |
+
"type": "text",
|
| 942 |
+
"text": "After annotating the data, inter-rater agreement was calculated between the decisions of two judges using kappa coefficient estimated using a per-annotator empirical prior over the class labels Artstein and Poesio [2008]. Inter-rater agreement<sup>6</sup> is the degree of agreement among independent observers who rate, code, or assess the same phenomenon. The inter rater agreement is measured using Cohen's kappa statistics Cohen [1960].",
|
| 943 |
+
"bbox": [
|
| 944 |
+
111,
|
| 945 |
+
549,
|
| 946 |
+
885,
|
| 947 |
+
604
|
| 948 |
+
],
|
| 949 |
+
"page_idx": 5
|
| 950 |
+
},
|
| 951 |
+
{
|
| 952 |
+
"type": "text",
|
| 953 |
+
"text": "The inter-rater agreement between the annotations was calculated using sklearn Pedregosa et al. [2011a]. For our annotation, the kappa value $(\\kappa)$ is 0.686. According to Landis & Koch Landis and Koch [1977] in the Table 3, the $\\kappa$ value denotes substantial agreement between the annotators, which proves the consistency of labeling according to the annotation guidelines. Thus, the mutually annotated instances form the corpus.",
|
| 954 |
+
"bbox": [
|
| 955 |
+
111,
|
| 956 |
+
609,
|
| 957 |
+
885,
|
| 958 |
+
667
|
| 959 |
+
],
|
| 960 |
+
"page_idx": 5
|
| 961 |
+
},
|
| 962 |
+
{
|
| 963 |
+
"type": "text",
|
| 964 |
+
"text": "3.4 Corpus Analysis",
|
| 965 |
+
"text_level": 1,
|
| 966 |
+
"bbox": [
|
| 967 |
+
112,
|
| 968 |
+
693,
|
| 969 |
+
269,
|
| 970 |
+
708
|
| 971 |
+
],
|
| 972 |
+
"page_idx": 5
|
| 973 |
+
},
|
| 974 |
+
{
|
| 975 |
+
"type": "text",
|
| 976 |
+
"text": "Initially 20,088 instances of postings data were annotated, out of which 16,613 instances were found to be mutually annotated instances by the two judges, and thus they were considered as instances of data set with their corresponding labels. Table 4 shows the complete statistics of the corpus.",
|
| 977 |
+
"bbox": [
|
| 978 |
+
111,
|
| 979 |
+
722,
|
| 980 |
+
883,
|
| 981 |
+
765
|
| 982 |
+
],
|
| 983 |
+
"page_idx": 5
|
| 984 |
+
},
|
| 985 |
+
{
|
| 986 |
+
"type": "text",
|
| 987 |
+
"text": "The whole corpus has 1,56,676 sentences with 26,59,938 words which shows the size of the corpus created. In the corpus, each posting with its labels is considered as each instance in the corpus. An instance in the corpus will have an average of 9.42 sentences each that varies in the range of 1 to 260 sentences with an average of 159.92 words that lies between 1 to 5065 words. The distribution of the three class labels in the data set is shown in Figure 1. As shown in figure, the data set is unbalanced with 10,494 instances of \"moderately depressed\" class, 1489 instances of \"severely depressed\" class and 4649 instances of \"Not depressed\" class which also includes some duplicate instances.",
|
| 988 |
+
"bbox": [
|
| 989 |
+
111,
|
| 990 |
+
770,
|
| 991 |
+
883,
|
| 992 |
+
854
|
| 993 |
+
],
|
| 994 |
+
"page_idx": 5
|
| 995 |
+
},
|
| 996 |
+
{
|
| 997 |
+
"type": "header",
|
| 998 |
+
"text": "Data set creation and Empirical analysis for detecting signs of depression",
|
| 999 |
+
"bbox": [
|
| 1000 |
+
256,
|
| 1001 |
+
42,
|
| 1002 |
+
740,
|
| 1003 |
+
56
|
| 1004 |
+
],
|
| 1005 |
+
"page_idx": 5
|
| 1006 |
+
},
|
| 1007 |
+
{
|
| 1008 |
+
"type": "header",
|
| 1009 |
+
"text": "A PREPRINT",
|
| 1010 |
+
"bbox": [
|
| 1011 |
+
802,
|
| 1012 |
+
44,
|
| 1013 |
+
880,
|
| 1014 |
+
55
|
| 1015 |
+
],
|
| 1016 |
+
"page_idx": 5
|
| 1017 |
+
},
|
| 1018 |
+
{
|
| 1019 |
+
"type": "page_footnote",
|
| 1020 |
+
"text": "<sup>6</sup>https://en.wikipedia.org/wiki/Interrater_reliability",
|
| 1021 |
+
"bbox": [
|
| 1022 |
+
132,
|
| 1023 |
+
892,
|
| 1024 |
+
436,
|
| 1025 |
+
907
|
| 1026 |
+
],
|
| 1027 |
+
"page_idx": 5
|
| 1028 |
+
},
|
| 1029 |
+
{
|
| 1030 |
+
"type": "page_number",
|
| 1031 |
+
"text": "6",
|
| 1032 |
+
"bbox": [
|
| 1033 |
+
493,
|
| 1034 |
+
936,
|
| 1035 |
+
504,
|
| 1036 |
+
946
|
| 1037 |
+
],
|
| 1038 |
+
"page_idx": 5
|
| 1039 |
+
},
|
| 1040 |
+
{
|
| 1041 |
+
"type": "table",
|
| 1042 |
+
"img_path": "images/add3e14e9e2d993eb883e5ce3605c35c59a7cb8341ab19dcb5b39dcda4ce5b25.jpg",
|
| 1043 |
+
"table_caption": [
|
| 1044 |
+
"Table 4: Postings data analysis"
|
| 1045 |
+
],
|
| 1046 |
+
"table_footnote": [],
|
| 1047 |
+
"table_body": "<table><tr><td>Category</td><td>Count</td></tr><tr><td>Total number of instances annotated</td><td>20,088</td></tr><tr><td>Data set instances \n(number of instances mutually annotated)</td><td>16,632</td></tr><tr><td>Total number of sentences</td><td>1,56,676</td></tr><tr><td>Total number of words</td><td>26,59,938</td></tr><tr><td>Total number of stop-words</td><td>12,47,016</td></tr><tr><td>Total number of words other than stop-words</td><td>14,12,922</td></tr><tr><td>Total number of unique words</td><td>28,415</td></tr><tr><td>Total number of unique stop-words</td><td>150</td></tr><tr><td>Total number of unique words other than stop-words</td><td>28,265</td></tr><tr><td>Range of sentences per instance</td><td>1 - 260</td></tr><tr><td>Range of words per instance</td><td>1 - 5065</td></tr><tr><td>Average number of sentences per posting instance</td><td>9.42</td></tr><tr><td>Average number of words per posting instance</td><td>159.92</td></tr></table>",
|
| 1048 |
+
"bbox": [
|
| 1049 |
+
279,
|
| 1050 |
+
112,
|
| 1051 |
+
712,
|
| 1052 |
+
410
|
| 1053 |
+
],
|
| 1054 |
+
"page_idx": 6
|
| 1055 |
+
},
|
| 1056 |
+
{
|
| 1057 |
+
"type": "image",
|
| 1058 |
+
"img_path": "images/31389850d5ab6e836a93b93a8bae53d6f19816081dd3fb0be5a39df23c4cc083.jpg",
|
| 1059 |
+
"image_caption": [
|
| 1060 |
+
"Figure 1: Class wise distribution of the data set"
|
| 1061 |
+
],
|
| 1062 |
+
"image_footnote": [],
|
| 1063 |
+
"bbox": [
|
| 1064 |
+
336,
|
| 1065 |
+
433,
|
| 1066 |
+
663,
|
| 1067 |
+
583
|
| 1068 |
+
],
|
| 1069 |
+
"page_idx": 6
|
| 1070 |
+
},
|
| 1071 |
+
{
|
| 1072 |
+
"type": "text",
|
| 1073 |
+
"text": "3.5 Base line models",
|
| 1074 |
+
"text_level": 1,
|
| 1075 |
+
"bbox": [
|
| 1076 |
+
112,
|
| 1077 |
+
642,
|
| 1078 |
+
269,
|
| 1079 |
+
656
|
| 1080 |
+
],
|
| 1081 |
+
"page_idx": 6
|
| 1082 |
+
},
|
| 1083 |
+
{
|
| 1084 |
+
"type": "text",
|
| 1085 |
+
"text": "The data set has been evaluated using traditional models which are considered as baseline models. The data set has four columns namely id, title, text and class label. For implementation, the title data and text data are initially combined. The combined text data is pre-processed, extracted features, balanced, classified using traditional classifiers and evaluated by cross validation.",
|
| 1086 |
+
"bbox": [
|
| 1087 |
+
109,
|
| 1088 |
+
669,
|
| 1089 |
+
883,
|
| 1090 |
+
724
|
| 1091 |
+
],
|
| 1092 |
+
"page_idx": 6
|
| 1093 |
+
},
|
| 1094 |
+
{
|
| 1095 |
+
"type": "text",
|
| 1096 |
+
"text": "3.5.1 Data Pre-processing:",
|
| 1097 |
+
"text_level": 1,
|
| 1098 |
+
"bbox": [
|
| 1099 |
+
112,
|
| 1100 |
+
743,
|
| 1101 |
+
313,
|
| 1102 |
+
758
|
| 1103 |
+
],
|
| 1104 |
+
"page_idx": 6
|
| 1105 |
+
},
|
| 1106 |
+
{
|
| 1107 |
+
"type": "text",
|
| 1108 |
+
"text": "The title and text column are combined together as a single text data column by filling the \"NA\" instances of both title and text data. The combined text data is cleaned by converting the words to lower case letters and removing unwanted punctuation, \"[removed]\" tags, web links, HTML links, stop words and small words (words with length less than two). After cleaning, the instances are tokenized using regextokenizer Pedregosa et al. [2011b], stemmed using porter stemmer Porter [1980] and lemmatized using wordnet lemmatizer.",
|
| 1109 |
+
"bbox": [
|
| 1110 |
+
109,
|
| 1111 |
+
768,
|
| 1112 |
+
883,
|
| 1113 |
+
839
|
| 1114 |
+
],
|
| 1115 |
+
"page_idx": 6
|
| 1116 |
+
},
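The cleaning and normalisation steps described in 3.5.1 can be sketched as below. This is a minimal illustration, assuming NLTK's RegexpTokenizer, Porter stemmer, WordNet lemmatizer and English stop-word list; the exact token pattern and stop-word list used by the authors are not specified, so treat those choices as assumptions.

```python
import re

from nltk.corpus import stopwords
from nltk.stem import PorterStemmer, WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer

# nltk.download("stopwords") and nltk.download("wordnet") may be needed once.
tokenizer = RegexpTokenizer(r"\w+")
stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words("english"))

def preprocess(title, text):
    # Combine title and text, treating missing/"NA" fields as empty.
    combined = " ".join(s for s in (title, text) if s and s != "NA").lower()
    # Drop "[removed]" tags, web/HTML links and leftover markup.
    combined = combined.replace("[removed]", " ")
    combined = re.sub(r"http\S+|www\.\S+|<[^>]+>", " ", combined)
    # Tokenize (the regex tokenizer also strips punctuation), remove stop words
    # and words shorter than two characters, then stem and lemmatize.
    tokens = tokenizer.tokenize(combined)
    tokens = [t for t in tokens if t not in stop_words and len(t) >= 2]
    return [lemmatizer.lemmatize(stemmer.stem(t)) for t in tokens]
```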
|
| 1117 |
+
{
|
| 1118 |
+
"type": "text",
|
| 1119 |
+
"text": "3.5.2 Feature extraction:",
|
| 1120 |
+
"text_level": 1,
|
| 1121 |
+
"bbox": [
|
| 1122 |
+
112,
|
| 1123 |
+
856,
|
| 1124 |
+
302,
|
| 1125 |
+
871
|
| 1126 |
+
],
|
| 1127 |
+
"page_idx": 6
|
| 1128 |
+
},
|
| 1129 |
+
{
|
| 1130 |
+
"type": "text",
|
| 1131 |
+
"text": "The features were extracted using three vectorizers namely Word2Vec, Term Frequency - Inverse Document Frequency (TF-IDF) vectorizer and Glove Pennington et al. [2014] vectorizer.",
|
| 1132 |
+
"bbox": [
|
| 1133 |
+
109,
|
| 1134 |
+
881,
|
| 1135 |
+
883,
|
| 1136 |
+
910
|
| 1137 |
+
],
|
| 1138 |
+
"page_idx": 6
|
| 1139 |
+
},
|
| 1140 |
+
{
|
| 1141 |
+
"type": "header",
|
| 1142 |
+
"text": "Data set creation and Empirical analysis for detecting signs of depression",
|
| 1143 |
+
"bbox": [
|
| 1144 |
+
256,
|
| 1145 |
+
42,
|
| 1146 |
+
740,
|
| 1147 |
+
56
|
| 1148 |
+
],
|
| 1149 |
+
"page_idx": 6
|
| 1150 |
+
},
|
| 1151 |
+
{
|
| 1152 |
+
"type": "header",
|
| 1153 |
+
"text": "A PREPRINT",
|
| 1154 |
+
"bbox": [
|
| 1155 |
+
802,
|
| 1156 |
+
44,
|
| 1157 |
+
880,
|
| 1158 |
+
55
|
| 1159 |
+
],
|
| 1160 |
+
"page_idx": 6
|
| 1161 |
+
},
|
| 1162 |
+
{
|
| 1163 |
+
"type": "page_number",
|
| 1164 |
+
"text": "7",
|
| 1165 |
+
"bbox": [
|
| 1166 |
+
493,
|
| 1167 |
+
935,
|
| 1168 |
+
504,
|
| 1169 |
+
946
|
| 1170 |
+
],
|
| 1171 |
+
"page_idx": 6
|
| 1172 |
+
},
|
| 1173 |
+
{
|
| 1174 |
+
"type": "list",
|
| 1175 |
+
"sub_type": "text",
|
| 1176 |
+
"list_items": [
|
| 1177 |
+
"- Word2Vec: It produces a vector that represents the context of the word considering the occurrence of the word. The vectors are generated using Continuous Bag Of Words.",
|
| 1178 |
+
"- TF-IDF: It produces a score considering the occurrence of the word in the document. It is based on the relevance of a topic in a particular document. The vectors are calculated using four grams considering a maximum of 2000 features.",
|
| 1179 |
+
"- Glove: It produces the word embeddings considering the occurrence and co-occurrence of the words with reduced dimensionality. The words are mapped to a word embedding using 6 Billion pre-trained tokens with 100 features each."
|
| 1180 |
+
],
|
| 1181 |
+
"bbox": [
|
| 1182 |
+
156,
|
| 1183 |
+
90,
|
| 1184 |
+
879,
|
| 1185 |
+
208
|
| 1186 |
+
],
|
| 1187 |
+
"page_idx": 7
|
| 1188 |
+
},
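The three featurisations listed above can be sketched as follows. The stated settings (up to 4-grams capped at 2000 TF-IDF features, CBOW for Word2Vec, 100-dimensional GloVe vectors) are taken from the list; the gensim API, the n-gram lower bound and the mean-pooling of token vectors into a single posting vector are assumptions.

```python
import numpy as np
from gensim.models import Word2Vec
from sklearn.feature_extraction.text import TfidfVectorizer

# TF-IDF: n-grams up to length four, capped at 2000 features.
tfidf = TfidfVectorizer(ngram_range=(1, 4), max_features=2000)

# Word2Vec: CBOW embeddings trained on the tokenized postings
# (token_lists is a list of token lists, one per posting).
def train_word2vec(token_lists, dim=100):
    return Word2Vec(sentences=token_lists, vector_size=dim, sg=0)  # sg=0 -> CBOW

# Posting vector for Word2Vec/GloVe: average the per-token vectors,
# falling back to a zero vector when no token is in the vocabulary.
def posting_vector(tokens, embeddings, dim=100):
    vecs = [embeddings[t] for t in tokens if t in embeddings]
    return np.mean(vecs, axis=0) if vecs else np.zeros(dim)
```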
|
| 1189 |
+
{
|
| 1190 |
+
"type": "text",
|
| 1191 |
+
"text": "3.5.3 Classifiers:",
|
| 1192 |
+
"text_level": 1,
|
| 1193 |
+
"bbox": [
|
| 1194 |
+
112,
|
| 1195 |
+
223,
|
| 1196 |
+
243,
|
| 1197 |
+
238
|
| 1198 |
+
],
|
| 1199 |
+
"page_idx": 7
|
| 1200 |
+
},
|
| 1201 |
+
{
|
| 1202 |
+
"type": "text",
|
| 1203 |
+
"text": "Twelve different classifiers that include Ada Boost Classifier, Decision Tree, Gaussian Naive Bayes, K-Nearest Neighbour, linear-Support Vector Machine, Linear Deterministic Analysis, Logistic Regression, Multi-layer Perceptron, Qua-",
|
| 1204 |
+
"bbox": [
|
| 1205 |
+
111,
|
| 1206 |
+
247,
|
| 1207 |
+
883,
|
| 1208 |
+
287
|
| 1209 |
+
],
|
| 1210 |
+
"page_idx": 7
|
| 1211 |
+
},
|
| 1212 |
+
{
|
| 1213 |
+
"type": "text",
|
| 1214 |
+
"text": "dratic Deterministic Analysis, Radial Basis Function - Support Vector Machine and Random Forest of Scikit-learn Pedregosa et al. [2011b] were used for classification.",
|
| 1215 |
+
"bbox": [
|
| 1216 |
+
111,
|
| 1217 |
+
289,
|
| 1218 |
+
883,
|
| 1219 |
+
316
|
| 1220 |
+
],
|
| 1221 |
+
"page_idx": 7
|
| 1222 |
+
},
|
| 1223 |
+
{
|
| 1224 |
+
"type": "list",
|
| 1225 |
+
"sub_type": "text",
|
| 1226 |
+
"list_items": [
|
| 1227 |
+
"- Ada Boost Classifier(ABC): The Adaptive Boosting algorithm is a collection of N estimator models that assigns higher weights to the mis-classified samples in the next model. In our implementation, 100 estimator models with t0 random state at a learning rate of 0.1 were used to fine tune the model.",
|
| 1228 |
+
"- Decision Tree (DT): The decision tree classifier predicts the target value based on the decision rules that was formed using features to identify the target variable. The decision rules are formed using gini index and entropy for information gain. For implementing the decision trees, the decision tree classifier was fine tuned with two splits of minimum samples of one leaf node each by calculating gini to choose the best split and random state as 0.",
|
| 1229 |
+
"- Gaussian Naive Bayes (GNB): The Gaussian normal distribution variant of Naive Bayes classifier that depends on the Bayes theorem is Gaussian Naive Bayes.",
|
| 1230 |
+
"- K-Nearest Neighbour(KNN): KNN classifies the data point by plotting them and finding the similarity between the data points. In implementation, number of neighbours were set as three with equal weights and euclidean distance as metric to calculate distance.",
|
| 1231 |
+
"- Logistic Regression (LR): The probabilistic model that predicts the class label based on the sigmoid function for binary classification. As our data set are multi-class data sets, multi-nominal logistic regression was used to evaluate the data sets. For implementation, the classifier was trained with a tolerance of 1e-4, 1.0 as inverse of regularization strength and intercept scaling as 1.",
|
| 1232 |
+
"- Multi-layer Perceptron (MLP): The artificial neural network that is trained to predict the class label along with back propagation of error. The multi-layer perceptron of two layers of 100 hidden nodes each was trained with relu activation function, adam optimizer, learning rate of 0.001 for a maximum 300 iterations.",
|
| 1233 |
+
"- Discriminant Analysis: The generative model that utilizes Gaussian distribution for classification by assuming each class has a different co-variance. For implementation, the co-variance is calculated with threshold of 1.0e-04. Linear DA (LDA) and Quadratic DA (QDA) both were implemented.",
|
| 1234 |
+
"- Support Vector Machine: The supervised model that projects the data into higher dimensions and then classifies using hyper-planes. The model was trained with RBF kernel (RBF-SVM) and linear kernel (L-SVM) function of three degree, 0.1 regularization parameter without any specifying any maximum iterations.",
|
| 1235 |
+
"- Random Forest (RF): Random Forest combines many decision trees as in ensemble method to generate predictions. It overcomes the limitation of decision trees by bagging and bootstrap aggregation. It was implemented with 100 number of estimators."
|
| 1236 |
+
],
|
| 1237 |
+
"bbox": [
|
| 1238 |
+
156,
|
| 1239 |
+
325,
|
| 1240 |
+
879,
|
| 1241 |
+
757
|
| 1242 |
+
],
|
| 1243 |
+
"page_idx": 7
|
| 1244 |
+
},
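A rough sketch of how the classifiers enumerated above could be instantiated in scikit-learn, setting only the hyper-parameters stated in the list and leaving everything else at library defaults (an assumption).

```python
from sklearn.discriminant_analysis import (LinearDiscriminantAnalysis,
                                            QuadraticDiscriminantAnalysis)
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier

classifiers = {
    "ABC": AdaBoostClassifier(n_estimators=100, learning_rate=0.1, random_state=0),
    "DT": DecisionTreeClassifier(criterion="gini", min_samples_split=2,
                                 min_samples_leaf=1, random_state=0),
    "GNB": GaussianNB(),
    "KNN": KNeighborsClassifier(n_neighbors=3, weights="uniform", metric="euclidean"),
    "L-SVM": SVC(kernel="linear", C=0.1),
    "LDA": LinearDiscriminantAnalysis(),
    "LR": LogisticRegression(tol=1e-4, C=1.0, intercept_scaling=1),
    "MLP": MLPClassifier(hidden_layer_sizes=(100, 100), activation="relu",
                         solver="adam", learning_rate_init=0.001, max_iter=300),
    "QDA": QuadraticDiscriminantAnalysis(),
    "RBF-SVM": SVC(kernel="rbf", C=0.1, degree=3),
    "RF": RandomForestClassifier(n_estimators=100),
}
```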
|
| 1245 |
+
{
|
| 1246 |
+
"type": "text",
|
| 1247 |
+
"text": "4 Implementation and Results",
|
| 1248 |
+
"text_level": 1,
|
| 1249 |
+
"bbox": [
|
| 1250 |
+
111,
|
| 1251 |
+
777,
|
| 1252 |
+
383,
|
| 1253 |
+
792
|
| 1254 |
+
],
|
| 1255 |
+
"page_idx": 7
|
| 1256 |
+
},
|
| 1257 |
+
{
|
| 1258 |
+
"type": "text",
|
| 1259 |
+
"text": "The features extracted in subsection 3.5.2 are classified using the above classifiers in subsection 3.5.3 and evaluated using stratified k-fold sampling of Scikit-learn Pedregosa et al. [2011b]. In this validation, data are split into 10 folds and the evaluation results with respect to weighted average F1-score is tabulated in Table 5.",
|
| 1260 |
+
"bbox": [
|
| 1261 |
+
111,
|
| 1262 |
+
806,
|
| 1263 |
+
883,
|
| 1264 |
+
849
|
| 1265 |
+
],
|
| 1266 |
+
"page_idx": 7
|
| 1267 |
+
},
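The evaluation protocol described above (stratified 10-fold cross-validation scored with the weighted-average F1 measure) can be sketched as follows; X and y stand for the extracted feature matrix and the class labels, and the shuffle/random-state choices are assumptions.

```python
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_score

def evaluate(clf, X, y):
    # 10 stratified folds, scored with the weighted-average F1 measure.
    skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)
    return cross_val_score(clf, X, y, cv=skf, scoring="f1_weighted").mean()

# Example usage with one of the classifiers from the previous sketch:
# evaluate(RandomForestClassifier(n_estimators=100), X, y)
```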
|
| 1268 |
+
{
|
| 1269 |
+
"type": "text",
|
| 1270 |
+
"text": "From the Table 5, it is clear that the model with Random Forest Classifier and Multi-Layer Perceptron (MLP) applied on the features extracted using Glove performs equally well with an F1-score of 0.647. The performance of the models with accuracy as metric is shown in Table 6. From the table, it is clear that the model with Random Forest classifier and Glove vectorizer performs better with an accuracy of 0.760.",
|
| 1271 |
+
"bbox": [
|
| 1272 |
+
111,
|
| 1273 |
+
854,
|
| 1274 |
+
883,
|
| 1275 |
+
911
|
| 1276 |
+
],
|
| 1277 |
+
"page_idx": 7
|
| 1278 |
+
},
|
| 1279 |
+
{
|
| 1280 |
+
"type": "header",
|
| 1281 |
+
"text": "Data set creation and Empirical analysis for detecting signs of depression",
|
| 1282 |
+
"bbox": [
|
| 1283 |
+
256,
|
| 1284 |
+
42,
|
| 1285 |
+
740,
|
| 1286 |
+
56
|
| 1287 |
+
],
|
| 1288 |
+
"page_idx": 7
|
| 1289 |
+
},
|
| 1290 |
+
{
|
| 1291 |
+
"type": "header",
|
| 1292 |
+
"text": "A PREPRINT",
|
| 1293 |
+
"bbox": [
|
| 1294 |
+
802,
|
| 1295 |
+
44,
|
| 1296 |
+
880,
|
| 1297 |
+
55
|
| 1298 |
+
],
|
| 1299 |
+
"page_idx": 7
|
| 1300 |
+
},
|
| 1301 |
+
{
|
| 1302 |
+
"type": "page_number",
|
| 1303 |
+
"text": "8",
|
| 1304 |
+
"bbox": [
|
| 1305 |
+
493,
|
| 1306 |
+
935,
|
| 1307 |
+
504,
|
| 1308 |
+
946
|
| 1309 |
+
],
|
| 1310 |
+
"page_idx": 7
|
| 1311 |
+
},
|
| 1312 |
+
{
|
| 1313 |
+
"type": "table",
|
| 1314 |
+
"img_path": "images/fd87cfb58997d088ef5d7546b05e96201e28857fbffb602d2f8cb9d88418e208.jpg",
|
| 1315 |
+
"table_caption": [
|
| 1316 |
+
"Table 5: F1 score of all baseline models"
|
| 1317 |
+
],
|
| 1318 |
+
"table_footnote": [],
|
| 1319 |
+
"table_body": "<table><tr><td>F1 - score</td><td>TF-IDF</td><td>Glove</td><td>Word2Vec</td></tr><tr><td>ABC</td><td>0.451</td><td>0.496</td><td>0.451</td></tr><tr><td>DT</td><td>0.469</td><td>0.614</td><td>0.469</td></tr><tr><td>GNB</td><td>0.290</td><td>0.415</td><td>0.302</td></tr><tr><td>KNN</td><td>0.549</td><td>0.604</td><td>0.594</td></tr><tr><td>L-SVM</td><td>0.273</td><td>0.309</td><td>0.273</td></tr><tr><td>LDA</td><td>0.391</td><td>0.395</td><td>0.391</td></tr><tr><td>LR</td><td>0.395</td><td>0.329</td><td>0.395</td></tr><tr><td>MLP</td><td>0.625</td><td>0.647</td><td>0.625</td></tr><tr><td>QDA</td><td>0.368</td><td>0.459</td><td>0.368</td></tr><tr><td>RBF -SVM</td><td>0.452</td><td>0.560</td><td>0.452</td></tr><tr><td>RF</td><td>0.449</td><td>0.647</td><td>0.456</td></tr></table>",
|
| 1320 |
+
"bbox": [
|
| 1321 |
+
330,
|
| 1322 |
+
111,
|
| 1323 |
+
663,
|
| 1324 |
+
356
|
| 1325 |
+
],
|
| 1326 |
+
"page_idx": 8
|
| 1327 |
+
},
|
| 1328 |
+
{
|
| 1329 |
+
"type": "table",
|
| 1330 |
+
"img_path": "images/4dc9cccbd95ea2ffd83f5f713af98105ef705d82fb63d002cea4df946fcf6eb7.jpg",
|
| 1331 |
+
"table_caption": [
|
| 1332 |
+
"Table 6: Accuracy of all baseline models"
|
| 1333 |
+
],
|
| 1334 |
+
"table_footnote": [],
|
| 1335 |
+
"table_body": "<table><tr><td>Accuracy</td><td>TF-IDF</td><td>Glove</td><td>Word2Vec</td></tr><tr><td>ABC</td><td>0.616</td><td>0.654</td><td>0.616</td></tr><tr><td>DT</td><td>0.579</td><td>0.697</td><td>0.579</td></tr><tr><td>GNB</td><td>0.351</td><td>0.464</td><td>0.351</td></tr><tr><td>KNN</td><td>0.695</td><td>0.717</td><td>0.694</td></tr><tr><td>L-SVM</td><td>0.623</td><td>0.646</td><td>0.623</td></tr><tr><td>LDA</td><td>0.619</td><td>0.659</td><td>0.619</td></tr><tr><td>LR</td><td>0.619</td><td>0.650</td><td>0.619</td></tr><tr><td>MLP</td><td>0.700</td><td>0.754</td><td>0.700</td></tr><tr><td>QDA</td><td>0.485</td><td>0.499</td><td>0.485</td></tr><tr><td>RBF -SVM</td><td>0.667</td><td>0.733</td><td>0.667</td></tr><tr><td>RF</td><td>0.689</td><td>0.760</td><td>0.695</td></tr></table>",
|
| 1336 |
+
"bbox": [
|
| 1337 |
+
328,
|
| 1338 |
+
396,
|
| 1339 |
+
669,
|
| 1340 |
+
641
|
| 1341 |
+
],
|
| 1342 |
+
"page_idx": 8
|
| 1343 |
+
},
|
| 1344 |
+
{
|
| 1345 |
+
"type": "text",
|
| 1346 |
+
"text": "4.1 With Data augmentation",
|
| 1347 |
+
"text_level": 1,
|
| 1348 |
+
"bbox": [
|
| 1349 |
+
112,
|
| 1350 |
+
670,
|
| 1351 |
+
328,
|
| 1352 |
+
686
|
| 1353 |
+
],
|
| 1354 |
+
"page_idx": 8
|
| 1355 |
+
},
|
| 1356 |
+
{
|
| 1357 |
+
"type": "text",
|
| 1358 |
+
"text": "The postings data is populated with more \"moderately depressed\" instances and thus, the data has to be balanced before classification for better performance. For balancing the data, Synthetic Minority Oversampling Technique (SMOTE) Chawla et al. [2002] was applied after vectorization. The effect of augmentation is shown in Figure 2.",
|
| 1359 |
+
"bbox": [
|
| 1360 |
+
109,
|
| 1361 |
+
698,
|
| 1362 |
+
883,
|
| 1363 |
+
742
|
| 1364 |
+
],
|
| 1365 |
+
"page_idx": 8
|
| 1366 |
+
},
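The balancing step described above can be sketched as follows, assuming imbalanced-learn's SMOTE implementation applied to the vectorized features; in practice only the training split should be resampled to avoid leaking synthetic samples into evaluation.

```python
from imblearn.over_sampling import SMOTE

def balance(X, y):
    # Oversample the minority classes of the vectorized postings data.
    return SMOTE(random_state=0).fit_resample(X, y)
```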
|
| 1367 |
+
{
|
| 1368 |
+
"type": "image",
|
| 1369 |
+
"img_path": "images/9e5724f18ad4fdd2b41883a6c6c39c474faed097f3ac2d728f0adfce8a79aacc.jpg",
|
| 1370 |
+
"image_caption": [
|
| 1371 |
+
"(a) Before applying SMOTE"
|
| 1372 |
+
],
|
| 1373 |
+
"image_footnote": [],
|
| 1374 |
+
"bbox": [
|
| 1375 |
+
156,
|
| 1376 |
+
766,
|
| 1377 |
+
279,
|
| 1378 |
+
864
|
| 1379 |
+
],
|
| 1380 |
+
"page_idx": 8
|
| 1381 |
+
},
|
| 1382 |
+
{
|
| 1383 |
+
"type": "image",
|
| 1384 |
+
"img_path": "images/2b0c015e9fb60338407f702357c77c335ffbb46730174d2216fe315b6d4b3e66.jpg",
|
| 1385 |
+
"image_caption": [
|
| 1386 |
+
"(b) After applying SMOTE",
|
| 1387 |
+
"Figure 2: Effect of data augmentation"
|
| 1388 |
+
],
|
| 1389 |
+
"image_footnote": [
|
| 1390 |
+
"Not depressed",
|
| 1391 |
+
"Moderately depressed",
|
| 1392 |
+
"Severely depressed"
|
| 1393 |
+
],
|
| 1394 |
+
"bbox": [
|
| 1395 |
+
516,
|
| 1396 |
+
767,
|
| 1397 |
+
635,
|
| 1398 |
+
857
|
| 1399 |
+
],
|
| 1400 |
+
"page_idx": 8
|
| 1401 |
+
},
|
| 1402 |
+
{
|
| 1403 |
+
"type": "header",
|
| 1404 |
+
"text": "Data set creation and Empirical analysis for detecting signs of depression",
|
| 1405 |
+
"bbox": [
|
| 1406 |
+
256,
|
| 1407 |
+
42,
|
| 1408 |
+
740,
|
| 1409 |
+
56
|
| 1410 |
+
],
|
| 1411 |
+
"page_idx": 8
|
| 1412 |
+
},
|
| 1413 |
+
{
|
| 1414 |
+
"type": "header",
|
| 1415 |
+
"text": "A PREPRINT",
|
| 1416 |
+
"bbox": [
|
| 1417 |
+
802,
|
| 1418 |
+
44,
|
| 1419 |
+
880,
|
| 1420 |
+
55
|
| 1421 |
+
],
|
| 1422 |
+
"page_idx": 8
|
| 1423 |
+
},
|
| 1424 |
+
{
|
| 1425 |
+
"type": "page_number",
|
| 1426 |
+
"text": "9",
|
| 1427 |
+
"bbox": [
|
| 1428 |
+
493,
|
| 1429 |
+
935,
|
| 1430 |
+
504,
|
| 1431 |
+
946
|
| 1432 |
+
],
|
| 1433 |
+
"page_idx": 8
|
| 1434 |
+
},
|
| 1435 |
+
{
|
| 1436 |
+
"type": "table",
|
| 1437 |
+
"img_path": "images/845d9e0188c56a49154e8c2f495630988d5fb74327c13c6fb5500375dd5de994.jpg",
|
| 1438 |
+
"table_caption": [
|
| 1439 |
+
"Table 7: F1-score of all baseline models after data augmentation"
|
| 1440 |
+
],
|
| 1441 |
+
"table_footnote": [],
|
| 1442 |
+
"table_body": "<table><tr><td>F1 - score</td><td>TF-IDF</td><td>Glove</td><td>Word2Vec</td></tr><tr><td>ABC</td><td>0.263</td><td>0.622</td><td>0.559</td></tr><tr><td>DT</td><td>0.273</td><td>0.772</td><td>0.721</td></tr><tr><td>GNB</td><td>0.271</td><td>0.449</td><td>0.389</td></tr><tr><td>KNN</td><td>0.258</td><td>0.814</td><td>0.834</td></tr><tr><td>L-SVM</td><td>0.273</td><td>0.570</td><td>0.642</td></tr><tr><td>LDA</td><td>0.270</td><td>0.550</td><td>0.540</td></tr><tr><td>LR</td><td>0.270</td><td>0.544</td><td>0.551</td></tr><tr><td>MLP</td><td>0.269</td><td>0.775</td><td>0.852</td></tr><tr><td>QDA</td><td>0.276</td><td>0.592</td><td>0.477</td></tr><tr><td>RBF -SVM</td><td>0.273</td><td>0.762</td><td>0.788</td></tr><tr><td>RF</td><td>0.272</td><td>0.854</td><td>0.877</td></tr></table>",
|
| 1443 |
+
"bbox": [
|
| 1444 |
+
331,
|
| 1445 |
+
112,
|
| 1446 |
+
663,
|
| 1447 |
+
356
|
| 1448 |
+
],
|
| 1449 |
+
"page_idx": 9
|
| 1450 |
+
},
|
| 1451 |
+
{
|
| 1452 |
+
"type": "table",
|
| 1453 |
+
"img_path": "images/9b67385526eb32f98a5dd0166d807b477809b5b20c68a2d730182a53f0a39c53.jpg",
|
| 1454 |
+
"table_caption": [
|
| 1455 |
+
"Table 8: Accuracy of all baseline models after data augmentation"
|
| 1456 |
+
],
|
| 1457 |
+
"table_footnote": [],
|
| 1458 |
+
"table_body": "<table><tr><td>Accuracy</td><td>TF-IDF</td><td>Glove</td><td>Word2Vec</td></tr><tr><td>ABC</td><td>0.384</td><td>0.628</td><td>0.562</td></tr><tr><td>DT</td><td>0.388</td><td>0.781</td><td>0.728</td></tr><tr><td>GNB</td><td>0.388</td><td>0.479</td><td>0.427</td></tr><tr><td>KNN</td><td>0.379</td><td>0.839</td><td>0.854</td></tr><tr><td>L-SVM</td><td>0.388</td><td>0.575</td><td>0.642</td></tr><tr><td>LDA</td><td>0.388</td><td>0.550</td><td>0.550</td></tr><tr><td>LR</td><td>0.387</td><td>0.547</td><td>0.559</td></tr><tr><td>MLP</td><td>0.386</td><td>0.780</td><td>0.857</td></tr><tr><td>QDA</td><td>0.393</td><td>0.615</td><td>0.497</td></tr><tr><td>RBF-SVM</td><td>0.388</td><td>0.769</td><td>0.792</td></tr><tr><td>RF</td><td>0.388</td><td>0.864</td><td>0.877</td></tr></table>",
|
| 1459 |
+
"bbox": [
|
| 1460 |
+
331,
|
| 1461 |
+
402,
|
| 1462 |
+
663,
|
| 1463 |
+
646
|
| 1464 |
+
],
|
| 1465 |
+
"page_idx": 9
|
| 1466 |
+
},
|
| 1467 |
+
{
|
| 1468 |
+
"type": "text",
|
| 1469 |
+
"text": "The features extracted in subsection 3.5.2 are augmented using SMOTE and then classified using the classifiers in subsection 3.5.3. The performance of these models in terms of F1-score and accuracy after data augmentation are shown in Table 7 and 8 respectively. From the tables, it is clear that the performance was improved and model with Random Forest classifier applied on the features extracted using Word2Vec performs well with a score of 0.877.",
|
| 1470 |
+
"bbox": [
|
| 1471 |
+
111,
|
| 1472 |
+
680,
|
| 1473 |
+
883,
|
| 1474 |
+
739
|
| 1475 |
+
],
|
| 1476 |
+
"page_idx": 9
|
| 1477 |
+
},
|
| 1478 |
+
{
|
| 1479 |
+
"type": "text",
|
| 1480 |
+
"text": "5 Research insights",
|
| 1481 |
+
"text_level": 1,
|
| 1482 |
+
"bbox": [
|
| 1483 |
+
112,
|
| 1484 |
+
766,
|
| 1485 |
+
294,
|
| 1486 |
+
785
|
| 1487 |
+
],
|
| 1488 |
+
"page_idx": 9
|
| 1489 |
+
},
|
| 1490 |
+
{
|
| 1491 |
+
"type": "text",
|
| 1492 |
+
"text": "The researchers can further extend this work by implementing the following methods:",
|
| 1493 |
+
"bbox": [
|
| 1494 |
+
111,
|
| 1495 |
+
804,
|
| 1496 |
+
678,
|
| 1497 |
+
819
|
| 1498 |
+
],
|
| 1499 |
+
"page_idx": 9
|
| 1500 |
+
},
|
| 1501 |
+
{
|
| 1502 |
+
"type": "list",
|
| 1503 |
+
"sub_type": "text",
|
| 1504 |
+
"list_items": [
|
| 1505 |
+
"- Extend the data set by considering the images along with text data.",
|
| 1506 |
+
"- Implement deep learning models in the data set.",
|
| 1507 |
+
"- Implement other methods of data augmentation to improve performance."
|
| 1508 |
+
],
|
| 1509 |
+
"bbox": [
|
| 1510 |
+
156,
|
| 1511 |
+
835,
|
| 1512 |
+
650,
|
| 1513 |
+
907
|
| 1514 |
+
],
|
| 1515 |
+
"page_idx": 9
|
| 1516 |
+
},
|
| 1517 |
+
{
|
| 1518 |
+
"type": "header",
|
| 1519 |
+
"text": "Data set creation and Empirical analysis for detecting signs of depression",
|
| 1520 |
+
"bbox": [
|
| 1521 |
+
256,
|
| 1522 |
+
42,
|
| 1523 |
+
740,
|
| 1524 |
+
56
|
| 1525 |
+
],
|
| 1526 |
+
"page_idx": 9
|
| 1527 |
+
},
|
| 1528 |
+
{
|
| 1529 |
+
"type": "header",
|
| 1530 |
+
"text": "A PREPRINT",
|
| 1531 |
+
"bbox": [
|
| 1532 |
+
802,
|
| 1533 |
+
44,
|
| 1534 |
+
880,
|
| 1535 |
+
55
|
| 1536 |
+
],
|
| 1537 |
+
"page_idx": 9
|
| 1538 |
+
},
|
| 1539 |
+
{
|
| 1540 |
+
"type": "page_number",
|
| 1541 |
+
"text": "10",
|
| 1542 |
+
"bbox": [
|
| 1543 |
+
488,
|
| 1544 |
+
935,
|
| 1545 |
+
508,
|
| 1546 |
+
946
|
| 1547 |
+
],
|
| 1548 |
+
"page_idx": 9
|
| 1549 |
+
},
|
| 1550 |
+
{
|
| 1551 |
+
"type": "text",
|
| 1552 |
+
"text": "6 Conclusions",
|
| 1553 |
+
"text_level": 1,
|
| 1554 |
+
"bbox": [
|
| 1555 |
+
112,
|
| 1556 |
+
89,
|
| 1557 |
+
250,
|
| 1558 |
+
104
|
| 1559 |
+
],
|
| 1560 |
+
"page_idx": 10
|
| 1561 |
+
},
|
| 1562 |
+
{
|
| 1563 |
+
"type": "text",
|
| 1564 |
+
"text": "Depression is a common mental illness that has to be detected and treated early to avoid serious consequences. Among the other ways of detecting, diagnosing mental health using their social media data seems much more effective since it involves less involvement of the individual. All the existing systems are designed to detect depression from social media texts. Although detecting depression is more important, detecting the level of depression also has its equal significance. Thus, we propose a data set that not only detects depression from social media but also analyzes the level of depression. For creating the data set, the data was collected from subreddits and annotated by domain experts into three levels of depression, namely not depressed, moderately depressed and severely depressed. An empirical analysis of traditional learning algorithms was also done for evaluating the data sets. Among the models, the model with Glove vectorizer and Random Forest classifier performs well with a F1-score of 0.647 and accuracy of 0.760. While analyzing the data set, \"the moderately depressed\" class seems to be highly populated than the classes and so, a data augmentation method named SMOTE was applied, and the performance is analyzed. Data augmentation improved the performance by $23\\%$ and $12\\%$ in terms of F1-score and accuracy respectively, with both F1-score and accuracy of 0.877. The data set can also be extended by considering the images along with texts for more accurate detection. The work can be extended further by implementing other traditional learning and deep learning models. Other augmentation techniques can also be experimented with for improving the performance of the model.",
|
| 1565 |
+
"bbox": [
|
| 1566 |
+
109,
|
| 1567 |
+
121,
|
| 1568 |
+
883,
|
| 1569 |
+
329
|
| 1570 |
+
],
|
| 1571 |
+
"page_idx": 10
|
| 1572 |
+
},
|
| 1573 |
+
{
|
| 1574 |
+
"type": "text",
|
| 1575 |
+
"text": "Data set availability",
|
| 1576 |
+
"text_level": 1,
|
| 1577 |
+
"bbox": [
|
| 1578 |
+
112,
|
| 1579 |
+
349,
|
| 1580 |
+
284,
|
| 1581 |
+
368
|
| 1582 |
+
],
|
| 1583 |
+
"page_idx": 10
|
| 1584 |
+
},
|
| 1585 |
+
{
|
| 1586 |
+
"type": "text",
|
| 1587 |
+
"text": "The data set is available to the public in a repository of a Github in the link: https://github.com/Kayal-Sampath/detecting-signs-of-depression-from-social-media-postings.",
|
| 1588 |
+
"bbox": [
|
| 1589 |
+
109,
|
| 1590 |
+
382,
|
| 1591 |
+
883,
|
| 1592 |
+
411
|
| 1593 |
+
],
|
| 1594 |
+
"page_idx": 10
|
| 1595 |
+
},
|
| 1596 |
+
{
|
| 1597 |
+
"type": "text",
|
| 1598 |
+
"text": "References",
|
| 1599 |
+
"text_level": 1,
|
| 1600 |
+
"bbox": [
|
| 1601 |
+
112,
|
| 1602 |
+
431,
|
| 1603 |
+
210,
|
| 1604 |
+
448
|
| 1605 |
+
],
|
| 1606 |
+
"page_idx": 10
|
| 1607 |
+
},
|
| 1608 |
+
{
|
| 1609 |
+
"type": "list",
|
| 1610 |
+
"sub_type": "ref_text",
|
| 1611 |
+
"list_items": [
|
| 1612 |
+
"American psychiatric association. https://www.psychiatry.org/patients-families/depression/what-is-depression. (Accessed: 2021-11-17).",
|
| 1613 |
+
"Institute of health metrics and evaluation. global health data exchange (ghdx). http://ghdx.healthdata.org/gbd-results-tool?params=gbd-api-2019-permalink/d780cffbe8a381b25e1416884959e88b. (Accessed: 2021-11-17).",
|
| 1614 |
+
"Tuka Al Hanai, Mohammad M Ghassemi, and James R Glass. Detecting depression with audio/text sequence modeling of interviews. In Interspeech, pages 1716-1720, 2018.",
|
| 1615 |
+
"Hamdi Dibeklioglu, Zakia Hammal, Ying Yang, and Jeffrey F Cohn. Multimodal detection of depression in clinical interviews. In Proceedings of the 2015 ACM on international conference on multimodal interaction, pages 307-310, 2015.",
|
| 1616 |
+
"Sharifa Alghowinem, Roland Goecke, Michael Wagner, Julien Epps, Matthew Hyett, Gordon Parker, and Michael Breakspear. Multimodal depression detection: fusion analysis of paralinguistic, head pose and eye gaze behaviors. IEEE Transactions on Affective Computing, 9(4):478-490, 2016.",
|
| 1617 |
+
"Md Nasir, Arindam Jati, Prashanth Gurunath Shivakumar, Sandeep Nallan Chakravarthula, and Panayiotis Georgiou. Multimodal and multiresolution depression detection from speech and facial landmark features. In Proceedings of the 6th international workshop on audio/visual emotion challenge, pages 43-50, 2016.",
|
| 1618 |
+
"Jana M Havigerová, Jiří Haviger, Dalibor Kucera, and Petra Hoffmannová. Text-based detection of the risk of depression. Frontiers in psychology, 10:513, 2019.",
|
| 1619 |
+
"Maxim Stankevich, Andrey Latyshev, Evgenia Kuminskaya, Ivan Smirnov, and Oleg Grigoriev. Depression detection from social media texts. In Data Analytics and Management in Data Intensive Domains: XXI International Conference DAMDID/RCDL-2019, page 352, 2019.",
|
| 1620 |
+
"Michelle Renee Morales and Rivka Levitan. Speech vs. text: A comparative analysis of features for depression detection systems. In 2016 IEEE spoken language technology workshop (SLT), pages 136-143. IEEE, 2016.",
|
| 1621 |
+
"Statista statistics. https://www.statista.com/statistics/278414/number-of-worldwide-social-network-users/. (Accessed: 2021-11-17).",
|
| 1622 |
+
"JT Wolohan, Misato Hiraga, Atreyee Mukherjee, Zeeshan Ali Sayyed, and Matthew Millard. Detecting linguistic traces of depression in topic-restricted text: Attending to self-stigmatized depression with nlp. In Proceedings of the First International Workshop on Language Cognition and Computational Models, pages 11-21, 2018."
|
| 1623 |
+
],
|
| 1624 |
+
"bbox": [
|
| 1625 |
+
112,
|
| 1626 |
+
457,
|
| 1627 |
+
955,
|
| 1628 |
+
912
|
| 1629 |
+
],
|
| 1630 |
+
"page_idx": 10
|
| 1631 |
+
},
|
| 1632 |
+
{
|
| 1633 |
+
"type": "header",
|
| 1634 |
+
"text": "Data set creation and Empirical analysis for detecting signs of depression",
|
| 1635 |
+
"bbox": [
|
| 1636 |
+
256,
|
| 1637 |
+
42,
|
| 1638 |
+
740,
|
| 1639 |
+
56
|
| 1640 |
+
],
|
| 1641 |
+
"page_idx": 10
|
| 1642 |
+
},
|
| 1643 |
+
{
|
| 1644 |
+
"type": "header",
|
| 1645 |
+
"text": "A PREPRINT",
|
| 1646 |
+
"bbox": [
|
| 1647 |
+
802,
|
| 1648 |
+
44,
|
| 1649 |
+
880,
|
| 1650 |
+
55
|
| 1651 |
+
],
|
| 1652 |
+
"page_idx": 10
|
| 1653 |
+
},
|
| 1654 |
+
{
|
| 1655 |
+
"type": "page_number",
|
| 1656 |
+
"text": "11",
|
| 1657 |
+
"bbox": [
|
| 1658 |
+
488,
|
| 1659 |
+
935,
|
| 1660 |
+
506,
|
| 1661 |
+
946
|
| 1662 |
+
],
|
| 1663 |
+
"page_idx": 10
|
| 1664 |
+
},
|
| 1665 |
+
{
|
| 1666 |
+
"type": "list",
|
| 1667 |
+
"sub_type": "ref_text",
|
| 1668 |
+
"list_items": [
|
| 1669 |
+
"Johannes C Eichstaedt, Robert J Smith, Raina M Merchant, Lyle H Ungar, Patrick Crutchley, Daniel Preotciuc-Pietro, David A Asch, and H Andrew Schwartz. Facebook language predicts depression in medical records. Proceedings of the National Academy of Sciences, 115(44):11203-11208, 2018.",
|
| 1670 |
+
"Andrew G Reece, Andrew J Reagan, Katharina LM Lix, Peter Sheridan Dodds, Christopher M Danforth, and Ellen J Langer. Forecasting the onset and course of mental illness with twitter data. Scientific reports, 7(1):1-11, 2017.",
|
| 1671 |
+
"Sho Tsugawa, Yusuke Kikuchi, Fumio Kishino, Kosuke Nakajima, Yuichi Itoh, and Hiroyuki Ohsaki. Recognizing depression from twitter activity. In Proceedings of the 33rd annual ACM conference on human factors in computing systems, pages 3187-3196, 2015.",
|
| 1672 |
+
"Mandar Deshpande and Vignesh Rao. Depression detection using emotion artificial intelligence. In 2017 international conference on intelligent sustainable systems (iciss), pages 858-862. IEEE, 2017.",
|
| 1673 |
+
"Chenhao Lin, Pengwei Hu, Hui Su, Shaochun Li, Jing Mei, Jie Zhou, and Henry Leung. Sensemood: Depression detection on social media. In Proceedings of the 2020 International Conference on Multimedia Retrieval, pages 407-411, 2020.",
|
| 1674 |
+
"Thin Nguyen, Dinh Phung, Bo Dao, Svetha Venkatesh, and Michael Berk. Affective and content analysis of online depression communities. IEEE Transactions on Affective Computing, 5(3):217-226, 2014.",
|
| 1675 |
+
"Yevhen Tyshchenko. Depression and anxiety detection from blog posts data. Nature Precis. Sci., Inst. Comput. Sci. Univ. Tartu, Tartu, Estonia, 2018.",
|
| 1676 |
+
"Andrew G Reece and Christopher M Danforth. Instagram photos reveal predictive markers of depression. *EPJ Data Science*, 6:1–12, 2017.",
|
| 1677 |
+
"Healthline. https://www.healthline.com/health/depression/mild-depression. (Accessed: 2021-11-17).",
|
| 1678 |
+
"David E Losada, Fabio Crestani, and Javier Parapar. erisk 2017: Clef lab on early risk prediction on the internet: experimental foundations. In International Conference of the Cross-Language Evaluation Forum for European Languages, pages 346-360. Springer, 2017.",
|
| 1679 |
+
"Michael M. Tadesse, Hongfei Lin, Bo Xu, and Liang Yang. Detection of depression-related posts in reddit social media forum. IEEE Access, 7:44883-44893, 2019. doi:10.1109/ACCESS.2019.2909180.",
|
| 1680 |
+
"Inna Pirina and Cagni Coltekin. Identifying depression on Reddit: The effect of training data. In Proceedings of the 2018 EMNLP Workshop SMM4H: The 3rd Social Media Mining for Health Applications Workshop & Shared Task, pages 9-12, Brussels, Belgium, October 2018. Association for Computational Linguistics. doi:10.18653/v1/W18-5903 URL https://aclanthology.org/W18-5903.",
|
| 1681 |
+
"Hannah Yao, Sina Rashidian, Xinyu Dong, Hongyi Duanmu, Richard N Rosenthal, and Fusheng Wang. Detection of suicidality among opioid users on reddit: Machine learning-based approach. Journal of medical internet research, 22(11):e15293, 2020.",
|
| 1682 |
+
"Nick Boettcher et al. Studies of depression and anxiety using reddit as a data source: Scoping review. JMIR Mental Health, 8(11):e29487, 2021.",
|
| 1683 |
+
"Ron Artstein and Massimo Poesio. Inter-coder agreement for computational linguistics. Computational Linguistics, 34 (4):555-596, 2008.",
|
| 1684 |
+
"Jacob Cohen. A coefficient of agreement for nominal scales. Educational and psychological measurement, 20(1), 37-46, 1960.",
|
| 1685 |
+
"F. Pedregosa, G. Varoquaux, A. Gramfort, et al. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830, 2011a.",
|
| 1686 |
+
"J Richard Landis and Gary G Koch. The measurement of observer agreement for categorical data. biometrics, pages 159-174, 1977.",
|
| 1687 |
+
"F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duchesnay. Scikit-learn Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830, 2011b.",
|
| 1688 |
+
"Martin F Porter. An algorithm for suffix stripping. *Program*, 1980.",
|
| 1689 |
+
"Jeffrey Pennington, Richard Socher, and Christopher D Manning. Glove: Global vectors for word representation In Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pages 1532-1543, 2014.",
|
| 1690 |
+
"N. V. Chawla, K. W. Bowyer, L. O. Hall, and W. P. Kegelmeyer. Smote: Synthetic minority over-sampling technique Journal of Artificial Intelligence Research, 16:321-357, Jun 2002. ISSN 1076-9757. doi:10.1613/jair.953. URL http://dx.doi.org/10.1613/jair.953."
|
| 1691 |
+
],
|
| 1692 |
+
"bbox": [
|
| 1693 |
+
114,
|
| 1694 |
+
90,
|
| 1695 |
+
883,
|
| 1696 |
+
912
|
| 1697 |
+
],
|
| 1698 |
+
"page_idx": 11
|
| 1699 |
+
},
|
| 1700 |
+
{
|
| 1701 |
+
"type": "header",
|
| 1702 |
+
"text": "Data set creation and Empirical analysis for detecting signs of depression",
|
| 1703 |
+
"bbox": [
|
| 1704 |
+
256,
|
| 1705 |
+
42,
|
| 1706 |
+
740,
|
| 1707 |
+
56
|
| 1708 |
+
],
|
| 1709 |
+
"page_idx": 11
|
| 1710 |
+
},
|
| 1711 |
+
{
|
| 1712 |
+
"type": "header",
|
| 1713 |
+
"text": "A PREPRINT",
|
| 1714 |
+
"bbox": [
|
| 1715 |
+
802,
|
| 1716 |
+
44,
|
| 1717 |
+
880,
|
| 1718 |
+
55
|
| 1719 |
+
],
|
| 1720 |
+
"page_idx": 11
|
| 1721 |
+
},
|
| 1722 |
+
{
|
| 1723 |
+
"type": "page_number",
|
| 1724 |
+
"text": "12",
|
| 1725 |
+
"bbox": [
|
| 1726 |
+
488,
|
| 1727 |
+
935,
|
| 1728 |
+
506,
|
| 1729 |
+
946
|
| 1730 |
+
],
|
| 1731 |
+
"page_idx": 11
|
| 1732 |
+
}
|
| 1733 |
+
]
|
2202.03xxx/2202.03047/35db8a53-2285-4000-8359-95bda668e6ac_model.json
ADDED
|
@@ -0,0 +1,2424 @@
| 1 |
+
[
|
| 2 |
+
[
|
| 3 |
+
{
|
| 4 |
+
"type": "aside_text",
|
| 5 |
+
"bbox": [
|
| 6 |
+
0.023,
|
| 7 |
+
0.279,
|
| 8 |
+
0.056,
|
| 9 |
+
0.7
|
| 10 |
+
],
|
| 11 |
+
"angle": 270,
|
| 12 |
+
"content": "arXiv:2202.03047v1 [cs.AI] 7 Feb 2022"
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "title",
|
| 16 |
+
"bbox": [
|
| 17 |
+
0.16,
|
| 18 |
+
0.121,
|
| 19 |
+
0.837,
|
| 20 |
+
0.192
|
| 21 |
+
],
|
| 22 |
+
"angle": 0,
|
| 23 |
+
"content": "DATA SET CREATION AND EMPIRICAL ANALYSIS FOR DETECTING SIGNS OF DEPRESSION FROM SOCIAL MEDIA POSTINGS"
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"bbox": [
|
| 28 |
+
0.453,
|
| 29 |
+
0.224,
|
| 30 |
+
0.544,
|
| 31 |
+
0.237
|
| 32 |
+
],
|
| 33 |
+
"angle": 0,
|
| 34 |
+
"content": "A PREPRINT"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"bbox": [
|
| 39 |
+
0.22,
|
| 40 |
+
0.265,
|
| 41 |
+
0.443,
|
| 42 |
+
0.321
|
| 43 |
+
],
|
| 44 |
+
"angle": 0,
|
| 45 |
+
"content": "Kayalvizhi S \nDepartment of Computer Science \nSSN College of Engineering \nkayalvizhis@ssn.edu.in"
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"bbox": [
|
| 50 |
+
0.555,
|
| 51 |
+
0.265,
|
| 52 |
+
0.777,
|
| 53 |
+
0.321
|
| 54 |
+
],
|
| 55 |
+
"angle": 0,
|
| 56 |
+
"content": "Thenmozhi D \nDepartment of Computer Science \nSSN College of Engineering \ntheni_d@ssn.edu.in"
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"bbox": [
|
| 61 |
+
0.44,
|
| 62 |
+
0.357,
|
| 63 |
+
0.556,
|
| 64 |
+
0.371
|
| 65 |
+
],
|
| 66 |
+
"angle": 0,
|
| 67 |
+
"content": "February 8, 2022"
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "title",
|
| 71 |
+
"bbox": [
|
| 72 |
+
0.449,
|
| 73 |
+
0.39,
|
| 74 |
+
0.547,
|
| 75 |
+
0.404
|
| 76 |
+
],
|
| 77 |
+
"angle": 0,
|
| 78 |
+
"content": "ABSTRACT"
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "text",
|
| 82 |
+
"bbox": [
|
| 83 |
+
0.17,
|
| 84 |
+
0.417,
|
| 85 |
+
0.828,
|
| 86 |
+
0.61
|
| 87 |
+
],
|
| 88 |
+
"angle": 0,
|
| 89 |
+
"content": "Depression is a common mental illness that has to be detected and treated at an early stage to avoid serious consequences. There are many methods and modalities for detecting depression that involves physical examination of the individual. However, diagnosing mental health using their social media data is more effective as it avoids such physical examinations. Also, people express their emotions well in social media, it is desirable to diagnose their mental health using social media data. Though there are many existing systems that detects mental illness of a person by analysing their social media data, detecting the level of depression is also important for further treatment. Thus, in this research, we developed a gold standard data set that detects the levels of depression as 'not depressed', 'moderately depressed' and 'severely depressed' from the social media postings. Traditional learning algorithms were employed on this data set and an empirical analysis was presented in this paper. Data augmentation technique was applied to overcome the data imbalance. Among the several variations that are implemented, the model with Word2Vec vectorizer and Random Forest classifier on augmented data outperforms the other variations with a score of 0.877 for both accuracy and F1 measure."
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"type": "text",
|
| 93 |
+
"bbox": [
|
| 94 |
+
0.111,
|
| 95 |
+
0.624,
|
| 96 |
+
0.725,
|
| 97 |
+
0.639
|
| 98 |
+
],
|
| 99 |
+
"angle": 0,
|
| 100 |
+
"content": "Keywords Depression \\(\\cdot\\) Data set \\(\\cdot\\) Data augmentation \\(\\cdot\\) Levels of depression \\(\\cdot\\) Random Forest"
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"type": "title",
|
| 104 |
+
"bbox": [
|
| 105 |
+
0.114,
|
| 106 |
+
0.658,
|
| 107 |
+
0.254,
|
| 108 |
+
0.673
|
| 109 |
+
],
|
| 110 |
+
"angle": 0,
|
| 111 |
+
"content": "1 Introduction"
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"type": "text",
|
| 115 |
+
"bbox": [
|
| 116 |
+
0.111,
|
| 117 |
+
0.688,
|
| 118 |
+
0.885,
|
| 119 |
+
0.814
|
| 120 |
+
],
|
| 121 |
+
"angle": 0,
|
| 122 |
+
"content": "Depression (major depressive disorder) is a common and serious medical illness that negatively affects the way one feels, thinks and acts ame. The rate of depression is rapidly increasing day by day. According to Global Health Data Exchange (GHDx), depression has affected 280 million people worldwide who. Detecting depression is important since it has to be observed and treated at an early stage to avoid severe consequences<sup>1</sup>. The depression was generally diagnosed by different methods modalities clinical interviews Al Hanai et al. [2018]Dibeklioglu et al. [2015], analysing the behaviourAlghowinem et al. [2016], monitoring facial and speech modulationsNasir et al. [2016], physical exams with Depression scales Havigerova et al. [2019]Stankevich et al. [2019], videos and audios Morales and Levitan [2016], etc. All these methods of diagnosing involves more involvement of an individual or discussion about their feeling in person."
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"type": "text",
|
| 126 |
+
"bbox": [
|
| 127 |
+
0.111,
|
| 128 |
+
0.819,
|
| 129 |
+
0.885,
|
| 130 |
+
0.889
|
| 131 |
+
],
|
| 132 |
+
"angle": 0,
|
| 133 |
+
"content": "On the other hand, social media is highly emerging into our lives with a considerable rate of increase in social media users according to the statistics of statista sta. Slowly, the social media became a comfortable virtual platform to express our feelings. And so, social media platform can be considered as a source to analyse people's thoughts and so can also be used for analysing mental health of an individual. Thus, we aim to use social media texts for analysing the mental health of a person."
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"type": "page_footnote",
|
| 137 |
+
"bbox": [
|
| 138 |
+
0.136,
|
| 139 |
+
0.898,
|
| 140 |
+
0.51,
|
| 141 |
+
0.912
|
| 142 |
+
],
|
| 143 |
+
"angle": 0,
|
| 144 |
+
"content": "<sup>1</sup>https://www.healthline.com/health/depression/effects-on-body"
|
| 145 |
+
}
|
| 146 |
+
],
|
| 147 |
+
[
|
| 148 |
+
{
|
| 149 |
+
"type": "header",
|
| 150 |
+
"bbox": [
|
| 151 |
+
0.258,
|
| 152 |
+
0.044,
|
| 153 |
+
0.741,
|
| 154 |
+
0.058
|
| 155 |
+
],
|
| 156 |
+
"angle": 0,
|
| 157 |
+
"content": "Data set creation and Empirical analysis for detecting signs of depression"
|
| 158 |
+
},
|
| 159 |
+
{
|
| 160 |
+
"type": "header",
|
| 161 |
+
"bbox": [
|
| 162 |
+
0.803,
|
| 163 |
+
0.045,
|
| 164 |
+
0.882,
|
| 165 |
+
0.056
|
| 166 |
+
],
|
| 167 |
+
"angle": 0,
|
| 168 |
+
"content": "A PREPRINT"
|
| 169 |
+
},
|
| 170 |
+
{
|
| 171 |
+
"type": "text",
|
| 172 |
+
"bbox": [
|
| 173 |
+
0.111,
|
| 174 |
+
0.092,
|
| 175 |
+
0.885,
|
| 176 |
+
0.146
|
| 177 |
+
],
|
| 178 |
+
"angle": 0,
|
| 179 |
+
"content": "The existing works collect social media texts from open source platforms like Reddit Wolohan et al. [2018], FacebookEichstaedt et al. [2018], Twitter Reece et al. [2017]Tsugawa et al. [2015]Deshpande and Rao [2017]Lin et al. [2020], Live journals Nguyen et al. [2014], blog postsTyshchenko [2018], Instagram Reece and Danforth [2017] etc. and used them to detect depression."
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"type": "title",
|
| 183 |
+
"bbox": [
|
| 184 |
+
0.114,
|
| 185 |
+
0.148,
|
| 186 |
+
0.223,
|
| 187 |
+
0.161
|
| 188 |
+
],
|
| 189 |
+
"angle": 0,
|
| 190 |
+
"content": "Research gaps:"
|
| 191 |
+
},
|
| 192 |
+
{
|
| 193 |
+
"type": "text",
|
| 194 |
+
"bbox": [
|
| 195 |
+
0.111,
|
| 196 |
+
0.161,
|
| 197 |
+
0.885,
|
| 198 |
+
0.257
|
| 199 |
+
],
|
| 200 |
+
"angle": 0,
|
| 201 |
+
"content": "All these research works concentrate on diagnosing depression from the social media texts. Although detecting depression has its own significance, detecting the level of depression also has its equal importance for further treatment. Generally, depression is classified into three stages namely mild, moderate and severe typ. Each stage has its own symptoms and effects and so detecting the level of depression is also a crucial one. Thus, we propose a data set to detect the level of depression in addition to detection of depression from the social media texts. The data set is made available to the public in a CodaLab competition repository \\(^{2}\\). This paper explains the process of data set creation that detects the levels of depression along with some baseline models."
|
| 202 |
+
},
|
| 203 |
+
{
|
| 204 |
+
"type": "title",
|
| 205 |
+
"bbox": [
|
| 206 |
+
0.113,
|
| 207 |
+
0.258,
|
| 208 |
+
0.414,
|
| 209 |
+
0.271
|
| 210 |
+
],
|
| 211 |
+
"angle": 0,
|
| 212 |
+
"content": "Our contributions in this research include:"
|
| 213 |
+
},
|
| 214 |
+
{
|
| 215 |
+
"type": "text",
|
| 216 |
+
"bbox": [
|
| 217 |
+
0.151,
|
| 218 |
+
0.283,
|
| 219 |
+
0.878,
|
| 220 |
+
0.298
|
| 221 |
+
],
|
| 222 |
+
"angle": 0,
|
| 223 |
+
"content": "1. Creating a new bench mark data set to detect the sign of depression from social media data at postings level."
|
| 224 |
+
},
|
| 225 |
+
{
|
| 226 |
+
"type": "text",
|
| 227 |
+
"bbox": [
|
| 228 |
+
0.151,
|
| 229 |
+
0.303,
|
| 230 |
+
0.598,
|
| 231 |
+
0.318
|
| 232 |
+
],
|
| 233 |
+
"angle": 0,
|
| 234 |
+
"content": "2. Developing base line models with traditional learning classifiers."
|
| 235 |
+
},
|
| 236 |
+
{
|
| 237 |
+
"type": "text",
|
| 238 |
+
"bbox": [
|
| 239 |
+
0.151,
|
| 240 |
+
0.322,
|
| 241 |
+
0.457,
|
| 242 |
+
0.336
|
| 243 |
+
],
|
| 244 |
+
"angle": 0,
|
| 245 |
+
"content": "3. Analysing the impact of data augmentation"
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"type": "list",
|
| 249 |
+
"bbox": [
|
| 250 |
+
0.151,
|
| 251 |
+
0.283,
|
| 252 |
+
0.878,
|
| 253 |
+
0.336
|
| 254 |
+
],
|
| 255 |
+
"angle": 0,
|
| 256 |
+
"content": null
|
| 257 |
+
},
|
| 258 |
+
{
|
| 259 |
+
"type": "title",
|
| 260 |
+
"bbox": [
|
| 261 |
+
0.113,
|
| 262 |
+
0.356,
|
| 263 |
+
0.265,
|
| 264 |
+
0.372
|
| 265 |
+
],
|
| 266 |
+
"angle": 0,
|
| 267 |
+
"content": "2 Related Work"
|
| 268 |
+
},
|
| 269 |
+
{
|
| 270 |
+
"type": "text",
|
| 271 |
+
"bbox": [
|
| 272 |
+
0.111,
|
| 273 |
+
0.387,
|
| 274 |
+
0.885,
|
| 275 |
+
0.43
|
| 276 |
+
],
|
| 277 |
+
"angle": 0,
|
| 278 |
+
"content": "The aim of our research work is to create a data set that identifies the sign of depression and detect the level of depression and thus, the existing works are analysed in terms of data collection, modalities and methodologies of detecting depression."
|
| 279 |
+
},
|
| 280 |
+
{
|
| 281 |
+
"type": "title",
|
| 282 |
+
"bbox": [
|
| 283 |
+
0.113,
|
| 284 |
+
0.446,
|
| 285 |
+
0.53,
|
| 286 |
+
0.461
|
| 287 |
+
],
|
| 288 |
+
"angle": 0,
|
| 289 |
+
"content": "2.1 Modalities and methodologies of depression detection:"
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"type": "text",
|
| 293 |
+
"bbox": [
|
| 294 |
+
0.111,
|
| 295 |
+
0.471,
|
| 296 |
+
0.885,
|
| 297 |
+
0.596
|
| 298 |
+
],
|
| 299 |
+
"angle": 0,
|
| 300 |
+
"content": "For detecting depression, the data was collected by various methods like clinical interviews Al Hanai et al. [2018]Dibeklioglu et al. [2015], analysing the behaviourAlghowinem et al. [2016], monitoring facial and speech modulationsNasir et al. [2016], physical exams with Depression scales Havigerova et al. [2019]Stankevich et al. [2019], videos and audios Morales and Levitan [2016], etc. Since, the social media users are rapidly increasing day by day, social media data can also be considered as a main source for detecting the mental health. This key idea gave rise to the most utilized data set E-Risk@CLEF-2017 pilot task data set Losada et al. [2017] that was collected from Reddit. In addition to this data set, many other data sets such as DAIC corpus Al Hanai et al. [2018], AVEC Morales and Levitan [2016], etc. also evolved that detects depression from the social media data. Though few benchmark data set exists to detect depression, more researchers tend to collect data from social media and create their own data sets."
|
| 301 |
+
},
|
| 302 |
+
{
|
| 303 |
+
"type": "title",
|
| 304 |
+
"bbox": [
|
| 305 |
+
0.113,
|
| 306 |
+
0.613,
|
| 307 |
+
0.391,
|
| 308 |
+
0.627
|
| 309 |
+
],
|
| 310 |
+
"angle": 0,
|
| 311 |
+
"content": "2.2 Data collection from social media:"
|
| 312 |
+
},
|
| 313 |
+
{
|
| 314 |
+
"type": "text",
|
| 315 |
+
"bbox": [
|
| 316 |
+
0.111,
|
| 317 |
+
0.638,
|
| 318 |
+
0.885,
|
| 319 |
+
0.737
|
| 320 |
+
],
|
| 321 |
+
"angle": 0,
|
| 322 |
+
"content": "The social media texts were collected from open source platforms like Reddit Wolohan et al. [2018]Tadesse et al. [2019], FacebookEichstaedt et al. [2018], Twitter Reece et al. [2017]Tsugawa et al. [2015]Deshpande and Rao [2017]Lin et al. [2020], Live journals Nguyen et al. [2014], blog postsTyshchenko [2018], Instagram Reece and Danforth [2017] etc. The data from twitter was collected using API's and annotated into depressed and not depressed classes based on key words like \"depressed, hopeless and suicide\" Deshpande and Rao [2017], using a questionnaire Tsugawa et al. [2015], survey Reece et al. [2017], etc. The data was also scrapped from groups of live journals Nguyen et al. [2014], blog postsTyshchenko [2018] and manually annotated into depressed and not depressed."
|
| 323 |
+
},
|
| 324 |
+
{
|
| 325 |
+
"type": "text",
|
| 326 |
+
"bbox": [
|
| 327 |
+
0.111,
|
| 328 |
+
0.742,
|
| 329 |
+
0.884,
|
| 330 |
+
0.771
|
| 331 |
+
],
|
| 332 |
+
"angle": 0,
|
| 333 |
+
"content": "Among these social media platforms, Reddit possess large amount text discussion than the other platforms and so Reddit has become widely used platform to collect social media text data recently."
|
| 334 |
+
},
|
| 335 |
+
{
|
| 336 |
+
"type": "text",
|
| 337 |
+
"bbox": [
|
| 338 |
+
0.111,
|
| 339 |
+
0.776,
|
| 340 |
+
0.885,
|
| 341 |
+
0.888
|
| 342 |
+
],
|
| 343 |
+
"angle": 0,
|
| 344 |
+
"content": "The data were collected from these platforms using Application Programming Interface (API) using hashtags, groups, communities, etc. The data from reddit was collected from Subreddits like \"r/depression help, r/aww, r/AskReddit, r/news, r/Showerthoughts, r/pics, r/gaming, r/depression, r/videos r todaylearned r/funny\" and annotated manually by two annotators into depressed and not depressed class Wolohan et al. [2018]. The data was also from subreddits like \"r/anxiety, r/depression and r/depression_help\" and annotated into a data set Pirina and Cöltekin [2018]. A data set was created with classes depression, suicide.watch, opiates and controlled which was collected using subreddits such as \"r/suicidewatch, r/depression\", opioid related forums and other general forums Yao et al. [2020]. A survey was also done based on the studies of depression and anxiety from the Reddit data Boettcher et al. [2021]."
|
| 345 |
+
},
|
| 346 |
+
{
|
| 347 |
+
"type": "page_footnote",
|
| 348 |
+
"bbox": [
|
| 349 |
+
0.134,
|
| 350 |
+
0.897,
|
| 351 |
+
0.536,
|
| 352 |
+
0.912
|
| 353 |
+
],
|
| 354 |
+
"angle": 0,
|
| 355 |
+
"content": "<sup>2</sup>https://competitions.codalab.org/competitions/36410"
|
| 356 |
+
},
|
| 357 |
+
{
|
| 358 |
+
"type": "page_number",
|
| 359 |
+
"bbox": [
|
| 360 |
+
0.494,
|
| 361 |
+
0.936,
|
| 362 |
+
0.505,
|
| 363 |
+
0.948
|
| 364 |
+
],
|
| 365 |
+
"angle": 0,
|
| 366 |
+
"content": "2"
|
| 367 |
+
}
|
| 368 |
+
],
|
| 369 |
+
[
|
| 370 |
+
{
|
| 371 |
+
"type": "header",
|
| 372 |
+
"bbox": [
|
| 373 |
+
0.258,
|
| 374 |
+
0.044,
|
| 375 |
+
0.74,
|
| 376 |
+
0.058
|
| 377 |
+
],
|
| 378 |
+
"angle": 0,
|
| 379 |
+
"content": "Data set creation and Empirical analysis for detecting signs of depression"
|
| 380 |
+
},
|
| 381 |
+
{
|
| 382 |
+
"type": "header",
|
| 383 |
+
"bbox": [
|
| 384 |
+
0.803,
|
| 385 |
+
0.045,
|
| 386 |
+
0.882,
|
| 387 |
+
0.056
|
| 388 |
+
],
|
| 389 |
+
"angle": 0,
|
| 390 |
+
"content": "A PREPRINT"
|
| 391 |
+
},
|
| 392 |
+
{
|
| 393 |
+
"type": "table_caption",
|
| 394 |
+
"bbox": [
|
| 395 |
+
0.361,
|
| 396 |
+
0.098,
|
| 397 |
+
0.637,
|
| 398 |
+
0.112
|
| 399 |
+
],
|
| 400 |
+
"angle": 0,
|
| 401 |
+
"content": "Table 1: Comparison of existing data sets"
|
| 402 |
+
},
|
| 403 |
+
{
|
| 404 |
+
"type": "table",
|
| 405 |
+
"bbox": [
|
| 406 |
+
0.123,
|
| 407 |
+
0.112,
|
| 408 |
+
0.877,
|
| 409 |
+
0.423
|
| 410 |
+
],
|
| 411 |
+
"angle": 0,
|
| 412 |
+
"content": "<table><tr><td>Existing system</td><td>Social Media Platform</td><td>Class Labels</td></tr><tr><td>Eichstaedt et.al Eichstaedt et al. [2018]</td><td>Facebook</td><td>Depressed and not depressed</td></tr><tr><td>Nguyen et.al Nguyen et al. [2014]</td><td>Live journal</td><td>Depressed and control</td></tr><tr><td>Tyshchenko et. al Tyshchenko [2018]</td><td>Blog post</td><td>Clinical and Control</td></tr><tr><td>Deshpande et.al Deshpande and Rao [2017]</td><td>Twitter</td><td>Neutral and negative</td></tr><tr><td>Lin et.al Lin et al. [2020]</td><td>Twitter</td><td>Depressed and not depressed</td></tr><tr><td>Reece et.al Reece et al. [2017]</td><td>Twitter</td><td>PTSD and Depression</td></tr><tr><td>Tsugawa et.al Tsugawa et al. [2015]</td><td>Twitter</td><td>Depressed and not depressed</td></tr><tr><td>Losada et.al Losada et al. [2017]</td><td>Reddit</td><td>Depression and Not depression</td></tr><tr><td>Wolohan et.al Wolohan et al. [2018]</td><td>Reddit</td><td>Depressed and not depressed</td></tr><tr><td>Tadesse et.al Tadesse et al. [2019]</td><td>Reddit</td><td>Depression indicative and standard</td></tr><tr><td>Pirina et.al Pirina and Çöltekin [2018]</td><td>Reddit</td><td>positive and negative</td></tr><tr><td>Yao et.al Yao et al. [2020]</td><td>Reddit</td><td>Depression, Suicide watch, Control and Opiates</td></tr><tr><td>Proposed Data set</td><td>Reddit</td><td>Not depressed, moderately depressed & severely depressed</td></tr></table>"
|
| 413 |
+
},
|
| 414 |
+
{
|
| 415 |
+
"type": "text",
|
| 416 |
+
"bbox": [
|
| 417 |
+
0.112,
|
| 418 |
+
0.453,
|
| 419 |
+
0.884,
|
| 420 |
+
0.496
|
| 421 |
+
],
|
| 422 |
+
"angle": 0,
|
| 423 |
+
"content": "From the Table 1, it is clear that all these research works have collected the social media data only to detect the presence of depression. Although, diagnosing depression is important, detecting the level of depression is more crucial for further treatment. And thus, we propose a data set that detects the level of depression."
|
| 424 |
+
},
|
| 425 |
+
{
|
| 426 |
+
"type": "title",
|
| 427 |
+
"bbox": [
|
| 428 |
+
0.113,
|
| 429 |
+
0.521,
|
| 430 |
+
0.279,
|
| 431 |
+
0.538
|
| 432 |
+
],
|
| 433 |
+
"angle": 0,
|
| 434 |
+
"content": "3 Proposed Work"
|
| 435 |
+
},
|
| 436 |
+
{
|
| 437 |
+
"type": "text",
|
| 438 |
+
"bbox": [
|
| 439 |
+
0.112,
|
| 440 |
+
0.555,
|
| 441 |
+
0.885,
|
| 442 |
+
0.694
|
| 443 |
+
],
|
| 444 |
+
"angle": 0,
|
| 445 |
+
"content": "We propose to develop a gold standard data set that detects the levels of depression as not depressed, moderately depressed and severely depressed. Initially, the data set was created by collecting the data from the social media platform, Reddit. For collecting the data from archives of Reddit, two way communication is needed, which requires app authentication. After getting proper authentication, the subreddits from which the data must be collected are chosen and the data was extracted. After extracting the data, the data is pre-processed and exported in the required format which forms the data set. The data were then annotated into levels of depression by domain experts following the annotation guidelines. After annotation, the inter-rater agreement is calculated to analyze the quality of data and annotation. Then, the corpus is formed using the mutually annotated instances. Baseline models were also employed on the corpus to analyze the performance. To overcome the data imbalance problem, data augmentation technique was applied and their impact on performance was also analyzed."
|
| 446 |
+
},
|
| 447 |
+
{
|
| 448 |
+
"type": "title",
|
| 449 |
+
"bbox": [
|
| 450 |
+
0.113,
|
| 451 |
+
0.717,
|
| 452 |
+
0.279,
|
| 453 |
+
0.731
|
| 454 |
+
],
|
| 455 |
+
"angle": 0,
|
| 456 |
+
"content": "3.1 Data set creation:"
|
| 457 |
+
},
|
| 458 |
+
{
|
| 459 |
+
"type": "text",
|
| 460 |
+
"bbox": [
|
| 461 |
+
0.112,
|
| 462 |
+
0.744,
|
| 463 |
+
0.884,
|
| 464 |
+
0.773
|
| 465 |
+
],
|
| 466 |
+
"angle": 0,
|
| 467 |
+
"content": "For creating the data set, a suitable social media platform is chosen initially and data is scraped using suitable methods. After scraping the data, the data is processed and dumped in a suitable format."
|
| 468 |
+
},
|
| 469 |
+
{
|
| 470 |
+
"type": "title",
|
| 471 |
+
"bbox": [
|
| 472 |
+
0.113,
|
| 473 |
+
0.793,
|
| 474 |
+
0.277,
|
| 475 |
+
0.807
|
| 476 |
+
],
|
| 477 |
+
"angle": 0,
|
| 478 |
+
"content": "3.1.1 Data collection:"
|
| 479 |
+
},
|
| 480 |
+
{
|
| 481 |
+
"type": "text",
|
| 482 |
+
"bbox": [
|
| 483 |
+
0.112,
|
| 484 |
+
0.818,
|
| 485 |
+
0.884,
|
| 486 |
+
0.875
|
| 487 |
+
],
|
| 488 |
+
"angle": 0,
|
| 489 |
+
"content": "For creating the data set, the data was collected from Reddit<sup>3</sup>, an open source social media platform since it has more textual data when compared to other social media platforms. This data will be of postings format which includes only one or more statements of an individual. The postings data are scraped from the Reddit archives using the API \"pushshift\"."
|
| 490 |
+
},
|
| 491 |
+
{
|
| 492 |
+
"type": "page_footnote",
|
| 493 |
+
"bbox": [
|
| 494 |
+
0.134,
|
| 495 |
+
0.894,
|
| 496 |
+
0.315,
|
| 497 |
+
0.91
|
| 498 |
+
],
|
| 499 |
+
"angle": 0,
|
| 500 |
+
"content": "<sup>3</sup>https://www.reddit.com"
|
| 501 |
+
},
|
| 502 |
+
{
|
| 503 |
+
"type": "page_number",
|
| 504 |
+
"bbox": [
|
| 505 |
+
0.494,
|
| 506 |
+
0.936,
|
| 507 |
+
0.505,
|
| 508 |
+
0.948
|
| 509 |
+
],
|
| 510 |
+
"angle": 0,
|
| 511 |
+
"content": "3"
|
| 512 |
+
}
|
| 513 |
+
],
|
| 514 |
+
[
|
| 515 |
+
{
|
| 516 |
+
"type": "header",
|
| 517 |
+
"bbox": [
|
| 518 |
+
0.258,
|
| 519 |
+
0.044,
|
| 520 |
+
0.741,
|
| 521 |
+
0.058
|
| 522 |
+
],
|
| 523 |
+
"angle": 0,
|
| 524 |
+
"content": "Data set creation and Empirical analysis for detecting signs of depression"
|
| 525 |
+
},
|
| 526 |
+
{
|
| 527 |
+
"type": "header",
|
| 528 |
+
"bbox": [
|
| 529 |
+
0.803,
|
| 530 |
+
0.045,
|
| 531 |
+
0.882,
|
| 532 |
+
0.056
|
| 533 |
+
],
|
| 534 |
+
"angle": 0,
|
| 535 |
+
"content": "A PREPRINT"
|
| 536 |
+
},
|
| 537 |
+
{
|
| 538 |
+
"type": "title",
|
| 539 |
+
"bbox": [
|
| 540 |
+
0.113,
|
| 541 |
+
0.092,
|
| 542 |
+
0.307,
|
| 543 |
+
0.107
|
| 544 |
+
],
|
| 545 |
+
"angle": 0,
|
| 546 |
+
"content": "3.1.2 App authentication:"
|
| 547 |
+
},
|
| 548 |
+
{
|
| 549 |
+
"type": "text",
|
| 550 |
+
"bbox": [
|
| 551 |
+
0.111,
|
| 552 |
+
0.116,
|
| 553 |
+
0.884,
|
| 554 |
+
0.185
|
| 555 |
+
],
|
| 556 |
+
"angle": 0,
|
| 557 |
+
"content": "For scraping the data from Reddit achieves, Python Reddit API Wrapper(PRAW) is used. The data can be only scraped after getting authentication from the Reddit platform. This authentication process involves creation of an application in their domain, for which a unique client secret key and client id will be assigned. Thus, PRAW allows a two way communication only with these credentials of user_agent (application name), client_id and client_secret to get data from Reddit."
|
| 558 |
+
},
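The authentication step described above maps directly onto PRAW's `Reddit` constructor. A minimal sketch follows; the credential strings are placeholders for whatever was registered for the scraping app and are not given in the paper.

```python
import praw

# Hypothetical credentials obtained by registering a script-type app in the
# Reddit developer console; the real values are not part of the paper.
reddit = praw.Reddit(
    client_id="YOUR_CLIENT_ID",
    client_secret="YOUR_CLIENT_SECRET",
    user_agent="depression-dataset-scraper/0.1",
)

# Read-only access is sufficient for scraping public submissions.
reddit.read_only = True
```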
|
| 559 |
+
{
|
| 560 |
+
"type": "title",
|
| 561 |
+
"bbox": [
|
| 562 |
+
0.113,
|
| 563 |
+
0.203,
|
| 564 |
+
0.303,
|
| 565 |
+
0.216
|
| 566 |
+
],
|
| 567 |
+
"angle": 0,
|
| 568 |
+
"content": "3.1.3 Subreddit selection"
|
| 569 |
+
},
|
| 570 |
+
{
|
| 571 |
+
"type": "text",
|
| 572 |
+
"bbox": [
|
| 573 |
+
0.111,
|
| 574 |
+
0.227,
|
| 575 |
+
0.884,
|
| 576 |
+
0.27
|
| 577 |
+
],
|
| 578 |
+
"angle": 0,
|
| 579 |
+
"content": "Reddit is a collection of million groups or forums called subreddits. For collecting the confessions or discussion of people about their mental health, data was scraped from the archives of subreddits groups like \"r/Mental Health, r/depression, r/loneliness, r/stress, r/anxiety\"."
|
| 580 |
+
},
|
| 581 |
+
{
|
| 582 |
+
"type": "title",
|
| 583 |
+
"bbox": [
|
| 584 |
+
0.113,
|
| 585 |
+
0.287,
|
| 586 |
+
0.28,
|
| 587 |
+
0.301
|
| 588 |
+
],
|
| 589 |
+
"angle": 0,
|
| 590 |
+
"content": "3.1.4 Data extraction:"
|
| 591 |
+
},
|
| 592 |
+
{
|
| 593 |
+
"type": "text",
|
| 594 |
+
"bbox": [
|
| 595 |
+
0.111,
|
| 596 |
+
0.311,
|
| 597 |
+
0.884,
|
| 598 |
+
0.355
|
| 599 |
+
],
|
| 600 |
+
"angle": 0,
|
| 601 |
+
"content": "For each posting, the details such as post ID, title, URL, publish date, name of the subreddit, score of the post and total number of comments can be collected using PRAW. Among these data, PostID, title, text, URL, date and subreddit name are all collected in dictionary format."
|
| 602 |
+
},
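As a rough illustration of the field collection described above, the sketch below pulls recent submissions from one subreddit with PRAW and stores the listed fields as a list of dictionaries. The subreddit name and limit are placeholders, and the paper's actual pull used the Pushshift archives rather than the live listing.

```python
import datetime
import praw

# Placeholder credentials, as in the previous sketch.
reddit = praw.Reddit(client_id="YOUR_CLIENT_ID",
                     client_secret="YOUR_CLIENT_SECRET",
                     user_agent="depression-dataset-scraper/0.1")

posts = []
for submission in reddit.subreddit("depression").new(limit=100):
    posts.append({
        "PID": submission.id,
        "title": submission.title,
        "text": submission.selftext,
        "url": submission.url,
        "date": datetime.datetime.utcfromtimestamp(submission.created_utc),
        "subreddit": str(submission.subreddit),
    })
```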
|
| 603 |
+
{
|
| 604 |
+
"type": "title",
|
| 605 |
+
"bbox": [
|
| 606 |
+
0.113,
|
| 607 |
+
0.37,
|
| 608 |
+
0.414,
|
| 609 |
+
0.387
|
| 610 |
+
],
|
| 611 |
+
"angle": 0,
|
| 612 |
+
"content": "3.1.5 Data pre-processing and exporting:"
|
| 613 |
+
},
|
| 614 |
+
{
|
| 615 |
+
"type": "text",
|
| 616 |
+
"bbox": [
|
| 617 |
+
0.111,
|
| 618 |
+
0.395,
|
| 619 |
+
0.884,
|
| 620 |
+
0.438
|
| 621 |
+
],
|
| 622 |
+
"angle": 0,
|
| 623 |
+
"content": "After collecting these data, the text and title part are pre-processed by removing the non-ASCII characters and emoticons to get a clean data set. The processed data is exported into a Comma Separated Values (.csv) format file with the five columns. The sample of the collected postings is shown in Table 2."
|
| 624 |
+
},
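A minimal sketch of the cleaning and export step, assuming the `posts` list of dictionaries from the previous sketch; dropping non-ASCII characters also removes emoji-style emoticons.

```python
import pandas as pd

def to_ascii(text: str) -> str:
    # Dropping non-ASCII code points also strips emoji/emoticons.
    return text.encode("ascii", errors="ignore").decode("ascii")

df = pd.DataFrame(posts)
df["title"] = df["title"].fillna("").map(to_ascii)
df["text"] = df["text"].fillna("").map(to_ascii)
df.to_csv("reddit_postings.csv", index=False)  # assumed output file name
```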
|
| 625 |
+
{
|
| 626 |
+
"type": "table_caption",
|
| 627 |
+
"bbox": [
|
| 628 |
+
0.397,
|
| 629 |
+
0.461,
|
| 630 |
+
0.6,
|
| 631 |
+
0.475
|
| 632 |
+
],
|
| 633 |
+
"angle": 0,
|
| 634 |
+
"content": "Table 2: Sample Postings data"
|
| 635 |
+
},
|
| 636 |
+
{
|
| 637 |
+
"type": "table",
|
| 638 |
+
"bbox": [
|
| 639 |
+
0.113,
|
| 640 |
+
0.475,
|
| 641 |
+
0.872,
|
| 642 |
+
0.773
|
| 643 |
+
],
|
| 644 |
+
"angle": 0,
|
| 645 |
+
"content": "<table><tr><td>Post ID</td><td>Title</td><td>Text</td><td>Url</td><td>Publish date</td><td>Subreddit</td></tr><tr><td>g69ppt</td><td>Don’t want to get of bed</td><td>I’m done with me crying all day and thinking to myself that I can’t do a thing and I don’t what to get out of bed at all</td><td>https://www.reddit.com/r/depression/comments/g69ppt/dont_want_to_get_of.bed/4</td><td>2020-04-23 02:51:32</td><td>depression</td></tr><tr><td>gb9zei</td><td>Today is a day where I feel emptier than on other days.</td><td>It’s like I am alone with all my problems. I am sad about the fact I can’t trust anyone and nobody could help me because I feel like nobody understand how I feel. Depression is holding me tight today..</td><td>https://www.reddit.com/r/depression/comments/gb9zei/today_is_a_day_where_i Feel_emptier _than_on_other/5</td><td>2020-05-01 08:10:06</td><td>depression</td></tr></table>"
|
| 646 |
+
},
|
| 647 |
+
{
|
| 648 |
+
"type": "title",
|
| 649 |
+
"bbox": [
|
| 650 |
+
0.113,
|
| 651 |
+
0.802,
|
| 652 |
+
0.273,
|
| 653 |
+
0.815
|
| 654 |
+
],
|
| 655 |
+
"angle": 0,
|
| 656 |
+
"content": "3.2 Data Annotation"
|
| 657 |
+
},
|
| 658 |
+
{
|
| 659 |
+
"type": "text",
|
| 660 |
+
"bbox": [
|
| 661 |
+
0.111,
|
| 662 |
+
0.828,
|
| 663 |
+
0.884,
|
| 664 |
+
0.911
|
| 665 |
+
],
|
| 666 |
+
"angle": 0,
|
| 667 |
+
"content": "After collecting the data, the data were annotated according to the signs of depression. Although all the postings were collected from subreddits that exhibit the characteristics of mental illness, there is a possibility of postings that do not confess or discuss depression. Thus, the collected postings data were annotated by two domain experts into three labels that denote the level of signs of depression namely \"Not depressed, Moderate and Severe\". Framing the annotation guidelines for postings data is difficult since the mental health of an individual has to be analyzed using his/her single postings. For annotating the data into three classes, the guidelines were formatted as follows:"
|
| 668 |
+
},
|
| 669 |
+
{
|
| 670 |
+
"type": "page_number",
|
| 671 |
+
"bbox": [
|
| 672 |
+
0.493,
|
| 673 |
+
0.936,
|
| 674 |
+
0.506,
|
| 675 |
+
0.948
|
| 676 |
+
],
|
| 677 |
+
"angle": 0,
|
| 678 |
+
"content": "4"
|
| 679 |
+
}
|
| 680 |
+
],
|
| 681 |
+
[
|
| 682 |
+
{
|
| 683 |
+
"type": "header",
|
| 684 |
+
"bbox": [
|
| 685 |
+
0.258,
|
| 686 |
+
0.044,
|
| 687 |
+
0.741,
|
| 688 |
+
0.058
|
| 689 |
+
],
|
| 690 |
+
"angle": 0,
|
| 691 |
+
"content": "Data set creation and Empirical analysis for detecting signs of depression"
|
| 692 |
+
},
|
| 693 |
+
{
|
| 694 |
+
"type": "header",
|
| 695 |
+
"bbox": [
|
| 696 |
+
0.803,
|
| 697 |
+
0.045,
|
| 698 |
+
0.882,
|
| 699 |
+
0.056
|
| 700 |
+
],
|
| 701 |
+
"angle": 0,
|
| 702 |
+
"content": "A PREPRINT"
|
| 703 |
+
},
|
| 704 |
+
{
|
| 705 |
+
"type": "title",
|
| 706 |
+
"bbox": [
|
| 707 |
+
0.113,
|
| 708 |
+
0.092,
|
| 709 |
+
0.341,
|
| 710 |
+
0.106
|
| 711 |
+
],
|
| 712 |
+
"angle": 0,
|
| 713 |
+
"content": "3.2.1 Label 1 - Not depressed :"
|
| 714 |
+
},
|
| 715 |
+
{
|
| 716 |
+
"type": "text",
|
| 717 |
+
"bbox": [
|
| 718 |
+
0.112,
|
| 719 |
+
0.117,
|
| 720 |
+
0.875,
|
| 721 |
+
0.132
|
| 722 |
+
],
|
| 723 |
+
"angle": 0,
|
| 724 |
+
"content": "The postings data will be annotated as \"Not Depressed\", if the postings data reflect one of the following mannerism:"
|
| 725 |
+
},
|
| 726 |
+
{
|
| 727 |
+
"type": "text",
|
| 728 |
+
"bbox": [
|
| 729 |
+
0.158,
|
| 730 |
+
0.144,
|
| 731 |
+
0.612,
|
| 732 |
+
0.158
|
| 733 |
+
],
|
| 734 |
+
"angle": 0,
|
| 735 |
+
"content": "- If the statements have only one or two lines about irrelevant topics."
|
| 736 |
+
},
|
| 737 |
+
{
|
| 738 |
+
"type": "text",
|
| 739 |
+
"bbox": [
|
| 740 |
+
0.158,
|
| 741 |
+
0.165,
|
| 742 |
+
0.596,
|
| 743 |
+
0.179
|
| 744 |
+
],
|
| 745 |
+
"angle": 0,
|
| 746 |
+
"content": "- If the statements reflect momentary feelings of present situation."
|
| 747 |
+
},
|
| 748 |
+
{
|
| 749 |
+
"type": "text",
|
| 750 |
+
"bbox": [
|
| 751 |
+
0.158,
|
| 752 |
+
0.187,
|
| 753 |
+
0.621,
|
| 754 |
+
0.201
|
| 755 |
+
],
|
| 756 |
+
"angle": 0,
|
| 757 |
+
"content": "- If the statements are about asking questions about any or medication"
|
| 758 |
+
},
|
| 759 |
+
{
|
| 760 |
+
"type": "text",
|
| 761 |
+
"bbox": [
|
| 762 |
+
0.158,
|
| 763 |
+
0.208,
|
| 764 |
+
0.577,
|
| 765 |
+
0.222
|
| 766 |
+
],
|
| 767 |
+
"angle": 0,
|
| 768 |
+
"content": "- If the statement is about ask/seek help for friend's difficulties."
|
| 769 |
+
},
|
| 770 |
+
{
|
| 771 |
+
"type": "list",
|
| 772 |
+
"bbox": [
|
| 773 |
+
0.158,
|
| 774 |
+
0.144,
|
| 775 |
+
0.621,
|
| 776 |
+
0.222
|
| 777 |
+
],
|
| 778 |
+
"angle": 0,
|
| 779 |
+
"content": null
|
| 780 |
+
},
|
| 781 |
+
{
|
| 782 |
+
"type": "title",
|
| 783 |
+
"bbox": [
|
| 784 |
+
0.14,
|
| 785 |
+
0.246,
|
| 786 |
+
0.222,
|
| 787 |
+
0.26
|
| 788 |
+
],
|
| 789 |
+
"angle": 0,
|
| 790 |
+
"content": "Example 1:"
|
| 791 |
+
},
|
| 792 |
+
{
|
| 793 |
+
"type": "text",
|
| 794 |
+
"bbox": [
|
| 795 |
+
0.139,
|
| 796 |
+
0.274,
|
| 797 |
+
0.37,
|
| 798 |
+
0.287
|
| 799 |
+
],
|
| 800 |
+
"angle": 0,
|
| 801 |
+
"content": "The holidays are the most difficult."
|
| 802 |
+
},
|
| 803 |
+
{
|
| 804 |
+
"type": "text",
|
| 805 |
+
"bbox": [
|
| 806 |
+
0.137,
|
| 807 |
+
0.287,
|
| 808 |
+
0.86,
|
| 809 |
+
0.468
|
| 810 |
+
],
|
| 811 |
+
"angle": 0,
|
| 812 |
+
"content": "Not a big reddit poster, but I felt like this has been past due for myself. The holidays honestly are so hard for me to get through. I've spent the last 6 years of major holidays alone. Mostly because of my retail job, I never get enough time off around the holidays to go home and spend it with family, nor have they been able to visit me. My condolences to anyone else spending this time of year alone no matter what the circumstances may be. I moved to a new state 9 months ago and it's been a tough struggle meeting new friends as I didn't know anyone here before I moved. Now it's new years and all of my \"friends\" I've made while here yet again flaked on me (was actually excited to have plans for the first time I remember in a while), which I recently found out has been a common occurrence of them just getting together without me. (Which I'm used to at this point, it is what it is). It just sucks knowing you're always the last choice in anyone's lives. And that my depression may be the cause of my 'boringness'/lack of interest my friends have towards me. Any tips on making friends for someone struggling mentally? I'm just tired of this constant weight of loneliness bearing down on me. I seriously can't remember the last time someone went out of their way to invite me to something. It seems like I'm always asking to tag along, and then I'm just a burden at that point, which is why I'm starting to lose all hope."
|
| 813 |
+
},
|
| 814 |
+
{
|
| 815 |
+
"type": "text",
|
| 816 |
+
"bbox": [
|
| 817 |
+
0.139,
|
| 818 |
+
0.48,
|
| 819 |
+
0.449,
|
| 820 |
+
0.495
|
| 821 |
+
],
|
| 822 |
+
"angle": 0,
|
| 823 |
+
"content": "Whoever takes the time to read this, thank you."
|
| 824 |
+
},
|
| 825 |
+
{
|
| 826 |
+
"type": "title",
|
| 827 |
+
"bbox": [
|
| 828 |
+
0.113,
|
| 829 |
+
0.527,
|
| 830 |
+
0.395,
|
| 831 |
+
0.541
|
| 832 |
+
],
|
| 833 |
+
"angle": 0,
|
| 834 |
+
"content": "3.2.2 Label 2 - Moderately depressed :"
|
| 835 |
+
},
|
| 836 |
+
{
|
| 837 |
+
"type": "text",
|
| 838 |
+
"bbox": [
|
| 839 |
+
0.112,
|
| 840 |
+
0.552,
|
| 841 |
+
0.813,
|
| 842 |
+
0.567
|
| 843 |
+
],
|
| 844 |
+
"angle": 0,
|
| 845 |
+
"content": "The postings data will be annotated as \"moderately depressed\", if the postings falls under these conditions:"
|
| 846 |
+
},
|
| 847 |
+
{
|
| 848 |
+
"type": "text",
|
| 849 |
+
"bbox": [
|
| 850 |
+
0.157,
|
| 851 |
+
0.579,
|
| 852 |
+
0.85,
|
| 853 |
+
0.594
|
| 854 |
+
],
|
| 855 |
+
"angle": 0,
|
| 856 |
+
"content": "- If the statements reflect change in feelings (feeling low for some time and feeling better for some time)."
|
| 857 |
+
},
|
| 858 |
+
{
|
| 859 |
+
"type": "text",
|
| 860 |
+
"bbox": [
|
| 861 |
+
0.157,
|
| 862 |
+
0.6,
|
| 863 |
+
0.729,
|
| 864 |
+
0.615
|
| 865 |
+
],
|
| 866 |
+
"angle": 0,
|
| 867 |
+
"content": "- If the statement shows that they aren't feeling completely immersed in any situations"
|
| 868 |
+
},
|
| 869 |
+
{
|
| 870 |
+
"type": "text",
|
| 871 |
+
"bbox": [
|
| 872 |
+
0.158,
|
| 873 |
+
0.621,
|
| 874 |
+
0.506,
|
| 875 |
+
0.636
|
| 876 |
+
],
|
| 877 |
+
"angle": 0,
|
| 878 |
+
"content": "- If the statements show that they have hope for life."
|
| 879 |
+
},
|
| 880 |
+
{
|
| 881 |
+
"type": "list",
|
| 882 |
+
"bbox": [
|
| 883 |
+
0.157,
|
| 884 |
+
0.579,
|
| 885 |
+
0.85,
|
| 886 |
+
0.636
|
| 887 |
+
],
|
| 888 |
+
"angle": 0,
|
| 889 |
+
"content": null
|
| 890 |
+
},
|
| 891 |
+
{
|
| 892 |
+
"type": "title",
|
| 893 |
+
"bbox": [
|
| 894 |
+
0.14,
|
| 895 |
+
0.66,
|
| 896 |
+
0.225,
|
| 897 |
+
0.674
|
| 898 |
+
],
|
| 899 |
+
"angle": 0,
|
| 900 |
+
"content": "Example 1:"
|
| 901 |
+
},
|
| 902 |
+
{
|
| 903 |
+
"type": "text",
|
| 904 |
+
"bbox": [
|
| 905 |
+
0.139,
|
| 906 |
+
0.687,
|
| 907 |
+
0.441,
|
| 908 |
+
0.701
|
| 909 |
+
],
|
| 910 |
+
"angle": 0,
|
| 911 |
+
"content": "If I disappeared today, would it really matter?"
|
| 912 |
+
},
|
| 913 |
+
{
|
| 914 |
+
"type": "text",
|
| 915 |
+
"bbox": [
|
| 916 |
+
0.137,
|
| 917 |
+
0.702,
|
| 918 |
+
0.858,
|
| 919 |
+
0.772
|
| 920 |
+
],
|
| 921 |
+
"angle": 0,
|
| 922 |
+
"content": "I'm just too tired to go on, but at the same time I'm too tired to end it. I always thought about this but with the quarantine I just realised it is true. My friends never felt close to me, just like the only two relationships I have ever been in. They never cared about me, to the point where I even asked for help and they just turned a blind eye. And my family isn't any better. I don't know what to do, and I believe it won't matter if I do something or not. I'm sorry if my English isn't good, it isn't my first language."
|
| 923 |
+
},
|
| 924 |
+
{
|
| 925 |
+
"type": "title",
|
| 926 |
+
"bbox": [
|
| 927 |
+
0.113,
|
| 928 |
+
0.814,
|
| 929 |
+
0.383,
|
| 930 |
+
0.829
|
| 931 |
+
],
|
| 932 |
+
"angle": 0,
|
| 933 |
+
"content": "3.2.3 Label - 3 : Severely depressed :"
|
| 934 |
+
},
|
| 935 |
+
{
|
| 936 |
+
"type": "text",
|
| 937 |
+
"bbox": [
|
| 938 |
+
0.112,
|
| 939 |
+
0.839,
|
| 940 |
+
0.79,
|
| 941 |
+
0.854
|
| 942 |
+
],
|
| 943 |
+
"angle": 0,
|
| 944 |
+
"content": "The data will be annotated as \"Severely depressed\", if the postings have one of the following scenarios:"
|
| 945 |
+
},
|
| 946 |
+
{
|
| 947 |
+
"type": "text",
|
| 948 |
+
"bbox": [
|
| 949 |
+
0.157,
|
| 950 |
+
0.866,
|
| 951 |
+
0.567,
|
| 952 |
+
0.88
|
| 953 |
+
],
|
| 954 |
+
"angle": 0,
|
| 955 |
+
"content": "- If the statements express more than one disorder conditions."
|
| 956 |
+
},
|
| 957 |
+
{
|
| 958 |
+
"type": "text",
|
| 959 |
+
"bbox": [
|
| 960 |
+
0.158,
|
| 961 |
+
0.887,
|
| 962 |
+
0.556,
|
| 963 |
+
0.902
|
| 964 |
+
],
|
| 965 |
+
"angle": 0,
|
| 966 |
+
"content": "- If the statements explain about history of suicide attempts."
|
| 967 |
+
},
|
| 968 |
+
{
|
| 969 |
+
"type": "list",
|
| 970 |
+
"bbox": [
|
| 971 |
+
0.157,
|
| 972 |
+
0.866,
|
| 973 |
+
0.567,
|
| 974 |
+
0.902
|
| 975 |
+
],
|
| 976 |
+
"angle": 0,
|
| 977 |
+
"content": null
|
| 978 |
+
},
|
| 979 |
+
{
|
| 980 |
+
"type": "page_number",
|
| 981 |
+
"bbox": [
|
| 982 |
+
0.494,
|
| 983 |
+
0.936,
|
| 984 |
+
0.504,
|
| 985 |
+
0.948
|
| 986 |
+
],
|
| 987 |
+
"angle": 0,
|
| 988 |
+
"content": "5"
|
| 989 |
+
}
|
| 990 |
+
],
|
| 991 |
+
[
|
| 992 |
+
{
|
| 993 |
+
"type": "header",
|
| 994 |
+
"bbox": [
|
| 995 |
+
0.258,
|
| 996 |
+
0.044,
|
| 997 |
+
0.741,
|
| 998 |
+
0.058
|
| 999 |
+
],
|
| 1000 |
+
"angle": 0,
|
| 1001 |
+
"content": "Data set creation and Empirical analysis for detecting signs of depression"
|
| 1002 |
+
},
|
| 1003 |
+
{
|
| 1004 |
+
"type": "header",
|
| 1005 |
+
"bbox": [
|
| 1006 |
+
0.803,
|
| 1007 |
+
0.045,
|
| 1008 |
+
0.882,
|
| 1009 |
+
0.056
|
| 1010 |
+
],
|
| 1011 |
+
"angle": 0,
|
| 1012 |
+
"content": "A PREPRINT"
|
| 1013 |
+
},
|
| 1014 |
+
{
|
| 1015 |
+
"type": "table_caption",
|
| 1016 |
+
"bbox": [
|
| 1017 |
+
0.275,
|
| 1018 |
+
0.098,
|
| 1019 |
+
0.723,
|
| 1020 |
+
0.112
|
| 1021 |
+
],
|
| 1022 |
+
"angle": 0,
|
| 1023 |
+
"content": "Table 3: Landis & Koch measurement table of inter rater agreement"
|
| 1024 |
+
},
|
| 1025 |
+
{
|
| 1026 |
+
"type": "table",
|
| 1027 |
+
"bbox": [
|
| 1028 |
+
0.331,
|
| 1029 |
+
0.113,
|
| 1030 |
+
0.664,
|
| 1031 |
+
0.257
|
| 1032 |
+
],
|
| 1033 |
+
"angle": 0,
|
| 1034 |
+
"content": "<table><tr><td>Kappa value (κ)</td><td>Strength of agreement</td></tr><tr><td>< 0</td><td>Poor</td></tr><tr><td>0.01 - 0.20</td><td>Slight</td></tr><tr><td>0.21 - 0.40</td><td>Fair</td></tr><tr><td>0.41 - 0.60</td><td>Moderate</td></tr><tr><td>0.61 - 0.80</td><td>Substantial</td></tr><tr><td>0.81 - 0.99</td><td>Almost perfect agreement</td></tr></table>"
|
| 1035 |
+
},
|
| 1036 |
+
{
|
| 1037 |
+
"type": "title",
|
| 1038 |
+
"bbox": [
|
| 1039 |
+
0.14,
|
| 1040 |
+
0.299,
|
| 1041 |
+
0.223,
|
| 1042 |
+
0.314
|
| 1043 |
+
],
|
| 1044 |
+
"angle": 0,
|
| 1045 |
+
"content": "Example 1:"
|
| 1046 |
+
},
|
| 1047 |
+
{
|
| 1048 |
+
"type": "text",
|
| 1049 |
+
"bbox": [
|
| 1050 |
+
0.139,
|
| 1051 |
+
0.327,
|
| 1052 |
+
0.308,
|
| 1053 |
+
0.341
|
| 1054 |
+
],
|
| 1055 |
+
"angle": 0,
|
| 1056 |
+
"content": "Getting depressed again?"
|
| 1057 |
+
},
|
| 1058 |
+
{
|
| 1059 |
+
"type": "text",
|
| 1060 |
+
"bbox": [
|
| 1061 |
+
0.137,
|
| 1062 |
+
0.34,
|
| 1063 |
+
0.861,
|
| 1064 |
+
0.48
|
| 1065 |
+
],
|
| 1066 |
+
"angle": 0,
|
| 1067 |
+
"content": "So I'm 22F and I have taken antidepressants the last time 4 years ago. I've had ups and downs when I got off and with 19 I was having a rough time for two months - started drinking and smoking weed a lot. Kinda managed to get back on track then and haven't been feeling too bad until now. Lately I've been feeling kinda blue and started making mistakes or have to go through stuff multiple times to do it correctly or to be able to remember it. Currently I'm having a week off and have to go back to work on monday. I just don't know I feel like I'm getting worse and want to sleep most of the time and at first I thought it's because I'm used to working a lot, but when I think about having to go back soon I feel like throwing up and at the same time doing nothing also doesn't sit well with me. I guess I'm kinda scared at the moment because I don't want to feel like I was feeling years ago and I still don't feel comfortable with my own mind and don't trust myself that I'm strong enough to pull through if depression hits me again."
|
| 1068 |
+
},
|
| 1069 |
+
{
|
| 1070 |
+
"type": "title",
|
| 1071 |
+
"bbox": [
|
| 1072 |
+
0.113,
|
| 1073 |
+
0.521,
|
| 1074 |
+
0.308,
|
| 1075 |
+
0.536
|
| 1076 |
+
],
|
| 1077 |
+
"angle": 0,
|
| 1078 |
+
"content": "3.3 Inter-rater agreement"
|
| 1079 |
+
},
|
| 1080 |
+
{
|
| 1081 |
+
"type": "text",
|
| 1082 |
+
"bbox": [
|
| 1083 |
+
0.112,
|
| 1084 |
+
0.55,
|
| 1085 |
+
0.886,
|
| 1086 |
+
0.606
|
| 1087 |
+
],
|
| 1088 |
+
"angle": 0,
|
| 1089 |
+
"content": "After annotating the data, inter-rater agreement was calculated between the decisions of two judges using kappa coefficient estimated using a per-annotator empirical prior over the class labels Artstein and Poesio [2008]. Inter-rater agreement<sup>6</sup> is the degree of agreement among independent observers who rate, code, or assess the same phenomenon. The inter rater agreement is measured using Cohen's kappa statistics Cohen [1960]."
|
| 1090 |
+
},
|
| 1091 |
+
{
|
| 1092 |
+
"type": "text",
|
| 1093 |
+
"bbox": [
|
| 1094 |
+
0.112,
|
| 1095 |
+
0.611,
|
| 1096 |
+
0.886,
|
| 1097 |
+
0.669
|
| 1098 |
+
],
|
| 1099 |
+
"angle": 0,
|
| 1100 |
+
"content": "The inter-rater agreement between the annotations was calculated using sklearn Pedregosa et al. [2011a]. For our annotation, the kappa value \\((\\kappa)\\) is 0.686. According to Landis & Koch Landis and Koch [1977] in the Table 3, the \\(\\kappa\\) value denotes substantial agreement between the annotators, which proves the consistency of labeling according to the annotation guidelines. Thus, the mutually annotated instances form the corpus."
|
| 1101 |
+
},
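The agreement computation described above can be reproduced with scikit-learn's `cohen_kappa_score`; the label lists below are toy placeholders, not the paper's actual annotations.

```python
from sklearn.metrics import cohen_kappa_score

# Toy example: labels assigned by the two judges to the same five postings.
judge_1 = ["moderate", "severe", "not depressed", "moderate", "moderate"]
judge_2 = ["moderate", "severe", "not depressed", "severe", "moderate"]

kappa = cohen_kappa_score(judge_1, judge_2)
print(f"Cohen's kappa: {kappa:.3f}")
```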
|
| 1102 |
+
{
|
| 1103 |
+
"type": "title",
|
| 1104 |
+
"bbox": [
|
| 1105 |
+
0.113,
|
| 1106 |
+
0.694,
|
| 1107 |
+
0.27,
|
| 1108 |
+
0.709
|
| 1109 |
+
],
|
| 1110 |
+
"angle": 0,
|
| 1111 |
+
"content": "3.4 Corpus Analysis"
|
| 1112 |
+
},
|
| 1113 |
+
{
|
| 1114 |
+
"type": "text",
|
| 1115 |
+
"bbox": [
|
| 1116 |
+
0.112,
|
| 1117 |
+
0.723,
|
| 1118 |
+
0.885,
|
| 1119 |
+
0.766
|
| 1120 |
+
],
|
| 1121 |
+
"angle": 0,
|
| 1122 |
+
"content": "Initially 20,088 instances of postings data were annotated, out of which 16,613 instances were found to be mutually annotated instances by the two judges, and thus they were considered as instances of data set with their corresponding labels. Table 4 shows the complete statistics of the corpus."
|
| 1123 |
+
},
|
| 1124 |
+
{
|
| 1125 |
+
"type": "text",
|
| 1126 |
+
"bbox": [
|
| 1127 |
+
0.112,
|
| 1128 |
+
0.771,
|
| 1129 |
+
0.885,
|
| 1130 |
+
0.856
|
| 1131 |
+
],
|
| 1132 |
+
"angle": 0,
|
| 1133 |
+
"content": "The whole corpus has 1,56,676 sentences with 26,59,938 words which shows the size of the corpus created. In the corpus, each posting with its labels is considered as each instance in the corpus. An instance in the corpus will have an average of 9.42 sentences each that varies in the range of 1 to 260 sentences with an average of 159.92 words that lies between 1 to 5065 words. The distribution of the three class labels in the data set is shown in Figure 1. As shown in figure, the data set is unbalanced with 10,494 instances of \"moderately depressed\" class, 1489 instances of \"severely depressed\" class and 4649 instances of \"Not depressed\" class which also includes some duplicate instances."
|
| 1134 |
+
},
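The per-instance statistics quoted above (sentence and word counts, their ranges and averages) can be recomputed from the exported CSV with a short script. The sketch below assumes the file and column names from Section 3.1.5 and uses NLTK's tokenizers.

```python
import nltk
import pandas as pd
from nltk.tokenize import sent_tokenize, word_tokenize

nltk.download("punkt")  # sentence/word tokenizer models

df = pd.read_csv("reddit_postings.csv")  # assumed export from Section 3.1.5
texts = df["title"].fillna("") + " " + df["text"].fillna("")

sent_counts = texts.map(lambda t: len(sent_tokenize(t)))
word_counts = texts.map(lambda t: len(word_tokenize(t)))

print("sentences per instance:", sent_counts.min(), "-", sent_counts.max(),
      "avg", round(sent_counts.mean(), 2))
print("words per instance:", word_counts.min(), "-", word_counts.max(),
      "avg", round(word_counts.mean(), 2))
```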
|
| 1135 |
+
{
|
| 1136 |
+
"type": "page_footnote",
|
| 1137 |
+
"bbox": [
|
| 1138 |
+
0.133,
|
| 1139 |
+
0.893,
|
| 1140 |
+
0.437,
|
| 1141 |
+
0.909
|
| 1142 |
+
],
|
| 1143 |
+
"angle": 0,
|
| 1144 |
+
"content": "<sup>6</sup>https://en.wikipedia.org/wiki/Interrater_reliability"
|
| 1145 |
+
},
|
| 1146 |
+
{
|
| 1147 |
+
"type": "page_number",
|
| 1148 |
+
"bbox": [
|
| 1149 |
+
0.494,
|
| 1150 |
+
0.937,
|
| 1151 |
+
0.506,
|
| 1152 |
+
0.948
|
| 1153 |
+
],
|
| 1154 |
+
"angle": 0,
|
| 1155 |
+
"content": "6"
|
| 1156 |
+
}
|
| 1157 |
+
],
|
| 1158 |
+
[
|
| 1159 |
+
{
|
| 1160 |
+
"type": "header",
|
| 1161 |
+
"bbox": [
|
| 1162 |
+
0.258,
|
| 1163 |
+
0.044,
|
| 1164 |
+
0.741,
|
| 1165 |
+
0.058
|
| 1166 |
+
],
|
| 1167 |
+
"angle": 0,
|
| 1168 |
+
"content": "Data set creation and Empirical analysis for detecting signs of depression"
|
| 1169 |
+
},
|
| 1170 |
+
{
|
| 1171 |
+
"type": "header",
|
| 1172 |
+
"bbox": [
|
| 1173 |
+
0.803,
|
| 1174 |
+
0.045,
|
| 1175 |
+
0.882,
|
| 1176 |
+
0.056
|
| 1177 |
+
],
|
| 1178 |
+
"angle": 0,
|
| 1179 |
+
"content": "A PREPRINT"
|
| 1180 |
+
},
|
| 1181 |
+
{
|
| 1182 |
+
"type": "table_caption",
|
| 1183 |
+
"bbox": [
|
| 1184 |
+
0.396,
|
| 1185 |
+
0.098,
|
| 1186 |
+
0.602,
|
| 1187 |
+
0.112
|
| 1188 |
+
],
|
| 1189 |
+
"angle": 0,
|
| 1190 |
+
"content": "Table 4: Postings data analysis"
|
| 1191 |
+
},
|
| 1192 |
+
{
|
| 1193 |
+
"type": "table",
|
| 1194 |
+
"bbox": [
|
| 1195 |
+
0.28,
|
| 1196 |
+
0.113,
|
| 1197 |
+
0.714,
|
| 1198 |
+
0.411
|
| 1199 |
+
],
|
| 1200 |
+
"angle": 0,
|
| 1201 |
+
"content": "<table><tr><td>Category</td><td>Count</td></tr><tr><td>Total number of instances annotated</td><td>20,088</td></tr><tr><td>Data set instances \n(number of instances mutually annotated)</td><td>16,632</td></tr><tr><td>Total number of sentences</td><td>1,56,676</td></tr><tr><td>Total number of words</td><td>26,59,938</td></tr><tr><td>Total number of stop-words</td><td>12,47,016</td></tr><tr><td>Total number of words other than stop-words</td><td>14,12,922</td></tr><tr><td>Total number of unique words</td><td>28,415</td></tr><tr><td>Total number of unique stop-words</td><td>150</td></tr><tr><td>Total number of unique words other than stop-words</td><td>28,265</td></tr><tr><td>Range of sentences per instance</td><td>1 - 260</td></tr><tr><td>Range of words per instance</td><td>1 - 5065</td></tr><tr><td>Average number of sentences per posting instance</td><td>9.42</td></tr><tr><td>Average number of words per posting instance</td><td>159.92</td></tr></table>"
|
| 1202 |
+
},
|
| 1203 |
+
{
|
| 1204 |
+
"type": "image",
|
| 1205 |
+
"bbox": [
|
| 1206 |
+
0.338,
|
| 1207 |
+
0.434,
|
| 1208 |
+
0.665,
|
| 1209 |
+
0.584
|
| 1210 |
+
],
|
| 1211 |
+
"angle": 0,
|
| 1212 |
+
"content": null
|
| 1213 |
+
},
|
| 1214 |
+
{
|
| 1215 |
+
"type": "image_caption",
|
| 1216 |
+
"bbox": [
|
| 1217 |
+
0.342,
|
| 1218 |
+
0.598,
|
| 1219 |
+
0.657,
|
| 1220 |
+
0.614
|
| 1221 |
+
],
|
| 1222 |
+
"angle": 0,
|
| 1223 |
+
"content": "Figure 1: Class wise distribution of the data set"
|
| 1224 |
+
},
|
| 1225 |
+
{
|
| 1226 |
+
"type": "title",
|
| 1227 |
+
"bbox": [
|
| 1228 |
+
0.114,
|
| 1229 |
+
0.643,
|
| 1230 |
+
0.271,
|
| 1231 |
+
0.657
|
| 1232 |
+
],
|
| 1233 |
+
"angle": 0,
|
| 1234 |
+
"content": "3.5 Base line models"
|
| 1235 |
+
},
|
| 1236 |
+
{
|
| 1237 |
+
"type": "text",
|
| 1238 |
+
"bbox": [
|
| 1239 |
+
0.111,
|
| 1240 |
+
0.67,
|
| 1241 |
+
0.885,
|
| 1242 |
+
0.726
|
| 1243 |
+
],
|
| 1244 |
+
"angle": 0,
|
| 1245 |
+
"content": "The data set has been evaluated using traditional models which are considered as baseline models. The data set has four columns namely id, title, text and class label. For implementation, the title data and text data are initially combined. The combined text data is pre-processed, extracted features, balanced, classified using traditional classifiers and evaluated by cross validation."
|
| 1246 |
+
},
|
| 1247 |
+
{
|
| 1248 |
+
"type": "title",
|
| 1249 |
+
"bbox": [
|
| 1250 |
+
0.113,
|
| 1251 |
+
0.744,
|
| 1252 |
+
0.314,
|
| 1253 |
+
0.76
|
| 1254 |
+
],
|
| 1255 |
+
"angle": 0,
|
| 1256 |
+
"content": "3.5.1 Data Pre-processing:"
|
| 1257 |
+
},
|
| 1258 |
+
{
|
| 1259 |
+
"type": "text",
|
| 1260 |
+
"bbox": [
|
| 1261 |
+
0.111,
|
| 1262 |
+
0.769,
|
| 1263 |
+
0.885,
|
| 1264 |
+
0.84
|
| 1265 |
+
],
|
| 1266 |
+
"angle": 0,
|
| 1267 |
+
"content": "The title and text column are combined together as a single text data column by filling the \"NA\" instances of both title and text data. The combined text data is cleaned by converting the words to lower case letters and removing unwanted punctuation, \"[removed]\" tags, web links, HTML links, stop words and small words (words with length less than two). After cleaning, the instances are tokenized using regextokenizer Pedregosa et al. [2011b], stemmed using porter stemmer Porter [1980] and lemmatized using wordnet lemmatizer."
|
| 1268 |
+
},
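A condensed sketch of that cleaning pipeline; the regex tokenizer, Porter stemmer and WordNet lemmatizer are taken from NLTK here, and the exact cleaning regexes are assumptions standing in for the steps listed above.

```python
import re
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from nltk.stem import PorterStemmer, WordNetLemmatizer

nltk.download("stopwords")
nltk.download("wordnet")

tokenizer = RegexpTokenizer(r"\w+")
stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words("english"))

def preprocess(text: str) -> list[str]:
    text = text.lower()
    text = re.sub(r"\[removed\]", " ", text)                # drop "[removed]" tags
    text = re.sub(r"http\S+|www\.\S+|<[^>]+>", " ", text)   # drop links and HTML
    tokens = tokenizer.tokenize(text)
    tokens = [t for t in tokens if t not in stop_words and len(t) >= 2]
    tokens = [stemmer.stem(t) for t in tokens]
    return [lemmatizer.lemmatize(t) for t in tokens]

print(preprocess("I feel empty today... see https://example.com [removed]"))
```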
|
| 1269 |
+
{
|
| 1270 |
+
"type": "title",
|
| 1271 |
+
"bbox": [
|
| 1272 |
+
0.113,
|
| 1273 |
+
0.857,
|
| 1274 |
+
0.303,
|
| 1275 |
+
0.872
|
| 1276 |
+
],
|
| 1277 |
+
"angle": 0,
|
| 1278 |
+
"content": "3.5.2 Feature extraction:"
|
| 1279 |
+
},
|
| 1280 |
+
{
|
| 1281 |
+
"type": "text",
|
| 1282 |
+
"bbox": [
|
| 1283 |
+
0.111,
|
| 1284 |
+
0.882,
|
| 1285 |
+
0.884,
|
| 1286 |
+
0.911
|
| 1287 |
+
],
|
| 1288 |
+
"angle": 0,
|
| 1289 |
+
"content": "The features were extracted using three vectorizers namely Word2Vec, Term Frequency - Inverse Document Frequency (TF-IDF) vectorizer and Glove Pennington et al. [2014] vectorizer."
|
| 1290 |
+
},
|
| 1291 |
+
{
|
| 1292 |
+
"type": "page_number",
|
| 1293 |
+
"bbox": [
|
| 1294 |
+
0.494,
|
| 1295 |
+
0.936,
|
| 1296 |
+
0.505,
|
| 1297 |
+
0.948
|
| 1298 |
+
],
|
| 1299 |
+
"angle": 0,
|
| 1300 |
+
"content": "7"
|
| 1301 |
+
}
|
| 1302 |
+
],
|
| 1303 |
+
[
|
| 1304 |
+
{
|
| 1305 |
+
"type": "header",
|
| 1306 |
+
"bbox": [
|
| 1307 |
+
0.258,
|
| 1308 |
+
0.044,
|
| 1309 |
+
0.741,
|
| 1310 |
+
0.058
|
| 1311 |
+
],
|
| 1312 |
+
"angle": 0,
|
| 1313 |
+
"content": "Data set creation and Empirical analysis for detecting signs of depression"
|
| 1314 |
+
},
|
| 1315 |
+
{
|
| 1316 |
+
"type": "header",
|
| 1317 |
+
"bbox": [
|
| 1318 |
+
0.803,
|
| 1319 |
+
0.045,
|
| 1320 |
+
0.882,
|
| 1321 |
+
0.056
|
| 1322 |
+
],
|
| 1323 |
+
"angle": 0,
|
| 1324 |
+
"content": "A PREPRINT"
|
| 1325 |
+
},
|
| 1326 |
+
{
|
| 1327 |
+
"type": "text",
|
| 1328 |
+
"bbox": [
|
| 1329 |
+
0.157,
|
| 1330 |
+
0.092,
|
| 1331 |
+
0.88,
|
| 1332 |
+
0.12
|
| 1333 |
+
],
|
| 1334 |
+
"angle": 0,
|
| 1335 |
+
"content": "- Word2Vec: It produces a vector that represents the context of the word considering the occurrence of the word. The vectors are generated using Continuous Bag Of Words."
|
| 1336 |
+
},
|
| 1337 |
+
{
|
| 1338 |
+
"type": "text",
|
| 1339 |
+
"bbox": [
|
| 1340 |
+
0.157,
|
| 1341 |
+
0.123,
|
| 1342 |
+
0.88,
|
| 1343 |
+
0.164
|
| 1344 |
+
],
|
| 1345 |
+
"angle": 0,
|
| 1346 |
+
"content": "- TF-IDF: It produces a score considering the occurrence of the word in the document. It is based on the relevance of a topic in a particular document. The vectors are calculated using four grams considering a maximum of 2000 features."
|
| 1347 |
+
},
|
| 1348 |
+
{
|
| 1349 |
+
"type": "text",
|
| 1350 |
+
"bbox": [
|
| 1351 |
+
0.157,
|
| 1352 |
+
0.169,
|
| 1353 |
+
0.88,
|
| 1354 |
+
0.209
|
| 1355 |
+
],
|
| 1356 |
+
"angle": 0,
|
| 1357 |
+
"content": "- Glove: It produces the word embeddings considering the occurrence and co-occurrence of the words with reduced dimensionality. The words are mapped to a word embedding using 6 Billion pre-trained tokens with 100 features each."
|
| 1358 |
+
},
|
| 1359 |
+
{
|
| 1360 |
+
"type": "list",
|
| 1361 |
+
"bbox": [
|
| 1362 |
+
0.157,
|
| 1363 |
+
0.092,
|
| 1364 |
+
0.88,
|
| 1365 |
+
0.209
|
| 1366 |
+
],
|
| 1367 |
+
"angle": 0,
|
| 1368 |
+
"content": null
|
| 1369 |
+
},
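As one concrete instance of the feature extraction above, the TF-IDF variant can be configured with scikit-learn as below, reading "four grams" as word n-grams up to length four with a 2000-feature cap; the Word2Vec and Glove variants would instead come from gensim and the pre-trained glove.6B.100d vectors, which are not shown here. The two documents are toy examples, not dataset instances.

```python
from sklearn.feature_extraction.text import TfidfVectorizer

# TF-IDF over word n-grams up to length four, capped at 2000 features,
# mirroring the configuration described in the text.
tfidf = TfidfVectorizer(ngram_range=(1, 4), max_features=2000)

docs = [
    "i feel empty and alone today",
    "had a great day with my friends",
]
X = tfidf.fit_transform(docs)
print(X.shape)  # (n_documents, n_features)
```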
|
| 1370 |
+
{
|
| 1371 |
+
"type": "title",
|
| 1372 |
+
"bbox": [
|
| 1373 |
+
0.114,
|
| 1374 |
+
0.224,
|
| 1375 |
+
0.245,
|
| 1376 |
+
0.239
|
| 1377 |
+
],
|
| 1378 |
+
"angle": 0,
|
| 1379 |
+
"content": "3.5.3 Classifiers:"
|
| 1380 |
+
},
|
| 1381 |
+
{
|
| 1382 |
+
"type": "text",
|
| 1383 |
+
"bbox": [
|
| 1384 |
+
0.112,
|
| 1385 |
+
0.248,
|
| 1386 |
+
0.885,
|
| 1387 |
+
0.289
|
| 1388 |
+
],
|
| 1389 |
+
"angle": 0,
|
| 1390 |
+
"content": "Twelve different classifiers that include Ada Boost Classifier, Decision Tree, Gaussian Naive Bayes, K-Nearest Neighbour, linear-Support Vector Machine, Linear Deterministic Analysis, Logistic Regression, Multi-layer Perceptron, Qua-"
|
| 1391 |
+
},
|
| 1392 |
+
{
|
| 1393 |
+
"type": "text",
|
| 1394 |
+
"bbox": [
|
| 1395 |
+
0.112,
|
| 1396 |
+
0.29,
|
| 1397 |
+
0.884,
|
| 1398 |
+
0.318
|
| 1399 |
+
],
|
| 1400 |
+
"angle": 0,
|
| 1401 |
+
"content": "dratic Deterministic Analysis, Radial Basis Function - Support Vector Machine and Random Forest of Scikit-learn Pedregosa et al. [2011b] were used for classification."
|
| 1402 |
+
},
|
| 1403 |
+
{
|
| 1404 |
+
"type": "text",
|
| 1405 |
+
"bbox": [
|
| 1406 |
+
0.157,
|
| 1407 |
+
0.327,
|
| 1408 |
+
0.88,
|
| 1409 |
+
0.368
|
| 1410 |
+
],
|
| 1411 |
+
"angle": 0,
|
| 1412 |
+
"content": "- Ada Boost Classifier(ABC): The Adaptive Boosting algorithm is a collection of N estimator models that assigns higher weights to the mis-classified samples in the next model. In our implementation, 100 estimator models with t0 random state at a learning rate of 0.1 were used to fine tune the model."
|
| 1413 |
+
},
|
| 1414 |
+
{
|
| 1415 |
+
"type": "text",
|
| 1416 |
+
"bbox": [
|
| 1417 |
+
0.157,
|
| 1418 |
+
0.372,
|
| 1419 |
+
0.88,
|
| 1420 |
+
0.44
|
| 1421 |
+
],
|
| 1422 |
+
"angle": 0,
|
| 1423 |
+
"content": "- Decision Tree (DT): The decision tree classifier predicts the target value based on the decision rules that was formed using features to identify the target variable. The decision rules are formed using gini index and entropy for information gain. For implementing the decision trees, the decision tree classifier was fine tuned with two splits of minimum samples of one leaf node each by calculating gini to choose the best split and random state as 0."
|
| 1424 |
+
},
|
| 1425 |
+
{
|
| 1426 |
+
"type": "text",
|
| 1427 |
+
"bbox": [
|
| 1428 |
+
0.157,
|
| 1429 |
+
0.445,
|
| 1430 |
+
0.88,
|
| 1431 |
+
0.472
|
| 1432 |
+
],
|
| 1433 |
+
"angle": 0,
|
| 1434 |
+
"content": "- Gaussian Naive Bayes (GNB): The Gaussian normal distribution variant of Naive Bayes classifier that depends on the Bayes theorem is Gaussian Naive Bayes."
|
| 1435 |
+
},
|
| 1436 |
+
{
|
| 1437 |
+
"type": "text",
|
| 1438 |
+
"bbox": [
|
| 1439 |
+
0.157,
|
| 1440 |
+
0.476,
|
| 1441 |
+
0.88,
|
| 1442 |
+
0.517
|
| 1443 |
+
],
|
| 1444 |
+
"angle": 0,
|
| 1445 |
+
"content": "- K-Nearest Neighbour(KNN): KNN classifies the data point by plotting them and finding the similarity between the data points. In implementation, number of neighbours were set as three with equal weights and euclidean distance as metric to calculate distance."
|
| 1446 |
+
},
|
| 1447 |
+
{
|
| 1448 |
+
"type": "text",
|
| 1449 |
+
"bbox": [
|
| 1450 |
+
0.157,
|
| 1451 |
+
0.522,
|
| 1452 |
+
0.88,
|
| 1453 |
+
0.577
|
| 1454 |
+
],
|
| 1455 |
+
"angle": 0,
|
| 1456 |
+
"content": "- Logistic Regression (LR): The probabilistic model that predicts the class label based on the sigmoid function for binary classification. As our data set are multi-class data sets, multi-nominal logistic regression was used to evaluate the data sets. For implementation, the classifier was trained with a tolerance of 1e-4, 1.0 as inverse of regularization strength and intercept scaling as 1."
|
| 1457 |
+
},
|
| 1458 |
+
{
|
| 1459 |
+
"type": "text",
|
| 1460 |
+
"bbox": [
|
| 1461 |
+
0.157,
|
| 1462 |
+
0.581,
|
| 1463 |
+
0.88,
|
| 1464 |
+
0.621
|
| 1465 |
+
],
|
| 1466 |
+
"angle": 0,
|
| 1467 |
+
"content": "- Multi-layer Perceptron (MLP): The artificial neural network that is trained to predict the class label along with back propagation of error. The multi-layer perceptron of two layers of 100 hidden nodes each was trained with relu activation function, adam optimizer, learning rate of 0.001 for a maximum 300 iterations."
|
| 1468 |
+
},
|
| 1469 |
+
{
|
| 1470 |
+
"type": "text",
|
| 1471 |
+
"bbox": [
|
| 1472 |
+
0.157,
|
| 1473 |
+
0.626,
|
| 1474 |
+
0.88,
|
| 1475 |
+
0.667
|
| 1476 |
+
],
|
| 1477 |
+
"angle": 0,
|
| 1478 |
+
"content": "- Discriminant Analysis: The generative model that utilizes Gaussian distribution for classification by assuming each class has a different co-variance. For implementation, the co-variance is calculated with threshold of 1.0e-04. Linear DA (LDA) and Quadratic DA (QDA) both were implemented."
|
| 1479 |
+
},
|
| 1480 |
+
{
|
| 1481 |
+
"type": "text",
|
| 1482 |
+
"bbox": [
|
| 1483 |
+
0.157,
|
| 1484 |
+
0.672,
|
| 1485 |
+
0.88,
|
| 1486 |
+
0.712
|
| 1487 |
+
],
|
| 1488 |
+
"angle": 0,
|
| 1489 |
+
"content": "- Support Vector Machine: The supervised model that projects the data into higher dimensions and then classifies using hyper-planes. The model was trained with RBF kernel (RBF-SVM) and linear kernel (L-SVM) function of three degree, 0.1 regularization parameter without any specifying any maximum iterations."
|
| 1490 |
+
},
|
| 1491 |
+
{
|
| 1492 |
+
"type": "text",
|
| 1493 |
+
"bbox": [
|
| 1494 |
+
0.157,
|
| 1495 |
+
0.717,
|
| 1496 |
+
0.88,
|
| 1497 |
+
0.758
|
| 1498 |
+
],
|
| 1499 |
+
"angle": 0,
|
| 1500 |
+
"content": "- Random Forest (RF): Random Forest combines many decision trees as in ensemble method to generate predictions. It overcomes the limitation of decision trees by bagging and bootstrap aggregation. It was implemented with 100 number of estimators."
|
| 1501 |
+
},
|
| 1502 |
+
{
|
| 1503 |
+
"type": "list",
|
| 1504 |
+
"bbox": [
|
| 1505 |
+
0.157,
|
| 1506 |
+
0.327,
|
| 1507 |
+
0.88,
|
| 1508 |
+
0.758
|
| 1509 |
+
],
|
| 1510 |
+
"angle": 0,
|
| 1511 |
+
"content": null
|
| 1512 |
+
},
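A few of the classifiers above, instantiated with the hyperparameters stated in the list, as a sketch; scikit-learn defaults are assumed for anything the text does not specify.

```python
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC

classifiers = {
    "ABC": AdaBoostClassifier(n_estimators=100, learning_rate=0.1, random_state=0),
    "KNN": KNeighborsClassifier(n_neighbors=3, weights="uniform", metric="euclidean"),
    "MLP": MLPClassifier(hidden_layer_sizes=(100, 100), activation="relu",
                         solver="adam", learning_rate_init=0.001, max_iter=300),
    "RBF-SVM": SVC(kernel="rbf", C=0.1, degree=3),
    "RF": RandomForestClassifier(n_estimators=100),
}
```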
|
| 1513 |
+
{
|
| 1514 |
+
"type": "title",
|
| 1515 |
+
"bbox": [
|
| 1516 |
+
0.112,
|
| 1517 |
+
0.778,
|
| 1518 |
+
0.385,
|
| 1519 |
+
0.794
|
| 1520 |
+
],
|
| 1521 |
+
"angle": 0,
|
| 1522 |
+
"content": "4 Implementation and Results"
|
| 1523 |
+
},
|
| 1524 |
+
{
|
| 1525 |
+
"type": "text",
|
| 1526 |
+
"bbox": [
|
| 1527 |
+
0.112,
|
| 1528 |
+
0.808,
|
| 1529 |
+
0.884,
|
| 1530 |
+
0.851
|
| 1531 |
+
],
|
| 1532 |
+
"angle": 0,
|
| 1533 |
+
"content": "The features extracted in subsection 3.5.2 are classified using the above classifiers in subsection 3.5.3 and evaluated using stratified k-fold sampling of Scikit-learn Pedregosa et al. [2011b]. In this validation, data are split into 10 folds and the evaluation results with respect to weighted average F1-score is tabulated in Table 5."
|
| 1534 |
+
},
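A minimal sketch of the 10-fold stratified evaluation with the weighted F1-score; random toy features and labels stand in for the vectorized postings so the snippet runs on its own.

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_score

# Toy stand-ins for the vectorized postings and their three-class labels.
rng = np.random.default_rng(0)
X = rng.normal(size=(300, 100))
y = rng.integers(0, 3, size=300)

clf = RandomForestClassifier(n_estimators=100, random_state=0)
cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)
scores = cross_val_score(clf, X, y, cv=cv, scoring="f1_weighted")
print(f"weighted F1: {scores.mean():.3f} +/- {scores.std():.3f}")
```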
|
| 1535 |
+
{
|
| 1536 |
+
"type": "text",
|
| 1537 |
+
"bbox": [
|
| 1538 |
+
0.112,
|
| 1539 |
+
0.856,
|
| 1540 |
+
0.884,
|
| 1541 |
+
0.912
|
| 1542 |
+
],
|
| 1543 |
+
"angle": 0,
|
| 1544 |
+
"content": "From the Table 5, it is clear that the model with Random Forest Classifier and Multi-Layer Perceptron (MLP) applied on the features extracted using Glove performs equally well with an F1-score of 0.647. The performance of the models with accuracy as metric is shown in Table 6. From the table, it is clear that the model with Random Forest classifier and Glove vectorizer performs better with an accuracy of 0.760."
|
| 1545 |
+
},
|
| 1546 |
+
{
|
| 1547 |
+
"type": "page_number",
|
| 1548 |
+
"bbox": [
|
| 1549 |
+
0.494,
|
| 1550 |
+
0.936,
|
| 1551 |
+
0.505,
|
| 1552 |
+
0.948
|
| 1553 |
+
],
|
| 1554 |
+
"angle": 0,
|
| 1555 |
+
"content": "8"
|
| 1556 |
+
}
|
| 1557 |
+
],
|
| 1558 |
+
[
|
| 1559 |
+
{
|
| 1560 |
+
"type": "header",
|
| 1561 |
+
"bbox": [
|
| 1562 |
+
0.258,
|
| 1563 |
+
0.044,
|
| 1564 |
+
0.741,
|
| 1565 |
+
0.058
|
| 1566 |
+
],
|
| 1567 |
+
"angle": 0,
|
| 1568 |
+
"content": "Data set creation and Empirical analysis for detecting signs of depression"
|
| 1569 |
+
},
|
| 1570 |
+
{
|
| 1571 |
+
"type": "header",
|
| 1572 |
+
"bbox": [
|
| 1573 |
+
0.803,
|
| 1574 |
+
0.045,
|
| 1575 |
+
0.882,
|
| 1576 |
+
0.056
|
| 1577 |
+
],
|
| 1578 |
+
"angle": 0,
|
| 1579 |
+
"content": "A PREPRINT"
|
| 1580 |
+
},
|
| 1581 |
+
{
|
| 1582 |
+
"type": "table_caption",
|
| 1583 |
+
"bbox": [
|
| 1584 |
+
0.365,
|
| 1585 |
+
0.098,
|
| 1586 |
+
0.632,
|
| 1587 |
+
0.111
|
| 1588 |
+
],
|
| 1589 |
+
"angle": 0,
|
| 1590 |
+
"content": "Table 5: F1 score of all baseline models"
|
| 1591 |
+
},
|
| 1592 |
+
{
|
| 1593 |
+
"type": "table",
|
| 1594 |
+
"bbox": [
|
| 1595 |
+
0.331,
|
| 1596 |
+
0.112,
|
| 1597 |
+
0.664,
|
| 1598 |
+
0.357
|
| 1599 |
+
],
|
| 1600 |
+
"angle": 0,
|
| 1601 |
+
"content": "<table><tr><td>F1 - score</td><td>TF-IDF</td><td>Glove</td><td>Word2Vec</td></tr><tr><td>ABC</td><td>0.451</td><td>0.496</td><td>0.451</td></tr><tr><td>DT</td><td>0.469</td><td>0.614</td><td>0.469</td></tr><tr><td>GNB</td><td>0.290</td><td>0.415</td><td>0.302</td></tr><tr><td>KNN</td><td>0.549</td><td>0.604</td><td>0.594</td></tr><tr><td>L-SVM</td><td>0.273</td><td>0.309</td><td>0.273</td></tr><tr><td>LDA</td><td>0.391</td><td>0.395</td><td>0.391</td></tr><tr><td>LR</td><td>0.395</td><td>0.329</td><td>0.395</td></tr><tr><td>MLP</td><td>0.625</td><td>0.647</td><td>0.625</td></tr><tr><td>QDA</td><td>0.368</td><td>0.459</td><td>0.368</td></tr><tr><td>RBF -SVM</td><td>0.452</td><td>0.560</td><td>0.452</td></tr><tr><td>RF</td><td>0.449</td><td>0.647</td><td>0.456</td></tr></table>"
|
| 1602 |
+
},
|
| 1603 |
+
{
|
| 1604 |
+
"type": "table_caption",
|
| 1605 |
+
"bbox": [
|
| 1606 |
+
0.362,
|
| 1607 |
+
0.383,
|
| 1608 |
+
0.634,
|
| 1609 |
+
0.397
|
| 1610 |
+
],
|
| 1611 |
+
"angle": 0,
|
| 1612 |
+
"content": "Table 6: Accuracy of all baseline models"
|
| 1613 |
+
},
|
| 1614 |
+
{
|
| 1615 |
+
"type": "table",
|
| 1616 |
+
"bbox": [
|
| 1617 |
+
0.329,
|
| 1618 |
+
0.397,
|
| 1619 |
+
0.67,
|
| 1620 |
+
0.642
|
| 1621 |
+
],
|
| 1622 |
+
"angle": 0,
|
| 1623 |
+
"content": "<table><tr><td>Accuracy</td><td>TF-IDF</td><td>Glove</td><td>Word2Vec</td></tr><tr><td>ABC</td><td>0.616</td><td>0.654</td><td>0.616</td></tr><tr><td>DT</td><td>0.579</td><td>0.697</td><td>0.579</td></tr><tr><td>GNB</td><td>0.351</td><td>0.464</td><td>0.351</td></tr><tr><td>KNN</td><td>0.695</td><td>0.717</td><td>0.694</td></tr><tr><td>L-SVM</td><td>0.623</td><td>0.646</td><td>0.623</td></tr><tr><td>LDA</td><td>0.619</td><td>0.659</td><td>0.619</td></tr><tr><td>LR</td><td>0.619</td><td>0.650</td><td>0.619</td></tr><tr><td>MLP</td><td>0.700</td><td>0.754</td><td>0.700</td></tr><tr><td>QDA</td><td>0.485</td><td>0.499</td><td>0.485</td></tr><tr><td>RBF -SVM</td><td>0.667</td><td>0.733</td><td>0.667</td></tr><tr><td>RF</td><td>0.689</td><td>0.760</td><td>0.695</td></tr></table>"
|
| 1624 |
+
},
|
| 1625 |
+
{
|
| 1626 |
+
"type": "title",
|
| 1627 |
+
"bbox": [
|
| 1628 |
+
0.113,
|
| 1629 |
+
0.671,
|
| 1630 |
+
0.329,
|
| 1631 |
+
0.687
|
| 1632 |
+
],
|
| 1633 |
+
"angle": 0,
|
| 1634 |
+
"content": "4.1 With Data augmentation"
|
| 1635 |
+
},
|
| 1636 |
+
{
|
| 1637 |
+
"type": "text",
|
| 1638 |
+
"bbox": [
|
| 1639 |
+
0.111,
|
| 1640 |
+
0.699,
|
| 1641 |
+
0.885,
|
| 1642 |
+
0.743
|
| 1643 |
+
],
|
| 1644 |
+
"angle": 0,
|
| 1645 |
+
"content": "The postings data is populated with more \"moderately depressed\" instances and thus, the data has to be balanced before classification for better performance. For balancing the data, Synthetic Minority Oversampling Technique (SMOTE) Chawla et al. [2002] was applied after vectorization. The effect of augmentation is shown in Figure 2."
|
| 1646 |
+
},
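SMOTE oversampling as described above is available in imbalanced-learn and is applied to the already-vectorized features; a minimal sketch on toy imbalanced data, where the real pipeline would pass the TF-IDF, Glove or Word2Vec features instead.

```python
from collections import Counter

import numpy as np
from imblearn.over_sampling import SMOTE

# Toy imbalanced data standing in for the vectorized postings:
# class 0 = moderately depressed, 1 = not depressed, 2 = severely depressed.
rng = np.random.default_rng(0)
X = rng.normal(size=(200, 50))
y = np.array([0] * 130 + [1] * 50 + [2] * 20)

X_res, y_res = SMOTE(random_state=0).fit_resample(X, y)
print("before:", Counter(y), "after:", Counter(y_res))
```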
|
| 1647 |
+
{
|
| 1648 |
+
"type": "image",
|
| 1649 |
+
"bbox": [
|
| 1650 |
+
0.158,
|
| 1651 |
+
0.767,
|
| 1652 |
+
0.281,
|
| 1653 |
+
0.865
|
| 1654 |
+
],
|
| 1655 |
+
"angle": 0,
|
| 1656 |
+
"content": null
|
| 1657 |
+
},
|
| 1658 |
+
{
|
| 1659 |
+
"type": "image_caption",
|
| 1660 |
+
"bbox": [
|
| 1661 |
+
0.232,
|
| 1662 |
+
0.87,
|
| 1663 |
+
0.402,
|
| 1664 |
+
0.884
|
| 1665 |
+
],
|
| 1666 |
+
"angle": 0,
|
| 1667 |
+
"content": "(a) Before applying SMOTE"
|
| 1668 |
+
},
|
| 1669 |
+
{
|
| 1670 |
+
"type": "image",
|
| 1671 |
+
"bbox": [
|
| 1672 |
+
0.517,
|
| 1673 |
+
0.768,
|
| 1674 |
+
0.637,
|
| 1675 |
+
0.858
|
| 1676 |
+
],
|
| 1677 |
+
"angle": 0,
|
| 1678 |
+
"content": null
|
| 1679 |
+
},
|
| 1680 |
+
{
|
| 1681 |
+
"type": "image_footnote",
|
| 1682 |
+
"bbox": [
|
| 1683 |
+
0.662,
|
| 1684 |
+
0.782,
|
| 1685 |
+
0.776,
|
| 1686 |
+
0.797
|
| 1687 |
+
],
|
| 1688 |
+
"angle": 0,
|
| 1689 |
+
"content": "Not depressed"
|
| 1690 |
+
},
|
| 1691 |
+
{
|
| 1692 |
+
"type": "image_footnote",
|
| 1693 |
+
"bbox": [
|
| 1694 |
+
0.662,
|
| 1695 |
+
0.8,
|
| 1696 |
+
0.826,
|
| 1697 |
+
0.814
|
| 1698 |
+
],
|
| 1699 |
+
"angle": 0,
|
| 1700 |
+
"content": "Moderately depressed"
|
| 1701 |
+
},
|
| 1702 |
+
{
|
| 1703 |
+
"type": "image_footnote",
|
| 1704 |
+
"bbox": [
|
| 1705 |
+
0.662,
|
| 1706 |
+
0.818,
|
| 1707 |
+
0.808,
|
| 1708 |
+
0.833
|
| 1709 |
+
],
|
| 1710 |
+
"angle": 0,
|
| 1711 |
+
"content": "Severely depressed"
|
| 1712 |
+
},
|
| 1713 |
+
{
|
| 1714 |
+
"type": "list",
|
| 1715 |
+
"bbox": [
|
| 1716 |
+
0.662,
|
| 1717 |
+
0.782,
|
| 1718 |
+
0.826,
|
| 1719 |
+
0.833
|
| 1720 |
+
],
|
| 1721 |
+
"angle": 0,
|
| 1722 |
+
"content": null
|
| 1723 |
+
},
|
| 1724 |
+
{
|
| 1725 |
+
"type": "image_caption",
|
| 1726 |
+
"bbox": [
|
| 1727 |
+
0.599,
|
| 1728 |
+
0.87,
|
| 1729 |
+
0.763,
|
| 1730 |
+
0.884
|
| 1731 |
+
],
|
| 1732 |
+
"angle": 0,
|
| 1733 |
+
"content": "(b) After applying SMOTE"
|
| 1734 |
+
},
|
| 1735 |
+
{
|
| 1736 |
+
"type": "image_caption",
|
| 1737 |
+
"bbox": [
|
| 1738 |
+
0.373,
|
| 1739 |
+
0.892,
|
| 1740 |
+
0.625,
|
| 1741 |
+
0.907
|
| 1742 |
+
],
|
| 1743 |
+
"angle": 0,
|
| 1744 |
+
"content": "Figure 2: Effect of data augmentation"
|
| 1745 |
+
},
|
| 1746 |
+
{
|
| 1747 |
+
"type": "page_number",
|
| 1748 |
+
"bbox": [
|
| 1749 |
+
0.494,
|
| 1750 |
+
0.936,
|
| 1751 |
+
0.505,
|
| 1752 |
+
0.948
|
| 1753 |
+
],
|
| 1754 |
+
"angle": 0,
|
| 1755 |
+
"content": "9"
|
| 1756 |
+
}
|
| 1757 |
+
],
|
| 1758 |
+
[
|
| 1759 |
+
{
|
| 1760 |
+
"type": "header",
|
| 1761 |
+
"bbox": [
|
| 1762 |
+
0.258,
|
| 1763 |
+
0.044,
|
| 1764 |
+
0.741,
|
| 1765 |
+
0.058
|
| 1766 |
+
],
|
| 1767 |
+
"angle": 0,
|
| 1768 |
+
"content": "Data set creation and Empirical analysis for detecting signs of depression"
|
| 1769 |
+
},
|
| 1770 |
+
{
|
| 1771 |
+
"type": "header",
|
| 1772 |
+
"bbox": [
|
| 1773 |
+
0.803,
|
| 1774 |
+
0.045,
|
| 1775 |
+
0.882,
|
| 1776 |
+
0.056
|
| 1777 |
+
],
|
| 1778 |
+
"angle": 0,
|
| 1779 |
+
"content": "A PREPRINT"
|
| 1780 |
+
},
|
| 1781 |
+
{
|
| 1782 |
+
"type": "table_caption",
|
| 1783 |
+
"bbox": [
|
| 1784 |
+
0.287,
|
| 1785 |
+
0.098,
|
| 1786 |
+
0.71,
|
| 1787 |
+
0.112
|
| 1788 |
+
],
|
| 1789 |
+
"angle": 0,
|
| 1790 |
+
"content": "Table 7: F1-score of all baseline models after data augmentation"
|
| 1791 |
+
},
|
| 1792 |
+
{
|
| 1793 |
+
"type": "table",
|
| 1794 |
+
"bbox": [
|
| 1795 |
+
0.333,
|
| 1796 |
+
0.113,
|
| 1797 |
+
0.665,
|
| 1798 |
+
0.357
|
| 1799 |
+
],
|
| 1800 |
+
"angle": 0,
|
| 1801 |
+
"content": "<table><tr><td>F1 - score</td><td>TF-IDF</td><td>Glove</td><td>Word2Vec</td></tr><tr><td>ABC</td><td>0.263</td><td>0.622</td><td>0.559</td></tr><tr><td>DT</td><td>0.273</td><td>0.772</td><td>0.721</td></tr><tr><td>GNB</td><td>0.271</td><td>0.449</td><td>0.389</td></tr><tr><td>KNN</td><td>0.258</td><td>0.814</td><td>0.834</td></tr><tr><td>L-SVM</td><td>0.273</td><td>0.570</td><td>0.642</td></tr><tr><td>LDA</td><td>0.270</td><td>0.550</td><td>0.540</td></tr><tr><td>LR</td><td>0.270</td><td>0.544</td><td>0.551</td></tr><tr><td>MLP</td><td>0.269</td><td>0.775</td><td>0.852</td></tr><tr><td>QDA</td><td>0.276</td><td>0.592</td><td>0.477</td></tr><tr><td>RBF -SVM</td><td>0.273</td><td>0.762</td><td>0.788</td></tr><tr><td>RF</td><td>0.272</td><td>0.854</td><td>0.877</td></tr></table>"
|
| 1802 |
+
},
|
| 1803 |
+
{
|
| 1804 |
+
"type": "table_caption",
|
| 1805 |
+
"bbox": [
|
| 1806 |
+
0.284,
|
| 1807 |
+
0.388,
|
| 1808 |
+
0.713,
|
| 1809 |
+
0.403
|
| 1810 |
+
],
|
| 1811 |
+
"angle": 0,
|
| 1812 |
+
"content": "Table 8: Accuracy of all baseline models after data augmentation"
|
| 1813 |
+
},
|
| 1814 |
+
{
|
| 1815 |
+
"type": "table",
|
| 1816 |
+
"bbox": [
|
| 1817 |
+
0.333,
|
| 1818 |
+
0.404,
|
| 1819 |
+
0.665,
|
| 1820 |
+
0.647
|
| 1821 |
+
],
|
| 1822 |
+
"angle": 0,
|
| 1823 |
+
"content": "<table><tr><td>Accuracy</td><td>TF-IDF</td><td>Glove</td><td>Word2Vec</td></tr><tr><td>ABC</td><td>0.384</td><td>0.628</td><td>0.562</td></tr><tr><td>DT</td><td>0.388</td><td>0.781</td><td>0.728</td></tr><tr><td>GNB</td><td>0.388</td><td>0.479</td><td>0.427</td></tr><tr><td>KNN</td><td>0.379</td><td>0.839</td><td>0.854</td></tr><tr><td>L-SVM</td><td>0.388</td><td>0.575</td><td>0.642</td></tr><tr><td>LDA</td><td>0.388</td><td>0.550</td><td>0.550</td></tr><tr><td>LR</td><td>0.387</td><td>0.547</td><td>0.559</td></tr><tr><td>MLP</td><td>0.386</td><td>0.780</td><td>0.857</td></tr><tr><td>QDA</td><td>0.393</td><td>0.615</td><td>0.497</td></tr><tr><td>RBF-SVM</td><td>0.388</td><td>0.769</td><td>0.792</td></tr><tr><td>RF</td><td>0.388</td><td>0.864</td><td>0.877</td></tr></table>"
|
| 1824 |
+
},
|
| 1825 |
+
{
|
| 1826 |
+
"type": "text",
|
| 1827 |
+
"bbox": [
|
| 1828 |
+
0.112,
|
| 1829 |
+
0.681,
|
| 1830 |
+
0.884,
|
| 1831 |
+
0.74
|
| 1832 |
+
],
|
| 1833 |
+
"angle": 0,
|
| 1834 |
+
"content": "The features extracted in subsection 3.5.2 are augmented using SMOTE and then classified using the classifiers in subsection 3.5.3. The performance of these models in terms of F1-score and accuracy after data augmentation are shown in Table 7 and 8 respectively. From the tables, it is clear that the performance was improved and model with Random Forest classifier applied on the features extracted using Word2Vec performs well with a score of 0.877."
|
| 1835 |
+
},
|
| 1836 |
+
{
|
| 1837 |
+
"type": "title",
|
| 1838 |
+
"bbox": [
|
| 1839 |
+
0.113,
|
| 1840 |
+
0.767,
|
| 1841 |
+
0.295,
|
| 1842 |
+
0.786
|
| 1843 |
+
],
|
| 1844 |
+
"angle": 0,
|
| 1845 |
+
"content": "5 Research insights"
|
| 1846 |
+
},
|
| 1847 |
+
{
|
| 1848 |
+
"type": "text",
|
| 1849 |
+
"bbox": [
|
| 1850 |
+
0.112,
|
| 1851 |
+
0.805,
|
| 1852 |
+
0.679,
|
| 1853 |
+
0.82
|
| 1854 |
+
],
|
| 1855 |
+
"angle": 0,
|
| 1856 |
+
"content": "The researchers can further extend this work by implementing the following methods:"
|
| 1857 |
+
},
|
| 1858 |
+
{
|
| 1859 |
+
"type": "text",
|
| 1860 |
+
"bbox": [
|
| 1861 |
+
0.158,
|
| 1862 |
+
0.836,
|
| 1863 |
+
0.611,
|
| 1864 |
+
0.851
|
| 1865 |
+
],
|
| 1866 |
+
"angle": 0,
|
| 1867 |
+
"content": "- Extend the data set by considering the images along with text data."
|
| 1868 |
+
},
|
| 1869 |
+
{
|
| 1870 |
+
"type": "text",
|
| 1871 |
+
"bbox": [
|
| 1872 |
+
0.158,
|
| 1873 |
+
0.865,
|
| 1874 |
+
0.49,
|
| 1875 |
+
0.879
|
| 1876 |
+
],
|
| 1877 |
+
"angle": 0,
|
| 1878 |
+
"content": "- Implement deep learning models in the data set."
|
| 1879 |
+
},
|
| 1880 |
+
{
|
| 1881 |
+
"type": "text",
|
| 1882 |
+
"bbox": [
|
| 1883 |
+
0.158,
|
| 1884 |
+
0.893,
|
| 1885 |
+
0.651,
|
| 1886 |
+
0.908
|
| 1887 |
+
],
|
| 1888 |
+
"angle": 0,
|
| 1889 |
+
"content": "- Implement other methods of data augmentation to improve performance."
|
| 1890 |
+
},
|
| 1891 |
+
{
|
| 1892 |
+
"type": "list",
|
| 1893 |
+
"bbox": [
|
| 1894 |
+
0.158,
|
| 1895 |
+
0.836,
|
| 1896 |
+
0.651,
|
| 1897 |
+
0.908
|
| 1898 |
+
],
|
| 1899 |
+
"angle": 0,
|
| 1900 |
+
"content": null
|
| 1901 |
+
},
|
| 1902 |
+
{
|
| 1903 |
+
"type": "page_number",
|
| 1904 |
+
"bbox": [
|
| 1905 |
+
0.49,
|
| 1906 |
+
0.936,
|
| 1907 |
+
0.509,
|
| 1908 |
+
0.948
|
| 1909 |
+
],
|
| 1910 |
+
"angle": 0,
|
| 1911 |
+
"content": "10"
|
| 1912 |
+
}
|
| 1913 |
+
],
|
| 1914 |
+
[
|
| 1915 |
+
{
|
| 1916 |
+
"type": "header",
|
| 1917 |
+
"bbox": [
|
| 1918 |
+
0.258,
|
| 1919 |
+
0.044,
|
| 1920 |
+
0.741,
|
| 1921 |
+
0.058
|
| 1922 |
+
],
|
| 1923 |
+
"angle": 0,
|
| 1924 |
+
"content": "Data set creation and Empirical analysis for detecting signs of depression"
|
| 1925 |
+
},
|
| 1926 |
+
{
|
| 1927 |
+
"type": "header",
|
| 1928 |
+
"bbox": [
|
| 1929 |
+
0.803,
|
| 1930 |
+
0.045,
|
| 1931 |
+
0.882,
|
| 1932 |
+
0.056
|
| 1933 |
+
],
|
| 1934 |
+
"angle": 0,
|
| 1935 |
+
"content": "A PREPRINT"
|
| 1936 |
+
},
|
| 1937 |
+
{
|
| 1938 |
+
"type": "title",
|
| 1939 |
+
"bbox": [
|
| 1940 |
+
0.113,
|
| 1941 |
+
0.09,
|
| 1942 |
+
0.25,
|
| 1943 |
+
0.106
|
| 1944 |
+
],
|
| 1945 |
+
"angle": 0,
|
| 1946 |
+
"content": "6 Conclusions"
|
| 1947 |
+
},
|
| 1948 |
+
{
|
| 1949 |
+
"type": "text",
|
| 1950 |
+
"bbox": [
|
| 1951 |
+
0.111,
|
| 1952 |
+
0.122,
|
| 1953 |
+
0.885,
|
| 1954 |
+
0.33
|
| 1955 |
+
],
|
| 1956 |
+
"angle": 0,
|
| 1957 |
+
"content": "Depression is a common mental illness that has to be detected and treated early to avoid serious consequences. Among the other ways of detecting, diagnosing mental health using their social media data seems much more effective since it involves less involvement of the individual. All the existing systems are designed to detect depression from social media texts. Although detecting depression is more important, detecting the level of depression also has its equal significance. Thus, we propose a data set that not only detects depression from social media but also analyzes the level of depression. For creating the data set, the data was collected from subreddits and annotated by domain experts into three levels of depression, namely not depressed, moderately depressed and severely depressed. An empirical analysis of traditional learning algorithms was also done for evaluating the data sets. Among the models, the model with Glove vectorizer and Random Forest classifier performs well with a F1-score of 0.647 and accuracy of 0.760. While analyzing the data set, \"the moderately depressed\" class seems to be highly populated than the classes and so, a data augmentation method named SMOTE was applied, and the performance is analyzed. Data augmentation improved the performance by \\(23\\%\\) and \\(12\\%\\) in terms of F1-score and accuracy respectively, with both F1-score and accuracy of 0.877. The data set can also be extended by considering the images along with texts for more accurate detection. The work can be extended further by implementing other traditional learning and deep learning models. Other augmentation techniques can also be experimented with for improving the performance of the model."
|
| 1958 |
+
},
|
| 1959 |
+
{
|
| 1960 |
+
"type": "title",
|
| 1961 |
+
"bbox": [
|
| 1962 |
+
0.113,
|
| 1963 |
+
0.351,
|
| 1964 |
+
0.285,
|
| 1965 |
+
0.369
|
| 1966 |
+
],
|
| 1967 |
+
"angle": 0,
|
| 1968 |
+
"content": "Data set availability"
|
| 1969 |
+
},
|
| 1970 |
+
{
|
| 1971 |
+
"type": "text",
|
| 1972 |
+
"bbox": [
|
| 1973 |
+
0.111,
|
| 1974 |
+
0.383,
|
| 1975 |
+
0.884,
|
| 1976 |
+
0.412
|
| 1977 |
+
],
|
| 1978 |
+
"angle": 0,
|
| 1979 |
+
"content": "The data set is available to the public in a repository of a Github in the link: https://github.com/Kayal-Sampath/detecting-signs-of-depression-from-social-media-postings."
|
| 1980 |
+
},
|
| 1981 |
+
{
|
| 1982 |
+
"type": "title",
|
| 1983 |
+
"bbox": [
|
| 1984 |
+
0.114,
|
| 1985 |
+
0.433,
|
| 1986 |
+
0.212,
|
| 1987 |
+
0.449
|
| 1988 |
+
],
|
| 1989 |
+
"angle": 0,
|
| 1990 |
+
"content": "References"
|
| 1991 |
+
},
|
| 1992 |
+
{
|
| 1993 |
+
"type": "ref_text",
|
| 1994 |
+
"bbox": [
|
| 1995 |
+
0.115,
|
| 1996 |
+
0.458,
|
| 1997 |
+
0.887,
|
| 1998 |
+
0.487
|
| 1999 |
+
],
|
| 2000 |
+
"angle": 0,
|
| 2001 |
+
"content": "American psychiatric association. https://www.psychiatry.org/patients-families/depression/what-is-depression. (Accessed: 2021-11-17)."
|
| 2002 |
+
},
|
| 2003 |
+
{
|
| 2004 |
+
"type": "ref_text",
|
| 2005 |
+
"bbox": [
|
| 2006 |
+
0.115,
|
| 2007 |
+
0.492,
|
| 2008 |
+
0.889,
|
| 2009 |
+
0.535
|
| 2010 |
+
],
|
| 2011 |
+
"angle": 0,
|
| 2012 |
+
"content": "Institute of health metrics and evaluation. global health data exchange (ghdx). http://ghdx.healthdata.org/gbd-results-tool?params=gbd-api-2019-permalink/d780cffbe8a381b25e1416884959e88b. (Accessed: 2021-11-17)."
|
| 2013 |
+
},
|
| 2014 |
+
{
|
| 2015 |
+
"type": "ref_text",
|
| 2016 |
+
"bbox": [
|
| 2017 |
+
0.115,
|
| 2018 |
+
0.54,
|
| 2019 |
+
0.885,
|
| 2020 |
+
0.569
|
| 2021 |
+
],
|
| 2022 |
+
"angle": 0,
|
| 2023 |
+
"content": "Tuka Al Hanai, Mohammad M Ghassemi, and James R Glass. Detecting depression with audio/text sequence modeling of interviews. In Interspeech, pages 1716-1720, 2018."
|
| 2024 |
+
},
|
| 2025 |
+
{
|
| 2026 |
+
"type": "ref_text",
|
| 2027 |
+
"bbox": [
|
| 2028 |
+
0.115,
|
| 2029 |
+
0.574,
|
| 2030 |
+
0.885,
|
| 2031 |
+
0.616
|
| 2032 |
+
],
|
| 2033 |
+
"angle": 0,
|
| 2034 |
+
"content": "Hamdi Dibeklioglu, Zakia Hammal, Ying Yang, and Jeffrey F Cohn. Multimodal detection of depression in clinical interviews. In Proceedings of the 2015 ACM on international conference on multimodal interaction, pages 307-310, 2015."
|
| 2035 |
+
},
|
| 2036 |
+
{
|
| 2037 |
+
"type": "ref_text",
|
| 2038 |
+
"bbox": [
|
| 2039 |
+
0.114,
|
| 2040 |
+
0.622,
|
| 2041 |
+
0.885,
|
| 2042 |
+
0.665
|
| 2043 |
+
],
|
| 2044 |
+
"angle": 0,
|
| 2045 |
+
"content": "Sharifa Alghowinem, Roland Goecke, Michael Wagner, Julien Epps, Matthew Hyett, Gordon Parker, and Michael Breakspear. Multimodal depression detection: fusion analysis of paralinguistic, head pose and eye gaze behaviors. IEEE Transactions on Affective Computing, 9(4):478-490, 2016."
|
| 2046 |
+
},
|
| 2047 |
+
{
|
| 2048 |
+
"type": "ref_text",
|
| 2049 |
+
"bbox": [
|
| 2050 |
+
0.114,
|
| 2051 |
+
0.67,
|
| 2052 |
+
0.885,
|
| 2053 |
+
0.713
|
| 2054 |
+
],
|
| 2055 |
+
"angle": 0,
|
| 2056 |
+
"content": "Md Nasir, Arindam Jati, Prashanth Gurunath Shivakumar, Sandeep Nallan Chakravarthula, and Panayiotis Georgiou. Multimodal and multiresolution depression detection from speech and facial landmark features. In Proceedings of the 6th international workshop on audio/visual emotion challenge, pages 43-50, 2016."
|
| 2057 |
+
},
|
| 2058 |
+
{
|
| 2059 |
+
"type": "ref_text",
|
| 2060 |
+
"bbox": [
|
| 2061 |
+
0.114,
|
| 2062 |
+
0.718,
|
| 2063 |
+
0.885,
|
| 2064 |
+
0.748
|
| 2065 |
+
],
|
| 2066 |
+
"angle": 0,
|
| 2067 |
+
"content": "Jana M Havigerová, Jiří Haviger, Dalibor Kucera, and Petra Hoffmannová. Text-based detection of the risk of depression. Frontiers in psychology, 10:513, 2019."
|
| 2068 |
+
},
|
| 2069 |
+
{
|
| 2070 |
+
"type": "ref_text",
|
| 2071 |
+
"bbox": [
|
| 2072 |
+
0.114,
|
| 2073 |
+
0.752,
|
| 2074 |
+
0.885,
|
| 2075 |
+
0.796
|
| 2076 |
+
],
|
| 2077 |
+
"angle": 0,
|
| 2078 |
+
"content": "Maxim Stankevich, Andrey Latyshev, Evgenia Kuminskaya, Ivan Smirnov, and Oleg Grigoriev. Depression detection from social media texts. In Data Analytics and Management in Data Intensive Domains: XXI International Conference DAMDID/RCDL-2019, page 352, 2019."
|
| 2079 |
+
},
|
| 2080 |
+
{
|
| 2081 |
+
"type": "ref_text",
|
| 2082 |
+
"bbox": [
|
| 2083 |
+
0.114,
|
| 2084 |
+
0.8,
|
| 2085 |
+
0.885,
|
| 2086 |
+
0.831
|
| 2087 |
+
],
|
| 2088 |
+
"angle": 0,
|
| 2089 |
+
"content": "Michelle Renee Morales and Rivka Levitan. Speech vs. text: A comparative analysis of features for depression detection systems. In 2016 IEEE spoken language technology workshop (SLT), pages 136-143. IEEE, 2016."
|
| 2090 |
+
},
|
| 2091 |
+
{
|
| 2092 |
+
"type": "ref_text",
|
| 2093 |
+
"bbox": [
|
| 2094 |
+
0.114,
|
| 2095 |
+
0.835,
|
| 2096 |
+
0.956,
|
| 2097 |
+
0.864
|
| 2098 |
+
],
|
| 2099 |
+
"angle": 0,
|
| 2100 |
+
"content": "Statista statistics. https://www.statista.com/statistics/278414/number-of-worldwide-social-network-users/. (Accessed: 2021-11-17)."
|
| 2101 |
+
},
|
| 2102 |
+
{
|
| 2103 |
+
"type": "ref_text",
|
| 2104 |
+
"bbox": [
|
| 2105 |
+
0.114,
|
| 2106 |
+
0.869,
|
| 2107 |
+
0.885,
|
| 2108 |
+
0.913
|
| 2109 |
+
],
|
| 2110 |
+
"angle": 0,
|
| 2111 |
+
"content": "JT Wolohan, Misato Hiraga, Atreyee Mukherjee, Zeeshan Ali Sayyed, and Matthew Millard. Detecting linguistic traces of depression in topic-restricted text: Attending to self-stigmatized depression with nlp. In Proceedings of the First International Workshop on Language Cognition and Computational Models, pages 11-21, 2018."
|
| 2112 |
+
},
|
| 2113 |
+
{
|
| 2114 |
+
"type": "list",
|
| 2115 |
+
"bbox": [
|
| 2116 |
+
0.114,
|
| 2117 |
+
0.458,
|
| 2118 |
+
0.956,
|
| 2119 |
+
0.913
|
| 2120 |
+
],
|
| 2121 |
+
"angle": 0,
|
| 2122 |
+
"content": null
|
| 2123 |
+
},
|
| 2124 |
+
{
|
| 2125 |
+
"type": "page_number",
|
| 2126 |
+
"bbox": [
|
| 2127 |
+
0.49,
|
| 2128 |
+
0.936,
|
| 2129 |
+
0.508,
|
| 2130 |
+
0.948
|
| 2131 |
+
],
|
| 2132 |
+
"angle": 0,
|
| 2133 |
+
"content": "11"
|
| 2134 |
+
}
|
| 2135 |
+
],
|
| 2136 |
+
[
|
| 2137 |
+
{
|
| 2138 |
+
"type": "header",
|
| 2139 |
+
"bbox": [
|
| 2140 |
+
0.258,
|
| 2141 |
+
0.044,
|
| 2142 |
+
0.741,
|
| 2143 |
+
0.058
|
| 2144 |
+
],
|
| 2145 |
+
"angle": 0,
|
| 2146 |
+
"content": "Data set creation and Empirical analysis for detecting signs of depression"
|
| 2147 |
+
},
|
| 2148 |
+
{
|
| 2149 |
+
"type": "header",
|
| 2150 |
+
"bbox": [
|
| 2151 |
+
0.803,
|
| 2152 |
+
0.045,
|
| 2153 |
+
0.882,
|
| 2154 |
+
0.056
|
| 2155 |
+
],
|
| 2156 |
+
"angle": 0,
|
| 2157 |
+
"content": "A PREPRINT"
|
| 2158 |
+
},
|
| 2159 |
+
{
|
| 2160 |
+
"type": "ref_text",
|
| 2161 |
+
"bbox": [
|
| 2162 |
+
0.115,
|
| 2163 |
+
0.091,
|
| 2164 |
+
0.885,
|
| 2165 |
+
0.135
|
| 2166 |
+
],
|
| 2167 |
+
"angle": 0,
|
| 2168 |
+
"content": "Johannes C Eichstaedt, Robert J Smith, Raina M Merchant, Lyle H Ungar, Patrick Crutchley, Daniel Preotciuc-Pietro, David A Asch, and H Andrew Schwartz. Facebook language predicts depression in medical records. Proceedings of the National Academy of Sciences, 115(44):11203-11208, 2018."
|
| 2169 |
+
},
|
| 2170 |
+
{
|
| 2171 |
+
"type": "ref_text",
|
| 2172 |
+
"bbox": [
|
| 2173 |
+
0.115,
|
| 2174 |
+
0.137,
|
| 2175 |
+
0.882,
|
| 2176 |
+
0.168
|
| 2177 |
+
],
|
| 2178 |
+
"angle": 0,
|
| 2179 |
+
"content": "Andrew G Reece, Andrew J Reagan, Katharina LM Lix, Peter Sheridan Dodds, Christopher M Danforth, and Ellen J Langer. Forecasting the onset and course of mental illness with twitter data. Scientific reports, 7(1):1-11, 2017."
|
| 2180 |
+
},
|
| 2181 |
+
{
|
| 2182 |
+
"type": "ref_text",
|
| 2183 |
+
"bbox": [
|
| 2184 |
+
0.115,
|
| 2185 |
+
0.171,
|
| 2186 |
+
0.882,
|
| 2187 |
+
0.213
|
| 2188 |
+
],
|
| 2189 |
+
"angle": 0,
|
| 2190 |
+
"content": "Sho Tsugawa, Yusuke Kikuchi, Fumio Kishino, Kosuke Nakajima, Yuichi Itoh, and Hiroyuki Ohsaki. Recognizing depression from twitter activity. In Proceedings of the 33rd annual ACM conference on human factors in computing systems, pages 3187-3196, 2015."
|
| 2191 |
+
},
|
| 2192 |
+
{
|
| 2193 |
+
"type": "ref_text",
|
| 2194 |
+
"bbox": [
|
| 2195 |
+
0.116,
|
| 2196 |
+
0.217,
|
| 2197 |
+
0.882,
|
| 2198 |
+
0.247
|
| 2199 |
+
],
|
| 2200 |
+
"angle": 0,
|
| 2201 |
+
"content": "Mandar Deshpande and Vignesh Rao. Depression detection using emotion artificial intelligence. In 2017 international conference on intelligent sustainable systems (iciss), pages 858-862. IEEE, 2017."
|
| 2202 |
+
},
|
| 2203 |
+
{
|
| 2204 |
+
"type": "ref_text",
|
| 2205 |
+
"bbox": [
|
| 2206 |
+
0.116,
|
| 2207 |
+
0.25,
|
| 2208 |
+
0.882,
|
| 2209 |
+
0.292
|
| 2210 |
+
],
|
| 2211 |
+
"angle": 0,
|
| 2212 |
+
"content": "Chenhao Lin, Pengwei Hu, Hui Su, Shaochun Li, Jing Mei, Jie Zhou, and Henry Leung. Sensemood: Depression detection on social media. In Proceedings of the 2020 International Conference on Multimedia Retrieval, pages 407-411, 2020."
|
| 2213 |
+
},
|
| 2214 |
+
{
|
| 2215 |
+
"type": "ref_text",
|
| 2216 |
+
"bbox": [
|
| 2217 |
+
0.116,
|
| 2218 |
+
0.296,
|
| 2219 |
+
0.882,
|
| 2220 |
+
0.325
|
| 2221 |
+
],
|
| 2222 |
+
"angle": 0,
|
| 2223 |
+
"content": "Thin Nguyen, Dinh Phung, Bo Dao, Svetha Venkatesh, and Michael Berk. Affective and content analysis of online depression communities. IEEE Transactions on Affective Computing, 5(3):217-226, 2014."
|
| 2224 |
+
},
|
| 2225 |
+
{
|
| 2226 |
+
"type": "ref_text",
|
| 2227 |
+
"bbox": [
|
| 2228 |
+
0.116,
|
| 2229 |
+
0.328,
|
| 2230 |
+
0.882,
|
| 2231 |
+
0.357
|
| 2232 |
+
],
|
| 2233 |
+
"angle": 0,
|
| 2234 |
+
"content": "Yevhen Tyshchenko. Depression and anxiety detection from blog posts data. Nature Precis. Sci., Inst. Comput. Sci. Univ. Tartu, Tartu, Estonia, 2018."
|
| 2235 |
+
},
|
| 2236 |
+
{
|
| 2237 |
+
"type": "ref_text",
|
| 2238 |
+
"bbox": [
|
| 2239 |
+
0.116,
|
| 2240 |
+
0.361,
|
| 2241 |
+
0.882,
|
| 2242 |
+
0.39
|
| 2243 |
+
],
|
| 2244 |
+
"angle": 0,
|
| 2245 |
+
"content": "Andrew G Reece and Christopher M Danforth. Instagram photos reveal predictive markers of depression. *EPJ Data Science*, 6:1–12, 2017."
|
| 2246 |
+
},
|
| 2247 |
+
{
|
| 2248 |
+
"type": "ref_text",
|
| 2249 |
+
"bbox": [
|
| 2250 |
+
0.116,
|
| 2251 |
+
0.393,
|
| 2252 |
+
0.882,
|
| 2253 |
+
0.409
|
| 2254 |
+
],
|
| 2255 |
+
"angle": 0,
|
| 2256 |
+
"content": "Healthline. https://www.healthline.com/health/depression/mild-depression. (Accessed: 2021-11-17)."
|
| 2257 |
+
},
|
| 2258 |
+
{
|
| 2259 |
+
"type": "ref_text",
|
| 2260 |
+
"bbox": [
|
| 2261 |
+
0.116,
|
| 2262 |
+
0.412,
|
| 2263 |
+
0.882,
|
| 2264 |
+
0.455
|
| 2265 |
+
],
|
| 2266 |
+
"angle": 0,
|
| 2267 |
+
"content": "David E Losada, Fabio Crestani, and Javier Parapar. erisk 2017: Clef lab on early risk prediction on the internet: experimental foundations. In International Conference of the Cross-Language Evaluation Forum for European Languages, pages 346-360. Springer, 2017."
|
| 2268 |
+
},
|
| 2269 |
+
{
|
| 2270 |
+
"type": "ref_text",
|
| 2271 |
+
"bbox": [
|
| 2272 |
+
0.116,
|
| 2273 |
+
0.458,
|
| 2274 |
+
0.882,
|
| 2275 |
+
0.487
|
| 2276 |
+
],
|
| 2277 |
+
"angle": 0,
|
| 2278 |
+
"content": "Michael M. Tadesse, Hongfei Lin, Bo Xu, and Liang Yang. Detection of depression-related posts in reddit social media forum. IEEE Access, 7:44883-44893, 2019. doi:10.1109/ACCESS.2019.2909180."
|
| 2279 |
+
},
|
| 2280 |
+
{
|
| 2281 |
+
"type": "ref_text",
|
| 2282 |
+
"bbox": [
|
| 2283 |
+
0.116,
|
| 2284 |
+
0.491,
|
| 2285 |
+
0.882,
|
| 2286 |
+
0.547
|
| 2287 |
+
],
|
| 2288 |
+
"angle": 0,
|
| 2289 |
+
"content": "Inna Pirina and Cagni Coltekin. Identifying depression on Reddit: The effect of training data. In Proceedings of the 2018 EMNLP Workshop SMM4H: The 3rd Social Media Mining for Health Applications Workshop & Shared Task, pages 9-12, Brussels, Belgium, October 2018. Association for Computational Linguistics. doi:10.18653/v1/W18-5903 URL https://aclanthology.org/W18-5903."
|
| 2290 |
+
},
|
| 2291 |
+
{
|
| 2292 |
+
"type": "ref_text",
|
| 2293 |
+
"bbox": [
|
| 2294 |
+
0.116,
|
| 2295 |
+
0.551,
|
| 2296 |
+
0.882,
|
| 2297 |
+
0.592
|
| 2298 |
+
],
|
| 2299 |
+
"angle": 0,
|
| 2300 |
+
"content": "Hannah Yao, Sina Rashidian, Xinyu Dong, Hongyi Duanmu, Richard N Rosenthal, and Fusheng Wang. Detection of suicidality among opioid users on reddit: Machine learning-based approach. Journal of medical internet research, 22(11):e15293, 2020."
|
| 2301 |
+
},
|
| 2302 |
+
{
|
| 2303 |
+
"type": "ref_text",
|
| 2304 |
+
"bbox": [
|
| 2305 |
+
0.116,
|
| 2306 |
+
0.597,
|
| 2307 |
+
0.882,
|
| 2308 |
+
0.625
|
| 2309 |
+
],
|
| 2310 |
+
"angle": 0,
|
| 2311 |
+
"content": "Nick Boettcher et al. Studies of depression and anxiety using reddit as a data source: Scoping review. JMIR Mental Health, 8(11):e29487, 2021."
|
| 2312 |
+
},
|
| 2313 |
+
{
|
| 2314 |
+
"type": "ref_text",
|
| 2315 |
+
"bbox": [
|
| 2316 |
+
0.116,
|
| 2317 |
+
0.629,
|
| 2318 |
+
0.882,
|
| 2319 |
+
0.657
|
| 2320 |
+
],
|
| 2321 |
+
"angle": 0,
|
| 2322 |
+
"content": "Ron Artstein and Massimo Poesio. Inter-coder agreement for computational linguistics. Computational Linguistics, 34 (4):555-596, 2008."
|
| 2323 |
+
},
|
| 2324 |
+
{
|
| 2325 |
+
"type": "ref_text",
|
| 2326 |
+
"bbox": [
|
| 2327 |
+
0.116,
|
| 2328 |
+
0.662,
|
| 2329 |
+
0.882,
|
| 2330 |
+
0.69
|
| 2331 |
+
],
|
| 2332 |
+
"angle": 0,
|
| 2333 |
+
"content": "Jacob Cohen. A coefficient of agreement for nominal scales. Educational and psychological measurement, 20(1), 37-46, 1960."
|
| 2334 |
+
},
|
| 2335 |
+
{
|
| 2336 |
+
"type": "ref_text",
|
| 2337 |
+
"bbox": [
|
| 2338 |
+
0.116,
|
| 2339 |
+
0.694,
|
| 2340 |
+
0.882,
|
| 2341 |
+
0.722
|
| 2342 |
+
],
|
| 2343 |
+
"angle": 0,
|
| 2344 |
+
"content": "F. Pedregosa, G. Varoquaux, A. Gramfort, et al. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830, 2011a."
|
| 2345 |
+
},
|
| 2346 |
+
{
|
| 2347 |
+
"type": "ref_text",
|
| 2348 |
+
"bbox": [
|
| 2349 |
+
0.116,
|
| 2350 |
+
0.726,
|
| 2351 |
+
0.882,
|
| 2352 |
+
0.755
|
| 2353 |
+
],
|
| 2354 |
+
"angle": 0,
|
| 2355 |
+
"content": "J Richard Landis and Gary G Koch. The measurement of observer agreement for categorical data. biometrics, pages 159-174, 1977."
|
| 2356 |
+
},
|
| 2357 |
+
{
|
| 2358 |
+
"type": "ref_text",
|
| 2359 |
+
"bbox": [
|
| 2360 |
+
0.116,
|
| 2361 |
+
0.759,
|
| 2362 |
+
0.882,
|
| 2363 |
+
0.801
|
| 2364 |
+
],
|
| 2365 |
+
"angle": 0,
|
| 2366 |
+
"content": "F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duchesnay. Scikit-learn Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830, 2011b."
|
| 2367 |
+
},
|
| 2368 |
+
{
|
| 2369 |
+
"type": "ref_text",
|
| 2370 |
+
"bbox": [
|
| 2371 |
+
0.116,
|
| 2372 |
+
0.805,
|
| 2373 |
+
0.554,
|
| 2374 |
+
0.821
|
| 2375 |
+
],
|
| 2376 |
+
"angle": 0,
|
| 2377 |
+
"content": "Martin F Porter. An algorithm for suffix stripping. *Program*, 1980."
|
| 2378 |
+
},
|
| 2379 |
+
{
|
| 2380 |
+
"type": "ref_text",
|
| 2381 |
+
"bbox": [
|
| 2382 |
+
0.116,
|
| 2383 |
+
0.824,
|
| 2384 |
+
0.882,
|
| 2385 |
+
0.866
|
| 2386 |
+
],
|
| 2387 |
+
"angle": 0,
|
| 2388 |
+
"content": "Jeffrey Pennington, Richard Socher, and Christopher D Manning. Glove: Global vectors for word representation In Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pages 1532-1543, 2014."
|
| 2389 |
+
},
|
| 2390 |
+
{
|
| 2391 |
+
"type": "ref_text",
|
| 2392 |
+
"bbox": [
|
| 2393 |
+
0.116,
|
| 2394 |
+
0.87,
|
| 2395 |
+
0.882,
|
| 2396 |
+
0.913
|
| 2397 |
+
],
|
| 2398 |
+
"angle": 0,
|
| 2399 |
+
"content": "N. V. Chawla, K. W. Bowyer, L. O. Hall, and W. P. Kegelmeyer. Smote: Synthetic minority over-sampling technique Journal of Artificial Intelligence Research, 16:321-357, Jun 2002. ISSN 1076-9757. doi:10.1613/jair.953. URL http://dx.doi.org/10.1613/jair.953."
|
| 2400 |
+
},
|
| 2401 |
+
{
|
| 2402 |
+
"type": "list",
|
| 2403 |
+
"bbox": [
|
| 2404 |
+
0.115,
|
| 2405 |
+
0.091,
|
| 2406 |
+
0.885,
|
| 2407 |
+
0.913
|
| 2408 |
+
],
|
| 2409 |
+
"angle": 0,
|
| 2410 |
+
"content": null
|
| 2411 |
+
},
|
| 2412 |
+
{
|
| 2413 |
+
"type": "page_number",
|
| 2414 |
+
"bbox": [
|
| 2415 |
+
0.49,
|
| 2416 |
+
0.936,
|
| 2417 |
+
0.508,
|
| 2418 |
+
0.948
|
| 2419 |
+
],
|
| 2420 |
+
"angle": 0,
|
| 2421 |
+
"content": "12"
|
| 2422 |
+
}
|
| 2423 |
+
]
|
| 2424 |
+
]
|
2202.03xxx/2202.03047/35db8a53-2285-4000-8359-95bda668e6ac_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7739976ad85d8cb4827dc872e2d0b37e9a2d2aa43415caf8a9d48e4a7cd311dc
|
| 3 |
+
size 283032
|
2202.03xxx/2202.03047/full.md
ADDED
|
@@ -0,0 +1,288 @@
|
| 1 |
+
# DATA SET CREATION AND EMPIRICAL ANALYSIS FOR DETECTING SIGNS OF DEPRESSION FROM SOCIAL MEDIA POSTINGS
|
| 2 |
+
|
| 3 |
+
A PREPRINT
|
| 4 |
+
|
| 5 |
+
Kayalvizhi S
|
| 6 |
+
Department of Computer Science
|
| 7 |
+
SSN College of Engineering
|
| 8 |
+
kayalvizhis@ssn.edu.in
|
| 9 |
+
|
| 10 |
+
Thenmozhi D
|
| 11 |
+
Department of Computer Science
|
| 12 |
+
SSN College of Engineering
|
| 13 |
+
theni_d@ssn.edu.in
|
| 14 |
+
|
| 15 |
+
February 8, 2022
|
| 16 |
+
|
| 17 |
+
# ABSTRACT
|
| 18 |
+
|
| 19 |
+
Depression is a common mental illness that has to be detected and treated at an early stage to avoid serious consequences. There are many methods and modalities for detecting depression that involve physical examination of the individual. However, diagnosing mental health using social media data is more effective as it avoids such physical examinations. Also, since people express their emotions well on social media, it is desirable to diagnose their mental health using social media data. Though there are many existing systems that detect the mental illness of a person by analysing their social media data, detecting the level of depression is also important for further treatment. Thus, in this research, we developed a gold standard data set that detects the levels of depression as 'not depressed', 'moderately depressed' and 'severely depressed' from social media postings. Traditional learning algorithms were employed on this data set and an empirical analysis is presented in this paper. A data augmentation technique was applied to overcome the data imbalance. Among the several variations that were implemented, the model with the Word2Vec vectorizer and Random Forest classifier on augmented data outperforms the other variations with a score of 0.877 for both accuracy and F1 measure.
|
| 20 |
+
|
| 21 |
+
Keywords Depression $\cdot$ Data set $\cdot$ Data augmentation $\cdot$ Levels of depression $\cdot$ Random Forest
|
| 22 |
+
|
| 23 |
+
# 1 Introduction
|
| 24 |
+
|
| 25 |
+
Depression (major depressive disorder) is a common and serious medical illness that negatively affects the way one feels, thinks and acts ame. The rate of depression is rapidly increasing day by day. According to the Global Health Data Exchange (GHDx), depression has affected 280 million people worldwide who. Detecting depression is important since it has to be observed and treated at an early stage to avoid severe consequences<sup>1</sup>. Depression is generally diagnosed through different methods and modalities: clinical interviews Al Hanai et al. [2018], Dibeklioglu et al. [2015], analysing behaviour Alghowinem et al. [2016], monitoring facial and speech modulations Nasir et al. [2016], physical exams with depression scales Havigerova et al. [2019], Stankevich et al. [2019], videos and audios Morales and Levitan [2016], etc. All these methods of diagnosis involve greater involvement of the individual or an in-person discussion about their feelings.
|
| 26 |
+
|
| 27 |
+
On the other hand, social media is increasingly becoming part of our lives, with a considerable rate of increase in social media users according to the statistics of statista sta. Slowly, social media has become a comfortable virtual platform for expressing our feelings. Social media platforms can therefore be considered a source for analysing people's thoughts and can also be used for analysing the mental health of an individual. Thus, we aim to use social media texts for analysing the mental health of a person.
|
| 28 |
+
|
| 29 |
+
The existing works collect social media texts from open source platforms like Reddit Wolohan et al. [2018], Facebook Eichstaedt et al. [2018], Twitter Reece et al. [2017], Tsugawa et al. [2015], Deshpande and Rao [2017], Lin et al. [2020], LiveJournal Nguyen et al. [2014], blog posts Tyshchenko [2018], Instagram Reece and Danforth [2017], etc. and use them to detect depression.
|
| 30 |
+
|
| 31 |
+
# Research gaps:
|
| 32 |
+
|
| 33 |
+
All these research works concentrate on diagnosing depression from social media texts. Although detecting depression has its own significance, detecting the level of depression is of equal importance for further treatment. Generally, depression is classified into three stages, namely mild, moderate and severe typ. Each stage has its own symptoms and effects, so detecting the level of depression is also crucial. Thus, we propose a data set to detect the level of depression in addition to detecting depression from social media texts. The data set is made available to the public in a CodaLab competition repository $^{2}$ . This paper explains the process of creating the data set that detects the levels of depression, along with some baseline models.
|
| 34 |
+
|
| 35 |
+
# Our contributions in this research include:
|
| 36 |
+
|
| 37 |
+
1. Creating a new benchmark data set to detect signs of depression from social media data at the postings level.
|
| 38 |
+
2. Developing baseline models with traditional learning classifiers.
|
| 39 |
+
3. Analysing the impact of data augmentation.
|
| 40 |
+
|
| 41 |
+
# 2 Related Work
|
| 42 |
+
|
| 43 |
+
The aim of our research work is to create a data set that identifies signs of depression and detects the level of depression; thus, the existing works are analysed in terms of data collection, modalities and methodologies of detecting depression.
|
| 44 |
+
|
| 45 |
+
# 2.1 Modalities and methodologies of depression detection:
|
| 46 |
+
|
| 47 |
+
For detecting depression, the data was collected by various methods like clinical interviews Al Hanai et al. [2018], Dibeklioglu et al. [2015], analysing behaviour Alghowinem et al. [2016], monitoring facial and speech modulations Nasir et al. [2016], physical exams with depression scales Havigerova et al. [2019], Stankevich et al. [2019], videos and audios Morales and Levitan [2016], etc. Since social media users are rapidly increasing day by day, social media data can also be considered a main source for detecting mental health. This key idea gave rise to the most utilized data set, the E-Risk@CLEF-2017 pilot task data set Losada et al. [2017], which was collected from Reddit. In addition to this data set, many other data sets that detect depression from social media data, such as the DAIC corpus Al Hanai et al. [2018], AVEC Morales and Levitan [2016], etc., have also evolved. Though a few benchmark data sets exist to detect depression, more researchers tend to collect data from social media and create their own data sets.
|
| 48 |
+
|
| 49 |
+
# 2.2 Data collection from social media:
|
| 50 |
+
|
| 51 |
+
The social media texts were collected from open source platforms like Reddit Wolohan et al. [2018], Tadesse et al. [2019], Facebook Eichstaedt et al. [2018], Twitter Reece et al. [2017], Tsugawa et al. [2015], Deshpande and Rao [2017], Lin et al. [2020], LiveJournal Nguyen et al. [2014], blog posts Tyshchenko [2018], Instagram Reece and Danforth [2017], etc. The data from Twitter was collected using APIs and annotated into depressed and not depressed classes based on keywords like "depressed, hopeless and suicide" Deshpande and Rao [2017], using a questionnaire Tsugawa et al. [2015], a survey Reece et al. [2017], etc. The data was also scraped from LiveJournal groups Nguyen et al. [2014] and blog posts Tyshchenko [2018] and manually annotated into depressed and not depressed.
|
| 52 |
+
|
| 53 |
+
Among these social media platforms, Reddit possesses a larger amount of text discussion than the other platforms, and so Reddit has recently become a widely used platform for collecting social media text data.
|
| 54 |
+
|
| 55 |
+
The data were collected from these platforms using an Application Programming Interface (API) with hashtags, groups, communities, etc. The data from Reddit was collected from subreddits like "r/depression help, r/aww, r/AskReddit, r/news, r/Showerthoughts, r/pics, r/gaming, r/depression, r/videos r todaylearned r/funny" and annotated manually by two annotators into depressed and not depressed classes Wolohan et al. [2018]. Data was also collected from subreddits like "r/anxiety, r/depression and r/depression_help" and annotated into a data set Pirina and Cöltekin [2018]. A data set was created with the classes depression, suicide watch, opiates and control, which was collected using subreddits such as "r/suicidewatch, r/depression", opioid related forums and other general forums Yao et al. [2020]. A survey was also done based on the studies of depression and anxiety from Reddit data Boettcher et al. [2021].
|
| 56 |
+
|
| 57 |
+
Table 1: Comparison of existing data sets
|
| 58 |
+
|
| 59 |
+
<table><tr><td>Existing system</td><td>Social Media Platform</td><td>Class Labels</td></tr><tr><td>Eichstaedt et.al Eichstaedt et al. [2018]</td><td>Facebook</td><td>Depressed and not depressed</td></tr><tr><td>Nguyen et.al Nguyen et al. [2014]</td><td>Live journal</td><td>Depressed and control</td></tr><tr><td>Tyshchenko et. al Tyshchenko [2018]</td><td>Blog post</td><td>Clinical and Control</td></tr><tr><td>Deshpande et.al Deshpande and Rao [2017]</td><td>Twitter</td><td>Neutral and negative</td></tr><tr><td>Lin et.al Lin et al. [2020]</td><td>Twitter</td><td>Depressed and not depressed</td></tr><tr><td>Reece et.al Reece et al. [2017]</td><td>Twitter</td><td>PTSD and Depression</td></tr><tr><td>Tsugawa et.al Tsugawa et al. [2015]</td><td>Twitter</td><td>Depressed and not depressed</td></tr><tr><td>Losada et.al Losada et al. [2017]</td><td>Reddit</td><td>Depression and Not depression</td></tr><tr><td>Wolohan et.al Wolohan et al. [2018]</td><td>Reddit</td><td>Depressed and not depressed</td></tr><tr><td>Tadesse et.al Tadesse et al. [2019]</td><td>Reddit</td><td>Depression indicative and standard</td></tr><tr><td>Pirina et.al Pirina and Çöltekin [2018]</td><td>Reddit</td><td>positive and negative</td></tr><tr><td>Yao et.al Yao et al. [2020]</td><td>Reddit</td><td>Depression, Suicide watch, Control and Opiates</td></tr><tr><td>Proposed Data set</td><td>Reddit</td><td>Not depressed, moderately depressed & severely depressed</td></tr></table>
|
| 60 |
+
|
| 61 |
+
From Table 1, it is clear that all these research works have collected social media data only to detect the presence of depression. Although diagnosing depression is important, detecting the level of depression is more crucial for further treatment. Thus, we propose a data set that detects the level of depression.
|
| 62 |
+
|
| 63 |
+
# 3 Proposed Work
|
| 64 |
+
|
| 65 |
+
We propose to develop a gold standard data set that detects the levels of depression as not depressed, moderately depressed and severely depressed. Initially, the data set was created by collecting the data from the social media platform, Reddit. For collecting the data from archives of Reddit, two way communication is needed, which requires app authentication. After getting proper authentication, the subreddits from which the data must be collected are chosen and the data was extracted. After extracting the data, the data is pre-processed and exported in the required format which forms the data set. The data were then annotated into levels of depression by domain experts following the annotation guidelines. After annotation, the inter-rater agreement is calculated to analyze the quality of data and annotation. Then, the corpus is formed using the mutually annotated instances. Baseline models were also employed on the corpus to analyze the performance. To overcome the data imbalance problem, data augmentation technique was applied and their impact on performance was also analyzed.
|
| 66 |
+
|
| 67 |
+
# 3.1 Data set creation:
|
| 68 |
+
|
| 69 |
+
For creating the data set, a suitable social media platform is chosen initially and data is scraped using suitable methods. After scraping the data, the data is processed and dumped in a suitable format.
|
| 70 |
+
|
| 71 |
+
# 3.1.1 Data collection:
|
| 72 |
+
|
| 73 |
+
For creating the data set, the data was collected from Reddit<sup>3</sup>, an open source social media platform, since it has more textual data compared to other social media platforms. This data is in postings format, where each posting includes one or more statements of an individual. The postings data are scraped from the Reddit archives using the "pushshift" API.
|
| 74 |
+
|
| 75 |
+
# 3.1.2 App authentication:
|
| 76 |
+
|
| 77 |
+
For scraping the data from the Reddit archives, the Python Reddit API Wrapper (PRAW) is used. The data can only be scraped after getting authentication from the Reddit platform. This authentication process involves the creation of an application in their domain, for which a unique client secret key and client id are assigned. Thus, PRAW allows two-way communication only with these credentials of user_agent (application name), client_id and client_secret to get data from Reddit.
|
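As a rough illustration (not the authors' exact script), a PRAW session can be opened with the credentials described above; the identifiers below are placeholders to be replaced with the keys issued when the application is registered.

```python
import praw

# Placeholder credentials: substitute the client id / secret issued by Reddit
# when the application is registered (these are not the authors' keys).
reddit = praw.Reddit(
    client_id="YOUR_CLIENT_ID",
    client_secret="YOUR_CLIENT_SECRET",
    user_agent="depression-dataset-scraper",
)
print(reddit.read_only)  # True: only public postings are read
```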
| 78 |
+
|
| 79 |
+
# 3.1.3 Subreddit selection
|
| 80 |
+
|
| 81 |
+
Reddit is a collection of millions of groups or forums called subreddits. For collecting people's confessions or discussions about their mental health, data was scraped from the archives of subreddit groups like "r/Mental Health, r/depression, r/loneliness, r/stress, r/anxiety".
|
| 82 |
+
|
| 83 |
+
# 3.1.4 Data extraction:
|
| 84 |
+
|
| 85 |
+
For each posting, details such as the post ID, title, URL, publish date, name of the subreddit, score of the post and total number of comments can be collected using PRAW. Among these, the post ID, title, text, URL, date and subreddit name are collected in dictionary format.
|
| 86 |
+
|
| 87 |
+
# 3.1.5 Data pre-processing and exporting:
|
| 88 |
+
|
| 89 |
+
After collecting these data, the title and text parts are pre-processed by removing non-ASCII characters and emoticons to get a clean data set. The processed data is exported into a Comma Separated Values (.csv) format file with the five columns. A sample of the collected postings is shown in Table 2.
|
| 90 |
+
|
| 91 |
+
Table 2: Sample Postings data
|
| 92 |
+
|
| 93 |
+
<table><tr><td>Post ID</td><td>Title</td><td>Text</td><td>Url</td><td>Publish date</td><td>Subreddit</td></tr><tr><td>g69ppt</td><td>Don’t want to get of bed</td><td>I’m done with me crying all day and thinking to myself that I can’t do a thing and I don’t what to get out of bed at all</td><td>https://www.reddit.com/r/depression/comments/g69ppt/dont_want_to_get_of.bed/4</td><td>2020-04-23 02:51:32</td><td>depression</td></tr><tr><td>gb9zei</td><td>Today is a day where I feel emptier than on other days.</td><td>It’s like I am alone with all my problems. I am sad about the fact I can’t trust anyone and nobody could help me because I feel like nobody understand how I feel. Depression is holding me tight today..</td><td>https://www.reddit.com/r/depression/comments/gb9zei/today_is_a_day_where_i Feel_emptier _than_on_other/5</td><td>2020-05-01 08:10:06</td><td>depression</td></tr></table>
|
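The following is a minimal sketch of the extraction and export steps described above. It uses PRAW listings for brevity (the archival data itself was fetched through the "pushshift" API), and the cleaning step simply drops non-ASCII characters, which also removes emoji-style emoticons; the field names and limits are illustrative assumptions.

```python
import csv
import re

def clean(text: str) -> str:
    # Remove non-ASCII characters (this also strips emoji-style emoticons).
    return re.sub(r"[^\x00-\x7F]+", " ", text or "").strip()

rows = []
for post in reddit.subreddit("depression").new(limit=100):  # `reddit` from the PRAW sketch above
    rows.append({
        "Post ID": post.id,
        "Title": clean(post.title),
        "Text": clean(post.selftext),
        "Url": post.url,
        "Publish date": post.created_utc,
        "Subreddit": post.subreddit.display_name,
    })

with open("postings.csv", "w", newline="", encoding="utf-8") as f:
    writer = csv.DictWriter(f, fieldnames=list(rows[0].keys()))
    writer.writeheader()
    writer.writerows(rows)
```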
| 94 |
+
|
| 95 |
+
# 3.2 Data Annotation
|
| 96 |
+
|
| 97 |
+
After collecting the data, the data were annotated according to the signs of depression. Although all the postings were collected from subreddits that exhibit the characteristics of mental illness, there is a possibility of postings that do not confess or discuss depression. Thus, the collected postings data were annotated by two domain experts into three labels that denote the level of signs of depression namely "Not depressed, Moderate and Severe". Framing the annotation guidelines for postings data is difficult since the mental health of an individual has to be analyzed using his/her single postings. For annotating the data into three classes, the guidelines were formatted as follows:
|
| 98 |
+
|
| 99 |
+
# 3.2.1 Label 1 - Not depressed :
|
| 100 |
+
|
| 101 |
+
The postings data will be annotated as "Not Depressed", if the postings data reflect one of the following mannerism:
|
| 102 |
+
|
| 103 |
+
- If the statements have only one or two lines about irrelevant topics.
|
| 104 |
+
- If the statements reflect momentary feelings of present situation.
|
| 105 |
+
- If the statements are about asking questions about any or medication
|
| 106 |
+
- If the statement asks or seeks help for a friend's difficulties.
|
| 107 |
+
|
| 108 |
+
# Example 1:
|
| 109 |
+
|
| 110 |
+
The holidays are the most difficult.
|
| 111 |
+
|
| 112 |
+
Not a big reddit poster, but I felt like this has been past due for myself. The holidays honestly are so hard for me to get through. I've spent the last 6 years of major holidays alone. Mostly because of my retail job, I never get enough time off around the holidays to go home and spend it with family, nor have they been able to visit me. My condolences to anyone else spending this time of year alone no matter what the circumstances may be. I moved to a new state 9 months ago and it's been a tough struggle meeting new friends as I didn't know anyone here before I moved. Now it's new years and all of my "friends" I've made while here yet again flaked on me (was actually excited to have plans for the first time I remember in a while), which I recently found out has been a common occurrence of them just getting together without me. (Which I'm used to at this point, it is what it is). It just sucks knowing you're always the last choice in anyone's lives. And that my depression may be the cause of my 'boringness'/lack of interest my friends have towards me. Any tips on making friends for someone struggling mentally? I'm just tired of this constant weight of loneliness bearing down on me. I seriously can't remember the last time someone went out of their way to invite me to something. It seems like I'm always asking to tag along, and then I'm just a burden at that point, which is why I'm starting to lose all hope.
|
| 113 |
+
|
| 114 |
+
Whoever takes the time to read this, thank you.
|
| 115 |
+
|
| 116 |
+
# 3.2.2 Label 2 - Moderately depressed :
|
| 117 |
+
|
| 118 |
+
The postings data will be annotated as "moderately depressed", if the postings falls under these conditions:
|
| 119 |
+
|
| 120 |
+
- If the statements reflect change in feelings (feeling low for some time and feeling better for some time).
|
| 121 |
+
- If the statement shows that they aren't feeling completely immersed in any situations
|
| 122 |
+
- If the statements show that they have hope for life.
|
| 123 |
+
|
| 124 |
+
# Example 1:
|
| 125 |
+
|
| 126 |
+
If I disappeared today, would it really matter?
|
| 127 |
+
|
| 128 |
+
I'm just too tired to go on, but at the same time I'm too tired to end it. I always thought about this but with the quarantine I just realised it is true. My friends never felt close to me, just like the only two relationships I have ever been in. They never cared about me, to the point where I even asked for help and they just turned a blind eye. And my family isn't any better. I don't know what to do, and I believe it won't matter if I do something or not. I'm sorry if my English isn't good, it isn't my first language.
|
| 129 |
+
|
| 130 |
+
# 3.2.3 Label 3 - Severely depressed :
|
| 131 |
+
|
| 132 |
+
The data will be annotated as "Severely depressed", if the postings have one of the following scenarios:
|
| 133 |
+
|
| 134 |
+
- If the statements express more than one disorder condition.
|
| 135 |
+
- If the statements explain a history of suicide attempts.
|
| 136 |
+
|
| 137 |
+
Table 3: Landis & Koch measurement table of inter rater agreement
|
| 138 |
+
|
| 139 |
+
<table><tr><td>Kappa value (κ)</td><td>Strength of agreement</td></tr><tr><td>< 0</td><td>Poor</td></tr><tr><td>0.01 - 0.20</td><td>Slight</td></tr><tr><td>0.21 - 0.40</td><td>Fair</td></tr><tr><td>0.41 - 0.60</td><td>Moderate</td></tr><tr><td>0.61 - 0.80</td><td>Substantial</td></tr><tr><td>0.81 - 0.99</td><td>Almost perfect agreement</td></tr></table>
|
| 140 |
+
|
| 141 |
+
# Example 1:
|
| 142 |
+
|
| 143 |
+
Getting depressed again?
|
| 144 |
+
|
| 145 |
+
So I'm 22F and I have taken antidepressants the last time 4 years ago. I've had ups and downs when I got off and with 19 I was having a rough time for two months - started drinking and smoking weed a lot. Kinda managed to get back on track then and haven't been feeling too bad until now. Lately I've been feeling kinda blue and started making mistakes or have to go through stuff multiple times to do it correctly or to be able to remember it. Currently I'm having a week off and have to go back to work on monday. I just don't know I feel like I'm getting worse and want to sleep most of the time and at first I thought it's because I'm used to working a lot, but when I think about having to go back soon I feel like throwing up and at the same time doing nothing also doesn't sit well with me. I guess I'm kinda scared at the moment because I don't want to feel like I was feeling years ago and I still don't feel comfortable with my own mind and don't trust myself that I'm strong enough to pull through if depression hits me again.
|
| 146 |
+
|
| 147 |
+
# 3.3 Inter-rater agreement
|
| 148 |
+
|
| 149 |
+
After annotating the data, inter-rater agreement was calculated between the decisions of the two judges using a kappa coefficient estimated with a per-annotator empirical prior over the class labels Artstein and Poesio [2008]. Inter-rater agreement<sup>6</sup> is the degree of agreement among independent observers who rate, code, or assess the same phenomenon. The inter-rater agreement is measured using Cohen's kappa statistic Cohen [1960].
|
| 150 |
+
|
| 151 |
+
The inter-rater agreement between the annotations was calculated using sklearn Pedregosa et al. [2011a]. For our annotation, the kappa value $(\kappa)$ is 0.686. According to the Landis & Koch scale Landis and Koch [1977] in Table 3, this $\kappa$ value denotes substantial agreement between the annotators, which demonstrates the consistency of labeling according to the annotation guidelines. Thus, the mutually annotated instances form the corpus.
|
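For illustration, Cohen's kappa between two annotators can be computed with scikit-learn as sketched below; the label lists are hypothetical stand-ins for the per-posting decisions of the two judges.

```python
from sklearn.metrics import cohen_kappa_score

# Hypothetical per-posting labels from the two annotators.
annotator_1 = ["moderate", "severe", "not depressed", "moderate", "not depressed"]
annotator_2 = ["moderate", "moderate", "not depressed", "moderate", "not depressed"]

kappa = cohen_kappa_score(annotator_1, annotator_2)
print(f"Cohen's kappa: {kappa:.3f}")  # the full annotation reported above gives 0.686
```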
| 152 |
+
|
| 153 |
+
# 3.4 Corpus Analysis
|
| 154 |
+
|
| 155 |
+
Initially, 20,088 instances of postings data were annotated, out of which 16,613 instances were found to be mutually annotated by the two judges, and thus they were considered as instances of the data set with their corresponding labels. Table 4 shows the complete statistics of the corpus.
|
| 156 |
+
|
| 157 |
+
The whole corpus has 1,56,676 sentences with 26,59,938 words, which shows the size of the corpus created. In the corpus, each posting with its label is considered one instance. An instance has an average of 9.42 sentences, varying in the range of 1 to 260 sentences, and an average of 159.92 words, lying between 1 and 5065 words. The distribution of the three class labels in the data set is shown in Figure 1. As shown in the figure, the data set is unbalanced, with 10,494 instances of the "moderately depressed" class, 1489 instances of the "severely depressed" class and 4649 instances of the "Not depressed" class, which also includes some duplicate instances.
|
| 158 |
+
|
| 159 |
+
Table 4: Postings data analysis
|
| 160 |
+
|
| 161 |
+
<table><tr><td>Category</td><td>Count</td></tr><tr><td>Total number of instances annotated</td><td>20,088</td></tr><tr><td>Data set instances
|
| 162 |
+
(number of instances mutually annotated)</td><td>16,632</td></tr><tr><td>Total number of sentences</td><td>1,56,676</td></tr><tr><td>Total number of words</td><td>26,59,938</td></tr><tr><td>Total number of stop-words</td><td>12,47,016</td></tr><tr><td>Total number of words other than stop-words</td><td>14,12,922</td></tr><tr><td>Total number of unique words</td><td>28,415</td></tr><tr><td>Total number of unique stop-words</td><td>150</td></tr><tr><td>Total number of unique words other than stop-words</td><td>28,265</td></tr><tr><td>Range of sentences per instance</td><td>1 - 260</td></tr><tr><td>Range of words per instance</td><td>1 - 5065</td></tr><tr><td>Average number of sentences per posting instance</td><td>9.42</td></tr><tr><td>Average number of words per posting instance</td><td>159.92</td></tr></table>
|
| 163 |
+
|
| 164 |
+

|
| 165 |
+
Figure 1: Class wise distribution of the data set
|
| 166 |
+
|
| 167 |
+
# 3.5 Baseline models
|
| 168 |
+
|
| 169 |
+
The data set has been evaluated using traditional models, which are considered the baseline models. The data set has four columns, namely id, title, text and class label. For implementation, the title data and text data are initially combined. The combined text data is pre-processed, features are extracted, the data is balanced, classified using traditional classifiers and evaluated by cross-validation.
|
| 170 |
+
|
| 171 |
+
# 3.5.1 Data Pre-processing:
|
| 172 |
+
|
| 173 |
+
The title and text columns are combined into a single text column after filling the "NA" instances of both title and text data. The combined text data is cleaned by converting the words to lower-case letters and removing unwanted punctuation, "[removed]" tags, web links, HTML links, stop words and small words (words with length less than two). After cleaning, the instances are tokenized using a regexp tokenizer Pedregosa et al. [2011b], stemmed using the Porter stemmer Porter [1980] and lemmatized using the WordNet lemmatizer.
|
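A minimal sketch of this cleaning pipeline is given below, assuming NLTK provides the regexp tokenizer, Porter stemmer, WordNet lemmatizer and stop-word list (the exact toolkit is not stated for every step).

```python
import re
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer, WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer

# One-time downloads may be needed: nltk.download("stopwords"), nltk.download("wordnet")
tokenizer = RegexpTokenizer(r"\w+")
stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words("english"))

def preprocess(title: str, text: str) -> list:
    combined = f"{title or 'NA'} {text or 'NA'}".lower()
    # Drop "[removed]" tags, web/HTML links and leftover markup.
    combined = re.sub(r"\[removed\]|https?://\S+|<[^>]+>", " ", combined)
    tokens = tokenizer.tokenize(combined)
    tokens = [t for t in tokens if t not in stop_words and len(t) >= 2]
    return [lemmatizer.lemmatize(stemmer.stem(t)) for t in tokens]
```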
| 174 |
+
|
| 175 |
+
# 3.5.2 Feature extraction:
|
| 176 |
+
|
| 177 |
+
The features were extracted using three vectorizers, namely Word2Vec, the Term Frequency - Inverse Document Frequency (TF-IDF) vectorizer and the Glove Pennington et al. [2014] vectorizer; a brief sketch of these settings follows the list below.
|
| 178 |
+
|
| 179 |
+
- Word2Vec: It produces a vector that represents the context of the word considering the occurrence of the word. The vectors are generated using Continuous Bag Of Words.
|
| 180 |
+
- TF-IDF: It produces a score considering the occurrence of the word in the document. It is based on the relevance of a topic in a particular document. The vectors are calculated using four grams considering a maximum of 2000 features.
|
| 181 |
+
- Glove: It produces the word embeddings considering the occurrence and co-occurrence of the words with reduced dimensionality. The words are mapped to a word embedding using 6 Billion pre-trained tokens with 100 features each.
|
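The following is a minimal sketch of the TF-IDF and Word2Vec settings described above (scikit-learn and gensim assumed); the GloVe embeddings would be loaded from the pre-trained 6B, 100-dimensional file. Representing a posting as the mean of its word vectors is our assumption, since the pooling strategy is not stated.

```python
import numpy as np
from gensim.models import Word2Vec
from sklearn.feature_extraction.text import TfidfVectorizer

# `token_lists` is a hypothetical list of pre-processed token lists, one per posting.
docs = [" ".join(tokens) for tokens in token_lists]

# TF-IDF: up to four-grams, capped at 2000 features.
tfidf = TfidfVectorizer(ngram_range=(1, 4), max_features=2000)
X_tfidf = tfidf.fit_transform(docs)

# Word2Vec: CBOW (sg=0), 100-dimensional vectors; a posting is the mean of its word vectors.
w2v = Word2Vec(sentences=token_lists, vector_size=100, sg=0, min_count=1)
X_w2v = np.array([
    np.mean([w2v.wv[t] for t in toks if t in w2v.wv] or [np.zeros(100)], axis=0)
    for toks in token_lists
])
```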
| 182 |
+
|
| 183 |
+
# 3.5.3 Classifiers:
|
| 184 |
+
|
| 185 |
+
Twelve different classifiers that include Ada Boost Classifier, Decision Tree, Gaussian Naive Bayes, K-Nearest Neighbour, linear-Support Vector Machine, Linear Discriminant Analysis, Logistic Regression, Multi-layer Perceptron, Qua-
|
| 186 |
+
|
| 187 |
+
dratic Discriminant Analysis, Radial Basis Function - Support Vector Machine and Random Forest of Scikit-learn Pedregosa et al. [2011b] were used for classification. A sketch of a few representative classifier configurations follows the list below.
|
| 188 |
+
|
| 189 |
+
- Ada Boost Classifier (ABC): The Adaptive Boosting algorithm is a collection of N estimator models in which higher weights are assigned to the mis-classified samples in the next model. In our implementation, 100 estimator models with a random state of 0 and a learning rate of 0.1 were used to fine-tune the model.
|
| 190 |
+
- Decision Tree (DT): The decision tree classifier predicts the target value based on decision rules that are formed from the features to identify the target variable. The decision rules are formed using the Gini index and entropy for information gain. For implementation, the decision tree classifier was tuned with a minimum of two samples per split and one sample per leaf node, using the Gini criterion to choose the best split and a random state of 0.
|
| 191 |
+
- Gaussian Naive Bayes (GNB): Gaussian Naive Bayes is the variant of the Naive Bayes classifier, which depends on Bayes' theorem, that assumes a Gaussian (normal) distribution of the features.
|
| 192 |
+
- K-Nearest Neighbour (KNN): KNN classifies a data point by plotting the points and finding the similarity between them. In the implementation, the number of neighbours was set to three, with equal weights and Euclidean distance as the distance metric.
|
| 193 |
+
- Logistic Regression (LR): A probabilistic model that predicts the class label based on the sigmoid function for binary classification. As our data set is multi-class, multinomial logistic regression was used to evaluate it. For implementation, the classifier was trained with a tolerance of 1e-4, an inverse regularization strength of 1.0 and an intercept scaling of 1.
|
| 194 |
+
- Multi-layer Perceptron (MLP): An artificial neural network that is trained to predict the class label using back-propagation of the error. A multi-layer perceptron of two layers with 100 hidden nodes each was trained with the ReLU activation function, the Adam optimizer and a learning rate of 0.001 for a maximum of 300 iterations.
|
| 195 |
+
- Discriminant Analysis: A generative model that utilizes a Gaussian distribution for classification by assuming each class has a different co-variance. For implementation, the co-variance is calculated with a threshold of 1.0e-04. Both Linear DA (LDA) and Quadratic DA (QDA) were implemented.
|
| 196 |
+
- Support Vector Machine: A supervised model that projects the data into a higher-dimensional space and classifies it using hyperplanes. The model was trained with both an RBF kernel (RBF-SVM) and a linear kernel (L-SVM), with a polynomial degree of three, a regularization parameter of 0.1 and no limit on the maximum number of iterations.
|
| 197 |
+
- Random Forest (RF): Random Forest combines many decision trees in an ensemble to generate predictions. It overcomes the limitations of individual decision trees through bagging (bootstrap aggregation). It was implemented with 100 estimators. A configuration sketch covering all of the classifiers above follows this list.
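The sketch below shows one way the classifiers above could be instantiated in Scikit-learn with the stated settings; any hyperparameter not mentioned in the text is left at the library default, so this is illustrative rather than the authors' exact configuration.

```python
# Illustrative classifier configurations (unstated settings keep defaults).
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.discriminant_analysis import (LinearDiscriminantAnalysis,
                                            QuadraticDiscriminantAnalysis)
from sklearn.svm import SVC

classifiers = {
    "ABC": AdaBoostClassifier(n_estimators=100, learning_rate=0.1, random_state=0),
    "DT": DecisionTreeClassifier(criterion="gini", min_samples_split=2,
                                 min_samples_leaf=1, random_state=0),
    "GNB": GaussianNB(),
    "KNN": KNeighborsClassifier(n_neighbors=3, weights="uniform", metric="euclidean"),
    "L-SVM": SVC(kernel="linear", C=0.1, degree=3),
    "LDA": LinearDiscriminantAnalysis(),
    "LR": LogisticRegression(tol=1e-4, C=1.0, intercept_scaling=1),
    "MLP": MLPClassifier(hidden_layer_sizes=(100, 100), activation="relu",
                         solver="adam", learning_rate_init=0.001, max_iter=300),
    "QDA": QuadraticDiscriminantAnalysis(),
    "RBF-SVM": SVC(kernel="rbf", C=0.1, degree=3),
    "RF": RandomForestClassifier(n_estimators=100),
}
```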
|
| 198 |
+
|
| 199 |
+
# 4 Implementation and Results
|
| 200 |
+
|
| 201 |
+
The features extracted in subsection 3.5.2 are classified using the classifiers described in subsection 3.5.3 and evaluated using stratified k-fold sampling from Scikit-learn Pedregosa et al. [2011b]. In this validation, the data are split into 10 folds, and the evaluation results with respect to the weighted-average F1-score are tabulated in Table 5. A minimal sketch of this evaluation loop follows.
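The sketch below assumes a feature matrix `X`, a label vector `y` and the `classifiers` dictionary from the configuration sketch above; the shuffling and random seed are illustrative choices, not stated in the text.

```python
# Illustrative stratified 10-fold evaluation with weighted F1 and accuracy.
from sklearn.model_selection import StratifiedKFold, cross_val_score

skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)
for name, clf in classifiers.items():
    f1 = cross_val_score(clf, X, y, cv=skf, scoring="f1_weighted").mean()
    acc = cross_val_score(clf, X, y, cv=skf, scoring="accuracy").mean()
    print(f"{name}: weighted F1 = {f1:.3f}, accuracy = {acc:.3f}")
```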
|
| 202 |
+
|
| 203 |
+
From Table 5, it is clear that the models with the Random Forest classifier and the Multi-Layer Perceptron (MLP) applied to the features extracted using Glove perform equally well, each with an F1-score of 0.647. The performance of the models with accuracy as the metric is shown in Table 6. From that table, it is clear that the model with the Random Forest classifier and the Glove vectorizer performs best, with an accuracy of 0.760.
|
| 204 |
+
|
| 205 |
+
Table 5: F1 score of all baseline models
|
| 206 |
+
|
| 207 |
+
<table><tr><td>F1 - score</td><td>TF-IDF</td><td>Glove</td><td>Word2Vec</td></tr><tr><td>ABC</td><td>0.451</td><td>0.496</td><td>0.451</td></tr><tr><td>DT</td><td>0.469</td><td>0.614</td><td>0.469</td></tr><tr><td>GNB</td><td>0.290</td><td>0.415</td><td>0.302</td></tr><tr><td>KNN</td><td>0.549</td><td>0.604</td><td>0.594</td></tr><tr><td>L-SVM</td><td>0.273</td><td>0.309</td><td>0.273</td></tr><tr><td>LDA</td><td>0.391</td><td>0.395</td><td>0.391</td></tr><tr><td>LR</td><td>0.395</td><td>0.329</td><td>0.395</td></tr><tr><td>MLP</td><td>0.625</td><td>0.647</td><td>0.625</td></tr><tr><td>QDA</td><td>0.368</td><td>0.459</td><td>0.368</td></tr><tr><td>RBF -SVM</td><td>0.452</td><td>0.560</td><td>0.452</td></tr><tr><td>RF</td><td>0.449</td><td>0.647</td><td>0.456</td></tr></table>
|
| 208 |
+
|
| 209 |
+
Table 6: Accuracy of all baseline models
|
| 210 |
+
|
| 211 |
+
<table><tr><td>Accuracy</td><td>TF-IDF</td><td>Glove</td><td>Word2Vec</td></tr><tr><td>ABC</td><td>0.616</td><td>0.654</td><td>0.616</td></tr><tr><td>DT</td><td>0.579</td><td>0.697</td><td>0.579</td></tr><tr><td>GNB</td><td>0.351</td><td>0.464</td><td>0.351</td></tr><tr><td>KNN</td><td>0.695</td><td>0.717</td><td>0.694</td></tr><tr><td>L-SVM</td><td>0.623</td><td>0.646</td><td>0.623</td></tr><tr><td>LDA</td><td>0.619</td><td>0.659</td><td>0.619</td></tr><tr><td>LR</td><td>0.619</td><td>0.650</td><td>0.619</td></tr><tr><td>MLP</td><td>0.700</td><td>0.754</td><td>0.700</td></tr><tr><td>QDA</td><td>0.485</td><td>0.499</td><td>0.485</td></tr><tr><td>RBF -SVM</td><td>0.667</td><td>0.733</td><td>0.667</td></tr><tr><td>RF</td><td>0.689</td><td>0.760</td><td>0.695</td></tr></table>
|
| 212 |
+
|
| 213 |
+
# 4.1 With Data augmentation
|
| 214 |
+
|
| 215 |
+
The postings data contains far more "moderately depressed" instances than the other classes, so the data has to be balanced before classification for better performance. For balancing the data, the Synthetic Minority Oversampling Technique (SMOTE) Chawla et al. [2002] was applied after vectorization; a minimal sketch follows. The effect of augmentation is shown in Figure 2.
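The sketch below uses the SMOTE implementation from the imbalanced-learn package; the random seed is an illustrative assumption, and `X`, `y` stand for the vectorized features and labels.

```python
# Illustrative SMOTE balancing, applied after vectorization.
from collections import Counter
from imblearn.over_sampling import SMOTE

print("before:", Counter(y))                     # dominated by the moderate class
X_res, y_res = SMOTE(random_state=0).fit_resample(X, y)
print("after:", Counter(y_res))                  # classes now equally sized
```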
|
| 216 |
+
|
| 217 |
+

|
| 218 |
+
(a) Before applying SMOTE
|
| 219 |
+
|
| 220 |
+
(b) After applying SMOTE
|
| 221 |
+
Figure 2: Effect of data augmentation
|
| 222 |
+

|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
Table 7: F1-score of all baseline models after data augmentation
|
| 228 |
+
|
| 229 |
+
<table><tr><td>F1 - score</td><td>TF-IDF</td><td>Glove</td><td>Word2Vec</td></tr><tr><td>ABC</td><td>0.263</td><td>0.622</td><td>0.559</td></tr><tr><td>DT</td><td>0.273</td><td>0.772</td><td>0.721</td></tr><tr><td>GNB</td><td>0.271</td><td>0.449</td><td>0.389</td></tr><tr><td>KNN</td><td>0.258</td><td>0.814</td><td>0.834</td></tr><tr><td>L-SVM</td><td>0.273</td><td>0.570</td><td>0.642</td></tr><tr><td>LDA</td><td>0.270</td><td>0.550</td><td>0.540</td></tr><tr><td>LR</td><td>0.270</td><td>0.544</td><td>0.551</td></tr><tr><td>MLP</td><td>0.269</td><td>0.775</td><td>0.852</td></tr><tr><td>QDA</td><td>0.276</td><td>0.592</td><td>0.477</td></tr><tr><td>RBF -SVM</td><td>0.273</td><td>0.762</td><td>0.788</td></tr><tr><td>RF</td><td>0.272</td><td>0.854</td><td>0.877</td></tr></table>
|
| 230 |
+
|
| 231 |
+
Table 8: Accuracy of all baseline models after data augmentation
|
| 232 |
+
|
| 233 |
+
<table><tr><td>Accuracy</td><td>TF-IDF</td><td>Glove</td><td>Word2Vec</td></tr><tr><td>ABC</td><td>0.384</td><td>0.628</td><td>0.562</td></tr><tr><td>DT</td><td>0.388</td><td>0.781</td><td>0.728</td></tr><tr><td>GNB</td><td>0.388</td><td>0.479</td><td>0.427</td></tr><tr><td>KNN</td><td>0.379</td><td>0.839</td><td>0.854</td></tr><tr><td>L-SVM</td><td>0.388</td><td>0.575</td><td>0.642</td></tr><tr><td>LDA</td><td>0.388</td><td>0.550</td><td>0.550</td></tr><tr><td>LR</td><td>0.387</td><td>0.547</td><td>0.559</td></tr><tr><td>MLP</td><td>0.386</td><td>0.780</td><td>0.857</td></tr><tr><td>QDA</td><td>0.393</td><td>0.615</td><td>0.497</td></tr><tr><td>RBF-SVM</td><td>0.388</td><td>0.769</td><td>0.792</td></tr><tr><td>RF</td><td>0.388</td><td>0.864</td><td>0.877</td></tr></table>
|
| 234 |
+
|
| 235 |
+
The features extracted in subsection 3.5.2 are augmented using SMOTE and then classified using the classifiers in subsection 3.5.3. The performance of these models in terms of F1-score and accuracy after data augmentation is shown in Tables 7 and 8 respectively. From the tables, it is clear that the performance improved, and the model with the Random Forest classifier applied to the features extracted using Word2Vec performs best, with an F1-score of 0.877.
|
| 236 |
+
|
| 237 |
+
# 5 Research insights
|
| 238 |
+
|
| 239 |
+
Researchers can further extend this work in the following directions:
|
| 240 |
+
|
| 241 |
+
- Extend the data set by considering the images along with text data.
|
| 242 |
+
- Implement deep learning models on the data set.
|
| 243 |
+
- Implement other methods of data augmentation to improve performance.
|
| 244 |
+
|
| 245 |
+
# 6 Conclusions
|
| 246 |
+
|
| 247 |
+
Depression is a common mental illness that has to be detected and treated early to avoid serious consequences. Among the different ways of detecting it, diagnosing mental health from social media data appears particularly effective since it requires little active involvement from the individual. Existing systems are designed only to detect depression from social media texts. Although detecting depression is important, detecting the level of depression is equally significant. Thus, we propose a data set that supports not only detecting depression from social media but also analyzing the level of depression. For creating the data set, the data was collected from subreddits and annotated by domain experts into three levels of depression, namely not depressed, moderately depressed and severely depressed. An empirical analysis of traditional learning algorithms was also carried out to evaluate the data set. Among the models, the model with the Glove vectorizer and the Random Forest classifier performs best, with an F1-score of 0.647 and an accuracy of 0.760. While analyzing the data set, the "moderately depressed" class was found to be much more populated than the other classes, so a data augmentation method named SMOTE was applied and the performance re-analyzed. Data augmentation improved the performance by $23\%$ and $12\%$ in terms of F1-score and accuracy respectively, with both F1-score and accuracy reaching 0.877. The data set can also be extended by considering images along with texts for more accurate detection. The work can be extended further by implementing other traditional learning and deep learning models. Other augmentation techniques can also be experimented with to improve the performance of the model.
|
| 248 |
+
|
| 249 |
+
# Data set availability
|
| 250 |
+
|
| 251 |
+
The data set is publicly available in a GitHub repository at https://github.com/Kayal-Sampath/detecting-signs-of-depression-from-social-media-postings.
|
| 252 |
+
|
| 253 |
+
# References
|
| 254 |
+
|
| 255 |
+
American psychiatric association. https://www.psychiatry.org/patients-families/depression/what-is-depression. (Accessed: 2021-11-17).
|
| 256 |
+
Institute of health metrics and evaluation. global health data exchange (ghdx). http://ghdx.healthdata.org/gbd-results-tool?params=gbd-api-2019-permalink/d780cffbe8a381b25e1416884959e88b. (Accessed: 2021-11-17).
|
| 257 |
+
Tuka Al Hanai, Mohammad M Ghassemi, and James R Glass. Detecting depression with audio/text sequence modeling of interviews. In Interspeech, pages 1716-1720, 2018.
|
| 258 |
+
Hamdi Dibeklioglu, Zakia Hammal, Ying Yang, and Jeffrey F Cohn. Multimodal detection of depression in clinical interviews. In Proceedings of the 2015 ACM on international conference on multimodal interaction, pages 307-310, 2015.
|
| 259 |
+
Sharifa Alghowinem, Roland Goecke, Michael Wagner, Julien Epps, Matthew Hyett, Gordon Parker, and Michael Breakspear. Multimodal depression detection: fusion analysis of paralinguistic, head pose and eye gaze behaviors. IEEE Transactions on Affective Computing, 9(4):478-490, 2016.
|
| 260 |
+
Md Nasir, Arindam Jati, Prashanth Gurunath Shivakumar, Sandeep Nallan Chakravarthula, and Panayiotis Georgiou. Multimodal and multiresolution depression detection from speech and facial landmark features. In Proceedings of the 6th international workshop on audio/visual emotion challenge, pages 43-50, 2016.
|
| 261 |
+
Jana M Havigerová, Jiří Haviger, Dalibor Kucera, and Petra Hoffmannová. Text-based detection of the risk of depression. Frontiers in psychology, 10:513, 2019.
|
| 262 |
+
Maxim Stankevich, Andrey Latyshev, Evgenia Kuminskaya, Ivan Smirnov, and Oleg Grigoriev. Depression detection from social media texts. In Data Analytics and Management in Data Intensive Domains: XXI International Conference DAMDID/RCDL-2019, page 352, 2019.
|
| 263 |
+
Michelle Renee Morales and Rivka Levitan. Speech vs. text: A comparative analysis of features for depression detection systems. In 2016 IEEE spoken language technology workshop (SLT), pages 136-143. IEEE, 2016.
|
| 264 |
+
Statista statistics. https://www.statista.com/statistics/278414/number-of-worldwide-social-network-users/. (Accessed: 2021-11-17).
|
| 265 |
+
JT Wolohan, Misato Hiraga, Atreyee Mukherjee, Zeeshan Ali Sayyed, and Matthew Millard. Detecting linguistic traces of depression in topic-restricted text: Attending to self-stigmatized depression with nlp. In Proceedings of the First International Workshop on Language Cognition and Computational Models, pages 11-21, 2018.
|
| 266 |
+
|
| 267 |
+
Johannes C Eichstaedt, Robert J Smith, Raina M Merchant, Lyle H Ungar, Patrick Crutchley, Daniel Preotciuc-Pietro, David A Asch, and H Andrew Schwartz. Facebook language predicts depression in medical records. Proceedings of the National Academy of Sciences, 115(44):11203-11208, 2018.
|
| 268 |
+
Andrew G Reece, Andrew J Reagan, Katharina LM Lix, Peter Sheridan Dodds, Christopher M Danforth, and Ellen J Langer. Forecasting the onset and course of mental illness with twitter data. Scientific reports, 7(1):1-11, 2017.
|
| 269 |
+
Sho Tsugawa, Yusuke Kikuchi, Fumio Kishino, Kosuke Nakajima, Yuichi Itoh, and Hiroyuki Ohsaki. Recognizing depression from twitter activity. In Proceedings of the 33rd annual ACM conference on human factors in computing systems, pages 3187-3196, 2015.
|
| 270 |
+
Mandar Deshpande and Vignesh Rao. Depression detection using emotion artificial intelligence. In 2017 international conference on intelligent sustainable systems (iciss), pages 858-862. IEEE, 2017.
|
| 271 |
+
Chenhao Lin, Pengwei Hu, Hui Su, Shaochun Li, Jing Mei, Jie Zhou, and Henry Leung. Sensemood: Depression detection on social media. In Proceedings of the 2020 International Conference on Multimedia Retrieval, pages 407-411, 2020.
|
| 272 |
+
Thin Nguyen, Dinh Phung, Bo Dao, Svetha Venkatesh, and Michael Berk. Affective and content analysis of online depression communities. IEEE Transactions on Affective Computing, 5(3):217-226, 2014.
|
| 273 |
+
Yevhen Tyshchenko. Depression and anxiety detection from blog posts data. Nature Precis. Sci., Inst. Comput. Sci. Univ. Tartu, Tartu, Estonia, 2018.
|
| 274 |
+
Andrew G Reece and Christopher M Danforth. Instagram photos reveal predictive markers of depression. *EPJ Data Science*, 6:1–12, 2017.
|
| 275 |
+
Healthline. https://www.healthline.com/health/depression/mild-depression. (Accessed: 2021-11-17).
|
| 276 |
+
David E Losada, Fabio Crestani, and Javier Parapar. erisk 2017: Clef lab on early risk prediction on the internet: experimental foundations. In International Conference of the Cross-Language Evaluation Forum for European Languages, pages 346-360. Springer, 2017.
|
| 277 |
+
Michael M. Tadesse, Hongfei Lin, Bo Xu, and Liang Yang. Detection of depression-related posts in reddit social media forum. IEEE Access, 7:44883-44893, 2019. doi:10.1109/ACCESS.2019.2909180.
|
| 278 |
+
Inna Pirina and Çağrı Çöltekin. Identifying depression on Reddit: The effect of training data. In Proceedings of the 2018 EMNLP Workshop SMM4H: The 3rd Social Media Mining for Health Applications Workshop & Shared Task, pages 9-12, Brussels, Belgium, October 2018. Association for Computational Linguistics. doi:10.18653/v1/W18-5903. URL https://aclanthology.org/W18-5903.
|
| 279 |
+
Hannah Yao, Sina Rashidian, Xinyu Dong, Hongyi Duanmu, Richard N Rosenthal, and Fusheng Wang. Detection of suicidality among opioid users on reddit: Machine learning-based approach. Journal of medical internet research, 22(11):e15293, 2020.
|
| 280 |
+
Nick Boettcher et al. Studies of depression and anxiety using reddit as a data source: Scoping review. JMIR Mental Health, 8(11):e29487, 2021.
|
| 281 |
+
Ron Artstein and Massimo Poesio. Inter-coder agreement for computational linguistics. Computational Linguistics, 34 (4):555-596, 2008.
|
| 282 |
+
Jacob Cohen. A coefficient of agreement for nominal scales. Educational and Psychological Measurement, 20(1):37-46, 1960.
|
| 283 |
+
F. Pedregosa, G. Varoquaux, A. Gramfort, et al. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830, 2011a.
|
| 284 |
+
J Richard Landis and Gary G Koch. The measurement of observer agreement for categorical data. Biometrics, 33(1):159-174, 1977.
|
| 285 |
+
F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duchesnay. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830, 2011b.
|
| 286 |
+
Martin F Porter. An algorithm for suffix stripping. *Program*, 1980.
|
| 287 |
+
Jeffrey Pennington, Richard Socher, and Christopher D Manning. Glove: Global vectors for word representation. In Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pages 1532-1543, 2014.
|
| 288 |
+
N. V. Chawla, K. W. Bowyer, L. O. Hall, and W. P. Kegelmeyer. Smote: Synthetic minority over-sampling technique. Journal of Artificial Intelligence Research, 16:321-357, Jun 2002. ISSN 1076-9757. doi:10.1613/jair.953. URL http://dx.doi.org/10.1613/jair.953.
|
2202.03xxx/2202.03047/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a600b72782d7713c31cf2cf0258b313bfd3defe811e9528207bc0cc73879d884
|
| 3 |
+
size 487884
|
2202.03xxx/2202.03047/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.03xxx/2202.03052/7c482f11-4fe8-48dc-95df-1f574b21fa82_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.03xxx/2202.03052/7c482f11-4fe8-48dc-95df-1f574b21fa82_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.03xxx/2202.03052/7c482f11-4fe8-48dc-95df-1f574b21fa82_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f3a9da0a0e63b3ec52ed392c630de72b559c82a8cac3ac205e78db383f083c10
|
| 3 |
+
size 35913390
|
2202.03xxx/2202.03052/full.md
ADDED
|
@@ -0,0 +1,580 @@
|
|
| 1 |
+
# OFA: UNIFYING ARCHITECTURES, TASKS, AND MODALITIES THROUGH A SIMPLE SEQUENCE-TO-SEQUENCE LEARNING FRAMEWORK
|
| 2 |
+
|
| 3 |
+
Peng Wang, An Yang, Rui Men, Junyang Lin, Shuai Bai
|
| 4 |
+
|
| 5 |
+
Zhikang Li, Jianxin Ma, Chang Zhou, Jingren Zhou, Hongxia Yang
|
| 6 |
+
|
| 7 |
+
DAMO Academy, Alibaba Group *
|
| 8 |
+
|
| 9 |
+
{zheluo.wp, ya235025, menrui.mr, junyang.ljy, baishuai.ps, zhikang.lzk, jason.mjx, ericzhou.zc, jingren.zhou, yang.yhx}@alibaba-inc.com
|
| 10 |
+
|
| 11 |
+

|
| 12 |
+
Figure 1: Examples of various tasks supported by OFA.
|
| 13 |
+
|
| 14 |
+

|
| 15 |
+
|
| 16 |
+

|
| 17 |
+
|
| 18 |
+

|
| 19 |
+
|
| 20 |
+
# ABSTRACT
|
| 21 |
+
|
| 22 |
+
In this work, we pursue a unified paradigm for multimodal pretraining to break the scaffolds of complex task/modality-specific customization. We propose OFA, a Task-Agnostic and Modality-Agnostic framework that supports Task Comprehensiveness. OFA unifies a diverse set of cross-modal and unimodal tasks, including image generation, visual grounding, image captioning, image classification, language modeling, etc., in a simple sequence-to-sequence learning framework. OFA follows the instruction-based learning in both pretraining and finetuning stages, requiring no extra task-specific layers for downstream tasks. In comparison with the recent state-of-the-art vision & language models that rely on extremely large cross-modal datasets, OFA is pretrained on only 20M publicly available image-text pairs. Despite its simplicity and relatively small-scale training data, OFA achieves new SOTAs in a series of cross-modal tasks while attaining highly competitive performances on uni-modal tasks. Our further analysis indicates that OFA can also effectively transfer to unseen tasks and unseen domains. Our code and models are publicly available at https://github.com/OFA-Sys/OFA.
|
| 23 |
+
|
| 24 |
+
Keywords Unified frameworks $\cdot$ Multimodal pretraining $\cdot$ Multitask learning $\cdot$ Zero-shot learning
|
| 25 |
+
|
| 26 |
+
# 1 Introduction
|
| 27 |
+
|
| 28 |
+
Building an omnipotent model that handles as many tasks and modalities as human beings is an attractive goal in the AI community. The possibilities of achieving this goal may largely depend on whether massive varieties of modalities, tasks and training regimes can be represented with only a few forms that can be unified and managed by a single model or system.
|
| 29 |
+
|
| 30 |
+
Recent developments of the Transformer [1] architecture have shown its potential for being a universal computation engine [2, 3, 4, 5, 6, 7, 8]. In the settings of supervised learning, the "pretrain-finetune" paradigm achieves excellent success in many domains. In the regimes of few-/zero-shot learning, language models with prompt / instruction tuning prove to be powerful zero-/few-shot learners [3, 9, 10]. These advances have provided greater opportunities than ever for the emergence of an omni-model.
|
| 31 |
+
|
| 32 |
+
To support better generalization for open-ended problems while maintaining multitask performance and ease of use, we advocate that an omnipotent model should have the following three properties: 1. Task-Agnostic (TA): unified task representation to support different types of tasks, including classification, generation, self-supervised pretext tasks, etc., and to be agnostic to either pretraining or finetuning. 2. Modality-Agnostic (MA): unified input and output representation shared among all tasks to handle different modalities. 3. Task Comprehensiveness (TC): enough task variety to accumulate generalization ability robustly.
|
| 33 |
+
|
| 34 |
+
However, it is challenging to satisfy these properties while maintaining superior performance in downstream tasks. Current language and multimodal pretrained models readily fail at parts of these properties, due to their following design choices. 1. Extra learnable components for finetuning, e.g., task-specific heads [2], adapters [11], soft prompts [12]. This makes the model structure task-specific and poses discrepancy between pretraining and finetuning. Such designs are also not friendly to supporting unseen tasks in a zero-shot manner. 2. Task-specific formulation. For most current methods, pretraining, finetuning and zero-shot tasks usually differ in task formulation and training objectives. This violates TA and it is burdensome to scale up the task population to achieve TC. 3. Entangling modality representation with downstream tasks. It is a common practice for Vision-Language models to take the detected objects as part of the image input features [8, 13, 14, 15, 16, 17]. Though it demonstrates better downstream task performance on some closed-domain datasets, it depends on an extra object detector which usually fails at open-domain data.
|
| 35 |
+
|
| 36 |
+
Therefore, we explore an omni-model for multimodal pretraining and propose OFA, hopefully "One For All", which achieves the objectives of unifying architectures, tasks, and modalities, and supports the three properties above.2 We formulate both pretraining and finetuning tasks in a unified sequence-to-sequence abstraction via handcrafted instructions [9, 10] to achieve Task-Agnostic. A Transformer is adopted as the Modality-Agnostic compute engine, with a constraint that no learnable task- or modality-specific components will be added to downstream tasks. It is available to represent information from different modalities within a globally shared multimodal vocabulary across all tasks. We then support Task Comprehensiveness by pretraining on varieties of uni-modal and cross-modal tasks.
|
| 37 |
+
|
| 38 |
+
# To summarize:
|
| 39 |
+
|
| 40 |
+
- We propose OFA, a Task-Agnostic and Modality-Agnostic framework that supports Task Comprehensiveness. OFA is the first attempt to unify the following vision & language, vision-only and language-only tasks, including understanding and generation, e.g., text-to-image generation, visual grounding, visual question answering (VQA), image captioning, image classification, language modeling, etc., via a simple sequence-to-sequence learning framework with a unified instruction-based task representation.
|
| 41 |
+
- OFA is pretrained on the publicly available datasets of 20M image-text pairs, in comparison with recent models that rely on paired data of a much larger scale [22, 23]. OFA achieves state-of-the-art performances in a series of vision & language downstream tasks, including image captioning, visual question answering, visual entailment, referring expression comprehension, etc.
|
| 42 |
+
- OFA, as a multimodal pretrained model, achieves comparable performances on unimodal tasks with SOTA pretrained models in language or vision, e.g., RoBERTa, ELECTRA and DeBERTa for natural language understanding, UniLM, Pegasus and ProphetNet for natural language generation, and MoCo-v3, BEiT and MAE for image classification.
|
| 43 |
+
- We verify that OFA achieves competitive performance in zero-shot learning. Also, it can transfer to unseen tasks with new task instructions and adapt to out-of-domain information without finetuning.
|
| 44 |
+
|
| 45 |
+

|
| 46 |
+
Figure 2: A demonstration of the pretraining tasks, including visual grounding, grounded captioning, image-text matching, image captioning, VQA, object detection, image infilling as well as text infilling.
|
| 47 |
+
|
| 48 |
+
# 2 Related Work
|
| 49 |
+
|
| 50 |
+
Language Pretraining & Vision Pretraining Natural language pretraining has revolutionized the whole NLP research community. A representation of this track is the birth of BERT [2] and GPT [24]. A number of studies have been progressively advancing pretraining by improving pretraining tasks and designing more sophisticated model architectures [25, 26, 27, 28, 29, 30, 31]. Having witnessed the success of natural language pretraining, researchers have promoted self-supervised learning (SSL) in computer vision [32, 33, 34, 35]. Recently, mirroring masked language modeling (MLM) in language pretraining, generative pretraining [36, 37] with ViT architecture [6] further boosts downstream performance.
|
| 51 |
+
|
| 52 |
+
Multimodal Pretraining Multimodal pretraining has been developing rapidly [38, 13, 39, 40, 14, 41, 42, 43, 44, 15, 16, 17, 45, 46, 47]. Researchers have applied the masking strategies and the encoder-decoder architecture to adapt models to generation tasks [15, 17, 18, 22]. Besides, to simplify preprocessing, patch projection has received attention and helped Transformer achieve SOTA performance in downstream tasks [22, 48]. To make full use of large-scale weakly supervised data, [49] trains a bi-encoder on 400 million pairs and demonstrates excellent performance in retrieval tasks. Another line of work is text-to-image synthesis. A bunch of works [50, 51, 18, 52] incorporate Transformer with VQVAE [53] or VQGAN [54] to generate high-quality images with high resolution. However, the previously mentioned methods are limited in processing a single type of data, such as cross-modal data only or limited in their capabilities. Also, the discrepancy between pretraining and finetuning behaviors limits the transferability to open-ended data.
|
| 53 |
+
|
| 54 |
+
Unified Frameworks To pursue the unified models, [55] demonstrate a uniform format to represent tasks. In NLP, recent studies unify diverse tasks covering natural language understanding and generation to text-to-text transfer [30] or language modeling [3]. Following this idea, [56] and [57] demonstrate text-generation-based multimodal pretrained models. [7] and [58] propose a simple framework that can process information from multiple modalities with a uniform byte-sequence representation. [59] and [60] unify tasks of different modalities by designing various task-specific layers. [61] explores to employ a retrieval-based unified paradigm. However, these multimodal pretrained models suffer from performance degradation in downstream tasks, e.g., VQA, image captioning, etc., and they have no image generation capability.
|
| 55 |
+
|
| 56 |
+
# 3 OFA
|
| 57 |
+
|
| 58 |
+
In this work, we propose OFA, a unified Seq2Seq framework for the unification of I/O & architectures, tasks, and modalities. The overall framework is illustrated in Figure 2.
|
| 59 |
+
|
| 60 |
+
# 3.1 I/O & Architecture
|
| 61 |
+
|
| 62 |
+
I/O The most common practice of multimodal pretraining is the pretraining of Transformer models on image-text pair corpus at scale. This requires data preprocessing or modality-specific adaptors to enable the joint training of both visual and linguistic information with the Transformer architecture. Compared with the complex, resource&time-consuming object feature extraction, we aim for simplicity and directly use ResNet modules to convolve $\mathbf{x}_v \in \mathbb{R}^{H \times W \times C}$ to $P$ patch features of the hidden size, following [62] and [22]. As to processing the linguistic information, we follow the practice of GPT [24] and BART [31] that we apply byte-pair encoding (BPE) [63] to the given text sequence to transform it into a subword sequence and then embed them to features.
|
| 63 |
+
|
| 64 |
+
To process different modalities without task-specific output schema, it is essential to represent data of various modalities in a unified space. A possible solution is to discretize text, image, and object and represent them with tokens in a unified vocabulary. Recent advances in image quantization [53, 54] have demonstrated effectiveness in text-to-image synthesis [50, 18, 51, 19], and thus we utilize this strategy for the target-side image representations. Sparse coding is effective in reducing the sequence length of image representation. For example, an image of the resolution of $256 \times 256$ is represented as a code sequence of the length of $16 \times 16$ . Each discrete code strongly correlates with the corresponding patch [36].
|
| 65 |
+
|
| 66 |
+
Apart from representing images, it is also essential to represent objects within images, as there are a number of region-related tasks. Following [64], we represent objects as a sequence of discrete tokens. To be more specific, for each object, we extract its label and its bounding box. The continuous corner coordinates (the top left and the bottom right) of the bounding box are uniformly discretized to integers as location tokens $\langle x_1, y_1, x_2, y_2 \rangle$ . As to the object labels, they are intrinsically words and thus can be represented with BPE tokens.
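A minimal sketch of this coordinate discretization is given below; the number of bins and the `<bin_k>` token naming are illustrative assumptions, not values given in the text above.

```python
# Illustrative discretization of bounding-box corners into location tokens.
def box_to_location_tokens(x1, y1, x2, y2, img_w, img_h, num_bins=1000):
    """Uniformly map corner coordinates to integer location tokens <bin_k>."""
    def quantize(v, size):
        # Map a coordinate in [0, size] to an integer bin in [0, num_bins - 1].
        return min(int(v / size * num_bins), num_bins - 1)
    return [f"<bin_{quantize(x1, img_w)}>", f"<bin_{quantize(y1, img_h)}>",
            f"<bin_{quantize(x2, img_w)}>", f"<bin_{quantize(y2, img_h)}>"]

# A box in a 640x480 image becomes four tokens <x1, y1, x2, y2>.
print(box_to_location_tokens(32, 48, 320, 240, img_w=640, img_h=480))
```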
|
| 67 |
+
|
| 68 |
+
Finally, we use a unified vocabulary for all the linguistic and visual tokens, including subwords, image codes, and location tokens.
|
| 69 |
+
|
| 70 |
+
Architecture Following the previous successful practices in multimodal pretraining [14, 17, 22], we choose Transformer as the backbone architecture, and we adopt the encoder-decoder framework as the unified architecture for all the pretraining, finetuning, and zero-shot tasks. Specifically, both the encoder and the decoder are stacks of Transformer layers. A Transformer encoder layer consists of a self attention and a feed-forward network (FFN), while a Transformer decoder layer consists of a self attention, an FFN and a cross attention for building the connection between the decoder and the encoder output representations. To stabilize training and accelerate convergence, we add head scaling to self attention, a post-attention layer normalization (LN) [65], and an LN following the first layer of FFN [66]. For positional information, we use two absolute position embeddings for text and images, respectively. Instead of simply adding the position embeddings, we decouple the position correlation from token embeddings and patch embeddings [67]. In addition, we also use 1D relative position bias for text [30] and 2D relative position bias for images [22, 62].
|
| 71 |
+
|
| 72 |
+
# 3.2 Tasks & Modalities
|
| 73 |
+
|
| 74 |
+
A unified framework is designed to provide architecture compatibility across different modalities and downstream tasks so that opportunities can arise to generalize to unseen tasks within the same model. Then we have to represent the possible downstream tasks concerning different modalities in a unified paradigm. Therefore, an essential point for the design of pretraining tasks is the consideration of multitask and multimodality.
|
| 75 |
+
|
| 76 |
+
To unify tasks and modalities, we design a unified sequence-to-sequence learning paradigm for pretraining, finetuning, and inference on all tasks concerning different modalities. Both pretraining tasks and downstream tasks of cross-modal and uni-modal understanding and generation are all formed as Seq2Seq generation. It is available to perform multitask pretraining on multimodal and uni-modal data to endow the model with comprehensive capabilities. Specifically, we share the identical schema across all tasks, while we specify handcrafted instructions for discrimination [9].
|
| 77 |
+
|
| 78 |
+
For cross-modal representation learning, we design 5 tasks, including visual grounding (VG), grounded captioning (GC), image-text matching (ITM), image captioning (IC), and visual question answering (VQA). For VG, the model learns to generate location tokens specifying the region position $\langle x_1,y_1,x_2,y_2\rangle$ based on the input of the image $x^i$ and the instruction "Which region does the text $x^t$ describe?" where $x^t$ refers to the region caption. GC is an inverse task of VG. The model learns to generate a description based on the input image $x^i$ and the instruction "What does the region describe? region: $\langle x_1,y_1,x_2,y_2\rangle$ " . For ITM, we use each original image-text pair as the positive sample and construct a new one as the negative by pairing the image with a randomly substituted caption. The model learns to discriminate whether the given image and text are paired by learning to generate "Yes" or "No" based on the input image $x^i$ and the instruction "Does the image describe $x^t?$ " . As to image captioning, this task can naturally adapt to the sequence-to-sequence format. The model learns to generate the caption based on the given image and the instruction
|
| 79 |
+
|
| 80 |
+
Table 1: Detailed hyperparameters of OFA model configuration. We list the configuration for OFA of 5 different sizes.
|
| 81 |
+
|
| 82 |
+
<table><tr><td>Model</td><td>#Param.</td><td>Backbone</td><td>Hidden size</td><td>Intermediate Size</td><td>#Head</td><td>#Enc. Layers</td><td>#Dec. Layers</td></tr><tr><td>\(\mathsf{OFA}_{\mathsf{Tiny}}\)</td><td>33M</td><td>ResNet50</td><td>256</td><td>1024</td><td>4</td><td>4</td><td>4</td></tr><tr><td>\(\mathsf{OFA}_{\mathsf{Medium}}\)</td><td>93M</td><td>ResNet101</td><td>512</td><td>2048</td><td>8</td><td>4</td><td>4</td></tr><tr><td>\(\mathsf{OFA}_{\mathsf{Base}}\)</td><td>182M</td><td>ResNet101</td><td>768</td><td>3072</td><td>12</td><td>6</td><td>6</td></tr><tr><td>\(\mathsf{OFA}_{\mathsf{Large}}\)</td><td>472M</td><td>ResNet152</td><td>1024</td><td>4096</td><td>16</td><td>12</td><td>12</td></tr><tr><td>\(\mathsf{OFA}_{\mathsf{Huge}}\)</td><td>930M</td><td>ResNet152</td><td>1280</td><td>5120</td><td>16</td><td>24</td><td>12</td></tr></table>
|
| 83 |
+
|
| 84 |
+
"What does the image describe?" For VQA, we send the image and the question as the input and require the model to learn to generate correct answers.
|
| 85 |
+
|
| 86 |
+
For uni-modal representation learning, we design 2 tasks for vision and 1 task for language, respectively. The model is pretrained with image infilling and object detection for vision representation learning. Recent advances in generative self-supervised learning for computer vision show that masked image modeling is an effective pretraining task [36, 37]. In practice, we mask the middle part of the images as the input. The model learns to generate the sparse codes for the central part of the image based on the corrupted input and the specified instruction "What is the image in the middle part?". We additionally add object detection to pretraining following [44]. The model learns to generate human-annotated object representations, i.e., the sequence of object position and label, based on the input image and the text "What are the objects in the image?" as the instruction. Both tasks strengthen the representation learning on both pixel and object levels. For language representation learning, following the practice of [31], we pretrain the unified model on plain text data with text infilling.
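A minimal sketch of the input corruption used for image infilling, i.e., masking the middle part of an image, is given below; the masked fraction is an assumption for illustration only.

```python
# Illustrative "mask the middle part" corruption for image infilling.
import numpy as np

img = np.random.rand(256, 256, 3)                      # stand-in for a real image
h, w = img.shape[:2]
masked = img.copy()
masked[h // 4: 3 * h // 4, w // 4: 3 * w // 4] = 0.0   # zero out the central region
# The model is then asked, via the instruction, to generate the sparse codes
# of the removed central region from this corrupted input.
```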
|
| 87 |
+
|
| 88 |
+
In this way, we unify multiple modalities and multiple tasks to a single model and pretraining paradigm. OFA is pretrained jointly with those tasks and data. Thus, it can perform different tasks concerning natural language, vision, and cross-modality.
|
| 89 |
+
|
| 90 |
+
# 3.3 Pretraining Datasets
|
| 91 |
+
|
| 92 |
+
We construct pretraining datasets by incorporating Vision & Language data (i.e., image-text pairs), Vision data (i.e., raw image data, object-labeled data), and Language data (i.e., plain texts). For replication, we only use datasets that are publicly available. We carefully filter our pretraining data and exclude images that appear in the validation and test sets of downstream tasks to avoid data leakage. We provide more details about pretraining datasets in Appendix A.1.
|
| 93 |
+
|
| 94 |
+
# 3.4 Training & Inference
|
| 95 |
+
|
| 96 |
+
We optimize the model with the cross-entropy loss. Given an input $x$ , an instruction $s$ and an output $y$ , we train OFA by minimizing $\mathcal{L} = -\sum_{i=1}^{|y|} \log P_{\theta}(y_i | y_{<i}, x, s)$ , where $\theta$ refers to the model parameters. For inference, we apply the decoding strategies, e.g., beam search, to enhance the quality of generation. However, this paradigm has several problems in classification tasks: 1. optimizing on the entire vocabulary is unnecessary and inefficient; 2. the model may generate invalid labels out of the closed label set during inference. To overcome these issues, we introduce a search strategy based on prefix tree (Trie, [68]). Experimental results show that the Trie-based search can enhance the performance of OFA on classification tasks. See Appendix B for more details.
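As a rough illustration of the Trie-based constraint, the sketch below builds a prefix tree over a closed label set and, given the tokens decoded so far, returns the only continuations that remain valid; the label-to-token mapping is invented for demonstration and does not reflect OFA's actual vocabulary.

```python
# Illustrative Trie-constrained decoding over a closed label set.
def build_trie(label_token_ids):
    trie = {}
    for ids in label_token_ids:
        node = trie
        for t in ids:
            node = node.setdefault(t, {})
    return trie

def allowed_next_tokens(trie, prefix):
    node = trie
    for t in prefix:
        node = node.get(t, {})
    return list(node.keys())

labels = {"yes": [7], "no": [8], "maybe": [9, 4]}   # fake label -> token ids
trie = build_trie(labels.values())
print(allowed_next_tokens(trie, []))    # first step: only 7, 8 or 9 allowed
print(allowed_next_tokens(trie, [9]))   # after token 9: only 4 allowed
```

During inference, the logits of all tokens outside this allowed set would be masked out at each step, so the model can only produce labels from the closed set.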
|
| 97 |
+
|
| 98 |
+
# 3.5 Scaling Models
|
| 99 |
+
|
| 100 |
+
In order to investigate how OFA of different model sizes perform in downstream tasks, we have developed 5 versions of OFA models, scaling from 33M to 940M parameters, and we list their detailed hyperparameters in Table 1.
|
| 101 |
+
|
| 102 |
+
To be more specific, we have built basic models of Base and Large sizes, $\mathrm{OFA_{Base}}$ and $\mathrm{OFA_{Large}}$ . As our network configuration is similar to BART [31], their sizes are similar to those of BART $_{\text{Base}}$ and BART $_{\text{Large}}$ . Additionally, we have developed OFA of a larger size, which we name $\mathrm{OFA_{Huge}}$ , or simply OFA when not specified in the tables. Its size is comparable to that of SimVLM $_{\text{Huge}}$ or ViT $_{\text{Huge}}$ . To investigate whether smaller OFA can still reach satisfactory performance, we have developed $\mathrm{OFA_{Medium}}$ and $\mathrm{OFA_{Tiny}}$ , which are, respectively, around half and less than $20\%$ as large as $\mathrm{OFA_{Base}}$ .
|
| 103 |
+
|
| 104 |
+
Table 2: Experimental results on cross-modal understanding tasks including VQA and visual entailment. Note that we report the best results from the previous SOTAs, and specifically SimVLM is a huge-size model comparable to ViT-Huge pretrained on 1.8B image-text pairs, and Florence is built with CoSwin-H and RoBERTa and it is pretrained on 900M image-text pairs.
|
| 105 |
+
|
| 106 |
+
<table><tr><td>Model</td><td>VQATest-dev</td><td>test-standard</td><td>SNL1-VEdev</td><td>test</td></tr><tr><td>UNITER [14]</td><td>73.8</td><td>74.0</td><td>79.4</td><td>79.4</td></tr><tr><td>OSCAR [15]</td><td>73.6</td><td>73.8</td><td>-</td><td>-</td></tr><tr><td>VILLA [16]</td><td>74.7</td><td>74.9</td><td>80.2</td><td>80.0</td></tr><tr><td>VL-T5 [56]</td><td>-</td><td>70.3</td><td>-</td><td>-</td></tr><tr><td>VinVL [17]</td><td>76.5</td><td>76.6</td><td>-</td><td>-</td></tr><tr><td>UNIMO [46]</td><td>75.0</td><td>75.3</td><td>81.1</td><td>80.6</td></tr><tr><td>ALBEF [69]</td><td>75.8</td><td>76.0</td><td>80.8</td><td>80.9</td></tr><tr><td>METER [70]</td><td>77.7</td><td>77.6</td><td>80.9</td><td>81.2</td></tr><tr><td>VLMo [48]</td><td>79.9</td><td>80.0</td><td>-</td><td>-</td></tr><tr><td>SimVLM [22]</td><td>80.0</td><td>80.3</td><td>86.2</td><td>86.3</td></tr><tr><td>Florence [23]</td><td>80.2</td><td>80.4</td><td>-</td><td>-</td></tr><tr><td>\(\mathsf{OFA}_{\mathsf{Tiny}}\)</td><td>70.3</td><td>70.4</td><td>85.3</td><td>85.2</td></tr><tr><td>\(\mathsf{OFA}_{\mathsf{Medium}}\)</td><td>75.4</td><td>75.5</td><td>86.6</td><td>87.0</td></tr><tr><td>\(\mathsf{OFA}_{\mathsf{Base}}\)</td><td>78.0</td><td>78.1</td><td>89.3</td><td>89.2</td></tr><tr><td>\(\mathsf{OFA}_{\mathsf{Large}}\)</td><td>80.3</td><td>80.5</td><td>90.3</td><td>90.2</td></tr><tr><td>OFA</td><td>82.0</td><td>82.0</td><td>91.0</td><td>91.2</td></tr></table>
|
| 107 |
+
|
| 108 |
+
Table 3: Experimental results on MSCOCO Image Captioning. We report the results on the Karpathy test split. Note that SimVLM and LEMON are huge-size models.
|
| 109 |
+
|
| 110 |
+
<table><tr><td rowspan="2">Model</td><td colspan="4">Cross-Entropy Optimization</td><td colspan="4">CIDEr Optimization</td></tr><tr><td>BLEU@4</td><td>METEOR</td><td>CIDEr</td><td>SPICE</td><td>BLEU@4</td><td>METEOR</td><td>CIDEr</td><td>SPICE</td></tr><tr><td>VL-T5 [56]</td><td>34.5</td><td>28.7</td><td>116.5</td><td>21.9</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>OSCAR [15]</td><td>37.4</td><td>30.7</td><td>127.8</td><td>23.5</td><td>41.7</td><td>30.6</td><td>140.0</td><td>24.5</td></tr><tr><td>UNICORN [57]</td><td>35.8</td><td>28.4</td><td>119.1</td><td>21.5</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>VinVL [17]</td><td>38.5</td><td>30.4</td><td>130.8</td><td>23.4</td><td>41.0</td><td>31.1</td><td>140.9</td><td>25.2</td></tr><tr><td>UNIMO [46]</td><td>39.6</td><td>-</td><td>127.7</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>LEMON [71]</td><td>41.5</td><td>30.8</td><td>139.1</td><td>24.1</td><td>42.6</td><td>31.4</td><td>145.5</td><td>25.5</td></tr><tr><td>SimVLM [22]</td><td>40.6</td><td>33.7</td><td>143.3</td><td>25.4</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>OFATiny</td><td>35.9</td><td>28.1</td><td>119.0</td><td>21.6</td><td>38.1</td><td>29.2</td><td>128.7</td><td>23.1</td></tr><tr><td>OFAMedium</td><td>39.1</td><td>30.0</td><td>130.4</td><td>23.2</td><td>41.4</td><td>30.8</td><td>140.7</td><td>24.8</td></tr><tr><td>OFABase</td><td>41.0</td><td>30.9</td><td>138.2</td><td>24.2</td><td>42.8</td><td>31.7</td><td>146.7</td><td>25.8</td></tr><tr><td>OFALarge</td><td>42.4</td><td>31.5</td><td>142.2</td><td>24.5</td><td>43.6</td><td>32.2</td><td>150.7</td><td>26.2</td></tr><tr><td>OFA</td><td>43.9</td><td>31.8</td><td>145.3</td><td>24.8</td><td>44.9</td><td>32.5</td><td>154.9</td><td>26.6</td></tr></table>
|
| 111 |
+
|
| 112 |
+
# 4 Experiments
|
| 113 |
+
|
| 114 |
+
This section provides experimental details and analyses to demonstrate our model's effectiveness. See Appendix A for implementation details.
|
| 115 |
+
|
| 116 |
+
# 4.1 Results on Cross-modal Tasks
|
| 117 |
+
|
| 118 |
+
We evaluate our models on different cross-modal downstream tasks, covering cross-modal understanding and generation. Specifically, we implement experiments on multimodal understanding datasets including VQAv2 for visual question answering and SNLI-VE [73] for visual entailment, and multimodal generation including MSCOCO Image Caption [74] for image captioning, RefCOCO / RefCOCO+ / RefCOCOg [75, 76] for referring expression comprehension as this
|
| 119 |
+
|
| 120 |
+
Table 4: Experimental results on the 3 datasets of referring expression comprehension, namely RefCOCO, RefCOCO+, and RefCOCOg. We report the Acc@0.5 on different test splits of the datasets.
|
| 121 |
+
|
| 122 |
+
<table><tr><td rowspan="2">Model</td><td colspan="3">RefCOCO</td><td colspan="3">RefCOCO+</td><td colspan="2">RefCOCOg</td></tr><tr><td>val</td><td>testA</td><td>testB</td><td>val</td><td>testA</td><td>testB</td><td>val-u</td><td>test-u</td></tr><tr><td>VL-T5 [56]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>71.3</td></tr><tr><td>UNITER [14]</td><td>81.41</td><td>87.04</td><td>74.17</td><td>75.90</td><td>81.45</td><td>66.70</td><td>74.86</td><td>75.77</td></tr><tr><td>VILLA [16]</td><td>82.39</td><td>87.48</td><td>74.84</td><td>76.17</td><td>81.54</td><td>66.84</td><td>76.18</td><td>76.71</td></tr><tr><td>MDETR [72]</td><td>86.75</td><td>89.58</td><td>81.41</td><td>79.52</td><td>84.09</td><td>70.62</td><td>81.64</td><td>80.89</td></tr><tr><td>UNICORN [57]</td><td>88.29</td><td>90.42</td><td>83.06</td><td>80.30</td><td>85.05</td><td>71.88</td><td>83.44</td><td>83.93</td></tr><tr><td>\(\mathsf{OFA}_{\mathsf{Tiny}}\)</td><td>80.20</td><td>84.07</td><td>75.00</td><td>68.22</td><td>75.13</td><td>57.66</td><td>72.02</td><td>69.74</td></tr><tr><td>\(\mathsf{OFA}_{\mathsf{Medium}}\)</td><td>85.34</td><td>87.68</td><td>77.92</td><td>76.09</td><td>83.04</td><td>66.25</td><td>78.76</td><td>78.58</td></tr><tr><td>\(\mathsf{OFA}_{\mathsf{Base}}\)</td><td>88.48</td><td>90.67</td><td>83.30</td><td>81.39</td><td>87.15</td><td>74.29</td><td>82.29</td><td>82.31</td></tr><tr><td>\(\mathsf{OFA}_{\mathsf{Large}}\)</td><td>90.05</td><td>92.93</td><td>85.26</td><td>85.80</td><td>89.87</td><td>79.22</td><td>85.89</td><td>86.55</td></tr><tr><td>OFA</td><td>92.04</td><td>94.03</td><td>88.44</td><td>87.86</td><td>91.70</td><td>80.71</td><td>88.07</td><td>88.78</td></tr></table>
|
| 123 |
+
|
| 124 |
+
task can be viewed as bounding box generation, and MSCOCO Image Caption for text-to-image generation. More details are provided in Appendix A.3.
|
| 125 |
+
|
| 126 |
+
Table 2 presents the performance of OFA and baseline models on VQA and SNLI-VE. In general, OFA achieves the best performance in both tasks with 82.0 on the VQA test-standard set and 91.2 on the SNLI-VE test set. For smaller-size models, $\mathrm{OFA}_{\text{Large}}$ can outperform the recent SOTAs, e.g., VLMo and SimVLM, and $\mathrm{OFA}_{\text{Base}}$ can beat the SOTAs before the aforementioned two models in both tasks. This demonstrates that OFA can achieve superior performance on cross-modal understanding tasks and scaling up OFA can bring significant improvements, reflecting the strong potential of large-scale pretrained models.
|
| 127 |
+
|
| 128 |
+
Table 3 presents the performance of OFA and baseline models on the MSCOCO image captioning dataset. We report the results on the Karpathy test split, and we demonstrate the performance of models trained with Cross-Entropy optimization and additionally with CIDEr optimization based on reinforcement learning. In comparison with the previous SOTA $\mathrm{SimVLM_{Huge}}$ for Cross-Entropy optimization, OFA outperforms it by around 2 points in CIDEr evaluation. For CIDEr optimization, OFA of the 3 sizes all outperform the huge-size LEMON, and OFA demonstrates a new SOTA of 154.9 CIDEr score. By May 31 2022, the single-model OFA had topped the MSCOCO Image Caption Leaderboard. $^3$
|
| 129 |
+
|
| 130 |
+
To evaluate the capability of visual grounding, we conduct experiments on RefCOCO, RefCOCO+, and RefCOCOg. Since we unify location tokens into the vocabulary, visual grounding can be viewed as a sequence generation task. As there is only one target for each query, we limit the generation length to 4 in order to generate a bounding box as $< x_{1}, y_{1}, x_{2}, y_{2} >$ . Experimental results in Table 4 show that OFA reaches the SOTA performance on the 3 datasets. Compared with the previous SOTA UNICORN [57], OFA achieves significant improvement with a gain of 3.61, 6.65 and 4.85 points on the testA sets of RefCOCO and RefCOCO+ as well as the test-u set of RefCOCOg.
|
| 131 |
+
|
| 132 |
+
Text-to-image generation is a challenging task even for pretrained models. As we pretrain OFA with the image-infilling task, i.e., recovering masked patches by generating the corresponding codes [36], OFA is able to generate image codes. We therefore directly finetune OFA on the MSCOCO Image Caption dataset for text-to-code generation. At the inference stage, we additionally transform the generated codes to an image with the code decoder. Specifically, we use the codes from VQGAN [54] following [52]. Experimental results show that OFA outperforms the baselines in all the metrics. Note that increasing the sampling size during inference is expected to bring clear improvements on FID and IS. Compared with DALLE [50], CogView [51] and NUWA [52], whose sampling sizes are 512, 60 and 60, respectively, OFA outperforms these SOTA methods on FID and IS with a much smaller sampling size of 24. This illustrates that OFA has learned better correspondence among the query text, the image and the image codes.
|
| 133 |
+
|
| 134 |
+
We compare OFA with CogView and GLIDE on generation quality with normal and counterfactual queries. Normal queries describe existing things in the real world, while counterfactual queries refer to those describing things that could only exist in our imagination. For normal queries, both CogView and OFA generate images semantically consistent with the given texts, in comparison with GLIDE. The generated examples from our model can provide more sophisticated
|
| 135 |
+
|
| 136 |
+

|
| 137 |
+
Figure 3: Qualitative comparison with state-of-the-art models for text-to-image generation task. We present more qualitative examples of text-to-image generation for better demonstration in Appendix C.
|
| 138 |
+
|
| 139 |
+

|
| 140 |
+
|
| 141 |
+
Table 5: Experimental results on text-to-image generation. Models are evaluated on FID, CLIPSIM, and IS scores. OFA outperforms the baselines, including the concurrent SOTA NüWA. We report the results of $\mathrm{OFA}_{\text{Large}}$ . Note that GLIDE additionally has $1.5B$ parameters for upsampling in addition to its $3.5B$ parameters.
|
| 142 |
+
|
| 143 |
+
<table><tr><td>Model</td><td>FID↓</td><td>CLIPSIM↑</td><td>IS↑</td></tr><tr><td>DALLE [50]</td><td>27.5</td><td>-</td><td>17.9</td></tr><tr><td>CogView [51]</td><td>27.1</td><td>33.3</td><td>18.2</td></tr><tr><td>GLIDE [77]</td><td>12.2</td><td>-</td><td>-</td></tr><tr><td>Unifying [78]</td><td>29.9</td><td>30.9</td><td>-</td></tr><tr><td>NÜWA [52]</td><td>12.9</td><td>34.3</td><td>27.2</td></tr><tr><td>OFA</td><td>10.5</td><td>34.4</td><td>31.1</td></tr></table>
|
| 144 |
+
|
| 145 |
+
details of objects, say the horse and the double-decker bus. For counterfactual queries, we find that OFA is the only one that can generate the three imaginary scenes, which indicates its imaginative power based on its strong capability to align text to the image. See Appendix C for more qualitative examples.
|
| 146 |
+
|
| 147 |
+
# 4.2 Results on Uni-modal Tasks
|
| 148 |
+
|
| 149 |
+
As the design of OFA unifies different modalities, we evaluate its performance on unimodal tasks, namely tasks of natural language and computer vision. For natural language tasks, we evaluate OFA on 6 tasks of the GLUE benchmark [79] for natural language understanding and Gigaword abstractive summarization [80] for natural language generation. For computer vision, we evaluate OFA on the classic ImageNet-1K [81] dataset for image classification. More details are provided in Appendix A.3.
|
| 150 |
+
|
| 151 |
+
As OFA has been pretrained on plain text data, it can be directly transferred to natural language downstream tasks. For natural language generation, it is essentially a sequence-to-sequence generation task, and for natural language
|
| 152 |
+
|
| 153 |
+
Table 6: Experimental results on the GLUE benchmark datasets [79]. For comparison, we list the performance of multimodal pretrained models as well as the recent SOTA models that were pretrained on natural language data only. Following [28], we finetune RTE and MRPC starting from the checkpoint finetuned on MNLI.
|
| 154 |
+
|
| 155 |
+
<table><tr><td>Model</td><td>SST-2</td><td>RTE</td><td>MRPC</td><td>QQP</td><td>MNLI</td><td>QNLI</td></tr><tr><td colspan="7">Multimodal Pretrained Baseline Models</td></tr><tr><td>VisualBERT [38]</td><td>89.4</td><td>56.6</td><td>71.9</td><td>89.4</td><td>81.6</td><td>87.0</td></tr><tr><td>UNITER [14]</td><td>89.7</td><td>55.6</td><td>69.3</td><td>89.2</td><td>80.9</td><td>86.0</td></tr><tr><td>VL-BERT [8]</td><td>89.8</td><td>55.7</td><td>70.6</td><td>89.0</td><td>81.2</td><td>86.3</td></tr><tr><td>VilBERT [13]</td><td>90.4</td><td>53.7</td><td>69.0</td><td>88.6</td><td>79.9</td><td>83.8</td></tr><tr><td>LXMERT [40]</td><td>90.2</td><td>57.2</td><td>69.8</td><td>75.3</td><td>80.4</td><td>84.2</td></tr><tr><td>Uni-Perceiver [61]</td><td>90.2</td><td>64.3</td><td>86.6</td><td>87.1</td><td>81.7</td><td>89.9</td></tr><tr><td>SimVLM [22]</td><td>90.9</td><td>63.9</td><td>75.2</td><td>90.4</td><td>83.4</td><td>88.6</td></tr><tr><td>FLAVA [60]</td><td>90.9</td><td>57.8</td><td>81.4</td><td>90.4</td><td>80.3</td><td>87.3</td></tr><tr><td>UNIMO [46]</td><td>96.8</td><td>-</td><td>-</td><td>-</td><td>89.8</td><td>-</td></tr><tr><td colspan="7">Natural-Language-Pretrained SOTA Models</td></tr><tr><td>BERT [2]</td><td>93.2</td><td>70.4</td><td>88.0</td><td>91.3</td><td>86.6</td><td>92.3</td></tr><tr><td>RoBERTa [28]</td><td>96.4</td><td>86.6</td><td>90.9</td><td>92.2</td><td>90.2</td><td>93.9</td></tr><tr><td>XLNet [25]</td><td>97.0</td><td>85.9</td><td>90.8</td><td>92.3</td><td>90.8</td><td>94.9</td></tr><tr><td>ELECTRA [82]</td><td>96.9</td><td>88.0</td><td>90.8</td><td>92.4</td><td>90.9</td><td>95.0</td></tr><tr><td>DeBERTa [83]</td><td>96.8</td><td>88.3</td><td>91.9</td><td>92.3</td><td>91.1</td><td>95.3</td></tr><tr><td>Ours</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>OFA</td><td>96.6</td><td>91.0</td><td>91.7</td><td>92.5</td><td>90.2</td><td>94.8</td></tr></table>
|
| 156 |
+
|
| 157 |
+
Table 7: Experimental results on Gigaword abstractive summarization. We report performance on the ROUGE evaluation [84]
|
| 158 |
+
|
| 159 |
+
<table><tr><td>Model</td><td>ROUGE-1</td><td>Gigaword ROUGE-2</td><td>ROUGE-L</td></tr><tr><td>BERTSHARE [85]</td><td>38.13</td><td>19.81</td><td>35.62</td></tr><tr><td>MASS [86]</td><td>38.73</td><td>19.71</td><td>35.96</td></tr><tr><td>UniLM [29]</td><td>38.45</td><td>19.45</td><td>35.75</td></tr><tr><td>PEGASUS [87]</td><td>39.12</td><td>19.86</td><td>36.24</td></tr><tr><td>ProphetNet [88]</td><td>39.55</td><td>20.27</td><td>36.57</td></tr><tr><td>UNIMO [46]</td><td>39.71</td><td>20.37</td><td>36.88</td></tr><tr><td>OFA</td><td>39.81</td><td>20.66</td><td>37.11</td></tr></table>
understanding, typically text classification, we regard them as generation tasks where labels are essentially word sequences. Additionally, for each task, we design a manual instruction to indicate to the model what type of question it should answer. We list our instruction design in Appendix A.3.
We demonstrate that even a unified multimodal pretrained model can achieve highly competitive performance in natural language tasks. Specifically, in the evaluation of natural language understanding, OFA surpasses multimodal pretrained models by large margins in all tasks. In comparison with the state-of-the-art natural language pretrained models, including RoBERTa [28], XLNET [25], ELECTRA [82], and DeBERTa [83], OFA reaches a comparable performance. In the evaluation of natural language generation, OFA even reaches a new state-of-the-art performance on the Gigaword dataset.
Also, OFA can reach a competitive performance in image classification. Table 8 shows the performance of OFA on image classification. $\mathrm{OFA}_{\mathrm{Large}}$ achieves higher accuracy than previous backbone models such as EfficientNet-B7 [89] and ViT-L [6]. We also compare OFA with self-supervised pretraining models based on contrastive learning and masked image modeling. OFA outperforms contrastive-based models such as SimCLR [32] and MoCo-v3 [33, 35] with similar parameters. Compared with pretrained models based on masked image modeling, e.g., BEiT-L [36] and MAE-L [37], OFA can achieve similar performance.
Table 8: ImageNet-1K finetuning results. All the listed models do not use extra labeled image classification samples during training for fair comparison. We report the results of $\mathrm{OFA}_{\text{Large}}$ .
<table><tr><td>Model</td><td>Top-1 Acc.</td></tr><tr><td>EfficientNet-B7 [89]</td><td>84.3</td></tr><tr><td>ViT-L/16 [6]</td><td>82.5</td></tr><tr><td>DINO [90]</td><td>82.8</td></tr><tr><td>SimCLR v2 [32]</td><td>82.9</td></tr><tr><td>MoCo v3 [35]</td><td>84.1</td></tr><tr><td>BEiT\(_{384}\)-L/16 [36]</td><td>86.3</td></tr><tr><td>MAE-L/16 [37]</td><td>85.9</td></tr><tr><td>OFA</td><td>85.6</td></tr></table>
Table 9: Zero-shot performance on 6 GLUE tasks and SNLI-VE.
<table><tr><td>Model</td><td>SST-2 Acc.</td><td>RTE Acc.</td><td>MRPC F1</td><td>QQP F1</td><td>QNLI Acc.</td><td>MNLI Acc.</td><td>SNLI-VE Acc. (dev/test)</td></tr><tr><td>Uni-Perceiver</td><td>70.6</td><td>55.6</td><td>76.1</td><td>53.6</td><td>51.0</td><td>49.6</td><td>-</td></tr><tr><td>\(\mathsf{OFA}_{\text{Base}}\)</td><td>71.6</td><td>56.7</td><td>79.5</td><td>54.0</td><td>51.4</td><td>37.3</td><td>49.71 / 49.18</td></tr></table>
These aforementioned results in both natural language and vision tasks indicate that a unified multimodal pretrained model is not only effective in multimodal tasks but also capable of tackling unimodal tasks, and in the future, it might be sufficient for such a model to solve complex tasks concerning different modality combinations.
# 4.3 Zero-shot Learning & Task Transfer
The instruction-guided pretraining enables OFA to perform zero-shot inference. Following Uni-Perceiver [61], we evaluate our model on the 6 tasks of the GLUE benchmark, including single-sentence classification and sentence-pair classification. Table 9 demonstrates that OFA generally outperforms Uni-Perceiver. However, neither model achieves satisfactory performance in sentence-pair classification (with Acc. $< 60\%$). We hypothesize that the absence of sentence-pair data in the pretraining dataset accounts for this gap.
Also, we find that the model performance is highly sensitive to the design of instructions. To obtain the best result, one should search for a proper instruction template, possibly from a large pool of candidates. A slight change to the manual prompts or to the model parameters may drastically influence the model performance, which indicates a lack of robustness. We leave this issue to future work.
We observe that the model can transfer to unseen tasks well with new task instructions. We design a new task called grounded question answering and present examples in Figure 4. In this scenario, given a question about a certain region on the image, the model should provide a correct answer. We find that the model achieves satisfactory performance in this new task, which reflects its strong transferability. Besides, OFA can solve tasks with out-of-domain input data. For example, without finetuning, OFA achieves satisfactory performance on VQA over out-of-domain images. Examples are demonstrated in Figure 5. OFA can also perform accurate visual grounding on out-of-domain images, e.g., anime pictures, synthetic images, etc., and we demonstrate more examples in Figure 11 in Appendix C.
# 4.4 Ablation on Multitask Pretraining
Thanks to the unified framework, OFA has been pretrained on multiple tasks and is thus endowed with comprehensive capabilities. However, the effect of each pretraining task has not yet been examined. We verify their effects on multiple downstream tasks, including image captioning, VQA, image classification, and text-to-image generation.
We first evaluate how uni-modal pretraining tasks influence the performance in both cross-modal and uni-modal tasks. Table 10 demonstrates our experimental results. We observe some interesting phenomena about the effects of uni-modal pretraining tasks. Text infilling brings improvement on image captioning (+0.8 CIDEr) and VQA (+0.46 Acc.). Natural language pretraining improves the contextualized representation of language and thus enhances performance in cross-modal tasks. However, we notice that the language pretraining task may degrade the performance in image
Q: what color is the car in the region? region: <loc301> <loc495> <loc501> <loc596>

A: tan
Q: what color is the car in the region? region: <loc512> <loc483> <loc675> <loc576>

A: gray
Figure 4: Qualitative results on an unseen task grounded QA. We design a new task called grounded question answering, where the model should answer a question about a certain region in the image. More samples are provided in Figure 10 in Appendix C.
Q: what is grown on the plant?
Figure 5: Qualitative results on unseen-domain VQA. During pretraining, only real-world photographs are used for VQA. We present cases of VQA on out-of-domain images, i.e., iconic and sci-fi images, and demonstrate the model's capability of transferring to unseen domains. More samples are provided in Figure 9 in Appendix C.

A: money
Q: what does the red-roofed building right to the big airship look like?

A: a mushroom
classification, leading to a decrease on ImageNet-1K (-1.0 Acc.). Also, it is interesting to find that it does not bring improvement in text-to-image generation (-0.1 CLIPSIM). This may be attributed to the simplicity of the text in this task, which indicates that an improved language representation has little effect on the performance here. As to image infilling, it significantly improves the performance in image classification (+1.0 Acc.) and text-to-image generation (+0.6 CLIPSIM). Learning to recover images is an effective self-supervised task for image representation, and it also encourages the decoder's ability to generate image codes. However, it hurts the performance in image captioning and VQA. Both tasks require a strong capability in generating texts, and the decoder's learning of image generation naturally brings performance degradation in captioning (-0.7 CIDEr) and VQA (-0.3 Acc.).
Furthermore, we evaluate how multimodal tasks impact the performance. Previous studies have provided evidence of the contribution of conventional pretraining tasks, e.g., MLM, MOC, ITM, VQA, image captioning, etc. [14, 17]. However, they miss other tasks, e.g., detection and visual grounding & grounded captioning. We conduct experiments on these tasks and find that tasks predicting regions are crucial to multimodal tasks, with a performance increase in image captioning (+2.3 CIDEr and +1.4 CIDEr) and VQA (+0.6 Acc. and +0.5 Acc.), respectively. This suggests that detection and visual grounding & grounded captioning help the model grasp fine-grained alignments between vision and language.
Table 10: Ablation results of OFA. All models are pretrained for $250k$ steps. w/o ground. represents the removal of both visual grounding and grounded captioning tasks. Note that all models are only finetuned with the cross-entropy loss in image captioning.
<table><tr><td>Model</td><td>Caption CIDEr</td><td>VQA Test-dev</td><td>ImageNet Top-1 Acc.</td><td>Image Generation FID / CLIPSIM / IS</td></tr><tr><td>OFABase</td><td>135.6</td><td>76.0</td><td>82.2</td><td>20.8 / 31.6 / 21.5</td></tr><tr><td>w/o text infill.</td><td>134.8</td><td>75.6</td><td>83.2</td><td>20.3 / 31.7 / 21.8</td></tr><tr><td>w/o image infill.</td><td>136.3</td><td>76.3</td><td>81.8</td><td>23.2 / 31.0 / 20.0</td></tr><tr><td>w/o det.</td><td>133.3</td><td>75.4</td><td>81.4</td><td>20.9 / 31.5 / 21.6</td></tr><tr><td>w/o ground.</td><td>134.2</td><td>75.5</td><td>82.0</td><td>21.2 / 31.5 / 21.5</td></tr></table>
Region information contributes little to text-to-image generation (+0.1 CLIPSIM & +0.1 CLIPSIM), as this task requires far less text-region alignment information. Surprisingly, we find that detection can improve the performance in visual understanding (+0.8 Acc.). This indicates that incorporating region information might be essential to visual understanding, especially on images with complex objects.
# 5 Conclusion
In this work, we propose OFA, a Task-Agnostic and Modality-Agnostic framework supporting Task Comprehensiveness. OFA achieves unification of architecture, tasks, and modalities, and is thus capable of multimodal & uni-modal understanding and generation without additional task-specific layers or designs. Our experiments show that OFA creates new SOTAs in a series of tasks, including image captioning, VQA, visual entailment, and referring expression comprehension. OFA also demonstrates performance comparable with language / vision pretrained SOTA models in uni-modal understanding and generation tasks, e.g., GLUE, abstractive summarization, and image classification. We provide a further analysis to demonstrate its capability in zero-shot learning and domain & task transfer, and we also verify the effectiveness of the pretraining tasks.
In the future, we will continue exploring the issues discovered in this work. We also endeavor to find a reasonable path toward building an omni-model that generalizes to the complex real world.
# Acknowledgments
We would like to thank Jie Zhang, Yong Li, Jiamang Wang, Shao Yuan, and Zheng Cao for their support to this project, and we would like to thank Guangxiang Zhao and Fei Sun for their insightful comments to our paper.
# References
[1] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NeurIPS 2017, pages 5998-6008, 2017.
[2] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: pre-training of deep bidirectional transformers for language understanding. In Jill Burstein, Christy Doran, and Thamar Solorio, editors, NAACL-HLT 2019, pages 4171–4186. Association for Computational Linguistics, 2019.
[3] Tom B Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. arXiv preprint arXiv:2005.14165, 2020.
[4] Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021.
[5] Steffen Schneider, Alexei Baevski, Ronan Collobert, and Michael Auli. wav2vec: Unsupervised pre-training for speech recognition. arXiv preprint arXiv:1904.05862, 2019.
[6] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020.
[7] Andrew Jaegle, Felix Gimeno, Andrew Brock, Andrew Zisserman, Oriol Vinyals, and Joao Carreira. Perceiver: General perception with iterative attention. arXiv preprint arXiv:2103.03206, 2021.
[8] Weijie Su, Xizhou Zhu, Yue Cao, Bin Li, Lewei Lu, Furu Wei, and Jifeng Dai. Vl-bert: Pre-training of generic visual-linguistic representations. In International Conference on Learning Representations, 2019.
[9] Jason Wei, Maarten Bosma, Vincent Y Zhao, Kelvin Guu, Adams Wei Yu, Brian Lester, Nan Du, Andrew M Dai, and Quoc V Le. Finetuned language models are zero-shot learners. arXiv preprint arXiv:2109.01652, 2021.
[10] Victor Sanh, Albert Webson, Colin Raffel, Stephen H Bach, Lintang Sutawika, Zaid Alyafeai, Antoine Chaffin, Arnaud Stiegler, Teven Le Scao, Arun Raja, et al. Multitask prompted training enables zero-shot task generalization. arXiv preprint arXiv:2110.08207, 2021.
[11] Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. Parameter-efficient transfer learning for nlp. In International Conference on Machine Learning, pages 2790-2799. PMLR, 2019.
[12] Brian Lester, Rami Al-Rfou, and Noah Constant. The power of scale for parameter-efficient prompt tuning. arXiv preprint arXiv:2104.08691, 2021.
[13] Jiasen Lu, Dhruv Batra, Devi Parikh, and Stefan Lee. Vilbert: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. In NeurIPS, 2019.
[14] Yen-Chun Chen, Linjie Li, Licheng Yu, Ahmed El Kholy, Faisal Ahmed, Zhe Gan, Yu Cheng, and Jingjing Liu. Uniter: Universal image-text representation learning. In ECCV, 2020.
[15] Xiujun Li, Xi Yin, Chunyuan Li, Xiaowei Hu, Pengchuan Zhang, Lei Zhang, Lijuan Wang, Houdong Hu, Li Dong, Furu Wei, Yejin Choi, and Jianfeng Gao. Oscar: Object-semantics aligned pre-training for vision-language tasks. In ECCV, 2020.
[16] Zhe Gan, Yen-Chun Chen, Linjie Li, Chen Zhu, Yu Cheng, and Jingjing Liu. Large-scale adversarial training for vision-and-language representation learning. ArXiv, abs/2006.06195, 2020.
[17] Pengchuan Zhang, Xiujun Li, Xiaowei Hu, Jianwei Yang, Lei Zhang, Lijuan Wang, Yejin Choi, and Jianfeng Gao. Vinvl: Revisiting visual representations in vision-language models. 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5575-5584, 2021.
[18] Junyang Lin, Rui Men, An Yang, Chang Zhou, Ming Ding, Yichang Zhang, Peng Wang, Ang Wang, Le Jiang, Xianyan Jia, et al. M6: A chinese multimodal pretrainer. arXiv preprint arXiv:2103.00823, 2021.
[19] Zhu Zhang, Jianxin Ma, Chang Zhou, Rui Men, Zhikang Li, Ming Ding, Jie Tang, Jingren Zhou, and Hongxia Yang. M6-ufc: Unifying multi-modal controls for conditional image synthesis. arXiv preprint arXiv:2105.14211, 2021.
[20] An Yang, Junyang Lin, Rui Men, Chang Zhou, Le Jiang, Xianyan Jia, Ang Wang, Jie Zhang, Jiamang Wang, Yong Li, et al. Exploring sparse expert models and beyond. arXiv preprint arXiv:2105.15082, 2021.
[21] Junyang Lin, An Yang, Jinze Bai, Chang Zhou, Le Jiang, Xianyan Jia, Ang Wang, Jie Zhang, Yong Li, Wei Lin, et al. M6-10t: A sharing-delinking paradigm for efficient multi-trillion parameter pretraining. arXiv preprint arXiv:2110.03888, 2021.
[22] Zirui Wang, Jiahui Yu, Adams Wei Yu, Zihang Dai, Yulia Tsvetkov, and Yuan Cao. Simvlm: Simple visual language model pretraining with weak supervision. ArXiv, abs/2108.10904, 2021.
[23] Lu Yuan, Dongdong Chen, Yi-Ling Chen, Noel C. F. Codella, Xiyang Dai, Jianfeng Gao, Houdong Hu, Xuedong Huang, Boxin Li, Chunyuan Li, Ce Liu, Mengchen Liu, Zicheng Liu, Yumao Lu, Yu Shi, Lijuan Wang, Jianfeng Wang, Bin Xiao, Zhen Xiao, Jianwei Yang, Michael Zeng, Luowei Zhou, and Pengchuan Zhang. Florence: A new foundation model for computer vision. ArXiv, abs/2111.11432, 2021.
[24] Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. Improving language understanding by generative pre-training. URL https://s3-us-west-2.amazonaws.com/openai-assets/research-covers/language-unsupervised/language_understanding_paper.pdf, 2018.
[25] Zhilin Yang, Zihang Dai, Yiming Yang, Jaime G. Carbonell, Ruslan Salakhutdinov, and Quoc V. Le. Xlnet: Generalized autoregressive pretraining for language understanding. In NeurIPS 2019, pages 5754-5764, 2019.
[26] Yu Sun, Shuohuan Wang, Yu-Kun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, and Hua Wu. ERNIE: enhanced representation through knowledge integration. CoRR, abs/1904.09223, 2019.
[27] Yu Sun, Shuohuan Wang, Yu-Kun Li, Shikun Feng, Hao Tian, Hua Wu, and Haifeng Wang. ERNIE 2.0: A continual pre-training framework for language understanding. CoRR, abs/1907.12412, 2019.
[28] Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. *Roberta: A robustly optimized BERT pretraining approach*. CoRR, abs/1907.11692, 2019.
[29] Li Dong, Nan Yang, Wenhui Wang, Furu Wei, Xiaodong Liu, Yu Wang, Jianfeng Gao, Ming Zhou, and Hsiao-Wuen Hon. Unified language model pre-training for natural language understanding and generation. In NeurIPS 2019, pages 13042-13054, 2019.
[30] Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. Journal of Machine Learning Research, 21(140):1-67, 2020.
[31] Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In ACL 2020, July 2020.
[32] Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In International conference on machine learning, pages 1597-1607. PMLR, 2020.
[33] Xinlei Chen, Haoqi Fan, Ross Girshick, and Kaiming He. Improved baselines with momentum contrastive learning. arXiv preprint arXiv:2003.04297, 2020.
[34] Jean-Bastien Grill, Florian Strub, Florent Altché, Corentin Tallec, Pierre H Richemond, Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Daniel Guo, Mohammad Gheshlaghi Azar, et al. Bootstrap your own latent: A new approach to self-supervised learning. arXiv preprint arXiv:2006.07733, 2020.
[35] Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15750-15758, 2021.
[36] Hangbo Bao, Li Dong, and Furu Wei. Beit: Bert pre-training of image transformers. arXiv preprint arXiv:2106.08254, 2021.
[37] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, and Ross Girshick. Masked autoencoders are scalable vision learners. arXiv preprint arXiv:2111.06377, 2021.
[38] Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, and Kai-Wei Chang. Visualbert: A simple and performant baseline for vision and language. ArXiv, abs/1908.03557, 2019.
[39] Luowei Zhou, Hamid Palangi, Lei Zhang, Houdong Hu, Jason J. Corso, and Jianfeng Gao. Unified vision-language pre-training for image captioning and VQA. In AAAI 2020, pages 13041-13049, 2020.
[40] Hao Tan and Mohit Bansal. Lxmert: Learning cross-modality encoder representations from transformers. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 5100-5111, 2019.
[41] Gen Li, Nan Duan, Yuejian Fang, Daxin Jiang, and Ming Zhou. Unicoder-vl: A universal encoder for vision and language by cross-modal pre-training. CoRR, abs/1908.06066, 2019.
[42] Junyang Lin, An Yang, Yichang Zhang, Jie Liu, Jingren Zhou, and Hongxia Yang. Interbert: Vision-and-language interaction for multi-modal pretraining. arXiv preprint arXiv:2003.13198, 2020.
[43] Jiasen Lu, Vedanuj Goswami, Marcus Rohrbach, Devi Parikh, and Stefan Lee. 12-in-1: Multi-task vision and language representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10437–10446, 2020.
[44] Haiyang Xu, Ming Yan, Chenliang Li, Bin Bi, Songfang Huang, Wenming Xiao, and Fei Huang. E2e-vlp: End-to-end vision-language pre-training enhanced by visual learning. arXiv preprint arXiv:2106.01804, 2021.
[45] Fei Yu, Jiji Tang, Weichong Yin, Yu Sun, Hao Tian, Hua Wu, and Haifeng Wang. Ernie-vil: Knowledge enhanced vision-language representations through scene graphs. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 3208-3216, 2021.
[46] Wei Li, Can Gao, Guocheng Niu, Xinyan Xiao, Hao Liu, Jiachen Liu, Hua Wu, and Haifeng Wang. UNIMO: towards unified-modal understanding and generation via cross-modal contrastive learning. In Chengqing Zong, Fei Xia, Wenjie Li, and Roberto Navigli, editors, ACL/IJCNLP 2021, pages 2592-2607. Association for Computational Linguistics, 2021.
[47] Zhicheng Huang, Zhaoyang Zeng, Bei Liu, Dongmei Fu, and Jianlong Fu. Pixel-bert: Aligning image pixels with text by deep multi-modal transformers. ArXiv, abs/2004.00849, 2020.
[48] Wenhui Wang, Hangbo Bao, Li Dong, and Furu Wei. Vlmo: Unified vision-language pre-training with mixture-of-modality-experts. ArXiv, abs/2111.02358, 2021.
[49] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In Marina Meila and Tong Zhang, editors, ICML 2021, volume 139 of Proceedings of Machine Learning Research, pages 8748-8763. PMLR, 2021.
[50] Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. Zero-shot text-to-image generation. arXiv preprint arXiv:2102.12092, 2021.
[51] Ming Ding, Zhuoyi Yang, Wenyi Hong, Wendi Zheng, Chang Zhou, Da Yin, Junyang Lin, Xu Zou, Zhou Shao, Hongxia Yang, et al. Cogview: Mastering text-to-image generation via transformers. arXiv preprint arXiv:2105.13290, 2021.
[52] Chenfei Wu, Jian Liang, Lei Ji, Fan Yang, Yuejian Fang, Daxin Jiang, and Nan Duan. NÜWA: Visual synthesis pre-training for neural visual world creation. arXiv preprint arXiv:2111.12417, 2021.
[53] Aäron van den Oord, Oriol Vinyals, and Koray Kavukcuoglu. Neural discrete representation learning. In NIPS, 2017.
[54] Patrick Esser, Robin Rombach, and Bjorn Ommer. Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12873-12883, 2021.
[55] Lukasz Kaiser, Aidan N Gomez, Noam Shazeer, Ashish Vaswani, Niki Parmar, Llion Jones, and Jakob Uszkoreit. One model to learn them all. arXiv preprint arXiv:1706.05137, 2017.
[56] Jaemin Cho, Jie Lei, Haochen Tan, and Mohit Bansal. Unifying vision-and-language tasks via text generation. In ICML, 2021.
[57] Zhengyuan Yang, Zhe Gan, Jianfeng Wang, Xiaowei Hu, Faisal Ahmed, Zicheng Liu, Yumao Lu, and Lijuan Wang. Crossing the format boundary of text and boxes: Towards unified vision-language modeling. ArXiv, abs/2111.12085, 2021.
[58] Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, et al. Perceiver io: A general architecture for structured inputs & outputs. arXiv preprint arXiv:2107.14795, 2021.
[59] Ronghang Hu and Amanpreet Singh. Unit: Multimodal multitask learning with a unified transformer. arXiv preprint arXiv:2102.10772, 2021.
[60] Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. Flava: A foundational language and vision alignment model. arXiv preprint arXiv:2112.04482, 2021.
[61] Xizhou Zhu, Jinguo Zhu, Hao Li, Xiaoshi Wu, Xiaogang Wang, Hongsheng Li, Xiaohua Wang, and Jifeng Dai. Uni-perceiver: Pre-training unified architecture for generic perception for zero-shot and few-shot tasks. arXiv preprint arXiv:2112.01522, 2021.
[62] Zihang Dai, Hanxiao Liu, Quoc V Le, and Mingxing Tan. Coatnet: Marrying convolution and attention for all data sizes. arXiv preprint arXiv:2106.04803, 2021.
[63] Rico Sennrich, Barry Haddow, and Alexandra Birch. Neural machine translation of rare words with subword units. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1715–1725, 2016.
[64] Ting Chen, Saurabh Saxena, Lala Li, David J Fleet, and Geoffrey Hinton. Pix2seq: A language modeling framework for object detection. arXiv preprint arXiv:2109.10852, 2021.
[65] Lei Jimmy Ba, Jamie Ryan Kiros, and Geoffrey E. Hinton. Layer normalization. CoRR, abs/1607.06450, 2016.
[66] Sam Shleifer, Jason Weston, and Myle Ott. Normformer: Improved transformer pretraining with extra normalization. arXiv preprint arXiv:2110.09456, 2021.
[67] Guolin Ke, Di He, and Tie-Yan Liu. Rethinking positional encoding in language pre-training. In International Conference on Learning Representations, 2020.
[68] Thomas H Cormen, Charles E Leiserson, Ronald L Rivest, and Clifford Stein. Introduction to algorithms. MIT press, 2009.
[69] Junnan Li, Ramprasaath R Selvaraju, Akhilesh Deepak Gotmare, Shafiq Joty, Caiming Xiong, and Steven Hoi. Align before fuse: Vision and language representation learning with momentum distillation. In Thirty-Fifth Conference on Neural Information Processing Systems, 2021.
[70] Zi-Yi Dou, Yichong Xu, Zhe Gan, Jianfeng Wang, Shuohang Wang, Lijuan Wang, Chenguang Zhu, Nanyun Peng, Zicheng Liu, and Michael Zeng. An empirical study of training end-to-end vision-and-language transformers. ArXiv, abs/2111.02387, 2021.
[71] Xiaowei Hu, Zhe Gan, Jianfeng Wang, Zhengyuan Yang, Zicheng Liu, Yumao Lu, and Lijuan Wang. Scaling up vision-language pre-training for image captioning. CoRR, abs/2111.12233, 2021.
[72] Aishwarya Kamath, Mannat Singh, Yann LeCun, Ishan Misra, Gabriel Synnaeve, and Nicolas Carion. Mdetr modulated detection for end-to-end multi-modal understanding. ArXiv, abs/2104.12763, 2021.
[73] Ning Xie, Farley Lai, Derek Doran, and Asim Kadav. Visual entailment: A novel task for fine-grained image understanding. arXiv preprint arXiv:1901.06706, 2019.
[74] Xinlei Chen, Hao Fang, Tsung-Yi Lin, Ramakrishna Vedantam, Saurabh Gupta, Piotr Dollár, and C Lawrence Zitnick. Microsoft coco captions: Data collection and evaluation server. arXiv preprint arXiv:1504.00325, 2015.
[75] Licheng Yu, Patrick Poirson, Shan Yang, Alexander C Berg, and Tamara L Berg. Modeling context in referring expressions. In European Conference on Computer Vision, pages 69-85. Springer, 2016.
[76] Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan L Yuille, and Kevin Murphy. Generation and comprehension of unambiguous object descriptions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 11–20, 2016.
[77] Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741, 2021.
[78] Yupan Huang, Hongwei Xue, Bei Liu, and Yutong Lu. Unifying multimodal transformer for bi-directional image and text generation. In Proceedings of the 29th ACM International Conference on Multimedia, pages 1138-1147, 2021.
[79] Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R Bowman. Glue: A multitask benchmark and analysis platform for natural language understanding. arXiv preprint arXiv:1804.07461, 2018.
[80] Alexander M Rush, Sumit Chopra, and Jason Weston. A neural attention model for abstractive sentence summarization. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 379–389, 2015.
[81] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248–255. IEEE, 2009.
[82] Kevin Clark, Minh-Thang Luong, Quoc V. Le, and Christopher D. Manning. ELECTRA: pre-training text encoders as discriminators rather than generators. In 8th International Conference on Learning Representations, ICLR 2020. OpenReview.net, 2020.
[83] Pengcheng He, Xiaodong Liu, Jianfeng Gao, and Weizhu Chen. Deberta: decoding-enhanced bert with disentangled attention. In 9th International Conference on Learning Representations, ICLR 2021. OpenReview.net, 2021.
[84] Chin-Yew Lin. ROUGE: A package for automatic evaluation of summaries. In Text Summarization Branches Out, Barcelona, Spain, July 2004. Association for Computational Linguistics.
[85] Sascha Rothe, Shashi Narayan, and Aliaksei Severyn. Leveraging pre-trained checkpoints for sequence generation tasks. Transactions of the Association for Computational Linguistics, 8:264-280, 2020.
[86] Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, and Tie-Yan Liu. MASS: masked sequence to sequence pre-training for language generation. In ICML 2019, pages 5926-5936, 2019.
[87] Jingqing Zhang, Yao Zhao, Mohammad Saleh, and Peter Liu. Pegasus: Pre-training with extracted gap-sentences for abstractive summarization. In International Conference on Machine Learning, pages 11328-11339. PMLR, 2020.
[88] Weizhen Qi, Yu Yan, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang, and Ming Zhou. Prophetnet: Predicting future n-gram for sequence-to-sequence pre-training. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings, pages 2401–2410, 2020.
[89] Mingxing Tan and Quoc Le. Efficientnet: Rethinking model scaling for convolutional neural networks. In International Conference on Machine Learning, pages 6105-6114. PMLR, 2019.
[90] Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. arXiv preprint arXiv:2104.14294, 2021.
[91] Soravit Changpinyo, Piyush Sharma, Nan Ding, and Radu Soricut. Conceptual 12m: Pushing web-scale image-text pre-training to recognize long-tail visual concepts. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3558-3568, 2021.
[92] Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In ACL 2018, pages 2556-2565, 2018.
[93] Vicente Ordonez, Girish Kulkarni, and Tamara L. Berg. Im2text: Describing images using 1 million captioned photographs. In NeurIPS 2011, pages 1143-1151, 2011.
[94] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A. Shamma, Michael S. Bernstein, and Li Fei-Fei. Visual genome: Connecting language and vision using crowdsourced dense image annotations. International Journal of Computer Vision, 123(1):32–73, 2017.
[95] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6904-6913, 2017.
[96] Drew A Hudson and Christopher D Manning. Gqa: A new dataset for real-world visual reasoning and compositional question answering. In CVPR 2019, pages 6700-6709, 2019.
[97] Bart Thomee, David A Shamma, Gerald Friedland, Benjamin Elizalde, Karl Ni, Douglas Poland, Damian Borth, and Li-Jia Li. Yfcc100m: The new data in multimedia research. Communications of the ACM, 59(2):64-73, 2016.
[98] Alina Kuznetsova, Hassan Rom, Neil Alldrin, Jasper Uijlings, Ivan Krasin, Jordi Pont-Tuset, Shahab Kamali, Stefan Popov, Matteo Malloci, Alexander Kolesnikov, et al. The open images dataset v4. International Journal of Computer Vision, 128(7):1956-1981, 2020.
[99] Shuai Shao, Zeming Li, Tianyuan Zhang, Chao Peng, Gang Yu, Xiangyu Zhang, Jing Li, and Jian Sun. Objects365: A large-scale, high-quality dataset for object detection. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 8430-8439, 2019.
[100] Leo Gao, Stella Biderman, Sid Black, Laurence Golding, Travis Hoppe, Charles Foster, Jason Phang, Horace He, Anish Thite, Noa Nabeshima, et al. The pile: An 800gb dataset of diverse text for language modeling. arXiv preprint arXiv:2101.00027, 2020.
[101] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR 2016, pages 770-778, 2016.
[102] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In ICLR 2019, 2019.
[103] Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q. Weinberger. Deep networks with stochastic depth. In ECCV, 2016.
[104] Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th annual meeting of the Association for Computational Linguistics, pages 311-318, 2002.
[105] Satanjeev Banerjee and Alon Lavie. Meteor: An automatic metric for mt evaluation with improved correlation with human judgments. In Proceedings of the acl workshop on intrinsic and extrinsic evaluation measures for machine translation and/or summarization, pages 65-72, 2005.
[106] Ramakrishna Vedantam, C Lawrence Zitnick, and Devi Parikh. Cider: Consensus-based image description evaluation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4566-4575, 2015.
[107] Peter Anderson, Basura Fernando, Mark Johnson, and Stephen Gould. Spice: Semantic propositional image caption evaluation. In European conference on computer vision, pages 382-398. Springer, 2016.
[108] Andrej Karpathy and Li Fei-Fei. Deep visual-semantic alignments for generating image descriptions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3128-3137, 2015.
[109] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017.
[110] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. Advances in neural information processing systems, 29:2234-2242, 2016.
[111] Steven J Rennie, Etienne Marcheret, Youssef Mroueh, Jerret Ross, and Vaibhava Goel. Self-critical sequence training for image captioning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7008-7024, 2017.
[112] Guangxiang Zhao, Wenkai Yang, Xuancheng Ren, Lei Li, and Xu Sun. Well-classified examples are underestimated in classification with deep neural networks. CoRR, abs/2110.06537, 2021.
[113] Ekin D Cubuk, Barret Zoph, Jonathon Shlens, and Quoc V Le. Randaugment: Practical automated data augmentation with a reduced search space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 702-703, 2020.
[114] Zhun Zhong, Liang Zheng, Guoliang Kang, Shaozi Li, and Yi Yang. Random erasing data augmentation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 13001-13008, 2020.
[115] Hongyi Zhang, Moustapha Cissé, Yann N. Dauphin, and David Lopez-Paz. mixup: Beyond empirical risk minimization. In 6th International Conference on Learning Representations, ICLR 2018, Vancouver, BC, Canada, April 30 - May 3, 2018, Conference Track Proceedings. OpenReview.net, 2018.
[116] Sangdoo Yun, Dongyoon Han, Sanghyuk Chun, Seong Joon Oh, Youngjoon Yoo, and Junsuk Choe. Cutmix: Regularization strategy to train strong classifiers with localizable features. In 2019 IEEE/CVF International Conference on Computer Vision, ICCV 2019, Seoul, Korea (South), October 27 - November 2, 2019, pages 6022-6031. IEEE, 2019.
[117] Christoph Schuhmann, Richard Vencu, Romain Beaumont, Robert Kaczmarczyk, Clayton Mullis, Aarush Katta, Theo Coombes, Jenia Jitsev, and Aran Komatsuzaki. Laion-400m: Open dataset of clip-filtered 400 million image-text pairs. arXiv preprint arXiv:2111.02114, 2021.
# A Implementation Details
# A.1 Pretraining Datasets
We construct pretraining datasets by incorporating Vision & Language data (i.e., image-text pairs), Vision data (i.e., raw image data, object-labeled data), and Language data (i.e., plain texts). For replication, the pretraining datasets are publicly available. We carefully filter our pretraining data and exclude images that appear in the validation and test sets of downstream tasks to avoid data leakage. The statistics on the pretraining datasets are listed in Table 11.
Cross-modal Data For vision & language pretraining, we mainly apply image-text pairs, including image-caption pairs, image-QA pairs, and image-region pairs, as the pretraining data. For the pretraining tasks of image captioning and image-text matching, we collect Conceptual Caption 12M (CC12M) [91], Conceptual Captions (CC3M) [92], SBU [93], MSCOCO image captions (COCO) [74], and Visual Genome Captions (VG Captions) [94]. Specifically, the part of data from VG requires some additional processing. As texts in VG Captions describe local regions on the images, we retrieve regions with an area larger than 16,384 pixels and construct region-caption pairs. For visual question answering, we collect VQAv2 [95], VG-QA [94], as well as GQA [96]. VQAv2 is a visual question answering dataset with real-world photographs from COCO. VG-QA is also a visual question answering dataset with real-world photographs from VG. The questions of VG-QA are related to specific regions on the images. GQA is a large VQA dataset featuring compositional questions. The images of GQA are also collected from VG. For visual grounding and grounded captioning, we collect data from RefCOCO [75], RefCOCO+ [75], RefCOCOg [76] and VG Captions. Additional processing is applied to VG Captions for this task. Specifically, we use the data of VG that contains regions with an area smaller than 16,384 pixels for visual grounding, in order to encourage the model to grasp fine-grained alignments between vision and language.
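To make the area-based split concrete, the sketch below partitions VG region annotations at the 16,384-pixel threshold described above; the annotation field names are assumptions for illustration, not the actual preprocessing code.

```python
# Minimal sketch of the VG region split described above.
# Assumed region record format: {"phrase": str, "x": int, "y": int, "width": int, "height": int}.
AREA_THRESHOLD = 16_384  # pixels

def split_vg_regions(regions):
    """Route large regions to region-caption pretraining data and small regions
    to visual grounding / grounded captioning data."""
    caption_pairs, grounding_pairs = [], []
    for r in regions:
        area = r["width"] * r["height"]
        box = (r["x"], r["y"], r["x"] + r["width"], r["y"] + r["height"])
        sample = {"text": r["phrase"], "box": box}
        if area > AREA_THRESHOLD:
            caption_pairs.append(sample)    # used as region-caption data
        else:
            grounding_pairs.append(sample)  # used for visual grounding
    return caption_pairs, grounding_pairs
```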
Uni-modal Data Uni-modal data includes vision and language data. Vision data consists of raw images for image infilling and object-labeled images for object detection. For image infilling, we collect raw images from OpenImages, YFCC100M [97] and ImageNet-21K [81], and exclude annotations. Thus the model is unable to access labels in the pretraining stage. For object detection, we collect OpenImages [98], Objects365 [99], VG, and COCO. Language data consists of plain texts, i.e., passages consisting of sentences. We use around 140GB of data from the Pile [100] to leverage its diversity. Specifically, we extract natural language data and apply preprocessing, including truncation to a maximum length of 512.
Table 11: Statistics on the datasets of pretraining tasks. "#Image" denotes the number of distinct images, and "#Sample" denotes the number of samples. *For language data, we report its storage following the previous studies [2, 28].
<table><tr><td>Type</td><td>Pretraining Task</td><td>Source</td><td>#Image</td><td>#Sample</td></tr><tr><td rowspan="5">Vision & Language</td><td>Image Captioning</td><td rowspan="2">CC12M, CC3M, SBU, COCO, VG-Cap</td><td rowspan="2">14.78M</td><td rowspan="2">15.25M</td></tr><tr><td>Image-Text Matching</td></tr><tr><td>Visual Question Answering</td><td>VQAv2, VG-QA, GQA</td><td>178K</td><td>2.92M</td></tr><tr><td>Visual Grounding</td><td rowspan="2">RefCOCO, RefCOCO+, RefCOCOg, VG-Cap</td><td rowspan="2">131K</td><td rowspan="2">3.20M</td></tr><tr><td>Grounded Captioning</td></tr><tr><td rowspan="2">Vision</td><td>Detection</td><td>OpenImages, Object365, VG, COCO</td><td>2.98M</td><td>3.00M</td></tr><tr><td>Image Infilling</td><td>OpenImages, YFCC100M, ImageNet-21K</td><td>36.27M</td><td>-</td></tr><tr><td>Language</td><td>Masked Language Modeling</td><td>Pile (Filtered)</td><td>-</td><td>140GB*</td></tr></table>
# A.2 Pretraining Details
For image processing, we first resize and crop the images into different resolutions, $256 \times 256$ for $\mathrm{OFA}_{\mathrm{Tiny}}$ and $\mathrm{OFA}_{\mathrm{Medium}}$, $384 \times 384$ for $\mathrm{OFA}_{\mathrm{Base}}$, and $480 \times 480$ for $\mathrm{OFA}_{\mathrm{Large}}$ and $\mathrm{OFA}_{\mathrm{Huge}}$, with a fixed patch size of $16 \times 16$. Since training $\mathrm{OFA}_{\mathrm{Large}}$ and $\mathrm{OFA}_{\mathrm{Huge}}$ is time- and computation-consuming, we first train them with images at resolutions of $384 \times 384$ and $256 \times 256$, and then continue pretraining with images at a resolution of $480 \times 480$.
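As a quick sanity check on these settings, the patch-grid sizes implied by each resolution and the fixed $16 \times 16$ patch size can be computed directly; this is simple arithmetic for illustration, not code from the paper.

```python
# Patch-grid arithmetic for the resolutions listed above (patch size 16x16).
PATCH = 16
for name, res in {"OFA-Tiny/Medium": 256, "OFA-Base": 384, "OFA-Large/Huge": 480}.items():
    side = res // PATCH
    print(f"{name}: {res}x{res} image -> {side}x{side} = {side * side} patches")
# 256 -> 16x16 = 256 patches; 384 -> 24x24 = 576; 480 -> 30x30 = 900
```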
For each patch, we obtain its feature vector with the first three blocks of ResNet [101]. The ResNet module is jointly trained along with the transformer module. Note that through extensive experiments we find that random sampling patches [47] does not bring additional benefits in our scenario. For the text processing, we tokenize the texts with the
same BPE Tokenizer [63] as BART [31]. The maximum text sequence length of both encoder and decoder is set to 256. We share parameters between the embedding and the decoder softmax output layer.
From our preliminary experiments, we find that the initialization of the Transformer plays an important role. For $\mathrm{OFA}_{\mathrm{Base}}$ and $\mathrm{OFA}_{\mathrm{Large}}$, we initialize the Transformer with most of the weights of $\mathrm{BART}_{\mathrm{Base}}$ and $\mathrm{BART}_{\mathrm{Large}}$, considering the slight difference between the OFA Transformer and BART as described in Sec 3.1. For OFA of the other sizes, we pretrain language models with the same pretraining strategy as BART and use the pretrained weights to initialize the Transformer in OFA.
We use the AdamW [102] optimizer with $(\beta_{1},\beta_{2}) = (0.9,0.999)$ and $\epsilon = 1e-8$ to pretrain our models. We set the peak learning rate to $2e-4$, and apply a linear-decay scheduler with a warmup ratio of 0.01 to control the learning rate. For regularization, we set dropout to 0.1 and weight decay to 0.01. We employ stochastic depth [103] with a rate of 0.1 (applied to the encoder and decoder except for convolution blocks). We mix all the pretraining data within each batch, which contains 2,048 vision&language samples, 256 object detection samples, 256 image-only samples, and 512 text-only samples. All models are pretrained for at least $300K$ steps except the models used for the ablation study.
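A minimal PyTorch-style sketch of the optimizer and learning-rate schedule described above (AdamW with the stated betas/eps, peak LR 2e-4, warmup ratio 0.01, linear decay); the model and total step count are placeholders, and this is not the authors' training code.

```python
import torch

def build_optimizer_and_scheduler(model, total_steps, peak_lr=2e-4, warmup_ratio=0.01):
    # AdamW with the betas, eps, and weight decay stated above.
    optimizer = torch.optim.AdamW(model.parameters(), lr=peak_lr,
                                  betas=(0.9, 0.999), eps=1e-8, weight_decay=0.01)
    warmup_steps = max(1, int(total_steps * warmup_ratio))

    def lr_lambda(step):
        if step < warmup_steps:
            return step / warmup_steps                              # linear warmup to the peak LR
        remaining = total_steps - step
        return max(0.0, remaining / (total_steps - warmup_steps))   # linear decay to zero

    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
    return optimizer, scheduler
```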
# A.3 Details of Downstream Tasks
We verify the capability of OFA on various downstream tasks in both finetuning and zero-shot settings. We design various task-specific instructions to transfer the knowledge learned from pretraining to downstream tasks effectively. The instructions of different tasks are listed in Table 12. For finetuning, if not specified, the input image resolution is set to $480 \times 480$ , and the other hyper-parameters remain the same as for pretraining. The experimental details of different downstream tasks, including both multimodal and uni-modal tasks, are listed below:
Image Captioning Image captioning is a standard vision&language task that requires models to generate an appropriate and fluent caption for an image. We adopt the most widely used MSCOCO Image Caption dataset [74] to evaluate the multi-modal generation capability of OFA. We report BLEU-4 [104], METEOR [105], CIDEr [106], and SPICE [107] scores on the Karpathy test split [108]. Following the previous standard practice, we first finetune OFA with cross-entropy loss for 2 epochs with a batch size of 128 and a learning rate of $1e - 5$ , and label smoothing is set to 0.1. We then finetune the model with CIDEr optimization for 3 epochs with a batch size of 64, and disable dropout and stochastic depth. We report both scores at the two stages.
Visual Question Answering Visual question answering (VQA) is a cross-modal task that requires the model to answer a question given an image. Previous works such as VLMo [48] or SimVLM [22] define VQA as a classification task. They use a linear output layer to predict the probability of each candidate answer on a given set. In contrast with these studies, to adapt the generative OFA model to the VQA benchmark, we use the Trie-based search strategy mentioned in Sec. 3.4 to ensure that the answer generated by OFA is constrained to the candidate set. We evaluate our model against other baselines on the commonly used VQAv2 dataset [95]. Accuracy scores on both test-dev and test-standard sets are reported. The OFA models of all the reported sizes are finetuned for 40,000 steps with a batch size of 512. The learning rate is $5e - 5$ with label smoothing of 0.1. When finetuning $\mathrm{OFA}_{\mathrm{Large}}$ and $\mathrm{OFA}_{\mathrm{Huge}}$, we increase the image resolution from 480 to 640. Linear interpolation of the image absolute positional embedding proposed in [6] is employed when transferring the pretrained OFA to VQA finetuning. During Trie-based searching, we constrain the generated answers over the most frequent 3,129 answer candidates. Exponential moving average (EMA) with a decay rate of 0.9999 is employed in finetuning.
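When the resolution grows from 480 to 640 with a $16 \times 16$ patch size, the patch grid grows from $30 \times 30$ to $40 \times 40$, so the pretrained absolute positional embeddings must be resized. A hedged sketch of this step (realized here as bilinear resizing over the 2D grid, a common implementation of the linear interpolation mentioned above) might look as follows.

```python
import torch
import torch.nn.functional as F

def interpolate_pos_embed(pos_embed, old_grid=30, new_grid=40):
    """pos_embed: (old_grid * old_grid, dim) absolute positional embeddings of image patches.
    Returns (new_grid * new_grid, dim) embeddings resized to the larger patch grid."""
    dim = pos_embed.shape[-1]
    grid = pos_embed.reshape(old_grid, old_grid, dim).permute(2, 0, 1).unsqueeze(0)  # (1, dim, H, W)
    grid = F.interpolate(grid, size=(new_grid, new_grid), mode="bilinear", align_corners=False)
    return grid.squeeze(0).permute(1, 2, 0).reshape(new_grid * new_grid, dim)
```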
Visual Entailment Visual entailment requires the model to evaluate how the given image and text are semantically correlated, i.e., entailment, neutral, or contradiction. We perform experiments on the SNLI-VE dataset [73]. The image premise, text premise and text hypothesis are fed to the encoder, and the decoder generates appropriate labels. To transfer the knowledge learned by pretraining to this task, we convert the labels entailment/neutral/contradiction to yes/maybe/no. We also use the Trie-based search strategy to constrain the generated labels over the candidate set. We report accuracy on both dev and test sets. The OFA model is finetuned for 6 epochs with a learning rate of $2e - 5$ and a batch size of 256.
Referring Expression Comprehension Referring expression comprehension requires models to locate an image region described by a language query. Different from the approach taken by most previous methods [13, 14] which ranks a set of candidate bounding boxes detected by a pretrained object detector, our method directly predicts the best matching bounding box without any proposals. We perform experiments on RefCOCO [75], RefCOCO+ [75], and RefCOCOg [76]. Consistent with other downstream tasks, we formulate referring expression comprehension as a conditional sequence generation task. In detail, given an image and a language query, OFA generates the box sequence (e.g., $\langle x_1, y_1, x_2, y_2 \rangle$ ) in an autoregressive manner. We report the standard metric Acc@0.5 on the validation and test
Table 12: Instructions for downstream tasks.
<table><tr><td>Task</td><td>Dataset</td><td>Instruction</td><td>Target</td></tr><tr><td>Image Captioning</td><td>COCO</td><td>[Image] What does the image describe?</td><td>{Caption}</td></tr><tr><td>Visual Question Answering</td><td>VQA</td><td>[Image] {Question}</td><td>{Answer}</td></tr><tr><td>Visual Entailment</td><td>SNLI-VE</td><td>[Image] Can image and text1 "{Text1}" imply text2 "{Text2}"?</td><td>Yes/No/Maybe</td></tr><tr><td>Referring Expression Comprehension</td><td>RefCOCO, RefCOCO+, RefCOCOg</td><td>[Image] Which region does the text "{Text}" describe?</td><td>{Location}</td></tr><tr><td>Image Generation</td><td>COCO</td><td>What is the complete image? caption: {Caption}</td><td>{Image}</td></tr><tr><td>Image Classification</td><td>ImageNet-1K</td><td>[Image] What does the image describe?</td><td>{Label}</td></tr><tr><td>Single-Sentence Classification</td><td>SST-2</td><td>Is the sentiment of text "{Text}" positive or negative?</td><td>Positive/Negative</td></tr><tr><td rowspan="5">Sentence-Pair Classification</td><td>RTE</td><td>Can text1 "{Text1}" imply text2 "{Text2}"?</td><td>Yes/No</td></tr><tr><td>MRPC</td><td>Does text1 "{Text1}" and text2 "{Text2}" have the same semantics?</td><td>Yes/No</td></tr><tr><td>QQP</td><td>Is question "{Question1}" and question "{Question2}" equivalent?</td><td>Yes/No</td></tr><tr><td>MNLI</td><td>Can text1 "{Text1}" imply text2 "{Text2}"?</td><td>Yes/No/Maybe</td></tr><tr><td>QNLI</td><td>Does "{Text}" contain the answer to question "{Question}"?</td><td>Yes/No</td></tr><tr><td>Text Summarization</td><td>Gigaword</td><td>What is the summary of article "{Article}"?</td><td>{Summary}</td></tr></table>
sets. For finetuning, the input image resolution is set to $512 \times 512$ . We finetune the OFA model on each dataset for about 10 epochs with a batch size of 128. The learning rate is $3e - 5$ with the label smoothing of 0.1. Each query only corresponds to an image region, so we limit the maximum generated length to 4 during inference.
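The box sequence mentioned above is generated as discrete location tokens (as in the `<loc...>` tokens of Figure 4). The sketch below quantizes a box into four location tokens and maps them back; the number of bins is an assumption made for illustration only.

```python
NUM_BINS = 1000  # assumed number of location bins, for illustration only

def box_to_tokens(box, img_w, img_h, num_bins=NUM_BINS):
    """Quantize (x1, y1, x2, y2) in pixels into four <loc_i> tokens."""
    x1, y1, x2, y2 = box
    def bin_of(v, size):
        return min(num_bins - 1, int(v / size * num_bins))
    return [f"<loc{bin_of(x1, img_w)}>", f"<loc{bin_of(y1, img_h)}>",
            f"<loc{bin_of(x2, img_w)}>", f"<loc{bin_of(y2, img_h)}>"]

def tokens_to_box(tokens, img_w, img_h, num_bins=NUM_BINS):
    """Map four <loc_i> tokens back to approximate pixel coordinates (bin centers)."""
    ids = [int(t[4:-1]) for t in tokens]  # strip "<loc" and ">"
    scale = lambda i, size: (i + 0.5) / num_bins * size
    return (scale(ids[0], img_w), scale(ids[1], img_h),
            scale(ids[2], img_w), scale(ids[3], img_h))
```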
Image Generation Following the same setting as [52], we train our model on the MS COCO train split and evaluate it on the validation split by randomly sampling 30,000 images. We use Fréchet Inception Distance (FID) [109] and Inception Score (IS) [110] to evaluate the quality of the images. Following the previous studies [78, 52], we also compute the CLIP Similarity Score (CLIPSIM) to evaluate the semantic similarity between the query text and the generated images. During finetuning, OFA learns to generate the image code sequence according to the given text query only. The model is first finetuned with cross-entropy loss and then with CLIPSIM optimization following [111, 78]. In the first stage, we finetune the OFA model for about 50 epochs with a batch size of 512 and a learning rate of $1e - 3$. In the second stage, the model is finetuned for an extra 5,000 steps with a batch size of 32 and a learning rate of $1e - 6$. During evaluation, we sample 24 images at a resolution of $256 \times 256$ for each query and choose the best one using the pretrained CLIP model [49].
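The best-of-24 selection described above can be sketched as follows; `sample_image` and `clip_score` are placeholders standing in for the finetuned OFA sampler and the pretrained CLIP scorer, respectively.

```python
def pick_best_image(text_query, sample_image, clip_score, num_samples=24):
    """Sample several candidate images for one query and keep the one CLIP ranks highest.
    sample_image(text) -> image; clip_score(text, image) -> float similarity."""
    candidates = [sample_image(text_query) for _ in range(num_samples)]
    scores = [clip_score(text_query, img) for img in candidates]
    best_idx = max(range(num_samples), key=lambda i: scores[i])
    return candidates[best_idx]
```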
For the case study, we compare OFA with CogView and GLIDE. CogView provides an API website$^{5}$. Note that this API samples 8 images at a resolution of $512 \times 512$ for each query. We select the first of the generated images and resize it to a resolution of $256 \times 256$. GLIDE provides a Colab notebook$^{6}$. Note that the only publicly available GLIDE model is of base size ($\sim 385\mathrm{M}$ parameters).
Image Classification We provide finetuning results on ImageNet-1K [81] following recent studies in self-supervised learning for computer vision. During finetuning and inference, a Trie-based search strategy is employed to constrain the generated text to the set of 1,000 candidate labels. We finetune OFA for 32 epochs with a batch size of 256. The learning rate is $5e - 5$. The ratio for label smoothing is 0.1. The encouraging loss proposed in [112] is employed with the hyperparameter LE set to 0.75. Following [36], we use the same random resized cropping, random flipping, RandAug [113] and random erasing [114] transformations as data augmentation strategies. Mixup [115] and CutMix [116] are applied to each batch with an overall probability of 0.5, with alpha set to 0.8 and 1.0, respectively. To adapt the mixed soft targets of Mixup and CutMix to the generation paradigm during finetuning, we run the decoder twice, each time with one of the two target sequences to be mixed, and sum the losses weighted by the mixing ratio.
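The two-pass adaptation of mixed targets described above can be sketched schematically as below; `model.encode`, `model.decode`, and `loss_fn` are placeholder interfaces, not the actual implementation.

```python
def mixed_generation_loss(model, images_a, images_b, targets_a, targets_b, lam, loss_fn):
    """Sketch: feed the mixed image once through the encoder, run the decoder twice
    (once per original target label sequence), and combine the two losses with the
    mixing ratio `lam` produced by Mixup/CutMix."""
    mixed_images = lam * images_a + (1.0 - lam) * images_b  # Mixup-style pixel mixing
    enc_out = model.encode(mixed_images)                    # placeholder encoder call
    loss_a = loss_fn(model.decode(enc_out, targets_a), targets_a)
    loss_b = loss_fn(model.decode(enc_out, targets_b), targets_b)
    return lam * loss_a + (1.0 - lam) * loss_b
```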
Natural Language Understanding To verify the natural language understanding ability of OFA, we select 6 language understanding tasks from GLUE benchmark [79], including both single-sentence classification tasks and sentence-pair
classification tasks. To adapt to sentence-pair classification, previous models [2, 28] usually use segment embeddings to distinguish different sentences. Unlike those models, OFA can apply the model to sentence-pair classification tasks by constructing appropriate instructions without introducing additional segment embeddings. For the hyper-parameters of finetuning, we tune the training epochs among $\{5,7,10\}$ , learning rate among $\{3e - 5,5e - 5,6e - 5,7e - 5,1e - 4\}$ , batch size among $\{32,64,128\}$ , weight decay among $\{0.01,0.05\}$ , and dropout rate among $\{0.0,0.1\}$ . We report the best performance on the development set for each task.
Natural Language Generation We verify the natural language generation ability of OFA in the Gigaword dataset [80]. We report ROUGE-1/ROUGE-2/ROUGE-L to evaluate the generation results following [80]. We finetune the OFA models for 6 epochs with a batch size of 512. The learning rate is $1e - 4$ with the label smoothing of 0.1, and the maximum input text sequence length is set to 512. During inference, we set the length penalty to 0.7 and beam size to 6, and limit the maximum generated length to 32.
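To make the instruction format of Table 12 concrete, the sketch below instantiates a few of the templates as plain strings; the helper and template keys are illustrative only, and `[Image]` stands for the image patch features prepended on the encoder side.

```python
# Illustrative instruction templates following Table 12 (not the authors' code).
TEMPLATES = {
    "caption":  "what does the image describe?",
    "vqa":      "{question}",
    "snli_ve":  'can image and text1 "{text1}" imply text2 "{text2}"?',
    "refcoco":  'which region does the text "{text}" describe?',
    "sst2":     'is the sentiment of text "{text}" positive or negative?',
    "gigaword": 'what is the summary of article "{article}"?',
}

def build_instruction(task, **fields):
    return TEMPLATES[task].format(**fields)

# Example: build_instruction("refcoco", text="the dog on the left")
# -> 'which region does the text "the dog on the left" describe?'
```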
# B Trie-based Search
This section describes how to use Trie-based search to improve model performance on downstream classification tasks. When dealing with classification tasks, we first construct a Trie where nodes are annotated with tokens from the candidate label-set. During finetuning, the model computes the log-probabilities of the target tokens based on their positions on the Trie. As shown in Figure 6, when computing the log-probabilities of the target token "sky", we only consider tokens in {"sky", "ocean"} and forcefully set the logits for all invalid tokens to $-\infty$ . During inference, we constrain the generated labels over the candidate set. As shown in Table 13, Trie-based search strategy can boost the performance of OFA in various downstream classification tasks.
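A minimal sketch of the constrained decoding described here: build a trie over the tokenized candidate labels, and at each step allow only tokens that extend some valid label, setting all other logits to $-\infty$. Tokenization and the model's scoring are left abstract; this is an illustration, not the actual implementation.

```python
import math

def build_trie(candidate_token_seqs):
    """Nested-dict trie over tokenized labels, e.g. [["blue", "sky"], ["blue", "ocean"], ["green"]]."""
    trie = {}
    for seq in candidate_token_seqs:
        node = trie
        for tok in seq:
            node = node.setdefault(tok, {})
        node["<eos>"] = {}  # marks the end of a complete label
    return trie

def allowed_next_tokens(trie, prefix):
    """Tokens that may follow the already-generated prefix while staying on the trie."""
    node = trie
    for tok in prefix:
        node = node[tok]
    return set(node.keys())

def mask_logits(logits, allowed):
    """logits: dict token -> score. Invalid tokens are forced to -inf before selection."""
    return {tok: (score if tok in allowed else -math.inf) for tok, score in logits.items()}

# Example from Figure 6: after generating ["blue"], only {"sky", "ocean"} remain valid.
trie = build_trie([["blue", "sky"], ["blue", "ocean"], ["green"]])
assert allowed_next_tokens(trie, ["blue"]) == {"sky", "ocean"}
```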
|
| 414 |
+
|
| 415 |
+

|
| 416 |
+
Figure 6: Example of Trie-based search where the constraint labels are "blue sky", "blue ocean" and "green". When computing the log-prob of token "sky", we only consider tokens in {"sky", "ocean"} and forcefully set the logits for all invalid tokens to $-\infty$ .
|
| 417 |
+
|
| 418 |
+
Table 13: Ablation results for Trie-based search. Removing Trie-based search degrades performance on downstream tasks. Note that the baseline $\mathrm{OFA_{Base}}$ is only pre-trained for $250\mathrm{k}$ steps, which is also the setting used in Table 10.
|
| 419 |
+
|
| 420 |
+
<table><tr><td>Model</td><td>VQA Test-dev Acc.</td><td>SNLI-VE Dev Acc.</td><td>ImageNet Top-1 Acc.</td><td>MRPC F1</td><td>QQP F1</td></tr><tr><td>OFA<sub>Base</sub></td><td>76.03</td><td>89.2</td><td>82.2</td><td>90.6</td><td>88.4</td></tr><tr><td>w/o Trie</td><td>75.86 (-0.17)</td><td>89.0 (-0.2)</td><td>81.9 (-0.3)</td><td>90.1 (-0.5)</td><td>88.2 (-0.2)</td></tr></table>
|
| 421 |
+
|
| 422 |
+
# C Qualitative Examples
|
| 423 |
+
|
| 424 |
+
This section provides more qualitative examples generated by OFA for multiple tasks, including text-to-image generation, open-domain VQA, grounded question answering, and open-domain visual grounding. We hope these examples give readers a better sense of OFA's capabilities.
|
| 425 |
+
|
| 426 |
+

|
| 427 |
+
An eagle view of a magic city.
|
| 428 |
+
|
| 429 |
+

|
| 430 |
+
A pathway to a temple with sakura trees in full bloom, HD.
|
| 431 |
+
|
| 432 |
+

|
| 433 |
+
A beautiful painting of native forest landscape photography, HD.
|
| 434 |
+
|
| 435 |
+

|
| 436 |
+
An art painting of a soldier, in the style of cyberpunk.
|
| 437 |
+
|
| 438 |
+

|
| 439 |
+
The golden palace of the land of clouds.
|
| 440 |
+
|
| 441 |
+

|
| 442 |
+
Rustic interior of an alchemy shop.
|
| 443 |
+
|
| 444 |
+

|
| 445 |
+
An art painting of a dog, in the style of steampunk, white background.
|
| 446 |
+
|
| 447 |
+

|
| 448 |
+
A strawberry splashing in the coffee in a mug under the starry sky.
|
| 449 |
+
|
| 450 |
+

|
| 451 |
+
Elf elk in the forest illustration, HD, fantasy art.
|
| 452 |
+
Figure 7: Examples of text-to-image generation. For better demonstration, we continue finetuning OFA on a subset of LAION-400M [117].
|
| 453 |
+
|
| 454 |
+

|
| 455 |
+
An art painting of a city, in the style of cyberpunk.
|
| 456 |
+
|
| 457 |
+

|
| 458 |
+
A painting of the sunset cliffs in the style of fantasy art.
|
| 459 |
+
|
| 460 |
+

|
| 461 |
+
A painting of the superman.
|
| 462 |
+
|
| 463 |
+

|
| 464 |
+
An art painting of a city, in the style of steampunk.
|
| 465 |
+
|
| 466 |
+

|
| 467 |
+
A painting of the sunset cliffs in the style of dark fantasy art.
|
| 468 |
+
|
| 469 |
+

|
| 470 |
+
A painting of the superman, in the dark style.
|
| 471 |
+
|
| 472 |
+

|
| 473 |
+
An oil painting of a pizza on the beach.
|
| 474 |
+
|
| 475 |
+

|
| 476 |
+
An oil painting of a computer in the sky.
|
| 477 |
+
|
| 478 |
+

|
| 479 |
+
A painting of the ship in the style of etching, HD.
|
| 480 |
+
Figure 8: Examples of text-to-image generation.
|
| 481 |
+
|
| 482 |
+

|
| 483 |
+
Q: what is the man sitting on?
|
| 484 |
+
|
| 485 |
+

|
| 486 |
+
Q: what is the dog doing now?
|
| 487 |
+
|
| 488 |
+

|
| 489 |
+
|
| 490 |
+

|
| 491 |
+
Q: what is the person in the right-bottom corner holding now?
|
| 492 |
+
|
| 493 |
+

|
| 494 |
+
A: light bulb
|
| 495 |
+
Q: what is the mood of the children in the picture?
|
| 496 |
+
A: happy
|
| 497 |
+
|
| 498 |
+

|
| 499 |
+
A: getting a bath
|
| 500 |
+
Q: what is the man doing?
|
| 501 |
+
A: walking
|
| 502 |
+
|
| 503 |
+

|
| 504 |
+
A: computer
|
| 505 |
+
Q: what is the name of the largest planet in the picture?
|
| 506 |
+
A: sun
|
| 507 |
+
|
| 508 |
+

|
| 509 |
+
Figure 9: More samples of VQA task on unseen domains. The answers are generated by pretrained OFA without finetuning. The datasets used in VQA pretraining task only contain real-world photographs. We present more cases of VQA task on out-of-domain (non-photographic) images and demonstrate the capability of transferring OFA to these unseen domains.
|
| 510 |
+
Q: what color is the car in the region? region: <loc301> <loc495> <loc501> <loc596>
|
| 511 |
+
|
| 512 |
+

|
| 513 |
+
Q: what color is the car in the region? region: <loc512> <loc483> <loc675> <loc576>
|
| 514 |
+
|
| 515 |
+

|
| 516 |
+
Q: what color is the roof in the region? region: <loc521> <loc176> <loc689> <loc290>
|
| 517 |
+
|
| 518 |
+

|
| 519 |
+
A: tan
|
| 520 |
+
Q: what color is the house in the region? region: <loc295> <loc120> <loc524> <loc491>
|
| 521 |
+
A: light blue
|
| 522 |
+
Figure 10: Samples of the unseen grounded question answering task. In this task, the model should answer a question about a particular region in the image. This task is unseen in pretraining. We demonstrate that directly transferring pretrained OFA to this new task without finetuning works well.
|
| 523 |
+
|
| 524 |
+

|
| 525 |
+
A: gray
|
| 526 |
+
Q: what color is the house in the region? region: <loc534> <loc172> <loc731> <loc516>
|
| 527 |
+
A: white
|
| 528 |
+
|
| 529 |
+

|
| 530 |
+
A: brown
|
| 531 |
+
Q: what object is in the region? region: <loc571> <loc175> <loc598> <loc240>
|
| 532 |
+
A: chimney
|
| 533 |
+
|
| 534 |
+

|
| 535 |
+
A blue turtle-like poker with round head.
|
| 536 |
+
|
| 537 |
+

|
| 538 |
+
A green toad-like lemon with seeds on its back.
|
| 539 |
+
|
| 540 |
+

|
| 541 |
+
A red dinosaur-like poker with a flaming tail.
|
| 542 |
+
|
| 543 |
+

|
| 544 |
+
a man with green hair in green clothes with three swords at his waist
|
| 545 |
+
|
| 546 |
+

|
| 547 |
+
a man in a straw hat and a red dress
|
| 548 |
+
|
| 549 |
+

|
| 550 |
+
a blond-haired man in a black suit and brown tie
|
| 551 |
+
|
| 552 |
+

|
| 553 |
+
a sexy lady wearing sunglasses and a crop top with black hair
|
| 554 |
+
|
| 555 |
+

|
| 556 |
+
a man with a long nose in a hat and yellow pants
|
| 557 |
+
|
| 558 |
+

|
| 559 |
+
a strange skeleton
|
| 560 |
+
|
| 561 |
+

|
| 562 |
+
A green elephant.
|
| 563 |
+
|
| 564 |
+

|
| 565 |
+
(a)
|
| 566 |
+
A normal elephant.
|
| 567 |
+
|
| 568 |
+

|
| 569 |
+
A red elephant.
|
| 570 |
+
|
| 571 |
+

|
| 572 |
+
A blue giraffe.
|
| 573 |
+
Figure 11: Samples of visual grounding task generated by OFA for various unseen domains: (a) anime (the corresponding animations are Pokemon and One Piece); (b) synthetic images with attribute combinations.
|
| 574 |
+
|
| 575 |
+

|
| 576 |
+
A giraffe near the blue giraffe.
|
| 577 |
+
(b)
|
| 578 |
+
|
| 579 |
+

|
| 580 |
+
A white giraffe.
|
2202.03xxx/2202.03052/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d7d1171a75de9db5929d9c92af5d1f22f7f465ab48a76b8c7d33db4f9d09eed0
|
| 3 |
+
size 2021201
|
2202.03xxx/2202.03052/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.03xxx/2202.03086/a8d50bac-a469-485a-80c5-e14a27594346_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.03xxx/2202.03086/a8d50bac-a469-485a-80c5-e14a27594346_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.03xxx/2202.03086/a8d50bac-a469-485a-80c5-e14a27594346_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:36cfdcffb77c2ff983ad289e000844c7d35fa8378b93a8483b91804d1c490efd
|
| 3 |
+
size 747443
|
2202.03xxx/2202.03086/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.03xxx/2202.03086/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:634c8c5ff83609f48327b0944b279d90a81b485a92be275cd64b59adbeba97d5
|
| 3 |
+
size 376428
|
2202.03xxx/2202.03086/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.03xxx/2202.03091/bd9d16b8-1b86-4a7c-a8fd-9c6b49e0a193_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.03xxx/2202.03091/bd9d16b8-1b86-4a7c-a8fd-9c6b49e0a193_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.03xxx/2202.03091/bd9d16b8-1b86-4a7c-a8fd-9c6b49e0a193_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6c65e9bb765fae70f0def49432959e1fcb84f1fde61004b86f95ca3d87a308e3
|
| 3 |
+
size 3159839
|
2202.03xxx/2202.03091/full.md
ADDED
|
@@ -0,0 +1,437 @@
|
| 1 |
+
# Auto- $\lambda$ : Disentangling Dynamic Task Relationships
|
| 2 |
+
|
| 3 |
+
Shikun Liu\*, Stephen James, Andrew J. Davison, and Edward Johns
|
| 4 |
+
|
| 5 |
+
$^{1}$ Dyson Robotics Lab, Imperial College London
|
| 6 |
+
|
| 7 |
+
$^{2}$ Robot Learning Lab, Imperial College London
|
| 8 |
+
|
| 9 |
+
$^{3}$ UC Berkeley
|
| 10 |
+
|
| 11 |
+
# Abstract
|
| 12 |
+
|
| 13 |
+
Understanding the structure of multiple related tasks allows multi-task learning to improve the generalisation ability of one or all of them. However, it usually requires training each pairwise combination of tasks together in order to capture task relationships, at an extremely high computational cost. In this work, we learn task relationships via an automated weighting framework, named Auto-λ. Unlike previous methods where task relationships are assumed to be fixed, Auto-λ is a gradient-based meta learning framework which explores continuous, dynamic task relationships via task-specific weightings, and can optimise any chosen combination of tasks through the formulation of a meta-loss, where the validation loss automatically influences task weightings throughout training. We apply the proposed framework to both multi-task and auxiliary learning problems in computer vision and robotics, and show that Auto-λ achieves state-of-the-art performance, even when compared to optimisation strategies designed specifically for each problem and data domain. Finally, we observe that Auto-λ can discover interesting learning behaviours, leading to new insights in multi-task learning. Code is available at https://github.com/lorenmt/auto-lambda.
|
| 14 |
+
|
| 15 |
+
# 1 Introduction
|
| 16 |
+
|
| 17 |
+
Multi-task learning can improve model accuracy, memory efficiency, and inference speed, when compared to training tasks individually. However, it often requires careful selection of which tasks should be trained together, to avoid negative transfer, where irrelevant tasks produce conflicting gradients and complicate the optimisation landscape. As such, without prior knowledge of the underlying relationships between the tasks, and hence which tasks should be trained together, multi-task learning can sometimes have worse prediction performance than single-task learning.
|
| 18 |
+
|
| 19 |
+
We define the relationship between two tasks to mean to what extent these two tasks should be trained together, following a similar definition in [59, 49, 8]. For example, we say that task $A$ is more related to task $B$ than task $C$ , if the performance of task $A$ is higher when training tasks $A$ and $B$ together, compared to when training tasks $A$ and $C$ together.
|
| 20 |
+
|
| 21 |
+

|
| 22 |
+
Figure 1: In Auto- $\lambda$ , task weightings are dynamically changed along with the multi-task network parameters, in joint optimisation. The task weightings can be updated in both the auxiliary learning setting (one task is the primary task) and the multi-task learning setting (all tasks are the primary tasks). In this example, in the auxiliary learning setting, semantic segmentation is the primary task which we are optimising for. During training, task weightings provide interpretable dynamic task relationships, where high weightings emerge when tasks are strongly related (e.g. normal prediction to segmentation) and low weightings when tasks are weakly related (e.g. depth prediction to segmentation).
|
| 23 |
+
|
| 24 |
+

|
| 25 |
+
|
| 26 |
+
To determine which tasks should be trained together, we could exhaustively search over all possible task groupings, where tasks in a group are equally weighted but all other tasks are ignored. However, this requires training $2^{|T|} - 1$ multi-task networks for a set of tasks $T$ , and the computational cost for this search can be intractable when $|T|$ is large. Prior works have developed efficient task grouping frameworks based on heuristics to speed up training, such as using an early stopping approximation [49] and computing a lookahead loss averaged across a few training steps [8]. However, these task grouping methods suffer from two prominent limitations. Firstly, they are two-stage methods, requiring a search for the best task structure and then re-training the multi-task network with that structure. Secondly, higher-order task relationships among three or more tasks are not directly obtainable due to the high computational cost. Instead, higher-order relationships are approximated by small combinations of lower-order relationships, and thus, as the number of training tasks increases, even evaluating these combinations may become prohibitively costly.
|
| 27 |
+
|
| 28 |
+
In this paper, instead of requiring these expensive searches or approximations, we propose that the relationship between tasks is dynamic, and based on the current state of the multi-task network during training. We consider that task relationships could be inferred within a single optimisation problem, which runs recurrently throughout training, and automatically balances the contributions of all tasks depending on which tasks we are optimising for. In this way, we aim to unify multi-task learning and auxiliary learning into a single framework: whilst multi-task learning aims to achieve optimal performance for all training tasks, auxiliary learning aims to achieve optimal performance for only a subset of training tasks (usually only one), which we call the primary tasks, and the rest of the training tasks are included purely to assist the primary tasks.
|
| 31 |
+
|
| 32 |
+
To this end, we propose a simple meta-learning algorithm, named Auto- $\lambda$ . Auto- $\lambda$ explores dynamic task relationships parameterised by task-specific weightings, termed $\lambda$ . Through a meta-loss formulation, we use the validation loss of the primary tasks to dictate how the task weightings should be altered, such that the performance of these primary tasks can be improved in the next iteration. This optimisation strategy allows us to jointly update the multi-task network as well as task weightings in a fully end-to-end manner.
|
| 33 |
+
|
| 34 |
+
We extensively evaluate Auto- $\lambda$ in both multi-task learning and auxiliary learning settings within both computer vision and robotics domains. We show that Auto- $\lambda$ outperforms not only all multi-task and auxiliary learning optimisation strategies, but also the optimal (but static) task groupings we found in the selected datasets. Finally, we take a deep introspection into Auto- $\lambda$ 's learning behaviour, and we find that the dynamic relationship between tasks is consistent across numerous multi-task architecture designs, with the converged final relationships aligned with the fixed relationships we found via brute-force search. The simple and efficient nature of our method leads to a promising new insight towards understanding the structure of tasks, task relationships, and multi-task learning in general.
|
| 35 |
+
|
| 36 |
+
# 2 Related Work
|
| 37 |
+
|
| 38 |
+
Multi-task Architectures Multi-Task Learning (MTL) aims at simultaneously solving multiple learning problems while sharing information across tasks. The techniques used in multi-task architecture design can be categorised into hard-parameter sharing [22, 14], soft-parameter sharing [40, 35, 38, 54, 51], and neural architecture search [11, 50, 45].
|
| 39 |
+
|
| 40 |
+
Multi-task and Auxiliary-task Optimisation In a direction orthogonal to architecture design, significant effort has also been invested in improving multi-task optimisation strategies. Although this is a multi-objective optimisation problem [47, 29, 55], a single surrogate loss consisting of a linear combination of task losses is more commonly studied in practice. Notable works have investigated finding suitable task weightings based on different criteria, such as task uncertainty [21], task prioritisation [12] and task loss magnitudes [35]. Other works have focused on directly modifying task gradients [2, 3, 57, 19, 30, 43].
|
| 41 |
+
|
| 42 |
+
Similar to multi-task learning, there is a challenge in choosing appropriate tasks to act as auxiliaries for the primary tasks. [6] proposed to use cosine similarity as an adaptive task weighting to determine when a defined auxiliary task is useful. [42] applied neural networks to optimally combine auxiliary losses in a non-linear manner.
|
| 43 |
+
|
| 44 |
+
Our approach is essentially a weighting-based optimisation framework, parameterising task relationships via learned task weightings. Whereas the multi-task and auxiliary learning optimisation strategies above are each tailored to a specific problem setting, Auto- $\lambda$ is designed to solve multi-task learning and auxiliary learning in a unified framework.
|
| 45 |
+
|
| 46 |
+
Understanding Task Grouping and Relationships These optimisation methods typically assume all training tasks are somewhat related, and the problem of which tasks should be trained together is often overlooked. In general, task relationships are often empirically measured by human intuition rather than prescient knowledge of the underlying structures learned by a neural network. This motivated the study of task relationships in the transfer learning setting [59, 7]. However, [49] showed that transfer learning algorithms do not carry over to the multi-task learning domain and instead propose a multi-task specific framework to approximate exhaustive search performance. Further work improved the training efficiency for which the task groupings are computed with only a single training run [8]. Rather than exploring fixed relationships, our method instead explores dynamic relationships directly during training.
|
| 47 |
+
|
| 48 |
+
Meta Learning for Multi-task Learning Meta learning [52, 15] has often been used in the multi-task learning setting, for example to generate auxiliary tasks in a self-supervised manner [34, 42] and to improve training efficiency on unseen tasks [9, 53]. Our work is also closely related to [20, 31], which proposed a task scheduler to learn task-agnostic features for supervised pre-training, whilst ours learns features that adapt specifically to the primary task; [55], which applied meta learning to solve multi-objective problems, whilst ours focuses on single-objective problems; and [39], which applied meta learning to balance worst-performing tasks, whilst ours balances multi-task learning by finding optimal task relationships. Viewed as meta learning, our framework learns to generate suitable, unbounded task weightings via a lookahead method, as a form of gradient-based meta learning.
|
| 49 |
+
|
| 50 |
+
Meta Learning for Hyper-parameter Optimisation Since Auto- $\lambda$ 's design models multi-task learning optimisation as learning task weightings $\lambda$ dynamically via gradients, we may also consider Auto- $\lambda$ as a meta learning-based hyper-parameter optimisation framework [37, 10, 1] by treating $\lambda$ as hyper-parameters. Similar to these frameworks, we also formulate a bi-level optimisation problem. However, different to these frameworks, we offer training strategies specifically tailored to the problem of multi-task learning whose goal is not only to obtain good primary task performance, but also explore interesting learning behaviours of Auto- $\lambda$ from the perspective of task relationships.
|
| 51 |
+
|
| 52 |
+
# 3 Background
|
| 53 |
+
|
| 54 |
+
Notations We denote a multi-task network to be $f(\cdot ;\theta)$ , with network parameters $\theta$ , consisting of task-shared and $K$ task-specific parameters: $\theta = \{\theta_{sh},\theta_{1:K}\}$ . Each task is assigned with task-specific weighting $\lambda = \{\lambda_{1:K}\}$ . We represent a set of task spaces by a pair of task-specific inputs and outputs: $\mathcal{T} = \{T_{1:K}\}$ , where $T_{i} = (X_{i},Y_{i})$ .
|
| 55 |
+
|
| 56 |
+
The design of the task spaces can be further divided into two different settings: a single-domain setting (where all inputs are the same, $X_{i} = X_{j}$ for $i \neq j$ , i.e., a one-to-many mapping), and a multi-domain setting (where all inputs are different, $X_{i} \neq X_{j}$ for $i \neq j$ , i.e., a many-to-many mapping). We want to optimise $\theta$ for all tasks $\mathcal{T}$ and obtain good performance on some pre-selected primary tasks $\mathcal{T}^{pri} \subseteq \mathcal{T}$ . If $\mathcal{T}^{pri} = \mathcal{T}$ , we are in the multi-task learning setting; otherwise we are in the auxiliary learning setting.
|
| 57 |
+
|
| 58 |
+
The Design of Optimisation Methods Multi-task or auxiliary learning optimisation methods are designed to balance training and avoid negative transfer. These optimisation strategies can further be categorised into two main directions:
|
| 59 |
+
|
| 60 |
+
(i) Single Objective Optimisation:
|
| 61 |
+
|
| 62 |
+
$$
|
| 63 |
+
\min_{\boldsymbol{\theta}} \sum_{i=1}^{K} \lambda_i \cdot L_i\left(f(x_i; \theta_{sh}, \theta_i), y_i\right), \tag{1}
|
| 64 |
+
$$
|
| 65 |
+
|
| 66 |
+
where the task-specific weightings $\lambda$ are applied to form a linearly combined, single-valued loss. Each task's influence on the network parameters can be indirectly balanced by finding a suitable set of weightings, which can be manually chosen or learned through a heuristic [21, 35] (we call these weighting-based methods); or directly balanced by operating on task-specific gradients [6, 57, 2, 3, 19, 30, 43] (we call these gradient-based methods). These methods are designed exclusively to alter optimisation.
|
| 67 |
+
|
| 68 |
+
On the other hand, we also have another class of approaches that determine task groupings [8, 49], which can be considered an alternative form of weighting-based method, finding fixed, binary task weightings that indicate which tasks should be trained together. Mixing the best of both worlds, Auto- $\lambda$ is an optimisation framework that simultaneously explores dynamic task relationships.
|
| 69 |
+
|
| 70 |
+
(ii) Multi-Objective Optimisation:
|
| 71 |
+
|
| 72 |
+
$$
|
| 73 |
+
\min_{\boldsymbol{\theta}} \left[ L_i\left(f(x_i; \theta_{sh}, \theta_i), y_i\right) \right]_{i=1:K}^{\intercal}, \tag{2}
|
| 74 |
+
$$
|
| 75 |
+
|
| 76 |
+
a vector-valued loss which is optimised towards Pareto optimality, i.e., a point at which no common gradient update can be found that decreases all task-specific losses [47, 29]. Note that this optimisation strategy can only be used in a multi-task learning setup.
|
| 77 |
+
|
| 78 |
+
# 4 Auto-λ: Exploring Dynamic Task Relationships
|
| 79 |
+
|
| 80 |
+
We now introduce our simple but powerful optimisation framework called Auto- $\lambda$ , which explores dynamic task relationships through task-specific weightings.
|
| 81 |
+
|
| 82 |
+
The Design Philosophy Auto- $\lambda$ is a gradient-based meta learning framework, a unified optimisation strategy for both multi-task and auxiliary learning problems, which learns task weightings, based on any combination of primary tasks. The design of Auto- $\lambda$ borrows the concept of lookahead methods in meta learning literature [9, 44], to update parameters at the current state of learning, based on the observed effect of those parameters on a future state. A recently proposed task grouping method [8] also applied a similar concept, to compute the relationships based on how gradient updates of one task can affect the performance of other tasks, additionally offering the option to couple with other gradient-based optimisation methods. Auto- $\lambda$ however is a standalone framework and encodes task relationships explicitly with a set of task weightings associated with training loss, directly optimised based on the validation loss of the primary tasks.
|
| 83 |
+
|
| 84 |
+
Bi-level Optimisation Let us denote $\mathcal{P}$ as the set of indices for all primary tasks defined in $\mathcal{T}^{pri}$ ; $(x_{i}^{val}, y_{i}^{val})$ and $(x_{i}^{train}, y_{i}^{train})$ are sampled from the validation and training sets of the $i^{th}$ task space, respectively. The goal of Auto- $\lambda$ is to find optimal task weightings $\lambda^*$ , which minimise the validation loss on the primary tasks, as a way to measure generalisation, where the optimal multi-task network parameters $\theta^*$ are obtained by minimising the $\lambda^*$ weighted training loss on all tasks. This implies the following bi-level optimisation problem:
|
| 85 |
+
|
| 86 |
+
$$
\min_{\lambda} \sum_{i \in \mathcal{P}} L_i\left(f(x_i^{val}; \theta_{sh}^{*}, \theta_i^{*}), y_i^{val}\right) \quad \text{s.t.} \quad \theta^{*} = \arg\min_{\theta} \sum_{i=1}^{K} \lambda_i \cdot L_i\left(f(x_i^{train}; \theta_{sh}, \theta_i), y_i^{train}\right). \tag{3}
$$
|
| 91 |
+
|
| 92 |
+
Approximation via Finite Difference Now, we may rewrite Eq. 3 with a simple approximation scheme by updating $\theta$ and $\lambda$ iteratively with one gradient update each:
|
| 93 |
+
|
| 94 |
+
$$
|
| 95 |
+
\boldsymbol{\theta}' = \boldsymbol{\theta} - \alpha \nabla_{\boldsymbol{\theta}} \sum_{i=1}^{K} \lambda_i \cdot L_i\left(f(x_i^{train}; \theta_{sh}, \theta_i), y_i^{train}\right), \tag{4}
|
| 96 |
+
$$
|
| 97 |
+
|
| 98 |
+
$$
|
| 99 |
+
\lambda \leftarrow \lambda - \beta \nabla_{\lambda} \sum_{i \in \mathcal{P}} L_i\left(f(x_i^{val}; \theta_{sh}', \theta_i'), y_i^{val}\right), \tag{5}
|
| 100 |
+
$$
|
| 101 |
+
|
| 102 |
+
$$
|
| 103 |
+
\boldsymbol{\theta} \leftarrow \boldsymbol{\theta} - \alpha \nabla_{\boldsymbol{\theta}} \sum_{i=1}^{K} \lambda_i \cdot L_i\left(f(x_i^{train}; \theta_{sh}, \theta_i), y_i^{train}\right), \tag{6}
|
| 104 |
+
$$
|
| 105 |
+
|
| 106 |
+
where $\alpha$ and $\beta$ are manually defined learning rates.
|
| 107 |
+
|
| 108 |
+
The above optimisation requires computing second-order gradients, which may incur a large memory footprint and slow down training. Therefore, we apply a finite difference approximation to reduce complexity, similar to other gradient-based meta learning methods [9, 32]. For simplicity, let $\mathcal{L}(\theta, \lambda)$ and $\mathcal{L}^{pri}(\theta, \lambda)$ denote the $\lambda$ -weighted loss over all tasks and over the primary tasks, respectively. The gradient to update $\lambda$ can be approximated by:
|
| 109 |
+
|
| 110 |
+
$$
|
| 111 |
+
\begin{aligned} \nabla_{\lambda} \mathcal{L}^{pri}(\boldsymbol{\theta}^{*}, \mathbf{1}) &\approx \nabla_{\lambda} \mathcal{L}^{pri}\left(\boldsymbol{\theta} - \alpha \nabla_{\boldsymbol{\theta}} \mathcal{L}(\boldsymbol{\theta}, \lambda), \mathbf{1}\right) \\ &= \nabla_{\lambda} \mathcal{L}^{pri}(\boldsymbol{\theta}', \mathbf{1}) - \alpha \nabla^{2}_{\theta, \lambda} \mathcal{L}(\boldsymbol{\theta}, \lambda)\, \nabla_{\theta'} \mathcal{L}^{pri}(\boldsymbol{\theta}', \mathbf{1}) \\ &\approx -\alpha \frac{\nabla_{\lambda} \mathcal{L}(\boldsymbol{\theta}^{+}, \lambda) - \nabla_{\lambda} \mathcal{L}(\boldsymbol{\theta}^{-}, \lambda)}{2\epsilon}, \end{aligned} \tag{7}
|
| 112 |
+
$$
|
| 113 |
+
|
| 114 |
+
where $\theta' \gets \theta - \alpha \nabla_{\theta} \mathcal{L}(\theta, \lambda)$ denotes the network weights of the one-step lookahead model, and $\theta^{\pm} = \theta \pm \epsilon \cdot \nabla_{\theta'} \mathcal{L}^{pri}(\theta', \mathbf{1})$ , with $\epsilon$ a small constant. $\mathbf{1}$ is a vector of constants indicating that all primary tasks are of equal importance; we may also apply different constants based on prior knowledge.
|
| 115 |
+
|
| 116 |
+
Note that $\lambda$ is applied only to the training loss, not the validation loss; otherwise, we would trivially reach the solution $\lambda = 0$ . In addition, assuming $\theta' = \theta^{*}$ is not applicable either, since it would give $\nabla_{\lambda} = 0$ .
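Putting Eqs. 4-7 together, one weighting update can be sketched roughly as follows. This is an illustrative PyTorch-style sketch under stated assumptions (a hypothetical `model.task_loss(i, batch)` interface and manually managed weightings), not the authors' released implementation.

```python
import torch

def auto_lambda_step(model, lambdas, train_batches, val_batches, primary_ids,
                     alpha=1e-4, beta=1e-4, eps=1e-4):
    """One Auto-lambda weighting update using the finite-difference form of Eq. 7.

    Assumptions (not the released code): `model.task_loss(i, batch)` returns the
    scalar loss of task i, and `lambdas` is a 1-D tensor of task weightings with
    requires_grad=False, updated manually below.
    """
    params = list(model.parameters())
    num_tasks = len(lambdas)

    # Eq. 4: one-step lookahead model theta' = theta - alpha * grad of the weighted train loss.
    train_loss = sum(lambdas[i] * model.task_loss(i, train_batches[i]) for i in range(num_tasks))
    grads = torch.autograd.grad(train_loss, params)
    theta_backup = [p.detach().clone() for p in params]
    with torch.no_grad():
        for p, g in zip(params, grads):
            p -= alpha * g

    # Gradient of the primary-task validation loss w.r.t. theta' (heads of
    # non-primary tasks receive no gradient, hence allow_unused).
    val_loss = sum(model.task_loss(i, val_batches[i]) for i in primary_ids)
    val_grads = torch.autograd.grad(val_loss, params, allow_unused=True)
    val_grads = [g if g is not None else torch.zeros_like(p) for g, p in zip(val_grads, params)]

    # Eq. 7: finite difference around theta +/- eps * val_grads.
    def grad_wrt_lambda(sign):
        with torch.no_grad():
            for p, p0, g in zip(params, theta_backup, val_grads):
                p.copy_(p0 + sign * eps * g)
        lam = lambdas.clone().requires_grad_(True)
        loss = sum(lam[i] * model.task_loss(i, train_batches[i]) for i in range(num_tasks))
        return torch.autograd.grad(loss, lam)[0]

    g_plus, g_minus = grad_wrt_lambda(+1.0), grad_wrt_lambda(-1.0)
    with torch.no_grad():
        # Eq. 5 with the approximated gradient from Eq. 7.
        lambdas -= beta * (-alpha * (g_plus - g_minus) / (2.0 * eps))
        # Restore theta; the actual network update of Eq. 6 is then taken
        # separately with the freshly updated lambdas.
        for p, p0 in zip(params, theta_backup):
            p.copy_(p0)
    return lambdas
```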
|
| 117 |
+
|
| 118 |
+
Swapping Training Data In practice, instead of splitting the training data into separate training and validation sets as in the standard meta learning setup, we sample the training and validation data as different batches from the same training dataset. We found that this simple data-swapping strategy learns weightings similar to those obtained by sampling batches from separate datasets, making Auto- $\lambda$ a single-stage framework with end-to-end optimisation.
|
| 119 |
+
|
| 120 |
+
Stochastic Task Sampling Eq. 4 requires computing gradients for all training tasks. This may lead to significant GPU memory consumption, particularly in the multi-domain setting, where the task-shared parameters accumulate gradients from all training tasks. To further save memory, we may also optimise $\lambda$ over multiple steps, where each step only computes gradients for $K' \ll K$ stochastically sampled tasks, as sketched below. This design allows Auto- $\lambda$ to be optimised with a constant memory cost, independent of the number of training tasks. In practice, we choose the largest $K'$ that fits in a GPU for each dataset to speed up training, and we observed that performance is consistent across a wide range of $K'$ .
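A trivial sketch of the stochastic task sampling described above (names are ours):

```python
import random

def sample_task_subset(num_tasks, k_prime):
    """Illustrative helper: sample K' << K task indices for one optimisation step."""
    return random.sample(range(num_tasks), k_prime)

# Example: with the 20 CIFAR-100 domains and K' = 5, each step only back-propagates
# through 5 task losses, keeping memory roughly constant in the number of tasks.
tasks_this_step = sample_task_subset(num_tasks=20, k_prime=5)
```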
|
| 121 |
+
|
| 122 |
+
# 5 Experiments
|
| 123 |
+
|
| 124 |
+
To validate the generalisation of Auto- $\lambda$ , we experimented on both single-domain and multi-domain computer vision and robotics datasets, in multi-task learning and auxiliary learning settings, with various choices of multi-task architectures.
|
| 125 |
+
|
| 126 |
+
Baselines In multi-task experiments, we compared Auto- $\lambda$ with state-of-the-art weighting-based multi-task optimisation methods: i) Equal weighting, ii) Uncertainty weighting [21], and iii) DWA (Dynamic Weight Average) [35]. In auxiliary learning experiments, we compared only with GCS (Gradient Cosine Similarity) [6], due to the limited prior work in this setting. Additional experiments comparing against gradient-based methods are shown in Additional Analysis (Section 7.2).
|
| 127 |
+
|
| 128 |
+
Optimisation Strategies By default, we considered each single task as the primary task in the auxiliary learning setting, unless labelled otherwise. In all experiments, Auto- $\lambda$ 's task weightings were initialised to 0.1, a small weighting which assumes that all tasks are equally not related. The learning rate to update these weightings is hand-selected for each dataset. For fair comparison, the optimisation strategies used in all baselines and our method are the same with respect to each dataset and in each data domain. Detailed hyper-parameters are listed in Appendix A.
|
| 129 |
+
|
| 130 |
+
# 5.1 Results on Dense Prediction Tasks
|
| 131 |
+
|
| 132 |
+
First, we evaluated Auto- $\lambda$ with dense prediction tasks in NYUv2 [41] and CityScapes [4], two standard multi-task datasets in a single-domain setting. In NYUv2, we trained on 3 tasks: 13-class semantic segmentation, depth prediction, and surface normal prediction, with the same experimental setting as in [35]. In CityScapes, we trained on 3 tasks: 19-class semantic segmentation, disparity (inverse depth) estimation, and a recently proposed 10-class part segmentation [5], with the same experimental setting as in [21]. In both datasets, we trained on two multi-task architectures: Split: the standard multi-task learning architecture with
|
| 133 |
+
|
| 134 |
+
<table><tr><td>NYUv2</td><td>Method</td><td>Sem. Seg. [mIoU ↑]</td><td>Depth [aErr. ↓]</td><td>Normal [mDist. ↓]</td><td>ΔMTL ↑</td></tr><tr><td>Single-Task</td><td>-</td><td>43.37</td><td>52.24</td><td>22.40</td><td>-</td></tr><tr><td rowspan="4">Split Multi-Task</td><td>Equal</td><td>44.64</td><td>43.32</td><td>24.48</td><td>+3.57%</td></tr><tr><td>DWA</td><td>45.14</td><td>43.06</td><td>24.17</td><td>+4.58%</td></tr><tr><td>Uncertainty</td><td>45.98</td><td>41.26</td><td>24.09</td><td>+6.50%</td></tr><tr><td>Auto-λ</td><td>47.17</td><td>40.97</td><td>23.68</td><td>+8.21%</td></tr><tr><td rowspan="4">Split Auxiliary-Task</td><td>Uncertainty</td><td>45.26</td><td>42.25</td><td>24.36</td><td>+4.91%</td></tr><tr><td>GCS</td><td>45.01</td><td>42.06</td><td>24.12</td><td>+5.20%</td></tr><tr><td>Auto-λ [3 Tasks]</td><td>48.04</td><td>40.61</td><td>23.31</td><td>+9.66%</td></tr><tr><td>Auto-λ [1 Task]</td><td>47.80</td><td>40.27</td><td>23.09</td><td>+10.02%</td></tr><tr><td rowspan="4">MTAN Multi-Task</td><td>Equal</td><td>44.62</td><td>42.64</td><td>24.29</td><td>+4.27%</td></tr><tr><td>DWA</td><td>45.04</td><td>42.81</td><td>24.02</td><td>+4.89%</td></tr><tr><td>Uncertainty</td><td>46.41</td><td>40.94</td><td>23.65</td><td>+7.69%</td></tr><tr><td>Auto-λ</td><td>47.63</td><td>40.37</td><td>23.28</td><td>+9.54%</td></tr><tr><td rowspan="4">MTAN Auxiliary-Task</td><td>Uncertainty</td><td>44.56</td><td>42.21</td><td>24.26</td><td>+4.55%</td></tr><tr><td>GCS</td><td>44.28</td><td>44.07</td><td>24.03</td><td>+3.49%</td></tr><tr><td>Auto-λ [3 Tasks]</td><td>47.35</td><td>40.10</td><td>23.41</td><td>+9.30%</td></tr><tr><td>Auto-λ [1 Task]</td><td>47.70</td><td>39.89</td><td>22.75</td><td>+10.69%</td></tr></table>
|
| 135 |
+
|
| 136 |
+
<table><tr><td>CityScapes</td><td>Method</td><td>Sem. Seg. [mIoU ↑]</td><td>Part Seg. [mIoU ↑]</td><td>Disp. [aErr. ↓]</td><td>ΔMTL ↑</td></tr><tr><td>Single-Task</td><td>-</td><td>56.20</td><td>52.74</td><td>0.84</td><td>-</td></tr><tr><td rowspan="4">Split Multi-Task</td><td>Equal</td><td>54.03</td><td>50.18</td><td>0.79</td><td>-0.92%</td></tr><tr><td>DWA</td><td>54.93</td><td>50.15</td><td>0.80</td><td>-0.80%</td></tr><tr><td>Uncertainty</td><td>56.06</td><td>52.98</td><td>0.82</td><td>+0.86%</td></tr><tr><td>Auto-λ</td><td>56.08</td><td>51.88</td><td>0.76</td><td>+2.56%</td></tr><tr><td rowspan="4">Split Auxiliary-Task</td><td>Uncertainty</td><td>55.72</td><td>52.62</td><td>0.83</td><td>+0.04%</td></tr><tr><td>GCS</td><td>55.76</td><td>52.19</td><td>0.80</td><td>+0.98%</td></tr><tr><td>Auto-λ [3 Tasks]</td><td>56.42</td><td>52.42</td><td>0.78</td><td>+2.31%</td></tr><tr><td>Auto-λ [1 Task]</td><td>57.89</td><td>53.56</td><td>0.77</td><td>+4.30%</td></tr><tr><td rowspan="4">MTAN Multi-Task</td><td>Equal</td><td>55.05</td><td>50.74</td><td>0.78</td><td>+0.43%</td></tr><tr><td>DWA</td><td>54.71</td><td>51.07</td><td>0.80</td><td>-0.35%</td></tr><tr><td>Uncertainty</td><td>56.28</td><td>53.24</td><td>0.82</td><td>+1.16%</td></tr><tr><td>Auto-λ</td><td>56.57</td><td>52.67</td><td>0.75</td><td>+3.75%</td></tr><tr><td rowspan="4">MTAN Auxiliary-Task</td><td>Uncertainty</td><td>56.13</td><td>52.78</td><td>0.83</td><td>+0.38%</td></tr><tr><td>GCS</td><td>55.47</td><td>52.75</td><td>0.76</td><td>+2.75%</td></tr><tr><td>Auto-λ [3 Tasks]</td><td>57.64</td><td>52.77</td><td>0.78</td><td>+3.25%</td></tr><tr><td>Auto-λ [1 Task]</td><td>58.39</td><td>54.00</td><td>0.78</td><td>+4.48%</td></tr></table>
|
| 137 |
+
|
| 138 |
+
Table 1: Performance on NYUv2 and CityScapes datasets with multi-task and auxiliary learning methods in Split and MTAN multi-task architectures. Auxiliary learning is additionally trained with a noise prediction task. Results are averaged over two independent runs, and the best results are highlighted in bold.
|
| 139 |
+
|
| 140 |
+
hard parameter sharing, which splits at the last layer for the final prediction for each specific task; MTAN [35]: a state-of-the-art multi-task architecture based on task specific feature-level attention. Both networks were based on ResNet-50 [13] as the backbone architecture.
|
| 141 |
+
|
| 142 |
+
Evaluation Metrics We evaluated segmentation, depth and normal via mean intersection over union (mIoU), absolute error (aErr.), and mean angle distances (mDist.), respectively. Following [38], we also report the overall relative multi-task performance $\Delta_{\mathrm{MTL}}$ of model $m$ averaged with respect to each single-task baseline $b$ :
|
| 143 |
+
|
| 144 |
+
$$
|
| 145 |
+
\Delta_{\mathrm{MTL}} = \frac{1}{K} \sum_{i=1}^{K} (-1)^{l_i} \left(M_{m,i} - M_{b,i}\right) / M_{b,i}, \tag{8}
|
| 146 |
+
$$
|
| 147 |
+
|
| 148 |
+
where $l_{i} = 1$ if a lower value of metric $M_{i}$ means better performance for task $i$ , and 0 otherwise.
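For concreteness, Eq. 8 can be computed as in the following sketch. The example numbers are the Auto-λ Split multi-task row and the single-task baselines from Table 1, which recover the reported +8.21%.

```python
def delta_mtl(multi_task_metrics, single_task_metrics, lower_is_better):
    """Relative multi-task performance of Eq. 8: the average per-task relative
    improvement over the single-task baseline, with the sign flipped for
    metrics where a lower value is better."""
    total = 0.0
    for m, b, lower in zip(multi_task_metrics, single_task_metrics, lower_is_better):
        sign = -1.0 if lower else 1.0
        total += sign * (m - b) / b
    return total / len(multi_task_metrics)

# NYUv2 example from Table 1: mIoU (higher better), depth aErr. and
# normal mDist. (lower better).
print(delta_mtl([47.17, 40.97, 23.68], [43.37, 52.24, 22.40], [False, True, True]))
# ~0.0821, i.e. +8.21%, matching Table 1.
```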
|
| 149 |
+
|
| 150 |
+
Noise Prediction as Sanity Check In auxiliary learning, we additionally trained with a noise prediction task along with the standard three tasks defined in a dataset. The noise prediction task was generated by assigning a random noise map sampled from a Uniform distribution for each training image. This task is designed to test the effectiveness of different auxiliary learning methods in the presence of useless gradients. We trained from scratch for a fair comparison among all methods in our experiments, following prior works [35, 50, 21].
|
| 151 |
+
|
| 152 |
+
Results Table 1 showed results for CityScapes and NYUv2 datasets in both Split and MTAN multi-task architectures. Our Auto- $\lambda$ outperformed all baselines in multi-task and auxiliary learning settings across both multi-task networks, and has a particularly prominent effect in auxiliary learning setting where it doubles the relative overall multi-task performance compared to auxiliary learning baselines.
|
| 153 |
+
|
| 154 |
+
<table><tr><td>CIFAR-100</td><td>Method</td><td>People</td><td>Aquatic Animals</td><td>Small Mammals</td><td>Trees</td><td>Reptiles</td><td>Avg.</td></tr><tr><td>Single-Task</td><td>-</td><td>55.37</td><td>68.65</td><td>72.79</td><td>75.37</td><td>75.84</td><td>82.19</td></tr><tr><td rowspan="4">Multi-Task</td><td>Equal</td><td>57.73</td><td>73.59</td><td>74.41</td><td>74.64</td><td>76.69</td><td>82.46</td></tr><tr><td>Uncertainty</td><td>54.14</td><td>70.62</td><td>74.08</td><td>74.62</td><td>75.62</td><td>82.03</td></tr><tr><td>DWA</td><td>55.25</td><td>71.54</td><td>74.12</td><td>75.68</td><td>76.26</td><td>82.26</td></tr><tr><td>Auto-λ</td><td>57.57</td><td>74.00</td><td>75.05</td><td>75.15</td><td>77.55</td><td>83.92</td></tr><tr><td rowspan="2">Auxiliary-Task</td><td>GCS</td><td>56.45</td><td>71.05</td><td>72.93</td><td>74.45</td><td>76.29</td><td>82.58</td></tr><tr><td>Auto-λ</td><td>60.89</td><td>75.70</td><td>75.64</td><td>77.38</td><td>81.75</td><td>84.92</td></tr></table>
|
| 155 |
+
|
| 156 |
+
Table 2: Performance on 20 tasks in the CIFAR-100 dataset with multi-task and auxiliary learning methods. We report the performance on the 5 domains with the lowest single-task performance, along with the average performance across all 20 domains. Results are averaged over two independent runs, and the best results are highlighted in bold.
|
| 157 |
+
|
| 158 |
+
We show results for two auxiliary task settings: optimising for just one task (Auto- $\lambda$ [1 Task]), where the other three tasks (including noise prediction) are purely auxiliary, and optimising for all three tasks (Auto- $\lambda$ [3 Tasks]), where only the noise prediction task is purely auxiliary. Auto- $\lambda$ [3 Tasks] has nearly identical performance to Auto- $\lambda$ in a multi-task learning setting, whereas the best multi-task baseline Uncertainty achieved notably worse performance when trained with noise prediction as an auxiliary task. This shows that standard multi-task optimisation is susceptible to negative transfer, whereas Auto- $\lambda$ can avoid negative transfer due to its ability to minimise $\lambda$ for tasks that do not assist with the primary task. We also show that Auto- $\lambda$ [1 Task] can further improve performance relative to Auto- $\lambda$ [3 Tasks], at the cost of task-specific training for each individual task.
|
| 159 |
+
|
| 160 |
+
# 5.2 Results on Multi-domain Classification Tasks
|
| 161 |
+
|
| 162 |
+
We now evaluate Auto- $\lambda$ on image classification tasks in a multi-domain setting. We trained on CIFAR-100 [23] and treated each of the 20 'coarse' classes as one domain, thus creating a dataset with 20 tasks, where each task is a 5-class classification over the dataset's 'fine' classes, following [45, 57]. For multi-task and auxiliary learning, we trained all methods on a VGG-16 network [48] with standard hard-parameter sharing (Split), where each task has a task-specific prediction layer.
|
| 163 |
+
|
| 164 |
+
Results In Table 2, we show classification accuracy on the 5 most challenging domains, which had the lowest single-task performance, along with the average performance across all 20 domains. Multi-task learning on this dataset is particularly demanding, since we optimise with a $\times 20$ smaller parameter space per task compared to single-task learning. We observe that all multi-task baselines achieved overall performance similar to single-task learning, due to the limited per-task parameter space. However, Auto- $\lambda$ was still able to improve the overall performance by a non-trivial margin. Similarly, Auto- $\lambda$ further improves performance in the auxiliary learning setting, with significantly higher per-task performance in the challenging domains, around a $5 - 7\%$ absolute improvement in test accuracy.
|
| 165 |
+
|
| 166 |
+
# 5.3 Results on Robot Manipulation Tasks
|
| 167 |
+
|
| 168 |
+
Finally, to further emphasise the generality of Auto- $\lambda$ , we also experimented on visual imitation learning tasks within a multi-domain robotic manipulation setting.
|
| 169 |
+
|
| 170 |
+
To train and evaluate our method, we selected 10 tasks (visualised in Fig. 2) from the robot learning environment RLBench [18]. Training data was acquired by first collecting 100 demonstrations for each task, and then running keyframe discovery following [16] to split each task into a small number of simple stages, creating a behavioural cloning dataset. Our network takes RGB and point-cloud inputs from 3 cameras (left shoulder, right shoulder, and wrist camera), and outputs a continuous 6D pose and a discrete gripper action. In order to distinguish between the tasks, a learnable task encoding is also fed to the network for multi-task and auxiliary learning. Full training details are given in Appendix B.
|
| 171 |
+
|
| 172 |
+

|
| 173 |
+
Figure 2: A visual illustration of 10 RLBench tasks from the front-facing camera. Task names are: reach target, push button, pick and lift, pick up cup, put knife on chopping board, take money out of safe, put money in safe, take umbrella out of umbrella stand, stack wine, slide block to target.
|
| 174 |
+
|
| 175 |
+
Results In Table 3, we report the success rate of each task and the average performance over the 10 RLBench tasks. In addition to the baselines outlined in Section 5, we also included an additional baseline based on Priority Replay [46], a popular method for increasing sample efficiency in robot learning systems. For this baseline, prioritisation is applied individually for each task. Similar to the computer vision tasks, Auto-λ achieved the best performance in both the multi-task and auxiliary learning setups, and in particular improved the success rate by up to $30 - 40\%$ on some multi-stage tasks compared to single-task learning.
|
| 176 |
+
|
| 177 |
+
<table><tr><td>RLBench</td><td>Method</td><td>Reach Target</td><td>Push Button</td><td>Pick And Lift</td><td>Pick Up Cup</td><td>Put Knife on Chopping Board</td><td>Take Money Out Safe</td><td>Put Money In Safe</td><td>Pick Up Umbrella</td><td>Stack Wine</td><td>Slide Block To Target</td><td>Avg.</td></tr><tr><td>Single-Task</td><td>-</td><td>100</td><td>95</td><td>82</td><td>72</td><td>36</td><td>38</td><td>31</td><td>37</td><td>23</td><td>36</td><td>55.0</td></tr><tr><td rowspan="5">Multi-Task</td><td>Equal</td><td>100</td><td>92</td><td>86</td><td>69</td><td>40</td><td>57</td><td>57</td><td>44</td><td>16</td><td>40</td><td>60.1</td></tr><tr><td>Uncertainty</td><td>100</td><td>95</td><td>75</td><td>56</td><td>19</td><td>60</td><td>79</td><td>70</td><td>16</td><td>65</td><td>63.5</td></tr><tr><td>DWA</td><td>100</td><td>90</td><td>88</td><td>82</td><td>35</td><td>66</td><td>57</td><td>61</td><td>16</td><td>66</td><td>66.1</td></tr><tr><td>Priority</td><td>100</td><td>96</td><td>78</td><td>78</td><td>28</td><td>52</td><td>36</td><td>46</td><td>15</td><td>34</td><td>56.2</td></tr><tr><td>Auto-λ</td><td>100</td><td>95</td><td>87</td><td>78</td><td>31</td><td>64</td><td>62</td><td>80</td><td>19</td><td>77</td><td>69.3</td></tr><tr><td rowspan="2">Auxiliary-Task</td><td>GCS</td><td>100</td><td>97</td><td>81</td><td>67</td><td>42</td><td>56</td><td>58</td><td>60</td><td>14</td><td>77</td><td>65.2</td></tr><tr><td>Auto-λ</td><td>100</td><td>93</td><td>90</td><td>85</td><td>49</td><td>64</td><td>75</td><td>74</td><td>20</td><td>78</td><td>72.8</td></tr></table>
|
| 178 |
+
|
| 179 |
+
Table 3: Performance of 10 RLBench tasks with multi-task and auxiliary learning methods. We reported the success rate with 100 evaluations for each task averaged across two random seeds. Best results are highlighted in bold.
|
| 180 |
+
|
| 181 |
+
# 6 Intriguing Learning Strategies in Auto-λ
|
| 182 |
+
|
| 183 |
+
In this section, we visualise and analyse the learned weightings from Auto- $\lambda$ , and find that Auto- $\lambda$ produces interesting learning strategies with interpretable relationships. Specifically, we focus on using Auto- $\lambda$ to understand the underlying structure of tasks, introduced next.
|
| 184 |
+
|
| 185 |
+
# 6.1 Understanding The Structure of Tasks
|
| 186 |
+
|
| 187 |
+
Task relationships are consistent. Firstly, we observe that the structure of tasks is consistent across the choices of learning algorithms. As shown in Fig. 3, the learned weightings with both the NYUv2 and CityScapes datasets are nearly identical, given the same optimisation strategies, independent of the network architectures. This observation is also supported by the empirical findings in [59, 49] in both task transfer and multi-task learning settings.
|
| 188 |
+
|
| 189 |
+
Task relationships are asymmetric. We also found that the task relationships are asymmetric, i.e. learning task $A$ with the knowledge of task $B$ is not equivalent to learning task $B$ with the knowledge of task $A$ . A simple example is shown in Fig. 4 Right, where the semantic segmentation task in CityScapes helps the part segmentation task much more than the part segmentation helps the semantic segmentation. This also follows intuition: the representation required for semantic segmentation is a subset of the representation required for part segmentation. This observation is also consistent with recent multi-task learning frameworks [25, 26, 58, 56].
|
| 190 |
+
|
| 191 |
+

|
| 192 |
+
Figure 3: Auto-λ explored consistent task relationships in NYUv2 and CityScapes datasets for both Split and MTAN architectures. Higher task weightings indicate stronger relationships, and lower task weightings indicate weaker relationships.
|
| 193 |
+
|
| 194 |
+

|
| 195 |
+
|
| 196 |
+

|
| 197 |
+
|
| 198 |
+

|
| 199 |
+
|
| 200 |
+

|
| 201 |
+
Figure 4: Auto- $\lambda$ learned dynamic relationships based on the choice of primary tasks and can avoid negative transfer, whilst the Uncertainty method is not able to avoid negative transfer, maintaining a constant weighting on the noise prediction task across the entire training stage. [·] represents the choice of primary tasks.
|
| 202 |
+
|
| 203 |
+

|
| 204 |
+
|
| 205 |
+

|
| 206 |
+
|
| 207 |
+

|
| 208 |
+
|
| 209 |
+
Task relationships are dynamic. A unique property of Auto- $\lambda$ is the ability to explore dynamic task relationships. As shown in Fig. 4 Left, we can observe that a weighting crossover appears in NYUv2 near the end of training, which can be seen as an automated curriculum learning strategy. Further, in Fig. 5, we verify that Auto- $\lambda$ achieved higher per-task performance compared to every combination of fixed task groupings in the NYUv2 and CityScapes datasets. We can also observe that the task relationships inferred from the fixed task groupings are perfectly aligned with the relationships learned by Auto- $\lambda$ . For example, the performance of semantic segmentation trained with
|
| 210 |
+
|
| 211 |
+

|
| 212 |
+
Figure 5: Auto- $\lambda$ achieved best per-task performance compared to every combination of fixed task groupings in NYUv2 and CityScapes trained with Split architecture.
|
| 213 |
+
|
| 214 |
+

|
| 215 |
+
|
| 216 |
+
normal prediction $(+6.6\%)$ is higher than when trained with depth prediction $(-6.0\%)$ , which is consistent with the fact that the weighting of normal prediction (0.84) is higher than that of depth prediction (0.52), as shown in Fig. 3. In addition, we can observe that the Uncertainty method [21] is not able to avoid negative transfer from the noise prediction task, keeping a constant weighting across the entire training stage, which leads to degraded multi-task performance as in Table 1. These observations confirm that Auto- $\lambda$ is an advanced optimisation strategy, able to learn accurate and consistent task relationships.
|
| 217 |
+
|
| 218 |
+
# 7 Additional Analysis
|
| 219 |
+
|
| 220 |
+
Finally, we present some additional analyses on NYUv2 dataset with Split multi-task architecture to understand the behaviour of Auto- $\lambda$ with respect to different hyper-parameters and other types of optimisation strategies.
|
| 221 |
+
|
| 222 |
+
# 7.1 Robustness on Training Strategies
|
| 223 |
+
|
| 224 |
+
Here, we evaluate different hyper-parameters trained with Auto- $\lambda$ [3 Tasks] in the auxiliary learning setting. As seen in Fig. 6, we found that Auto- $\lambda$ optimised with direct second-order gradients produces task weightings very similar to those obtained with the approximated first-order gradients ( $< 0.05$ average difference across training time in all three tasks), resulting in near-identical multi-task performance. In addition, we found that using first-order gradients speeds up training by roughly $2.3\times$ .
|
| 225 |
+
|
| 226 |
+
In Table 4, we show that initialising with a small weighting and choosing a suitable learning rate are important for achieving good performance. A larger learning rate leads to saturated weightings, which cause unstable network optimisation, and a larger initialisation does not successfully avoid negative transfer. In addition, optimising the network parameters and the task weightings with different data is also essential (to properly measure generalisation); otherwise, performance slightly decreases.
|
| 227 |
+
|
| 228 |
+

|
| 229 |
+
Figure 6: Mean and the range of per-task weighting difference for Auto-λ [3 Tasks] optimised with direct and approximated gradients in NYUv2 dataset.
|
| 230 |
+
|
| 231 |
+
<table><tr><td rowspan="2"></td><td colspan="4">Task Weightings</td><td rowspan="2">ΔMTL</td></tr><tr><td>Sem. Seg.</td><td>Depth</td><td>Normal</td><td>Noise</td></tr><tr><td>Init = 0.01</td><td>0.97</td><td>0.95</td><td>1.1</td><td>0.02</td><td>+8.98%</td></tr><tr><td>Init = 1.0</td><td>2.00</td><td>2.11</td><td>2.08</td><td>1.00</td><td>+1.42%</td></tr><tr><td>LR = 3·10<sup>-5</sup></td><td>0.43</td><td>0.37</td><td>0.46</td><td>0.11</td><td>+8.53%</td></tr><tr><td>LR = 3·10<sup>-4</sup></td><td>3.10</td><td>3.34</td><td>3.26</td><td>0.15</td><td>+8.56%</td></tr><tr><td>LR = 1·10<sup>-3</sup></td><td>10.5</td><td>10.5</td><td>10.3</td><td>0.23</td><td>+5.04%</td></tr><tr><td>No Swapping</td><td>2.67</td><td>2.76</td><td>2.98</td><td>0.20</td><td>+8.17%</td></tr><tr><td>Our Setting</td><td>1.11</td><td>1.06</td><td>1.26</td><td>0.12</td><td>+9.66%</td></tr></table>
|
| 232 |
+
|
| 233 |
+
# 7.2 Comparison to Gradient-based Methods
|
| 234 |
+
|
| 235 |
+
Finally, since Auto- $\lambda$ is a weighting-based optimisation method, it can naturally be combined with gradient-based methods to further improve performance. We evaluated Auto- $\lambda$ along with the other weighting-based baselines described in Sec. 5, when combined with recently proposed state-of-the-art gradient-based methods designed for multi-task learning: GradDrop [3], PCGrad [57] and CAGrad [30]. We trained all
|
| 236 |
+
|
| 237 |
+
Table 4: Relative multi-task performance in NYUv2 dataset trained with Auto- $\lambda$ [3 Tasks] with different hyper-parameters. The default setting is Init $= 0.1$ , $LR = 1\cdot 10^{-4}$ and with training data swapping.
|
| 238 |
+
|
| 239 |
+
<table><tr><td></td><td>Equal</td><td>DWA</td><td>Uncertainty</td><td>Auto-λ</td></tr><tr><td>Vanilla</td><td>+3.57%</td><td>+4.58%</td><td>+6.50%</td><td>+8.21%</td></tr><tr><td>+ GradDrop</td><td>+4.65%</td><td>+5.93%</td><td>+6.22%</td><td>+8.12%</td></tr><tr><td>+ PCGrad</td><td>+5.09%</td><td>+4.37%</td><td>+6.20%</td><td>+8.50%</td></tr><tr><td>+ CAGrad</td><td>+7.05%</td><td>+8.08%</td><td>+9.65%</td><td>+11.07%</td></tr></table>
|
| 240 |
+
|
| 241 |
+
Table 5: NYUv2 relative multi-task performance trained with both weighting-based and gradient-based methods in the multi-task learning setting.
|
| 242 |
+
|
| 243 |
+
methods on the NYUv2 dataset with the standard 3 tasks in the multi-task learning setup.
|
| 244 |
+
|
| 245 |
+
In Table 5, we can observe that Auto- $\lambda$ remains the best optimisation method even compared to other gradient-based methods in the vanilla setting (with Equal weighting). Further, combined with a more advanced gradient-based method such as CAGrad, Auto- $\lambda$ can reach even higher performance.
|
| 246 |
+
|
| 247 |
+
# 7.3 Comparison to Strong Regularisation Methods
|
| 248 |
+
|
| 249 |
+
Finally, recent works [28, 24] suggested that many multi-task optimisation methods can be interpreted as forms of implicit regularisation. They showed that when using strong regularisation and stabilisation techniques from single-task learning, training by simply minimising the sum of task losses, or with randomly generated task weightings, can achieve performance competitive with complex multi-task methods.
|
| 250 |
+
|
| 251 |
+
As such, we now evaluate Auto- $\lambda$ , along with all multi-task baselines evaluated in our Experiments section, as well as all multi-task methods included in the original work of [24], coupled with this strong regularisation, on the CelebA dataset [36]: a challenging 40-task classification problem. We trained these multi-task methods with exactly the same experimental setting as [24] for a fair comparison. In total, we compared against: Unit. Scal.
|
| 252 |
+
|
| 253 |
+

|
| 254 |
+
(a) Mean and the range (3 runs) for the averaged task test accuracy
|
| 255 |
+
|
| 256 |
+

|
| 257 |
+
(b) Mean per-epoch training time (10 repetitions)
|
| 258 |
+
Figure 7: All multi-task methods perform the same as or worse than Unit. Scal. on the CelebA dataset trained with strong regularisation, except Auto-λ. Part of the results are directly borrowed from [24].
|
| 259 |
+
|
| 260 |
+
[24], DWA [35], RLW (with weights sampled from a Dirichlet and a Normal Distribution) [28], IMTL [33], MGDA [47], GradDrop [3], PCGrad [57], CAGrad [30], for a total of 10 multi-task optimisation methods.
|
| 261 |
+
|
| 262 |
+
To our surprise, though most methods achieve similar performance, which is consistent with the findings in [24], Auto- $\lambda$ is still able to improve performance (marginally in the multi-task learning setting, and significantly in the auxiliary learning setting) with clear statistical significance. The improvement is especially pronounced in the auxiliary learning mode, which is unique to Auto- $\lambda$ , showing that the generalisation Auto- $\lambda$ imposes on the multi-task network goes beyond implicit regularisation.
|
| 263 |
+
|
| 264 |
+
In addition, we also compared training time across these multi-task methods, rescaling the training time of our implementation to [24]'s setting for a fair comparison. We can observe that Auto- $\lambda$ requires roughly three times the training time of Unit. Scal. (Equal weighting) [24], consistent with its theoretical design, since Auto- $\lambda$ needs to compute two additional forward passes and two additional backward passes to approximate the second-order gradients. Though Auto- $\lambda$ requires longer training time, it outperforms other multi-task methods, and is still an order of magnitude faster than some gradient-based methods such as PCGrad [57] and CAGrad [30].
|
| 265 |
+
|
| 266 |
+
# 8 Conclusions, Limitations and Discussion
|
| 267 |
+
|
| 268 |
+
In this paper, we have presented Auto- $\lambda$ , a unified multi-task and auxiliary learning optimisation framework. Auto- $\lambda$ operates by exploring task relationships in the form of task weightings in the loss function, which are allowed to change dynamically throughout training. This allows suitable weightings to be determined at any point during training, and hence a better learning schedule can emerge than if these weightings were fixed throughout training. Auto- $\lambda$ achieves state-of-the-art performance in both computer vision and robotics benchmarks, for both multi-task learning and auxiliary learning, even when compared to optimisation methods that are specifically designed for just one of those two settings.
|
| 269 |
+
|
| 270 |
+
For transparency, we now discuss some limitations of Auto- $\lambda$ that we noted during our implementation, and share our thoughts on future directions for this work.
|
| 271 |
+
|
| 272 |
+
Advanced Training Strategies To achieve optimal performance, Auto- $\lambda$ still requires a hyper-parameter search (although the performance is primarily sensitive to only one parameter, the learning rate, making this search relatively simple). Some advanced training techniques, such as incorporating decay or bounds on the task weightings, might help find a general set of hyper-parameters that works across all datasets.
|
| 273 |
+
|
| 274 |
+
Training Speed The design of Auto- $\lambda$ requires computing second-order gradients, which is computationally expensive. To address this, we applied a finite-difference approximation scheme to reduce the complexity, which adds only two forward passes and two backward passes. However, this may still be slower than alternative optimisation methods.
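As an illustration, the sketch below shows a DARTS-style finite-difference approximation of the mixed second-order term that appears in a weighting update of this kind: it replaces explicit second-order backpropagation with gradient evaluations at two perturbed copies of the network parameters, costing the two extra forward passes and two extra backward passes mentioned above. The interface (how the validation gradients and the weighted training loss are supplied) is an assumption for illustration, not the released implementation.

```python
import torch

def finite_diff_hypergrad(model, lambdas, train_loss_fn, val_grads, eps_scale=0.01):
    """Approximate the mixed second-order term (d^2 L_train / d lambda d theta) @ v,
    with v the validation gradients, by a central finite difference: evaluate
    grad_lambda of the weighted training loss at theta + eps*v and theta - eps*v."""
    params = [p for p in model.parameters() if p.requires_grad]
    eps = eps_scale / torch.cat([g.reshape(-1) for g in val_grads]).norm()

    def grad_lambda_at(sign):
        with torch.no_grad():                       # shift parameters by +/- eps * v
            for p, g in zip(params, val_grads):
                p.add_(sign * eps * g)
        g_lam, = torch.autograd.grad(train_loss_fn(lambdas), lambdas)
        with torch.no_grad():                       # restore the original parameters
            for p, g in zip(params, val_grads):
                p.sub_(sign * eps * g)
        return g_lam

    # one extra forward + backward pass per evaluation, so two of each in total
    return (grad_lambda_at(+1.0) - grad_lambda_at(-1.0)) / (2 * eps)

# toy usage with a placeholder network and squared-error task losses
model = torch.nn.Linear(16, 3)
lambdas = torch.full((3,), 0.1, requires_grad=True)
x, y = torch.randn(8, 16), torch.randn(8, 3)
train_loss_fn = lambda lam: (lam * ((model(x) - y) ** 2).mean(dim=0)).sum()
val_grads = torch.autograd.grad(((model(x) - y) ** 2).mean(), model.parameters())
print(finite_diff_hypergrad(model, lambdas, train_loss_fn, val_grads))
```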
|
| 275 |
+
|
| 276 |
+
Single Task Decomposition Auto- $\lambda$ can optimise on any type of task. Therefore, it is natural to consider a compositional design, where we decompose a single task into multiple small sub-tasks, e.g. decomposing a multi-stage manipulation task into a sequence of stages. Applying Auto- $\lambda$ on these sub-tasks might enable us to explore interesting learning behaviours to improve single-task learning efficiency.
|
| 277 |
+
|
| 278 |
+
Open-ended Learning Given the dynamic structure of the tasks explored by Auto- $\lambda$ , it would be interesting to study whether Auto- $\lambda$ could be incorporated into an open-ended learning system, where tasks are continually added during training. The flexibility of Auto- $\lambda$ to dynamically optimise task relationships may naturally facilitate open-ended learning in this way, without requiring manual selection of hyper-parameters for each new task.
|
| 279 |
+
|
| 280 |
+
# Acknowledgement
|
| 281 |
+
|
| 282 |
+
This work has been supported by Dyson Technology Ltd.
|
| 283 |
+
|
| 284 |
+
# References
|
| 285 |
+
|
| 286 |
+
[1] Sungyong Baik, Myungsub Choi, Janghoon Choi, Heewon Kim, and Kyoung Mu Lee. Meta-learning with adaptive hyperparameters. Advances in Neural Information Processing Systems (NeurIPS), 2020.
|
| 287 |
+
[2] Zhao Chen, Vijay Badrinarayanan, Chen-Yu Lee, and Andrew Rabinovich. Gradnorm: Gradient normalization for adaptive loss balancing in deep multitask networks. In Proceedings of the International Conference on Machine Learning (ICML), 2018.
|
| 288 |
+
[3] Zhao Chen, Jiquan Ngiam, Yanping Huang, Thang Luong, Henrik Kretzschmar, Yuning Chai, and Dragomir Anguelov. Just pick a sign: Optimizing deep multitask models with gradient sign dropout. In Advances in Neural Information Processing Systems (NeurIPS), 2020.
|
| 289 |
+
[4] Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016.
|
| 290 |
+
[5] Daan de Geus, Panagiotis Meletis, Chenyang Lu, Xiaoxiao Wen, and Gijs Dubbelman. Part-aware panoptic segmentation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021.
|
| 291 |
+
[6] Yunshu Du, Wojciech M Czarnecki, Siddhant M Jayakumar, Razvan Pascanu, and Balaji Lakshminarayanan. Adapting auxiliary losses using gradient similarity. arXiv preprint arXiv:1812.02224, 2018.
|
| 292 |
+
[7] Kshitij Dwivedi and Gemma Roig. Representation similarity analysis for efficient task taxonomy & transfer learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019.
|
| 293 |
+
[8] Christopher Fifty, Ehsan Amid, Zhe Zhao, Tianhe Yu, Rohan Anil, and Chelsea Finn. Efficiently identifying task groupings for multi-task learning. In Advances in Neural Information Processing Systems (NeurIPS), 2021.
|
| 294 |
+
[9] Chelsea Finn, Pieter Abbeel, and Sergey Levine. Model-agnostic meta-learning for fast adaptation of deep networks. In Proceedings of the International Conference on Machine Learning (ICML), 2017.
|
| 295 |
+
[10] Luca Franceschi, Paolo Frasconi, Saverio Salzo, Riccardo Grazzi, and Massimiliano Pontil. Bilevel programming for hyperparameter optimization and meta-learning. In Proceedings of the International Conference on Machine Learning (ICML), 2018.
|
| 296 |
+
[11] Yuan Gao, Haoping Bai, Zequn Jie, Jiayi Ma, Kui Jia, and Wei Liu. Mtl-nas: Task-agnostic neural architecture search towards general-purpose multi-task learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020.
|
| 297 |
+
|
| 298 |
+
[12] Michelle Guo, Albert Haque, De-An Huang, Serena Yeung, and Li Fei-Fei. Dynamic task prioritization for multitask learning. In Proceedings of the European Conference on Computer Vision (ECCV), 2018.
|
| 299 |
+
[13] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016.
|
| 300 |
+
[14] Falk Heuer, Sven Mantowsky, Saqib Bukhari, and Georg Schneider. Multitask-centernet (mcn): Efficient and diverse multitask learning using an anchor free approach. In Proceedings of the International Conference on Computer Vision (ICCV), 2021.
|
| 301 |
+
[15] Timothy Hospedales, Antreas Antoniou, Paul Micaelli, and Amos Storkey. Meta-learning in neural networks: A survey. arXiv preprint arXiv:2004.05439, 2020.
|
| 302 |
+
[16] Stephen James and Andrew J Davison. Q-attention: Enabling efficient learning for vision-based robotic manipulation. IEEE Robotics and Automation Letters, 2021.
|
| 303 |
+
[17] Stephen James, Andrew J Davison, and Edward Johns. Transferring end-to-end visuomotor control from simulation to real world for a multi-stage task. Conference on Robot Learning (CoRL), 2017.
|
| 304 |
+
[18] Stephen James, Zicong Ma, David Rovick Arrojo, and Andrew J Davison. Rlbench: The robot learning benchmark & learning environment. IEEE Robotics and Automation Letters, 2020.
|
| 305 |
+
[19] Adrián Javaloy and Isabel Valera. Rotograd: Dynamic gradient homogenization for multi-task learning. In Proceedings of the International Conference on Learning Representations (ICLR), 2022.
|
| 306 |
+
[20] Jean Kaddour, Steindór Sæmundsson, et al. Probabilistic active meta-learning. In Advances in Neural Information Processing Systems (NeurIPS), 2020.
|
| 307 |
+
[21] Alex Kendall, Yarin Gal, and Roberto Cipolla. Multi-task learning using uncertainty to weigh losses for scene geometry and semantics. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018.
|
| 308 |
+
[22] Iasonas Kokkinos. Übernet: Training a universal convolutional neural network for low-, mid-, and high-level vision using diverse datasets and limited memory. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017.
|
| 309 |
+
[23] Alex Krizhevsky. Learning multiple layers of features from tiny images. 2009.
|
| 310 |
+
[24] Vitaly Kurin, Alessandro De Palma, Ilya Kostrikov, Shimon Whiteson, and M Pawan Kumar. In defense of the unitary scalarization for deep multi-task learning. arXiv preprint arXiv:2201.04122, 2022.
|
| 311 |
+
[25] Giwoong Lee, Eunho Yang, and Sung Ju Hwang. Asymmetric multi-task learning based on task relatedness and loss. In Proceedings of the International Conference on Machine Learning (ICML), 2016.
|
| 312 |
+
|
| 313 |
+
[26] Hae Beom Lee, Eunho Yang, and Sung Ju Hwang. Deep asymmetric multi-task feature learning. In Proceedings of the International Conference on Machine Learning (ICML), 2018.
|
| 314 |
+
[27] Sergey Levine, Chelsea Finn, Trevor Darrell, and Pieter Abbeel. End-to-end training of deep visuomotor policies. Journal of Machine Learning Research, 2016.
|
| 315 |
+
[28] Baijiong Lin, Feiyang Ye, and Yu Zhang. A closer look at loss weighting in multi-task learning. arXiv preprint arXiv:2111.10603, 2021.
|
| 316 |
+
[29] Xi Lin, Hui-Ling Zhen, Zhenhua Li, Qing-Fu Zhang, and Sam Kwong. Pareto multi-task learning. In Advances in Neural Information Processing Systems (NeurIPS), 2019.
|
| 317 |
+
[30] Bo Liu, Xingchao Liu, Xiaojie Jin, Peter Stone, and Qiang Liu. Conflict-averse gradient descent for multitask learning. In Advances in Neural Information Processing Systems (NeurIPS), 2021.
|
| 318 |
+
[31] Chenghao Liu, Zhihao Wang, Doyen Sahoo, Yuan Fang, Kun Zhang, and Steven C. H. Hoi. Adaptive task sampling for meta-learning. In Proceedings of the European Conference on Computer Vision (ECCV), 2020.
|
| 319 |
+
[32] Hanxiao Liu, Karen Simonyan, and Yiming Yang. Darts: Differentiable architecture search. In Proceedings of the International Conference on Learning Representations (ICLR), 2019.
|
| 320 |
+
[33] Liyang Liu, Yi Li, Zhanghui Kuang, J Xue, Yimin Chen, Wenming Yang, Qingmin Liao, and Wayne Zhang. Towards impartial multi-task learning. In Proceedings of the International Conference on Learning Representations (ICLR), 2021.
|
| 321 |
+
[34] Shikun Liu, Andrew J Davison, and Edward Johns. Self-supervised generalisation with meta auxiliary learning. In Advances in Neural Information Processing Systems (NeurIPS), 2019.
|
| 322 |
+
[35] Shikun Liu, Edward Johns, and Andrew J Davison. End-to-end multi-task learning with attention. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019.
|
| 323 |
+
[36] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaoou Tang. Deep learning face attributes in the wild. In Proceedings of the International Conference on Computer Vision (ICCV), 2015.
|
| 324 |
+
[37] Dougal Maclaurin, David Duvenaud, and Ryan Adams. Gradient-based hyperparameter optimization through reversible learning. In Proceedings of the International Conference on Machine Learning (ICML), 2015.
|
| 325 |
+
[38] Kevis-Kokitsi Maninis, Ilija Radosavovic, and Iasonas Kokkinos. Attentive single-tasking of multiple tasks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019.
|
| 326 |
+
[39] Paul Michel, Sebastian Ruder, and Dani Yogatama. Balancing average and worst-case accuracy in multitask learning. arXiv preprint arXiv:2110.05838, 2021.
|
| 327 |
+
|
| 328 |
+
[40] Ishan Misra, Abhinav Shrivastava, Abhinav Gupta, and Martial Hebert. Cross-stitch networks for multi-task learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016.
|
| 329 |
+
[41] Nathan Silberman, Derek Hoiem, Pushmeet Kohli, and Rob Fergus. Indoor segmentation and support inference from rgbd images. In Proceedings of the European Conference on Computer Vision (ECCV), 2012.
|
| 330 |
+
[42] Aviv Navon, Idan Achituve, Haggai Maron, Gal Chechik, and Ethan Fetaya. Auxiliary learning by implicit differentiation. In Proceedings of the International Conference on Learning Representations (ICLR), 2021.
|
| 331 |
+
[43] Aviv Navon, Aviv Shamsian, Idan Achituve, Haggai Maron, Kenji Kawaguchi, Gal Chechik, and Ethan Fetaya. Multi-task learning as a bargaining game. arXiv preprint arXiv:2202.01017, 2022.
|
| 332 |
+
[44] Alex Nichol, Joshua Achiam, and John Schulman. On first-order meta-learning algorithms. arXiv preprint arXiv:1803.02999, 2018.
|
| 333 |
+
[45] Clemens Rosenbaum, Tim Klinger, and Matthew Riemer. Routing networks: Adaptive selection of non-linear functions for multi-task learning. In Proceedings of the International Conference on Learning Representations (ICLR), 2018.
|
| 334 |
+
[46] Tom Schaul, John Quan, Ioannis Antonoglou, and David Silver. Prioritized experience replay. In Proceedings of the International Conference on Learning Representations (ICLR), 2016.
|
| 335 |
+
[47] Ozan Sener and Vladlen Koltun. Multi-task learning as multi-objective optimization. In Advances in Neural Information Processing Systems (NeurIPS), 2018.
|
| 336 |
+
[48] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. In Proceedings of the International Conference on Learning Representations (ICLR), 2015.
|
| 337 |
+
[49] Trevor Standley, Amir Zamir, Dawn Chen, Leonidas Guibas, Jitendra Malik, and Silvio Savarese. Which tasks should be learned together in multi-task learning? In Proceedings of the International Conference on Machine Learning (ICML), 2020.
|
| 338 |
+
[50] Ximeng Sun, Rameswar Panda, Rogerio Feris, and Kate Saenko. Adashare: Learning what to share for efficient deep multi-task learning. Advances in Neural Information Processing Systems (NeurIPS), 2020.
|
| 339 |
+
[51] Simon Vandenhende, Stamatios Georgoulis, and Luc Van Gool. Mti-net: Multi-scale task interaction networks for multi-task learning. Proceedings of the European Conference on Computer Vision (ECCV), 2020.
|
| 340 |
+
[52] Ricardo Vilalta and Youssef Drissi. A perspective view and survey of meta-learning. Artificial intelligence review, 2002.
|
| 341 |
+
[53] Haoxiang Wang, Han Zhao, and Bo Li. Bridging multi-task learning and meta-learning: Towards efficient training and effective adaptation. In Proceedings of the International Conference on Machine Learning (ICML), 2021.
|
| 342 |
+
|
| 343 |
+
[54] Dan Xu, Wanli Ouyang, Xiaogang Wang, and Nicu Sebe. Pad-net: Multi-tasks guided prediction-and-distillation network for simultaneous depth estimation and scene parsing. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018.
|
| 344 |
+
[55] Feiyang Ye, Baijiong Lin, Zhixiong Yue, Pengxin Guo, Qiao Xiao, and Yu Zhang. Multi-objective meta learning. In Advances in Neural Information Processing Systems (NeurIPS), 2021.
|
| 345 |
+
[56] Teresa Yeo, Oğuzhan Fatih Kar, and Amir Zamir. Robustness via cross-domain ensembles. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021.
|
| 346 |
+
[57] Tianhe Yu, Saurabh Kumar, Abhishek Gupta, Sergey Levine, Karol Hausman, and Chelsea Finn. Gradient surgery for multi-task learning. Advances in Neural Information Processing Systems (NeurIPS), 2020.
|
| 347 |
+
[58] Amir R Zamir, Alexander Sax, Nikhil Cheerla, Rohan Suri, Zhangjie Cao, Jitendra Malik, and Leonidas J Guibas. Robust learning through cross-task consistency. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020.
|
| 348 |
+
[59] Amir R Zamir, Alexander Sax, William Shen, Leonidas J Guibas, Jitendra Malik, and Silvio Savarese. Taskonomy: Disentangling task transfer learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018.
|
| 349 |
+
|
| 350 |
+
# A Detailed Training Strategies
|
| 351 |
+
|
| 352 |
+
For dense prediction tasks, we followed the same training setup as MTAN, based on the code made publicly available by the authors [35]. We trained Auto- $\lambda$ with learning rates of $10^{-4}$ and $3\cdot 10^{-5}$ for NYUv2 and CityScapes respectively.
|
| 353 |
+
|
| 354 |
+
For multi-domain classification tasks, we trained both the single-task and multi-task models with SGD with momentum, using an initial learning rate of 0.1, momentum of 0.9, and weight decay of $5 \cdot 10^{-4}$. We applied cosine annealing to decay the learning rate over a total of 200 training epochs. We used a batch size of 32 and trained Auto- $\lambda$ with a learning rate of $3 \cdot 10^{-4}$.
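A minimal PyTorch sketch of this configuration is given below; the placeholder network, the absence of data loading, and the choice of Adam for the weighting optimiser are assumptions, and only the optimiser and schedule settings mirror the text.

```python
import torch

# placeholder network and weightings for the 20-domain classification setup
model = torch.nn.Linear(3 * 32 * 32, 20 * 5)
lambdas = torch.full((20,), 0.1, requires_grad=True)

optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
lambda_optimizer = torch.optim.Adam([lambdas], lr=3e-4)   # weighting LR from the text

for epoch in range(200):
    # one pass over the training set (batch size 32) with the weighted
    # multi-task objective would go here
    pass
    scheduler.step()
```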
|
| 355 |
+
|
| 356 |
+
For robot manipulation tasks, we trained with Adam with a constant learning rate of $10^{-3}$ for 8000 iterations. We used a batch size of 32 and trained Auto- $\lambda$ with a learning rate of $3 \cdot 10^{-5}$.
|
| 357 |
+
|
| 358 |
+
# B Detailed Experimental Setting for Robotic Manipulation Tasks
|
| 359 |
+
|
| 360 |
+
Naively applying behaviour cloning (e.g. mapping observations to joint velocities or end-effector incremental poses) for robot manipulation tasks often requires thousands of demonstrations [17]. To circumvent that, we first pre-processed the demonstrations by running keyframe discovery [16]: a process that iterates over each of the demo trajectories and outputs the transitions where interesting things happen, e.g. a change in gripper state, or velocities approaching zero. The result of keyframe discovery is a small number of end-effector poses and gripper actions for each of the demonstrations, essentially splitting the task into a set of simple stages. The goal of our behaviour cloning setup is to predict these end-effector poses and gripper actions for new task configurations. Training data was then acquired by first collecting 100 demonstrations for each task, and then running keyframe discovery to split each task into a small number of simple stages, creating our behaviour cloning dataset.
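A simplified sketch of this keyframe-discovery heuristic is shown below; the trajectory format, field names, and velocity threshold are assumptions for illustration, and [16] describes the full procedure.

```python
import numpy as np

def discover_keyframes(gripper_open, joint_velocities, vel_eps=1e-3):
    """gripper_open: (T,) array of binary gripper states;
    joint_velocities: (T, D) array of joint velocities.
    Returns indices of 'interesting' transitions within one demonstration."""
    keyframes = []
    for t in range(1, len(gripper_open)):
        changed_gripper = gripper_open[t] != gripper_open[t - 1]
        near_zero_velocity = np.linalg.norm(joint_velocities[t]) < vel_eps
        if changed_gripper or near_zero_velocity:
            keyframes.append(t)
    return keyframes

# toy usage on a synthetic 5-step demonstration: the gripper closes at step 2
# and the arm comes to rest at steps 3-4
demo_gripper = np.array([1, 1, 0, 0, 0])
demo_velocities = np.array([[0.2], [0.1], [0.05], [0.0], [0.0]])
print(discover_keyframes(demo_gripper, demo_velocities))   # -> [2, 3, 4]
```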
|
| 361 |
+
|
| 362 |
+
We optimised an encoder-decoder network which takes as inputs the RGB images and point clouds captured by three different cameras (left shoulder, right shoulder and wrist camera), and outputs a continuous 6D pose and a discrete gripper action. The 6D pose is composed of a 3-dimensional vector encoding spatial position and a 4-dimensional vector encoding rotation (parameterised by a unit quaternion); the gripper action is represented by a binary scalar indicating gripper open and close. The position and rotation are learned through two separate decoders.
|
| 363 |
+
|
| 364 |
+

|
| 365 |
+
Figure 8: Visualisation of the network design for RLBench.
|
| 366 |
+
|
| 367 |
+
The position decoder predicts attention maps based on the RGB images; we then apply a spatial (soft) argmax [27] on the corresponding point cloud to output the 3D spatial position of the attended pixel. We additionally optimised a position offset for each stage of the task, so that the predicted position is not bounded by the positions visible in the images. The rotation decoder predicts the quaternion and gripper action via direct regression. A learnable task embedding is fed to the network bottleneck for multi-task and auxiliary learning.
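For clarity, the position decoder's read-out can be sketched as below: a softmax over the predicted attention map gives per-pixel weights, the expectation over the aligned point cloud gives the 3D position, and a learnable per-stage offset is added. Tensor shapes and names are illustrative assumptions, not the exact network code.

```python
import torch
import torch.nn.functional as F

def soft_argmax_position(attention_logits, point_cloud, stage_offset):
    """attention_logits: (B, H, W) output of the position decoder;
    point_cloud: (B, H, W, 3) per-pixel 3D points aligned with the RGB image;
    stage_offset: (B, 3) learnable offset for the current task stage."""
    B, H, W = attention_logits.shape
    weights = F.softmax(attention_logits.view(B, H * W), dim=-1)          # (B, H*W)
    points = point_cloud.view(B, H * W, 3)                                # (B, H*W, 3)
    expected_point = torch.bmm(weights.unsqueeze(1), points).squeeze(1)   # (B, 3)
    return expected_point + stage_offset

# toy usage: the attention peaks on one pixel, so the output is close to its 3D point
logits = torch.tensor([[[0.0, 5.0], [0.0, 0.0]]])                         # (1, 2, 2)
cloud = torch.arange(12, dtype=torch.float32).view(1, 2, 2, 3)            # (1, 2, 2, 3)
print(soft_argmax_position(logits, cloud, torch.zeros(1, 3)))
```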
|
| 368 |
+
|
| 369 |
+
# C Auto- $\lambda$ Learned Weightings for NYUv2 and CityScapes
|
| 370 |
+
|
| 371 |
+
We found that the task relationships in the NYUv2 and CityScapes datasets are usually static from the beginning of training (except for NYUv2 [3 Tasks], where we can observe a clear weighting cross-over).
|
| 372 |
+
|
| 373 |
+

|
| 374 |
+
|
| 375 |
+

|
| 376 |
+
|
| 377 |
+

|
| 378 |
+
|
| 379 |
+

|
| 380 |
+
|
| 381 |
+

|
| 382 |
+
CityScapes [Sem. Seg.]
|
| 383 |
+
|
| 384 |
+

|
| 385 |
+
CityScapes [Part Seg.]
|
| 386 |
+
|
| 387 |
+

|
| 388 |
+
CityScapes [Disp.]
|
| 389 |
+
Figure 9: Learning dynamics of Auto- $\lambda$ optimised on various choices of primary tasks in the auxiliary learning setup with Split architecture.
|
| 390 |
+
|
| 391 |
+

|
| 392 |
+
CityScapes [3 Tasks]
|
| 393 |
+
|
| 394 |
+
# D Auto- $\lambda$ Learned Weightings for RLBench
|
| 395 |
+
|
| 396 |
+
The relationships vary more wildly in the RLBench tasks, where we can observe multiple weighting cross-overs at different training stages.
|
| 397 |
+
|
| 398 |
+

|
| 399 |
+
|
| 400 |
+

|
| 401 |
+
|
| 402 |
+

|
| 403 |
+
|
| 404 |
+

|
| 405 |
+
|
| 406 |
+

|
| 407 |
+
|
| 408 |
+

|
| 409 |
+
Take Money Out Safe
|
| 410 |
+
|
| 411 |
+

|
| 412 |
+
Put Money In Safe
|
| 413 |
+
|
| 414 |
+

|
| 415 |
+
Pick Up Umbrella
|
| 416 |
+
|
| 417 |
+

|
| 418 |
+
Stack Wine
|
| 419 |
+
|
| 420 |
+

|
| 421 |
+
Slide Block To Target
|
| 422 |
+
Figure 10: Learning dynamics of Auto- $\lambda$ optimised on each individual task in the auxiliary learning setup for 10 RLBench tasks. We list the 3 tasks with the highest task weightings in each setting.
|
| 423 |
+
|
| 424 |
+
# E Auto- $\lambda$ Learned Weightings for CIFAR-100
|
| 425 |
+
|
| 426 |
+
Interestingly, in the multi-task learning setting of the multi-domain classification tasks (last row of Fig. 11), we can see a clear correlation between task weighting and single-task learning performance, where a higher weighting is applied to the more difficult domains (those with low single-task learning performance). For example, 'People' and 'Vehicles 2', which have the lowest and highest single-task learning performance respectively, were assigned the highest and the lowest task weightings respectively.
|
| 427 |
+
|
| 428 |
+
<table><tr><td>ID 1</td><td>Aquatic Mammals</td><td>ID 2</td><td>Fish</td><td>ID 3</td><td>Flowers</td><td>ID 4</td><td>Food Containers</td></tr><tr><td>ID 5</td><td>Fruit and Vegetables</td><td>ID 6</td><td>Household Electrical Devices</td><td>ID 7</td><td>Household Furniture</td><td>ID 8</td><td>Insects</td></tr><tr><td>ID 9</td><td>Large Carnivores</td><td>ID 10</td><td>Large Man-made Outdoor Things</td><td>ID 11</td><td>Large Natural Outdoor Scenes</td><td>ID 12</td><td>Large Omnivores and Herbivores</td></tr><tr><td>ID 13</td><td>Medium-sized Mammals</td><td>ID 14</td><td>Non-insect Invertebrates</td><td>ID 15</td><td>People</td><td>ID 16</td><td>Reptiles</td></tr><tr><td>ID 17</td><td>Small Mammals</td><td>ID 18</td><td>Trees</td><td>ID 19</td><td>Vehicles 1</td><td>ID 20</td><td>Vehicles 2</td></tr></table>
|
| 429 |
+
|
| 430 |
+
Table 6: The description of each domain ID in multi-domain CIFAR-100 dataset.
|
| 431 |
+
|
| 432 |
+
<table><tr><td>CIFAR-100</td><td>Method</td><td>ID 1</td><td>ID 2</td><td>ID 3</td><td>ID 4</td><td>ID 5</td><td>ID 6</td><td>ID 7</td><td>ID 8</td><td>ID 9</td><td>ID 10</td></tr><tr><td>Single-Task</td><td>-</td><td>68.65</td><td>81.00</td><td>82.34</td><td>83.71</td><td>89.10</td><td>88.72</td><td>84.75</td><td>85.88</td><td>87.07</td><td>90.15</td></tr><tr><td rowspan="4">Multi-Task</td><td>Equal</td><td>73.59</td><td>82.36</td><td>79.78</td><td>83.94</td><td>89.14</td><td>87.03</td><td>83.73</td><td>85.87</td><td>86.67</td><td>89.86</td></tr><tr><td>Uncertainty</td><td>70.62</td><td>81.01</td><td>80.46</td><td>83.59</td><td>88.06</td><td>86.83</td><td>82.96</td><td>86.46</td><td>87.40</td><td>89.58</td></tr><tr><td>DWA</td><td>71.54</td><td>82.12</td><td>81.60</td><td>83.22</td><td>89.70</td><td>86.64</td><td>82.57</td><td>86.17</td><td>87.34</td><td>90.19</td></tr><tr><td>Auto-λ</td><td>74.00</td><td>83.96</td><td>81.30</td><td>83.57</td><td>88.69</td><td>87.85</td><td>84.57</td><td>87.75</td><td>88.04</td><td>92.03</td></tr><tr><td rowspan="2">Auxiliary-Task</td><td>GCS</td><td>71.05</td><td>82.27</td><td>80.31</td><td>83.36</td><td>87.07</td><td>85.94</td><td>83.05</td><td>86.80</td><td>87.54</td><td>89.34</td></tr><tr><td>Auto-λ</td><td>75.70</td><td>84.39</td><td>82.71</td><td>84.64</td><td>90.23</td><td>88.02</td><td>85.52</td><td>87.36</td><td>89.04</td><td>92.20</td></tr><tr><td></td><td>Method</td><td>ID 11</td><td>ID 12</td><td>ID 13</td><td>ID 14</td><td>ID 15</td><td>ID 16</td><td>ID 17</td><td>ID 18</td><td>ID 19</td><td>ID 20</td></tr><tr><td>Single-Task</td><td>-</td><td>89.76</td><td>84.88</td><td>90.33</td><td>84.41</td><td>55.37</td><td>75.84</td><td>72.79</td><td>75.37</td><td>91.48</td><td>94.69</td></tr><tr><td rowspan="4">Multi-Task</td><td>Equal</td><td>89.21</td><td>86.40</td><td>89.45</td><td>85.52</td><td>57.73</td><td>76.69</td><td>74.41</td><td>74.64</td><td>90.64</td><td>94.21</td></tr><tr><td>Uncertainty</td><td>89.80</td><td>87.07</td><td>89.76</td><td>85.64</td><td>54.14</td><td>75.62</td><td>74.08</td><td>74.62</td><td>90.83</td><td>89.54</td></tr><tr><td>DWA</td><td>89.08</td><td>85.91</td><td>89.39</td><td>85.15</td><td>55.25</td><td>76.26</td><td>74.12</td><td>75.68</td><td>90.95</td><td>94.33</td></tr><tr><td>Auto-λ</td><td>90.05</td><td>88.00</td><td>91.25</td><td>84.98</td><td>57.57</td><td>77.55</td><td>75.05</td><td>75.15</td><td>91.87</td><td>95.19</td></tr><tr><td rowspan="2">Auxiliary-Task</td><td>GCS</td><td>89.80</td><td>85.59</td><td>89.41</td><td>85.70</td><td>56.45</td><td>76.29</td><td>72.93</td><td>74.45</td><td>90.31</td><td>93.98</td></tr><tr><td>Auto-λ</td><td>90.82</td><td>87.32</td><td>90.76</td><td>86.56</td><td>60.89</td><td>81.75</td><td>75.64</td><td>77.38</td><td>91.58</td><td>95.87</td></tr></table>
|
| 433 |
+
|
| 434 |
+
Table 7: The complete performance of 20 tasks in multi-domain CIFAR-100 dataset with multi-task and auxiliary learning methods.
|
| 435 |
+
|
| 436 |
+

|
| 437 |
+
Figure 11: Visualisation of the learned weightings of Auto- $\lambda$ in the auxiliary learning and multi-task learning setups.
|
2202.03xxx/2202.03091/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1f12678724b6751a0cbc424c77b3ae2ac60e21915623b83ab17c59c46bc48545
|
| 3 |
+
size 1301251
|
2202.03xxx/2202.03091/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.03xxx/2202.03104/5ca1dd62-6f9a-4416-b862-99d48cad9bd8_content_list.json
ADDED
|
@@ -0,0 +1,1941 @@
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "SimGRACE: A Simple Framework for Graph Contrastive Learning without Data Augmentation",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
135,
|
| 8 |
+
99,
|
| 9 |
+
859,
|
| 10 |
+
151
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Jun Xia $^{1,2\\dagger}$ , Lirong Wu $^{1,2\\dagger}$ , Jintao Chen $^{3}$ , Bozhen Hu $^{1,2}$ , Stan Z.Li $^{1,2\\star}$",
|
| 17 |
+
"bbox": [
|
| 18 |
+
225,
|
| 19 |
+
176,
|
| 20 |
+
767,
|
| 21 |
+
196
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "$^{1}$ School of Engineering, Westlake University, Hangzhou 310030, China",
|
| 28 |
+
"bbox": [
|
| 29 |
+
261,
|
| 30 |
+
196,
|
| 31 |
+
738,
|
| 32 |
+
210
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "$^{2}$ Institute of Advanced Technology, Westlake Institute for Advanced Study, Hangzhou 310030, China",
|
| 39 |
+
"bbox": [
|
| 40 |
+
161,
|
| 41 |
+
210,
|
| 42 |
+
836,
|
| 43 |
+
224
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "3 Zhejiang University, Hangzhou 310058, China",
|
| 50 |
+
"bbox": [
|
| 51 |
+
339,
|
| 52 |
+
226,
|
| 53 |
+
658,
|
| 54 |
+
241
|
| 55 |
+
],
|
| 56 |
+
"page_idx": 0
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"text": "{xiajun, wulirong, hubozhen, stan.zq.li}@westlake.edu.cn, chen jintao@zju.edu.cn",
|
| 61 |
+
"bbox": [
|
| 62 |
+
233,
|
| 63 |
+
242,
|
| 64 |
+
761,
|
| 65 |
+
257
|
| 66 |
+
],
|
| 67 |
+
"page_idx": 0
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"text": "ABSTRACT",
|
| 72 |
+
"text_level": 1,
|
| 73 |
+
"bbox": [
|
| 74 |
+
83,
|
| 75 |
+
266,
|
| 76 |
+
183,
|
| 77 |
+
279
|
| 78 |
+
],
|
| 79 |
+
"page_idx": 0
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"type": "text",
|
| 83 |
+
"text": "Graph contrastive learning (GCL) has emerged as a dominant technique for graph representation learning which maximizes the mutual information between paired graph augmentations that share the same semantics. Unfortunately, it is difficult to preserve semantics well during augmentations in view of the diverse nature of graph data. Currently, data augmentations in GCL broadly fall into three unsatisfactory ways. First, the augmentations can be manually picked per dataset by trial-and-errors. Second, the augmentations can be selected via cumbersome search. Third, the augmentations can be obtained with expensive domain knowledge as guidance. All of these limit the efficiency and more general applicability of existing GCL methods. To circumvent these crucial issues, we propose a Simple framework for GRaph Contrastive lEarning, SimGRACE for brevity, which does not require data augmentations. Specifically, we take original graph as input and GNN model with its perturbed version as two encoders to obtain two correlated views for contrast. SimGRACE is inspired by the observation that graph data can preserve their semantics well during encoder perturbations while not requiring manual trial-and-errors, cumbersome search or expensive domain knowledge for augmentations selection. Also, we explain why SimGRACE can succeed. Furthermore, we devise adversarial training scheme, dubbed AT-SimGRACE, to enhance the robustness of graph contrastive learning and theoretically explain the reasons. Albeit simple, we show that SimGRACE can yield competitive or better performance compared with state-of-the-art methods in terms of generalizability, transferability and robustness, while enjoying unprecedented degree of flexibility and efficiency. The code is available at: https://github.com/junxia97/SimGRACE.",
|
| 84 |
+
"bbox": [
|
| 85 |
+
81,
|
| 86 |
+
285,
|
| 87 |
+
483,
|
| 88 |
+
672
|
| 89 |
+
],
|
| 90 |
+
"page_idx": 0
|
| 91 |
+
},
|
| 92 |
+
{
|
| 93 |
+
"type": "text",
|
| 94 |
+
"text": "CCS CONCEPTS",
|
| 95 |
+
"text_level": 1,
|
| 96 |
+
"bbox": [
|
| 97 |
+
83,
|
| 98 |
+
684,
|
| 99 |
+
220,
|
| 100 |
+
698
|
| 101 |
+
],
|
| 102 |
+
"page_idx": 0
|
| 103 |
+
},
|
| 104 |
+
{
|
| 105 |
+
"type": "text",
|
| 106 |
+
"text": "- Computing methodologies $\\rightarrow$ Neural networks; Learning latent representations; - Mathematics of computing $\\rightarrow$ Graph algorithms.",
|
| 107 |
+
"bbox": [
|
| 108 |
+
81,
|
| 109 |
+
703,
|
| 110 |
+
483,
|
| 111 |
+
744
|
| 112 |
+
],
|
| 113 |
+
"page_idx": 0
|
| 114 |
+
},
|
| 115 |
+
{
|
| 116 |
+
"type": "text",
|
| 117 |
+
"text": "$\\dagger$ Equal Contribution, $\\star$ Corresponding Author.",
|
| 118 |
+
"bbox": [
|
| 119 |
+
83,
|
| 120 |
+
755,
|
| 121 |
+
303,
|
| 122 |
+
768
|
| 123 |
+
],
|
| 124 |
+
"page_idx": 0
|
| 125 |
+
},
|
| 126 |
+
{
|
| 127 |
+
"type": "text",
|
| 128 |
+
"text": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than ACM must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.",
|
| 129 |
+
"bbox": [
|
| 130 |
+
81,
|
| 131 |
+
780,
|
| 132 |
+
482,
|
| 133 |
+
852
|
| 134 |
+
],
|
| 135 |
+
"page_idx": 0
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"type": "text",
|
| 139 |
+
"text": "WWW '22, April 25-29, 2022, Virtual Event, Lyon, France",
|
| 140 |
+
"bbox": [
|
| 141 |
+
84,
|
| 142 |
+
853,
|
| 143 |
+
346,
|
| 144 |
+
863
|
| 145 |
+
],
|
| 146 |
+
"page_idx": 0
|
| 147 |
+
},
|
| 148 |
+
{
|
| 149 |
+
"type": "text",
|
| 150 |
+
"text": "© 2022 Association for Computing Machinery.",
|
| 151 |
+
"bbox": [
|
| 152 |
+
84,
|
| 153 |
+
864,
|
| 154 |
+
303,
|
| 155 |
+
875
|
| 156 |
+
],
|
| 157 |
+
"page_idx": 0
|
| 158 |
+
},
|
| 159 |
+
{
|
| 160 |
+
"type": "text",
|
| 161 |
+
"text": "ACM ISBN 978-1-4503-9096-5/22/04...$15.00",
|
| 162 |
+
"bbox": [
|
| 163 |
+
84,
|
| 164 |
+
875,
|
| 165 |
+
294,
|
| 166 |
+
883
|
| 167 |
+
],
|
| 168 |
+
"page_idx": 0
|
| 169 |
+
},
|
| 170 |
+
{
|
| 171 |
+
"type": "text",
|
| 172 |
+
"text": "https://doi.org/10.1145/3485447.3512156",
|
| 173 |
+
"bbox": [
|
| 174 |
+
84,
|
| 175 |
+
883,
|
| 176 |
+
272,
|
| 177 |
+
895
|
| 178 |
+
],
|
| 179 |
+
"page_idx": 0
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"type": "text",
|
| 183 |
+
"text": "KEYWORDS",
|
| 184 |
+
"text_level": 1,
|
| 185 |
+
"bbox": [
|
| 186 |
+
514,
|
| 187 |
+
266,
|
| 188 |
+
620,
|
| 189 |
+
279
|
| 190 |
+
],
|
| 191 |
+
"page_idx": 0
|
| 192 |
+
},
|
| 193 |
+
{
|
| 194 |
+
"type": "text",
|
| 195 |
+
"text": "Graph neural networks, graph self-supervised learning, contrastive learning, graph representation learning, robustness",
|
| 196 |
+
"bbox": [
|
| 197 |
+
513,
|
| 198 |
+
284,
|
| 199 |
+
911,
|
| 200 |
+
313
|
| 201 |
+
],
|
| 202 |
+
"page_idx": 0
|
| 203 |
+
},
|
| 204 |
+
{
|
| 205 |
+
"type": "text",
|
| 206 |
+
"text": "ACM Reference Format:",
|
| 207 |
+
"text_level": 1,
|
| 208 |
+
"bbox": [
|
| 209 |
+
514,
|
| 210 |
+
321,
|
| 211 |
+
661,
|
| 212 |
+
334
|
| 213 |
+
],
|
| 214 |
+
"page_idx": 0
|
| 215 |
+
},
|
| 216 |
+
{
|
| 217 |
+
"type": "text",
|
| 218 |
+
"text": "Jun Xia, Lirong Wu, Jintao Chen, Bozhen Hu, Stan Z.Li. 2022. SimGRACE: A Simple Framework for Graph Contrastive Learning without Data Augmentation. In Proceedings of the ACM Web Conference 2022 (WWW '22), April 25-29, 2022, Virtual Event, Lyon, France. ACM, New York, NY, USA, 10 pages. https://doi.org/10.1145/3485447.3512156",
|
| 219 |
+
"bbox": [
|
| 220 |
+
513,
|
| 221 |
+
335,
|
| 222 |
+
915,
|
| 223 |
+
398
|
| 224 |
+
],
|
| 225 |
+
"page_idx": 0
|
| 226 |
+
},
|
| 227 |
+
{
|
| 228 |
+
"type": "text",
|
| 229 |
+
"text": "1 INTRODUCTION",
|
| 230 |
+
"text_level": 1,
|
| 231 |
+
"bbox": [
|
| 232 |
+
514,
|
| 233 |
+
420,
|
| 234 |
+
687,
|
| 235 |
+
434
|
| 236 |
+
],
|
| 237 |
+
"page_idx": 0
|
| 238 |
+
},
|
| 239 |
+
{
|
| 240 |
+
"type": "text",
|
| 241 |
+
"text": "Graph Neural Networks (GNNs), inheriting the power of neural networks and utilizing the structural information of graph data simultaneously, have achieved overwhelming accomplishments in various graph-based tasks, such as node, graph classification or graph generation [10, 21, 52]. However, most existing GNNs are trained in a supervised manner and it is often resource- and time-intensive to collect abundant true-labeled data [40, 47, 48]. To remedy this issue, tremendous endeavors have been devoted to graph self-supervised learning that learns representations from unlabeled graphs. Among many, graph contrastive learning (GCL) [51, 55, 56] follows the general framework of contrastive learning in computer vision domain [42, 46], in which two augmentations are generated for each graph and then maximizes the mutual information between these two augmented views. In this way, the model can learn representations that are invariant to perturbations. For example, GraphCL [56] first designs four types of general augmentations (node dropping, edge perturbation, attribute masking and subgraph) for GCL. However, these augmentations are not suitable for all scenarios because the structural information and semantics of the graphs varies significantly across domains. For example, GraphCL [56] finds that edge perturbation benefits social networks but hurt some biochemical molecules in GCL. Worse still, these augmentations may alter the graph semantics completely even if the perturbation is weak. For example, dropping a carbon atom in the phenyl ring will alter the aromatic system and result in an alkene chain, which will drastically change the molecular properties [39].",
|
| 242 |
+
"bbox": [
|
| 243 |
+
511,
|
| 244 |
+
438,
|
| 245 |
+
915,
|
| 246 |
+
797
|
| 247 |
+
],
|
| 248 |
+
"page_idx": 0
|
| 249 |
+
},
|
| 250 |
+
{
|
| 251 |
+
"type": "text",
|
| 252 |
+
"text": "To remedy these issues, several strategies have been proposed recently. Typically, GraphCL [56] manually picks data augmentations per dataset by tedious trial-and-errors, which significantly limits the generality and practicality of their proposed framework. To get rid of the tedious dataset-specific manual tuning of GraphCL, JOAO [55] proposes to automate GraphCL in selecting augmentation pairs. However, it suffers more computational overhead to",
|
| 253 |
+
"bbox": [
|
| 254 |
+
511,
|
| 255 |
+
797,
|
| 256 |
+
915,
|
| 257 |
+
896
|
| 258 |
+
],
|
| 259 |
+
"page_idx": 0
|
| 260 |
+
},
|
| 261 |
+
{
|
| 262 |
+
"type": "aside_text",
|
| 263 |
+
"text": "arXiv:2202.03104v3 [cs.LG] 20 Mar 2023",
|
| 264 |
+
"bbox": [
|
| 265 |
+
22,
|
| 266 |
+
260,
|
| 267 |
+
57,
|
| 268 |
+
705
|
| 269 |
+
],
|
| 270 |
+
"page_idx": 0
|
| 271 |
+
},
|
| 272 |
+
{
|
| 273 |
+
"type": "table",
|
| 274 |
+
"img_path": "images/fe0551fecfe18d88bc3ce62947b6d32086bbb245ab84132133f2a4e8a9121aaa.jpg",
|
| 275 |
+
"table_caption": [
|
| 276 |
+
"Table 1: Comparison between state-of-the-art GCL methods (graph-level representation learning) and SimGRACE."
|
| 277 |
+
],
|
| 278 |
+
"table_footnote": [],
|
| 279 |
+
"table_body": "<table><tr><td></td><td>No manual trial-and-errors</td><td>No domain knowledge</td><td>Preserving semantics</td><td>No cumbersome search</td><td>Generality</td></tr><tr><td>GraphCL [56]</td><td>X</td><td>✓</td><td>X</td><td>✓</td><td>X</td></tr><tr><td>MoCL [39]</td><td>✓</td><td>X</td><td>✓</td><td>✓</td><td>X</td></tr><tr><td>JOAO(v2) [55]</td><td>✓</td><td>✓</td><td>X</td><td>X</td><td>✓</td></tr><tr><td>SimGRACE</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr></table>",
|
| 280 |
+
"bbox": [
|
| 281 |
+
137,
|
| 282 |
+
130,
|
| 283 |
+
859,
|
| 284 |
+
215
|
| 285 |
+
],
|
| 286 |
+
"page_idx": 1
|
| 287 |
+
},
|
| 288 |
+
{
|
| 289 |
+
"type": "image",
|
| 290 |
+
"img_path": "images/587b0889a29f856de2a01911d907c1d819dc185fc1f4fa3573041c9bb835abdd.jpg",
|
| 291 |
+
"image_caption": [],
|
| 292 |
+
"image_footnote": [],
|
| 293 |
+
"bbox": [
|
| 294 |
+
94,
|
| 295 |
+
238,
|
| 296 |
+
215,
|
| 297 |
+
309
|
| 298 |
+
],
|
| 299 |
+
"page_idx": 1
|
| 300 |
+
},
|
| 301 |
+
{
|
| 302 |
+
"type": "image",
|
| 303 |
+
"img_path": "images/24bb5a6cc4e0e7a51c3dc8290ed8b45b103845cd5555214b9b59920d7194d82b.jpg",
|
| 304 |
+
"image_caption": [],
|
| 305 |
+
"image_footnote": [],
|
| 306 |
+
"bbox": [
|
| 307 |
+
225,
|
| 308 |
+
238,
|
| 309 |
+
346,
|
| 310 |
+
309
|
| 311 |
+
],
|
| 312 |
+
"page_idx": 1
|
| 313 |
+
},
|
| 314 |
+
{
|
| 315 |
+
"type": "image",
|
| 316 |
+
"img_path": "images/2c3b52ed335ef8d51b9e65f0513d9405ed68576ebcf387fc9425a55f46ad31ec.jpg",
|
| 317 |
+
"image_caption": [],
|
| 318 |
+
"image_footnote": [],
|
| 319 |
+
"bbox": [
|
| 320 |
+
357,
|
| 321 |
+
238,
|
| 322 |
+
477,
|
| 323 |
+
309
|
| 324 |
+
],
|
| 325 |
+
"page_idx": 1
|
| 326 |
+
},
|
| 327 |
+
{
|
| 328 |
+
"type": "image",
|
| 329 |
+
"img_path": "images/651057bcfe2f7095fc9ebcc4f5305c939dfdca16610c0c759e1b82d494dc762e.jpg",
|
| 330 |
+
"image_caption": [
|
| 331 |
+
"GraphCL"
|
| 332 |
+
],
|
| 333 |
+
"image_footnote": [],
|
| 334 |
+
"bbox": [
|
| 335 |
+
94,
|
| 336 |
+
325,
|
| 337 |
+
214,
|
| 338 |
+
395
|
| 339 |
+
],
|
| 340 |
+
"page_idx": 1
|
| 341 |
+
},
|
| 342 |
+
{
|
| 343 |
+
"type": "image",
|
| 344 |
+
"img_path": "images/85f0385d74ea6d3b22c31148e8cf8c215e75d1ba16c7b1203094ba1a4bbeb8de.jpg",
|
| 345 |
+
"image_caption": [
|
| 346 |
+
"MoCL"
|
| 347 |
+
],
|
| 348 |
+
"image_footnote": [],
|
| 349 |
+
"bbox": [
|
| 350 |
+
223,
|
| 351 |
+
325,
|
| 352 |
+
344,
|
| 353 |
+
395
|
| 354 |
+
],
|
| 355 |
+
"page_idx": 1
|
| 356 |
+
},
|
| 357 |
+
{
|
| 358 |
+
"type": "image",
|
| 359 |
+
"img_path": "images/8a6a3cef65350c04750ea5c529f988a539eeeb495684f995345227a17bed5ae3.jpg",
|
| 360 |
+
"image_caption": [
|
| 361 |
+
"SimGRACE",
|
| 362 |
+
"Figure 1: Comparison of GraphCL [56], MoCL [39] and SimGRACE on MUTAG dataset. The samples of two classes are distinguished by colors (blue & orange). We first train three GNN encoders with these methods respectively and visualise the representations of original graphs with t-SNE in the upper row. Then, we perturb graphs or encoders in their respective ways (edge perturbation for GraphCL, replacing functional group with bioisosteres of similar properties for MoCL, encoder perturbation for SimGRACE) and visualise the representations of perturbed (GraphCL, MoCL) or original (SimGRACE) graphs in the below row. Unlike GraphCL, SimGRACE and MoCL can preserve the class identity semantics well after perturbations. However, MoCL requires expensive domain knowledge as guidance."
|
| 363 |
+
],
|
| 364 |
+
"image_footnote": [],
|
| 365 |
+
"bbox": [
|
| 366 |
+
354,
|
| 367 |
+
325,
|
| 368 |
+
475,
|
| 369 |
+
395
|
| 370 |
+
],
|
| 371 |
+
"page_idx": 1
|
| 372 |
+
},
|
| 373 |
+
{
|
| 374 |
+
"type": "text",
|
| 375 |
+
"text": "search suitable augmentations and still relies on human prior knowledge in constructing and configuring the augmentation pool to select from. To avoid altering the semantics in the general augmentations adopted in GraphCL and JOAO(v2), MoCL [39] proposes to replace valid substructures in molecular graph with bioisosteres that share similar properties. However, it requires expensive domain knowledge as guidance and can not be applied in other domains like social graphs. Hence, a natural question emerges: Can we emancipate graph contrastive learning from tedious manual trial-and-errors, cumbersome search or expensive domain knowledge?",
|
| 376 |
+
"bbox": [
|
| 377 |
+
81,
|
| 378 |
+
631,
|
| 379 |
+
482,
|
| 380 |
+
768
|
| 381 |
+
],
|
| 382 |
+
"page_idx": 1
|
| 383 |
+
},
|
| 384 |
+
{
|
| 385 |
+
"type": "text",
|
| 386 |
+
"text": "To answer this question, instead of devising more advanced data augmentations strategies for GCL, we attempt to break through state-of-the-arts GCL framework which takes semantic-preserved data augmentations as prerequisite. More specifically, we take original graph data as input and GNN model with its perturbed version as two encoders to obtain two correlated views. And then, we maximize the agreement of these two views. With the encoder perturbation as noise, we can obtain two different embeddings for same input as \"positive pairs\". Similar to previous works [42, 56],",
|
| 387 |
+
"bbox": [
|
| 388 |
+
81,
|
| 389 |
+
770,
|
| 390 |
+
482,
|
| 391 |
+
895
|
| 392 |
+
],
|
| 393 |
+
"page_idx": 1
|
| 394 |
+
},
|
| 395 |
+
{
|
| 396 |
+
"type": "text",
|
| 397 |
+
"text": "we take other graph data in the same mini-batch as \"negative pairs\". The idea of encoder perturbation is inspired by the observations in Figure 1. The augmentation or perturbation of MoCL and our SimGRACE can preserve the class identity semantics well while GraphCL can not. Also, we explain why SimGRACE can succeed. Besides, GraphCL [56] shows that GNNs can gain robustness using their proposed framework. However, (1) they do not explain why GraphCL can enhance the robustness; (2) GraphCL seems to be immunized to random attacks well while performing unsatisfactory against adversarial attacks. GROC [18] first integrates adversarial transformations into the graph contrastive learning framework and improves the robustness against adversarial attacks. Unfortunately, as the authors pointed out, the robustness of GROC comes at a price of much longer training time because conducting adversarial transformations for each graph is time-consuming. To remedy these deficiencies, we propose a novel algorithm AT-SimGRACE to perturb the encoder in an adversarial way, which introduces less computational overhead while showing better robustness. Theoretically, we explain why AT-SimGRACE can enhance the robustness. We highlight our contributions as follows:",
|
| 398 |
+
"bbox": [
|
| 399 |
+
511,
|
| 400 |
+
234,
|
| 401 |
+
915,
|
| 402 |
+
512
|
| 403 |
+
],
|
| 404 |
+
"page_idx": 1
|
| 405 |
+
},
|
| 406 |
+
{
|
| 407 |
+
"type": "list",
|
| 408 |
+
"sub_type": "text",
|
| 409 |
+
"list_items": [
|
| 410 |
+
"- Significance: We emancipate graph contrastive learning from tedious manual trial-and-error, cumbersome search or expensive domain knowledge which limit the efficiency and more general applicability of existing GCL methods. The comparison between SimGRACE and state-of-the-art GCL methods can be seen in Table 1.",
|
| 411 |
+
"- Framework: We develop a novel and effective framework, SimGRACE, for graph contrastive learning which enjoys unprecedented degree of flexibility, high efficiency and ease of use. Moreover, we explain why SimGRACE can succeed.",
|
| 412 |
+
"- Algorithm: We propose a novel algorithm AT-SimGRACE to enhance the robustness of graph contrastive learning. AT-SimGRACE can achieve better robustness while introducing minor computational overhead.",
|
| 413 |
+
"- Experiments: We experimentally show that the proposed methods can yield competitive or better performance compared with state-of-the-art methods in terms of generalizability, transferability, robustness and efficiency on multiple social and biochemical graph datasets."
|
| 414 |
+
],
|
| 415 |
+
"bbox": [
|
| 416 |
+
540,
|
| 417 |
+
516,
|
| 418 |
+
913,
|
| 419 |
+
779
|
| 420 |
+
],
|
| 421 |
+
"page_idx": 1
|
| 422 |
+
},
|
| 423 |
+
{
|
| 424 |
+
"type": "text",
|
| 425 |
+
"text": "2 RELATED WORK",
|
| 426 |
+
"text_level": 1,
|
| 427 |
+
"bbox": [
|
| 428 |
+
513,
|
| 429 |
+
799,
|
| 430 |
+
689,
|
| 431 |
+
811
|
| 432 |
+
],
|
| 433 |
+
"page_idx": 1
|
| 434 |
+
},
|
| 435 |
+
{
|
| 436 |
+
"type": "text",
|
| 437 |
+
"text": "2.1 Generative / Predictive self-supervised learning on graphs",
|
| 438 |
+
"text_level": 1,
|
| 439 |
+
"bbox": [
|
| 440 |
+
513,
|
| 441 |
+
819,
|
| 442 |
+
870,
|
| 443 |
+
852
|
| 444 |
+
],
|
| 445 |
+
"page_idx": 1
|
| 446 |
+
},
|
| 447 |
+
{
|
| 448 |
+
"type": "text",
|
| 449 |
+
"text": "Inspired by the success of self-supervised learning in computer vision [19, 42] and natural language processing [8, 22, 57], tremendous endeavors have been devoted to graph self-supervised learning that",
|
| 450 |
+
"bbox": [
|
| 451 |
+
511,
|
| 452 |
+
854,
|
| 453 |
+
913,
|
| 454 |
+
896
|
| 455 |
+
],
|
| 456 |
+
"page_idx": 1
|
| 457 |
+
},
|
| 458 |
+
{
|
| 459 |
+
"type": "header",
|
| 460 |
+
"text": "WWW '22, April 25-29, 2022, Virtual Event, Lyon, France",
|
| 461 |
+
"bbox": [
|
| 462 |
+
83,
|
| 463 |
+
75,
|
| 464 |
+
354,
|
| 465 |
+
87
|
| 466 |
+
],
|
| 467 |
+
"page_idx": 1
|
| 468 |
+
},
|
| 469 |
+
{
|
| 470 |
+
"type": "header",
|
| 471 |
+
"text": "Jun Xia, et al.",
|
| 472 |
+
"bbox": [
|
| 473 |
+
846,
|
| 474 |
+
75,
|
| 475 |
+
911,
|
| 476 |
+
85
|
| 477 |
+
],
|
| 478 |
+
"page_idx": 1
|
| 479 |
+
},
|
| 480 |
+
{
|
| 481 |
+
"type": "image",
|
| 482 |
+
"img_path": "images/5304fef9ecf9ca3c0e532357fbc5aef2f849ef28d5a38d073c1188ddfe6d9c21.jpg",
|
| 483 |
+
"image_caption": [
|
| 484 |
+
"Figure 2: Illustration of SimGRACE, a simple framework of graph contrastive learning. Instead of augmenting the graph data, we feed the original graph $\\mathcal{G}$ into a GNN encoder $f(\\cdot ;\\theta)$ and its perturbed version $f(\\cdot ;\\theta^{\\prime})$ . After passing a shared projection head $g(\\cdot)$ , we maximize the agreement between representations $z_{i}$ and $z_{j}$ via a contrastive loss."
|
| 485 |
+
],
|
| 486 |
+
"image_footnote": [],
|
| 487 |
+
"bbox": [
|
| 488 |
+
230,
|
| 489 |
+
106,
|
| 490 |
+
754,
|
| 491 |
+
272
|
| 492 |
+
],
|
| 493 |
+
"page_idx": 2
|
| 494 |
+
},
|
| 495 |
+
{
|
| 496 |
+
"type": "text",
|
| 497 |
+
"text": "learns representations in an unsupervised manner with designed pretext tasks. Initially, Hu et al. [15] propose two pretext tasks, i.e., predicting neighborhood context and node attributes to conduct node-level pre-training. Besides, they utilize supervised graph-level property prediction and structure similarity prediction as pretext tasks to perform graph-level pre-training. GPT-GNN [16] designs generative task in which node attributes and edges are alternatively generated such that the likelihood of a graph is maximized. Recently, GROVER [33] incorporates GNN into a transformer-style architecture and learns node embedding by predicting contextual property and graph-level motifs. We recommend the readers to refer to a recent survey [51] for more information. Different from above methods, our SimGRACE follows a contrastive framework that will be introduced below.",
|
| 498 |
+
"bbox": [
|
| 499 |
+
81,
|
| 500 |
+
349,
|
| 501 |
+
483,
|
| 502 |
+
544
|
| 503 |
+
],
|
| 504 |
+
"page_idx": 2
|
| 505 |
+
},
|
| 506 |
+
{
|
| 507 |
+
"type": "text",
|
| 508 |
+
"text": "2.2 Graph Contrastive Learning",
|
| 509 |
+
"text_level": 1,
|
| 510 |
+
"bbox": [
|
| 511 |
+
83,
|
| 512 |
+
559,
|
| 513 |
+
356,
|
| 514 |
+
574
|
| 515 |
+
],
|
| 516 |
+
"page_idx": 2
|
| 517 |
+
},
|
| 518 |
+
{
|
| 519 |
+
"type": "text",
|
| 520 |
+
"text": "Graph contrastive learning can be categorized into two groups. One group can encode useful information by contrasting local and global representations. Initially, DGI [45] and InfoGraph [37] are proposed to obtain expressive representations for graphs or nodes via maximizing the mutual information between graph-level representations and substructure-level representations of different granularity. More recently, MVGRL [13] proposes to learn both node-level and graph-level representation by performing node diffusion and contrasting node representation to augmented graph representations. Another group is designed to learn representations that are tolerant to data transformation. Specifically, they first augment graph data and feed the augmented graphs into a shared encoder and projection head, after which their mutual information is maximized. Typically, for node-level tasks [58, 59], GCA [60] argues that data augmentation schemes should preserve intrinsic structures and attributes of graphs and thus proposes to adopt adaptive augmentations that only perturb unimportant components. DGCL [49] introduces a novel probabilistic method to alleviate the issue of false negatives in GCL. For graph-level tasks, GraphCL [56] proposes four types of augmentations for general graphs and demonstrated that the learned representations can help downstream tasks. However, the success of GraphCL comes at the price of tedious manual trial-and errors. To tackle this issue, JOAO [55] proposes a unified bi-level",
|
| 521 |
+
"bbox": [
|
| 522 |
+
81,
|
| 523 |
+
577,
|
| 524 |
+
483,
|
| 525 |
+
896
|
| 526 |
+
],
|
| 527 |
+
"page_idx": 2
|
| 528 |
+
},
|
| 529 |
+
{
|
| 530 |
+
"type": "text",
|
| 531 |
+
"text": "optimization framework to automatically select data augmentations for GraphCL, which is time-consuming and inconvenient. More recently, MoCL [39] proposes to incorporate domain knowledge into molecular graph augmentations in order to preserve the semantics. However, the domain knowledge is extremely expensive. Worse still, MoCL can only work on molecular graph data, which significantly limits their generality. Despite the fruitful progress, they still require tedious manual trial-and-errors, cumbersome search or expensive domain knowledge for augmentation selection. Instead, our SimGRACE breaks through state-of-the-arts GCL framework that takes semantic-preserved data augmentations as prerequisite.",
|
| 532 |
+
"bbox": [
|
| 533 |
+
511,
|
| 534 |
+
349,
|
| 535 |
+
915,
|
| 536 |
+
503
|
| 537 |
+
],
|
| 538 |
+
"page_idx": 2
|
| 539 |
+
},
|
| 540 |
+
{
|
| 541 |
+
"type": "text",
|
| 542 |
+
"text": "3 METHOD",
|
| 543 |
+
"text_level": 1,
|
| 544 |
+
"bbox": [
|
| 545 |
+
514,
|
| 546 |
+
515,
|
| 547 |
+
625,
|
| 548 |
+
529
|
| 549 |
+
],
|
| 550 |
+
"page_idx": 2
|
| 551 |
+
},
|
| 552 |
+
{
|
| 553 |
+
"type": "text",
|
| 554 |
+
"text": "3.1 SimGRACE",
|
| 555 |
+
"text_level": 1,
|
| 556 |
+
"bbox": [
|
| 557 |
+
514,
|
| 558 |
+
535,
|
| 559 |
+
653,
|
| 560 |
+
549
|
| 561 |
+
],
|
| 562 |
+
"page_idx": 2
|
| 563 |
+
},
|
| 564 |
+
{
|
| 565 |
+
"type": "text",
|
| 566 |
+
"text": "In this section, we will introduce SimGRACE framework in details. As sketched in Figure 2, the framework consists of the following three major components:",
|
| 567 |
+
"bbox": [
|
| 568 |
+
513,
|
| 569 |
+
553,
|
| 570 |
+
913,
|
| 571 |
+
595
|
| 572 |
+
],
|
| 573 |
+
"page_idx": 2
|
| 574 |
+
},
|
| 575 |
+
{
|
| 576 |
+
"type": "text",
|
| 577 |
+
"text": "(1) Encoder perturbation. A GNN encoder $f(\\cdot ;\\theta)$ and its its perturbed version $f(\\cdot ;\\theta^{\\prime})$ first extract two graph-level representations $\\mathbf{h}$ and $\\mathbf{h}'$ for the same graph $\\mathcal{G}$ , which can be formulated as,",
|
| 578 |
+
"bbox": [
|
| 579 |
+
513,
|
| 580 |
+
595,
|
| 581 |
+
913,
|
| 582 |
+
650
|
| 583 |
+
],
|
| 584 |
+
"page_idx": 2
|
| 585 |
+
},
|
| 586 |
+
{
|
| 587 |
+
"type": "equation",
|
| 588 |
+
"text": "\n$$\n\\mathbf {h} = f (\\mathcal {G}; \\theta), \\mathbf {h} ^ {\\prime} = f (\\mathcal {G}; \\theta^ {\\prime}). \\tag {1}\n$$\n",
|
| 589 |
+
"text_format": "latex",
|
| 590 |
+
"bbox": [
|
| 591 |
+
629,
|
| 592 |
+
657,
|
| 593 |
+
913,
|
| 594 |
+
672
|
| 595 |
+
],
|
| 596 |
+
"page_idx": 2
|
| 597 |
+
},
|
| 598 |
+
{
|
| 599 |
+
"type": "text",
|
| 600 |
+
"text": "The method we proposed to perturb the encoder $f(\\cdot ;\\theta)$ can be mathematically described as,",
|
| 601 |
+
"bbox": [
|
| 602 |
+
513,
|
| 603 |
+
679,
|
| 604 |
+
913,
|
| 605 |
+
708
|
| 606 |
+
],
|
| 607 |
+
"page_idx": 2
|
| 608 |
+
},
|
| 609 |
+
{
|
| 610 |
+
"type": "equation",
|
| 611 |
+
"text": "\n$$\n\\boldsymbol {\\theta} _ {l} ^ {\\prime} = \\boldsymbol {\\theta} _ {l} + \\eta \\cdot \\Delta \\boldsymbol {\\theta} _ {l}; \\quad \\Delta \\boldsymbol {\\theta} _ {l} \\sim \\mathcal {N} \\left(0, \\sigma_ {l} ^ {2}\\right), \\tag {2}\n$$\n",
|
| 612 |
+
"text_format": "latex",
|
| 613 |
+
"bbox": [
|
| 614 |
+
598,
|
| 615 |
+
714,
|
| 616 |
+
913,
|
| 617 |
+
736
|
| 618 |
+
],
|
| 619 |
+
"page_idx": 2
|
| 620 |
+
},
|
| 621 |
+
{
|
| 622 |
+
"type": "text",
|
| 623 |
+
"text": "where $\\theta_{l}$ and $\\theta_l^{\\prime}$ are the weight tensors of the $l$ -th layer of the GNN encoder and its perturbed version respectively. $\\eta$ is the coefficient that scales the magnitude of the perturbation. $\\Delta \\theta_{l}$ is the perturbation term which samples from Gaussian distribution with zero mean and variance $\\sigma_l^2$ . Also, we show that the performance will deteriorate when we set $\\eta = 0$ in section 4.6.1. Note that BGRL [41] and MERIT [17] also update a target network with an online encoder during training. However, SimGRACE differs from them in three aspects: (1) SimGRACE perturbs the encoder with a random Gaussian noise instead of momentum updating; (2) SimGRACE does not require data augmentation while BGRL and MERIT take it as",
|
| 624 |
+
"bbox": [
|
| 625 |
+
511,
|
| 626 |
+
742,
|
| 627 |
+
915,
|
| 628 |
+
896
|
| 629 |
+
],
|
| 630 |
+
"page_idx": 2
|
| 631 |
+
},
|
| 632 |
+
{
|
| 633 |
+
"type": "header",
|
| 634 |
+
"text": "SimGRACE: A Simple Framework for Graph Contrastive Learning without Data Augmentation",
|
| 635 |
+
"bbox": [
|
| 636 |
+
83,
|
| 637 |
+
75,
|
| 638 |
+
532,
|
| 639 |
+
87
|
| 640 |
+
],
|
| 641 |
+
"page_idx": 2
|
| 642 |
+
},
|
| 643 |
+
{
|
| 644 |
+
"type": "header",
|
| 645 |
+
"text": "WWW '22, April 25-29, 2022, Virtual Event, Lyon, France",
|
| 646 |
+
"bbox": [
|
| 647 |
+
640,
|
| 648 |
+
75,
|
| 649 |
+
913,
|
| 650 |
+
87
|
| 651 |
+
],
|
| 652 |
+
"page_idx": 2
|
| 653 |
+
},
|
| 654 |
+
{
|
| 655 |
+
"type": "text",
|
| 656 |
+
"text": "prerequisite. (3) SimGRACE focuses on graph-level representation learning while BGRL and MERIT only work in node-level tasks.",
|
| 657 |
+
"bbox": [
|
| 658 |
+
81,
|
| 659 |
+
106,
|
| 660 |
+
480,
|
| 661 |
+
133
|
| 662 |
+
],
|
| 663 |
+
"page_idx": 3
|
| 664 |
+
},
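As a concrete illustration of the perturbation in Eq. (2), the sketch below perturbs a copy of a PyTorch GNN encoder with layer-wise Gaussian noise. It is a minimal sketch, assuming PyTorch and tying each sigma_l to the scale (empirical standard deviation) of the corresponding weight tensor, which the paper does not pin down; the function name and default eta are illustrative.

    import copy
    import torch

    def perturb_encoder(encoder: torch.nn.Module, eta: float = 1.0) -> torch.nn.Module:
        # Build f(.; theta') from f(.; theta):  theta'_l = theta_l + eta * delta_l,
        # with delta_l ~ N(0, sigma_l^2) as in Eq. (2).
        perturbed = copy.deepcopy(encoder)
        for p, p_prime in zip(encoder.parameters(), perturbed.parameters()):
            # Assumption: sigma_l follows the scale of each weight tensor.
            std = p.data.std() if p.data.numel() > 1 else p.data.abs()
            p_prime.data = p.data + eta * torch.normal(0.0, torch.ones_like(p.data) * std)
        return perturbed

The encoder and its perturbed copy then embed the same input graph to produce the two correlated views h and h' of Eq. (1).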
|
| 665 |
+
{
|
| 666 |
+
"type": "text",
|
| 667 |
+
"text": "(2) Projection head. As advocated in [42], a non-linear transformation $g(\\cdot)$ named projection head maps the representations to another latent space can enhance the performance. In our Sim-GRACE framework, we also adopt a two-layer perceptron (MLP) to obtain $z$ and $z'$ ,",
|
| 668 |
+
"bbox": [
|
| 669 |
+
81,
|
| 670 |
+
133,
|
| 671 |
+
482,
|
| 672 |
+
204
|
| 673 |
+
],
|
| 674 |
+
"page_idx": 3
|
| 675 |
+
},
|
| 676 |
+
{
|
| 677 |
+
"type": "equation",
|
| 678 |
+
"text": "\n$$\nz = g (\\mathbf {h}), z ^ {\\prime} = g \\left(\\mathbf {h} ^ {\\prime}\\right). \\tag {3}\n$$\n",
|
| 679 |
+
"text_format": "latex",
|
| 680 |
+
"bbox": [
|
| 681 |
+
218,
|
| 682 |
+
208,
|
| 683 |
+
480,
|
| 684 |
+
223
|
| 685 |
+
],
|
| 686 |
+
"page_idx": 3
|
| 687 |
+
},
|
| 688 |
+
{
|
| 689 |
+
"type": "text",
|
| 690 |
+
"text": "(3) Contrastive loss. In SimGRACE framework, we utilize the normalized temperature-scaled cross entropy loss (NT-Xent) as previous works [29, 36, 46, 56] to enforce the agreement between positive pairs $z$ and $z'$ compared with negative pairs.",
|
| 691 |
+
"bbox": [
|
| 692 |
+
81,
|
| 693 |
+
227,
|
| 694 |
+
480,
|
| 695 |
+
284
|
| 696 |
+
],
|
| 697 |
+
"page_idx": 3
|
| 698 |
+
},
|
| 699 |
+
{
|
| 700 |
+
"type": "text",
|
| 701 |
+
"text": "During SimGRACE training, a minibatch of $N$ graphs are randomly sampled and then they are fed into a GNN encoder $f(\\cdot ;\\theta)$ and its perturbed version $f(\\cdot ;\\theta^{\\prime})$ , resulting in two presentations for each graph and thus $2N$ representations in total. We re-denote $z,z^{\\prime}$ as $z_{n},z_{n}^{\\prime}$ for $n$ -th graph in the minibatch. Negative pairs are generated from the other $N - 1$ perturbed representations within the same mini-batch as in [5, 42, 56]. Denoting the cosine similarity function as $\\mathrm{sim}(z,z^{\\prime}) = z^{\\top}z^{\\prime} / \\| z\\| \\| z^{\\prime}\\|$ , the contrastive loss for the $n$ -th graph is defined as,",
|
| 702 |
+
"bbox": [
|
| 703 |
+
81,
|
| 704 |
+
284,
|
| 705 |
+
482,
|
| 706 |
+
407
|
| 707 |
+
],
|
| 708 |
+
"page_idx": 3
|
| 709 |
+
},
|
| 710 |
+
{
|
| 711 |
+
"type": "equation",
|
| 712 |
+
"text": "\n$$\n\\ell_ {n} = - \\log \\frac {\\exp \\left(\\operatorname {s i m} \\left(z _ {n} , z _ {n} ^ {\\prime}\\right)\\right) / \\tau)}{\\sum_ {n ^ {\\prime} = 1 , n ^ {\\prime} \\neq n} ^ {N} \\exp \\left(\\operatorname {s i m} \\left(z _ {n} , z _ {n ^ {\\prime}}\\right) / \\tau\\right)}, \\tag {4}\n$$\n",
|
| 713 |
+
"text_format": "latex",
|
| 714 |
+
"bbox": [
|
| 715 |
+
148,
|
| 716 |
+
412,
|
| 717 |
+
480,
|
| 718 |
+
448
|
| 719 |
+
],
|
| 720 |
+
"page_idx": 3
|
| 721 |
+
},
|
| 722 |
+
{
|
| 723 |
+
"type": "text",
|
| 724 |
+
"text": "where $\\tau$ is the temperature parameter. The final loss is computed across all positive pairs in the minibatch.",
|
| 725 |
+
"bbox": [
|
| 726 |
+
81,
|
| 727 |
+
450,
|
| 728 |
+
482,
|
| 729 |
+
478
|
| 730 |
+
],
|
| 731 |
+
"page_idx": 3
|
| 732 |
+
},
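The loss in Eq. (4) takes only a few lines to compute; the sketch below is a minimal PyTorch version, assuming z and z_prime are the (N, d) projected embeddings of the same N graphs under the two encoders. The temperature value and function name are illustrative.

    import torch
    import torch.nn.functional as F

    def nt_xent(z: torch.Tensor, z_prime: torch.Tensor, tau: float = 0.1) -> torch.Tensor:
        # z[n] and z_prime[n] form the positive pair for graph n; the other N-1
        # perturbed views in the mini-batch act as negatives (Eq. 4).
        z = F.normalize(z, dim=1)
        z_prime = F.normalize(z_prime, dim=1)
        sim = z @ z_prime.t() / tau                       # sim[n, m] = sim(z_n, z'_m) / tau
        positives = sim.diag()
        diag_mask = torch.eye(z.size(0), dtype=torch.bool, device=z.device)
        negatives = torch.logsumexp(sim.masked_fill(diag_mask, float("-inf")), dim=1)
        return (negatives - positives).mean()             # mean over n of -log(pos / sum of negatives)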
|
| 733 |
+
{
|
| 734 |
+
"type": "text",
|
| 735 |
+
"text": "3.2 Why can SimGRACE work well?",
|
| 736 |
+
"text_level": 1,
|
| 737 |
+
"bbox": [
|
| 738 |
+
83,
|
| 739 |
+
489,
|
| 740 |
+
390,
|
| 741 |
+
503
|
| 742 |
+
],
|
| 743 |
+
"page_idx": 3
|
| 744 |
+
},
|
| 745 |
+
{
|
| 746 |
+
"type": "text",
|
| 747 |
+
"text": "In order to understand why SimGRACE can work well, we first introduce the analysis tools from [43]. Specifically, they identify two key properties related to contrastive learning: alignment and uniformity and then propose two metrics to measure the quality of representations obtained via contrastive learning. One is the alignment metric which is straightforwardly defined with the expected distance between positive pairs:",
|
| 748 |
+
"bbox": [
|
| 749 |
+
81,
|
| 750 |
+
508,
|
| 751 |
+
482,
|
| 752 |
+
604
|
| 753 |
+
],
|
| 754 |
+
"page_idx": 3
|
| 755 |
+
},
|
| 756 |
+
{
|
| 757 |
+
"type": "equation",
|
| 758 |
+
"text": "\n$$\n\\ell_ {\\text {a l i g n}} (f; \\alpha) \\triangleq \\underset {(x, y) \\sim p _ {\\text {p o s}}} {\\mathbb {E}} \\left[ \\| f (x) - f (y) \\| _ {2} ^ {\\alpha} \\right], \\quad \\alpha > 0 \\tag {5}\n$$\n",
|
| 759 |
+
"text_format": "latex",
|
| 760 |
+
"bbox": [
|
| 761 |
+
125,
|
| 762 |
+
609,
|
| 763 |
+
480,
|
| 764 |
+
633
|
| 765 |
+
],
|
| 766 |
+
"page_idx": 3
|
| 767 |
+
},
|
| 768 |
+
{
|
| 769 |
+
"type": "text",
|
| 770 |
+
"text": "where $p_{\\mathrm{pos}}$ is the distribution of positive pairs (augmentations of the same sample). This metric is well aligned with the objective of contrastive learning: positive samples should stay close in the embedding space. Analogously, for our SimGRACE framework, we provide a modified metric for alignment,",
|
| 771 |
+
"bbox": [
|
| 772 |
+
81,
|
| 773 |
+
638,
|
| 774 |
+
482,
|
| 775 |
+
708
|
| 776 |
+
],
|
| 777 |
+
"page_idx": 3
|
| 778 |
+
},
|
| 779 |
+
{
|
| 780 |
+
"type": "equation",
|
| 781 |
+
"text": "\n$$\n\\ell_ {\\text {a l i g n}} (f; \\alpha) \\triangleq \\underset {x \\sim p _ {\\text {d a t a}}} {\\mathbb {E}} \\left[ \\| f (x; \\theta) - f (x; \\theta^ {\\prime}) \\| _ {2} ^ {\\alpha} \\right], \\quad \\alpha > 0 \\tag {6}\n$$\n",
|
| 782 |
+
"text_format": "latex",
|
| 783 |
+
"bbox": [
|
| 784 |
+
117,
|
| 785 |
+
710,
|
| 786 |
+
480,
|
| 787 |
+
734
|
| 788 |
+
],
|
| 789 |
+
"page_idx": 3
|
| 790 |
+
},
|
| 791 |
+
{
|
| 792 |
+
"type": "text",
|
| 793 |
+
"text": "where $p_{\\mathrm{data}}$ is the data distribution. We set $\\alpha = 2$ in our experiments. The other is the uniformity metric which is defined as the logarithm of the average pairwise Gaussian potential:",
|
| 794 |
+
"bbox": [
|
| 795 |
+
81,
|
| 796 |
+
737,
|
| 797 |
+
482,
|
| 798 |
+
779
|
| 799 |
+
],
|
| 800 |
+
"page_idx": 3
|
| 801 |
+
},
|
| 802 |
+
{
|
| 803 |
+
"type": "equation",
|
| 804 |
+
"text": "\n$$\n\\ell_ {\\text {u n i f o r m}} (f; \\alpha) \\triangleq \\log \\underset {x, y ^ {i, i, d}, p _ {\\text {d a t a}}} {\\mathbb {E}} \\left[ e ^ {- t \\| f (x; \\theta) - f (y; \\theta) \\| _ {2} ^ {2}} \\right]. \\quad t > 0 \\tag {7}\n$$\n",
|
| 805 |
+
"text_format": "latex",
|
| 806 |
+
"bbox": [
|
| 807 |
+
96,
|
| 808 |
+
782,
|
| 809 |
+
480,
|
| 810 |
+
825
|
| 811 |
+
],
|
| 812 |
+
"page_idx": 3
|
| 813 |
+
},
|
| 814 |
+
{
|
| 815 |
+
"type": "text",
|
| 816 |
+
"text": "In our experiments, we set $t = 2$ . The uniformity metric is also aligned with the objective of contrastive learning that the embeddings of random samples should scatter on the hypersphere. We take the checkpoints of SimGRACE, GraphCL and MoCL every 2 epochs during training and visualize the alignment $\\ell_{\\text{align}}$ and",
|
| 817 |
+
"bbox": [
|
| 818 |
+
81,
|
| 819 |
+
827,
|
| 820 |
+
482,
|
| 821 |
+
898
|
| 822 |
+
],
|
| 823 |
+
"page_idx": 3
|
| 824 |
+
},
|
| 825 |
+
{
|
| 826 |
+
"type": "image",
|
| 827 |
+
"img_path": "images/81d7a9242691bc678dd327d32ecad512cd3f37ea3d6ff3ba656b8fcb564af16a.jpg",
|
| 828 |
+
"image_caption": [
|
| 829 |
+
"Figure 3: $\\ell_{\\text{align}} - \\ell_{\\text{uniform}}$ plot for SimGRACE, GraphCL and MoCL on MUTAG dataset. The numbers around the points are the indexes of epochs. For both $\\ell_{\\text{align}}$ and $\\ell_{\\text{uniform}}$ , lower is better."
|
| 830 |
+
],
|
| 831 |
+
"image_footnote": [],
|
| 832 |
+
"bbox": [
|
| 833 |
+
521,
|
| 834 |
+
106,
|
| 835 |
+
913,
|
| 836 |
+
334
|
| 837 |
+
],
|
| 838 |
+
"page_idx": 3
|
| 839 |
+
},
|
| 840 |
+
{
|
| 841 |
+
"type": "text",
|
| 842 |
+
"text": "uniformity $\\ell_{\\text{uniform}}$ metrics in Figure 3. As can be observed, all the three methods can improve the alignment and uniformity. However, GraphCL achieves a smaller gain on the alignment than SimGRACE and MoCL. In other words, the positive pairs can not stay close in GraphCL because general graph data augmentations (drop edges, drop nodes and etc.) destroy the semantics of original graph data, which degrades the quality of the representations learned by GraphCL. Instead, MoCL augments graph data with domain knowledge as guidance and thus can preserve semantics during augmentation. Eventually, MoCL dramatically improves the alignment. Compared with GraphCL, SimGRACE can achieve better alignment while improving uniformity because encoder perturbation can preserve data semantics well. On the other hand, although MoCL achieves better alignment than SimGRACE via introducing domain knowledge as guidance, it only achieves a small gain on the uniformity, and eventually underperforms SimGRACE.",
|
| 843 |
+
"bbox": [
|
| 844 |
+
511,
|
| 845 |
+
414,
|
| 846 |
+
916,
|
| 847 |
+
637
|
| 848 |
+
],
|
| 849 |
+
"page_idx": 3
|
| 850 |
+
},
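The curves in Figure 3 come from evaluating Eqs. (6) and (7) on checkpointed encoders; the sketch below is a minimal PyTorch version of these two metrics with alpha = 2 and t = 2 as stated in the text, assuming the embeddings are L2-normalized onto the unit hypersphere as in [43].

    import torch
    import torch.nn.functional as F

    def alignment(h: torch.Tensor, h_prime: torch.Tensor, alpha: float = 2.0) -> torch.Tensor:
        # Eq. (6): expected distance between f(x; theta) and f(x; theta') over the data.
        h, h_prime = F.normalize(h, dim=1), F.normalize(h_prime, dim=1)
        return (h - h_prime).norm(p=2, dim=1).pow(alpha).mean()

    def uniformity(h: torch.Tensor, t: float = 2.0) -> torch.Tensor:
        # Eq. (7): log of the average pairwise Gaussian potential between embeddings.
        h = F.normalize(h, dim=1)
        return torch.pdist(h, p=2).pow(2).mul(-t).exp().mean().log()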
|
| 851 |
+
{
|
| 852 |
+
"type": "text",
|
| 853 |
+
"text": "3.3 AT-SimGRACE",
|
| 854 |
+
"text_level": 1,
|
| 855 |
+
"bbox": [
|
| 856 |
+
514,
|
| 857 |
+
656,
|
| 858 |
+
683,
|
| 859 |
+
669
|
| 860 |
+
],
|
| 861 |
+
"page_idx": 3
|
| 862 |
+
},
|
| 863 |
+
{
|
| 864 |
+
"type": "text",
|
| 865 |
+
"text": "Recently, GraphCL [56] shows that GNNs can gain robustness using their proposed framework. However, they did not explain why GraphCL can enhance the robustness. Additionally, GraphCL seems to be immunized to random attacks well while being unsatisfactory against adversarial attacks. In this section, we aim to utilize Adversarial Training (AT) [11, 23] to improve the adversarial robustness of SimGRACE in a principled way. Generally, AT directly incorporates adversarial examples into the training process to solve the following optimization problem:",
|
| 866 |
+
"bbox": [
|
| 867 |
+
511,
|
| 868 |
+
674,
|
| 869 |
+
915,
|
| 870 |
+
800
|
| 871 |
+
],
|
| 872 |
+
"page_idx": 3
|
| 873 |
+
},
|
| 874 |
+
{
|
| 875 |
+
"type": "equation",
|
| 876 |
+
"text": "\n$$\n\\min _ {\\theta} \\mathcal {L} ^ {\\prime} (\\theta), \\quad \\text {w h e r e} \\quad \\mathcal {L} ^ {\\prime} (\\theta) = \\frac {1}{n} \\sum_ {i = 1} ^ {n} \\max _ {\\left\\| \\mathbf {x} _ {i} ^ {\\prime} - \\mathbf {x} _ {i} \\right\\| _ {p} \\leq \\epsilon} \\ell_ {i} ^ {\\prime} (f (\\mathbf {x} _ {i} ^ {\\prime}; \\theta), y _ {i}), \\tag {8}\n$$\n",
|
| 877 |
+
"text_format": "latex",
|
| 878 |
+
"bbox": [
|
| 879 |
+
514,
|
| 880 |
+
819,
|
| 881 |
+
913,
|
| 882 |
+
867
|
| 883 |
+
],
|
| 884 |
+
"page_idx": 3
|
| 885 |
+
},
|
| 886 |
+
{
|
| 887 |
+
"type": "text",
|
| 888 |
+
"text": "where $n$ is the number of training examples, $\\mathbf{x}_i^{\\prime}$ is the adversarial example within the $\\epsilon$ -ball (bounded by an $L_{p}$ -norm) centered at",
|
| 889 |
+
"bbox": [
|
| 890 |
+
513,
|
| 891 |
+
867,
|
| 892 |
+
913,
|
| 893 |
+
897
|
| 894 |
+
],
|
| 895 |
+
"page_idx": 3
|
| 896 |
+
},
|
| 897 |
+
{
|
| 898 |
+
"type": "header",
|
| 899 |
+
"text": "WWW '22, April 25-29, 2022, Virtual Event, Lyon, France",
|
| 900 |
+
"bbox": [
|
| 901 |
+
83,
|
| 902 |
+
75,
|
| 903 |
+
354,
|
| 904 |
+
87
|
| 905 |
+
],
|
| 906 |
+
"page_idx": 3
|
| 907 |
+
},
|
| 908 |
+
{
|
| 909 |
+
"type": "header",
|
| 910 |
+
"text": "Jun Xia, et al.",
|
| 911 |
+
"bbox": [
|
| 912 |
+
846,
|
| 913 |
+
75,
|
| 914 |
+
911,
|
| 915 |
+
85
|
| 916 |
+
],
|
| 917 |
+
"page_idx": 3
|
| 918 |
+
},
|
| 919 |
+
{
|
| 920 |
+
"type": "text",
|
| 921 |
+
"text": "natural example $\\mathbf{x}_i, f$ is the DNN with weight $\\theta$ , $\\ell'(\\cdot)$ is the standard supervised classification loss (e.g., the cross-entropy loss), and $\\mathcal{L}'(\\theta)$ is called the \"adversarial loss\". However, above general framework of AT can not directly be applied in graph contrastive learning because (1) AT requires labels as supervision while labels are not available in graph contrastive learning; (2) Perturbing each graph for the dataset in an adversarial way will introduce heavy computational overhead, which has been pointed out in GROC [18]. To remedy the first issue, we substitute supervised classification loss in Eq. (8) with contrastive loss in Eq. (4). To tackle the second issue, instead of conducting adversarial transformation of graph data, we perturb the encoder in an adversarial way, which is more computationally efficient.",
|
| 922 |
+
"bbox": [
|
| 923 |
+
81,
|
| 924 |
+
106,
|
| 925 |
+
482,
|
| 926 |
+
286
|
| 927 |
+
],
|
| 928 |
+
"page_idx": 4
|
| 929 |
+
},
|
| 930 |
+
{
|
| 931 |
+
"type": "text",
|
| 932 |
+
"text": "Assuming that $\\Theta$ is the weight space of GNNs, for any $\\mathbf{w}$ and any positive $\\epsilon$ , we can define the norm ball in $\\theta$ with radius $\\epsilon$ centered at $\\mathbf{w}$ as,",
|
| 933 |
+
"bbox": [
|
| 934 |
+
81,
|
| 935 |
+
286,
|
| 936 |
+
480,
|
| 937 |
+
328
|
| 938 |
+
],
|
| 939 |
+
"page_idx": 4
|
| 940 |
+
},
|
| 941 |
+
{
|
| 942 |
+
"type": "equation",
|
| 943 |
+
"text": "\n$$\n\\mathbf {R} (\\mathbf {w}; \\epsilon) := \\{\\theta \\in \\Theta : \\| \\theta - \\mathbf {w} \\| \\leq \\epsilon \\}, \\tag {9}\n$$\n",
|
| 944 |
+
"text_format": "latex",
|
| 945 |
+
"bbox": [
|
| 946 |
+
176,
|
| 947 |
+
335,
|
| 948 |
+
480,
|
| 949 |
+
349
|
| 950 |
+
],
|
| 951 |
+
"page_idx": 4
|
| 952 |
+
},
|
| 953 |
+
{
|
| 954 |
+
"type": "text",
|
| 955 |
+
"text": "we choose $L_{2}$ norm to define the norm ball in our experiments. With this definition, we can now formulate our AT-SimGRACE as an optimization problem,",
|
| 956 |
+
"bbox": [
|
| 957 |
+
81,
|
| 958 |
+
358,
|
| 959 |
+
482,
|
| 960 |
+
401
|
| 961 |
+
],
|
| 962 |
+
"page_idx": 4
|
| 963 |
+
},
|
| 964 |
+
{
|
| 965 |
+
"type": "equation",
|
| 966 |
+
"text": "\n$$\n\\min _ {\\theta} \\mathcal {L} (\\theta + \\Delta),\n$$\n",
|
| 967 |
+
"text_format": "latex",
|
| 968 |
+
"bbox": [
|
| 969 |
+
227,
|
| 970 |
+
410,
|
| 971 |
+
334,
|
| 972 |
+
430
|
| 973 |
+
],
|
| 974 |
+
"page_idx": 4
|
| 975 |
+
},
|
| 976 |
+
{
|
| 977 |
+
"type": "text",
|
| 978 |
+
"text": "where $\\mathcal{L}(\\theta +\\Delta) = \\frac{1}{M}\\sum_{i = 1}^{M}\\max_{\\Delta \\in \\mathbb{R}(0;\\epsilon)}\\ell_i(f(\\mathcal{G}_i;\\theta +\\Delta),f(\\mathcal{G}_i;\\theta)),$ (10)",
|
| 979 |
+
"bbox": [
|
| 980 |
+
86,
|
| 981 |
+
433,
|
| 982 |
+
480,
|
| 983 |
+
482
|
| 984 |
+
],
|
| 985 |
+
"page_idx": 4
|
| 986 |
+
},
|
| 987 |
+
{
|
| 988 |
+
"type": "text",
|
| 989 |
+
"text": "where $M$ is the number of graphs in the dataset. We propose Algorithm 1 to solve this optimization problem. Specifically, for inner maximization, we forward $I$ steps to update $\\Delta$ in the direction of increasing the contrastive loss using gradient ascent algorithm. With the output perturbation $\\Delta$ of inner maximization, the outer loops update the weights $\\theta$ of GNNs with mini-batched SGD.",
|
| 990 |
+
"bbox": [
|
| 991 |
+
81,
|
| 992 |
+
483,
|
| 993 |
+
482,
|
| 994 |
+
566
|
| 995 |
+
],
|
| 996 |
+
"page_idx": 4
|
| 997 |
+
},
|
| 998 |
+
{
|
| 999 |
+
"type": "code",
|
| 1000 |
+
"sub_type": "algorithm",
|
| 1001 |
+
"code_caption": [
|
| 1002 |
+
"Algorithm 1: Encoder perturbation of AT-SimGRACE"
|
| 1003 |
+
],
|
| 1004 |
+
"code_body": "Data: Graph dataset $\\mathcal{D} = \\{\\mathcal{G}_1,\\mathcal{G}_2,\\dots,\\mathcal{G}_M\\}$ , contrastive loss $\\ell$ , batch size $N$ , initial encoder weights $\\theta$ , inner iterations $I$ , inner learning rate $\\zeta$ , outer learning rate $\\gamma$ , norm ball radius $\\epsilon$ . \nfor each mini-batch do \nSample $\\mathcal{D}_B = \\{\\mathcal{G}_i\\}_{i = 1}^N$ from $\\mathcal{D}$ . \nInitialize perturbation: $\\Delta \\leftarrow 0$ . \nfor $t = 0,1,2,\\ldots,I - 1$ do Update perturbation: $\\Delta \\gets \\Delta +\\zeta \\sum_{i = 1}^{N}\\nabla_{\\theta}\\ell_{i}\\left(f\\left(\\mathcal{G}_{i};\\theta +\\Delta\\right),f\\left(\\mathcal{G}_{i};\\theta\\right)\\right) / N;$ if $\\| \\Delta \\| _2 > \\epsilon$ then \nNormalize perturbation: $\\Delta \\gets \\epsilon \\Delta /\\| \\Delta \\| _2$ end \nend \nUpdate weights: \n $\\theta^{\\prime}\\gets \\theta -\\gamma \\sum_{i = 1}^{N}\\nabla_{\\theta}\\ell_{i}\\left(f\\left(\\mathcal{G}_{i};\\theta +\\Delta\\right),f\\left(\\mathcal{G}_{i};\\theta\\right)\\right) / N.$",
|
| 1005 |
+
"bbox": [
|
| 1006 |
+
84,
|
| 1007 |
+
611,
|
| 1008 |
+
483,
|
| 1009 |
+
862
|
| 1010 |
+
],
|
| 1011 |
+
"page_idx": 4
|
| 1012 |
+
},
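Below is a hedged PyTorch-style sketch of one mini-batch of Algorithm 1, assuming PyTorch 2.x (for torch.func.functional_call), that `encoder` already includes the projection head, that `graphs` is a batched graph object the encoder accepts, and that `nt_xent` is the contrastive loss of Eq. (4). Function and variable names are illustrative, not the authors' implementation.

    import torch
    from torch.func import functional_call

    def at_simgrace_step(encoder, graphs, nt_xent, inner_steps=3,
                         zeta=1e-3, gamma=1e-2, epsilon=1e-2):
        names = [n for n, _ in encoder.named_parameters()]
        theta = {n: p.detach() for n, p in encoder.named_parameters()}
        delta = {n: torch.zeros_like(p, requires_grad=True) for n, p in theta.items()}

        # Anchor view f(G; theta) is held fixed while Delta is optimized.
        with torch.no_grad():
            z_anchor = encoder(graphs)

        # Inner maximization: I gradient-ascent steps on Delta, projected onto the eps-ball.
        for _ in range(inner_steps):
            z_adv = functional_call(encoder, {n: theta[n] + delta[n] for n in names}, (graphs,))
            grads = torch.autograd.grad(nt_xent(z_adv, z_anchor),
                                        list(delta.values()), allow_unused=True)
            with torch.no_grad():
                for n, g in zip(names, grads):
                    if g is not None:
                        delta[n] += zeta * g            # ascend the contrastive loss
                norm = torch.sqrt(sum((d ** 2).sum() for d in delta.values()))
                if norm > epsilon:                      # keep ||Delta||_2 <= epsilon
                    for n in names:
                        delta[n] *= epsilon / norm

        # Outer minimization: one SGD step on theta with the found perturbation fixed.
        live = dict(encoder.named_parameters())
        z_adv = functional_call(encoder, {n: live[n] + delta[n].detach() for n in names}, (graphs,))
        loss = nt_xent(z_adv, encoder(graphs))
        loss.backward()
        with torch.no_grad():
            for p in encoder.parameters():
                if p.grad is not None:
                    p -= gamma * p.grad
                    p.grad = None
        return float(loss)

In the full method this step would be wrapped in a loop over mini-batches, with the perturbation re-initialized for each batch as in Algorithm 1.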
|
| 1013 |
+
{
|
| 1014 |
+
"type": "text",
|
| 1015 |
+
"text": "3.4 Theoretical Justification",
|
| 1016 |
+
"text_level": 1,
|
| 1017 |
+
"bbox": [
|
| 1018 |
+
514,
|
| 1019 |
+
104,
|
| 1020 |
+
756,
|
| 1021 |
+
119
|
| 1022 |
+
],
|
| 1023 |
+
"page_idx": 4
|
| 1024 |
+
},
|
| 1025 |
+
{
|
| 1026 |
+
"type": "text",
|
| 1027 |
+
"text": "In this section, we aim to explain the reasons why AT-SimGRACE can enhance the robustness of graph contrastive learning. To start, it is widely accepted that flatter loss landscape can bring robustness [3, 30, 44]. For example, as formulated in Eq. 8, adversarial training (AT) enhances robustness via restricting the change of loss when the input of models is perturbed indeed. Thus, we want to theoretically justify why AT-SimGRACE works via validating that AT-SimGRACE can flatten the loss landscape. Inspired by previous work [28] that connects sharpness of loss landscape and PAC-Bayes theory [24, 25], we utilize PAC-Bayes framework to derive guarantees on the expected error. Assuming that the prior distribution $P$ over the weights is a zero mean, $\\sigma^2$ variance Gaussian distribution, with probability at least $1 - \\delta$ over the draw of $M$ graphs, the expected error of the encoder can be bounded as:",
|
| 1028 |
+
"bbox": [
|
| 1029 |
+
511,
|
| 1030 |
+
125,
|
| 1031 |
+
913,
|
| 1032 |
+
318
|
| 1033 |
+
],
|
| 1034 |
+
"page_idx": 4
|
| 1035 |
+
},
|
| 1036 |
+
{
|
| 1037 |
+
"type": "equation",
|
| 1038 |
+
"text": "\n$$\n\\mathbb {E} _ {\\left\\{\\mathcal {G} _ {i} \\right\\} _ {i = 1} ^ {M}, \\Delta} [ \\mathcal {L} (\\theta + \\Delta) ] \\leq \\mathbb {E} _ {\\Delta} [ \\mathcal {L} (\\theta + \\Delta) ] + 4 \\sqrt {\\frac {K L (\\theta + \\Delta \\| P) + \\ln \\frac {2 M}{\\delta}}{M}}. \\tag {11}\n$$\n",
|
| 1039 |
+
"text_format": "latex",
|
| 1040 |
+
"bbox": [
|
| 1041 |
+
513,
|
| 1042 |
+
333,
|
| 1043 |
+
911,
|
| 1044 |
+
381
|
| 1045 |
+
],
|
| 1046 |
+
"page_idx": 4
|
| 1047 |
+
},
|
| 1048 |
+
{
|
| 1049 |
+
"type": "text",
|
| 1050 |
+
"text": "We choose $\\Delta$ as a zero mean spherical Gaussian perturbation with variance $\\sigma^2$ in every direction, and set the variance of the perturbation to the weight with respect to its magnitude $\\sigma = \\alpha \\| \\pmb{\\theta}\\|$ . Besides, we substitute $\\mathbb{E}_{\\Delta}[\\mathcal{L}(\\theta +\\Delta)]$ with $\\mathcal{L}(\\theta) + \\mathbb{E}_{\\Delta}[\\mathcal{L}(\\theta +\\Delta)] - \\mathcal{L}(\\theta)$ . Then, we can rewrite Eq. 11 as:",
|
| 1051 |
+
"bbox": [
|
| 1052 |
+
513,
|
| 1053 |
+
381,
|
| 1054 |
+
913,
|
| 1055 |
+
450
|
| 1056 |
+
],
|
| 1057 |
+
"page_idx": 4
|
| 1058 |
+
},
|
| 1059 |
+
{
|
| 1060 |
+
"type": "equation",
|
| 1061 |
+
"text": "\n$$\n\\mathbb {E} _ {\\{\\mathcal {G} _ {i} \\} _ {i = 1} ^ {M}, \\Delta} [ \\mathcal {L} (\\theta + \\Delta) ] \\leq \\mathcal {L} (\\theta) + \\underbrace {\\{\\mathbb {E} _ {\\Delta} [ \\mathcal {L} (\\theta + \\Delta) ] - \\mathcal {L} (\\theta) \\}}\n$$\n",
|
| 1062 |
+
"text_format": "latex",
|
| 1063 |
+
"bbox": [
|
| 1064 |
+
524,
|
| 1065 |
+
463,
|
| 1066 |
+
877,
|
| 1067 |
+
487
|
| 1068 |
+
],
|
| 1069 |
+
"page_idx": 4
|
| 1070 |
+
},
|
| 1071 |
+
{
|
| 1072 |
+
"type": "text",
|
| 1073 |
+
"text": "Expected sharpness",
|
| 1074 |
+
"bbox": [
|
| 1075 |
+
751,
|
| 1076 |
+
489,
|
| 1077 |
+
849,
|
| 1078 |
+
502
|
| 1079 |
+
],
|
| 1080 |
+
"page_idx": 4
|
| 1081 |
+
},
|
| 1082 |
+
{
|
| 1083 |
+
"type": "text",
|
| 1084 |
+
"text": "(12)",
|
| 1085 |
+
"bbox": [
|
| 1086 |
+
887,
|
| 1087 |
+
497,
|
| 1088 |
+
911,
|
| 1089 |
+
508
|
| 1090 |
+
],
|
| 1091 |
+
"page_idx": 4
|
| 1092 |
+
},
|
| 1093 |
+
{
|
| 1094 |
+
"type": "equation",
|
| 1095 |
+
"text": "\n$$\n+ 4 \\sqrt {\\frac {1}{M} \\left(\\frac {1}{2 \\alpha} + \\ln \\frac {2 M}{\\delta}\\right)}.\n$$\n",
|
| 1096 |
+
"text_format": "latex",
|
| 1097 |
+
"bbox": [
|
| 1098 |
+
712,
|
| 1099 |
+
506,
|
| 1100 |
+
854,
|
| 1101 |
+
541
|
| 1102 |
+
],
|
| 1103 |
+
"page_idx": 4
|
| 1104 |
+
},
|
| 1105 |
+
{
|
| 1106 |
+
"type": "text",
|
| 1107 |
+
"text": "It is obvious that $\\mathbb{E}_{\\Delta}[\\mathcal{L}(\\theta + \\Delta)] \\leq \\max_{\\Delta} [\\mathcal{L}(\\theta + \\Delta)]$ and the third term $4\\sqrt{\\frac{1}{M}\\left(\\frac{1}{2\\alpha} + \\ln \\frac{2M}{\\delta}\\right)}$ is a constant. Thus, AT-SimGRACE optimizes the worst-case of sharpness of loss landscape $\\max_{\\Delta} [\\mathcal{L}(\\theta + \\Delta)] - \\mathcal{L}(\\theta)$ to the bound of the expected error, which explains why AT-SimGRACE can enhance the robustness.",
|
| 1108 |
+
"bbox": [
|
| 1109 |
+
513,
|
| 1110 |
+
555,
|
| 1111 |
+
913,
|
| 1112 |
+
637
|
| 1113 |
+
],
|
| 1114 |
+
"page_idx": 4
|
| 1115 |
+
},
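Making the step in the preceding sentence explicit (this only combines Eqs. (11)-(12) with $\mathbb{E}_{\Delta}[\mathcal{L}(\theta+\Delta)] \leq \max_{\Delta}[\mathcal{L}(\theta+\Delta)]$; nothing new is assumed):

$$
\mathbb{E}_{\{\mathcal{G}_i\}_{i=1}^{M},\Delta}[\mathcal{L}(\theta+\Delta)]
\leq
\mathcal{L}(\theta)
+ \underbrace{\max_{\Delta \in \mathbf{R}(0;\epsilon)} \mathcal{L}(\theta+\Delta) - \mathcal{L}(\theta)}_{\text{worst-case sharpness}}
+ 4\sqrt{\frac{1}{M}\left(\frac{1}{2\alpha} + \ln \frac{2M}{\delta}\right)},
$$

so minimizing $\max_{\Delta \in \mathbf{R}(0;\epsilon)} \mathcal{L}(\theta+\Delta)$, which is exactly the AT-SimGRACE objective of Eq. (10), controls the first two terms of this bound while the third term stays constant.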
|
| 1116 |
+
{
|
| 1117 |
+
"type": "text",
|
| 1118 |
+
"text": "4 EXPERIMENTS",
|
| 1119 |
+
"text_level": 1,
|
| 1120 |
+
"bbox": [
|
| 1121 |
+
514,
|
| 1122 |
+
655,
|
| 1123 |
+
671,
|
| 1124 |
+
669
|
| 1125 |
+
],
|
| 1126 |
+
"page_idx": 4
|
| 1127 |
+
},
|
| 1128 |
+
{
|
| 1129 |
+
"type": "text",
|
| 1130 |
+
"text": "In this section, we conduct experiments to evaluate SimGRACE and AT-SimGRACE through answering the following research questions.",
|
| 1131 |
+
"bbox": [
|
| 1132 |
+
513,
|
| 1133 |
+
674,
|
| 1134 |
+
913,
|
| 1135 |
+
702
|
| 1136 |
+
],
|
| 1137 |
+
"page_idx": 4
|
| 1138 |
+
},
|
| 1139 |
+
{
|
| 1140 |
+
"type": "list",
|
| 1141 |
+
"sub_type": "text",
|
| 1142 |
+
"list_items": [
|
| 1143 |
+
"- RQ1. (Generalizability) Does SimGRACE outperform competitors in unsupervised and semi-supervised settings?",
|
| 1144 |
+
"- RQ2. (Transferability) Can GNNs pre-trained with SimGRACE show better transferability than competitors?",
|
| 1145 |
+
"- RQ3. (Robustness) Can AT-SimGRACE perform better than existing competitors against various adversarial attacks?",
|
| 1146 |
+
"- RQ4. (Efficiency) How about the efficiency (time and memory) of SimGRACE? Does it more efficient than competitors?",
|
| 1147 |
+
"- RQ5. (Hyperparameters Sensitivity) Is the proposed Sim-GRACE sensitive to hyperparameters like the magnitude of the perturbation $\\eta$ , training epochs and batch size?"
|
| 1148 |
+
],
|
| 1149 |
+
"bbox": [
|
| 1150 |
+
540,
|
| 1151 |
+
723,
|
| 1152 |
+
913,
|
| 1153 |
+
896
|
| 1154 |
+
],
|
| 1155 |
+
"page_idx": 4
|
| 1156 |
+
},
|
| 1157 |
+
{
|
| 1158 |
+
"type": "header",
|
| 1159 |
+
"text": "SimGRACE: A Simple Framework for Graph Contrastive Learning without Data Augmentation",
|
| 1160 |
+
"bbox": [
|
| 1161 |
+
83,
|
| 1162 |
+
75,
|
| 1163 |
+
532,
|
| 1164 |
+
87
|
| 1165 |
+
],
|
| 1166 |
+
"page_idx": 4
|
| 1167 |
+
},
|
| 1168 |
+
{
|
| 1169 |
+
"type": "header",
|
| 1170 |
+
"text": "WWW '22, April 25-29, 2022, Virtual Event, Lyon, France",
|
| 1171 |
+
"bbox": [
|
| 1172 |
+
640,
|
| 1173 |
+
75,
|
| 1174 |
+
913,
|
| 1175 |
+
87
|
| 1176 |
+
],
|
| 1177 |
+
"page_idx": 4
|
| 1178 |
+
},
|
| 1179 |
+
{
|
| 1180 |
+
"type": "table",
|
| 1181 |
+
"img_path": "images/85adb6d9bd5acff1f918b72b4ba256f9ff722d95d44fddd0a507731f27223ef5.jpg",
|
| 1182 |
+
"table_caption": [
|
| 1183 |
+
"Table 2: Comparing classification accuracy with baselines under the same experiment setting. The top three accuracy or rank for each dataset are emphasized in bold. AR denotes average rank. - indicates that results are not available in published papers."
|
| 1184 |
+
],
|
| 1185 |
+
"table_footnote": [],
|
| 1186 |
+
"table_body": "<table><tr><td>Methods</td><td>NCI1</td><td>PROTEINS</td><td>DD</td><td>MUTAG</td><td>COLLAB</td><td>RDT-B</td><td>RDT-M5K</td><td>IMDB-B</td><td>AR ↓</td></tr><tr><td>GL</td><td>-</td><td>-</td><td>-</td><td>81.66 ± 2.11</td><td>-</td><td>77.34 ± 0.18</td><td>41.01 ± 0.17</td><td>65.87 ± 0.98</td><td>8.3</td></tr><tr><td>WL</td><td>80.01 ± 0.50</td><td>72.92 ± 0.56</td><td>-</td><td>80.72 ± 3.00</td><td>-</td><td>68.82 ± 0.41</td><td>46.06 ± 0.21</td><td>72.30 ± 3.44</td><td>6.2</td></tr><tr><td>DGK</td><td>80.31 ± 0.46</td><td>73.30 ± 0.82</td><td>-</td><td>87.44 ± 2.72</td><td>-</td><td>78.04 ± 0.39</td><td>41.27 ± 0.18</td><td>66.96 ± 0.56</td><td>5.5</td></tr><tr><td>node2vec</td><td>54.89 ± 1.61</td><td>57.49 ± 3.57</td><td>-</td><td>72.63 ± 10.20</td><td>-</td><td>-</td><td>-</td><td>-</td><td>9.0</td></tr><tr><td>sub2vec</td><td>52.84 ± 1.47</td><td>53.03 ± 5.55</td><td>-</td><td>61.05 ± 15.80</td><td>-</td><td>71.48 ± 0.41</td><td>36.68 ± 0.42</td><td>55.26 ± 1.54</td><td>10.2</td></tr><tr><td>graph2vec</td><td>73.22 ± 1.81</td><td>73.30 ± 2.05</td><td>-</td><td>83.15 ± 9.25</td><td>-</td><td>75.78 ± 1.03</td><td>47.86 ± 0.26</td><td>71.10 ± 0.54</td><td>6.7</td></tr><tr><td>MVGRL</td><td>-</td><td>-</td><td>-</td><td>75.40 ± 7.80</td><td>-</td><td>82.00 ± 1.10</td><td>-</td><td>63.60 ± 4.20</td><td>8.3</td></tr><tr><td>InfoGraph</td><td>76.20 ± 1.06</td><td>74.44 ± 0.31</td><td>72.85 ± 1.78</td><td>89.01 ± 1.13</td><td>70.65 ± 1.13</td><td>82.50 ± 1.42</td><td>53.46 ± 1.03</td><td>73.03 ± 0.87</td><td>3.8</td></tr><tr><td>GraphCL</td><td>77.87 ± 0.41</td><td>74.39 ± 0.45</td><td>78.62 ± 0.40</td><td>86.80 ± 1.34</td><td>71.36 ± 1.15</td><td>89.53 ± 0.84</td><td>55.99 ± 0.28</td><td>71.14 ± 0.44</td><td>3.1</td></tr><tr><td>JOAO</td><td>78.07 ± 0.47</td><td>74.55 ± 0.41</td><td>77.32 ± 0.54</td><td>87.35 ± 1.02</td><td>69.50 ± 0.36</td><td>85.29 ± 1.35</td><td>55.74 ± 0.63</td><td>70.21 ± 3.08</td><td>4.3</td></tr><tr><td>JOAOv2</td><td>78.36 ± 0.53</td><td>74.07 ± 1.10</td><td>77.40 ± 1.15</td><td>87.67 ± 0.79</td><td>69.33 ± 0.34</td><td>86.42 ± 1.45</td><td>56.03 ± 0.27</td><td>70.83 ± 0.25</td><td>3.6</td></tr><tr><td>SimGRACE</td><td>79.12 ± 0.44</td><td>75.35 ± 0.09</td><td>77.44 ± 1.11</td><td>89.01 ± 1.31</td><td>71.72 ± 0.82</td><td>89.51 ± 0.89</td><td>55.91 ± 0.34</td><td>71.30 ± 0.77</td><td>2.0</td></tr></table>",
|
| 1187 |
+
"bbox": [
|
| 1188 |
+
91,
|
| 1189 |
+
146,
|
| 1190 |
+
906,
|
| 1191 |
+
351
|
| 1192 |
+
],
|
| 1193 |
+
"page_idx": 5
|
| 1194 |
+
},
|
| 1195 |
+
{
|
| 1196 |
+
"type": "text",
|
| 1197 |
+
"text": "4.1 Experimental Setup",
|
| 1198 |
+
"text_level": 1,
|
| 1199 |
+
"bbox": [
|
| 1200 |
+
84,
|
| 1201 |
+
372,
|
| 1202 |
+
287,
|
| 1203 |
+
387
|
| 1204 |
+
],
|
| 1205 |
+
"page_idx": 5
|
| 1206 |
+
},
|
| 1207 |
+
{
|
| 1208 |
+
"type": "text",
|
| 1209 |
+
"text": "4.1.1 Datasets. For unsupervised and semi-supervised learning, we use datasets from the benchmark TUDataset [26], including graph data for various social networks [2, 54] and biochemical molecules [9, 31]. For transfer learning, we perform pre-training on ZINC-2M and PPI-306K and finetune the model with various datasets including PPI, BBBP, ToxCast and SIDER.",
|
| 1210 |
+
"bbox": [
|
| 1211 |
+
84,
|
| 1212 |
+
390,
|
| 1213 |
+
480,
|
| 1214 |
+
472
|
| 1215 |
+
],
|
| 1216 |
+
"page_idx": 5
|
| 1217 |
+
},
|
| 1218 |
+
{
|
| 1219 |
+
"type": "text",
|
| 1220 |
+
"text": "4.1.2 Evaluation Protocols. Following previous works for graph-level self-supervised representation learning [38, 55, 56], we evaluate the generalizability of the learned representations on both unsupervised and semi-supervised settings. In unsupervised setting, we train SimGRACE using the whole dataset to learn graph representations and feed them into a downstream SVM classifier with 10-fold cross-validation. For semi-supervised setting, we pre-train GNNs with SimGRACE on all the data and did finetuning & evaluation with $K$ ( $K = \\frac{1}{\\text{label rate}}$ ) folds for datasets without the explicit training/validation/test split. For datasets with the train/validation/test split, we pre-train GNNs with the training data, finetuning on the partial training data and evaluation on the validation/test sets. More details can be seen in the appendix.",
|
| 1221 |
+
"bbox": [
|
| 1222 |
+
84,
|
| 1223 |
+
484,
|
| 1224 |
+
480,
|
| 1225 |
+
665
|
| 1226 |
+
],
|
| 1227 |
+
"page_idx": 5
|
| 1228 |
+
},
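A minimal scikit-learn sketch of the unsupervised protocol above (frozen graph embeddings fed into an SVM evaluated with 10-fold cross-validation); the C grid and kernel defaults are illustrative, not the paper's reported configuration.

    import numpy as np
    from sklearn.model_selection import GridSearchCV, StratifiedKFold, cross_val_score
    from sklearn.svm import SVC

    def evaluate_embeddings(emb: np.ndarray, labels: np.ndarray) -> float:
        # emb: (num_graphs, d) representations from the frozen, pre-trained encoder.
        svm = GridSearchCV(SVC(), {"C": [1e-3, 1e-2, 1e-1, 1, 10, 100, 1000]}, cv=5)
        folds = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)
        return cross_val_score(svm, emb, labels, cv=folds).mean()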
|
| 1229 |
+
{
|
| 1230 |
+
"type": "text",
|
| 1231 |
+
"text": "4.1.3 Compared baselines. We compare SimGRACE with state-of-the-arts graph kernel methods including GL [35], WL [34] and DGK [54]. Also, we compare SimGRACE with other graph self-supervised learning methods: GAE [20], node2vec [12], sub2vec [1], graph2vec [27], EdgePred [15], AttrMasking [15], ContextPred [15], Infomax (DGI) [45], InfoGraph [38] and instance-instance contrastive methods GraphCL [56], JOAO(v2) [55].",
|
| 1232 |
+
"bbox": [
|
| 1233 |
+
84,
|
| 1234 |
+
676,
|
| 1235 |
+
480,
|
| 1236 |
+
773
|
| 1237 |
+
],
|
| 1238 |
+
"page_idx": 5
|
| 1239 |
+
},
|
| 1240 |
+
{
|
| 1241 |
+
"type": "text",
|
| 1242 |
+
"text": "4.2 Unsupervised and semi-supervised learning (RQ1)",
|
| 1243 |
+
"text_level": 1,
|
| 1244 |
+
"bbox": [
|
| 1245 |
+
84,
|
| 1246 |
+
791,
|
| 1247 |
+
408,
|
| 1248 |
+
823
|
| 1249 |
+
],
|
| 1250 |
+
"page_idx": 5
|
| 1251 |
+
},
|
| 1252 |
+
{
|
| 1253 |
+
"type": "text",
|
| 1254 |
+
"text": "For unsupervised representation learning, as can be observed in Table 2, SimGRACE outperforms other baselines and always ranks top three on all the datasets. Generally, SimGRACE performs better on biochemical molecules compared with data augmentation based methods. The reason is that the semantics of molecular graphs are",
|
| 1255 |
+
"bbox": [
|
| 1256 |
+
84,
|
| 1257 |
+
825,
|
| 1258 |
+
480,
|
| 1259 |
+
895
|
| 1260 |
+
],
|
| 1261 |
+
"page_idx": 5
|
| 1262 |
+
},
|
| 1263 |
+
{
|
| 1264 |
+
"type": "text",
|
| 1265 |
+
"text": "more fragile compared with social networks. General augmentations (drop nodes, drop edges and etc.) adopted in other baselines will not alter the semantics of social networks dramatically. For semi-supervised task, as can be observed in Table 4, we report two semi-supervised tasks with $1\\%$ and $10\\%$ label rate respectively. In $1\\%$ setting, SimGRACE outperforms previous baselines by a large margin or matching the performance of SOTA methods. For $10\\%$ setting, SimGRACE performs comparably to SOTA methods including GraphCL and JOAO(v2) whose augmentations are derived via expensive trial-and-errors or cumbersome search.",
|
| 1266 |
+
"bbox": [
|
| 1267 |
+
517,
|
| 1268 |
+
372,
|
| 1269 |
+
913,
|
| 1270 |
+
510
|
| 1271 |
+
],
|
| 1272 |
+
"page_idx": 5
|
| 1273 |
+
},
|
| 1274 |
+
{
|
| 1275 |
+
"type": "text",
|
| 1276 |
+
"text": "4.3 Transferability (RQ2)",
|
| 1277 |
+
"text_level": 1,
|
| 1278 |
+
"bbox": [
|
| 1279 |
+
517,
|
| 1280 |
+
532,
|
| 1281 |
+
732,
|
| 1282 |
+
547
|
| 1283 |
+
],
|
| 1284 |
+
"page_idx": 5
|
| 1285 |
+
},
|
| 1286 |
+
{
|
| 1287 |
+
"type": "text",
|
| 1288 |
+
"text": "To evaluate the transferability of the pre-training scheme, we conduct experiments on transfer learning on molecular property prediction in chemistry and protein function prediction in biology following previous works [15, 50, 56]. Specifically, we pre-train and finetune the models with different datasets. For pre-training, learning rate is tuned in $\\{0.01, 0.1, 1.0\\}$ and epoch number in $\\{20, 40, 60, 80, 100\\}$ where grid serach is performed. As sketched in Table 3, there is no universally beneficial pre-training scheme especially for the out-of-distribution scenario in transfer learning. However, SimGRACE shows competitive or better transferability than other pre-training schemes, especially on PPI dataset.",
|
| 1289 |
+
"bbox": [
|
| 1290 |
+
517,
|
| 1291 |
+
551,
|
| 1292 |
+
924,
|
| 1293 |
+
703
|
| 1294 |
+
],
|
| 1295 |
+
"page_idx": 5
|
| 1296 |
+
},
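A tiny sketch of the grid search described above over the pre-training learning rate and epoch number; `pretrain_and_finetune` is a hypothetical stand-in for the actual pre-training plus finetuning pipeline and is assumed to return a validation score.

    from itertools import product

    def grid_search(pretrain_and_finetune):
        # Hypothetical callable: returns validation ROC-AUC for one (lr, epochs) setting.
        best = None
        for lr, epochs in product([0.01, 0.1, 1.0], [20, 40, 60, 80, 100]):
            score = pretrain_and_finetune(lr=lr, epochs=epochs)
            if best is None or score > best[0]:
                best = (score, lr, epochs)
        return best   # (best score, best lr, best epoch number)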
|
| 1297 |
+
{
|
| 1298 |
+
"type": "text",
|
| 1299 |
+
"text": "4.4 Adversarial robustness (RQ3)",
|
| 1300 |
+
"text_level": 1,
|
| 1301 |
+
"bbox": [
|
| 1302 |
+
517,
|
| 1303 |
+
724,
|
| 1304 |
+
794,
|
| 1305 |
+
739
|
| 1306 |
+
],
|
| 1307 |
+
"page_idx": 5
|
| 1308 |
+
},
|
| 1309 |
+
{
|
| 1310 |
+
"type": "text",
|
| 1311 |
+
"text": "Following previous works [7, 56], we perform on synthetic data to classify the component number in graphs, facing the RandSampling, GradArgmax and RL-S2V attacks, to evaluate the robustness of AT-SimGRACE. To keep fair, we adopt Structure2vec [6] as the GNN encoder as in [7, 56]. Besides, we pretrain the GNN encoder for 150 epochs because it takes longer time for the convergence of adversarial training. We set the inner learning rate $\\zeta = 0.001$ and the radius of perturbation ball $\\epsilon = 0.01$ . As demonstrated in Table 5, AT-SimGRACE boosts the robustness of GNNs dramatically compared with training from scratch and GraphCL under three typical evasion attacks.",
|
| 1312 |
+
"bbox": [
|
| 1313 |
+
517,
|
| 1314 |
+
743,
|
| 1315 |
+
911,
|
| 1316 |
+
895
|
| 1317 |
+
],
|
| 1318 |
+
"page_idx": 5
|
| 1319 |
+
},
|
| 1320 |
+
{
|
| 1321 |
+
"type": "header",
|
| 1322 |
+
"text": "WWW '22, April 25-29, 2022, Virtual Event, Lyon, France",
|
| 1323 |
+
"bbox": [
|
| 1324 |
+
84,
|
| 1325 |
+
75,
|
| 1326 |
+
354,
|
| 1327 |
+
87
|
| 1328 |
+
],
|
| 1329 |
+
"page_idx": 5
|
| 1330 |
+
},
|
| 1331 |
+
{
|
| 1332 |
+
"type": "header",
|
| 1333 |
+
"text": "Jun Xia, et al.",
|
| 1334 |
+
"bbox": [
|
| 1335 |
+
846,
|
| 1336 |
+
75,
|
| 1337 |
+
911,
|
| 1338 |
+
85
|
| 1339 |
+
],
|
| 1340 |
+
"page_idx": 5
|
| 1341 |
+
},
|
| 1342 |
+
{
|
| 1343 |
+
"type": "table",
|
| 1344 |
+
"img_path": "images/a953d62a2be59cc79864984bec4082b57d056d414391f6ed10047b190d7da770.jpg",
|
| 1345 |
+
"table_caption": [
|
| 1346 |
+
"Table 3: Results for transfer learning setting. We report the mean (and standard deviation) ROC-AUC of 3 seeds with scaffold splitting. The top-3 accuracy for each dataset are emphasized in bold."
|
| 1347 |
+
],
|
| 1348 |
+
"table_footnote": [],
|
| 1349 |
+
"table_body": "<table><tr><td>Pre-Train dataset</td><td>PPI-306K</td><td colspan=\"9\">ZINC 2M</td></tr><tr><td>Fine-Tune dataset</td><td>PPI</td><td>Tox21</td><td>ToxCast</td><td>Sider</td><td>ClinTox</td><td>MUV</td><td>HIV</td><td>BBBP</td><td>Bace</td><td>Average</td></tr><tr><td>No Pre-Train</td><td>64.8(1.0)</td><td>74.6 (0.4)</td><td>61.7 (0.5)</td><td>58.2 (1.7)</td><td>58.4 (6.4)</td><td>70.7 (1.8)</td><td>75.5 (0.8)</td><td>65.7 (3.3)</td><td>72.4 (3.8)</td><td>67.15</td></tr><tr><td>EdgePred</td><td>65.7(1.3)</td><td>76.0 (0.6)</td><td>64.1 (0.6)</td><td>60.4 (0.7)</td><td>64.1 (3.7)</td><td>75.1 (1.2)</td><td>76.3 (1.0)</td><td>67.3 (2.4)</td><td>77.3 (3.5)</td><td>70.08</td></tr><tr><td>AttrMasking</td><td>65.2(1.6)</td><td>75.1 (0.9)</td><td>63.3 (0.6)</td><td>60.5 (0.9)</td><td>73.5 (4.3)</td><td>75.8 (1.0)</td><td>75.3 (1.5)</td><td>65.2 (1.4)</td><td>77.8 (1.8)</td><td>70.81</td></tr><tr><td>ContextPred</td><td>64.4(1.3)</td><td>73.6 (0.3)</td><td>62.6 (0.6)</td><td>59.7 (1.8)</td><td>74.0 (3.4)</td><td>72.5 (1.5)</td><td>75.6 (1.0)</td><td>70.6 (1.5)</td><td>78.8 (1.2)</td><td>70.93</td></tr><tr><td>GraphCL</td><td>67.88(0.85)</td><td>75.1 (0.7)</td><td>63.0 (0.4)</td><td>59.8 (1.3)</td><td>77.5 (3.8)</td><td>76.4 (0.4)</td><td>75.1 (0.7)</td><td>67.8 (2.4)</td><td>74.6 (2.1)</td><td>71.16</td></tr><tr><td>JOAO</td><td>64.43(1.38)</td><td>74.8 (0.6)</td><td>62.8 (0.7)</td><td>60.4 (1.5)</td><td>66.6 (3.1)</td><td>76.6 (1.7)</td><td>76.9 (0.7)</td><td>66.4 (1.0)</td><td>73.2 (1.6)</td><td>69.71</td></tr><tr><td>SimGRACE</td><td>70.25(1.22)</td><td>75.6 (0.5)</td><td>63.4 (0.5)</td><td>60.6 (1.0)</td><td>75.6 (3.0)</td><td>76.9 (1.3)</td><td>75.2 (0.9)</td><td>71.3 (0.9)</td><td>75.0 (1.7)</td><td>71.70</td></tr></table>",
|
| 1350 |
+
"bbox": [
|
| 1351 |
+
147,
|
| 1352 |
+
145,
|
| 1353 |
+
849,
|
| 1354 |
+
268
|
| 1355 |
+
],
|
| 1356 |
+
"page_idx": 6
|
| 1357 |
+
},
|
| 1358 |
+
{
|
| 1359 |
+
"type": "table",
|
| 1360 |
+
"img_path": "images/bd96a15dcc5297e7e6148f9ff21b931e97ed02538c7564e28818a2027eb5b526.jpg",
|
| 1361 |
+
"table_caption": [
|
| 1362 |
+
"Table 4: Comparing classification accuracy with baselines under the same semi-supervised setting. The top three accuracy or rank are emphasized in bold. - indicates that label rate is too low for a given dataset size. LR and AR are short for label rate and average rank respectively."
|
| 1363 |
+
],
|
| 1364 |
+
"table_footnote": [],
|
| 1365 |
+
"table_body": "<table><tr><td>LR</td><td>Methods</td><td>NCI1</td><td>PROTEINS</td><td>DD</td><td>COLLAB</td><td>RDT-B</td><td>RDT-M5K</td><td>AR ↓</td></tr><tr><td rowspan=\"9\">1%</td><td>No pre-train.</td><td>60.72 ± 0.45</td><td>-</td><td>-</td><td>57.46 ± 0.25</td><td>-</td><td>-</td><td>8.5</td></tr><tr><td>Augmentations</td><td>60.49 ± 0.46</td><td>-</td><td>-</td><td>58.40 ± 0.97</td><td>-</td><td>-</td><td>8.0</td></tr><tr><td>GAE</td><td>61.63 ± 0.84</td><td>-</td><td>-</td><td>63.20 ± 0.67</td><td>-</td><td>-</td><td>5.5</td></tr><tr><td>Infomax</td><td>62.72 ± 0.65</td><td>-</td><td>-</td><td>61.70 ± 0.77</td><td>-</td><td>-</td><td>4.0</td></tr><tr><td>ContextPred</td><td>61.21 ± 0.77</td><td>-</td><td>-</td><td>57.60 ± 2.07</td><td>-</td><td>-</td><td>7.5</td></tr><tr><td>GraphCL</td><td>62.55 ± 0.86</td><td>-</td><td>-</td><td>64.57 ± 1.15</td><td>-</td><td>-</td><td>2.0</td></tr><tr><td>JOAO</td><td>61.97 ± 0.72</td><td>-</td><td>-</td><td>63.71 ± 0.84</td><td>-</td><td>-</td><td>4.5</td></tr><tr><td>JOAOv2</td><td>62.52 ± 1.16</td><td>-</td><td>-</td><td>64.51 ± 2.21</td><td>-</td><td>-</td><td>3.0</td></tr><tr><td>SimGRACE</td><td>64.21 ± 0.65</td><td>-</td><td>-</td><td>64.28 ± 0.98</td><td>-</td><td>-</td><td>2.0</td></tr><tr><td rowspan=\"9\">10%</td><td>No pre-train.</td><td>73.72 ± 0.24</td><td>70.40 ± 1.54</td><td>73.56 ± 0.41</td><td>73.71 ± 0.27</td><td>86.63 ± 0.27</td><td>51.33 ± 0.44</td><td>7.7</td></tr><tr><td>Augmentations</td><td>73.59 ± 0.32</td><td>70.29 ± 0.64</td><td>74.30 ± 0.81</td><td>74.19 ± 0.13</td><td>87.74 ± 0.39</td><td>52.01 ± 0.20</td><td>7.0</td></tr><tr><td>GAE</td><td>74.36 ± 0.24</td><td>70.51 ± 0.17</td><td>74.54 ± 0.68</td><td>75.09 ± 0.19</td><td>87.69 ± 0.40</td><td>33.58 ± 0.13</td><td>6.3</td></tr><tr><td>Infomax</td><td>74.86± 0.26</td><td>72.27 ± 0.40</td><td>75.78 ± 0.34</td><td>73.76 ± 0.29</td><td>88.66 ± 0.95</td><td>53.61 ± 0.31</td><td>3.7</td></tr><tr><td>ContextPred</td><td>73.00 ± 0.30</td><td>70.23 ± 0.63</td><td>74.66 ± 0.51</td><td>73.69 ± 0.37</td><td>84.76 ± 0.52</td><td>51.23 ± 0.84</td><td>8.3</td></tr><tr><td>GraphCL</td><td>74.63± 0.25</td><td>74.17± 0.34</td><td>76.17± 1.37</td><td>74.23 ± 0.21</td><td>89.11± 0.19</td><td>52.55 ± 0.45</td><td>2.8</td></tr><tr><td>JOAO</td><td>74.48 ± 0.27</td><td>72.13 ± 0.92</td><td>75.69 ± 0.67</td><td>75.30 ± 0.32</td><td>88.14 ± 0.25</td><td>52.83± 0.54</td><td>4.2</td></tr><tr><td>JOAOv2</td><td>74.86± 0.39</td><td>73.31± 0.48</td><td>75.81± 0.73</td><td>75.53± 0.18</td><td>88.79± 0.65</td><td>52.71 ± 0.28</td><td>2.5</td></tr><tr><td>SimGRACE</td><td>74.60 ± 0.41</td><td>74.03 ± 0.51</td><td>76.48 ± 0.52</td><td>74.74 ± 0.28</td><td>88.86 ± 0.62</td><td>53.97 ± 0.64</td><td>2.3</td></tr></table>",
|
| 1366 |
+
"bbox": [
|
| 1367 |
+
96,
|
| 1368 |
+
337,
|
| 1369 |
+
901,
|
| 1370 |
+
642
|
| 1371 |
+
],
|
| 1372 |
+
"page_idx": 6
|
| 1373 |
+
},
|
| 1374 |
+
{
|
| 1375 |
+
"type": "table",
|
| 1376 |
+
"img_path": "images/0c126c093c25055d91b73fcc592fef3110767005b540d499369cb460c5055a6a.jpg",
|
| 1377 |
+
"table_caption": [
|
| 1378 |
+
"Table 5: Performance under three adversarial attacks for GNN with different depth following the protocols in [7]."
|
| 1379 |
+
],
|
| 1380 |
+
"table_footnote": [],
|
| 1381 |
+
"table_body": "<table><tr><td rowspan=\"2\">Methods</td><td colspan=\"3\">Two-Layer</td><td colspan=\"3\">Three-Layer</td><td colspan=\"3\">Four-Layer</td></tr><tr><td>No Pre-Train</td><td>GraphCL</td><td>AT-SimGRACE</td><td>No Pre-Train</td><td>GraphCL</td><td>AT-SimGRACE</td><td>No Pre-Train</td><td>GraphCL</td><td>AT-SimGRACE</td></tr><tr><td>Unattack</td><td>93.20</td><td>94.73</td><td>94.24</td><td>98.20</td><td>98.33</td><td>99.32</td><td>98.87</td><td>99.00</td><td>99.13</td></tr><tr><td>RandSampling</td><td>78.73</td><td>80.68</td><td>81.73</td><td>92.27</td><td>92.60</td><td>94.27</td><td>95.13</td><td>97.40</td><td>97.67</td></tr><tr><td>GradArgmax</td><td>69.47</td><td>69.26</td><td>75.13</td><td>64.60</td><td>89.33</td><td>93.00</td><td>95.80</td><td>97.00</td><td>96.60</td></tr><tr><td>RL-S2V</td><td>42.93</td><td>42.20</td><td>44.86</td><td>41.93</td><td>61.66</td><td>66.00</td><td>70.20</td><td>84.86</td><td>85.29</td></tr></table>",
|
| 1382 |
+
"bbox": [
|
| 1383 |
+
94,
|
| 1384 |
+
681,
|
| 1385 |
+
903,
|
| 1386 |
+
780
|
| 1387 |
+
],
|
| 1388 |
+
"page_idx": 6
|
| 1389 |
+
},
|
| 1390 |
+
{
|
| 1391 |
+
"type": "text",
|
| 1392 |
+
"text": "4.5 Efficiency (Training time and memory cost) (RQ4)",
|
| 1393 |
+
"text_level": 1,
|
| 1394 |
+
"bbox": [
|
| 1395 |
+
81,
|
| 1396 |
+
797,
|
| 1397 |
+
480,
|
| 1398 |
+
830
|
| 1399 |
+
],
|
| 1400 |
+
"page_idx": 6
|
| 1401 |
+
},
|
| 1402 |
+
{
|
| 1403 |
+
"type": "text",
|
| 1404 |
+
"text": "In Table 6, we compare the performance of SimGRACE with the state-of-the-arts methods including GraphCL and JOAOv2 in terms of their training time and the memory overhead. Here, the training time refers to the time for pre-training stage of the semi-supervised",
|
| 1405 |
+
"bbox": [
|
| 1406 |
+
81,
|
| 1407 |
+
833,
|
| 1408 |
+
482,
|
| 1409 |
+
891
|
| 1410 |
+
],
|
| 1411 |
+
"page_idx": 6
|
| 1412 |
+
},
|
| 1413 |
+
{
|
| 1414 |
+
"type": "text",
|
| 1415 |
+
"text": "task and the memory overhead refers to total memory costs of model parameters and all hidden representations of a batch. As can be observed, SimGRACE runs near 40-90 times faster than JOAOv2 and 2.5-4 times faster than GraphCL. If we take the time for manual trial-and-error in GraphCL into consideration, the superiority of SimGRACE will be more pronounced. Also, SimGRACE requires less",
|
| 1416 |
+
"bbox": [
|
| 1417 |
+
511,
|
| 1418 |
+
799,
|
| 1419 |
+
915,
|
| 1420 |
+
885
|
| 1421 |
+
],
|
| 1422 |
+
"page_idx": 6
|
| 1423 |
+
},
|
| 1424 |
+
{
|
| 1425 |
+
"type": "header",
|
| 1426 |
+
"text": "SimGRACE: A Simple Framework for Graph Contrastive Learning without Data Augmentation",
|
| 1427 |
+
"bbox": [
|
| 1428 |
+
83,
|
| 1429 |
+
75,
|
| 1430 |
+
532,
|
| 1431 |
+
87
|
| 1432 |
+
],
|
| 1433 |
+
"page_idx": 6
|
| 1434 |
+
},
|
| 1435 |
+
{
|
| 1436 |
+
"type": "header",
|
| 1437 |
+
"text": "WWW '22, April 25-29, 2022, Virtual Event, Lyon, France",
|
| 1438 |
+
"bbox": [
|
| 1439 |
+
640,
|
| 1440 |
+
75,
|
| 1441 |
+
913,
|
| 1442 |
+
87
|
| 1443 |
+
],
|
| 1444 |
+
"page_idx": 6
|
| 1445 |
+
},
|
| 1446 |
+
{
|
| 1447 |
+
"type": "image",
|
| 1448 |
+
"img_path": "images/6ba22d9bb72918007e14c8d857f144233ab066d18359d6c586e4080fa752fa0b.jpg",
|
| 1449 |
+
"image_caption": [
|
| 1450 |
+
"(a) NCI1"
|
| 1451 |
+
],
|
| 1452 |
+
"image_footnote": [],
|
| 1453 |
+
"bbox": [
|
| 1454 |
+
89,
|
| 1455 |
+
109,
|
| 1456 |
+
290,
|
| 1457 |
+
224
|
| 1458 |
+
],
|
| 1459 |
+
"page_idx": 7
|
| 1460 |
+
},
|
| 1461 |
+
{
|
| 1462 |
+
"type": "image",
|
| 1463 |
+
"img_path": "images/6b0d201eaf6d4ea3d92b9941921552abe9ba25a429304de33eb1d2106cb64a5f.jpg",
|
| 1464 |
+
"image_caption": [
|
| 1465 |
+
"(b) MUTAG"
|
| 1466 |
+
],
|
| 1467 |
+
"image_footnote": [],
|
| 1468 |
+
"bbox": [
|
| 1469 |
+
297,
|
| 1470 |
+
109,
|
| 1471 |
+
496,
|
| 1472 |
+
224
|
| 1473 |
+
],
|
| 1474 |
+
"page_idx": 7
|
| 1475 |
+
},
|
| 1476 |
+
{
|
| 1477 |
+
"type": "image",
|
| 1478 |
+
"img_path": "images/cf7face63c4bde16019003c3c56f91f1f0f437a0300d33bf2838faed75195800.jpg",
|
| 1479 |
+
"image_caption": [
|
| 1480 |
+
"(c) COLLAB"
|
| 1481 |
+
],
|
| 1482 |
+
"image_footnote": [],
|
| 1483 |
+
"bbox": [
|
| 1484 |
+
504,
|
| 1485 |
+
112,
|
| 1486 |
+
702,
|
| 1487 |
+
226
|
| 1488 |
+
],
|
| 1489 |
+
"page_idx": 7
|
| 1490 |
+
},
|
| 1491 |
+
{
|
| 1492 |
+
"type": "image",
|
| 1493 |
+
"img_path": "images/fe79e4921d642174e82412344be9fa358d8905a20ab1c492ace79e96fea5c3d1.jpg",
|
| 1494 |
+
"image_caption": [
|
| 1495 |
+
"(d) RDT-5K",
|
| 1496 |
+
"Figure 4: Performance versus magnitude of the perturbation $(\\eta)$ in unsupervised representation learning task."
|
| 1497 |
+
],
|
| 1498 |
+
"image_footnote": [],
|
| 1499 |
+
"bbox": [
|
| 1500 |
+
707,
|
| 1501 |
+
114,
|
| 1502 |
+
906,
|
| 1503 |
+
227
|
| 1504 |
+
],
|
| 1505 |
+
"page_idx": 7
|
| 1506 |
+
},
|
| 1507 |
+
{
|
| 1508 |
+
"type": "table",
|
| 1509 |
+
"img_path": "images/272bf0c5b4a2bdbcb5293ca2870ea33610b92571be12ca35771b08f76c62f11b.jpg",
|
| 1510 |
+
"table_caption": [
|
| 1511 |
+
"Table 6: Comparisons of efficiency on three graph datasets. Note that we do not take the time for manual trial-and- errors of GraphCL into consideration. In fact, picking the suitable augmentations manually for GraphCL is much more time-consuming. All the three methods are evaluated on a 32GB V100 GPU."
|
| 1512 |
+
],
|
| 1513 |
+
"table_footnote": [],
|
| 1514 |
+
"table_body": "<table><tr><td>Dataset</td><td>Algorithm</td><td>Training Time</td><td>Memory</td></tr><tr><td rowspan=\"3\">PROTEINS</td><td>GraphCL</td><td>111s</td><td>1231MB</td></tr><tr><td>JOAOv2</td><td>4088s</td><td>1403MB</td></tr><tr><td>SimGRACE</td><td>46 s</td><td>1175 MB</td></tr><tr><td rowspan=\"3\">COLLAB</td><td>GraphCL</td><td>1033s</td><td>10199MB</td></tr><tr><td>JOAOv2</td><td>10742s</td><td>7303MB</td></tr><tr><td>SimGRACE</td><td>378 s</td><td>6547 MB</td></tr><tr><td rowspan=\"3\">RDT-B</td><td>GraphCL</td><td>917s</td><td>4135MB</td></tr><tr><td>JOAOv2</td><td>10278s</td><td>3935MB</td></tr><tr><td>SimGRACE</td><td>280 s</td><td>2729 MB</td></tr></table>",
|
| 1515 |
+
"bbox": [
|
| 1516 |
+
86,
|
| 1517 |
+
393,
|
| 1518 |
+
480,
|
| 1519 |
+
547
|
| 1520 |
+
],
|
| 1521 |
+
"page_idx": 7
|
| 1522 |
+
},
|
| 1523 |
+
{
|
| 1524 |
+
"type": "text",
|
| 1525 |
+
"text": "computational memory than GraphCL and JOAOv2. In particular, the efficiency of SimGRACE can be more prominent on large-scale social graphs, such as COLLAB and RDT-B.",
|
| 1526 |
+
"bbox": [
|
| 1527 |
+
81,
|
| 1528 |
+
554,
|
| 1529 |
+
482,
|
| 1530 |
+
597
|
| 1531 |
+
],
|
| 1532 |
+
"page_idx": 7
|
| 1533 |
+
},
|
| 1534 |
+
{
|
| 1535 |
+
"type": "text",
|
| 1536 |
+
"text": "4.6 Hyper-parameters sensitivity analysis (RQ5)",
|
| 1537 |
+
"text_level": 1,
|
| 1538 |
+
"bbox": [
|
| 1539 |
+
81,
|
| 1540 |
+
625,
|
| 1541 |
+
436,
|
| 1542 |
+
657
|
| 1543 |
+
],
|
| 1544 |
+
"page_idx": 7
|
| 1545 |
+
},
|
| 1546 |
+
{
|
| 1547 |
+
"type": "text",
|
| 1548 |
+
"text": "4.6.1 Magnitude of the perturbation. As can be observed in Figure 4, weight perturbation is crucial in SimGRACE. If we set the magnitude of the perturbation as zero ( $\\eta = 0$ ), the performance is usually the lowest compared with other settings of perturbation across these four datasets. This observation aligns with our intuition. Without perturbation, SimGRACE simply compares two original samples as a negative pair while the positive pair loss becomes zero, leading to homogeneously pushes all graph representations away from each other, which is non-intuitive to justify. Instead, appropriate perturbations enforce the model to learn representations invariant to the perturbations through maximizing the agreement between a graph and its perturbation. Besides, well aligned with previous works [14, 32] that claim \"hard\" positive pairs and negative pairs can boost the performance of contrastive learning, we can observe that larger magnitude (within an appropriate range) of the perturbation can bring consistent improvement of the performance. However, over-large perturbations will lead to",
|
| 1549 |
+
"bbox": [
|
| 1550 |
+
81,
|
| 1551 |
+
660,
|
| 1552 |
+
482,
|
| 1553 |
+
896
|
| 1554 |
+
],
|
| 1555 |
+
"page_idx": 7
|
| 1556 |
+
},
|
| 1557 |
+
{
|
| 1558 |
+
"type": "text",
|
| 1559 |
+
"text": "performance degradation because the semantics of graph data are not preserved.",
|
| 1560 |
+
"bbox": [
|
| 1561 |
+
513,
|
| 1562 |
+
299,
|
| 1563 |
+
911,
|
| 1564 |
+
327
|
| 1565 |
+
],
|
| 1566 |
+
"page_idx": 7
|
| 1567 |
+
},
|
| 1568 |
+
{
|
| 1569 |
+
"type": "image",
|
| 1570 |
+
"img_path": "images/b9eb16835a0567b5f88142e71b7c98098c9ea4a907f98cc1abf1f29ad6978122.jpg",
|
| 1571 |
+
"image_caption": [
|
| 1572 |
+
"Figure 5: Performance of SimGRACE trained with different batch size and epochs on NCI1 dataset."
|
| 1573 |
+
],
|
| 1574 |
+
"image_footnote": [],
|
| 1575 |
+
"bbox": [
|
| 1576 |
+
531,
|
| 1577 |
+
354,
|
| 1578 |
+
897,
|
| 1579 |
+
564
|
| 1580 |
+
],
|
| 1581 |
+
"page_idx": 7
|
| 1582 |
+
},
|
| 1583 |
+
{
|
| 1584 |
+
"type": "text",
|
| 1585 |
+
"text": "4.6.2 Batch-size and training epochs. Figure 5 demonstrates the performance of SimGRACE trained with various batch size and epochs. Generally, larger batch size or training epochs can bring better performance. The reason is that larger batch size will provide more negative samples for contrasting. Similarly, training longer also provides more new negative samples for each sample because the split of total datasets is more various with more training epochs. In our experiments, to keep fair, we follow the same settings of other competitors [55, 56] via training the GNN encoder with batch size as 128 and number of epochs as 20. In fact, we can further improve the performance of SimGRACE with larger batch size and longer training time.",
|
| 1586 |
+
"bbox": [
|
| 1587 |
+
511,
|
| 1588 |
+
628,
|
| 1589 |
+
913,
|
| 1590 |
+
796
|
| 1591 |
+
],
|
| 1592 |
+
"page_idx": 7
|
| 1593 |
+
},
|
| 1594 |
+
{
|
| 1595 |
+
"type": "text",
|
| 1596 |
+
"text": "5 CONCLUSIONS",
|
| 1597 |
+
"text_level": 1,
|
| 1598 |
+
"bbox": [
|
| 1599 |
+
514,
|
| 1600 |
+
806,
|
| 1601 |
+
671,
|
| 1602 |
+
821
|
| 1603 |
+
],
|
| 1604 |
+
"page_idx": 7
|
| 1605 |
+
},
|
| 1606 |
+
{
|
| 1607 |
+
"type": "text",
|
| 1608 |
+
"text": "In this paper, we propose a simple framework (SimGRACE) for graph contrastive learning. Although it may appear simple, we demonstrate that SimGRACE can outperform or match the state-of-the-art competitors on multiple graph datasets of various scales and types, while enjoying unprecedented degree of flexibility, high",
|
| 1609 |
+
"bbox": [
|
| 1610 |
+
511,
|
| 1611 |
+
825,
|
| 1612 |
+
913,
|
| 1613 |
+
897
|
| 1614 |
+
],
|
| 1615 |
+
"page_idx": 7
|
| 1616 |
+
},
|
| 1617 |
+
{
|
| 1618 |
+
"type": "header",
|
| 1619 |
+
"text": "WWW '22, April 25-29, 2022, Virtual Event, Lyon, France",
|
| 1620 |
+
"bbox": [
|
| 1621 |
+
83,
|
| 1622 |
+
75,
|
| 1623 |
+
354,
|
| 1624 |
+
85
|
| 1625 |
+
],
|
| 1626 |
+
"page_idx": 7
|
| 1627 |
+
},
|
| 1628 |
+
{
|
| 1629 |
+
"type": "header",
|
| 1630 |
+
"text": "Jun Xia, et al.",
|
| 1631 |
+
"bbox": [
|
| 1632 |
+
846,
|
| 1633 |
+
75,
|
| 1634 |
+
911,
|
| 1635 |
+
85
|
| 1636 |
+
],
|
| 1637 |
+
"page_idx": 7
|
| 1638 |
+
},
|
| 1639 |
+
{
|
| 1640 |
+
"type": "text",
|
| 1641 |
+
"text": "efficiency and ease of use. We emancipate graph contrastive learning from tedious manual tuning, cumbersome search or expensive domain knowledge. Furthermore, we devise adversarial training schemes to enhance the robustness of SimGRACE in a principled way and theoretically explain the reasons. There are two promising avenues for future work: (1) exploring if encoder perturbation can work well in other domains like computer vision and natural language processing. (2) applying the pre-trained GNNs to more real-world tasks including social analysis and biochemistry.",
|
| 1642 |
+
"bbox": [
|
| 1643 |
+
81,
|
| 1644 |
+
107,
|
| 1645 |
+
482,
|
| 1646 |
+
232
|
| 1647 |
+
],
|
| 1648 |
+
"page_idx": 8
|
| 1649 |
+
},
|
| 1650 |
+
{
|
| 1651 |
+
"type": "text",
|
| 1652 |
+
"text": "ACKNOWLEDGMENTS",
|
| 1653 |
+
"text_level": 1,
|
| 1654 |
+
"bbox": [
|
| 1655 |
+
83,
|
| 1656 |
+
247,
|
| 1657 |
+
279,
|
| 1658 |
+
261
|
| 1659 |
+
],
|
| 1660 |
+
"page_idx": 8
|
| 1661 |
+
},
|
| 1662 |
+
{
|
| 1663 |
+
"type": "text",
|
| 1664 |
+
"text": "This work is supported in part by the Science and Technology Innovation 2030 - Major Project (No. 2021ZD0150100) and National Natural Science Foundation of China (No. U21A20427).",
|
| 1665 |
+
"bbox": [
|
| 1666 |
+
81,
|
| 1667 |
+
265,
|
| 1668 |
+
482,
|
| 1669 |
+
308
|
| 1670 |
+
],
|
| 1671 |
+
"page_idx": 8
|
| 1672 |
+
},
|
| 1673 |
+
{
|
| 1674 |
+
"type": "text",
|
| 1675 |
+
"text": "REFERENCES",
|
| 1676 |
+
"text_level": 1,
|
| 1677 |
+
"bbox": [
|
| 1678 |
+
84,
|
| 1679 |
+
323,
|
| 1680 |
+
202,
|
| 1681 |
+
337
|
| 1682 |
+
],
|
| 1683 |
+
"page_idx": 8
|
| 1684 |
+
},
|
| 1685 |
+
{
|
| 1686 |
+
"type": "list",
|
| 1687 |
+
"sub_type": "ref_text",
|
| 1688 |
+
"list_items": [
|
| 1689 |
+
"[1] Bijaya Adhikari, Yao Zhang, Naren Ramakrishnan, and Aditya B. Prakash. 2018. Sub2Vec: Feature Learning for Subgraphs. ADVANCES IN KNOWLEDGE DISCOVERY AND DATA MINING, PAKDD 2018, PT II (2018), 170-182.",
|
| 1690 |
+
"[2] Rozemberczki Benedek, Kiss Oliver, and Sarkar Rik. 2020. An API Oriented Open-source Python Framework for Unsupervised Learning on Graphs. (2020).",
|
| 1691 |
+
"[3] Liu Chen, Salzmann Mathieu, Lin Tao, Tomioka Ryota, and Süsstrunk Sabine. 2020. On the Loss Landscape of Adversarial Training: Identifying Challenges and How to Overcome Them. NIPS 2020 (2020).",
|
| 1692 |
+
"[4] Ting Chen, Song Bian, and Yizhou Sun. 2019. Are Powerful Graph Neural Nets Necessary? A Dissection on Graph Classification. arXiv: Learning (2019).",
|
| 1693 |
+
"[5] Ting Chen, Yizhou Sun, Yue Shi, and Liangjie Hong. 2017. On Sampling Strategies for Neural Network-based Collaborative Filtering. Proceedings of the 23rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (2017), 767-776.",
|
| 1694 |
+
"[6] Hanjun Dai, Bo Dai, and Le Song. 2016. Discriminative Embeddings of Latent Variable Models for Structured Data. ICML (2016).",
|
| 1695 |
+
"[7] Hanjun Dai, Hui Li, Tian Tian, Xin Huang, Lin Wang, Jun Zhu, and Le Song. 2018. Adversarial Attack on Graph Structured Data. international conference on machine learning (2018), 1123-1132.",
|
| 1696 |
+
"[8] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. north american chapter of the association for computational linguistics (2019).",
|
| 1697 |
+
"[9] D. Paul Dobson and J. Andrew Doig. 2003. Distinguishing Enzyme Structures from Non-enzymes Without Alignments. Journal of Molecular Biology (2003), 771-783.",
|
| 1698 |
+
"[10] Yuanqi Du, Shiyu Wang, Xiaojie Guo, Hengning Cao, Shujie Hu, Junji Jiang, Aishwarya Varala, Abhinav Angirekula, and Liang Zhao. 2021. GraphGT: Machine Learning Datasets for Deep Graph Generation and Transformation. (2021).",
|
| 1699 |
+
"[11] J. Ian Goodfellow, Jonathon Shlens, and Christian Szegedy. 2015. Explaining and Harnessing Adversarial Examples. international conference on learning representations (2015).",
|
| 1700 |
+
"[12] Aditya Grover and Jure Leskovec. 2016. node2vec: Scalable Feature Learning for Networks. KDD (2016), 855-864.",
|
| 1701 |
+
"[13] Kaveh Hassani and Amir Hosein Khasahmadi. 2020. Contrastive multi-view representation learning on graphs. In International Conference on Machine Learning, PMLR, 4116-4126.",
|
| 1702 |
+
"[14] Chih-Hui Ho and Nuno Nvasconcelos. 2020. Contrastive Learning with Adversarial Examples. NIPS 2020 (2020).",
|
| 1703 |
+
"[15] Weihua Hu, Bowen Liu, Joseph Gomes, Marinka Zitnik, Percy Liang, Vijay Pande, and Jure Leskovec. 2020. Strategies for Pre-training Graph Neural Networks. ICLR (2020).",
|
| 1704 |
+
"[16] Ziniu Hu, Yuxiao Dong, Kuansan Wang, Kai-Wei Chang, and Yizhou Sun. 2020. GPT-GNN: Generative Pre-Training of Graph Neural Networks. KDD '20: The 26th ACM SIGKDD Conference on Knowledge Discovery and Data Mining Virtual Event CA USA July, 2020 (2020), 1857-1867.",
|
| 1705 |
+
"[17] Ming Jin, Yizhen Zheng, Yuan-Fang Li, Chen Gong, Chuan Zhou, and Shirui Pan. 2021. Multi-Scale Contrastive Siamese Networks for Self-Supervised Graph Representation Learning. *JCAI* (2021), 1477-1483.",
|
| 1706 |
+
"[18] Nikola Jovanovic, Zhao Meng, Lukas Faber, and Roger Wattenhofer. 2021. Towards robust graph contrastive learning. arXiv preprint arXiv:2102.13085 (2021).",
|
| 1707 |
+
"[19] He Kaiming, Fan Haoqi, Wu Yuxin, Xie Saining, and Girshick Ross. 2020. Momentum Contrast for Unsupervised Visual Representation Learning. CVPR (2020), 9726-9735.",
|
| 1708 |
+
"[20] N. Thomas Kipf and Max Welling. 2016. Variational Graph Auto-Encoders. CoRR (2016)."
|
| 1709 |
+
],
|
| 1710 |
+
"bbox": [
|
| 1711 |
+
86,
|
| 1712 |
+
339,
|
| 1713 |
+
482,
|
| 1714 |
+
893
|
| 1715 |
+
],
|
| 1716 |
+
"page_idx": 8
|
| 1717 |
+
},
|
| 1718 |
+
{
|
| 1719 |
+
"type": "list",
|
| 1720 |
+
"sub_type": "ref_text",
|
| 1721 |
+
"list_items": [
|
| 1722 |
+
"[21] Thomas N Kipf and Max Welling. 2016. Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907 (2016).",
|
| 1723 |
+
"[22] Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Soricut. 2020. ALBERT: A Lite BERT for Self-supervised Learning of Language Representations. ICLR (2020).",
|
| 1724 |
+
"[23] Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. 2019. Towards Deep Learning Models Resistant to Adversarial Attacks. international conference on learning representations (2019).",
|
| 1725 |
+
"[24] A. David McAllester. 1999. PAC-Bayesian model averaging. _COLT_ (1999), 164-170.",
|
| 1726 |
+
"[25] A. David McAllester and Jonathan Baxter. 1999. Some PAC-Bayesian Theorems. Machine Learning (1999), 355-363.",
|
| 1727 |
+
"[26] Christopher Morris, M. Nils Krieger, Franka Bause, Kristian Kersting, Petra Mutzel, and Marion Neumann. 2020. TUDataset: A collection of benchmark datasets for learning with graphs. (2020).",
|
| 1728 |
+
"[27] Annamalai Narayanan, Mahinthan Chandramohan, Rajasekar Venkatesan, Lihui Chen, Yang Liu, and Shantanu Jaiswal. 2017. graph2vec: Learning Distributed Representations of Graphs. arXiv: Artificial Intelligence (2017).",
|
| 1729 |
+
"[28] Behnam Neyshabur, Srinadh Bhojanapalli, David McAllester, and Nathan Srebro. 2017. Exploring Generalization in Deep Learning. ADVANCES IN NEURAL INFORMATION PROCESSING SYSTEMS 30 (NIPS 2017) (2017), 5947-5956.",
|
| 1730 |
+
"[29] van den Aïron Oord, Yazhe Li, and Oriol Vinyals. 2019. Representation Learning with Contrastive Predictive Coding. arXiv: Learning (2019).",
|
| 1731 |
+
"[30] Zhao Pu, Chen Pin-Yu, Das Payel, Karthikeyan Ramamurthy Natesan, and Lin Xue. 2020. Bridging Mode Connectivity in Loss Landscapes and Adversarial Robustness. ICLR (2020).",
|
| 1732 |
+
"[31] Kaspar Riesen and Horst Bunke. 2008. IAM Graph Database Repository for Graph Based Pattern Recognition and Machine Learning. $SSPR/SPR$ (2008), 287-297.",
|
| 1733 |
+
"[32] Joshua David Robinson, Ching-Yao Chuang, Suvirit Sra, and Stefanie Jegelka. 2021. Contrastive Learning with Hard Negative Samples. In International Conference on Learning Representations. https://openreview.net/forum?id=CR1XOQ0UTH",
|
| 1734 |
+
"[33] Yu Rong, Yatao Bian, Tingyang Xu, Weiyang Xie, Ying WEI, Wenbing Huang, and Junzhou Huang. 2020. Self-Supervised Graph Transformer on Large-Scale Molecular Data. NIPS 2020 (2020).",
|
| 1735 |
+
"[34] Nino Shervashidze, Pascal Schweitzer, Jan van Erik Leeuwen, Kurt Mehlhorn, and M. Karsten Borgwardt. 2011. Weisfeiler-Lehman Graph Kernels. Journal of Machine Learning Research (2011), 2539-2561.",
|
| 1736 |
+
"[35] Nino Shervashidze, V. N. S. Vishwanathan, H. Tobias Petri, Kurt Mehlhorn, and M. Karsten Borgwardt. 2009. Ecient graphlet kernels for large graph comparison. AISTATS (2009), 488-495.",
|
| 1737 |
+
"[36] Kihyuk Sohn. 2016. Improved Deep Metric Learning with Multi-class N-pair Loss Objective. ADVANCES IN NEURAL INFORMATION PROCESSING SYSTEMS 29 (NIPS 2016) (2016), 1849-1857.",
|
| 1738 |
+
"[37] Fan-Yun Sun, Jordan Hoffman, Vikas Verma, and Jian Tang. 2020. InfoGraph: Unsupervised and Semi-supervised Graph-Level Representation Learning via Mutual Information Maximization. ICLR (2020).",
|
| 1739 |
+
"[38] Fan-Yun Sun, Jordan Hoffmann, Vikas Verma, and Jian Tang. 2019. Infagraph: Unsupervised and semi-supervised graph-level representation learning via mutual information maximization. arXiv preprint arXiv:1908.01000 (2019).",
|
| 1740 |
+
"[39] Mengying Sun, Jing Xing, Huijun Wang, Bin Chen, and Jiayu Zhou. 2021. MoCL: Contrastive Learning on Molecular Graphs with Multi-level Domain Knowledge. KDD 2021 (2021).",
|
| 1741 |
+
"[40] Cheng Tan, Jun Xia, Lirong Wu, and Stan Z Li. 2021. Co-learning: Learning from noisy labels with self-supervision. In Proceedings of the 29th ACM International Conference on Multimedia. 1405-1413.",
|
| 1742 |
+
"[41] Shantanu Thakoor, Corentin Tallec, Mohammad Gheshlaghi Azar, Remi Munos, Petar Velickovic, and Michal Valko. 2021. Bootstrapped Representation Learning on Graphs. In ICLR 2021 Workshop on Geometrical and Topological Representation Learning. https://openreview.net/forum?id=QrzVRAA49Ud",
|
| 1743 |
+
"[42] Chen Ting, Kornblith Simon, Norouzi Mohammad, and Hinton Geoffrey. 2020. A Simple Framework for Contrastive Learning of Visual Representations. ICML (2020), 1597-1607.",
|
| 1744 |
+
"[43] Wang Tongzhou and Isola Phillip. 2020. Understanding Contrastive Representation Learning through Alignment and Uniformity on the Hypersphere. ICML (2020), 9929-9939.",
|
| 1745 |
+
"[44] Vinay Prabhu Uday, Dian Yap Ang, Xu Joyce, and Whaley John. 2019. Understanding Adversarial Robustness Through Loss Landscape Geometries. (2019).",
|
| 1746 |
+
"[45] Petar Velickovic, William Fedus, L. William Hamilton, Pietro Lio, Yoshua Bengio, and Devon R. Hjelm. 2019. Deep Graph Infomax. ICLR (2019).",
|
| 1747 |
+
"[46] Zhirong Wu, Yuanjun Xiong, X Stella Yu, and Dahua Lin. 2018. Unsupervised feature learning via non-parametric instance discrimination. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (2018), 3733-3742.",
|
| 1748 |
+
"[47] Jun Xia, Haitao Lin, Yongjie Xu, Lirong Wu, Zhangyang Gao, Siyuan Li, and Stan Z. Li. 2021. Towards Robust Graph Neural Networks against Label Noise. https://openreview.net/forum?id=H38f_9b90BO",
|
| 1749 |
+
"[48] Jun Xia, Cheng Tan, Lirong Wu, Yongjie Xu, and Stan Z Li. 2022. OT Cleaner: Label Correction as Optimal Transport. IEEE International Conference on Acoustics, Speech and Signal Processing (2022)."
|
| 1750 |
+
],
|
| 1751 |
+
"bbox": [
|
| 1752 |
+
517,
|
| 1753 |
+
108,
|
| 1754 |
+
911,
|
| 1755 |
+
895
|
| 1756 |
+
],
|
| 1757 |
+
"page_idx": 8
|
| 1758 |
+
},
|
| 1759 |
+
{
|
| 1760 |
+
"type": "header",
|
| 1761 |
+
"text": "SimGRACE: A Simple Framework for Graph Contrastive Learning without Data Augmentation",
|
| 1762 |
+
"bbox": [
|
| 1763 |
+
84,
|
| 1764 |
+
75,
|
| 1765 |
+
531,
|
| 1766 |
+
87
|
| 1767 |
+
],
|
| 1768 |
+
"page_idx": 8
|
| 1769 |
+
},
|
| 1770 |
+
{
|
| 1771 |
+
"type": "header",
|
| 1772 |
+
"text": "WWW '22, April 25-29, 2022, Virtual Event, Lyon, France",
|
| 1773 |
+
"bbox": [
|
| 1774 |
+
640,
|
| 1775 |
+
75,
|
| 1776 |
+
911,
|
| 1777 |
+
87
|
| 1778 |
+
],
|
| 1779 |
+
"page_idx": 8
|
| 1780 |
+
},
|
| 1781 |
+
{
|
| 1782 |
+
"type": "list",
|
| 1783 |
+
"sub_type": "ref_text",
|
| 1784 |
+
"list_items": [
|
| 1785 |
+
"[49] Jun Xia, Lirong Wu, Jintao Chen, Ge Wang, and Stan Z. Li. 2021. Debiased Graph Contrastive Learning. CoRR abs/2110.02027 (2021). arXiv:2110.02027 https://arxiv.org/abs/2110.02027",
|
| 1786 |
+
"[50] JUN XIA, Jiangbin Zheng, Cheng Tan, Ge Wang, and Stan Z Li. 2022. Towards Effective and Generalizable Fine-tuning for Pre-trained Molecular Graph Models. bioRxiv (2022). https://doi.org/10.1101/2022.02.03.479055 arXiv:https://www.biorxiv.org/content/early/2022/02/06/2022.02.03.479055.full.pdf",
|
| 1787 |
+
"[51] Jun Xia, Yanqiao Zhu, Yuanqi Du, and Stan Z Li. 2022. A Survey of Pretraining on Graphs: Taxonomy, Methods, and Applications. arXiv preprint arXiv:2202.07893 (2022).",
|
| 1788 |
+
"[52] Keyulu Xu, Weihua Hu, Jure Leskovec, and Stefanie Jegelka. 2019. How Powerful are Graph Neural Networks? In International Conference on Learning Representations. https://openreview.net/forum?id=ryGs6iA5Km",
|
| 1789 |
+
"[53] Keyulu Xu, Weihua Hu, Jure Leskovec, and Stefanie Jegelka. 2019. How Powerful are Graph Neural Networks? international conference on learning representations (2019).",
|
| 1790 |
+
"[54] Pinar Yanardag and V. N. S. Vishwanathan. 2015. Deep Graph Kernels. ACM Knowledge Discovery and Data Mining (2015), 1365-1374.",
|
| 1791 |
+
"[55] Yuning You, Tianlong Chen, Yang Shen, and Zhangyang Wang. 2021. Graph Contrastive Learning Automated. arXiv preprint arXiv:2106.07594 (2021).",
|
| 1792 |
+
"[56] Yuning You, Tianlong Chen, Yongduo Sui, Ting Chen, Zhangyang Wang, and Yang Shen. 2020. Graph Contrastive Learning with Augmentations. In Advances in Neural Information Processing Systems, H. Larochelle, M. Ranzato, R. Hadsell, M. F. Balcan, and H. Lin (Eds.), Vol. 33. Curran Associates, Inc., 5812-5823. https://proceedings.neurips.cc/paper/2020/file/3fe230348e9a12c13120749e3f9fa4cd-Paper.pdf",
|
| 1793 |
+
"[57] Jiangbin Zheng, Yile Wang, Ge Wang, Jun Xia, Yufei Huang, Guojiang Zhao, Yue Zhang, and Stan Z. Li. 2022. Using Context-to-Vector with Graph Retrofitting. ACL (2022).",
|
| 1794 |
+
"[58] Yanqiao Zhu, Yichen Xu, Qiang Liu, and Shu Wu. 2021. An Empirical Study of Graph Contrastive Learning. In Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks, Joaquin Vanschoren and Serena Yeung (Eds.). Curran Associates, Inc.",
|
| 1795 |
+
"[59] Yanqiao Zhu, Yichen Xu, Feng Yu, Qiang Liu, Shu Wu, and Liang Wang. 2020. Deep Graph Contrastive Representation Learning. In ICML Workshop on Graph Representation Learning and Beyond. https://arxiv.org/abs/2006.04131",
|
| 1796 |
+
"[60] Yanqiao Zhu, Yichen Xu, Feng Yu, Qiang Liu, Shu Wu, and Liang Wang. 2021. Graph Contrastive Learning with Adaptive Augmentation. WWW (2021), 2069-2080."
|
| 1797 |
+
],
|
| 1798 |
+
"bbox": [
|
| 1799 |
+
83,
|
| 1800 |
+
108,
|
| 1801 |
+
485,
|
| 1802 |
+
501
|
| 1803 |
+
],
|
| 1804 |
+
"page_idx": 9
|
| 1805 |
+
},
|
| 1806 |
+
{
|
| 1807 |
+
"type": "text",
|
| 1808 |
+
"text": "A APPENDIX: DATASETS IN VARIOUS SETTINGS",
|
| 1809 |
+
"text_level": 1,
|
| 1810 |
+
"bbox": [
|
| 1811 |
+
514,
|
| 1812 |
+
104,
|
| 1813 |
+
846,
|
| 1814 |
+
136
|
| 1815 |
+
],
|
| 1816 |
+
"page_idx": 9
|
| 1817 |
+
},
|
| 1818 |
+
{
|
| 1819 |
+
"type": "text",
|
| 1820 |
+
"text": "A.1 Unsupervised learning & Semi-supervised learning",
|
| 1821 |
+
"text_level": 1,
|
| 1822 |
+
"bbox": [
|
| 1823 |
+
514,
|
| 1824 |
+
141,
|
| 1825 |
+
901,
|
| 1826 |
+
174
|
| 1827 |
+
],
|
| 1828 |
+
"page_idx": 9
|
| 1829 |
+
},
|
| 1830 |
+
{
|
| 1831 |
+
"type": "table",
|
| 1832 |
+
"img_path": "images/5a49a4efac4e0e48a9637a45a19243e2efa1c7d482e252befacb9be1c1ab589e.jpg",
|
| 1833 |
+
"table_caption": [
|
| 1834 |
+
"Table 7: Datasets statistics for unsupervised and semi-supervised experiments."
|
| 1835 |
+
],
|
| 1836 |
+
"table_footnote": [],
|
| 1837 |
+
"table_body": "<table><tr><td>Datasets</td><td>Category</td><td>Graph Num.</td><td>Avg. Node</td><td>Avg. Degree</td></tr><tr><td>NCI1</td><td>Biochemical Molecules</td><td>4110</td><td>29.87</td><td>1.08</td></tr><tr><td>PROTEINS</td><td>Biochemical Molecules</td><td>1113</td><td>39.06</td><td>1.86</td></tr><tr><td>DD</td><td>Biochemical Molecules</td><td>1178</td><td>284.32</td><td>715.66</td></tr><tr><td>MUTAG</td><td>Biochemical Molecules</td><td>188</td><td>17.93</td><td>19.79</td></tr><tr><td>COLLAB</td><td>Social Networks</td><td>5000</td><td>74.49</td><td>32.99</td></tr><tr><td>RDT-B</td><td>Social Networks</td><td>2000</td><td>429.63</td><td>1.15</td></tr><tr><td>RDB-M</td><td>Social Networks</td><td>2000</td><td>429.63</td><td>497.75</td></tr><tr><td>IMDB-B</td><td>Social Networks</td><td>1000</td><td>19.77</td><td>96.53</td></tr></table>",
|
| 1838 |
+
"bbox": [
|
| 1839 |
+
519,
|
| 1840 |
+
232,
|
| 1841 |
+
913,
|
| 1842 |
+
345
|
| 1843 |
+
],
|
| 1844 |
+
"page_idx": 9
|
| 1845 |
+
},
|
| 1846 |
+
{
|
| 1847 |
+
"type": "text",
|
| 1848 |
+
"text": "For unsupervised setting, experiments are performed for 5 times each of which corresponds to a 10-fold evaluation, with mean and standard deviation of accuracies $(\\%)$ reported. For semi-supervised learning, we perform experiments with $1\\%$ (if there are over 10 samples for each class) and $10\\%$ label rate for 5 times, each of which corresponds to a 10-fold evaluation, with mean and standard deviation of accuracies $(\\%)$ reported. For pre-training, learning rate is tuned in $\\{0.1, 1.0, 5.0, 10.0\\}$ and epoch number in $\\{20, 40, 60, 80, 100\\}$ where grid search is performed. All datasets used in both unsupervised and semi-supervised experiments can be seen in Table 7.",
|
| 1849 |
+
"bbox": [
|
| 1850 |
+
513,
|
| 1851 |
+
359,
|
| 1852 |
+
915,
|
| 1853 |
+
500
|
| 1854 |
+
],
|
| 1855 |
+
"page_idx": 9
|
| 1856 |
+
},
|
| 1857 |
+
{
|
| 1858 |
+
"type": "text",
|
| 1859 |
+
"text": "A.2 Transfer learning",
|
| 1860 |
+
"text_level": 1,
|
| 1861 |
+
"bbox": [
|
| 1862 |
+
514,
|
| 1863 |
+
508,
|
| 1864 |
+
707,
|
| 1865 |
+
526
|
| 1866 |
+
],
|
| 1867 |
+
"page_idx": 9
|
| 1868 |
+
},
|
| 1869 |
+
{
|
| 1870 |
+
"type": "table",
|
| 1871 |
+
"img_path": "images/20ce28714b87c44f0af343d1f26573747f26c13feff0be7f9e8a0b39e5042b07.jpg",
|
| 1872 |
+
"table_caption": [
|
| 1873 |
+
"Table 8: Datasets statistics for transfer learning."
|
| 1874 |
+
],
|
| 1875 |
+
"table_footnote": [],
|
| 1876 |
+
"table_body": "<table><tr><td>Datasets</td><td>Category</td><td>Utilization</td><td>Graph Num.</td><td>Avg. Node</td><td>Avg. Degree</td></tr><tr><td>ZINC-2M</td><td>Biochemical Molecules</td><td>Pre-Training</td><td>2,000,000</td><td>26.62</td><td>57.72</td></tr><tr><td>PPI-306K</td><td>Protein-Protein Intersection Networks</td><td>Pre-Training</td><td>306,925</td><td>39.82</td><td>729.62</td></tr><tr><td>BBBP</td><td>Biochemical Molecules</td><td>Finetuning</td><td>2,039</td><td>24.06</td><td>51.90</td></tr><tr><td>ToxCast</td><td>Biochemical Molecules</td><td>Finetuning</td><td>8,576</td><td>18.78</td><td>38.52</td></tr><tr><td>SIDER</td><td>Biochemical Molecules</td><td>Finetuning</td><td>1,427</td><td>33.64</td><td>70.71</td></tr></table>",
|
| 1877 |
+
"bbox": [
|
| 1878 |
+
517,
|
| 1879 |
+
570,
|
| 1880 |
+
921,
|
| 1881 |
+
638
|
| 1882 |
+
],
|
| 1883 |
+
"page_idx": 9
|
| 1884 |
+
},
|
| 1885 |
+
{
|
| 1886 |
+
"type": "text",
|
| 1887 |
+
"text": "The datasets utilized in transfer learning can be seen in Table 8. ZINC-2M and PPI-306K are used for pre-training and the left ones are for fine-tuning.",
|
| 1888 |
+
"bbox": [
|
| 1889 |
+
513,
|
| 1890 |
+
652,
|
| 1891 |
+
915,
|
| 1892 |
+
695
|
| 1893 |
+
],
|
| 1894 |
+
"page_idx": 9
|
| 1895 |
+
},
|
| 1896 |
+
{
|
| 1897 |
+
"type": "text",
|
| 1898 |
+
"text": "B GNN ARCHITECTURES IN VARIOUS SETTINGS",
|
| 1899 |
+
"text_level": 1,
|
| 1900 |
+
"bbox": [
|
| 1901 |
+
514,
|
| 1902 |
+
705,
|
| 1903 |
+
852,
|
| 1904 |
+
737
|
| 1905 |
+
],
|
| 1906 |
+
"page_idx": 9
|
| 1907 |
+
},
|
| 1908 |
+
{
|
| 1909 |
+
"type": "text",
|
| 1910 |
+
"text": "To keep fair, we adopt the same GNNs architectures with previous competitors. Specifically, for unsupervised task, GIN [53] with 3 layers and 32 hidden dimensions is adopted as the encoder. For semi-supervised task, we utilize ResGCN [4] with 5 layers and 128 hidden dimensions. For transfer learning, we adopt GIN with the default setting in [15] as the GNN-based encoder. For experiments on adversarial robustness, Structure2vec is adopted as the GNN-based encoder as in [7].",
|
| 1911 |
+
"bbox": [
|
| 1912 |
+
511,
|
| 1913 |
+
741,
|
| 1914 |
+
915,
|
| 1915 |
+
853
|
| 1916 |
+
],
|
| 1917 |
+
"page_idx": 9
|
| 1918 |
+
},
|
| 1919 |
+
{
|
| 1920 |
+
"type": "header",
|
| 1921 |
+
"text": "WWW '22, April 25-29, 2022, Virtual Event, Lyon, France",
|
| 1922 |
+
"bbox": [
|
| 1923 |
+
83,
|
| 1924 |
+
75,
|
| 1925 |
+
354,
|
| 1926 |
+
87
|
| 1927 |
+
],
|
| 1928 |
+
"page_idx": 9
|
| 1929 |
+
},
|
| 1930 |
+
{
|
| 1931 |
+
"type": "header",
|
| 1932 |
+
"text": "Jun Xia, et al.",
|
| 1933 |
+
"bbox": [
|
| 1934 |
+
846,
|
| 1935 |
+
75,
|
| 1936 |
+
911,
|
| 1937 |
+
85
|
| 1938 |
+
],
|
| 1939 |
+
"page_idx": 9
|
| 1940 |
+
}
|
| 1941 |
+
]
|
2202.03xxx/2202.03104/5ca1dd62-6f9a-4416-b862-99d48cad9bd8_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.03xxx/2202.03104/5ca1dd62-6f9a-4416-b862-99d48cad9bd8_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1116012bb5856b4294550f411c5784c21757f0131feec87148472b8aef1b1aa3
|
| 3 |
+
size 802703
|
2202.03xxx/2202.03104/full.md
ADDED
|
@@ -0,0 +1,402 @@
|
| 1 |
+
# SimGRACE: A Simple Framework for Graph Contrastive Learning without Data Augmentation
|
| 2 |
+
|
| 3 |
+
Jun Xia $^{1,2\dagger}$ , Lirong Wu $^{1,2\dagger}$ , Jintao Chen $^{3}$ , Bozhen Hu $^{1,2}$ , Stan Z.Li $^{1,2\star}$
|
| 4 |
+
|
| 5 |
+
$^{1}$ School of Engineering, Westlake University, Hangzhou 310030, China
|
| 6 |
+
|
| 7 |
+
$^{2}$ Institute of Advanced Technology, Westlake Institute for Advanced Study, Hangzhou 310030, China
|
| 8 |
+
|
| 9 |
+
3 Zhejiang University, Hangzhou 310058, China
|
| 10 |
+
|
| 11 |
+
{xiajun, wulirong, hubozhen, stan.zq.li}@westlake.edu.cn, chen jintao@zju.edu.cn
|
| 12 |
+
|
| 13 |
+
# ABSTRACT
|
| 14 |
+
|
| 15 |
+
Graph contrastive learning (GCL) has emerged as a dominant technique for graph representation learning which maximizes the mutual information between paired graph augmentations that share the same semantics. Unfortunately, it is difficult to preserve semantics well during augmentations in view of the diverse nature of graph data. Currently, data augmentations in GCL broadly fall into three unsatisfactory ways. First, the augmentations can be manually picked per dataset by trial-and-errors. Second, the augmentations can be selected via cumbersome search. Third, the augmentations can be obtained with expensive domain knowledge as guidance. All of these limit the efficiency and more general applicability of existing GCL methods. To circumvent these crucial issues, we propose a Simple framework for GRaph Contrastive lEarning, SimGRACE for brevity, which does not require data augmentations. Specifically, we take original graph as input and GNN model with its perturbed version as two encoders to obtain two correlated views for contrast. SimGRACE is inspired by the observation that graph data can preserve their semantics well during encoder perturbations while not requiring manual trial-and-errors, cumbersome search or expensive domain knowledge for augmentations selection. Also, we explain why SimGRACE can succeed. Furthermore, we devise adversarial training scheme, dubbed AT-SimGRACE, to enhance the robustness of graph contrastive learning and theoretically explain the reasons. Albeit simple, we show that SimGRACE can yield competitive or better performance compared with state-of-the-art methods in terms of generalizability, transferability and robustness, while enjoying unprecedented degree of flexibility and efficiency. The code is available at: https://github.com/junxia97/SimGRACE.
|
| 16 |
+
|
| 17 |
+
# CCS CONCEPTS
|
| 18 |
+
|
| 19 |
+
- Computing methodologies $\rightarrow$ Neural networks; Learning latent representations; - Mathematics of computing $\rightarrow$ Graph algorithms.
|
| 20 |
+
|
| 21 |
+
$\dagger$ Equal Contribution, $\star$ Corresponding Author.
|
| 22 |
+
|
| 23 |
+
Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than ACM must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.
|
| 24 |
+
|
| 25 |
+
WWW '22, April 25-29, 2022, Virtual Event, Lyon, France
|
| 26 |
+
|
| 27 |
+
© 2022 Association for Computing Machinery.
|
| 28 |
+
|
| 29 |
+
ACM ISBN 978-1-4503-9096-5/22/04...$15.00
|
| 30 |
+
|
| 31 |
+
https://doi.org/10.1145/3485447.3512156
|
| 32 |
+
|
| 33 |
+
# KEYWORDS
|
| 34 |
+
|
| 35 |
+
Graph neural networks, graph self-supervised learning, contrastive learning, graph representation learning, robustness
|
| 36 |
+
|
| 37 |
+
# ACM Reference Format:
|
| 38 |
+
|
| 39 |
+
Jun Xia, Lirong Wu, Jintao Chen, Bozhen Hu, Stan Z.Li. 2022. SimGRACE: A Simple Framework for Graph Contrastive Learning without Data Augmentation. In Proceedings of the ACM Web Conference 2022 (WWW '22), April 25-29, 2022, Virtual Event, Lyon, France. ACM, New York, NY, USA, 10 pages. https://doi.org/10.1145/3485447.3512156
|
| 40 |
+
|
| 41 |
+
# 1 INTRODUCTION
|
| 42 |
+
|
| 43 |
+
Graph Neural Networks (GNNs), inheriting the power of neural networks and utilizing the structural information of graph data simultaneously, have achieved overwhelming accomplishments in various graph-based tasks, such as node, graph classification or graph generation [10, 21, 52]. However, most existing GNNs are trained in a supervised manner and it is often resource- and time-intensive to collect abundant true-labeled data [40, 47, 48]. To remedy this issue, tremendous endeavors have been devoted to graph self-supervised learning that learns representations from unlabeled graphs. Among many, graph contrastive learning (GCL) [51, 55, 56] follows the general framework of contrastive learning in computer vision domain [42, 46], in which two augmentations are generated for each graph and then maximizes the mutual information between these two augmented views. In this way, the model can learn representations that are invariant to perturbations. For example, GraphCL [56] first designs four types of general augmentations (node dropping, edge perturbation, attribute masking and subgraph) for GCL. However, these augmentations are not suitable for all scenarios because the structural information and semantics of the graphs varies significantly across domains. For example, GraphCL [56] finds that edge perturbation benefits social networks but hurt some biochemical molecules in GCL. Worse still, these augmentations may alter the graph semantics completely even if the perturbation is weak. For example, dropping a carbon atom in the phenyl ring will alter the aromatic system and result in an alkene chain, which will drastically change the molecular properties [39].
|
| 44 |
+
|
| 45 |
+
To remedy these issues, several strategies have been proposed recently. Typically, GraphCL [56] manually picks data augmentations per dataset by tedious trial-and-errors, which significantly limits the generality and practicality of their proposed framework. To get rid of the tedious dataset-specific manual tuning of GraphCL, JOAO [55] proposes to automate GraphCL in selecting augmentation pairs. However, it suffers more computational overhead to
|
| 46 |
+
|
| 47 |
+
Table 1: Comparison between state-of-the-art GCL methods (graph-level representation learning) and SimGRACE.
|
| 48 |
+
|
| 49 |
+
<table><tr><td></td><td>No manual trial-and-errors</td><td>No domain knowledge</td><td>Preserving semantics</td><td>No cumbersome search</td><td>Generality</td></tr><tr><td>GraphCL [56]</td><td>X</td><td>✓</td><td>X</td><td>✓</td><td>X</td></tr><tr><td>MoCL [39]</td><td>✓</td><td>X</td><td>✓</td><td>✓</td><td>X</td></tr><tr><td>JOAO(v2) [55]</td><td>✓</td><td>✓</td><td>X</td><td>X</td><td>✓</td></tr><tr><td>SimGRACE</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr></table>
|
| 50 |
+
|
| 51 |
+

|
| 52 |
+
|
| 53 |
+

|
| 54 |
+
|
| 55 |
+

|
| 56 |
+
|
| 57 |
+

|
| 58 |
+
GraphCL
|
| 59 |
+
|
| 60 |
+

|
| 61 |
+
MoCL
|
| 62 |
+
|
| 63 |
+

|
| 64 |
+
SimGRACE
|
| 65 |
+
Figure 1: Comparison of GraphCL [56], MoCL [39] and SimGRACE on MUTAG dataset. The samples of two classes are distinguished by colors (blue & orange). We first train three GNN encoders with these methods respectively and visualise the representations of original graphs with t-SNE in the upper row. Then, we perturb graphs or encoders in their respective ways (edge perturbation for GraphCL, replacing functional group with bioisosteres of similar properties for MoCL, encoder perturbation for SimGRACE) and visualise the representations of perturbed (GraphCL, MoCL) or original (SimGRACE) graphs in the below row. Unlike GraphCL, SimGRACE and MoCL can preserve the class identity semantics well after perturbations. However, MoCL requires expensive domain knowledge as guidance.
|
| 66 |
+
|
| 67 |
+
search suitable augmentations and still relies on human prior knowledge in constructing and configuring the augmentation pool to select from. To avoid altering the semantics in the general augmentations adopted in GraphCL and JOAO(v2), MoCL [39] proposes to replace valid substructures in molecular graph with bioisosteres that share similar properties. However, it requires expensive domain knowledge as guidance and can not be applied in other domains like social graphs. Hence, a natural question emerges: Can we emancipate graph contrastive learning from tedious manual trial-and-errors, cumbersome search or expensive domain knowledge?
|
| 68 |
+
|
| 69 |
+
To answer this question, instead of devising more advanced data augmentation strategies for GCL, we attempt to break through the state-of-the-art GCL framework, which takes semantics-preserving data augmentations as a prerequisite. More specifically, we take the original graph data as input and a GNN model together with its perturbed version as two encoders to obtain two correlated views. Then, we maximize the agreement between these two views. With the encoder perturbation as noise, we can obtain two different embeddings for the same input as "positive pairs". Similar to previous works [42, 56],
|
| 70 |
+
|
| 71 |
+
we take other graph data in the same mini-batch as "negative pairs". The idea of encoder perturbation is inspired by the observations in Figure 1. The augmentation or perturbation of MoCL and our SimGRACE can preserve the class identity semantics well while GraphCL can not. Also, we explain why SimGRACE can succeed. Besides, GraphCL [56] shows that GNNs can gain robustness using their proposed framework. However, (1) they do not explain why GraphCL can enhance the robustness; (2) GraphCL seems to be immunized to random attacks well while performing unsatisfactory against adversarial attacks. GROC [18] first integrates adversarial transformations into the graph contrastive learning framework and improves the robustness against adversarial attacks. Unfortunately, as the authors pointed out, the robustness of GROC comes at a price of much longer training time because conducting adversarial transformations for each graph is time-consuming. To remedy these deficiencies, we propose a novel algorithm AT-SimGRACE to perturb the encoder in an adversarial way, which introduces less computational overhead while showing better robustness. Theoretically, we explain why AT-SimGRACE can enhance the robustness. We highlight our contributions as follows:
|
| 72 |
+
|
| 73 |
+
- Significance: We emancipate graph contrastive learning from tedious manual trial-and-error, cumbersome search or expensive domain knowledge which limit the efficiency and more general applicability of existing GCL methods. The comparison between SimGRACE and state-of-the-art GCL methods can be seen in Table 1.
|
| 74 |
+
- Framework: We develop a novel and effective framework, SimGRACE, for graph contrastive learning which enjoys unprecedented degree of flexibility, high efficiency and ease of use. Moreover, we explain why SimGRACE can succeed.
|
| 75 |
+
- Algorithm: We propose a novel algorithm AT-SimGRACE to enhance the robustness of graph contrastive learning. AT-SimGRACE can achieve better robustness while introducing minor computational overhead.
|
| 76 |
+
- Experiments: We experimentally show that the proposed methods can yield competitive or better performance compared with state-of-the-art methods in terms of generalizability, transferability, robustness and efficiency on multiple social and biochemical graph datasets.
|
| 77 |
+
|
| 78 |
+
# 2 RELATED WORK
|
| 79 |
+
|
| 80 |
+
# 2.1 Generative / Predictive self-supervised learning on graphs
|
| 81 |
+
|
| 82 |
+
Inspired by the success of self-supervised learning in computer vision [19, 42] and natural language processing [8, 22, 57], tremendous endeavors have been devoted to graph self-supervised learning that
|
| 83 |
+
|
| 84 |
+

|
| 85 |
+
Figure 2: Illustration of SimGRACE, a simple framework of graph contrastive learning. Instead of augmenting the graph data, we feed the original graph $\mathcal{G}$ into a GNN encoder $f(\cdot ;\theta)$ and its perturbed version $f(\cdot ;\theta^{\prime})$ . After passing a shared projection head $g(\cdot)$ , we maximize the agreement between representations $z_{i}$ and $z_{j}$ via a contrastive loss.
|
| 86 |
+
|
| 87 |
+
learns representations in an unsupervised manner with designed pretext tasks. Initially, Hu et al. [15] propose two pretext tasks, i.e., predicting neighborhood context and node attributes to conduct node-level pre-training. Besides, they utilize supervised graph-level property prediction and structure similarity prediction as pretext tasks to perform graph-level pre-training. GPT-GNN [16] designs generative task in which node attributes and edges are alternatively generated such that the likelihood of a graph is maximized. Recently, GROVER [33] incorporates GNN into a transformer-style architecture and learns node embedding by predicting contextual property and graph-level motifs. We recommend the readers to refer to a recent survey [51] for more information. Different from above methods, our SimGRACE follows a contrastive framework that will be introduced below.
|
| 88 |
+
|
| 89 |
+
# 2.2 Graph Contrastive Learning
|
| 90 |
+
|
| 91 |
+
Graph contrastive learning can be categorized into two groups. One group can encode useful information by contrasting local and global representations. Initially, DGI [45] and InfoGraph [37] are proposed to obtain expressive representations for graphs or nodes via maximizing the mutual information between graph-level representations and substructure-level representations of different granularity. More recently, MVGRL [13] proposes to learn both node-level and graph-level representation by performing node diffusion and contrasting node representation to augmented graph representations. Another group is designed to learn representations that are tolerant to data transformation. Specifically, they first augment graph data and feed the augmented graphs into a shared encoder and projection head, after which their mutual information is maximized. Typically, for node-level tasks [58, 59], GCA [60] argues that data augmentation schemes should preserve intrinsic structures and attributes of graphs and thus proposes to adopt adaptive augmentations that only perturb unimportant components. DGCL [49] introduces a novel probabilistic method to alleviate the issue of false negatives in GCL. For graph-level tasks, GraphCL [56] proposes four types of augmentations for general graphs and demonstrated that the learned representations can help downstream tasks. However, the success of GraphCL comes at the price of tedious manual trial-and errors. To tackle this issue, JOAO [55] proposes a unified bi-level
|
| 92 |
+
|
| 93 |
+
optimization framework to automatically select data augmentations for GraphCL, which is time-consuming and inconvenient. More recently, MoCL [39] proposes to incorporate domain knowledge into molecular graph augmentations in order to preserve the semantics. However, the domain knowledge is extremely expensive. Worse still, MoCL can only work on molecular graph data, which significantly limits their generality. Despite the fruitful progress, they still require tedious manual trial-and-errors, cumbersome search or expensive domain knowledge for augmentation selection. Instead, our SimGRACE breaks through state-of-the-arts GCL framework that takes semantic-preserved data augmentations as prerequisite.
|
| 94 |
+
|
| 95 |
+
# 3 METHOD
|
| 96 |
+
|
| 97 |
+
# 3.1 SimGRACE
|
| 98 |
+
|
| 99 |
+
In this section, we introduce the SimGRACE framework in detail. As sketched in Figure 2, the framework consists of the following three major components:
|
| 100 |
+
|
| 101 |
+
(1) Encoder perturbation. A GNN encoder $f(\cdot ;\theta)$ and its perturbed version $f(\cdot ;\theta^{\prime})$ first extract two graph-level representations $\mathbf{h}$ and $\mathbf{h}'$ for the same graph $\mathcal{G}$, which can be formulated as,
|
| 102 |
+
|
| 103 |
+
$$
|
| 104 |
+
\mathbf {h} = f (\mathcal {G}; \theta), \mathbf {h} ^ {\prime} = f (\mathcal {G}; \theta^ {\prime}). \tag {1}
|
| 105 |
+
$$
|
| 106 |
+
|
| 107 |
+
The method we proposed to perturb the encoder $f(\cdot ;\theta)$ can be mathematically described as,
|
| 108 |
+
|
| 109 |
+
$$
|
| 110 |
+
\boldsymbol {\theta} _ {l} ^ {\prime} = \boldsymbol {\theta} _ {l} + \eta \cdot \Delta \boldsymbol {\theta} _ {l}; \quad \Delta \boldsymbol {\theta} _ {l} \sim \mathcal {N} \left(0, \sigma_ {l} ^ {2}\right), \tag {2}
|
| 111 |
+
$$
|
| 112 |
+
|
| 113 |
+
where $\theta_{l}$ and $\theta_l^{\prime}$ are the weight tensors of the $l$ -th layer of the GNN encoder and its perturbed version respectively. $\eta$ is the coefficient that scales the magnitude of the perturbation. $\Delta \theta_{l}$ is the perturbation term which samples from Gaussian distribution with zero mean and variance $\sigma_l^2$ . Also, we show that the performance will deteriorate when we set $\eta = 0$ in section 4.6.1. Note that BGRL [41] and MERIT [17] also update a target network with an online encoder during training. However, SimGRACE differs from them in three aspects: (1) SimGRACE perturbs the encoder with a random Gaussian noise instead of momentum updating; (2) SimGRACE does not require data augmentation while BGRL and MERIT take it as
|
| 114 |
+
|
| 115 |
+
prerequisite. (3) SimGRACE focuses on graph-level representation learning while BGRL and MERIT only work in node-level tasks.
|
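For illustration, a minimal PyTorch-style sketch of the weight perturbation in Eq. (2) is given below. It is a sketch rather than the released SimGRACE code: `perturb_encoder` is a hypothetical helper name, and taking $\sigma_l$ as the empirical standard deviation of each layer's weights is an assumption about how the per-layer variance is chosen.

```python
import copy

import torch


def perturb_encoder(encoder: torch.nn.Module, eta: float = 1.0) -> torch.nn.Module:
    """Return a perturbed copy of the GNN encoder, following Eq. (2).

    Each weight tensor theta_l receives additive noise eta * N(0, sigma_l^2);
    here sigma_l is set to the empirical std of that layer's weights (an assumption).
    """
    perturbed = copy.deepcopy(encoder)
    with torch.no_grad():
        for param in perturbed.parameters():
            if param.numel() <= 1:
                continue  # skip scalar parameters for simplicity
            sigma_l = param.std()
            param.add_(eta * sigma_l * torch.randn_like(param))
    return perturbed
```

In practice one would draw a fresh perturbed copy for every mini-batch, so that the positive view changes from step to step.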
| 116 |
+
|
| 117 |
+
(2) Projection head. As advocated in [42], mapping the representations to another latent space with a non-linear transformation $g(\cdot)$, named the projection head, can enhance the performance. In our SimGRACE framework, we also adopt a two-layer perceptron (MLP) to obtain $z$ and $z'$,
|
| 118 |
+
|
| 119 |
+
$$
|
| 120 |
+
z = g (\mathbf {h}), z ^ {\prime} = g \left(\mathbf {h} ^ {\prime}\right). \tag {3}
|
| 121 |
+
$$
|
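A sketch of such a two-layer projection head follows; keeping the output dimension equal to the embedding dimension is a choice made here for illustration, not a detail taken from the paper.

```python
import torch.nn as nn


class ProjectionHead(nn.Module):
    """Two-layer MLP g(.) applied to both h and h' as in Eq. (3)."""

    def __init__(self, dim, hidden_dim=None):
        super().__init__()
        hidden_dim = hidden_dim or dim
        self.net = nn.Sequential(
            nn.Linear(dim, hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_dim, dim),
        )

    def forward(self, h):
        return self.net(h)
```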
| 122 |
+
|
| 123 |
+
(3) Contrastive loss. In the SimGRACE framework, we utilize the normalized temperature-scaled cross-entropy loss (NT-Xent), as in previous works [29, 36, 46, 56], to enforce agreement between the positive pair $z$ and $z'$ relative to the negative pairs.
|
| 124 |
+
|
| 125 |
+
During SimGRACE training, a minibatch of $N$ graphs is randomly sampled and fed into a GNN encoder $f(\cdot ;\theta)$ and its perturbed version $f(\cdot ;\theta^{\prime})$, resulting in two representations for each graph and thus $2N$ representations in total. We re-denote $z,z^{\prime}$ as $z_{n},z_{n}^{\prime}$ for the $n$-th graph in the minibatch. Negative pairs are generated from the other $N - 1$ perturbed representations within the same mini-batch as in [5, 42, 56]. Denoting the cosine similarity function as $\mathrm{sim}(z,z^{\prime}) = z^{\top}z^{\prime} / \| z\| \| z^{\prime}\|$, the contrastive loss for the $n$-th graph is defined as,
|
| 126 |
+
|
| 127 |
+
$$
|
| 128 |
+
\ell_{n} = -\log \frac{\exp\left(\mathrm{sim}\left(z_{n}, z_{n}^{\prime}\right) / \tau\right)}{\sum_{n^{\prime}=1, n^{\prime} \neq n}^{N} \exp\left(\mathrm{sim}\left(z_{n}, z_{n^{\prime}}^{\prime}\right) / \tau\right)}, \tag{4}
|
| 129 |
+
$$
|
| 130 |
+
|
| 131 |
+
where $\tau$ is the temperature parameter. The final loss is computed across all positive pairs in the minibatch.
|
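The loss in Eq. (4) can be computed for a whole mini-batch with a few tensor operations. The sketch below assumes `z` and `z_prime` are the $(N, d)$ projections from the original and perturbed encoders; `nt_xent_loss` and the default temperature are illustrative names and values, not the paper's.

```python
import torch
import torch.nn.functional as F


def nt_xent_loss(z: torch.Tensor, z_prime: torch.Tensor, tau: float = 0.1) -> torch.Tensor:
    """NT-Xent loss of Eq. (4) for a minibatch of N graphs.

    z, z_prime: (N, d) projections of the same graphs from the original and
    perturbed encoders. Positives are matching rows; negatives are the other
    N-1 perturbed rows in the batch.
    """
    z = F.normalize(z, dim=1)
    z_prime = F.normalize(z_prime, dim=1)
    sim = z @ z_prime.t() / tau                      # cosine similarities / tau
    n = sim.size(0)
    pos = sim.diag()                                 # sim(z_n, z'_n) / tau
    eye = torch.eye(n, dtype=torch.bool, device=sim.device)
    neg = torch.logsumexp(sim.masked_fill(eye, float("-inf")), dim=1)
    return (neg - pos).mean()
```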
| 132 |
+
|
| 133 |
+
# 3.2 Why can SimGRACE work well?
|
| 134 |
+
|
| 135 |
+
In order to understand why SimGRACE can work well, we first introduce the analysis tools from [43]. Specifically, they identify two key properties related to contrastive learning: alignment and uniformity and then propose two metrics to measure the quality of representations obtained via contrastive learning. One is the alignment metric which is straightforwardly defined with the expected distance between positive pairs:
|
| 136 |
+
|
| 137 |
+
$$
|
| 138 |
+
\ell_{\text{align}}(f; \alpha) \triangleq \underset{(x, y) \sim p_{\text{pos}}}{\mathbb{E}} \left[ \| f(x) - f(y) \|_{2}^{\alpha} \right], \quad \alpha > 0 \tag{5}
|
| 139 |
+
$$
|
| 140 |
+
|
| 141 |
+
where $p_{\mathrm{pos}}$ is the distribution of positive pairs (augmentations of the same sample). This metric is well aligned with the objective of contrastive learning: positive samples should stay close in the embedding space. Analogously, for our SimGRACE framework, we provide a modified metric for alignment,
|
| 142 |
+
|
| 143 |
+
$$
|
| 144 |
+
\ell_{\text{align}}(f; \alpha) \triangleq \underset{x \sim p_{\text{data}}}{\mathbb{E}} \left[ \| f(x; \theta) - f(x; \theta^{\prime}) \|_{2}^{\alpha} \right], \quad \alpha > 0 \tag{6}
|
| 145 |
+
$$
|
| 146 |
+
|
| 147 |
+
where $p_{\mathrm{data}}$ is the data distribution. We set $\alpha = 2$ in our experiments. The other is the uniformity metric which is defined as the logarithm of the average pairwise Gaussian potential:
|
| 148 |
+
|
| 149 |
+
$$
|
| 150 |
+
\ell_{\text{uniform}}(f; t) \triangleq \log \underset{x, y \overset{\text{i.i.d.}}{\sim} p_{\text{data}}}{\mathbb{E}} \left[ e^{-t \| f(x; \theta) - f(y; \theta) \|_{2}^{2}} \right], \quad t > 0 \tag{7}
|
| 151 |
+
$$
|
| 152 |
+
|
| 153 |
+
In our experiments, we set $t = 2$ . The uniformity metric is also aligned with the objective of contrastive learning that the embeddings of random samples should scatter on the hypersphere. We take the checkpoints of SimGRACE, GraphCL and MoCL every 2 epochs during training and visualize the alignment $\ell_{\text{align}}$ and
|
| 154 |
+
|
| 155 |
+

|
| 156 |
+
Figure 3: $\ell_{\text{align}} - \ell_{\text{uniform}}$ plot for SimGRACE, GraphCL and MoCL on MUTAG dataset. The numbers around the points are the indexes of epochs. For both $\ell_{\text{align}}$ and $\ell_{\text{uniform}}$ , lower is better.
|
| 157 |
+
|
| 158 |
+
uniformity $\ell_{\text{uniform}}$ metrics in Figure 3. As can be observed, all the three methods can improve the alignment and uniformity. However, GraphCL achieves a smaller gain on the alignment than SimGRACE and MoCL. In other words, the positive pairs can not stay close in GraphCL because general graph data augmentations (drop edges, drop nodes and etc.) destroy the semantics of original graph data, which degrades the quality of the representations learned by GraphCL. Instead, MoCL augments graph data with domain knowledge as guidance and thus can preserve semantics during augmentation. Eventually, MoCL dramatically improves the alignment. Compared with GraphCL, SimGRACE can achieve better alignment while improving uniformity because encoder perturbation can preserve data semantics well. On the other hand, although MoCL achieves better alignment than SimGRACE via introducing domain knowledge as guidance, it only achieves a small gain on the uniformity, and eventually underperforms SimGRACE.
|
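Both metrics used in this analysis are straightforward to compute from graph-level embeddings. The sketch below follows Eqs. (6) and (7) under the usual assumption of [43] that embeddings are L2-normalized before measuring alignment and uniformity; the function names are ours.

```python
import torch
import torch.nn.functional as F


def alignment(h: torch.Tensor, h_prime: torch.Tensor, alpha: float = 2.0) -> torch.Tensor:
    """Eq. (6): expected distance between f(x; theta) and f(x; theta')."""
    h, h_prime = F.normalize(h, dim=1), F.normalize(h_prime, dim=1)
    return (h - h_prime).norm(dim=1).pow(alpha).mean()


def uniformity(h: torch.Tensor, t: float = 2.0) -> torch.Tensor:
    """Eq. (7): log of the average pairwise Gaussian potential."""
    h = F.normalize(h, dim=1)
    sq_dists = torch.pdist(h, p=2).pow(2)
    return sq_dists.mul(-t).exp().mean().log()
```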
| 159 |
+
|
| 160 |
+
# 3.3 AT-SimGRACE
|
| 161 |
+
|
| 162 |
+
Recently, GraphCL [56] shows that GNNs can gain robustness using their proposed framework. However, they did not explain why GraphCL can enhance the robustness. Additionally, GraphCL seems to be immunized to random attacks well while being unsatisfactory against adversarial attacks. In this section, we aim to utilize Adversarial Training (AT) [11, 23] to improve the adversarial robustness of SimGRACE in a principled way. Generally, AT directly incorporates adversarial examples into the training process to solve the following optimization problem:
|
| 163 |
+
|
| 164 |
+
$$
|
| 165 |
+
\min_{\theta} \mathcal{L}^{\prime}(\theta), \quad \text{where} \quad \mathcal{L}^{\prime}(\theta) = \frac{1}{n} \sum_{i=1}^{n} \max_{\left\| \mathbf{x}_{i}^{\prime} - \mathbf{x}_{i} \right\|_{p} \leq \epsilon} \ell_{i}^{\prime}\left(f\left(\mathbf{x}_{i}^{\prime}; \theta\right), y_{i}\right), \tag{8}
|
| 166 |
+
$$
|
| 167 |
+
|
| 168 |
+
where $n$ is the number of training examples, $\mathbf{x}_i^{\prime}$ is the adversarial example within the $\epsilon$ -ball (bounded by an $L_{p}$ -norm) centered at
|
| 169 |
+
|
| 170 |
+
natural example $\mathbf{x}_i, f$ is the DNN with weight $\theta$ , $\ell'(\cdot)$ is the standard supervised classification loss (e.g., the cross-entropy loss), and $\mathcal{L}'(\theta)$ is called the "adversarial loss". However, above general framework of AT can not directly be applied in graph contrastive learning because (1) AT requires labels as supervision while labels are not available in graph contrastive learning; (2) Perturbing each graph for the dataset in an adversarial way will introduce heavy computational overhead, which has been pointed out in GROC [18]. To remedy the first issue, we substitute supervised classification loss in Eq. (8) with contrastive loss in Eq. (4). To tackle the second issue, instead of conducting adversarial transformation of graph data, we perturb the encoder in an adversarial way, which is more computationally efficient.
|
| 171 |
+
|
| 172 |
+
Assuming that $\Theta$ is the weight space of GNNs, for any $\mathbf{w}$ and any positive $\epsilon$, we can define the norm ball in $\Theta$ with radius $\epsilon$ centered at $\mathbf{w}$ as,
|
| 173 |
+
|
| 174 |
+
$$
|
| 175 |
+
\mathbf {R} (\mathbf {w}; \epsilon) := \{\theta \in \Theta : \| \theta - \mathbf {w} \| \leq \epsilon \}, \tag {9}
|
| 176 |
+
$$
|
| 177 |
+
|
| 178 |
+
we choose $L_{2}$ norm to define the norm ball in our experiments. With this definition, we can now formulate our AT-SimGRACE as an optimization problem,
|
| 179 |
+
|
| 180 |
+
$$
|
| 181 |
+
\min _ {\theta} \mathcal {L} (\theta + \Delta),
|
| 182 |
+
$$
|
| 183 |
+
|
| 184 |
+
where $\mathcal{L}(\theta +\Delta) = \frac{1}{M}\sum_{i = 1}^{M}\max_{\Delta \in \mathbf{R}(0;\epsilon)}\ell_i\left(f(\mathcal{G}_i;\theta +\Delta),f(\mathcal{G}_i;\theta)\right),$ (10)
|
| 185 |
+
|
| 186 |
+
where $M$ is the number of graphs in the dataset. We propose Algorithm 1 to solve this optimization problem. Specifically, for inner maximization, we forward $I$ steps to update $\Delta$ in the direction of increasing the contrastive loss using gradient ascent algorithm. With the output perturbation $\Delta$ of inner maximization, the outer loops update the weights $\theta$ of GNNs with mini-batched SGD.
|
| 187 |
+
|
| 188 |
+
Algorithm 1: Encoder perturbation of AT-SimGRACE
|
| 189 |
+
Data: Graph dataset $\mathcal{D} = \{\mathcal{G}_1,\mathcal{G}_2,\dots,\mathcal{G}_M\}$ , contrastive loss $\ell$ , batch size $N$ , initial encoder weights $\theta$ , inner iterations $I$ , inner learning rate $\zeta$ , outer learning rate $\gamma$ , norm ball radius $\epsilon$ .
|
| 190 |
+
for each mini-batch do
|
| 191 |
+
Sample $\mathcal{D}_B = \{\mathcal{G}_i\}_{i = 1}^N$ from $\mathcal{D}$ .
|
| 192 |
+
Initialize perturbation: $\Delta \leftarrow 0$ .
|
| 193 |
+
for $t = 0,1,2,\ldots,I - 1$ do
  Update perturbation: $\Delta \gets \Delta +\zeta \sum_{i = 1}^{N}\nabla_{\theta}\ell_{i}\left(f\left(\mathcal{G}_{i};\theta +\Delta\right),f\left(\mathcal{G}_{i};\theta\right)\right) / N$;
  if $\| \Delta \| _2 > \epsilon$ then
|
| 194 |
+
    Normalize perturbation: $\Delta \gets \epsilon \Delta /\| \Delta \| _2$;
  end
|
| 195 |
+
end
|
| 196 |
+
Update weights:
|
| 197 |
+
$\theta^{\prime}\gets \theta -\gamma \sum_{i = 1}^{N}\nabla_{\theta}\ell_{i}\left(f\left(\mathcal{G}_{i};\theta +\Delta\right),f\left(\mathcal{G}_{i};\theta\right)\right) / N.$
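A hedged PyTorch-style sketch of one outer step of Algorithm 1 follows. The encoder interface, the `contrastive_loss` helper (e.g., the NT-Xent sketch above), and treating the anchor branch $f(\mathcal{G};\theta)$ as a constant during differentiation are simplifying assumptions, not the authors' released implementation; in Algorithm 1 the outer gradient also flows through the anchor branch.

```python
import torch

def _shift_(params, delta, sign=1.0):
    # In-place add (sign=+1) or remove (sign=-1) the perturbation Delta from the weights.
    with torch.no_grad():
        for p, d in zip(params, delta):
            p.add_(sign * d)

def at_simgrace_step(encoder, batch, contrastive_loss, zeta, gamma, eps, inner_steps):
    """One outer iteration of Algorithm 1 for a single mini-batch (illustrative sketch)."""
    params = [p for p in encoder.parameters() if p.requires_grad]
    delta = [torch.zeros_like(p) for p in params]            # Delta <- 0

    with torch.no_grad():                                     # anchor view f(G; theta),
        z_anchor = encoder(batch)                             # treated as a constant here

    # Inner maximization: I gradient-ascent steps on Delta, projected onto the eps-ball.
    for _ in range(inner_steps):
        _shift_(params, delta)                                # evaluate at theta + Delta
        loss = contrastive_loss(encoder(batch), z_anchor)
        grads = torch.autograd.grad(loss, params)
        _shift_(params, delta, sign=-1.0)                     # restore theta
        with torch.no_grad():
            for d, g in zip(delta, grads):
                d.add_(zeta * g)                              # ascent step on Delta
            norm = torch.sqrt(sum(d.pow(2).sum() for d in delta))
            if norm > eps:                                    # project back onto ||Delta||_2 <= eps
                for d in delta:
                    d.mul_(eps / norm)

    # Outer minimization: one SGD step on theta with the worst-case Delta held fixed.
    _shift_(params, delta)
    loss = contrastive_loss(encoder(batch), z_anchor)
    grads = torch.autograd.grad(loss, params)
    _shift_(params, delta, sign=-1.0)
    with torch.no_grad():
        for p, g in zip(params, grads):
            p.sub_(gamma * g)
    return float(loss)
```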
|
| 198 |
+
|
| 199 |
+
# 3.4 Theoretical Justification
|
| 200 |
+
|
| 201 |
+
In this section, we explain why AT-SimGRACE can enhance the robustness of graph contrastive learning. To start, it is widely accepted that a flatter loss landscape brings robustness [3, 30, 44]. For example, as formulated in Eq. 8, adversarial training (AT) enhances robustness by restricting the change of the loss when the model's input is perturbed. Thus, we justify AT-SimGRACE theoretically by showing that it flattens the loss landscape. Inspired by previous work [28] that connects the sharpness of the loss landscape with PAC-Bayes theory [24, 25], we utilize the PAC-Bayes framework to derive guarantees on the expected error. Assuming that the prior distribution $P$ over the weights is a zero-mean Gaussian with variance $\sigma^2$, with probability at least $1 - \delta$ over the draw of $M$ graphs, the expected error of the encoder can be bounded as:
|
| 202 |
+
|
| 203 |
+
$$
|
| 204 |
+
\mathbb{E}_{\{\mathcal{G}_{i}\}_{i=1}^{M}, \Delta}[\mathcal{L}(\theta + \Delta)] \leq \mathbb{E}_{\Delta}[\mathcal{L}(\theta + \Delta)] + 4\sqrt{\frac{KL(\theta + \Delta \| P) + \ln \frac{2M}{\delta}}{M}}. \tag{11}
|
| 205 |
+
$$
|
| 206 |
+
|
| 207 |
+
We choose $\Delta$ to be a zero-mean spherical Gaussian perturbation with variance $\sigma^2$ in every direction, and set the variance of the perturbation of each weight proportional to its magnitude, $\sigma = \alpha \| \theta\|$. Besides, we substitute $\mathbb{E}_{\Delta}[\mathcal{L}(\theta +\Delta)]$ with $\mathcal{L}(\theta) + \{\mathbb{E}_{\Delta}[\mathcal{L}(\theta +\Delta)] - \mathcal{L}(\theta)\}$. Then, we can rewrite Eq. 11 as:
|
| 208 |
+
|
| 209 |
+
$$
\mathbb{E}_{\{\mathcal{G}_{i}\}_{i=1}^{M}, \Delta}[\mathcal{L}(\theta + \Delta)] \leq \mathcal{L}(\theta) + \underbrace{\left\{\mathbb{E}_{\Delta}[\mathcal{L}(\theta + \Delta)] - \mathcal{L}(\theta)\right\}}_{\text{Expected sharpness}} + 4\sqrt{\frac{1}{M}\left(\frac{1}{2\alpha} + \ln \frac{2M}{\delta}\right)}. \tag{12}
$$
|
| 220 |
+
|
| 221 |
+
It is obvious that $\mathbb{E}_{\Delta}[\mathcal{L}(\theta + \Delta)] \leq \max_{\Delta} [\mathcal{L}(\theta + \Delta)]$, and the third term $4\sqrt{\frac{1}{M}\left(\frac{1}{2\alpha} + \ln \frac{2M}{\delta}\right)}$ is a constant. Thus, AT-SimGRACE optimizes the worst-case sharpness of the loss landscape, $\max_{\Delta} [\mathcal{L}(\theta + \Delta)] - \mathcal{L}(\theta)$, which tightens the bound on the expected error and explains why AT-SimGRACE can enhance robustness.
|
| 222 |
+
|
| 223 |
+
# 4 EXPERIMENTS
|
| 224 |
+
|
| 225 |
+
In this section, we conduct experiments to evaluate SimGRACE and AT-SimGRACE by answering the following research questions.
|
| 226 |
+
|
| 227 |
+
- RQ1. (Generalizability) Does SimGRACE outperform competitors in unsupervised and semi-supervised settings?
|
| 228 |
+
- RQ2. (Transferability) Can GNNs pre-trained with SimGRACE show better transferability than competitors?
|
| 229 |
+
- RQ3. (Robustness) Can AT-SimGRACE perform better than existing competitors against various adversarial attacks?
|
| 230 |
+
- RQ4. (Efficiency) How efficient is SimGRACE in terms of time and memory? Is it more efficient than its competitors?
|
| 231 |
+
- RQ5. (Hyperparameter Sensitivity) Is the proposed SimGRACE sensitive to hyperparameters such as the magnitude of the perturbation $\eta$, the number of training epochs, and the batch size?
|
| 232 |
+
|
| 233 |
+
Table 2: Comparing classification accuracy with baselines under the same experimental setting. The top three accuracies (or ranks) for each dataset are emphasized in bold. AR denotes average rank. - indicates that results are not available in published papers.
|
| 234 |
+
|
| 235 |
+
<table><tr><td>Methods</td><td>NCI1</td><td>PROTEINS</td><td>DD</td><td>MUTAG</td><td>COLLAB</td><td>RDT-B</td><td>RDT-M5K</td><td>IMDB-B</td><td>AR ↓</td></tr><tr><td>GL</td><td>-</td><td>-</td><td>-</td><td>81.66 ± 2.11</td><td>-</td><td>77.34 ± 0.18</td><td>41.01 ± 0.17</td><td>65.87 ± 0.98</td><td>8.3</td></tr><tr><td>WL</td><td>80.01 ± 0.50</td><td>72.92 ± 0.56</td><td>-</td><td>80.72 ± 3.00</td><td>-</td><td>68.82 ± 0.41</td><td>46.06 ± 0.21</td><td>72.30 ± 3.44</td><td>6.2</td></tr><tr><td>DGK</td><td>80.31 ± 0.46</td><td>73.30 ± 0.82</td><td>-</td><td>87.44 ± 2.72</td><td>-</td><td>78.04 ± 0.39</td><td>41.27 ± 0.18</td><td>66.96 ± 0.56</td><td>5.5</td></tr><tr><td>node2vec</td><td>54.89 ± 1.61</td><td>57.49 ± 3.57</td><td>-</td><td>72.63 ± 10.20</td><td>-</td><td>-</td><td>-</td><td>-</td><td>9.0</td></tr><tr><td>sub2vec</td><td>52.84 ± 1.47</td><td>53.03 ± 5.55</td><td>-</td><td>61.05 ± 15.80</td><td>-</td><td>71.48 ± 0.41</td><td>36.68 ± 0.42</td><td>55.26 ± 1.54</td><td>10.2</td></tr><tr><td>graph2vec</td><td>73.22 ± 1.81</td><td>73.30 ± 2.05</td><td>-</td><td>83.15 ± 9.25</td><td>-</td><td>75.78 ± 1.03</td><td>47.86 ± 0.26</td><td>71.10 ± 0.54</td><td>6.7</td></tr><tr><td>MVGRL</td><td>-</td><td>-</td><td>-</td><td>75.40 ± 7.80</td><td>-</td><td>82.00 ± 1.10</td><td>-</td><td>63.60 ± 4.20</td><td>8.3</td></tr><tr><td>InfoGraph</td><td>76.20 ± 1.06</td><td>74.44 ± 0.31</td><td>72.85 ± 1.78</td><td>89.01 ± 1.13</td><td>70.65 ± 1.13</td><td>82.50 ± 1.42</td><td>53.46 ± 1.03</td><td>73.03 ± 0.87</td><td>3.8</td></tr><tr><td>GraphCL</td><td>77.87 ± 0.41</td><td>74.39 ± 0.45</td><td>78.62 ± 0.40</td><td>86.80 ± 1.34</td><td>71.36 ± 1.15</td><td>89.53 ± 0.84</td><td>55.99 ± 0.28</td><td>71.14 ± 0.44</td><td>3.1</td></tr><tr><td>JOAO</td><td>78.07 ± 0.47</td><td>74.55 ± 0.41</td><td>77.32 ± 0.54</td><td>87.35 ± 1.02</td><td>69.50 ± 0.36</td><td>85.29 ± 1.35</td><td>55.74 ± 0.63</td><td>70.21 ± 3.08</td><td>4.3</td></tr><tr><td>JOAOv2</td><td>78.36 ± 0.53</td><td>74.07 ± 1.10</td><td>77.40 ± 1.15</td><td>87.67 ± 0.79</td><td>69.33 ± 0.34</td><td>86.42 ± 1.45</td><td>56.03 ± 0.27</td><td>70.83 ± 0.25</td><td>3.6</td></tr><tr><td>SimGRACE</td><td>79.12 ± 0.44</td><td>75.35 ± 0.09</td><td>77.44 ± 1.11</td><td>89.01 ± 1.31</td><td>71.72 ± 0.82</td><td>89.51 ± 0.89</td><td>55.91 ± 0.34</td><td>71.30 ± 0.77</td><td>2.0</td></tr></table>
|
| 236 |
+
|
| 237 |
+
# 4.1 Experimental Setup
|
| 238 |
+
|
| 239 |
+
4.1.1 Datasets. For unsupervised and semi-supervised learning, we use datasets from the TUDataset benchmark [26], including graph data for various social networks [2, 54] and biochemical molecules [9, 31]. For transfer learning, we perform pre-training on ZINC-2M and PPI-306K and finetune the model on various datasets including PPI, BBBP, ToxCast and SIDER.
|
| 240 |
+
|
| 241 |
+
4.1.2 Evaluation Protocols. Following previous works on graph-level self-supervised representation learning [38, 55, 56], we evaluate the generalizability of the learned representations in both unsupervised and semi-supervised settings. In the unsupervised setting, we train SimGRACE on the whole dataset to learn graph representations and feed them into a downstream SVM classifier with 10-fold cross-validation. In the semi-supervised setting, we pre-train GNNs with SimGRACE on all the data and perform finetuning and evaluation with $K$ ( $K = \frac{1}{\text{label rate}}$ ) folds for datasets without an explicit training/validation/test split. For datasets with such a split, we pre-train GNNs on the training data, finetune on part of the training data, and evaluate on the validation/test sets. More details can be found in the appendix.
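The unsupervised evaluation step can be sketched as follows. The SVM configuration shown is an assumption; the exact kernel and regularization grid follow the cited protocols rather than being specified here.

```python
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC

def evaluate_unsupervised(embeddings: np.ndarray, labels: np.ndarray, folds: int = 10):
    # Frozen graph embeddings are fed to an SVM evaluated with k-fold cross-validation.
    clf = SVC(C=1.0)
    scores = cross_val_score(clf, embeddings, labels, cv=folds, scoring="accuracy")
    return scores.mean(), scores.std()
```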
|
| 242 |
+
|
| 243 |
+
4.1.3 Compared baselines. We compare SimGRACE with state-of-the-art graph kernel methods including GL [35], WL [34] and DGK [54]. Also, we compare SimGRACE with other graph self-supervised learning methods: GAE [20], node2vec [12], sub2vec [1], graph2vec [27], EdgePred [15], AttrMasking [15], ContextPred [15], Infomax (DGI) [45], InfoGraph [38], and the instance-instance contrastive methods GraphCL [56] and JOAO(v2) [55].
|
| 244 |
+
|
| 245 |
+
# 4.2 Unsupervised and semi-supervised learning (RQ1)
|
| 246 |
+
|
| 247 |
+
For unsupervised representation learning, as can be observed in Table 2, SimGRACE outperforms the other baselines and always ranks in the top three on all the datasets. Generally, SimGRACE performs better on biochemical molecules than data-augmentation-based methods. The reason is that the semantics of molecular graphs are
|
| 248 |
+
|
| 249 |
+
more fragile than those of social networks; general augmentations (node dropping, edge dropping, etc.) adopted in the other baselines do not alter the semantics of social networks dramatically. For the semi-supervised task, as can be observed in Table 4, we report two settings with $1\%$ and $10\%$ label rates, respectively. In the $1\%$ setting, SimGRACE outperforms previous baselines by a large margin or matches the performance of SOTA methods. In the $10\%$ setting, SimGRACE performs comparably to SOTA methods including GraphCL and JOAO(v2), whose augmentations are derived via expensive trial-and-error or cumbersome search.
|
| 250 |
+
|
| 251 |
+
# 4.3 Transferability (RQ2)
|
| 252 |
+
|
| 253 |
+
To evaluate the transferability of the pre-training scheme, we conduct transfer-learning experiments on molecular property prediction in chemistry and protein function prediction in biology, following previous works [15, 50, 56]. Specifically, we pre-train and finetune the models on different datasets. For pre-training, the learning rate is tuned in $\{0.01, 0.1, 1.0\}$ and the epoch number in $\{20, 40, 60, 80, 100\}$ via grid search. As sketched in Table 3, there is no universally beneficial pre-training scheme, especially for the out-of-distribution scenario in transfer learning. However, SimGRACE shows competitive or better transferability than other pre-training schemes, especially on the PPI dataset.
|
| 254 |
+
|
| 255 |
+
# 4.4 Adversarial robustness (RQ3)
|
| 256 |
+
|
| 257 |
+
Following previous works [7, 56], we perform experiments on synthetic data to classify the number of components in graphs under the RandSampling, GradArgmax and RL-S2V attacks, in order to evaluate the robustness of AT-SimGRACE. For a fair comparison, we adopt Structure2vec [6] as the GNN encoder as in [7, 56]. Besides, we pretrain the GNN encoder for 150 epochs because adversarial training takes longer to converge. We set the inner learning rate $\zeta = 0.001$ and the radius of the perturbation ball $\epsilon = 0.01$. As demonstrated in Table 5, AT-SimGRACE boosts the robustness of GNNs dramatically compared with training from scratch and GraphCL under three typical evasion attacks.
|
| 258 |
+
|
| 259 |
+
Table 3: Results for the transfer learning setting. We report the mean (and standard deviation) ROC-AUC over 3 seeds with scaffold splitting. The top three results for each dataset are emphasized in bold.
|
| 260 |
+
|
| 261 |
+
<table><tr><td>Pre-Train dataset</td><td>PPI-306K</td><td colspan="9">ZINC 2M</td></tr><tr><td>Fine-Tune dataset</td><td>PPI</td><td>Tox21</td><td>ToxCast</td><td>Sider</td><td>ClinTox</td><td>MUV</td><td>HIV</td><td>BBBP</td><td>Bace</td><td>Average</td></tr><tr><td>No Pre-Train</td><td>64.8(1.0)</td><td>74.6 (0.4)</td><td>61.7 (0.5)</td><td>58.2 (1.7)</td><td>58.4 (6.4)</td><td>70.7 (1.8)</td><td>75.5 (0.8)</td><td>65.7 (3.3)</td><td>72.4 (3.8)</td><td>67.15</td></tr><tr><td>EdgePred</td><td>65.7(1.3)</td><td>76.0 (0.6)</td><td>64.1 (0.6)</td><td>60.4 (0.7)</td><td>64.1 (3.7)</td><td>75.1 (1.2)</td><td>76.3 (1.0)</td><td>67.3 (2.4)</td><td>77.3 (3.5)</td><td>70.08</td></tr><tr><td>AttrMasking</td><td>65.2(1.6)</td><td>75.1 (0.9)</td><td>63.3 (0.6)</td><td>60.5 (0.9)</td><td>73.5 (4.3)</td><td>75.8 (1.0)</td><td>75.3 (1.5)</td><td>65.2 (1.4)</td><td>77.8 (1.8)</td><td>70.81</td></tr><tr><td>ContextPred</td><td>64.4(1.3)</td><td>73.6 (0.3)</td><td>62.6 (0.6)</td><td>59.7 (1.8)</td><td>74.0 (3.4)</td><td>72.5 (1.5)</td><td>75.6 (1.0)</td><td>70.6 (1.5)</td><td>78.8 (1.2)</td><td>70.93</td></tr><tr><td>GraphCL</td><td>67.88(0.85)</td><td>75.1 (0.7)</td><td>63.0 (0.4)</td><td>59.8 (1.3)</td><td>77.5 (3.8)</td><td>76.4 (0.4)</td><td>75.1 (0.7)</td><td>67.8 (2.4)</td><td>74.6 (2.1)</td><td>71.16</td></tr><tr><td>JOAO</td><td>64.43(1.38)</td><td>74.8 (0.6)</td><td>62.8 (0.7)</td><td>60.4 (1.5)</td><td>66.6 (3.1)</td><td>76.6 (1.7)</td><td>76.9 (0.7)</td><td>66.4 (1.0)</td><td>73.2 (1.6)</td><td>69.71</td></tr><tr><td>SimGRACE</td><td>70.25(1.22)</td><td>75.6 (0.5)</td><td>63.4 (0.5)</td><td>60.6 (1.0)</td><td>75.6 (3.0)</td><td>76.9 (1.3)</td><td>75.2 (0.9)</td><td>71.3 (0.9)</td><td>75.0 (1.7)</td><td>71.70</td></tr></table>
|
| 262 |
+
|
| 263 |
+
Table 4: Comparing classification accuracy with baselines under the same semi-supervised setting. The top three accuracies or ranks are emphasized in bold. - indicates that the label rate is too low for the given dataset size. LR and AR are short for label rate and average rank, respectively.
|
| 264 |
+
|
| 265 |
+
<table><tr><td>LR</td><td>Methods</td><td>NCI1</td><td>PROTEINS</td><td>DD</td><td>COLLAB</td><td>RDT-B</td><td>RDT-M5K</td><td>AR ↓</td></tr><tr><td rowspan="9">1%</td><td>No pre-train.</td><td>60.72 ± 0.45</td><td>-</td><td>-</td><td>57.46 ± 0.25</td><td>-</td><td>-</td><td>8.5</td></tr><tr><td>Augmentations</td><td>60.49 ± 0.46</td><td>-</td><td>-</td><td>58.40 ± 0.97</td><td>-</td><td>-</td><td>8.0</td></tr><tr><td>GAE</td><td>61.63 ± 0.84</td><td>-</td><td>-</td><td>63.20 ± 0.67</td><td>-</td><td>-</td><td>5.5</td></tr><tr><td>Infomax</td><td>62.72 ± 0.65</td><td>-</td><td>-</td><td>61.70 ± 0.77</td><td>-</td><td>-</td><td>4.0</td></tr><tr><td>ContextPred</td><td>61.21 ± 0.77</td><td>-</td><td>-</td><td>57.60 ± 2.07</td><td>-</td><td>-</td><td>7.5</td></tr><tr><td>GraphCL</td><td>62.55 ± 0.86</td><td>-</td><td>-</td><td>64.57 ± 1.15</td><td>-</td><td>-</td><td>2.0</td></tr><tr><td>JOAO</td><td>61.97 ± 0.72</td><td>-</td><td>-</td><td>63.71 ± 0.84</td><td>-</td><td>-</td><td>4.5</td></tr><tr><td>JOAOv2</td><td>62.52 ± 1.16</td><td>-</td><td>-</td><td>64.51 ± 2.21</td><td>-</td><td>-</td><td>3.0</td></tr><tr><td>SimGRACE</td><td>64.21 ± 0.65</td><td>-</td><td>-</td><td>64.28 ± 0.98</td><td>-</td><td>-</td><td>2.0</td></tr><tr><td rowspan="9">10%</td><td>No pre-train.</td><td>73.72 ± 0.24</td><td>70.40 ± 1.54</td><td>73.56 ± 0.41</td><td>73.71 ± 0.27</td><td>86.63 ± 0.27</td><td>51.33 ± 0.44</td><td>7.7</td></tr><tr><td>Augmentations</td><td>73.59 ± 0.32</td><td>70.29 ± 0.64</td><td>74.30 ± 0.81</td><td>74.19 ± 0.13</td><td>87.74 ± 0.39</td><td>52.01 ± 0.20</td><td>7.0</td></tr><tr><td>GAE</td><td>74.36 ± 0.24</td><td>70.51 ± 0.17</td><td>74.54 ± 0.68</td><td>75.09 ± 0.19</td><td>87.69 ± 0.40</td><td>33.58 ± 0.13</td><td>6.3</td></tr><tr><td>Infomax</td><td>74.86± 0.26</td><td>72.27 ± 0.40</td><td>75.78 ± 0.34</td><td>73.76 ± 0.29</td><td>88.66 ± 0.95</td><td>53.61 ± 0.31</td><td>3.7</td></tr><tr><td>ContextPred</td><td>73.00 ± 0.30</td><td>70.23 ± 0.63</td><td>74.66 ± 0.51</td><td>73.69 ± 0.37</td><td>84.76 ± 0.52</td><td>51.23 ± 0.84</td><td>8.3</td></tr><tr><td>GraphCL</td><td>74.63± 0.25</td><td>74.17± 0.34</td><td>76.17± 1.37</td><td>74.23 ± 0.21</td><td>89.11± 0.19</td><td>52.55 ± 0.45</td><td>2.8</td></tr><tr><td>JOAO</td><td>74.48 ± 0.27</td><td>72.13 ± 0.92</td><td>75.69 ± 0.67</td><td>75.30 ± 0.32</td><td>88.14 ± 0.25</td><td>52.83± 0.54</td><td>4.2</td></tr><tr><td>JOAOv2</td><td>74.86± 0.39</td><td>73.31± 0.48</td><td>75.81± 0.73</td><td>75.53± 0.18</td><td>88.79± 0.65</td><td>52.71 ± 0.28</td><td>2.5</td></tr><tr><td>SimGRACE</td><td>74.60 ± 0.41</td><td>74.03 ± 0.51</td><td>76.48 ± 0.52</td><td>74.74 ± 0.28</td><td>88.86 ± 0.62</td><td>53.97 ± 0.64</td><td>2.3</td></tr></table>
|
| 266 |
+
|
| 267 |
+
Table 5: Performance under three adversarial attacks for GNNs of different depths, following the protocols in [7].
|
| 268 |
+
|
| 269 |
+
<table><tr><td rowspan="2">Methods</td><td colspan="3">Two-Layer</td><td colspan="3">Three-Layer</td><td colspan="3">Four-Layer</td></tr><tr><td>No Pre-Train</td><td>GraphCL</td><td>AT-SimGRACE</td><td>No Pre-Train</td><td>GraphCL</td><td>AT-SimGRACE</td><td>No Pre-Train</td><td>GraphCL</td><td>AT-SimGRACE</td></tr><tr><td>Unattack</td><td>93.20</td><td>94.73</td><td>94.24</td><td>98.20</td><td>98.33</td><td>99.32</td><td>98.87</td><td>99.00</td><td>99.13</td></tr><tr><td>RandSampling</td><td>78.73</td><td>80.68</td><td>81.73</td><td>92.27</td><td>92.60</td><td>94.27</td><td>95.13</td><td>97.40</td><td>97.67</td></tr><tr><td>GradArgmax</td><td>69.47</td><td>69.26</td><td>75.13</td><td>64.60</td><td>89.33</td><td>93.00</td><td>95.80</td><td>97.00</td><td>96.60</td></tr><tr><td>RL-S2V</td><td>42.93</td><td>42.20</td><td>44.86</td><td>41.93</td><td>61.66</td><td>66.00</td><td>70.20</td><td>84.86</td><td>85.29</td></tr></table>
|
| 270 |
+
|
| 271 |
+
# 4.5 Efficiency (Training time and memory cost) (RQ4)
|
| 272 |
+
|
| 273 |
+
In Table 6, we compare SimGRACE with state-of-the-art methods including GraphCL and JOAOv2 in terms of training time and memory overhead. Here, the training time refers to the time for the pre-training stage of the semi-supervised
|
| 274 |
+
|
| 275 |
+
task, and the memory overhead refers to the total memory cost of the model parameters and all hidden representations of a batch. As can be observed, SimGRACE runs nearly 40-90 times faster than JOAOv2 and 2.5-4 times faster than GraphCL. If we take the time for manual trial-and-error in GraphCL into consideration, the superiority of SimGRACE becomes even more pronounced. Also, SimGRACE requires less
|
| 276 |
+
|
| 277 |
+

|
| 278 |
+
(a) NCI1
|
| 279 |
+
|
| 280 |
+

|
| 281 |
+
(b) MUTAG
|
| 282 |
+
|
| 283 |
+

|
| 284 |
+
(c) COLLAB
|
| 285 |
+
|
| 286 |
+

|
| 287 |
+
(d) RDT-5K
|
| 288 |
+
Figure 4: Performance versus the magnitude of the perturbation $(\eta)$ in the unsupervised representation learning task.
|
| 289 |
+
|
| 290 |
+
Table 6: Comparisons of efficiency on three graph datasets. Note that we do not take the time for the manual trial-and-error of GraphCL into consideration; in fact, manually picking suitable augmentations for GraphCL is much more time-consuming. All three methods are evaluated on a 32GB V100 GPU.
|
| 291 |
+
|
| 292 |
+
<table><tr><td>Dataset</td><td>Algorithm</td><td>Training Time</td><td>Memory</td></tr><tr><td rowspan="3">PROTEINS</td><td>GraphCL</td><td>111s</td><td>1231MB</td></tr><tr><td>JOAOv2</td><td>4088s</td><td>1403MB</td></tr><tr><td>SimGRACE</td><td>46 s</td><td>1175 MB</td></tr><tr><td rowspan="3">COLLAB</td><td>GraphCL</td><td>1033s</td><td>10199MB</td></tr><tr><td>JOAOv2</td><td>10742s</td><td>7303MB</td></tr><tr><td>SimGRACE</td><td>378 s</td><td>6547 MB</td></tr><tr><td rowspan="3">RDT-B</td><td>GraphCL</td><td>917s</td><td>4135MB</td></tr><tr><td>JOAOv2</td><td>10278s</td><td>3935MB</td></tr><tr><td>SimGRACE</td><td>280 s</td><td>2729 MB</td></tr></table>
|
| 293 |
+
|
| 294 |
+
computational memory than GraphCL and JOAOv2. In particular, the efficiency of SimGRACE can be more prominent on large-scale social graphs, such as COLLAB and RDT-B.
|
| 295 |
+
|
| 296 |
+
# 4.6 Hyper-parameters sensitivity analysis (RQ5)
|
| 297 |
+
|
| 298 |
+
4.6.1 Magnitude of the perturbation. As can be observed in Figure 4, weight perturbation is crucial in SimGRACE. If we set the magnitude of the perturbation to zero ( $\eta = 0$ ), the performance is usually the lowest among all perturbation settings across these four datasets. This observation aligns with our intuition: without perturbation, the two views of a graph are identical, so the positive-pair loss becomes trivially zero and training degenerates into homogeneously pushing all graph representations away from each other, which is hard to justify. Instead, appropriate perturbations force the model to learn representations invariant to the perturbations by maximizing the agreement between a graph and its perturbed counterpart. Besides, in line with previous works [14, 32] claiming that "hard" positive and negative pairs can boost the performance of contrastive learning, we observe that a larger perturbation magnitude (within an appropriate range) brings consistent performance improvements. However, overly large perturbations lead to
|
| 299 |
+
|
| 300 |
+
performance degradation because the semantics of graph data are not preserved.
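For concreteness, the weight perturbation studied here can be sketched as below. Scaling the Gaussian noise of each weight tensor by $\eta$ times its empirical standard deviation is our reading of the perturbation scheme and should be treated as an assumption rather than the exact released implementation.

```python
import copy
import torch

def perturbed_copy(encoder, eta):
    # Build the perturbed encoder that produces the second view: each weight tensor
    # receives Gaussian noise scaled by eta times its empirical standard deviation.
    # eta = 0 recovers the degenerate case discussed above (two identical views).
    vice_encoder = copy.deepcopy(encoder)
    with torch.no_grad():
        for p in vice_encoder.parameters():
            if p.numel() > 1:                  # skip degenerate single-element tensors
                p.add_(eta * p.std() * torch.randn_like(p))
    return vice_encoder
```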
|
| 301 |
+
|
| 302 |
+

|
| 303 |
+
Figure 5: Performance of SimGRACE trained with different batch size and epochs on NCI1 dataset.
|
| 304 |
+
|
| 305 |
+
4.6.2 Batch size and training epochs. Figure 5 shows the performance of SimGRACE trained with various batch sizes and numbers of epochs. Generally, a larger batch size or more training epochs brings better performance, because a larger batch size provides more negative samples for contrasting. Similarly, training longer also exposes each sample to more varied negative samples, since the dataset is re-partitioned into different batches across epochs. In our experiments, for a fair comparison, we follow the same settings as other competitors [55, 56] and train the GNN encoder with a batch size of 128 for 20 epochs. In fact, the performance of SimGRACE can be further improved with a larger batch size and longer training.
|
| 306 |
+
|
| 307 |
+
# 5 CONCLUSIONS
|
| 308 |
+
|
| 309 |
+
In this paper, we propose a simple framework (SimGRACE) for graph contrastive learning. Although it may appear simple, we demonstrate that SimGRACE can outperform or match state-of-the-art competitors on multiple graph datasets of various scales and types, while enjoying an unprecedented degree of flexibility, high
|
| 310 |
+
|
| 311 |
+
efficiency and ease of use. We emancipate graph contrastive learning from tedious manual tuning, cumbersome search and expensive domain knowledge. Furthermore, we devise an adversarial training scheme to enhance the robustness of SimGRACE in a principled way and theoretically explain why it works. There are two promising avenues for future work: (1) exploring whether encoder perturbation can work well in other domains such as computer vision and natural language processing; and (2) applying the pre-trained GNNs to more real-world tasks including social analysis and biochemistry.
|
| 312 |
+
|
| 313 |
+
# ACKNOWLEDGMENTS
|
| 314 |
+
|
| 315 |
+
This work is supported in part by the Science and Technology Innovation 2030 - Major Project (No. 2021ZD0150100) and National Natural Science Foundation of China (No. U21A20427).
|
| 316 |
+
|
| 317 |
+
# REFERENCES
|
| 318 |
+
|
| 319 |
+
[1] Bijaya Adhikari, Yao Zhang, Naren Ramakrishnan, and Aditya B. Prakash. 2018. Sub2Vec: Feature Learning for Subgraphs. ADVANCES IN KNOWLEDGE DISCOVERY AND DATA MINING, PAKDD 2018, PT II (2018), 170-182.
|
| 320 |
+
[2] Benedek Rozemberczki, Oliver Kiss, and Rik Sarkar. 2020. An API Oriented Open-source Python Framework for Unsupervised Learning on Graphs. (2020).
|
| 321 |
+
[3] Chen Liu, Mathieu Salzmann, Tao Lin, Ryota Tomioka, and Sabine Süsstrunk. 2020. On the Loss Landscape of Adversarial Training: Identifying Challenges and How to Overcome Them. NIPS 2020 (2020).
|
| 322 |
+
[4] Ting Chen, Song Bian, and Yizhou Sun. 2019. Are Powerful Graph Neural Nets Necessary? A Dissection on Graph Classification. arXiv: Learning (2019).
|
| 323 |
+
[5] Ting Chen, Yizhou Sun, Yue Shi, and Liangjie Hong. 2017. On Sampling Strategies for Neural Network-based Collaborative Filtering. Proceedings of the 23rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (2017), 767-776.
|
| 324 |
+
[6] Hanjun Dai, Bo Dai, and Le Song. 2016. Discriminative Embeddings of Latent Variable Models for Structured Data. ICML (2016).
|
| 325 |
+
[7] Hanjun Dai, Hui Li, Tian Tian, Xin Huang, Lin Wang, Jun Zhu, and Le Song. 2018. Adversarial Attack on Graph Structured Data. international conference on machine learning (2018), 1123-1132.
|
| 326 |
+
[8] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. north american chapter of the association for computational linguistics (2019).
|
| 327 |
+
[9] D. Paul Dobson and J. Andrew Doig. 2003. Distinguishing Enzyme Structures from Non-enzymes Without Alignments. Journal of Molecular Biology (2003), 771-783.
|
| 328 |
+
[10] Yuanqi Du, Shiyu Wang, Xiaojie Guo, Hengning Cao, Shujie Hu, Junji Jiang, Aishwarya Varala, Abhinav Angirekula, and Liang Zhao. 2021. GraphGT: Machine Learning Datasets for Deep Graph Generation and Transformation. (2021).
|
| 329 |
+
[11] J. Ian Goodfellow, Jonathon Shlens, and Christian Szegedy. 2015. Explaining and Harnessing Adversarial Examples. international conference on learning representations (2015).
|
| 330 |
+
[12] Aditya Grover and Jure Leskovec. 2016. node2vec: Scalable Feature Learning for Networks. KDD (2016), 855-864.
|
| 331 |
+
[13] Kaveh Hassani and Amir Hosein Khasahmadi. 2020. Contrastive multi-view representation learning on graphs. In International Conference on Machine Learning, PMLR, 4116-4126.
|
| 332 |
+
[14] Chih-Hui Ho and Nuno Nvasconcelos. 2020. Contrastive Learning with Adversarial Examples. NIPS 2020 (2020).
|
| 333 |
+
[15] Weihua Hu, Bowen Liu, Joseph Gomes, Marinka Zitnik, Percy Liang, Vijay Pande, and Jure Leskovec. 2020. Strategies for Pre-training Graph Neural Networks. ICLR (2020).
|
| 334 |
+
[16] Ziniu Hu, Yuxiao Dong, Kuansan Wang, Kai-Wei Chang, and Yizhou Sun. 2020. GPT-GNN: Generative Pre-Training of Graph Neural Networks. KDD '20: The 26th ACM SIGKDD Conference on Knowledge Discovery and Data Mining Virtual Event CA USA July, 2020 (2020), 1857-1867.
|
| 335 |
+
[17] Ming Jin, Yizhen Zheng, Yuan-Fang Li, Chen Gong, Chuan Zhou, and Shirui Pan. 2021. Multi-Scale Contrastive Siamese Networks for Self-Supervised Graph Representation Learning. IJCAI (2021), 1477-1483.
|
| 336 |
+
[18] Nikola Jovanovic, Zhao Meng, Lukas Faber, and Roger Wattenhofer. 2021. Towards robust graph contrastive learning. arXiv preprint arXiv:2102.13085 (2021).
|
| 337 |
+
[19] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. 2020. Momentum Contrast for Unsupervised Visual Representation Learning. CVPR (2020), 9726-9735.
|
| 338 |
+
[20] Thomas N. Kipf and Max Welling. 2016. Variational Graph Auto-Encoders. CoRR (2016).
|
| 339 |
+
|
| 340 |
+
[21] Thomas N Kipf and Max Welling. 2016. Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907 (2016).
|
| 341 |
+
[22] Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Soricut. 2020. ALBERT: A Lite BERT for Self-supervised Learning of Language Representations. ICLR (2020).
|
| 342 |
+
[23] Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. 2019. Towards Deep Learning Models Resistant to Adversarial Attacks. international conference on learning representations (2019).
|
| 343 |
+
[24] David A. McAllester. 1999. PAC-Bayesian model averaging. COLT (1999), 164-170.
|
| 344 |
+
[25] A. David McAllester and Jonathan Baxter. 1999. Some PAC-Bayesian Theorems. Machine Learning (1999), 355-363.
|
| 345 |
+
[26] Christopher Morris, M. Nils Krieger, Franka Bause, Kristian Kersting, Petra Mutzel, and Marion Neumann. 2020. TUDataset: A collection of benchmark datasets for learning with graphs. (2020).
|
| 346 |
+
[27] Annamalai Narayanan, Mahinthan Chandramohan, Rajasekar Venkatesan, Lihui Chen, Yang Liu, and Shantanu Jaiswal. 2017. graph2vec: Learning Distributed Representations of Graphs. arXiv: Artificial Intelligence (2017).
|
| 347 |
+
[28] Behnam Neyshabur, Srinadh Bhojanapalli, David McAllester, and Nathan Srebro. 2017. Exploring Generalization in Deep Learning. ADVANCES IN NEURAL INFORMATION PROCESSING SYSTEMS 30 (NIPS 2017) (2017), 5947-5956.
|
| 348 |
+
[29] Aäron van den Oord, Yazhe Li, and Oriol Vinyals. 2019. Representation Learning with Contrastive Predictive Coding. arXiv: Learning (2019).
|
| 349 |
+
[30] Pu Zhao, Pin-Yu Chen, Payel Das, Karthikeyan Natesan Ramamurthy, and Xue Lin. 2020. Bridging Mode Connectivity in Loss Landscapes and Adversarial Robustness. ICLR (2020).
|
| 350 |
+
[31] Kaspar Riesen and Horst Bunke. 2008. IAM Graph Database Repository for Graph Based Pattern Recognition and Machine Learning. SSPR/SPR (2008), 287-297.
|
| 351 |
+
[32] Joshua David Robinson, Ching-Yao Chuang, Suvrit Sra, and Stefanie Jegelka. 2021. Contrastive Learning with Hard Negative Samples. In International Conference on Learning Representations. https://openreview.net/forum?id=CR1XOQ0UTH
|
| 352 |
+
[33] Yu Rong, Yatao Bian, Tingyang Xu, Weiyang Xie, Ying WEI, Wenbing Huang, and Junzhou Huang. 2020. Self-Supervised Graph Transformer on Large-Scale Molecular Data. NIPS 2020 (2020).
|
| 353 |
+
[34] Nino Shervashidze, Pascal Schweitzer, Jan van Erik Leeuwen, Kurt Mehlhorn, and M. Karsten Borgwardt. 2011. Weisfeiler-Lehman Graph Kernels. Journal of Machine Learning Research (2011), 2539-2561.
|
| 354 |
+
[35] Nino Shervashidze, S. V. N. Vishwanathan, Tobias H. Petri, Kurt Mehlhorn, and Karsten M. Borgwardt. 2009. Efficient graphlet kernels for large graph comparison. AISTATS (2009), 488-495.
|
| 355 |
+
[36] Kihyuk Sohn. 2016. Improved Deep Metric Learning with Multi-class N-pair Loss Objective. ADVANCES IN NEURAL INFORMATION PROCESSING SYSTEMS 29 (NIPS 2016) (2016), 1849-1857.
|
| 356 |
+
[37] Fan-Yun Sun, Jordan Hoffman, Vikas Verma, and Jian Tang. 2020. InfoGraph: Unsupervised and Semi-supervised Graph-Level Representation Learning via Mutual Information Maximization. ICLR (2020).
|
| 357 |
+
[38] Fan-Yun Sun, Jordan Hoffmann, Vikas Verma, and Jian Tang. 2019. InfoGraph: Unsupervised and semi-supervised graph-level representation learning via mutual information maximization. arXiv preprint arXiv:1908.01000 (2019).
|
| 358 |
+
[39] Mengying Sun, Jing Xing, Huijun Wang, Bin Chen, and Jiayu Zhou. 2021. MoCL: Contrastive Learning on Molecular Graphs with Multi-level Domain Knowledge. KDD 2021 (2021).
|
| 359 |
+
[40] Cheng Tan, Jun Xia, Lirong Wu, and Stan Z Li. 2021. Co-learning: Learning from noisy labels with self-supervision. In Proceedings of the 29th ACM International Conference on Multimedia. 1405-1413.
|
| 360 |
+
[41] Shantanu Thakoor, Corentin Tallec, Mohammad Gheshlaghi Azar, Remi Munos, Petar Velickovic, and Michal Valko. 2021. Bootstrapped Representation Learning on Graphs. In ICLR 2021 Workshop on Geometrical and Topological Representation Learning. https://openreview.net/forum?id=QrzVRAA49Ud
|
| 361 |
+
[42] Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. 2020. A Simple Framework for Contrastive Learning of Visual Representations. ICML (2020), 1597-1607.
|
| 362 |
+
[43] Tongzhou Wang and Phillip Isola. 2020. Understanding Contrastive Representation Learning through Alignment and Uniformity on the Hypersphere. ICML (2020), 9929-9939.
|
| 363 |
+
[44] Vinay Uday Prabhu, Dian Ang Yap, Joyce Xu, and John Whaley. 2019. Understanding Adversarial Robustness Through Loss Landscape Geometries. (2019).
|
| 364 |
+
[45] Petar Velickovic, William Fedus, L. William Hamilton, Pietro Lio, Yoshua Bengio, and Devon R. Hjelm. 2019. Deep Graph Infomax. ICLR (2019).
|
| 365 |
+
[46] Zhirong Wu, Yuanjun Xiong, X Stella Yu, and Dahua Lin. 2018. Unsupervised feature learning via non-parametric instance discrimination. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (2018), 3733-3742.
|
| 366 |
+
[47] Jun Xia, Haitao Lin, Yongjie Xu, Lirong Wu, Zhangyang Gao, Siyuan Li, and Stan Z. Li. 2021. Towards Robust Graph Neural Networks against Label Noise. https://openreview.net/forum?id=H38f_9b90BO
|
| 367 |
+
[48] Jun Xia, Cheng Tan, Lirong Wu, Yongjie Xu, and Stan Z Li. 2022. OT Cleaner: Label Correction as Optimal Transport. IEEE International Conference on Acoustics, Speech and Signal Processing (2022).
|
| 368 |
+
|
| 369 |
+
[49] Jun Xia, Lirong Wu, Jintao Chen, Ge Wang, and Stan Z. Li. 2021. Debiased Graph Contrastive Learning. CoRR abs/2110.02027 (2021). arXiv:2110.02027 https://arxiv.org/abs/2110.02027
|
| 370 |
+
[50] JUN XIA, Jiangbin Zheng, Cheng Tan, Ge Wang, and Stan Z Li. 2022. Towards Effective and Generalizable Fine-tuning for Pre-trained Molecular Graph Models. bioRxiv (2022). https://doi.org/10.1101/2022.02.03.479055 arXiv:https://www.biorxiv.org/content/early/2022/02/06/2022.02.03.479055.full.pdf
|
| 371 |
+
[51] Jun Xia, Yanqiao Zhu, Yuanqi Du, and Stan Z Li. 2022. A Survey of Pretraining on Graphs: Taxonomy, Methods, and Applications. arXiv preprint arXiv:2202.07893 (2022).
|
| 372 |
+
[52] Keyulu Xu, Weihua Hu, Jure Leskovec, and Stefanie Jegelka. 2019. How Powerful are Graph Neural Networks? In International Conference on Learning Representations. https://openreview.net/forum?id=ryGs6iA5Km
|
| 373 |
+
[53] Keyulu Xu, Weihua Hu, Jure Leskovec, and Stefanie Jegelka. 2019. How Powerful are Graph Neural Networks? international conference on learning representations (2019).
|
| 374 |
+
[54] Pinar Yanardag and V. N. S. Vishwanathan. 2015. Deep Graph Kernels. ACM Knowledge Discovery and Data Mining (2015), 1365-1374.
|
| 375 |
+
[55] Yuning You, Tianlong Chen, Yang Shen, and Zhangyang Wang. 2021. Graph Contrastive Learning Automated. arXiv preprint arXiv:2106.07594 (2021).
|
| 376 |
+
[56] Yuning You, Tianlong Chen, Yongduo Sui, Ting Chen, Zhangyang Wang, and Yang Shen. 2020. Graph Contrastive Learning with Augmentations. In Advances in Neural Information Processing Systems, H. Larochelle, M. Ranzato, R. Hadsell, M. F. Balcan, and H. Lin (Eds.), Vol. 33. Curran Associates, Inc., 5812-5823. https://proceedings.neurips.cc/paper/2020/file/3fe230348e9a12c13120749e3f9fa4cd-Paper.pdf
|
| 377 |
+
[57] Jiangbin Zheng, Yile Wang, Ge Wang, Jun Xia, Yufei Huang, Guojiang Zhao, Yue Zhang, and Stan Z. Li. 2022. Using Context-to-Vector with Graph Retrofitting. ACL (2022).
|
| 378 |
+
[58] Yanqiao Zhu, Yichen Xu, Qiang Liu, and Shu Wu. 2021. An Empirical Study of Graph Contrastive Learning. In Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks, Joaquin Vanschoren and Serena Yeung (Eds.). Curran Associates, Inc.
|
| 379 |
+
[59] Yanqiao Zhu, Yichen Xu, Feng Yu, Qiang Liu, Shu Wu, and Liang Wang. 2020. Deep Graph Contrastive Representation Learning. In ICML Workshop on Graph Representation Learning and Beyond. https://arxiv.org/abs/2006.04131
|
| 380 |
+
[60] Yanqiao Zhu, Yichen Xu, Feng Yu, Qiang Liu, Shu Wu, and Liang Wang. 2021. Graph Contrastive Learning with Adaptive Augmentation. WWW (2021), 2069-2080.
|
| 381 |
+
|
| 382 |
+
# A APPENDIX: DATASETS IN VARIOUS SETTINGS
|
| 383 |
+
|
| 384 |
+
# A.1 Unsupervised learning & Semi-supervised learning
|
| 385 |
+
|
| 386 |
+
Table 7: Datasets statistics for unsupervised and semi-supervised experiments.
|
| 387 |
+
|
| 388 |
+
<table><tr><td>Datasets</td><td>Category</td><td>Graph Num.</td><td>Avg. Node</td><td>Avg. Degree</td></tr><tr><td>NCI1</td><td>Biochemical Molecules</td><td>4110</td><td>29.87</td><td>1.08</td></tr><tr><td>PROTEINS</td><td>Biochemical Molecules</td><td>1113</td><td>39.06</td><td>1.86</td></tr><tr><td>DD</td><td>Biochemical Molecules</td><td>1178</td><td>284.32</td><td>715.66</td></tr><tr><td>MUTAG</td><td>Biochemical Molecules</td><td>188</td><td>17.93</td><td>19.79</td></tr><tr><td>COLLAB</td><td>Social Networks</td><td>5000</td><td>74.49</td><td>32.99</td></tr><tr><td>RDT-B</td><td>Social Networks</td><td>2000</td><td>429.63</td><td>1.15</td></tr><tr><td>RDB-M</td><td>Social Networks</td><td>2000</td><td>429.63</td><td>497.75</td></tr><tr><td>IMDB-B</td><td>Social Networks</td><td>1000</td><td>19.77</td><td>96.53</td></tr></table>
|
| 389 |
+
|
| 390 |
+
For the unsupervised setting, experiments are performed 5 times, each corresponding to a 10-fold evaluation, with the mean and standard deviation of accuracies $(\%)$ reported. For semi-supervised learning, we perform experiments with $1\%$ (if there are over 10 samples for each class) and $10\%$ label rates 5 times, each corresponding to a 10-fold evaluation, with the mean and standard deviation of accuracies $(\%)$ reported. For pre-training, the learning rate is tuned in $\{0.1, 1.0, 5.0, 10.0\}$ and the epoch number in $\{20, 40, 60, 80, 100\}$ via grid search. All datasets used in the unsupervised and semi-supervised experiments are listed in Table 7.
|
| 391 |
+
|
| 392 |
+
# A.2 Transfer learning
|
| 393 |
+
|
| 394 |
+
Table 8: Datasets statistics for transfer learning.
|
| 395 |
+
|
| 396 |
+
<table><tr><td>Datasets</td><td>Category</td><td>Utilization</td><td>Graph Num.</td><td>Avg. Node</td><td>Avg. Degree</td></tr><tr><td>ZINC-2M</td><td>Biochemical Molecules</td><td>Pre-Training</td><td>2,000,000</td><td>26.62</td><td>57.72</td></tr><tr><td>PPI-306K</td><td>Protein-Protein Intersection Networks</td><td>Pre-Training</td><td>306,925</td><td>39.82</td><td>729.62</td></tr><tr><td>BBBP</td><td>Biochemical Molecules</td><td>Finetuning</td><td>2,039</td><td>24.06</td><td>51.90</td></tr><tr><td>ToxCast</td><td>Biochemical Molecules</td><td>Finetuning</td><td>8,576</td><td>18.78</td><td>38.52</td></tr><tr><td>SIDER</td><td>Biochemical Molecules</td><td>Finetuning</td><td>1,427</td><td>33.64</td><td>70.71</td></tr></table>
|
| 397 |
+
|
| 398 |
+
The datasets utilized in transfer learning are listed in Table 8. ZINC-2M and PPI-306K are used for pre-training and the remaining ones for fine-tuning.
|
| 399 |
+
|
| 400 |
+
# B GNN ARCHITECTURES IN VARIOUS SETTINGS
|
| 401 |
+
|
| 402 |
+
For a fair comparison, we adopt the same GNN architectures as previous competitors. Specifically, for the unsupervised task, GIN [53] with 3 layers and 32 hidden dimensions is adopted as the encoder. For the semi-supervised task, we utilize ResGCN [4] with 5 layers and 128 hidden dimensions. For transfer learning, we adopt GIN with the default setting in [15] as the GNN-based encoder. For the experiments on adversarial robustness, Structure2vec is adopted as the GNN-based encoder as in [7].
|
2202.03xxx/2202.03104/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:df5f40d44272b272a44563bc32eff658edba0b0a8a9a4d64ce17c3599d5a5920
|
| 3 |
+
size 685574
|
2202.03xxx/2202.03104/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.03xxx/2202.03107/a94ab9aa-3075-44f9-9854-b8ff37a3aba4_content_list.json
ADDED
|
@@ -0,0 +1,1215 @@
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "Bubble identification from images with machine learning methods",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
147,
|
| 8 |
+
84,
|
| 9 |
+
850,
|
| 10 |
+
137
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "H. Hessenkemper<sup>1</sup>, S. Starke<sup>2</sup>, Y. Atassi<sup>1</sup>, T. Ziegenhein<sup>1,3</sup>, D. Lucas<sup>1</sup>",
|
| 17 |
+
"bbox": [
|
| 18 |
+
166,
|
| 19 |
+
149,
|
| 20 |
+
828,
|
| 21 |
+
168
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "<sup>1</sup>Helmholtz-Zentrum Dresden-Rossendorf, Institute of Fluid Dynamics, Bautzner Landstraße 400, 01328 Dresden, Germany",
|
| 28 |
+
"bbox": [
|
| 29 |
+
144,
|
| 30 |
+
181,
|
| 31 |
+
850,
|
| 32 |
+
219
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "$^{2}$ Helmholtz-Zentrum Dresden-Rossendorf, Department of Information Services and Computing, Bautzner Landstraße 400, 01328 Dresden, Germany",
|
| 39 |
+
"bbox": [
|
| 40 |
+
144,
|
| 41 |
+
233,
|
| 42 |
+
850,
|
| 43 |
+
294
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "$^{3}$ TIVConsultancy, $7^{\\text{th}}$ PI 1312, Tempe, AZ 85281, USA",
|
| 50 |
+
"bbox": [
|
| 51 |
+
231,
|
| 52 |
+
305,
|
| 53 |
+
763,
|
| 54 |
+
324
|
| 55 |
+
],
|
| 56 |
+
"page_idx": 0
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"text": "* Corresponding author. Tel.: +49 3512604719; fax: +49 3512603440.",
|
| 61 |
+
"bbox": [
|
| 62 |
+
112,
|
| 63 |
+
336,
|
| 64 |
+
719,
|
| 65 |
+
351
|
| 66 |
+
],
|
| 67 |
+
"page_idx": 0
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"text": "E-mail address: h.hessenkemper@hzdr.de (Hendrik Hessenkemper)",
|
| 72 |
+
"bbox": [
|
| 73 |
+
112,
|
| 74 |
+
354,
|
| 75 |
+
704,
|
| 76 |
+
370
|
| 77 |
+
],
|
| 78 |
+
"page_idx": 0
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "text",
|
| 82 |
+
"text": "Abstract",
|
| 83 |
+
"text_level": 1,
|
| 84 |
+
"bbox": [
|
| 85 |
+
115,
|
| 86 |
+
410,
|
| 87 |
+
211,
|
| 88 |
+
428
|
| 89 |
+
],
|
| 90 |
+
"page_idx": 0
|
| 91 |
+
},
|
| 92 |
+
{
|
| 93 |
+
"type": "text",
|
| 94 |
+
"text": "An automated and reliable processing of bubbly flow images is highly needed to analyse large data sets of comprehensive experimental series. A particular difficulty arises due to overlapping bubble projections in recorded images, which highly complicates the identification of individual bubbles. Recent approaches focus on the use of deep learning algorithms for this task and have already proven the high potential of such techniques. The main difficulties are the capability to handle different image conditions, higher gas volume fractions and a proper reconstruction of the hidden segment of a partly occluded bubble. In the present work, we try to tackle these points by testing three different methods based on Convolutional Neural Networks (CNN's) for the two former and two individual approaches that can be used subsequently to address the latter. Our focus is hereby on spherical, ellipsoidal and wobbling bubbles, which are typically encountered in air-water bubbly flows. To validate our methodology, we created test data sets with synthetic images that further demonstrate the capabilities as well as limitations of our combined approach. The generated data, code and trained models are made accessible to facilitate the use as well as further developments in the research field of bubble recognition in experimental images.",
|
| 95 |
+
"bbox": [
|
| 96 |
+
115,
|
| 97 |
+
439,
|
| 98 |
+
884,
|
| 99 |
+
728
|
| 100 |
+
],
|
| 101 |
+
"page_idx": 0
|
| 102 |
+
},
|
| 103 |
+
{
|
| 104 |
+
"type": "text",
|
| 105 |
+
"text": "Keywords. Bubbly flows, Deep Learning, Computer Vision, CNN, Instance segmentation",
|
| 106 |
+
"bbox": [
|
| 107 |
+
115,
|
| 108 |
+
737,
|
| 109 |
+
880,
|
| 110 |
+
755
|
| 111 |
+
],
|
| 112 |
+
"page_idx": 0
|
| 113 |
+
},
|
| 114 |
+
{
|
| 115 |
+
"type": "text",
|
| 116 |
+
"text": "1. Introduction",
|
| 117 |
+
"text_level": 1,
|
| 118 |
+
"bbox": [
|
| 119 |
+
115,
|
| 120 |
+
86,
|
| 121 |
+
297,
|
| 122 |
+
107
|
| 123 |
+
],
|
| 124 |
+
"page_idx": 1
|
| 125 |
+
},
|
| 126 |
+
{
|
| 127 |
+
"type": "text",
|
| 128 |
+
"text": "Deep neural networks have proven their superiority over traditional computer vision methods in various fields. Especially Convolutional Neural Networks (CNNs) have been shown to be very successful for image segmentation tasks (He et al., 2020; Ronneberger et al., 2015; Schmidt et al., 2018). They can achieve a high segmentation resolution, making the direct use of such methods interesting for investigating bubbly flows.",
|
| 129 |
+
"bbox": [
|
| 130 |
+
115,
|
| 131 |
+
122,
|
| 132 |
+
880,
|
| 133 |
+
212
|
| 134 |
+
],
|
| 135 |
+
"page_idx": 1
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"type": "text",
|
| 139 |
+
"text": "Using cameras to investigate bubbly flows from the outside is a common, affordable technique. However, the images' evaluation can be very complicated when bubbles start to overlap, strong turbulences are formed, and dense bubble swarms occur. Yucheng Fu provides an in-depth discussion of this problem in his dissertation (Fu, 2018).",
|
| 140 |
+
"bbox": [
|
| 141 |
+
115,
|
| 142 |
+
222,
|
| 143 |
+
880,
|
| 144 |
+
293
|
| 145 |
+
],
|
| 146 |
+
"page_idx": 1
|
| 147 |
+
},
|
| 148 |
+
{
|
| 149 |
+
"type": "text",
|
| 150 |
+
"text": "The task of recognizing bubbles with CNNs from images can be usually split up into identifying the bubble(s) in the picture, segmentation of overlapping bubbles and reconstruction of bubbles that are partly occluded from bubbles before. A common way to solve the task of identifying bubbles is to find so-called anchor points inside the bubble. Ideally, just one point per object exists, which is usually the center point. With respect to machine learning approaches, Haas et al., (2020) use a popular Region-based CNN called Faster-RCNN that proposes anchor points with corresponding bounding boxes around identified bubbles. Poletaev et al., (2020) use a CNN-based sliding window approach to approximate anchor points. Another approach to solve this task together with the task of segmenting overlapping bubbles is to directly predict a segmentation mask for an image with down- and upsampling CNN's that classify and assign each pixel to individual objects. Such pixel-to-pixel approaches have become very popular for detecting and segmenting cells and nuclei in biomedical microscopic images. For bubbly flows, Li et al., (2021) used a UNet to distinguish between foreground and background pixels as well as to generate centroid approximations. Kim and Park, (2021) used a slightly customized Mask-RCNN version that directly provides a segmentation mask as a result.",
|
| 151 |
+
"bbox": [
|
| 152 |
+
115,
|
| 153 |
+
303,
|
| 154 |
+
880,
|
| 155 |
+
592
|
| 156 |
+
],
|
| 157 |
+
"page_idx": 1
|
| 158 |
+
},
|
| 159 |
+
{
|
| 160 |
+
"type": "text",
|
| 161 |
+
"text": "For most of the above-mentioned studies on identifying bubbles, an ellipsoidal fit segments and reconstructs the objects solving the tasks of segmenting and reconstructing overlapping bubbles. Haas et al., (2020) used a subsequent CNN-based shape regression model that tries to fit an ellipse around the detected bubble. Cerqueira and Paladino, (2021) also used a CNN-based shape estimator to predict ellipses for given anchor points and bounding boxes. From our experience, an ellipsoidal fit is, however, only valid in simplified bubbly flows. With flow fields disturbed by turbulence and swarm effects, the surface tension may not be strong enough in relation to deforming forces so that bubbles can take almost arbitrary shapes (Masuk et al., 2021).",
|
| 162 |
+
"bbox": [
|
| 163 |
+
115,
|
| 164 |
+
601,
|
| 165 |
+
880,
|
| 166 |
+
764
|
| 167 |
+
],
|
| 168 |
+
"page_idx": 1
|
| 169 |
+
},
|
| 170 |
+
{
|
| 171 |
+
"type": "text",
|
| 172 |
+
"text": "In this work, we also adopt the strategy to use CNN's that provide pixel-to-pixel predictions in order to identify and segment bubbles. In particular, we test three different approaches for this. For the subsequent reconstruction task, we compare two individual approaches, one also based on fitting an ellipse and one based on a simple Neural Network. The latter aims to capture the hidden part of a partially occluded bubble even with more irregular shapes, allowing a more universal use of this approach. Finally, we test the accuracy of our combined approach on synthetic images with known gas volume fraction and bubble sizes to evaluate strength and weaknesses.",
|
| 173 |
+
"bbox": [
|
| 174 |
+
115,
|
| 175 |
+
772,
|
| 176 |
+
880,
|
| 177 |
+
917
|
| 178 |
+
],
|
| 179 |
+
"page_idx": 1
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"type": "text",
|
| 183 |
+
"text": "2.1. Bubble segmentation",
|
| 184 |
+
"text_level": 1,
|
| 185 |
+
"bbox": [
|
| 186 |
+
144,
|
| 187 |
+
124,
|
| 188 |
+
389,
|
| 189 |
+
143
|
| 190 |
+
],
|
| 191 |
+
"page_idx": 2
|
| 192 |
+
},
|
| 193 |
+
{
|
| 194 |
+
"type": "text",
|
| 195 |
+
"text": "Double UNet",
|
| 196 |
+
"text_level": 1,
|
| 197 |
+
"bbox": [
|
| 198 |
+
174,
|
| 199 |
+
148,
|
| 200 |
+
292,
|
| 201 |
+
165
|
| 202 |
+
],
|
| 203 |
+
"page_idx": 2
|
| 204 |
+
},
|
| 205 |
+
{
|
| 206 |
+
"type": "text",
|
| 207 |
+
"text": "In the following, the methods tested in this work for bubble identification and segmentation are described. The first method is based on so-called UNet's. A UNet describes a specific CNN architecture that allows a pixel-to-pixel prediction, which is commonly used in segmentation tasks for cell structures and medical images (Ronneberger et al., 2015). The name originates from the U-like shape of the CNN, consisting of an encoder and a decoder structure that have cross-connections between them. The encoder structure reduces the width and height of an array but increases the depth (channels), to extract features from an image, while the decoder structure does the opposite in order to obtain local information in the image. The general UNet architecture together with the parameters used in this work are shown in Figure 1.",
|
| 208 |
+
"bbox": [
|
| 209 |
+
110,
|
| 210 |
+
167,
|
| 211 |
+
885,
|
| 212 |
+
348
|
| 213 |
+
],
|
| 214 |
+
"page_idx": 2
|
| 215 |
+
},
|
| 216 |
+
{
|
| 217 |
+
"type": "image",
|
| 218 |
+
"img_path": "images/295c620a259dd1a26c6bd358af5a624b7741dd3938caaee1e640750274981dd5.jpg",
|
| 219 |
+
"image_caption": [
|
| 220 |
+
"Figure 1: UNet architecture and used parameters. Y refers to the number of filters and A to the input dimension for the respective layer, where Y increases and A decreases up to the basement block. Correspondingly, the Basement block of UNetL3 has $\\mathrm{Y} = 256$ and $\\mathrm{A} = 128$ , while the UNetL5 has $\\mathrm{Y} = 1024$ and $\\mathrm{A} = 32$ ."
|
| 221 |
+
],
|
| 222 |
+
"image_footnote": [],
|
| 223 |
+
"bbox": [
|
| 224 |
+
110,
|
| 225 |
+
353,
|
| 226 |
+
887,
|
| 227 |
+
766
|
| 228 |
+
],
|
| 229 |
+
"page_idx": 2
|
| 230 |
+
},
|
| 231 |
+
{
|
| 232 |
+
"type": "text",
|
| 233 |
+
"text": "In order to segment bubbles in images, we use two individual UNet's with slight modifications in comparison to the original one by Ronneberger et al., (2015). The first UNet (UNetL3) consists of three down- and upsampling levels and is trained to distinguish between foreground and background, in other words to classify all pixels whether they belong to the gas phase or to the liquid phase. The second UNet (UNetL5) is trained to",
|
| 234 |
+
"bbox": [
|
| 235 |
+
110,
|
| 236 |
+
841,
|
| 237 |
+
884,
|
| 238 |
+
932
|
| 239 |
+
],
|
| 240 |
+
"page_idx": 2
|
| 241 |
+
},
|
| 242 |
+
{
|
| 243 |
+
"type": "header",
|
| 244 |
+
"text": "2. Methods & Materials",
|
| 245 |
+
"bbox": [
|
| 246 |
+
114,
|
| 247 |
+
86,
|
| 248 |
+
403,
|
| 249 |
+
105
|
| 250 |
+
],
|
| 251 |
+
"page_idx": 2
|
| 252 |
+
},
|
| 253 |
+
{
|
| 254 |
+
"type": "text",
|
| 255 |
+
"text": "classify all pixels that belong to intersections of overlapping bubbles. Since this task is more difficult, a deeper net with five down- and upsampling levels is used. For this UNetL5, we use manually annotated edges (Figure 2) to calculate the loss function of the output of the network. In the context of machine learning, the loss function determines the error of the algorithm with respect to the target result, which is then minimized in the training step. For the UNetL5, the target is the correct classification of all pixels marked in red in Figure 2 as intersecting pixels. We use a log softmax cross entropy loss function to achieve this classification. Since the edge pixels are under-represented compared to the background pixels, we weight the background pixels' loss, so that the training focuses on the intersection pixels the network is intended to find. Finally, we calculate the weighted average of the loss of all pixels. We tested networks with fewer layers, with however limited success, the sigmoid cross entropy loss function with one output, which performs similar to the log softmax cross entropy and different weight factors for the background loss. For the latter, a value of 0.05 provided the best results.",
|
| 256 |
+
"bbox": [
|
| 257 |
+
110,
|
| 258 |
+
83,
|
| 259 |
+
885,
|
| 260 |
+
336
|
| 261 |
+
],
|
| 262 |
+
"page_idx": 3
|
| 263 |
+
},
|
| 264 |
+
{
|
| 265 |
+
"type": "image",
|
| 266 |
+
"img_path": "images/a9bde1523435c4358b914b59e166812be4b7a042b50b2640aae24a2a321ff427.jpg",
|
| 267 |
+
"image_caption": [
|
| 268 |
+
"Figure 2: Marked internal edges the UNetL5 is trained on, which are used to segment overlapping bubbles."
|
| 269 |
+
],
|
| 270 |
+
"image_footnote": [],
|
| 271 |
+
"bbox": [
|
| 272 |
+
223,
|
| 273 |
+
343,
|
| 274 |
+
771,
|
| 275 |
+
576
|
| 276 |
+
],
|
| 277 |
+
"page_idx": 3
|
| 278 |
+
},
|
| 279 |
+
{
|
| 280 |
+
"type": "text",
|
| 281 |
+
"text": "For the UNetL3 the loss function is similar to the one we use for the edges; however, instead of just applying a weighting-factor, a weight map is used to focus the learning on specific structures. Using a weight-map is necessary so that the network training is focused on separating bubbles that have only a small gap of background pixels in between them. The weight map is calculated as follows:",
|
| 282 |
+
"bbox": [
|
| 283 |
+
112,
|
| 284 |
+
612,
|
| 285 |
+
882,
|
| 286 |
+
702
|
| 287 |
+
],
|
| 288 |
+
"page_idx": 3
|
| 289 |
+
},
|
| 290 |
+
{
|
| 291 |
+
"type": "equation",
|
| 292 |
+
"text": "\n$$\nw = \\left\\{ \\begin{array}{l l} 1 0, & d _ {1} a n d d _ {2} < 1 0 \\\\ 1, & i n s i d e b u b b l e, \\\\ 0. 0 5, & e l s e \\end{array} \\right.\n$$\n",
|
| 293 |
+
"text_format": "latex",
|
| 294 |
+
"bbox": [
|
| 295 |
+
368,
|
| 296 |
+
709,
|
| 297 |
+
628,
|
| 298 |
+
762
|
| 299 |
+
],
|
| 300 |
+
"page_idx": 3
|
| 301 |
+
},
|
| 302 |
+
{
|
| 303 |
+
"type": "text",
|
| 304 |
+
"text": "where $d_{1}$ and $d_{2}$ are the distances between one interface and another interface. The weight map to train the segmentation between bubbles and background for the example given in Figure 2 is shown in Figure 3.",
|
| 305 |
+
"bbox": [
|
| 306 |
+
112,
|
| 307 |
+
770,
|
| 308 |
+
882,
|
| 309 |
+
826
|
| 310 |
+
],
|
| 311 |
+
"page_idx": 3
|
| 312 |
+
},
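The weight map above is fully determined by the case distinction. As an illustration only (not the authors' code), a minimal NumPy/SciPy sketch of how such a map could be computed from an instance-labelled mask might look as follows; the function name `weight_map`, its arguments and the use of distance transforms are assumptions derived from the equation:

```python
import numpy as np
from scipy.ndimage import distance_transform_edt

def weight_map(labels, w_sep=10.0, w_bubble=1.0, w_bg=0.05, d_max=10):
    """Per-pixel loss weights from an instance-labelled mask (0 = background)."""
    ids = [i for i in np.unique(labels) if i > 0]
    w = np.full(labels.shape, w_bg, dtype=np.float32)
    w[labels > 0] = w_bubble                      # pixels inside a bubble
    if len(ids) >= 2:
        # distance of every pixel to the nearest pixel of each bubble
        dists = np.stack([distance_transform_edt(labels != i) for i in ids])
        dists.sort(axis=0)
        d1, d2 = dists[0], dists[1]               # nearest / second-nearest bubble
        close_gap = (labels == 0) & (d1 < d_max) & (d2 < d_max)
        w[close_gap] = w_sep                      # narrow background gaps between bubbles
    return w
```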
|
| 313 |
+
{
|
| 314 |
+
"type": "image",
|
| 315 |
+
"img_path": "images/c869ce04e462bf6e60fb3a3faafa328fcf24ad1fba318cca21c2026a7b703089.jpg",
|
| 316 |
+
"image_caption": [
|
| 317 |
+
"Figure 3: Weight map for the loss function to train the segmentation between bubbles and background (UNetL3) for the example given in Figure 2."
|
| 318 |
+
],
|
| 319 |
+
"image_footnote": [],
|
| 320 |
+
"bbox": [
|
| 321 |
+
247,
|
| 322 |
+
80,
|
| 323 |
+
751,
|
| 324 |
+
256
|
| 325 |
+
],
|
| 326 |
+
"page_idx": 4
|
| 327 |
+
},
|
| 328 |
+
{
|
| 329 |
+
"type": "text",
|
| 330 |
+
"text": "A further advantage of having two individual UNet's is that the mask generated by the smaller and hence faster UNetL3 can also be used in PIV/PSV investigations to exclude bubbles in the liquid velocity interrogation step (Cerqueira et al., 2018; Hessenkemper and Ziegenhein, 2018).",
|
| 331 |
+
"bbox": [
|
| 332 |
+
112,
|
| 333 |
+
305,
|
| 334 |
+
882,
|
| 335 |
+
378
|
| 336 |
+
],
|
| 337 |
+
"page_idx": 4
|
| 338 |
+
},
|
| 339 |
+
{
|
| 340 |
+
"type": "text",
|
| 341 |
+
"text": "StarDist",
|
| 342 |
+
"text_level": 1,
|
| 343 |
+
"bbox": [
|
| 344 |
+
173,
|
| 345 |
+
388,
|
| 346 |
+
250,
|
| 347 |
+
404
|
| 348 |
+
],
|
| 349 |
+
"page_idx": 4
|
| 350 |
+
},
|
| 351 |
+
{
|
| 352 |
+
"type": "text",
|
| 353 |
+
"text": "The second method tested is called StarDist. It was also initially developed for segmenting cell nuclei in biomedical images by proposing star-convex polygons as object candidates (Schmidt et al., 2018). Since the shape of many bubbles can be well approximated with such star-convex polygons, we adopt this method without further modification. As for the first method, StarDist is also based on a UNet architecture for pixel-to-pixel predictions, but with a more sophisticated strategy. In particular, StarDist generates two arrays, both having the dimension of the input image. The first output is an object probability $d_{i,j}$ for each pixel defined as the normalized Euclidean distance to the nearest background pixel. Hence, this output is similar to distinguishing foreground-background like it is done with the UNetL3, but with continuous values that reach higher values in the center of the mask, which then have a higher probability to serve as object centers. The second output represents the Euclidean distance $r_{i,j}^{k}$ to the background for every pixel belonging to an object along a fixed set of $k$ radial directions. In other words, for every object pixel a star-convex polygon with $k$ points is proposed. Finally, by applying non-maximum suppression (NMS), only pixels with a high object probability are considered to avoid detecting a single instance multiple times. Figure 4 shows examples of star-convex polygons representing the outline of a bubble with a fixed number of $k = 64$ radial directions. We tested different hyperparameters such as more UNet layers, different subsampling resolutions to increase computation efficiency as well as different number of radial directions. By comparing the obtained results (see section 3.1 for the evaluation strategy) we found that a three layer UNet with $k = 64$ radial directions provided the best results for our task, while keeping all other hyperparameters as in the original version of Schmidt et al.(2018).",
|
| 354 |
+
"bbox": [
|
| 355 |
+
112,
|
| 356 |
+
407,
|
| 357 |
+
884,
|
| 358 |
+
828
|
| 359 |
+
],
|
| 360 |
+
"page_idx": 4
|
| 361 |
+
},
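To make the star-convex representation concrete, the following is a minimal, illustrative sketch (not the StarDist implementation) of how the $k$ radial distances from a candidate center to the background could be obtained by simple ray marching on a boolean object mask; the function name and arguments are assumptions:

```python
import numpy as np

def radial_distances(mask, center, k=64):
    """Distances from `center` to the object boundary along k equally spaced
    directions, obtained by stepping outwards on a boolean object mask."""
    h, w = mask.shape
    cy, cx = center
    r = np.zeros(k)
    for i, a in enumerate(2.0 * np.pi * np.arange(k) / k):
        dy, dx = np.sin(a), np.cos(a)
        d = 0.0
        # march outwards until the mask (or the image) is left
        while True:
            y, x = int(round(cy + d * dy)), int(round(cx + d * dx))
            if not (0 <= y < h and 0 <= x < w) or not mask[y, x]:
                break
            d += 1.0
        r[i] = d
    return r
```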
|
| 362 |
+
{
|
| 363 |
+
"type": "image",
|
| 364 |
+
"img_path": "images/c7c951556eadc17804d45e1985ab337282d39632b4dd321c46ee40929eb34e2c.jpg",
|
| 365 |
+
"image_caption": [
|
| 366 |
+
"Figure 4: Star-convex polygons with radial distances applied on a bubbly flow image."
|
| 367 |
+
],
|
| 368 |
+
"image_footnote": [],
|
| 369 |
+
"bbox": [
|
| 370 |
+
211,
|
| 371 |
+
80,
|
| 372 |
+
783,
|
| 373 |
+
243
|
| 374 |
+
],
|
| 375 |
+
"page_idx": 5
|
| 376 |
+
},
|
| 377 |
+
{
|
| 378 |
+
"type": "text",
|
| 379 |
+
"text": "Mask R-CNN",
|
| 380 |
+
"text_level": 1,
|
| 381 |
+
"bbox": [
|
| 382 |
+
174,
|
| 383 |
+
282,
|
| 384 |
+
290,
|
| 385 |
+
297
|
| 386 |
+
],
|
| 387 |
+
"page_idx": 5
|
| 388 |
+
},
|
| 389 |
+
{
|
| 390 |
+
"type": "text",
|
| 391 |
+
"text": "The third method is called Mask R-CNN. It has been used for numerous image segmentation tasks including also the segmentation of overlapping bubbles as done recently by Kim and Park, (2021). Mask R-CNN consists of an object detector based on the Faster R-CNN method (Ren et al., 2017), which predicts bounding boxes around found objects. In parallel, Mask R-CNN creates individual segmentation masks for each region of interest (ROI) with a Fully Convolutional Network, which again gives pixel-to-pixel results. As Kim and Park, (2021) already tested this method for the task of segmenting overlapping bubbles and obtained quite good results with some modifications in comparison to the original Mask R-CNN version by He et al., (2020), we adopt their version of Mask R-CNN with only minor parameter changes. Here we only increased the minimum detection confidence to 0.9, which is the probability threshold for detected instances and increased the number of training epochs to 40. For the reason stated above, the Mask R-CNN results are only used for comparison and are not discussed in detail.",
|
| 392 |
+
"bbox": [
|
| 393 |
+
115,
|
| 394 |
+
300,
|
| 395 |
+
880,
|
| 396 |
+
533
|
| 397 |
+
],
|
| 398 |
+
"page_idx": 5
|
| 399 |
+
},
|
| 400 |
+
{
|
| 401 |
+
"type": "text",
|
| 402 |
+
"text": "Training",
|
| 403 |
+
"text_level": 1,
|
| 404 |
+
"bbox": [
|
| 405 |
+
174,
|
| 406 |
+
546,
|
| 407 |
+
250,
|
| 408 |
+
563
|
| 409 |
+
],
|
| 410 |
+
"page_idx": 5
|
| 411 |
+
},
|
| 412 |
+
{
|
| 413 |
+
"type": "text",
|
| 414 |
+
"text": "The training data set consists of roughly 800 manually annotated training images with dimensions of $512 \\times 512$ (width x height). Since our goal is to obtain a rather universal model that can be applied to different kinds of bubbly flow images, we have conducted experiments with various flow and imaging conditions, i.e. different cameras, lenses, camera distances and illuminations. Most of the experiments consider air-water flows in buoyancy driven bubbly flows with gas flow rates in the range of $0.5 - 2\\mathrm{l / min}$ and a corresponding gas fraction in the range of $1 - 5\\%$ , together with some experiments in water-glycerol mixtures with a logarithmic Morton number of -6.6. The dataset consists of about 24,400 individual annotated bubbles in the size range of $1 - 10\\mathrm{mm}$ with dominantly spherical, ellipsoidal and wobbling bubble shapes. More information on the facilities in which the experiments were conducted can be found in (Liu et al., 2019; Ziegenhein and Lucas, 2019).",
|
| 415 |
+
"bbox": [
|
| 416 |
+
115,
|
| 417 |
+
565,
|
| 418 |
+
880,
|
| 419 |
+
780
|
| 420 |
+
],
|
| 421 |
+
"page_idx": 5
|
| 422 |
+
},
|
| 423 |
+
{
|
| 424 |
+
"type": "text",
|
| 425 |
+
"text": "To prevent overfitting, we apply image normalization and further random image augmentation steps to the training images, including adding Gaussian noise, intensity changes and horizontal flipping. We excluded $15\\%$ of the training data from the training as validation data, for which the performance details will be discussed in section 3.1. For the Mask R-CNN approach, we used the ImageNet pre-trained weights, while no pretrained weights were used for the other models. The training was performed on a NVIDIA",
|
| 426 |
+
"bbox": [
|
| 427 |
+
115,
|
| 428 |
+
791,
|
| 429 |
+
880,
|
| 430 |
+
898
|
| 431 |
+
],
|
| 432 |
+
"page_idx": 5
|
| 433 |
+
},
|
| 434 |
+
{
|
| 435 |
+
"type": "text",
|
| 436 |
+
"text": "Tesla V100. Additionally, the following Table 1 lists the computational aspects regarding training and inference of the tested models.",
|
| 437 |
+
"bbox": [
|
| 438 |
+
115,
|
| 439 |
+
82,
|
| 440 |
+
880,
|
| 441 |
+
118
|
| 442 |
+
],
|
| 443 |
+
"page_idx": 6
|
| 444 |
+
},
|
| 445 |
+
{
|
| 446 |
+
"type": "table",
|
| 447 |
+
"img_path": "images/17e53f9c6d19f636b5b7d2041f00bb7a309e461b7c74410a5028dea5e05b8c15.jpg",
|
| 448 |
+
"table_caption": [
|
| 449 |
+
"Table 1: Computational aspects regarding training and inference of the different models."
|
| 450 |
+
],
|
| 451 |
+
"table_footnote": [],
|
| 452 |
+
"table_body": "<table><tr><td>Name</td><td>No. of training epochs [-]</td><td>Time per epoch [s]</td><td>Inference time CPU [s]</td></tr><tr><td>UNetL3</td><td>100</td><td>212</td><td>1.6</td></tr><tr><td>UNetL5</td><td>100</td><td>370</td><td>4.12</td></tr><tr><td>StarDist</td><td>400</td><td>29.9</td><td>0.51</td></tr><tr><td>Mask R-CNN</td><td>40</td><td>696</td><td>6.5</td></tr></table>",
|
| 453 |
+
"bbox": [
|
| 454 |
+
115,
|
| 455 |
+
152,
|
| 456 |
+
880,
|
| 457 |
+
259
|
| 458 |
+
],
|
| 459 |
+
"page_idx": 6
|
| 460 |
+
},
|
| 461 |
+
{
|
| 462 |
+
"type": "text",
|
| 463 |
+
"text": "2.2. Hidden part reconstruction",
|
| 464 |
+
"text_level": 1,
|
| 465 |
+
"bbox": [
|
| 466 |
+
147,
|
| 467 |
+
286,
|
| 468 |
+
436,
|
| 469 |
+
305
|
| 470 |
+
],
|
| 471 |
+
"page_idx": 6
|
| 472 |
+
},
|
| 473 |
+
{
|
| 474 |
+
"type": "text",
|
| 475 |
+
"text": "As will be shown and discussed in the result section, all of the above described methods can be used to segment overlapping bubbles, but are not able to reconstruct the hidden part of a partly occluded bubble. In other words, with such a segmentation it is only possible to separate the visible parts, which would result in an underprediction of the partly occluded bubble. Based on a given segmentation mask we test two different methods to overcome this shortcoming.",
|
| 476 |
+
"bbox": [
|
| 477 |
+
115,
|
| 478 |
+
307,
|
| 479 |
+
880,
|
| 480 |
+
414
|
| 481 |
+
],
|
| 482 |
+
"page_idx": 6
|
| 483 |
+
},
|
| 484 |
+
{
|
| 485 |
+
"type": "text",
|
| 486 |
+
"text": "Ellipse fitting",
|
| 487 |
+
"text_level": 1,
|
| 488 |
+
"bbox": [
|
| 489 |
+
174,
|
| 490 |
+
426,
|
| 491 |
+
290,
|
| 492 |
+
444
|
| 493 |
+
],
|
| 494 |
+
"page_idx": 6
|
| 495 |
+
},
|
| 496 |
+
{
|
| 497 |
+
"type": "text",
|
| 498 |
+
"text": "The first method is comparably simple and follows the often used approach to fit an ellipse around the contour of a detected bubble. This, however, does not help to reconstruct the hidden part of a bubble when using only the detected segments. For this reason, only the contour points of a segment that do not exhibit a neighboring contour point of another segment are used for the ellipse fitting step. The two left images of Figure 5 show an example for this method. In cases of highly occluded bubbles, with only a few contour points that do not touch neighboring segments, this can lead to very small ellipses that are smaller than the actual segment. Then an ellipse is fitted around the complete segment to ensure that the fitted ellipse is at least as large as the detected segment.",
|
| 499 |
+
"bbox": [
|
| 500 |
+
115,
|
| 501 |
+
445,
|
| 502 |
+
880,
|
| 503 |
+
607
|
| 504 |
+
],
|
| 505 |
+
"page_idx": 6
|
| 506 |
+
},
|
| 507 |
+
{
|
| 508 |
+
"type": "text",
|
| 509 |
+
"text": "Radial distance correction (RDC)",
|
| 510 |
+
"text_level": 1,
|
| 511 |
+
"bbox": [
|
| 512 |
+
174,
|
| 513 |
+
618,
|
| 514 |
+
458,
|
| 515 |
+
634
|
| 516 |
+
],
|
| 517 |
+
"page_idx": 6
|
| 518 |
+
},
|
| 519 |
+
{
|
| 520 |
+
"type": "text",
|
| 521 |
+
"text": "The second method follows the algorithm idea of StarDist to represent the contour of an object through a fixed number of $K$ radial distances $r = (r_1, \\dots, r_K)$ from the object center to the boundary. In particular, the radial distances of a partly occluded bubble $r^H = (r_1^H, \\dots, r_K^H)$ are ending at the segmentation boundary and are hence shorter than for the actual object: $r_i^H \\leq r_i$ , $i = 1, \\dots, K$ . It therefore requires a function to properly extend those directions $r_i^H$ that touch a neighboring bubble based on the information given by the image segment, in other words based on the not hidden radial distances. In particular, we want a function that is able to predict the correct distances of the occluded bubble part to the center of the detected segment, which will be called radial distance correction (RDC) in the following. This idea is illustrated in the two right images of Figure 5, where the red colored radial distances are the shortened ones that need to be corrected.",
|
| 522 |
+
"bbox": [
|
| 523 |
+
115,
|
| 524 |
+
638,
|
| 525 |
+
880,
|
| 526 |
+
837
|
| 527 |
+
],
|
| 528 |
+
"page_idx": 6
|
| 529 |
+
},
|
| 530 |
+
{
|
| 531 |
+
"type": "image",
|
| 532 |
+
"img_path": "images/7a546ec481f43c4e0e66baca6ed40857c143820d1a466c9dc786ddd7ca0c35b9.jpg",
|
| 533 |
+
"image_caption": [
|
| 534 |
+
"Figure 5: Principles of the tested methods to reconstruct hidden bubble parts. From left to right: Segmentation mask, fitted ellipse, radial distances and ground truth radial distances."
|
| 535 |
+
],
|
| 536 |
+
"image_footnote": [],
|
| 537 |
+
"bbox": [
|
| 538 |
+
115,
|
| 539 |
+
80,
|
| 540 |
+
285,
|
| 541 |
+
161
|
| 542 |
+
],
|
| 543 |
+
"page_idx": 7
|
| 544 |
+
},
|
| 545 |
+
{
|
| 546 |
+
"type": "image",
|
| 547 |
+
"img_path": "images/6ecab05e3b5e5761662f420ccde8fea9f7dcd9f570c5c57c66d76afba6f437f7.jpg",
|
| 548 |
+
"image_caption": [],
|
| 549 |
+
"image_footnote": [],
|
| 550 |
+
"bbox": [
|
| 551 |
+
317,
|
| 552 |
+
89,
|
| 553 |
+
468,
|
| 554 |
+
158
|
| 555 |
+
],
|
| 556 |
+
"page_idx": 7
|
| 557 |
+
},
|
| 558 |
+
{
|
| 559 |
+
"type": "image",
|
| 560 |
+
"img_path": "images/d0f9eccdedaa5295ac6c5b51cc7bf858d3ff4da935ac9939edeba0ab99f8e888.jpg",
|
| 561 |
+
"image_caption": [],
|
| 562 |
+
"image_footnote": [],
|
| 563 |
+
"bbox": [
|
| 564 |
+
514,
|
| 565 |
+
90,
|
| 566 |
+
663,
|
| 567 |
+
158
|
| 568 |
+
],
|
| 569 |
+
"page_idx": 7
|
| 570 |
+
},
|
| 571 |
+
{
|
| 572 |
+
"type": "image",
|
| 573 |
+
"img_path": "images/df4045f4f7ab4bc5e482c4ea09c092b911c80ce509485777772ad32cf6dee891.jpg",
|
| 574 |
+
"image_caption": [],
|
| 575 |
+
"image_footnote": [],
|
| 576 |
+
"bbox": [
|
| 577 |
+
710,
|
| 578 |
+
90,
|
| 579 |
+
860,
|
| 580 |
+
158
|
| 581 |
+
],
|
| 582 |
+
"page_idx": 7
|
| 583 |
+
},
|
| 584 |
+
{
|
| 585 |
+
"type": "text",
|
| 586 |
+
"text": "To solve this regression task, we again make use of a Neural Network, but this time we use a feedforward artificial neural network. The input and output layer represent the fixed number of radial distances $r$ , in our case $K = 64$ . Hence, both layers consist of 64 neurons. We use three hidden layers with the same number of neurons as the input and output layer, both with a ReLu activation function. Furthermore, we use an Adam optimizer with a learning rate of $10^{-4}$ .",
|
| 587 |
+
"bbox": [
|
| 588 |
+
115,
|
| 589 |
+
211,
|
| 590 |
+
880,
|
| 591 |
+
319
|
| 592 |
+
],
|
| 593 |
+
"page_idx": 7
|
| 594 |
+
},
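A minimal sketch of the described regressor, assuming a Keras/TensorFlow implementation (the framework and the mean-squared-error loss are assumptions; the layer sizes, ReLU activations and Adam learning rate follow the text):

```python
import tensorflow as tf

K = 64  # number of radial directions, as stated in the text

# Input: the (possibly shortened) radial distances r^H of one segment,
# output: the corrected radial distances r of the full bubble.
rdc_model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(K,)),
    tf.keras.layers.Dense(K, activation="relu"),
    tf.keras.layers.Dense(K, activation="relu"),
    tf.keras.layers.Dense(K, activation="relu"),
    tf.keras.layers.Dense(K),
])
rdc_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
                  loss="mse")  # the loss function is an assumption
# rdc_model.fit(r_hidden, r_true, epochs=2000, batch_size=1400)  # per the text
```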
|
| 595 |
+
{
|
| 596 |
+
"type": "text",
|
| 597 |
+
"text": "Training",
|
| 598 |
+
"text_level": 1,
|
| 599 |
+
"bbox": [
|
| 600 |
+
174,
|
| 601 |
+
331,
|
| 602 |
+
250,
|
| 603 |
+
347
|
| 604 |
+
],
|
| 605 |
+
"page_idx": 7
|
| 606 |
+
},
|
| 607 |
+
{
|
| 608 |
+
"type": "text",
|
| 609 |
+
"text": "In order to train the RDC method, it is necessary to know the actual contour of a partly occluded bubble as ground truth. This is only possible with synthetic images, in other words images where bubbles are artificially placed on top of each other with respect to the view direction. Although this is possible with a generative adversarial network (GAN) (Fu and Liu, 2019), we use images of single bubbles that are cut out along their contour as done in our previous investigation (Hessenkemper et al., 2021). About 14,000 single bubble images in the size range of $2 - 7\\mathrm{mm}$ were used to create the synthetic images, where either two or three bubbles were placed randomly on top of each other in an empty image with dimensions of $256\\times 256$ . In total, about 150,000 training samples were created, where the radial distances $r$ are generated with the ideal segmentation mask (Figure 5, leftmost image). Ideal segmentation refers hereby to the correct assignment of each individual pixel to the correct bubble instance, which is given due to the synthetic nature of the data. The visible $r$ values of each bubble (Figure 5, third image from left) as well as the corresponding ground truth $r$ values (Figure 5, rightmost image) were extracted for the training. Care was taken that the bubbles overlap each other with at least $10\\%$ but not more than $90\\%$ of their area to avoid a complete occlusion. To be able to learn whether $r^H$ belongs to an occluded bubble that needs correction or whether $r^H$ belongs to a bubble in front that does not need correction, also the $r^H$ distances that do not need correction are included in the training. A further important point is that we do not use the initial pixel distances, but scale them with the known physical pixel size to obtain real distances. The advantage is that the model implicitly incorporates the size dependent shape of the bubble and is furthermore independent of the resolution of the image. Training was performed on 32 CPU cores with 2000 epochs and a batch size of 1400. Again, a small part of the data were split up before the training as validation data. Due to the larger data set size, we use only $6.67\\%$ (10,000 samples) as validation data.",
|
| 610 |
+
"bbox": [
|
| 611 |
+
115,
|
| 612 |
+
350,
|
| 613 |
+
880,
|
| 614 |
+
800
|
| 615 |
+
],
|
| 616 |
+
"page_idx": 7
|
| 617 |
+
},
|
| 618 |
+
{
|
| 619 |
+
"type": "text",
|
| 620 |
+
"text": "3.1. Bubble segmentation",
|
| 621 |
+
"text_level": 1,
|
| 622 |
+
"bbox": [
|
| 623 |
+
146,
|
| 624 |
+
124,
|
| 625 |
+
389,
|
| 626 |
+
143
|
| 627 |
+
],
|
| 628 |
+
"page_idx": 8
|
| 629 |
+
},
|
| 630 |
+
{
|
| 631 |
+
"type": "text",
|
| 632 |
+
"text": "Validation results",
|
| 633 |
+
"text_level": 1,
|
| 634 |
+
"bbox": [
|
| 635 |
+
174,
|
| 636 |
+
148,
|
| 637 |
+
329,
|
| 638 |
+
165
|
| 639 |
+
],
|
| 640 |
+
"page_idx": 8
|
| 641 |
+
},
|
| 642 |
+
{
|
| 643 |
+
"type": "text",
|
| 644 |
+
"text": "In order to evaluate the performance of the segmentation task, we use the Average Precision (AP) value at Intersection over Union (IoU) thresholds from 0.5 to 0.9. The Average Precision is commonly used when evaluating classification tasks and combines the metrics Precision and Recall, where the former focuses on the proportion of predictions that are actually correct, while the latter focuses on the proportion of actual instances that were correctly determined (Zhang and Su, 2012). The IoU thresholds determine how much of a segment needs to be captured correctly, i.e. for high IoU thresholds the predicted pixel area of an segment needs to be close to the area of the actual segment (in size and position) to be counted as correctly predicted and vice versa for low IoU thresholds.",
|
| 645 |
+
"bbox": [
|
| 646 |
+
115,
|
| 647 |
+
168,
|
| 648 |
+
880,
|
| 649 |
+
344
|
| 650 |
+
],
|
| 651 |
+
"page_idx": 8
|
| 652 |
+
},
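As an illustration of this evaluation, the following hedged sketch matches predicted and ground-truth instance masks at a single IoU threshold and reports AP as TP/(TP+FP+FN), a common convention for this kind of instance segmentation benchmark; the exact AP definition used in the paper may differ, and all names are assumptions:

```python
import numpy as np

def iou(a, b):
    """Intersection over Union of two boolean masks."""
    union = np.logical_or(a, b).sum()
    return np.logical_and(a, b).sum() / union if union else 0.0

def average_precision(gt_masks, pred_masks, thr):
    """Greedy matching of predictions to ground truth at one IoU threshold;
    returns AP = TP / (TP + FP + FN)."""
    matched, tp = set(), 0
    for p in pred_masks:
        best_j, best_iou = -1, 0.0
        for j, g in enumerate(gt_masks):
            if j in matched:
                continue
            v = iou(p, g)
            if v > best_iou:
                best_j, best_iou = j, v
        if best_iou >= thr:
            matched.add(best_j)
            tp += 1
    fp, fn = len(pred_masks) - tp, len(gt_masks) - tp
    return tp / (tp + fp + fn) if (tp + fp + fn) else 1.0

# e.g. AP over the thresholds used in the paper:
# [average_precision(gt, pred, t) for t in (0.5, 0.6, 0.7, 0.8, 0.9)]
```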
|
| 653 |
+
{
|
| 654 |
+
"type": "text",
|
| 655 |
+
"text": "We first test the different methods on the validation data, which is a subset of the training data that has not been used in the training. All three methods show good results, which demonstrates the general capability to detect and segment overlapping bubbles (Figure 6). For all IoU thresholds the UNet method shows the best result, followed by the StarDist method. This is further illustrated with the example images in Appendix A. As can be seen, the main advantage of the UNet method is that it is especially good in catching the whole bubble area, while StarDist misses some of the outer bubble regions. In Figure 6 this is reflected in higher AP scores of the UNet method at higher IoU thresholds. The Mask R-CNN results are close to the StarDist results, with slightly lower AP scores at higher IoU thresholds.",
|
| 656 |
+
"bbox": [
|
| 657 |
+
115,
|
| 658 |
+
357,
|
| 659 |
+
880,
|
| 660 |
+
535
|
| 661 |
+
],
|
| 662 |
+
"page_idx": 8
|
| 663 |
+
},
|
| 664 |
+
{
|
| 665 |
+
"type": "image",
|
| 666 |
+
"img_path": "images/5da9d0c2f42f0b3ab4d9cc0125a50d36da8e73b68b4ca6b1433aef061372c9f1.jpg",
|
| 667 |
+
"image_caption": [
|
| 668 |
+
"Figure 6: Results of the tested methods applied on the validation data set."
|
| 669 |
+
],
|
| 670 |
+
"image_footnote": [],
|
| 671 |
+
"bbox": [
|
| 672 |
+
314,
|
| 673 |
+
552,
|
| 674 |
+
657,
|
| 675 |
+
814
|
| 676 |
+
],
|
| 677 |
+
"page_idx": 8
|
| 678 |
+
},
|
| 679 |
+
{
|
| 680 |
+
"type": "text",
|
| 681 |
+
"text": "Validation for different image conditions",
|
| 682 |
+
"text_level": 1,
|
| 683 |
+
"bbox": [
|
| 684 |
+
174,
|
| 685 |
+
854,
|
| 686 |
+
529,
|
| 687 |
+
871
|
| 688 |
+
],
|
| 689 |
+
"page_idx": 8
|
| 690 |
+
},
|
| 691 |
+
{
|
| 692 |
+
"type": "text",
|
| 693 |
+
"text": "For more rigorous testing of the methods and to test the generalization ability a further performance evaluation with additional validation data is conducted in the following. Here we use images where the bubble projections show substantial differences to the",
|
| 694 |
+
"bbox": [
|
| 695 |
+
115,
|
| 696 |
+
873,
|
| 697 |
+
880,
|
| 698 |
+
926
|
| 699 |
+
],
|
| 700 |
+
"page_idx": 8
|
| 701 |
+
},
|
| 702 |
+
{
|
| 703 |
+
"type": "header",
|
| 704 |
+
"text": "3. Results",
|
| 705 |
+
"bbox": [
|
| 706 |
+
115,
|
| 707 |
+
87,
|
| 708 |
+
235,
|
| 709 |
+
105
|
| 710 |
+
],
|
| 711 |
+
"page_idx": 8
|
| 712 |
+
},
|
| 713 |
+
{
|
| 714 |
+
"type": "text",
|
| 715 |
+
"text": "training data as well as some more challenging cases, where more and also more deformed bubbles are present. Examples are shown in Appendix B. As can be seen, the UNet method fails to correctly predict the intersections here, with many incomplete and spurious intersections. This shows a conceptual drawback of the Double UNet method, as such intersections of overlapping bubbles are in some cases hardly visible or may look very similar to other internal edges due to distortions. However, the bubble masks provided by the UNetL3 still accurately catch the outline of the bubbles. On the other hand, the StarDist method is able to detect most of the bubbles but again misses the correct outline especially for larger bubbles. For both methods the described strengths and weaknesses are reflected in corresponding AP scores at given IoU thresholds, which are shown in Figure 7. To use the advantages of both methods, we combine the prediction results and dilate the StarDist results until it fits the bubble mask of the UNetL3. With this combination, we obtain better results than with the three other methods, which is reflected in the best AP scores in Figure 7 and can be further seen in Appendix B. Again, the Mask R-CNN achieves similar results as StarDist.",
|
| 716 |
+
"bbox": [
|
| 717 |
+
115,
|
| 718 |
+
83,
|
| 719 |
+
885,
|
| 720 |
+
353
|
| 721 |
+
],
|
| 722 |
+
"page_idx": 9
|
| 723 |
+
},
|
| 724 |
+
{
|
| 725 |
+
"type": "image",
|
| 726 |
+
"img_path": "images/9b413c86fe304b9d664cea02669a06b744bd0703f7c2d347f44fa85ff16764ce.jpg",
|
| 727 |
+
"image_caption": [
|
| 728 |
+
"Figure 7: Results of the tested methods applied on the additional test data set."
|
| 729 |
+
],
|
| 730 |
+
"image_footnote": [],
|
| 731 |
+
"bbox": [
|
| 732 |
+
314,
|
| 733 |
+
367,
|
| 734 |
+
655,
|
| 735 |
+
630
|
| 736 |
+
],
|
| 737 |
+
"page_idx": 9
|
| 738 |
+
},
|
| 739 |
+
{
|
| 740 |
+
"type": "text",
|
| 741 |
+
"text": "3.2. Hidden part reconstruction and combined validation",
|
| 742 |
+
"text_level": 1,
|
| 743 |
+
"bbox": [
|
| 744 |
+
144,
|
| 745 |
+
670,
|
| 746 |
+
678,
|
| 747 |
+
688
|
| 748 |
+
],
|
| 749 |
+
"page_idx": 9
|
| 750 |
+
},
|
| 751 |
+
{
|
| 752 |
+
"type": "text",
|
| 753 |
+
"text": "To compare the results of fitting ellipses to the RDC method, we evaluate the captured area by each method with respect to the actual area of the ground truth bubble. Specifically, we calculate the root mean squared error (RMSE) of all bubbles in the validation data set. With respect to the average ground truth area, we get a relative error of $17\\%$ with the ellipse fitting method and $11\\%$ relative error with the RDC method. Although both methods provide similar results for the majority of the tested samples, the ellipse fitting method sometimes creates way too large predictions, which at the end cause the worse result in comparison to the RDC method. In order to further quantify the accuracy of our methods in practical situations, we generated test data sets with different gas volume fractions and applied the StarDist+UNet (SD+UNet) as well as the two hidden part reconstructions methods on them. We again use synthetic images for this to know the actual size of partly occluded bubbles, but this time we tried to design some more realistic images. Specifically, we use an image from a single bubble study without a bubble",
|
| 754 |
+
"bbox": [
|
| 755 |
+
115,
|
| 756 |
+
690,
|
| 757 |
+
885,
|
| 758 |
+
925
|
| 759 |
+
],
|
| 760 |
+
"page_idx": 9
|
| 761 |
+
},
|
| 762 |
+
{
|
| 763 |
+
"type": "text",
|
| 764 |
+
"text": "in it as background and then successively place single bubbles at random positions on it. The only constraint is that for every bubble at least $10\\%$ of its area has to be visible, otherwise they can not be detected. Note that we use bubbles from another experimental series, which means the RDC training has not seen any of these bubbles. An example synthetic image is given in Figure 8 together with the segmentation mask predicted by SD+UNet and the RDC corrected bubble outline. To compare parameters relevant for practical uses of the methods, we calculate the volume of each bubble in order to determine a total gas fraction for every generated image. We set the dimensions of our artificial domain to have the height and width of our image and a depth of $3\\mathrm{cm}$ . This depth was chosen because it allows the assumption that enough space is available for the overlapping bubbles, but no longer enough to allow a large number of fully occluded bubbles behind the visible bubbles. Furthermore, it reflects a typical depth dimension for experimental bubble columns or pipes (Ferreira et al., 2012; Hosokawa and Tomiyama, 2004; Lau et al., 2013; Pfleger et al., 1999). With this procedure, we generate test data for gas volume fractions of about $2.5\\%$ , $5\\%$ , $7.5\\%$ and $10\\%$ , where 50 images are generated for each case.",
|
| 765 |
+
"bbox": [
|
| 766 |
+
115,
|
| 767 |
+
83,
|
| 768 |
+
885,
|
| 769 |
+
370
|
| 770 |
+
],
|
| 771 |
+
"page_idx": 10
|
| 772 |
+
},
|
| 773 |
+
{
|
| 774 |
+
"type": "image",
|
| 775 |
+
"img_path": "images/440b9e8948d15e3de2b97a219822b0531f5c8d3cb0bf5cadc67d59b27a298cde.jpg",
|
| 776 |
+
"image_caption": [],
|
| 777 |
+
"image_footnote": [],
|
| 778 |
+
"bbox": [
|
| 779 |
+
114,
|
| 780 |
+
80,
|
| 781 |
+
440,
|
| 782 |
+
218
|
| 783 |
+
],
|
| 784 |
+
"page_idx": 11
|
| 785 |
+
},
|
| 786 |
+
{
|
| 787 |
+
"type": "image",
|
| 788 |
+
"img_path": "images/d930f801aeec343f66d0ada8678d2cf08442cd7cb3c49ba1359e37ca6382515c.jpg",
|
| 789 |
+
"image_caption": [
|
| 790 |
+
"Figure 8: Generated example image for gas volume fraction of $5\\%$ (right) with the segmentation mask from StarDist+UNet (top left) and the overlapping outlines corrected with the RDC method (bottom left)."
|
| 791 |
+
],
|
| 792 |
+
"image_footnote": [],
|
| 793 |
+
"bbox": [
|
| 794 |
+
115,
|
| 795 |
+
218,
|
| 796 |
+
440,
|
| 797 |
+
354
|
| 798 |
+
],
|
| 799 |
+
"page_idx": 11
|
| 800 |
+
},
|
| 801 |
+
{
|
| 802 |
+
"type": "image",
|
| 803 |
+
"img_path": "images/2e566de44287e8f6b9b7b5b734de6ac617ac4fb8ec8287704d22a0f5732b5c16.jpg",
|
| 804 |
+
"image_caption": [],
|
| 805 |
+
"image_footnote": [],
|
| 806 |
+
"bbox": [
|
| 807 |
+
440,
|
| 808 |
+
120,
|
| 809 |
+
878,
|
| 810 |
+
312
|
| 811 |
+
],
|
| 812 |
+
"page_idx": 11
|
| 813 |
+
},
|
| 814 |
+
{
|
| 815 |
+
"type": "image",
|
| 816 |
+
"img_path": "images/b4b2e4ecfec4cb16f491d234f13f33ff9701cb36c6e7d05d79c8d65fea7289c4.jpg",
|
| 817 |
+
"image_caption": [
|
| 818 |
+
"Figure 9: Gas volume fraction results with respect to the ground truth gas volume fraction using ideal segmentation (left) and the combined SD+UNet segmentation (right)."
|
| 819 |
+
],
|
| 820 |
+
"image_footnote": [],
|
| 821 |
+
"bbox": [
|
| 822 |
+
124,
|
| 823 |
+
397,
|
| 824 |
+
460,
|
| 825 |
+
649
|
| 826 |
+
],
|
| 827 |
+
"page_idx": 11
|
| 828 |
+
},
|
| 829 |
+
{
|
| 830 |
+
"type": "image",
|
| 831 |
+
"img_path": "images/54720cd8a42a3f569f54fd96d43346ddf7a0d9ae6e36eba3f6c7c4f20474f8af.jpg",
|
| 832 |
+
"image_caption": [],
|
| 833 |
+
"image_footnote": [],
|
| 834 |
+
"bbox": [
|
| 835 |
+
505,
|
| 836 |
+
397,
|
| 837 |
+
843,
|
| 838 |
+
649
|
| 839 |
+
],
|
| 840 |
+
"page_idx": 11
|
| 841 |
+
},
|
| 842 |
+
{
|
| 843 |
+
"type": "image",
|
| 844 |
+
"img_path": "images/9c6284c859c6e616101156d08330a02da4c962642afbc84b22287eae536750d8.jpg",
|
| 845 |
+
"image_caption": [],
|
| 846 |
+
"image_footnote": [],
|
| 847 |
+
"bbox": [
|
| 848 |
+
139,
|
| 849 |
+
105,
|
| 850 |
+
868,
|
| 851 |
+
294
|
| 852 |
+
],
|
| 853 |
+
"page_idx": 12
|
| 854 |
+
},
|
| 855 |
+
{
|
| 856 |
+
"type": "image",
|
| 857 |
+
"img_path": "images/333e3dc6547cc2d6d140d1173bcf3e3a0e8e7b28e7f9a8c63f39cac9dfa146f8.jpg",
|
| 858 |
+
"image_caption": [],
|
| 859 |
+
"image_footnote": [],
|
| 860 |
+
"bbox": [
|
| 861 |
+
139,
|
| 862 |
+
307,
|
| 863 |
+
867,
|
| 864 |
+
495
|
| 865 |
+
],
|
| 866 |
+
"page_idx": 12
|
| 867 |
+
},
|
| 868 |
+
{
|
| 869 |
+
"type": "image",
|
| 870 |
+
"img_path": "images/1f73540f262a8acbe4fd90a721acdb6ece05cb151538d6ac32826755b4314a36.jpg",
|
| 871 |
+
"image_caption": [
|
| 872 |
+
"Figure 10: Bubble size histograms for the test case at $2.5\\%$ gas volume fraction. Note that a different abscissa is used for the ellipse fitting method due to some much larger prediction.",
|
| 873 |
+
"Figure 9 shows the error of the predicted gas volume fractions, where $\\alpha_{rel\\_error} = \\frac{\\alpha_{predicted}}{\\alpha_{reference}}$ , using the known ideal segmentation (left), i.e. the visible bubble parts, and using the segmentation predicted by SD+UNet (right) as well as the results when combined with the two hidden part reconstruction methods. The error bars denote the standard deviation for the 50 images of each case. As expected, using only the segmentation mask underpredicts the gas volume fraction due to the missing volume of the hidden parts. Already for the lowest tested gas volume fraction this results in an underprediction of about $9\\%$ . When using the SD+UNet prediction, this error is only slightly higher with"
|
| 874 |
+
],
|
| 875 |
+
"image_footnote": [],
|
| 876 |
+
"bbox": [
|
| 877 |
+
142,
|
| 878 |
+
511,
|
| 879 |
+
863,
|
| 880 |
+
701
|
| 881 |
+
],
|
| 882 |
+
"page_idx": 12
|
| 883 |
+
},
|
| 884 |
+
{
|
| 885 |
+
"type": "text",
|
| 886 |
+
"text": "about $12\\%$ . With increasing gas volume fraction, this difference even decreases. However, some bubbles are still incorrectly predicted with the SD+UNet method, e.g. some bubbles are missed or multiple bubbles are predicted where only one exists. This becomes important for the hidden part reconstruction. While the RDC method underestimates the gas volume fraction of about $3\\%$ using the ideal segmentation, the error increases to about $9\\%$ when using the SD+UNet prediction. With increasing gas volume fraction, this discrepancy increases showing that the RDC method strongly relies on a correct segmentation. The ellipse fitting method is not that dependent on the segmentation and shows the smallest deviation to the ground truth when combined with the SD+UNet prediction, while, however, showing the highest standard deviation. A deeper look into the underlying bubble size histograms for all images of this first case (shown in Figure 10) reveals that the RDC method better predicts the correct bubble size than without correction as well as with the ellipse fitting method. Note that for the ellipse fitting method a different abscissa is used due to the mentioned fact, that some much larger bubbles are predicted. This is also the reason for the smaller deviation to the ground truth gas volume fraction, as these false larger bubbles correspondingly increase the gas volume fraction. However, the RDC method works better on regular (ellipsoidal) shaped bubbles than on irregular (wobbling) shaped bubbles. This trend continuous for the test cases with higher gas volume fractions as can be seen in the corresponding bubble size histograms shown in Appendix C.",
|
| 887 |
+
"bbox": [
|
| 888 |
+
115,
|
| 889 |
+
84,
|
| 890 |
+
885,
|
| 891 |
+
444
|
| 892 |
+
],
|
| 893 |
+
"page_idx": 13
|
| 894 |
+
},
|
| 895 |
+
{
|
| 896 |
+
"type": "text",
|
| 897 |
+
"text": "4. Conclusion",
|
| 898 |
+
"text_level": 1,
|
| 899 |
+
"bbox": [
|
| 900 |
+
115,
|
| 901 |
+
462,
|
| 902 |
+
278,
|
| 903 |
+
481
|
| 904 |
+
],
|
| 905 |
+
"page_idx": 13
|
| 906 |
+
},
|
| 907 |
+
{
|
| 908 |
+
"type": "text",
|
| 909 |
+
"text": "In this work, we tested the use of different AI-based methods for the task of segmenting and reconstructing overlapping bubbles in bubbly flow images. In particular, we have implemented and tested three different CNN's, namely a combination of two slightly adapted UNet's as well as the two open-source methods StarDist and Mask-RCNN. In general, all three methods are capable to detect bubbles under eased conditions, namely a proper illumination that clearly reveals intersections of overlapping bubbles, rather regular bubble shape and a manageable number of overlapping bubble segments. When these conditions are not met, the Double UNet approach fails to correctly detect the bubbles, while the StarDist and Mask R-CNN methods are more robust under conditions that are more difficult. The StarDist method performs best in identifying bubbles under various image conditions, but slightly misses the correct outline of the bubbles. We were able to improve the latter drawback by combining the StarDist results with the general foreground-background mask provided by one of the two UNets in a postprocessing step. The Mask R-CNN shows an equally good performance as the StarDist method, which underlines the capability of both methods to detect overlapping bubbles in images. However, StarDist shows better computational performance with respect to terms of a faster training and inference time in comparison to Mask R-CNN.",
|
| 910 |
+
"bbox": [
|
| 911 |
+
115,
|
| 912 |
+
498,
|
| 913 |
+
885,
|
| 914 |
+
804
|
| 915 |
+
],
|
| 916 |
+
"page_idx": 13
|
| 917 |
+
},
|
| 918 |
+
{
|
| 919 |
+
"type": "text",
|
| 920 |
+
"text": "In order to further increase the accuracy with respect to determining correct bubble sizes and gas volume fractions, we tested two methods to reconstruct the hidden part of partly occluded bubbles. The first method tries to fit an ellipse with the boundary pixels of a segmentation instance, while the second method, called Radial Distance Correction (RDC), is based on a Neural Network that corrects radial distances from the center of the instance to the occluded part. Here, the second method provides more robust results, as the ellipse",
|
| 921 |
+
"bbox": [
|
| 922 |
+
115,
|
| 923 |
+
814,
|
| 924 |
+
885,
|
| 925 |
+
922
|
| 926 |
+
],
|
| 927 |
+
"page_idx": 13
|
| 928 |
+
},
|
| 929 |
+
{
|
| 930 |
+
"type": "text",
|
| 931 |
+
"text": "fitting method occasionally generates far too large predictions. This is further demonstrated in a final combined validation, in which we apply the StarDist+UNet method together with the two methods to reconstruct the hidden bubble part on synthetic images with known ground truth bubbles.",
|
| 932 |
+
"bbox": [
|
| 933 |
+
115,
|
| 934 |
+
83,
|
| 935 |
+
880,
|
| 936 |
+
154
|
| 937 |
+
],
|
| 938 |
+
"page_idx": 14
|
| 939 |
+
},
|
| 940 |
+
{
|
| 941 |
+
"type": "text",
|
| 942 |
+
"text": "Even though we demonstrate that satisfactory results with respect to bubble size distribution and gas volume fraction can be achieved, the relative error for the latter is still not in a reasonable range when investigating cases with gas fraction above $5\\%$ . This can be improved by generating more training data of such cases. Since labelling images with a high number of overlapping bubbles is cumbersome, the use of generative models like GAN's might be beneficial for this. Furthermore, a CNN model that already proposes overlapping instances could be advantageous for the task of identifying overlapping bubbles, i.e. the multistar approach, which is an extension of the StarDist approach that focuses on the topic of overlapping instances (Walter et al., 2020). Also the use of image sequences could potentially improve the segmentation performance, as hidden bubbles may be better visible at an earlier or later moment of a sequence.",
|
| 943 |
+
"bbox": [
|
| 944 |
+
115,
|
| 945 |
+
165,
|
| 946 |
+
880,
|
| 947 |
+
362
|
| 948 |
+
],
|
| 949 |
+
"page_idx": 14
|
| 950 |
+
},
|
| 951 |
+
{
|
| 952 |
+
"type": "text",
|
| 953 |
+
"text": "Although only bubble column experiments without an imposed liquid flow are considered in the present work, the training data still should reflect cases with background flow to some extent. Specifically the bubble appearance in the images defines the detectability with a trained algorithm, where the appearance depends on the image condition (e.g. illumination, pixel size, blurriness) as well as on the bubble shape. In this context, a liquid flow field can modify the latter, but only for higher turbulence intensities (Masuk et al., 2021), so that also low to moderate liquid background flows should be reflected in the training data. This, however, needs further investigation and otherwise an adaption of the training data set.",
|
| 954 |
+
"bbox": [
|
| 955 |
+
115,
|
| 956 |
+
373,
|
| 957 |
+
880,
|
| 958 |
+
533
|
| 959 |
+
],
|
| 960 |
+
"page_idx": 14
|
| 961 |
+
},
|
| 962 |
+
{
|
| 963 |
+
"type": "text",
|
| 964 |
+
"text": "In order to use our data and/or models, we provide two different open-access repositories. The labelled training data and the synthetic test images for the combined validation can be found under (Hessenkemper et al., 2022a). The code to train the UNet's and to apply the combined methods on bubbly flow images as well as the trained UNet, StarDist and RDC models can be accessed under (Hessenkemper et al., 2022b). To train custom StarDist models we propose to access the well-documented original StarDist repository (https://github.com/stardist/stardist). The same applies for the Mask R-CNN algorithm, for which we propose to use the BubMask repository (https://github.com/ywflow/BubMask) as introduced by Kim and Park, 2021.",
|
| 965 |
+
"bbox": [
|
| 966 |
+
115,
|
| 967 |
+
545,
|
| 968 |
+
880,
|
| 969 |
+
706
|
| 970 |
+
],
|
| 971 |
+
"page_idx": 14
|
| 972 |
+
},
|
| 973 |
+
{
|
| 974 |
+
"type": "list",
|
| 975 |
+
"sub_type": "ref_text",
|
| 976 |
+
"list_items": [
|
| 977 |
+
"Cerqueira, R.F.L., Paladino, E.E., 2021. Development of a deep learning-based image processing technique for bubble pattern recognition and shape reconstruction in dense bubbly flows. Chem. Eng. Sci. 230, 116163.",
|
| 978 |
+
"Cerqueira, R.F.L., Paladino, E.E., Ynumaru, B.K., Maliska, C.R., 2018. Image processing techniques for the measurement of two-phase bubbly pipe flows using particle image and tracking velocimetry (PIV/PTV). Chem. Eng. Sci. 189, 1-23.",
|
| 979 |
+
"Ferreira, A., Pereira, G., Teixeira, J.A., Rocha, F., 2012. Statistical tool combined with image analysis to characterize hydrodynamics and mass transfer in a bubble column. Chem. Eng. J. 180, 216-228.",
|
| 980 |
+
"Fu, Y., 2018. Development of Advanced Image Processing Algorithms for Bubbly Flow Measurement. Virginia Tech.",
|
| 981 |
+
"Fu, Y., Liu, Y., 2019. BubGAN: Bubble generative adversarial networks for synthesizing realistic bubbly flow images. Chem. Eng. Sci. 204, 35-47.",
|
| 982 |
+
"Haas, T., Schubert, C., Eickhoff, M., Pfeifer, H., 2020. BubCNN: Bubble detection using Faster RCNN and shape regression network. Chem. Eng. Sci. 216, 115467.",
|
| 983 |
+
"He, K., Gkioxari, G., Dólár, P., Girshick, R., 2020. Mask R-CNN. IEEE Trans. Pattern Anal. Mach. Intell. 42, 386-397.",
|
| 984 |
+
"Hessenkemper, H., Starke, S., Atassi, Y., Ziegenhein, T., Lucas, D., 2022a. Dataset for Bubble identification from images with machine learning methods. http://doi.org/10.14278/rodare.1487.",
|
| 985 |
+
"Hessenkemper, H., Starke, S., Atassi, Y., Ziegenhein, T., Lucas, D., 2022b. Software for Bubble identification from images with machine learning methods. http://doi.org/10.14278/rodare.1471.",
|
| 986 |
+
"Hessenkemper, H., Ziegenhein, T., 2018. Particle Shadow Velocimetry (PSV) in bubbly flows. Int. J. Multiph. Flow 106, 268-279.",
|
| 987 |
+
"Hessenkemper, H., Ziegenhein, T., Rzehak, R., Lucas, D., Tomiyama, A., 2021. Lift force coefficient of ellipsoidal single bubbles in water. Int. J. Multiph. Flow 138, 103587.",
|
| 988 |
+
"Hosokawa, S., Tomiyama, A., 2004. Turbulence modification in gas-liquid and solid-liquid dispersed two-phase pipe flows. Int. J. Heat Fluid Flow 25, 489-498.",
|
| 989 |
+
"Kim, Y., Park, H., 2021. Deep learning-based automated and universal bubble detection and mask extraction in complex two-phase flows. Sci. Rep. 11, 8940.",
|
| 990 |
+
"Lau, Y.M., Deen, N.G., Kuipers, J.A.M., 2013. Development of an image measurement technique for size distribution in dense bubbly flows. Chem. Eng. Sci. 94, 20-29.",
|
| 991 |
+
"Li, J., Shao, S., Hong, J., 2021. Machine learning shadowgraph for particle size and shape characterization. Meas. Sci. Technol. 32, 8.",
|
| 992 |
+
"Liu, L., Yan, H., Ziegenhein, T., Hessenkemper, H., Li, Q., Lucas, D., 2019. A systematic experimental study and dimensionless analysis of bubble plume oscillations in rectangular bubble columns. Chem. Eng. J. 372, 352-362.",
|
| 993 |
+
"Masuk, A.U.M., Salibindla, A.K.R., Ni, R., 2021. Simultaneous measurements of deforming"
|
| 994 |
+
],
|
| 995 |
+
"bbox": [
|
| 996 |
+
115,
|
| 997 |
+
122,
|
| 998 |
+
884,
|
| 999 |
+
929
|
| 1000 |
+
],
|
| 1001 |
+
"page_idx": 15
|
| 1002 |
+
},
|
| 1003 |
+
{
|
| 1004 |
+
"type": "header",
|
| 1005 |
+
"text": "References",
|
| 1006 |
+
"bbox": [
|
| 1007 |
+
115,
|
| 1008 |
+
86,
|
| 1009 |
+
253,
|
| 1010 |
+
107
|
| 1011 |
+
],
|
| 1012 |
+
"page_idx": 15
|
| 1013 |
+
},
|
| 1014 |
+
{
|
| 1015 |
+
"type": "list",
|
| 1016 |
+
"sub_type": "ref_text",
|
| 1017 |
+
"list_items": [
|
| 1018 |
+
"Hinze-scale bubbles with surrounding turbulence. J. Fluid Mech. 910, 21-22.",
|
| 1019 |
+
"Pfleger, D., Gomes, S., Gilbert, N., Wagner, H.G., 1999. Hydrodynamic simulations of laboratory scale bubble columns fundamental studies of the Eulerian-Eulerian modelling approach. Chem. Eng. Sci. 54, 5091-5099.",
|
| 1020 |
+
"Poletaev, I., Tokarev, M.P., Pervunin, K.S., 2020. Bubble patterns recognition using neural networks: Application to the analysis of a two-phase bubbly jet. Int. J. Multiph. Flow 126, 103194.",
|
| 1021 |
+
"Ren, S., He, K., Girshick, R., Sun, J., 2017. Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks. IEEE Trans. Pattern Anal. Mach. Intell. 39, 1137-1149.",
|
| 1022 |
+
"Ronneberger, O., Fischer, P., Brox, T., 2015. U-net: Convolutional networks for biomedical image segmentation, in: Lecture Notes in Computer Science (Including Subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics). Springer Verlag, pp. 234-241.",
|
| 1023 |
+
"Schmidt, U., Weigert, M., Broaddus, C., Myers, G., 2018. Cell detection with star-convex polygons, in: Lecture Notes in Computer Science (Including Subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics). Springer Verlag, pp. 265-273.",
|
| 1024 |
+
"Walter, F.C., Damrich, S., Hamprecht, F.A., 2020. MultiStar: Instance Segmentation of Overlapping Objects with Star-Convex Polygons. Proc. - Int. Symp. Biomed. Imaging 2021-April, 295-298.",
|
| 1025 |
+
"Zhang, P., Su, W., 2012. Statistical inference on recall, precision and average precision under random selection, in: Proceedings - 2012 9th International Conference on Fuzzy Systems and Knowledge Discovery, FSKD 2012. pp. 1348-1352.",
|
| 1026 |
+
"Ziegenhein, T., Lucas, D., 2019. The critical bubble diameter of the lift force in technical and environmental, buoyancy-driven bubbly flows. Int. J. Multiph. Flow 116, 26-38."
|
| 1027 |
+
],
|
| 1028 |
+
"bbox": [
|
| 1029 |
+
115,
|
| 1030 |
+
82,
|
| 1031 |
+
882,
|
| 1032 |
+
595
|
| 1033 |
+
],
|
| 1034 |
+
"page_idx": 16
|
| 1035 |
+
},
|
| 1036 |
+
{
|
| 1037 |
+
"type": "text",
|
| 1038 |
+
"text": "Appendix A",
|
| 1039 |
+
"text_level": 1,
|
| 1040 |
+
"bbox": [
|
| 1041 |
+
115,
|
| 1042 |
+
85,
|
| 1043 |
+
225,
|
| 1044 |
+
102
|
| 1045 |
+
],
|
| 1046 |
+
"page_idx": 17
|
| 1047 |
+
},
|
| 1048 |
+
{
|
| 1049 |
+
"type": "image",
|
| 1050 |
+
"img_path": "images/7d8febc647fe070bc2f97a9d692f2e6582273371c26b599e0dcbe4ed541afac6.jpg",
|
| 1051 |
+
"image_caption": [
|
| 1052 |
+
"Figure 11: Examples from the validation data set. First row: Original images; Second row: Double UNet prediction; Third row: StarDist prediction. Different colors indicate different detected bubble instances."
|
| 1053 |
+
],
|
| 1054 |
+
"image_footnote": [],
|
| 1055 |
+
"bbox": [
|
| 1056 |
+
115,
|
| 1057 |
+
103,
|
| 1058 |
+
875,
|
| 1059 |
+
884
|
| 1060 |
+
],
|
| 1061 |
+
"page_idx": 17
|
| 1062 |
+
},
|
| 1063 |
+
{
|
| 1064 |
+
"type": "text",
|
| 1065 |
+
"text": "Appendix B",
|
| 1066 |
+
"text_level": 1,
|
| 1067 |
+
"bbox": [
|
| 1068 |
+
115,
|
| 1069 |
+
85,
|
| 1070 |
+
225,
|
| 1071 |
+
102
|
| 1072 |
+
],
|
| 1073 |
+
"page_idx": 18
|
| 1074 |
+
},
|
| 1075 |
+
{
|
| 1076 |
+
"type": "image",
|
| 1077 |
+
"img_path": "images/c8c101115630ede17e0f53832801454854edfbe66535f4f318b7a10ebea89493.jpg",
|
| 1078 |
+
"image_caption": [
|
| 1079 |
+
"Figure 12: Examples from the test data set. First row: Original images; Second row: Double UNet prediction; Third row: StarDist prediction; Fourth row: Combined prediction."
|
| 1080 |
+
],
|
| 1081 |
+
"image_footnote": [],
|
| 1082 |
+
"bbox": [
|
| 1083 |
+
115,
|
| 1084 |
+
104,
|
| 1085 |
+
868,
|
| 1086 |
+
890
|
| 1087 |
+
],
|
| 1088 |
+
"page_idx": 18
|
| 1089 |
+
},
|
| 1090 |
+
{
|
| 1091 |
+
"type": "image",
|
| 1092 |
+
"img_path": "images/1af70e00f8a85e85ec3b0b7ce3c5f599c752376fd1f7fc5ed43638116d0cdecd.jpg",
|
| 1093 |
+
"image_caption": [
|
| 1094 |
+
"Appendix C"
|
| 1095 |
+
],
|
| 1096 |
+
"image_footnote": [],
|
| 1097 |
+
"bbox": [
|
| 1098 |
+
139,
|
| 1099 |
+
110,
|
| 1100 |
+
868,
|
| 1101 |
+
297
|
| 1102 |
+
],
|
| 1103 |
+
"page_idx": 19
|
| 1104 |
+
},
|
| 1105 |
+
{
|
| 1106 |
+
"type": "image",
|
| 1107 |
+
"img_path": "images/aa25c0c5e691d765651ae45ca6f3f9c0411fa6a69d4f4e4c8aaeecb57ed14c00.jpg",
|
| 1108 |
+
"image_caption": [],
|
| 1109 |
+
"image_footnote": [],
|
| 1110 |
+
"bbox": [
|
| 1111 |
+
139,
|
| 1112 |
+
311,
|
| 1113 |
+
867,
|
| 1114 |
+
500
|
| 1115 |
+
],
|
| 1116 |
+
"page_idx": 19
|
| 1117 |
+
},
|
| 1118 |
+
{
|
| 1119 |
+
"type": "image",
|
| 1120 |
+
"img_path": "images/1af4e165e4bda84ecfd371b068a2d94bf63547aa0d901a4dee5b66b008433777.jpg",
|
| 1121 |
+
"image_caption": [
|
| 1122 |
+
"Figure 13: Bubble size histograms for the test case at $5\\%$ gas fraction."
|
| 1123 |
+
],
|
| 1124 |
+
"image_footnote": [],
|
| 1125 |
+
"bbox": [
|
| 1126 |
+
142,
|
| 1127 |
+
514,
|
| 1128 |
+
860,
|
| 1129 |
+
703
|
| 1130 |
+
],
|
| 1131 |
+
"page_idx": 19
|
| 1132 |
+
},
|
| 1133 |
+
{
|
| 1134 |
+
"type": "image",
|
| 1135 |
+
"img_path": "images/6419db85502b028d983c168faa28c83036208aa5ef49957c6e2ed6ad8fe1b89b.jpg",
|
| 1136 |
+
"image_caption": [],
|
| 1137 |
+
"image_footnote": [],
|
| 1138 |
+
"bbox": [
|
| 1139 |
+
139,
|
| 1140 |
+
87,
|
| 1141 |
+
868,
|
| 1142 |
+
275
|
| 1143 |
+
],
|
| 1144 |
+
"page_idx": 20
|
| 1145 |
+
},
|
| 1146 |
+
{
|
| 1147 |
+
"type": "image",
|
| 1148 |
+
"img_path": "images/bb8bb8f9fe6b8512a4f54bba0730453d5e861d767bf0da25bfbedd2ba3bf9f34.jpg",
|
| 1149 |
+
"image_caption": [],
|
| 1150 |
+
"image_footnote": [],
|
| 1151 |
+
"bbox": [
|
| 1152 |
+
139,
|
| 1153 |
+
290,
|
| 1154 |
+
867,
|
| 1155 |
+
479
|
| 1156 |
+
],
|
| 1157 |
+
"page_idx": 20
|
| 1158 |
+
},
|
| 1159 |
+
{
|
| 1160 |
+
"type": "image",
|
| 1161 |
+
"img_path": "images/8ca38aa5d4587465522df06541323f8ea1bef64dc2d9854824f428bbf545dc2c.jpg",
|
| 1162 |
+
"image_caption": [
|
| 1163 |
+
"Figure 14: Bubble size histograms for the test case at $7.5\\%$ gas fraction."
|
| 1164 |
+
],
|
| 1165 |
+
"image_footnote": [],
|
| 1166 |
+
"bbox": [
|
| 1167 |
+
139,
|
| 1168 |
+
492,
|
| 1169 |
+
863,
|
| 1170 |
+
683
|
| 1171 |
+
],
|
| 1172 |
+
"page_idx": 20
|
| 1173 |
+
},
|
| 1174 |
+
{
|
| 1175 |
+
"type": "image",
|
| 1176 |
+
"img_path": "images/17d24b7eee78e035907fa0db8207914efa4b9b61491ee24f54f99aa048d83fe7.jpg",
|
| 1177 |
+
"image_caption": [],
|
| 1178 |
+
"image_footnote": [],
|
| 1179 |
+
"bbox": [
|
| 1180 |
+
139,
|
| 1181 |
+
105,
|
| 1182 |
+
868,
|
| 1183 |
+
293
|
| 1184 |
+
],
|
| 1185 |
+
"page_idx": 21
|
| 1186 |
+
},
|
| 1187 |
+
{
|
| 1188 |
+
"type": "image",
|
| 1189 |
+
"img_path": "images/7d422c5a729dcf604d813c06c4135c9026df903bda95e57eeee05d6686d77dab.jpg",
|
| 1190 |
+
"image_caption": [],
|
| 1191 |
+
"image_footnote": [],
|
| 1192 |
+
"bbox": [
|
| 1193 |
+
139,
|
| 1194 |
+
307,
|
| 1195 |
+
867,
|
| 1196 |
+
495
|
| 1197 |
+
],
|
| 1198 |
+
"page_idx": 21
|
| 1199 |
+
},
|
| 1200 |
+
{
|
| 1201 |
+
"type": "image",
|
| 1202 |
+
"img_path": "images/05d56582f2abb6a39229b4aed98573af5f08d4634ce127f7106f60b0f467fc04.jpg",
|
| 1203 |
+
"image_caption": [
|
| 1204 |
+
"Figure 15: Bubble size histograms for the test case at $10\\%$ gas fraction."
|
| 1205 |
+
],
|
| 1206 |
+
"image_footnote": [],
|
| 1207 |
+
"bbox": [
|
| 1208 |
+
142,
|
| 1209 |
+
511,
|
| 1210 |
+
863,
|
| 1211 |
+
701
|
| 1212 |
+
],
|
| 1213 |
+
"page_idx": 21
|
| 1214 |
+
}
|
| 1215 |
+
]
|