Add Batch ed160481-a4e5-44c6-8249-142772ce10ef
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +32 -0
- 2201.00xxx/2201.00007/8b96971b-1059-4fc8-881b-f19173e04430_content_list.json +1116 -0
- 2201.00xxx/2201.00007/8b96971b-1059-4fc8-881b-f19173e04430_model.json +1433 -0
- 2201.00xxx/2201.00007/8b96971b-1059-4fc8-881b-f19173e04430_origin.pdf +3 -0
- 2201.00xxx/2201.00007/full.md +232 -0
- 2201.00xxx/2201.00007/images.zip +3 -0
- 2201.00xxx/2201.00007/layout.json +0 -0
- 2201.00xxx/2201.00011/cf9a0028-3213-43e4-a02b-f411f6ccdabf_content_list.json +1903 -0
- 2201.00xxx/2201.00011/cf9a0028-3213-43e4-a02b-f411f6ccdabf_model.json +0 -0
- 2201.00xxx/2201.00011/cf9a0028-3213-43e4-a02b-f411f6ccdabf_origin.pdf +3 -0
- 2201.00xxx/2201.00011/full.md +408 -0
- 2201.00xxx/2201.00011/images.zip +3 -0
- 2201.00xxx/2201.00011/layout.json +0 -0
- 2201.00xxx/2201.00042/6031260c-1066-414e-836c-08a9e723b8bf_content_list.json +0 -0
- 2201.00xxx/2201.00042/6031260c-1066-414e-836c-08a9e723b8bf_model.json +0 -0
- 2201.00xxx/2201.00042/6031260c-1066-414e-836c-08a9e723b8bf_origin.pdf +3 -0
- 2201.00xxx/2201.00042/full.md +0 -0
- 2201.00xxx/2201.00042/images.zip +3 -0
- 2201.00xxx/2201.00042/layout.json +0 -0
- 2201.00xxx/2201.00044/b5c93617-34e7-4877-9247-c8a8bb698f50_content_list.json +0 -0
- 2201.00xxx/2201.00044/b5c93617-34e7-4877-9247-c8a8bb698f50_model.json +0 -0
- 2201.00xxx/2201.00044/b5c93617-34e7-4877-9247-c8a8bb698f50_origin.pdf +3 -0
- 2201.00xxx/2201.00044/full.md +607 -0
- 2201.00xxx/2201.00044/images.zip +3 -0
- 2201.00xxx/2201.00044/layout.json +0 -0
- 2201.00xxx/2201.00057/dd8a9590-2d6f-4bed-9fa6-d919d0779366_content_list.json +0 -0
- 2201.00xxx/2201.00057/dd8a9590-2d6f-4bed-9fa6-d919d0779366_model.json +0 -0
- 2201.00xxx/2201.00057/dd8a9590-2d6f-4bed-9fa6-d919d0779366_origin.pdf +3 -0
- 2201.00xxx/2201.00057/full.md +0 -0
- 2201.00xxx/2201.00057/images.zip +3 -0
- 2201.00xxx/2201.00057/layout.json +0 -0
- 2201.00xxx/2201.00058/82c82297-b486-438e-899b-046bfa0819f0_content_list.json +0 -0
- 2201.00xxx/2201.00058/82c82297-b486-438e-899b-046bfa0819f0_model.json +0 -0
- 2201.00xxx/2201.00058/82c82297-b486-438e-899b-046bfa0819f0_origin.pdf +3 -0
- 2201.00xxx/2201.00058/full.md +726 -0
- 2201.00xxx/2201.00058/images.zip +3 -0
- 2201.00xxx/2201.00058/layout.json +0 -0
- 2201.00xxx/2201.00107/734bfe8b-4420-43f1-9421-124c4f83332f_content_list.json +0 -0
- 2201.00xxx/2201.00107/734bfe8b-4420-43f1-9421-124c4f83332f_model.json +0 -0
- 2201.00xxx/2201.00107/734bfe8b-4420-43f1-9421-124c4f83332f_origin.pdf +3 -0
- 2201.00xxx/2201.00107/full.md +531 -0
- 2201.00xxx/2201.00107/images.zip +3 -0
- 2201.00xxx/2201.00107/layout.json +0 -0
- 2201.00xxx/2201.00140/b6725be1-2dfc-4354-9009-f7657b4687af_content_list.json +1598 -0
- 2201.00xxx/2201.00140/b6725be1-2dfc-4354-9009-f7657b4687af_model.json +0 -0
- 2201.00xxx/2201.00140/b6725be1-2dfc-4354-9009-f7657b4687af_origin.pdf +3 -0
- 2201.00xxx/2201.00140/full.md +368 -0
- 2201.00xxx/2201.00140/images.zip +3 -0
- 2201.00xxx/2201.00140/layout.json +0 -0
- 2201.00xxx/2201.00162/05211657-1eef-426d-a342-10424d7db537_content_list.json +1188 -0
.gitattributes
CHANGED
|
@@ -8695,3 +8695,35 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 8695 |
2201.05xxx/2201.05455/cebd4260-0e37-4135-985e-b8fd7e6f647d_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8696 |
2201.07xxx/2201.07185/43213166-2acd-4bcf-95ec-446e5caf6b5a_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8697 |
2201.07xxx/2201.07712/9e644226-1121-4508-8618-303ddf57bb83_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8695 |
2201.05xxx/2201.05455/cebd4260-0e37-4135-985e-b8fd7e6f647d_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8696 |
2201.07xxx/2201.07185/43213166-2acd-4bcf-95ec-446e5caf6b5a_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8697 |
2201.07xxx/2201.07712/9e644226-1121-4508-8618-303ddf57bb83_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8698 |
+
2201.00xxx/2201.00007/8b96971b-1059-4fc8-881b-f19173e04430_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8699 |
+
2201.00xxx/2201.00011/cf9a0028-3213-43e4-a02b-f411f6ccdabf_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8700 |
+
2201.00xxx/2201.00042/6031260c-1066-414e-836c-08a9e723b8bf_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8701 |
+
2201.00xxx/2201.00044/b5c93617-34e7-4877-9247-c8a8bb698f50_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8702 |
+
2201.00xxx/2201.00057/dd8a9590-2d6f-4bed-9fa6-d919d0779366_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8703 |
+
2201.00xxx/2201.00058/82c82297-b486-438e-899b-046bfa0819f0_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8704 |
+
2201.00xxx/2201.00107/734bfe8b-4420-43f1-9421-124c4f83332f_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8705 |
+
2201.00xxx/2201.00140/b6725be1-2dfc-4354-9009-f7657b4687af_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8706 |
+
2201.00xxx/2201.00162/05211657-1eef-426d-a342-10424d7db537_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8707 |
+
2201.00xxx/2201.00180/8b751e29-ca39-4b4c-ab64-2ce58ee7b805_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8708 |
+
2201.00xxx/2201.00206/f2b186d8-3783-4a1c-9c30-c7e2f564f66c_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8709 |
+
2201.00xxx/2201.00217/0137d433-098d-4d81-9d17-ec94d944f9b6_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8710 |
+
2201.00xxx/2201.00227/e50a5e9e-9233-4603-b087-0b4408c4c20b_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8711 |
+
2201.00xxx/2201.00232/37353f2d-e290-47f0-a8c1-1a65e922af11_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8712 |
+
2201.00xxx/2201.00267/9368c1aa-04b8-4c6c-87ac-02168094b91d_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8713 |
+
2201.00xxx/2201.00299/e00cb9ef-9d42-41c8-b852-eaf8e3104a83_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8714 |
+
2201.00xxx/2201.00308/45400984-f9cb-4350-80a7-46a4f107f24a_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8715 |
+
2201.00xxx/2201.00346/09dab378-86ad-4963-b25f-0c6c6c6752f5_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8716 |
+
2201.00xxx/2201.00382/32b445cb-a714-4f42-840d-87a4909f4b55_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8717 |
+
2201.00xxx/2201.00414/050fe95f-3760-4926-a725-a8323b72b1a1_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8718 |
+
2201.00xxx/2201.00424/715feeb8-1e9e-4453-8407-a624f6e738d3_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8719 |
+
2201.00xxx/2201.00443/17a00578-7726-43a0-9d0c-5f4ee23cc6f0_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8720 |
+
2201.00xxx/2201.00454/2967ae4f-6abf-4b11-a6e1-4e65d8e6fd5b_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8721 |
+
2201.00xxx/2201.00462/c58765d6-64c4-4365-b7ac-dd69497164cd_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8722 |
+
2201.00xxx/2201.00464/20d0b95a-fcd3-4c8a-8732-c2db8d13a548_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8723 |
+
2201.00xxx/2201.00466/1bf94e9a-88b2-4258-a2ee-55c2836f7e53_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8724 |
+
2201.00xxx/2201.00471/3159ff43-d53c-4b92-b1bf-3f87878cdcef_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8725 |
+
2201.00xxx/2201.00487/ac090ac0-a0c8-4c01-b944-66c2e7269aba_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8726 |
+
2201.00xxx/2201.00491/6f9e7c8a-26dc-4d89-803c-4788bba38c8c_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8727 |
+
2201.00xxx/2201.00759/7abeba98-7c25-430d-a28b-8b8cf788749e_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8728 |
+
2201.01xxx/2201.01191/8fe7e520-b832-4c39-abf7-942e702b0397_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 8729 |
+
2201.01xxx/2201.01389/6c6f9712-a2a1-4d64-a6b7-60e815c0779c_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
2201.00xxx/2201.00007/8b96971b-1059-4fc8-881b-f19173e04430_content_list.json
ADDED
|
@@ -0,0 +1,1116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "CONFIDENCE-AWARE MULTI-TEACHER KNOWLEDGE DISTILLATION",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
166,
|
| 8 |
+
118,
|
| 9 |
+
831,
|
| 10 |
+
136
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Hailin Zhang",
|
| 17 |
+
"bbox": [
|
| 18 |
+
297,
|
| 19 |
+
155,
|
| 20 |
+
408,
|
| 21 |
+
174
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "Defang Chen",
|
| 28 |
+
"bbox": [
|
| 29 |
+
447,
|
| 30 |
+
156,
|
| 31 |
+
555,
|
| 32 |
+
172
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "Can Wang\\*",
|
| 39 |
+
"bbox": [
|
| 40 |
+
596,
|
| 41 |
+
156,
|
| 42 |
+
687,
|
| 43 |
+
172
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "Zhejiang University, China; ZJU-Bangsun Joint Research Center.",
|
| 50 |
+
"bbox": [
|
| 51 |
+
240,
|
| 52 |
+
190,
|
| 53 |
+
754,
|
| 54 |
+
208
|
| 55 |
+
],
|
| 56 |
+
"page_idx": 0
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"text": "{zzzhl, defchern, wcan} @ zju.edu.cn",
|
| 61 |
+
"bbox": [
|
| 62 |
+
356,
|
| 63 |
+
210,
|
| 64 |
+
643,
|
| 65 |
+
227
|
| 66 |
+
],
|
| 67 |
+
"page_idx": 0
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"text": "ABSTRACT",
|
| 72 |
+
"text_level": 1,
|
| 73 |
+
"bbox": [
|
| 74 |
+
240,
|
| 75 |
+
258,
|
| 76 |
+
331,
|
| 77 |
+
272
|
| 78 |
+
],
|
| 79 |
+
"page_idx": 0
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"type": "text",
|
| 83 |
+
"text": "Knowledge distillation is initially introduced to utilize additional supervision from a single teacher model for the student model training. To boost the student performance, some recent variants attempt to exploit diverse knowledge sources from multiple teachers. However, existing studies mainly integrate knowledge from diverse sources by averaging over multiple teacher predictions or combining them using other label-free strategies, which may mislead student in the presence of low-quality teacher predictions. To tackle this problem, we propose Confidence-Aware Multi-teacher Knowledge Distillation (CA-MKD), which adaptively assigns sample-wise reliability for each teacher prediction with the help of ground-truth labels, with those teacher predictions close to one-hot labels assigned large weights. Besides, CA-MKD incorporates features in intermediate layers to stable the knowledge transfer process. Extensive experiments show our CA-MKD consistently outperforms all compared state-of-the-art methods across various teacher-student architectures. Code is available: https://github.com/Rorozhl/CA-MKD.",
|
| 84 |
+
"bbox": [
|
| 85 |
+
81,
|
| 86 |
+
277,
|
| 87 |
+
488,
|
| 88 |
+
580
|
| 89 |
+
],
|
| 90 |
+
"page_idx": 0
|
| 91 |
+
},
|
| 92 |
+
{
|
| 93 |
+
"type": "text",
|
| 94 |
+
"text": "Index Terms— knowledge distillation, multiple teachers, confidence-aware weighting",
|
| 95 |
+
"bbox": [
|
| 96 |
+
83,
|
| 97 |
+
585,
|
| 98 |
+
488,
|
| 99 |
+
617
|
| 100 |
+
],
|
| 101 |
+
"page_idx": 0
|
| 102 |
+
},
|
| 103 |
+
{
|
| 104 |
+
"type": "text",
|
| 105 |
+
"text": "1. INTRODUCTION",
|
| 106 |
+
"text_level": 1,
|
| 107 |
+
"bbox": [
|
| 108 |
+
207,
|
| 109 |
+
635,
|
| 110 |
+
364,
|
| 111 |
+
648
|
| 112 |
+
],
|
| 113 |
+
"page_idx": 0
|
| 114 |
+
},
|
| 115 |
+
{
|
| 116 |
+
"type": "text",
|
| 117 |
+
"text": "Nowadays, deep neural networks have achieved unprecedented success in various applications [1, 2, 3]. However, these complex models requiring huge memory footprint and computational resources are difficult to be applied on embedded devices. Knowledge distillation (KD) is thus proposed as a model compression technique to resolve this issue, which improves the accuracy of a lightweight student model by distilling the knowledge from a pre-trained cumbersome teacher model [4]. The transferred knowledge was originally formalized as softmax outputs (soft targets) of the teacher model [4] and latter extended to the intermediate teacher layers for achieving more promising performance [5, 6, 7].",
|
| 118 |
+
"bbox": [
|
| 119 |
+
81,
|
| 120 |
+
662,
|
| 121 |
+
488,
|
| 122 |
+
845
|
| 123 |
+
],
|
| 124 |
+
"page_idx": 0
|
| 125 |
+
},
|
| 126 |
+
{
|
| 127 |
+
"type": "image",
|
| 128 |
+
"img_path": "images/60ead1026d9deb65b03f0004af7a05952aa4ef06b281570348316b1b5d15ceea.jpg",
|
| 129 |
+
"image_caption": [
|
| 130 |
+
"Fig. 1. Comparison of the previous average direction (green line) and our proposed confidence-aware direction (red line)."
|
| 131 |
+
],
|
| 132 |
+
"image_footnote": [],
|
| 133 |
+
"bbox": [
|
| 134 |
+
562,
|
| 135 |
+
257,
|
| 136 |
+
864,
|
| 137 |
+
344
|
| 138 |
+
],
|
| 139 |
+
"page_idx": 0
|
| 140 |
+
},
|
| 141 |
+
{
|
| 142 |
+
"type": "text",
|
| 143 |
+
"text": "As the wisdom of the masses exceeds that of the wisest individual, some multi-teacher knowledge distillation (MKD) methods are proposed and have been proven to be beneficial [8, 9, 10, 11, 12]. Basically, they combine predictions from multiple teachers with the fixed weight assignment [8, 9, 10] or other various label-free schemes, such as calculating weights based on a optimization problem or entropy criterion [11, 12], etc. However, fixed weights fail to differentiate high-quality teachers from low-quality ones [8, 9, 10], and the other schemes may mislead the student in the presence of low-quality teacher predictions [11, 12]. Figure 1 provides an intuitive illustration on this issue, where the student trained with the average weighting strategy might deviate from the correct direction once most teacher predictions are biased.",
|
| 144 |
+
"bbox": [
|
| 145 |
+
506,
|
| 146 |
+
398,
|
| 147 |
+
913,
|
| 148 |
+
609
|
| 149 |
+
],
|
| 150 |
+
"page_idx": 0
|
| 151 |
+
},
|
| 152 |
+
{
|
| 153 |
+
"type": "text",
|
| 154 |
+
"text": "Fortunately, we actually have ground-truth labels in hand to quantify our confidence about teacher predictions and then filter out low-quality predictions for better student training. To this end, we propose Confidence-Aware Multi-teacher Knowledge Distillation (CA-MKD) to learn sample-wise weights by taking the prediction confidence of teachers into consideration for adaptive knowledge integration. The confidence is obtained based on the cross entropy loss between prediction distributions and ground-truth labels. Compared with previous label-free weighting strategies, our technique enables the student to learn from a relatively correct direction.",
|
| 155 |
+
"bbox": [
|
| 156 |
+
506,
|
| 157 |
+
609,
|
| 158 |
+
915,
|
| 159 |
+
776
|
| 160 |
+
],
|
| 161 |
+
"page_idx": 0
|
| 162 |
+
},
|
| 163 |
+
{
|
| 164 |
+
"type": "text",
|
| 165 |
+
"text": "Note that our confidence-aware mechanism not only is able to adaptively weight different teacher predictions based on their sample-wise confidence, but also can be extended to the student-teacher feature pairs in intermediate layers. With the help of our generated flexible and effective weights, we could avoid those poor teacher predictions dominating the knowledge transfer process and considerably improve the student performance on eight teacher-student architecture combinations (as shown in Table 1 and 3).",
|
| 166 |
+
"bbox": [
|
| 167 |
+
506,
|
| 168 |
+
777,
|
| 169 |
+
915,
|
| 170 |
+
912
|
| 171 |
+
],
|
| 172 |
+
"page_idx": 0
|
| 173 |
+
},
|
| 174 |
+
{
|
| 175 |
+
"type": "aside_text",
|
| 176 |
+
"text": "arXiv:2201.00007v3 [cs.LG] 14 Feb 2022",
|
| 177 |
+
"bbox": [
|
| 178 |
+
22,
|
| 179 |
+
263,
|
| 180 |
+
57,
|
| 181 |
+
707
|
| 182 |
+
],
|
| 183 |
+
"page_idx": 0
|
| 184 |
+
},
|
| 185 |
+
{
|
| 186 |
+
"type": "page_footnote",
|
| 187 |
+
"text": "*Corresponding author",
|
| 188 |
+
"bbox": [
|
| 189 |
+
107,
|
| 190 |
+
852,
|
| 191 |
+
233,
|
| 192 |
+
864
|
| 193 |
+
],
|
| 194 |
+
"page_idx": 0
|
| 195 |
+
},
|
| 196 |
+
{
|
| 197 |
+
"type": "page_footnote",
|
| 198 |
+
"text": "This work is supported by National Key R&D Program of China (Grant No: 2019YFB1600700), the Starry Night Science Fund of Zhejiang University Shanghai Institute for Advanced Study (Grant No: SN-ZJU-SIAS-001) and National Natural Science Foundation of China (Grant No: U1866602).",
|
| 199 |
+
"bbox": [
|
| 200 |
+
84,
|
| 201 |
+
864,
|
| 202 |
+
486,
|
| 203 |
+
912
|
| 204 |
+
],
|
| 205 |
+
"page_idx": 0
|
| 206 |
+
},
|
| 207 |
+
{
|
| 208 |
+
"type": "text",
|
| 209 |
+
"text": "2. RELATED WORK",
|
| 210 |
+
"text_level": 1,
|
| 211 |
+
"bbox": [
|
| 212 |
+
205,
|
| 213 |
+
90,
|
| 214 |
+
366,
|
| 215 |
+
104
|
| 216 |
+
],
|
| 217 |
+
"page_idx": 1
|
| 218 |
+
},
|
| 219 |
+
{
|
| 220 |
+
"type": "text",
|
| 221 |
+
"text": "Knowledge Distillation. Vanilla KD aims to transfer knowledge from a complex network (teacher) to a simple network (student) with the KL divergence minimization between their softened outputs [13, 4]. Mimicking the teacher representations from intermediate layers was latter proposed to explore more knowledge forms [5, 6, 14, 15, 7]. Compared to these methods that require pre-training a teacher, some works simultaneously train multiple students and encourage them to learn from each other instead [16, 17]. Our technique differs from these online KD methods since we attempt to distill knowledge from multiple pre-trained teachers.",
|
| 222 |
+
"bbox": [
|
| 223 |
+
81,
|
| 224 |
+
119,
|
| 225 |
+
488,
|
| 226 |
+
285
|
| 227 |
+
],
|
| 228 |
+
"page_idx": 1
|
| 229 |
+
},
|
| 230 |
+
{
|
| 231 |
+
"type": "text",
|
| 232 |
+
"text": "Multi-teacher Knowledge Distillation. Rather than employing a single teacher, MKD boosts the effectiveness of distillation by integrating predictions from multiple teachers. A bunch of methods are proposed, such as simply assigning average or other fixed weights for different teachers [8, 9, 10], and calculating the weights based on entropy [12], latent factor [18] or multi-objective optimization in the gradient space [11]. However, these label-free strategies may mislead the student training in the presence of low-quality predictions. For instance, entropy-based strategy will prefer models with blind faith since it favors predictions with low variance [12]; optimization-based strategy favors majority opinion and will be easily misled by noisy data [11]. In contrast, our CA-MKD quantifies the teacher predictions based on ground-truth labels and further improves the student performance.",
|
| 233 |
+
"bbox": [
|
| 234 |
+
81,
|
| 235 |
+
285,
|
| 236 |
+
488,
|
| 237 |
+
512
|
| 238 |
+
],
|
| 239 |
+
"page_idx": 1
|
| 240 |
+
},
|
| 241 |
+
{
|
| 242 |
+
"type": "text",
|
| 243 |
+
"text": "3. METHODOLOGY",
|
| 244 |
+
"text_level": 1,
|
| 245 |
+
"bbox": [
|
| 246 |
+
204,
|
| 247 |
+
531,
|
| 248 |
+
366,
|
| 249 |
+
542
|
| 250 |
+
],
|
| 251 |
+
"page_idx": 1
|
| 252 |
+
},
|
| 253 |
+
{
|
| 254 |
+
"type": "text",
|
| 255 |
+
"text": "We denote $\\mathcal{D} = \\{\\pmb{x}_i,\\pmb{y}_i\\}_i^N$ as a labeled training set, $N$ is the number of samples, $K$ is the number of teachers. $F\\in \\mathbb{R}^{h\\times w\\times c}$ is the output of the last network block. We denote $\\pmb {z} = [z^1,\\dots,z^C ]$ as the logits output, where $C$ is the category number. The final model prediction is obtained by a softmax function $\\sigma (z^c) = \\frac{\\exp(z^c / \\tau)}{\\sum_j\\exp(z^j / \\tau)}$ with temperature $\\tau$ . In the following sections, we will introduce our CA-MKD in detail.",
|
| 256 |
+
"bbox": [
|
| 257 |
+
83,
|
| 258 |
+
558,
|
| 259 |
+
488,
|
| 260 |
+
669
|
| 261 |
+
],
|
| 262 |
+
"page_idx": 1
|
| 263 |
+
},
|
| 264 |
+
{
|
| 265 |
+
"type": "text",
|
| 266 |
+
"text": "3.1. The Loss of Teacher Predictions",
|
| 267 |
+
"text_level": 1,
|
| 268 |
+
"bbox": [
|
| 269 |
+
83,
|
| 270 |
+
686,
|
| 271 |
+
346,
|
| 272 |
+
700
|
| 273 |
+
],
|
| 274 |
+
"page_idx": 1
|
| 275 |
+
},
|
| 276 |
+
{
|
| 277 |
+
"type": "text",
|
| 278 |
+
"text": "To effectively aggregate the prediction distributions of multiple teachers, we assign different weights which reflects their sample-wise confidence by calculating the cross entropy loss between teacher predictions and ground-truth labels",
|
| 279 |
+
"bbox": [
|
| 280 |
+
81,
|
| 281 |
+
710,
|
| 282 |
+
488,
|
| 283 |
+
771
|
| 284 |
+
],
|
| 285 |
+
"page_idx": 1
|
| 286 |
+
},
|
| 287 |
+
{
|
| 288 |
+
"type": "equation",
|
| 289 |
+
"text": "\n$$\n\\mathcal {L} _ {C E _ {K D}} ^ {k} = - \\sum_ {c = 1} ^ {C} y ^ {c} \\log \\left(\\sigma \\left(z _ {T _ {k}} ^ {c}\\right)\\right), \\tag {1}\n$$\n",
|
| 290 |
+
"text_format": "latex",
|
| 291 |
+
"bbox": [
|
| 292 |
+
169,
|
| 293 |
+
779,
|
| 294 |
+
486,
|
| 295 |
+
819
|
| 296 |
+
],
|
| 297 |
+
"page_idx": 1
|
| 298 |
+
},
|
| 299 |
+
{
|
| 300 |
+
"type": "equation",
|
| 301 |
+
"text": "\n$$\nw _ {K D} ^ {k} = \\frac {1}{K - 1} \\left(1 - \\frac {\\exp \\left(\\mathcal {L} _ {C E _ {K D}} ^ {k}\\right)}{\\sum_ {j} \\exp \\left(\\mathcal {L} _ {C E _ {K D}} ^ {j}\\right)}\\right), \\tag {2}\n$$\n",
|
| 302 |
+
"text_format": "latex",
|
| 303 |
+
"bbox": [
|
| 304 |
+
133,
|
| 305 |
+
827,
|
| 306 |
+
486,
|
| 307 |
+
877
|
| 308 |
+
],
|
| 309 |
+
"page_idx": 1
|
| 310 |
+
},
|
| 311 |
+
{
|
| 312 |
+
"type": "text",
|
| 313 |
+
"text": "where $T_{k}$ denotes the $k$ th teacher. The less $\\mathcal{L}_{CE_{KD}}^{k}$ corresponds to the larger $w_{KD}^{k}$ . The overall teacher predictions are",
|
| 314 |
+
"bbox": [
|
| 315 |
+
83,
|
| 316 |
+
881,
|
| 317 |
+
488,
|
| 318 |
+
915
|
| 319 |
+
],
|
| 320 |
+
"page_idx": 1
|
| 321 |
+
},
|
| 322 |
+
{
|
| 323 |
+
"type": "image",
|
| 324 |
+
"img_path": "images/3d5973118b10c6a05de99f84c38ca8cedaf8f4052345fd55e9c1b30c1c9db35a.jpg",
|
| 325 |
+
"image_caption": [
|
| 326 |
+
"Fig. 2. An overview of our CA-MKD. The weight calculation of teacher predictions and intermediate teacher features are depicted as the red lines and green lines, respectively."
|
| 327 |
+
],
|
| 328 |
+
"image_footnote": [],
|
| 329 |
+
"bbox": [
|
| 330 |
+
529,
|
| 331 |
+
85,
|
| 332 |
+
901,
|
| 333 |
+
268
|
| 334 |
+
],
|
| 335 |
+
"page_idx": 1
|
| 336 |
+
},
|
| 337 |
+
{
|
| 338 |
+
"type": "text",
|
| 339 |
+
"text": "then aggregated with calculated weights",
|
| 340 |
+
"bbox": [
|
| 341 |
+
509,
|
| 342 |
+
353,
|
| 343 |
+
777,
|
| 344 |
+
368
|
| 345 |
+
],
|
| 346 |
+
"page_idx": 1
|
| 347 |
+
},
|
| 348 |
+
{
|
| 349 |
+
"type": "equation",
|
| 350 |
+
"text": "\n$$\n\\mathcal {L} _ {K D} = - \\sum_ {k = 1} ^ {K} w _ {K D} ^ {k} \\sum_ {c = 1} ^ {C} z _ {T _ {k}} ^ {c} \\log \\left(\\sigma \\left(z _ {S} ^ {c}\\right)\\right). \\tag {3}\n$$\n",
|
| 351 |
+
"text_format": "latex",
|
| 352 |
+
"bbox": [
|
| 353 |
+
571,
|
| 354 |
+
378,
|
| 355 |
+
911,
|
| 356 |
+
421
|
| 357 |
+
],
|
| 358 |
+
"page_idx": 1
|
| 359 |
+
},
|
| 360 |
+
{
|
| 361 |
+
"type": "text",
|
| 362 |
+
"text": "According to the above formulas, the teacher whose prediction is closer to ground-truth labels will be assigned larger weight $w_{KD}^{k}$ , since it has enough confidence to make accurate judgement for correct guidance. In contrast, if we simply acquire the weights by calculating the entropy of teacher predictions [12], the weight will become large when the output distribution is sharp regardless of whether the highest probability category is correct. In this case, those biased targets may misguide the student training and further hurt its distillation performance.",
|
| 363 |
+
"bbox": [
|
| 364 |
+
508,
|
| 365 |
+
433,
|
| 366 |
+
913,
|
| 367 |
+
583
|
| 368 |
+
],
|
| 369 |
+
"page_idx": 1
|
| 370 |
+
},
|
| 371 |
+
{
|
| 372 |
+
"type": "text",
|
| 373 |
+
"text": "3.2. The Loss of Intermediate Teacher Features",
|
| 374 |
+
"text_level": 1,
|
| 375 |
+
"bbox": [
|
| 376 |
+
509,
|
| 377 |
+
604,
|
| 378 |
+
848,
|
| 379 |
+
618
|
| 380 |
+
],
|
| 381 |
+
"page_idx": 1
|
| 382 |
+
},
|
| 383 |
+
{
|
| 384 |
+
"type": "text",
|
| 385 |
+
"text": "In addition to KD Loss, inspired by FitNets [5], we believe that the intermediate layers are also beneficial for learning structural knowledge, and thus extend our method to intermediate layers for mining more information. The calculation of intermediate feature matching is presented as follows",
|
| 386 |
+
"bbox": [
|
| 387 |
+
508,
|
| 388 |
+
628,
|
| 389 |
+
913,
|
| 390 |
+
704
|
| 391 |
+
],
|
| 392 |
+
"page_idx": 1
|
| 393 |
+
},
|
| 394 |
+
{
|
| 395 |
+
"type": "equation",
|
| 396 |
+
"text": "\n$$\nz _ {S \\rightarrow T _ {k}} = W _ {T _ {k}} h _ {S}, \\tag {4}\n$$\n",
|
| 397 |
+
"text_format": "latex",
|
| 398 |
+
"bbox": [
|
| 399 |
+
648,
|
| 400 |
+
717,
|
| 401 |
+
911,
|
| 402 |
+
733
|
| 403 |
+
],
|
| 404 |
+
"page_idx": 1
|
| 405 |
+
},
|
| 406 |
+
{
|
| 407 |
+
"type": "equation",
|
| 408 |
+
"text": "\n$$\n\\mathcal {L} _ {C E _ {\\text {i n t e r}}} ^ {k} = - \\sum_ {c = 1} ^ {C} y ^ {c} \\log \\left(\\sigma \\left(z _ {S \\rightarrow T _ {k}} ^ {c}\\right)\\right), \\tag {5}\n$$\n",
|
| 409 |
+
"text_format": "latex",
|
| 410 |
+
"bbox": [
|
| 411 |
+
578,
|
| 412 |
+
746,
|
| 413 |
+
911,
|
| 414 |
+
786
|
| 415 |
+
],
|
| 416 |
+
"page_idx": 1
|
| 417 |
+
},
|
| 418 |
+
{
|
| 419 |
+
"type": "equation",
|
| 420 |
+
"text": "\n$$\nw _ {i n t e r} ^ {k} = \\frac {1}{K - 1} \\left(1 - \\frac {\\exp \\left(\\mathcal {L} _ {C E _ {i n t e r}} ^ {k}\\right)}{\\sum_ {j} \\exp \\left(\\mathcal {L} _ {C E _ {i n t e r}} ^ {j}\\right)}\\right). \\tag {6}\n$$\n",
|
| 421 |
+
"text_format": "latex",
|
| 422 |
+
"bbox": [
|
| 423 |
+
550,
|
| 424 |
+
795,
|
| 425 |
+
911,
|
| 426 |
+
845
|
| 427 |
+
],
|
| 428 |
+
"page_idx": 1
|
| 429 |
+
},
|
| 430 |
+
{
|
| 431 |
+
"type": "text",
|
| 432 |
+
"text": "where $W_{T_k}$ is the final classifier of the $k$ th teacher. $h_S \\in \\mathbb{R}^c$ is the last student feature vector, i.e., $h_S = \\mathrm{AvgPooling}(F_S)$ . $\\mathcal{L}_{CE_{inter}}^k$ is obtained by passing $h_S$ through each teacher classifier. The calculation of $w_{inter}^k$ is similar to that of $w_{KD}^k$ .",
|
| 433 |
+
"bbox": [
|
| 434 |
+
508,
|
| 435 |
+
851,
|
| 436 |
+
913,
|
| 437 |
+
915
|
| 438 |
+
],
|
| 439 |
+
"page_idx": 1
|
| 440 |
+
},
|
| 441 |
+
{
|
| 442 |
+
"type": "table",
|
| 443 |
+
"img_path": "images/0b163bd609f13d76711e6fa99d56b9d40e480d0c34e1b1ce9a67a3c06beef813.jpg",
|
| 444 |
+
"table_caption": [
|
| 445 |
+
"Table 1. Top-1 test accuracy of MKD methods by distilling the knowledge on multiple teachers with the same architectures."
|
| 446 |
+
],
|
| 447 |
+
"table_footnote": [],
|
| 448 |
+
"table_body": "<table><tr><td>Teacher Ensemble</td><td>WRN40-2 76.62±0.26 79.62</td><td>ResNet56 73.28±0.30 76.00</td><td>VGG13 75.17±0.18 77.07</td><td>VGG13 75.17±0.18 77.07</td><td>ResNet32x4 79.31±0.14 81.16</td><td>ResNet32x4 79.31±0.14 81.16</td><td>ResNet32x4 79.31±0.14 81.16</td></tr><tr><td>Student</td><td>ShuffleNetV1 71.70±0.43</td><td>MobileNetV2 65.64±0.19</td><td>VGG8 70.74±0.40</td><td>MobileNetV2 65.64±0.19</td><td>ResNet8x4 72.79±0.14</td><td>ShuffleNetV2 72.94±0.24</td><td>VGG8 70.74±0.40</td></tr><tr><td>AVER [8]</td><td>76.30±0.25</td><td>70.21±0.10</td><td>74.07±0.23</td><td>68.91±0.35</td><td>74.99±0.24</td><td>75.87±0.19</td><td>73.26±0.39</td></tr><tr><td>FitNet-MKD [5]</td><td>76.59±0.17</td><td>70.69±0.56</td><td>73.97±0.22</td><td>68.48±0.07</td><td>74.86±0.21</td><td>76.09±0.13</td><td>73.27±0.19</td></tr><tr><td>EBKD [12]</td><td>76.61±0.14</td><td>70.91±0.22</td><td>74.10±0.27</td><td>68.24±0.82</td><td>75.59±0.15</td><td>76.41±0.12</td><td>73.60±0.22</td></tr><tr><td>AEKD [11]</td><td>76.34±0.24</td><td>70.47±0.15</td><td>73.78±0.03</td><td>68.39±0.50</td><td>74.75±0.28</td><td>75.95±0.20</td><td>73.11±0.27</td></tr><tr><td>CA-MKD</td><td>77.94±0.31</td><td>71.38±0.02</td><td>74.30±0.16</td><td>69.41±0.20</td><td>75.90±0.13</td><td>77.41±0.14</td><td>75.26±0.32</td></tr></table>",
|
| 449 |
+
"bbox": [
|
| 450 |
+
96,
|
| 451 |
+
114,
|
| 452 |
+
906,
|
| 453 |
+
276
|
| 454 |
+
],
|
| 455 |
+
"page_idx": 2
|
| 456 |
+
},
|
| 457 |
+
{
|
| 458 |
+
"type": "table",
|
| 459 |
+
"img_path": "images/556f0bcfbbce6c1986d3b7c96545a83a0a4519e121e0f2fb4c3f32b00d4b6141.jpg",
|
| 460 |
+
"table_caption": [
|
| 461 |
+
"Table 2. Top-1 test accuracy of CA-MKD compared to single-teacher knowledge distillation methods."
|
| 462 |
+
],
|
| 463 |
+
"table_footnote": [],
|
| 464 |
+
"table_body": "<table><tr><td>Teacher</td><td>WRN40-2 76.62±0.26</td><td>ResNet32x4 79.31±0.14</td><td>ResNet56 73.28±0.30</td></tr><tr><td>Student</td><td>ShuffleNetV1 71.70±0.19</td><td>VGG8 70.74±0.40</td><td>MobileNetV2 65.64±0.43</td></tr><tr><td>KD [4]</td><td>75.77±0.14</td><td>72.90±0.34</td><td>69.96±0.14</td></tr><tr><td>FitNet [5]</td><td>76.22±0.21</td><td>72.55±0.66</td><td>69.02±0.28</td></tr><tr><td>AT [6]</td><td>76.44±0.38</td><td>72.16±0.12</td><td>69.79±0.26</td></tr><tr><td>VID [14]</td><td>76.32±0.08</td><td>73.09±0.29</td><td>69.45±0.17</td></tr><tr><td>CRD [15]</td><td>76.58±0.23</td><td>73.57±0.25</td><td>71.15±0.44</td></tr><tr><td>CA-MKD</td><td>77.94±0.31</td><td>75.26±0.13</td><td>71.38±0.02</td></tr></table>",
|
| 465 |
+
"bbox": [
|
| 466 |
+
91,
|
| 467 |
+
339,
|
| 468 |
+
485,
|
| 469 |
+
511
|
| 470 |
+
],
|
| 471 |
+
"page_idx": 2
|
| 472 |
+
},
|
| 473 |
+
{
|
| 474 |
+
"type": "text",
|
| 475 |
+
"text": "To stable the knowledge transfer process, we design the student to be more focused on imitating the teacher with a similar feature space and $w_{inter}^{k}$ indeed serves as such a similarity measure representing the discriminability of a teacher classifier in the student feature space. The ablation study also shows that utilizing $w_{inter}^{k}$ instead of $w_{KD}^{k}$ for the knowledge aggregation in intermediate layers is more effective.",
|
| 476 |
+
"bbox": [
|
| 477 |
+
81,
|
| 478 |
+
536,
|
| 479 |
+
488,
|
| 480 |
+
643
|
| 481 |
+
],
|
| 482 |
+
"page_idx": 2
|
| 483 |
+
},
|
| 484 |
+
{
|
| 485 |
+
"type": "equation",
|
| 486 |
+
"text": "\n$$\n\\mathcal {L} _ {i n t e r} = \\sum_ {k = 1} ^ {K} w _ {i n t e r} ^ {k} \\left\\| F _ {T _ {k}} - r \\left(F _ {S}\\right) \\right\\| _ {2} ^ {2}, \\tag {7}\n$$\n",
|
| 487 |
+
"text_format": "latex",
|
| 488 |
+
"bbox": [
|
| 489 |
+
155,
|
| 490 |
+
652,
|
| 491 |
+
486,
|
| 492 |
+
694
|
| 493 |
+
],
|
| 494 |
+
"page_idx": 2
|
| 495 |
+
},
|
| 496 |
+
{
|
| 497 |
+
"type": "text",
|
| 498 |
+
"text": "where $r(\\cdot)$ is a function for aligning the student and teacher feature dimensions. The $\\ell_2$ loss function is used as distance measure of intermediate features. Finally, the overall training loss between feature pairs will be aggregated by $w_{inter}^k$ .",
|
| 499 |
+
"bbox": [
|
| 500 |
+
81,
|
| 501 |
+
700,
|
| 502 |
+
488,
|
| 503 |
+
762
|
| 504 |
+
],
|
| 505 |
+
"page_idx": 2
|
| 506 |
+
},
|
| 507 |
+
{
|
| 508 |
+
"type": "text",
|
| 509 |
+
"text": "In our work, only the output features of the last block are adopted to avoid incurring too much computational cost.",
|
| 510 |
+
"bbox": [
|
| 511 |
+
83,
|
| 512 |
+
762,
|
| 513 |
+
488,
|
| 514 |
+
791
|
| 515 |
+
],
|
| 516 |
+
"page_idx": 2
|
| 517 |
+
},
|
| 518 |
+
{
|
| 519 |
+
"type": "text",
|
| 520 |
+
"text": "3.3. The Overall Loss Function",
|
| 521 |
+
"text_level": 1,
|
| 522 |
+
"bbox": [
|
| 523 |
+
83,
|
| 524 |
+
809,
|
| 525 |
+
310,
|
| 526 |
+
824
|
| 527 |
+
],
|
| 528 |
+
"page_idx": 2
|
| 529 |
+
},
|
| 530 |
+
{
|
| 531 |
+
"type": "text",
|
| 532 |
+
"text": "In addition to the aforementioned two losses, a regular cross entropy with the ground-truth labels is calculated",
|
| 533 |
+
"bbox": [
|
| 534 |
+
81,
|
| 535 |
+
833,
|
| 536 |
+
488,
|
| 537 |
+
864
|
| 538 |
+
],
|
| 539 |
+
"page_idx": 2
|
| 540 |
+
},
|
| 541 |
+
{
|
| 542 |
+
"type": "equation",
|
| 543 |
+
"text": "\n$$\n\\mathcal {L} _ {C E} = - \\sum_ {c = 1} ^ {C} y ^ {c} \\log \\left(\\sigma \\left(z _ {S} ^ {c}\\right)\\right). \\tag {8}\n$$\n",
|
| 544 |
+
"text_format": "latex",
|
| 545 |
+
"bbox": [
|
| 546 |
+
184,
|
| 547 |
+
875,
|
| 548 |
+
486,
|
| 549 |
+
917
|
| 550 |
+
],
|
| 551 |
+
"page_idx": 2
|
| 552 |
+
},
|
| 553 |
+
{
|
| 554 |
+
"type": "text",
|
| 555 |
+
"text": "The overall loss function of our CA-MKD is summarized as",
|
| 556 |
+
"bbox": [
|
| 557 |
+
509,
|
| 558 |
+
300,
|
| 559 |
+
898,
|
| 560 |
+
315
|
| 561 |
+
],
|
| 562 |
+
"page_idx": 2
|
| 563 |
+
},
|
| 564 |
+
{
|
| 565 |
+
"type": "equation",
|
| 566 |
+
"text": "\n$$\n\\mathcal {L} = \\mathcal {L} _ {C E} + \\alpha \\mathcal {L} _ {K D} + \\beta \\mathcal {L} _ {\\text {i n t e r}}, \\tag {9}\n$$\n",
|
| 567 |
+
"text_format": "latex",
|
| 568 |
+
"bbox": [
|
| 569 |
+
604,
|
| 570 |
+
328,
|
| 571 |
+
913,
|
| 572 |
+
344
|
| 573 |
+
],
|
| 574 |
+
"page_idx": 2
|
| 575 |
+
},
|
| 576 |
+
{
|
| 577 |
+
"type": "text",
|
| 578 |
+
"text": "where $\\alpha$ and $\\beta$ are hyper-parameters to balance the effect of knowledge distillation and standard cross entropy losses.",
|
| 579 |
+
"bbox": [
|
| 580 |
+
508,
|
| 581 |
+
354,
|
| 582 |
+
915,
|
| 583 |
+
386
|
| 584 |
+
],
|
| 585 |
+
"page_idx": 2
|
| 586 |
+
},
|
| 587 |
+
{
|
| 588 |
+
"type": "text",
|
| 589 |
+
"text": "4. EXPERIMENT",
|
| 590 |
+
"text_level": 1,
|
| 591 |
+
"bbox": [
|
| 592 |
+
642,
|
| 593 |
+
405,
|
| 594 |
+
782,
|
| 595 |
+
419
|
| 596 |
+
],
|
| 597 |
+
"page_idx": 2
|
| 598 |
+
},
|
| 599 |
+
{
|
| 600 |
+
"type": "text",
|
| 601 |
+
"text": "In this section, we conduct extensive experiments on CIFAR-100 dataset [19] to verify the effectiveness of our proposed CA-MKD. We adopt eight different teacher-student combinations based on popular neural network architectures. All compared multi-teacher knowledge distillation (MKD) methods use three teachers except for special declarations.",
|
| 602 |
+
"bbox": [
|
| 603 |
+
508,
|
| 604 |
+
433,
|
| 605 |
+
915,
|
| 606 |
+
523
|
| 607 |
+
],
|
| 608 |
+
"page_idx": 2
|
| 609 |
+
},
|
| 610 |
+
{
|
| 611 |
+
"type": "text",
|
| 612 |
+
"text": "Compared Methods. Besides the naïve AVER [8], we reimplement a single-teacher based method FitNet [5] on multiple teachers and denote it as FitNet-MKD. FitNet-MKD will leverage extra information coming from averaged intermediate teacher features. We also reimplement an entropy-based MKD method [12], which has achieved remarkable results in acoustic experiments, on our image classification task and we denote it as EBKD. As for AEKD, we adopt its logits-based version with the author provided code [11].",
|
| 613 |
+
"bbox": [
|
| 614 |
+
506,
|
| 615 |
+
523,
|
| 616 |
+
915,
|
| 617 |
+
659
|
| 618 |
+
],
|
| 619 |
+
"page_idx": 2
|
| 620 |
+
},
|
| 621 |
+
{
|
| 622 |
+
"type": "text",
|
| 623 |
+
"text": "Hyper-parameters. All neural networks are optimized by stochastic gradient descent with momentum 0.9, weight decay 0.0001. The batch size is set to 64. As the previous works do [15, 7], the initial learning rate is set to 0.1, except MobileNetV2, ShuffleNetV1 and ShuffleNetV2 are set to 0.05. The learning rate is multiplied by 0.1 at 150, 180 and 210 of the total 240 training epochs. For the sake of fairness, the temperature $\\tau$ is set to 4 and the $\\alpha$ is set to 1 in all methods. Furthermore, we set the $\\beta$ of our CA-MKD to 50 throughout the experiments. All results are reported in means and standard deviations over 3 runs with different random seeds.",
|
| 624 |
+
"bbox": [
|
| 625 |
+
506,
|
| 626 |
+
660,
|
| 627 |
+
916,
|
| 628 |
+
825
|
| 629 |
+
],
|
| 630 |
+
"page_idx": 2
|
| 631 |
+
},
|
| 632 |
+
{
|
| 633 |
+
"type": "text",
|
| 634 |
+
"text": "4.1. Results on the Same Teacher Architectures",
|
| 635 |
+
"text_level": 1,
|
| 636 |
+
"bbox": [
|
| 637 |
+
508,
|
| 638 |
+
844,
|
| 639 |
+
848,
|
| 640 |
+
859
|
| 641 |
+
],
|
| 642 |
+
"page_idx": 2
|
| 643 |
+
},
|
| 644 |
+
{
|
| 645 |
+
"type": "text",
|
| 646 |
+
"text": "Table 1 shows the top-1 accuracy comparison on CIFAR-100. We also include the results of teacher ensemble with the majority voting strategy. We can find that CA-MKD surpasses",
|
| 647 |
+
"bbox": [
|
| 648 |
+
506,
|
| 649 |
+
868,
|
| 650 |
+
915,
|
| 651 |
+
914
|
| 652 |
+
],
|
| 653 |
+
"page_idx": 2
|
| 654 |
+
},
|
| 655 |
+
{
|
| 656 |
+
"type": "table",
|
| 657 |
+
"img_path": "images/51705e22eb8030ea8311c8ad0e155559268db12ca3533987543d6ed3807b8d3e.jpg",
|
| 658 |
+
"table_caption": [
|
| 659 |
+
"Table 3. Top-1 test accuracy of MKD approaches by distilling the knowledge on multiple teachers with different architectures."
|
| 660 |
+
],
|
| 661 |
+
"table_footnote": [],
|
| 662 |
+
"table_body": "<table><tr><td>VGG8</td><td>AVER</td><td>FitNet-MKD</td><td>EBKD</td><td>AEKD</td><td>CA-MKD</td><td>ResNet8x4</td><td>ResNet20x4</td><td>ResNet32x4</td></tr><tr><td>70.74±0.40</td><td>74.55±0.24</td><td>74.47±0.21</td><td>74.07±0.17</td><td>74.69±0.29</td><td>75.96±0.05</td><td>72.79</td><td>78.39</td><td>79.31</td></tr></table>",
|
| 663 |
+
"bbox": [
|
| 664 |
+
98,
|
| 665 |
+
116,
|
| 666 |
+
906,
|
| 667 |
+
150
|
| 668 |
+
],
|
| 669 |
+
"page_idx": 3
|
| 670 |
+
},
|
| 671 |
+
{
|
| 672 |
+
"type": "image",
|
| 673 |
+
"img_path": "images/8f399062affd846056a797ad630dd6cc09aa0404eb14d52dae222aae4ff9a1c9.jpg",
|
| 674 |
+
"image_caption": [
|
| 675 |
+
"(a) prediction weights"
|
| 676 |
+
],
|
| 677 |
+
"image_footnote": [],
|
| 678 |
+
"bbox": [
|
| 679 |
+
94,
|
| 680 |
+
181,
|
| 681 |
+
261,
|
| 682 |
+
299
|
| 683 |
+
],
|
| 684 |
+
"page_idx": 3
|
| 685 |
+
},
|
| 686 |
+
{
|
| 687 |
+
"type": "image",
|
| 688 |
+
"img_path": "images/f802d32569f5ee3493d7392047b4e704f5f719585c8a8e11d82fd1ef2a87626b.jpg",
|
| 689 |
+
"image_caption": [
|
| 690 |
+
"(b) feature weights"
|
| 691 |
+
],
|
| 692 |
+
"image_footnote": [],
|
| 693 |
+
"bbox": [
|
| 694 |
+
279,
|
| 695 |
+
176,
|
| 696 |
+
483,
|
| 697 |
+
297
|
| 698 |
+
],
|
| 699 |
+
"page_idx": 3
|
| 700 |
+
},
|
| 701 |
+
{
|
| 702 |
+
"type": "text",
|
| 703 |
+
"text": "all competitors cross various architectures. Specifically, compared to the second best method (EBKD), CA-MKD outperforms it with $0.81\\%$ average improvement<sup>1</sup>, and achieves $1.66\\%$ absolute accuracy improvement in the best case.",
|
| 704 |
+
"bbox": [
|
| 705 |
+
81,
|
| 706 |
+
381,
|
| 707 |
+
488,
|
| 708 |
+
441
|
| 709 |
+
],
|
| 710 |
+
"page_idx": 3
|
| 711 |
+
},
|
| 712 |
+
{
|
| 713 |
+
"type": "text",
|
| 714 |
+
"text": "To verify the benefits of diverse information brought by multiple teachers, we compare CA-MKD with some excellent single-teacher based methods. The results in Table 6 show the student indeed has the potential to learn knowledge from multiple teachers, and its accuracy is further improved compared with the single-teacher methods to a certain extent.",
|
| 715 |
+
"bbox": [
|
| 716 |
+
81,
|
| 717 |
+
441,
|
| 718 |
+
488,
|
| 719 |
+
532
|
| 720 |
+
],
|
| 721 |
+
"page_idx": 3
|
| 722 |
+
},
|
| 723 |
+
{
|
| 724 |
+
"type": "text",
|
| 725 |
+
"text": "4.2. Results on the Different Teacher Architectures",
|
| 726 |
+
"text_level": 1,
|
| 727 |
+
"bbox": [
|
| 728 |
+
83,
|
| 729 |
+
551,
|
| 730 |
+
446,
|
| 731 |
+
565
|
| 732 |
+
],
|
| 733 |
+
"page_idx": 3
|
| 734 |
+
},
|
| 735 |
+
{
|
| 736 |
+
"type": "text",
|
| 737 |
+
"text": "Table 3 shows the results of training a student (VGG8) with three different teacher architectures, i.e., ResNet8x4, ResNet20x4 and ResNet32x4. We find the student accuracy becomes even higher than that of training with three ResNet32x4 teachers, which may be attributed to that the knowledge diversity is enlarged in different architectures.",
|
| 738 |
+
"bbox": [
|
| 739 |
+
81,
|
| 740 |
+
575,
|
| 741 |
+
488,
|
| 742 |
+
666
|
| 743 |
+
],
|
| 744 |
+
"page_idx": 3
|
| 745 |
+
},
|
| 746 |
+
{
|
| 747 |
+
"type": "text",
|
| 748 |
+
"text": "Since the performance of ResNet20x4/ResNet32x4 is better than that of ResNet8x4, we could reasonably believe that for most training samples, the student will put larger weights on predictions from the former two rather than the latter one, which is verified in Figure 3. Moreover, our CA-MKD can capture those samples on which the predictions are more confident by ResNet8x4, and assign them dynamic weights to help the student model achieve better performance.",
|
| 749 |
+
"bbox": [
|
| 750 |
+
81,
|
| 751 |
+
667,
|
| 752 |
+
488,
|
| 753 |
+
787
|
| 754 |
+
],
|
| 755 |
+
"page_idx": 3
|
| 756 |
+
},
|
| 757 |
+
{
|
| 758 |
+
"type": "text",
|
| 759 |
+
"text": "4.3. Impact of the Teacher Number",
|
| 760 |
+
"text_level": 1,
|
| 761 |
+
"bbox": [
|
| 762 |
+
83,
|
| 763 |
+
806,
|
| 764 |
+
339,
|
| 765 |
+
821
|
| 766 |
+
],
|
| 767 |
+
"page_idx": 3
|
| 768 |
+
},
|
| 769 |
+
{
|
| 770 |
+
"type": "text",
|
| 771 |
+
"text": "As shown in Figure 4, the student model trained with CA-MKD generally achieves satisfactory results. For example,",
|
| 772 |
+
"bbox": [
|
| 773 |
+
83,
|
| 774 |
+
830,
|
| 775 |
+
488,
|
| 776 |
+
861
|
| 777 |
+
],
|
| 778 |
+
"page_idx": 3
|
| 779 |
+
},
|
| 780 |
+
{
|
| 781 |
+
"type": "image",
|
| 782 |
+
"img_path": "images/426e2a2b35c7df1fffd4998960076c068b5c882f613e6e6ec4733ad9498f9e43.jpg",
|
| 783 |
+
"image_caption": [
|
| 784 |
+
"Fig. 3. The visualization results of learned weights by CA-MKD on each training sample.",
|
| 785 |
+
"(a) ResNet56 & MobileNetV2",
|
| 786 |
+
"(b) ResNet32x4 & ShuffleNetV2",
|
| 787 |
+
"Fig. 4. The effect of different teacher numbers."
|
| 788 |
+
],
|
| 789 |
+
"image_footnote": [],
|
| 790 |
+
"bbox": [
|
| 791 |
+
516,
|
| 792 |
+
176,
|
| 793 |
+
906,
|
| 794 |
+
287
|
| 795 |
+
],
|
| 796 |
+
"page_idx": 3
|
| 797 |
+
},
|
| 798 |
+
{
|
| 799 |
+
"type": "table",
|
| 800 |
+
"img_path": "images/ff0412752f98e7cdef3afefe5cbd25fda7aeb8d2172e2e05eb7202afc1356f61.jpg",
|
| 801 |
+
"table_caption": [
|
| 802 |
+
"Table 4. Ablation study with VGG13 & MobileNetV2."
|
| 803 |
+
],
|
| 804 |
+
"table_footnote": [],
|
| 805 |
+
"table_body": "<table><tr><td>avg weight</td><td>w/o Linter</td><td>w/o wkinter</td><td>CA-MKD</td></tr><tr><td>67.74±0.87</td><td>68.11±0.02</td><td>68.82±0.63</td><td>69.41±0.20</td></tr></table>",
|
| 806 |
+
"bbox": [
|
| 807 |
+
519,
|
| 808 |
+
368,
|
| 809 |
+
910,
|
| 810 |
+
412
|
| 811 |
+
],
|
| 812 |
+
"page_idx": 3
|
| 813 |
+
},
|
| 814 |
+
{
|
| 815 |
+
"type": "text",
|
| 816 |
+
"text": "on the \"ResNet56 & MobileNetV2\" setting, the accuracy of CA-MKD increases continually as the number of teachers increases and it surpasses the competitors with three teachers even those competitors are trained with more teachers.",
|
| 817 |
+
"bbox": [
|
| 818 |
+
506,
|
| 819 |
+
439,
|
| 820 |
+
913,
|
| 821 |
+
500
|
| 822 |
+
],
|
| 823 |
+
"page_idx": 3
|
| 824 |
+
},
|
| 825 |
+
{
|
| 826 |
+
"type": "text",
|
| 827 |
+
"text": "4.4. Ablation Study",
|
| 828 |
+
"text_level": 1,
|
| 829 |
+
"bbox": [
|
| 830 |
+
509,
|
| 831 |
+
520,
|
| 832 |
+
653,
|
| 833 |
+
535
|
| 834 |
+
],
|
| 835 |
+
"page_idx": 3
|
| 836 |
+
},
|
| 837 |
+
{
|
| 838 |
+
"type": "text",
|
| 839 |
+
"text": "We summarize the observations from Table 4 as follows:",
|
| 840 |
+
"bbox": [
|
| 841 |
+
509,
|
| 842 |
+
544,
|
| 843 |
+
883,
|
| 844 |
+
558
|
| 845 |
+
],
|
| 846 |
+
"page_idx": 3
|
| 847 |
+
},
|
| 848 |
+
{
|
| 849 |
+
"type": "list",
|
| 850 |
+
"sub_type": "text",
|
| 851 |
+
"list_items": [
|
| 852 |
+
"(1) avg weight. Simply averaging multiple teachers will cause $1.67\\%$ accuracy drop, which confirms the necessity of treating different teachers based on their specific quality.",
|
| 853 |
+
"(2) w/o $\\mathcal{L}_{inter}$ . The accuracy will appear considerably reduction as we remove the Equation (7), demonstrating the intermediate layer contains useful information for distillation.",
|
| 854 |
+
"(3) $\mathrm{w / o}$ $w_{inter}^{k}$ . We directly use the $w_{KD}^{k}$ obtained from the last layer to integrate intermediate features. The lower result indicates the benefits of designing a separate way of calculating weights for the intermediate layer."
|
| 855 |
+
],
|
| 856 |
+
"bbox": [
|
| 857 |
+
508,
|
| 858 |
+
560,
|
| 859 |
+
913,
|
| 860 |
+
710
|
| 861 |
+
],
|
| 862 |
+
"page_idx": 3
|
| 863 |
+
},
|
| 864 |
+
{
|
| 865 |
+
"type": "text",
|
| 866 |
+
"text": "5. CONCLUSION",
|
| 867 |
+
"text_level": 1,
|
| 868 |
+
"bbox": [
|
| 869 |
+
643,
|
| 870 |
+
733,
|
| 871 |
+
781,
|
| 872 |
+
746
|
| 873 |
+
],
|
| 874 |
+
"page_idx": 3
|
| 875 |
+
},
|
| 876 |
+
{
|
| 877 |
+
"type": "text",
|
| 878 |
+
"text": "In this paper, we introduce confidence-aware mechanism on both predictions and intermediate features for multi-teacher knowledge distillation. The confidence of teachers is calculated based on the closeness between their predictions or features and the ground-truth labels for the reliability identification on each training sample. With the guidance of labels, our technique effectively integrates diverse knowledge from multiple teachers for the student training. Extensive empirical results show that our method outperforms all competitors in various teacher-student architectures.",
|
| 879 |
+
"bbox": [
|
| 880 |
+
506,
|
| 881 |
+
762,
|
| 882 |
+
913,
|
| 883 |
+
912
|
| 884 |
+
],
|
| 885 |
+
"page_idx": 3
|
| 886 |
+
},
|
| 887 |
+
{
|
| 888 |
+
"type": "page_footnote",
|
| 889 |
+
"text": "1 Average Improvement $= \\frac{1}{n}\\sum_{i}^{n}\\left(Acc_{\\mathrm{CA - MKD}}^{i} - Acc_{\\mathrm{EBKD}}^{i}\\right)$ , where the accuracies of CA-MKD, EBKD in the $i$ -th teacher-student combination are denoted as $Acc_{\\mathrm{CA - MKD}}^{i}$ , $Acc_{\\mathrm{EBKD}}^{i}$ , respectively.",
|
| 890 |
+
"bbox": [
|
| 891 |
+
83,
|
| 892 |
+
869,
|
| 893 |
+
488,
|
| 894 |
+
915
|
| 895 |
+
],
|
| 896 |
+
"page_idx": 3
|
| 897 |
+
},
|
| 898 |
+
{
|
| 899 |
+
"type": "text",
|
| 900 |
+
"text": "6. REFERENCES",
|
| 901 |
+
"text_level": 1,
|
| 902 |
+
"bbox": [
|
| 903 |
+
217,
|
| 904 |
+
90,
|
| 905 |
+
354,
|
| 906 |
+
104
|
| 907 |
+
],
|
| 908 |
+
"page_idx": 4
|
| 909 |
+
},
|
| 910 |
+
{
|
| 911 |
+
"type": "list",
|
| 912 |
+
"sub_type": "ref_text",
|
| 913 |
+
"list_items": [
|
| 914 |
+
"[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, “Deep residual learning for image recognition,” in Proceedings of the IEEE conference on computer vision and pattern recognition, 2016, pp. 770–778.",
|
| 915 |
+
"[2] David Silver, Julian Schrittwieser, Karen Simonyan, Ioannis Antonoglou, Aja Huang, Arthur Guez, Thomas Hubert, Lucas Baker, Matthew Lai, Adrian Bolton, et al., \"Mastering the game of go without human knowledge,\" Nature, vol. 550, no. 7676, pp. 354-359, 2017.",
|
| 916 |
+
"[3] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova, “BERT: pre-training of deep bidirectional transformers for language understanding,” in North American Chapter of the Association for Computational Linguistics: Human Language Technologies, 2019, pp. 4171–4186.",
|
| 917 |
+
"[4] Geoffrey Hinton, Oriol Vinyals, and Jeff Dean, “Distilling the knowledge in a neural network,” arXiv preprint arXiv:1503.02531, 2015.",
|
| 918 |
+
"[5] Adriana Romero, Nicolas Ballas, Samira Ebrahimi Kahou, Antoine Chassang, Carlo Gatta, and Yoshua Bengio, “FitNets: Hints for thin deep nets,” in International Conference on Learning Representations, 2015.",
|
| 919 |
+
"[6] Sergey Zagoruyko and Nikos Komodakis, “Paying more attention to attention: improving the performance of convolutional neural networks via attention transfer,” in International Conference on Learning Representations, 2017.",
|
| 920 |
+
"[7] Defang Chen, Jian-Ping Mei, Yuan Zhang, Can Wang, Zhe Wang, Yan Feng, and Chun Chen, “Cross-layer distillation with semantic calibration,” in Proceedings of the AAAI Conference on Artificial Intelligence, 2021, vol. 35, pp. 7028–7036.",
|
| 921 |
+
"[8] Shan You, Chang Xu, Chao Xu, and Dacheng Tao, \"Learning from multiple teacher networks,\" in Proceedings of the 23rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, 2017, pp. 1285-1294.",
|
| 922 |
+
"[9] Takashi Fukuda, Masayuki Suzuki, Gakuto Kurata, Samuel Thomas, Jia Cui, and Bhuvana Ramabhadran, \"Efficient knowledge distillation from an ensemble of teachers,\" in Interspeech, 2017, pp. 3697-3701.",
|
| 923 |
+
"[10] Meng-Chieh Wu, Ching-Te Chiu, and Kun-Hsuan Wu, \"Multi-teacher knowledge distillation for compressed video action recognition on deep neural networks,\" in ICASSP 2019-2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2019, pp. 2202-2206."
|
| 924 |
+
],
|
| 925 |
+
"bbox": [
|
| 926 |
+
86,
|
| 927 |
+
119,
|
| 928 |
+
488,
|
| 929 |
+
912
|
| 930 |
+
],
|
| 931 |
+
"page_idx": 4
|
| 932 |
+
},
|
| 933 |
+
{
|
| 934 |
+
"type": "list",
|
| 935 |
+
"sub_type": "ref_text",
|
| 936 |
+
"list_items": [
|
| 937 |
+
"[11] Shangchen Du, Shan You, Xiaojie Li, Jianlong Wu, Fei Wang, Chen Qian, and Changshui Zhang, “Agree to disagree: Adaptive ensemble knowledge distillation in gradient space,” Advances in Neural Information Processing Systems, vol. 33, 2020.",
|
| 938 |
+
"[12] Kisoo Kwon, Hwidong Na, Hoshik Lee, and Nam Soo Kim, “Adaptive knowledge distillation based on entropy,” in ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2020, pp. 7409-7413.",
|
| 939 |
+
"[13] Jimmy Ba and Rich Caruana, “Do deep nets really need to be deep?,” in Advances in Neural Information Processing Systems, 2014, pp. 2654-2662.",
|
| 940 |
+
"[14] Sungsoo Ahn, Shell Xu Hu, Andreas Damianou, Neil D Lawrence, and Zhenwen Dai, “Variational information distillation for knowledge transfer,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2019, pp. 9163–9171.",
|
| 941 |
+
"[15] Yonglong Tian, Dilip Krishnan, and Phillip Isola, “Contrastive representation distillation,” in International Conference on Learning Representations, 2020.",
|
| 942 |
+
"[16] Xu Lan, Xiatian Zhu, and Shaogang Gong, “Knowledge distillation by on-the-fly native ensemble,” arXiv preprint arXiv:1806.04606, 2018.",
|
| 943 |
+
"[17] Defang Chen, Jian-Ping Mei, Can Wang, Yan Feng, and Chun Chen, \"Online knowledge distillation with diverse peers,\" in Proceedings of the AAAI Conference on Artificial Intelligence, 2020, pp. 3430-3437.",
|
| 944 |
+
"[18] Yang Liu, Wei Zhang, and Jun Wang, \"Adaptive multiteacher multi-level knowledge distillation,\" Neurocomputing, vol. 415, pp. 106-113, 2020.",
|
| 945 |
+
"[19] Alex Krizhevsky and Geoffrey Hinton, “Learning multiple layers of features from tiny images,” Technical Report, 2009.",
|
| 946 |
+
"[20] Sukmin Yun, Jongjin Park, Kimin Lee, and Jinwoo Shin, \"Regularizing class-wise predictions via self-knowledge distillation,\" in Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2020, pp. 13876-13885."
|
| 947 |
+
],
|
| 948 |
+
"bbox": [
|
| 949 |
+
511,
|
| 950 |
+
90,
|
| 951 |
+
913,
|
| 952 |
+
768
|
| 953 |
+
],
|
| 954 |
+
"page_idx": 4
|
| 955 |
+
},
|
| 956 |
+
{
|
| 957 |
+
"type": "text",
|
| 958 |
+
"text": "Appendix",
|
| 959 |
+
"text_level": 1,
|
| 960 |
+
"bbox": [
|
| 961 |
+
84,
|
| 962 |
+
88,
|
| 963 |
+
187,
|
| 964 |
+
108
|
| 965 |
+
],
|
| 966 |
+
"page_idx": 5
|
| 967 |
+
},
|
| 968 |
+
{
|
| 969 |
+
"type": "text",
|
| 970 |
+
"text": "1.1. The Detailed Description of $w_{inter}^{k}$",
|
| 971 |
+
"text_level": 1,
|
| 972 |
+
"bbox": [
|
| 973 |
+
84,
|
| 974 |
+
118,
|
| 975 |
+
359,
|
| 976 |
+
137
|
| 977 |
+
],
|
| 978 |
+
"page_idx": 5
|
| 979 |
+
},
|
| 980 |
+
{
|
| 981 |
+
"type": "text",
|
| 982 |
+
"text": "To stable the knowledge transfer process, we design the student to be more focused on imitating the teacher with a similar feature space and $w_{inter}^{k}$ indeed serves as such a similarity measure representing the discriminability of a teacher classifier in the student feature space.",
|
| 983 |
+
"bbox": [
|
| 984 |
+
81,
|
| 985 |
+
143,
|
| 986 |
+
486,
|
| 987 |
+
219
|
| 988 |
+
],
|
| 989 |
+
"page_idx": 5
|
| 990 |
+
},
|
| 991 |
+
{
|
| 992 |
+
"type": "text",
|
| 993 |
+
"text": "A more detailed discussion is presented in the following paragraphs. As shown in Figure 5, samples belonging to class-1 and class-2 are depicted as circles and triangles, respectively. Although the decision surfaces of teacher-1 (in Figure 5(b)) and teacher-2 (in Figure 5(c)) correctly classify these samples in their own feature spaces, their discriminability in the student feature space is different (in Figure 5(a)).",
|
| 994 |
+
"bbox": [
|
| 995 |
+
81,
|
| 996 |
+
220,
|
| 997 |
+
486,
|
| 998 |
+
325
|
| 999 |
+
],
|
| 1000 |
+
"page_idx": 5
|
| 1001 |
+
},
|
| 1002 |
+
{
|
| 1003 |
+
"type": "text",
|
| 1004 |
+
"text": "In order to stabilize the whole knowledge transfer process, we expect the student to pay more attention to mimicking the teacher with a similar feature space. In this sense, we conclude that teacher-1 for the student is more suitable since its decision surface performs better compared to that of teacher-2 in the student feature space, as shown in Figure 5(a).",
|
| 1005 |
+
"bbox": [
|
| 1006 |
+
81,
|
| 1007 |
+
327,
|
| 1008 |
+
486,
|
| 1009 |
+
416
|
| 1010 |
+
],
|
| 1011 |
+
"page_idx": 5
|
| 1012 |
+
},
|
| 1013 |
+
{
|
| 1014 |
+
"type": "text",
|
| 1015 |
+
"text": "Suppose the point A, B, C are the extracted features of the same sample in the feature space of student, teacher-1 and teacher-2, respectively. If we move the student feature (point A) towards the feature from teacher-1 (point B), point A will be correctly classified by the student's own classifier with only minor or even no adjustment. But if we move the student feature (point A) towards the feature from teacher-2 (point C), it will become even harder to be correctly classified by the student, which may disrupt the training of the student classifier and slow down the model convergence.",
|
| 1016 |
+
"bbox": [
|
| 1017 |
+
81,
|
| 1018 |
+
417,
|
| 1019 |
+
488,
|
| 1020 |
+
568
|
| 1021 |
+
],
|
| 1022 |
+
"page_idx": 5
|
| 1023 |
+
},
|
| 1024 |
+
{
|
| 1025 |
+
"type": "image",
|
| 1026 |
+
"img_path": "images/cb96c2e22afa7594a238ed00a0f2b9033a1ded7236631f9742014f2784a48fe2.jpg",
|
| 1027 |
+
"image_caption": [
|
| 1028 |
+
"Fig. 5. The comparison of teacher-1 and teacher-2 classifiers."
|
| 1029 |
+
],
|
| 1030 |
+
"image_footnote": [],
|
| 1031 |
+
"bbox": [
|
| 1032 |
+
86,
|
| 1033 |
+
579,
|
| 1034 |
+
459,
|
| 1035 |
+
715
|
| 1036 |
+
],
|
| 1037 |
+
"page_idx": 5
|
| 1038 |
+
},
|
| 1039 |
+
{
|
| 1040 |
+
"type": "text",
|
| 1041 |
+
"text": "2.2. Additional Dataset experiments",
|
| 1042 |
+
"text_level": 1,
|
| 1043 |
+
"bbox": [
|
| 1044 |
+
83,
|
| 1045 |
+
782,
|
| 1046 |
+
344,
|
| 1047 |
+
797
|
| 1048 |
+
],
|
| 1049 |
+
"page_idx": 5
|
| 1050 |
+
},
|
| 1051 |
+
{
|
| 1052 |
+
"type": "text",
|
| 1053 |
+
"text": "We add more experiments on Dogs and Tinyimagenet datasets to further verify the effectiveness of our proposed CA-MKD. Table 5 and Table 6 show that our CA-MKD can consistently surpass all the competitors in two more challenging datasets.",
|
| 1054 |
+
"bbox": [
|
| 1055 |
+
81,
|
| 1056 |
+
806,
|
| 1057 |
+
486,
|
| 1058 |
+
867
|
| 1059 |
+
],
|
| 1060 |
+
"page_idx": 5
|
| 1061 |
+
},
|
| 1062 |
+
{
|
| 1063 |
+
"type": "text",
|
| 1064 |
+
"text": "The hyper-parameters for the TinyImagenet dataset are exactly the same as those of CIFAR-100 in our submission. Another dataset (Dogs) contains fine-grained images with",
|
| 1065 |
+
"bbox": [
|
| 1066 |
+
83,
|
| 1067 |
+
868,
|
| 1068 |
+
488,
|
| 1069 |
+
914
|
| 1070 |
+
],
|
| 1071 |
+
"page_idx": 5
|
| 1072 |
+
},
|
| 1073 |
+
{
|
| 1074 |
+
"type": "text",
|
| 1075 |
+
"text": "larger resolutions, which requires a different training procedure. We follow the setting of a previous work [20].",
|
| 1076 |
+
"bbox": [
|
| 1077 |
+
508,
|
| 1078 |
+
90,
|
| 1079 |
+
913,
|
| 1080 |
+
121
|
| 1081 |
+
],
|
| 1082 |
+
"page_idx": 5
|
| 1083 |
+
},
|
| 1084 |
+
{
|
| 1085 |
+
"type": "table",
|
| 1086 |
+
"img_path": "images/925a3a5c0520b88b2c50f2d683b46575e174fa27a12958d7ca8b16d27daa6c2e.jpg",
|
| 1087 |
+
"table_caption": [
|
| 1088 |
+
"Table 5. Top-1 test accuracy of CA-MKD compared to multiple-teacher knowledge distillation methods."
|
| 1089 |
+
],
|
| 1090 |
+
"table_footnote": [],
|
| 1091 |
+
"table_body": "<table><tr><td>Dataset</td><td>Dogs</td><td>Tinyimagenet</td></tr><tr><td rowspan=\"2\">Teacher</td><td>ResNet34</td><td>ResNet32x4</td></tr><tr><td>64.76±1.06</td><td>53.38±0.11</td></tr><tr><td rowspan=\"2\">Student</td><td>ShuffleNetV2x0.5</td><td>VGG8</td></tr><tr><td>59.36±0.73</td><td>44.40±0.15</td></tr><tr><td>AVER</td><td>64.49±0.16</td><td>47.82±0.15</td></tr><tr><td>FitNet-MKD</td><td>64.11±0.80</td><td>47.82±0.05</td></tr><tr><td>EBKD</td><td>64.32±0.23</td><td>47.20±0.10</td></tr><tr><td>AEKD</td><td>64.19±0.34</td><td>47.62±0.38</td></tr><tr><td>CA-MKD</td><td>65.19±0.23</td><td>49.55±0.12</td></tr></table>",
|
| 1092 |
+
"bbox": [
|
| 1093 |
+
535,
|
| 1094 |
+
179,
|
| 1095 |
+
890,
|
| 1096 |
+
364
|
| 1097 |
+
],
|
| 1098 |
+
"page_idx": 5
|
| 1099 |
+
},
|
| 1100 |
+
{
|
| 1101 |
+
"type": "table",
|
| 1102 |
+
"img_path": "images/1694e76dd3874e1e1148f68ee9b250ce05e28067bffeefaabe9c981710b35961.jpg",
|
| 1103 |
+
"table_caption": [
|
| 1104 |
+
"Table 6. Top-1 test accuracy of CA-MKD compared to single-teacher knowledge distillation methods."
|
| 1105 |
+
],
|
| 1106 |
+
"table_footnote": [],
|
| 1107 |
+
"table_body": "<table><tr><td>Dataset</td><td>Dogs</td><td>Tinyimagenet</td></tr><tr><td rowspan=\"2\">Teacher</td><td>ResNet34</td><td>ResNet32x4</td></tr><tr><td>65.97</td><td>53.45</td></tr><tr><td rowspan=\"2\">Student</td><td>ShuffleNetV2x0.5</td><td>VGG8</td></tr><tr><td>59.36±0.73</td><td>44.40±0.15</td></tr><tr><td>KD</td><td>63.90±0.08</td><td>47.42±0.07</td></tr><tr><td>FitNet</td><td>62.45±0.61</td><td>47.24±0.28</td></tr><tr><td>AT</td><td>63.48±0.60</td><td>45.73±0.05</td></tr><tr><td>VID</td><td>64.45±0.23</td><td>47.76±0.08</td></tr><tr><td>CRD</td><td>64.61±0.17</td><td>48.11±0.07</td></tr><tr><td>CA-MKD</td><td>65.19±0.23</td><td>49.55±0.12</td></tr></table>",
|
| 1108 |
+
"bbox": [
|
| 1109 |
+
542,
|
| 1110 |
+
422,
|
| 1111 |
+
880,
|
| 1112 |
+
623
|
| 1113 |
+
],
|
| 1114 |
+
"page_idx": 5
|
| 1115 |
+
}
|
| 1116 |
+
]
|
2201.00xxx/2201.00007/8b96971b-1059-4fc8-881b-f19173e04430_model.json
ADDED
|
@@ -0,0 +1,1433 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[
|
| 2 |
+
[
|
| 3 |
+
{
|
| 4 |
+
"type": "aside_text",
|
| 5 |
+
"bbox": [
|
| 6 |
+
0.023,
|
| 7 |
+
0.265,
|
| 8 |
+
0.058,
|
| 9 |
+
0.708
|
| 10 |
+
],
|
| 11 |
+
"angle": 270,
|
| 12 |
+
"content": "arXiv:2201.00007v3 [cs.LG] 14 Feb 2022"
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "title",
|
| 16 |
+
"bbox": [
|
| 17 |
+
0.167,
|
| 18 |
+
0.119,
|
| 19 |
+
0.833,
|
| 20 |
+
0.137
|
| 21 |
+
],
|
| 22 |
+
"angle": 0,
|
| 23 |
+
"content": "CONFIDENCE-AWARE MULTI-TEACHER KNOWLEDGE DISTILLATION"
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"bbox": [
|
| 28 |
+
0.299,
|
| 29 |
+
0.156,
|
| 30 |
+
0.409,
|
| 31 |
+
0.175
|
| 32 |
+
],
|
| 33 |
+
"angle": 0,
|
| 34 |
+
"content": "Hailin Zhang"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"bbox": [
|
| 39 |
+
0.449,
|
| 40 |
+
0.157,
|
| 41 |
+
0.556,
|
| 42 |
+
0.174
|
| 43 |
+
],
|
| 44 |
+
"angle": 0,
|
| 45 |
+
"content": "Defang Chen"
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"bbox": [
|
| 50 |
+
0.597,
|
| 51 |
+
0.157,
|
| 52 |
+
0.689,
|
| 53 |
+
0.174
|
| 54 |
+
],
|
| 55 |
+
"angle": 0,
|
| 56 |
+
"content": "Can Wang\\*"
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"bbox": [
|
| 61 |
+
0.241,
|
| 62 |
+
0.191,
|
| 63 |
+
0.756,
|
| 64 |
+
0.209
|
| 65 |
+
],
|
| 66 |
+
"angle": 0,
|
| 67 |
+
"content": "Zhejiang University, China; ZJU-Bangsun Joint Research Center."
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"bbox": [
|
| 72 |
+
0.357,
|
| 73 |
+
0.211,
|
| 74 |
+
0.645,
|
| 75 |
+
0.228
|
| 76 |
+
],
|
| 77 |
+
"angle": 0,
|
| 78 |
+
"content": "{zzzhl, defchern, wcan} @ zju.edu.cn"
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "title",
|
| 82 |
+
"bbox": [
|
| 83 |
+
0.241,
|
| 84 |
+
0.26,
|
| 85 |
+
0.333,
|
| 86 |
+
0.273
|
| 87 |
+
],
|
| 88 |
+
"angle": 0,
|
| 89 |
+
"content": "ABSTRACT"
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"type": "text",
|
| 93 |
+
"bbox": [
|
| 94 |
+
0.082,
|
| 95 |
+
0.278,
|
| 96 |
+
0.49,
|
| 97 |
+
0.581
|
| 98 |
+
],
|
| 99 |
+
"angle": 0,
|
| 100 |
+
"content": "Knowledge distillation is initially introduced to utilize additional supervision from a single teacher model for the student model training. To boost the student performance, some recent variants attempt to exploit diverse knowledge sources from multiple teachers. However, existing studies mainly integrate knowledge from diverse sources by averaging over multiple teacher predictions or combining them using other label-free strategies, which may mislead student in the presence of low-quality teacher predictions. To tackle this problem, we propose Confidence-Aware Multi-teacher Knowledge Distillation (CA-MKD), which adaptively assigns sample-wise reliability for each teacher prediction with the help of ground-truth labels, with those teacher predictions close to one-hot labels assigned large weights. Besides, CA-MKD incorporates features in intermediate layers to stable the knowledge transfer process. Extensive experiments show our CA-MKD consistently outperforms all compared state-of-the-art methods across various teacher-student architectures. Code is available: https://github.com/Rorozhl/CA-MKD."
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"type": "text",
|
| 104 |
+
"bbox": [
|
| 105 |
+
0.084,
|
| 106 |
+
0.586,
|
| 107 |
+
0.489,
|
| 108 |
+
0.618
|
| 109 |
+
],
|
| 110 |
+
"angle": 0,
|
| 111 |
+
"content": "Index Terms— knowledge distillation, multiple teachers, confidence-aware weighting"
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"type": "title",
|
| 115 |
+
"bbox": [
|
| 116 |
+
0.209,
|
| 117 |
+
0.636,
|
| 118 |
+
0.365,
|
| 119 |
+
0.65
|
| 120 |
+
],
|
| 121 |
+
"angle": 0,
|
| 122 |
+
"content": "1. INTRODUCTION"
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"type": "text",
|
| 126 |
+
"bbox": [
|
| 127 |
+
0.082,
|
| 128 |
+
0.664,
|
| 129 |
+
0.49,
|
| 130 |
+
0.846
|
| 131 |
+
],
|
| 132 |
+
"angle": 0,
|
| 133 |
+
"content": "Nowadays, deep neural networks have achieved unprecedented success in various applications [1, 2, 3]. However, these complex models requiring huge memory footprint and computational resources are difficult to be applied on embedded devices. Knowledge distillation (KD) is thus proposed as a model compression technique to resolve this issue, which improves the accuracy of a lightweight student model by distilling the knowledge from a pre-trained cumbersome teacher model [4]. The transferred knowledge was originally formalized as softmax outputs (soft targets) of the teacher model [4] and latter extended to the intermediate teacher layers for achieving more promising performance [5, 6, 7]."
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"type": "image",
|
| 137 |
+
"bbox": [
|
| 138 |
+
0.563,
|
| 139 |
+
0.258,
|
| 140 |
+
0.866,
|
| 141 |
+
0.345
|
| 142 |
+
],
|
| 143 |
+
"angle": 0,
|
| 144 |
+
"content": null
|
| 145 |
+
},
|
| 146 |
+
{
|
| 147 |
+
"type": "image_caption",
|
| 148 |
+
"bbox": [
|
| 149 |
+
0.509,
|
| 150 |
+
0.359,
|
| 151 |
+
0.915,
|
| 152 |
+
0.391
|
| 153 |
+
],
|
| 154 |
+
"angle": 0,
|
| 155 |
+
"content": "Fig. 1. Comparison of the previous average direction (green line) and our proposed confidence-aware direction (red line)."
|
| 156 |
+
},
|
| 157 |
+
{
|
| 158 |
+
"type": "text",
|
| 159 |
+
"bbox": [
|
| 160 |
+
0.508,
|
| 161 |
+
0.399,
|
| 162 |
+
0.915,
|
| 163 |
+
0.61
|
| 164 |
+
],
|
| 165 |
+
"angle": 0,
|
| 166 |
+
"content": "As the wisdom of the masses exceeds that of the wisest individual, some multi-teacher knowledge distillation (MKD) methods are proposed and have been proven to be beneficial [8, 9, 10, 11, 12]. Basically, they combine predictions from multiple teachers with the fixed weight assignment [8, 9, 10] or other various label-free schemes, such as calculating weights based on a optimization problem or entropy criterion [11, 12], etc. However, fixed weights fail to differentiate high-quality teachers from low-quality ones [8, 9, 10], and the other schemes may mislead the student in the presence of low-quality teacher predictions [11, 12]. Figure 1 provides an intuitive illustration on this issue, where the student trained with the average weighting strategy might deviate from the correct direction once most teacher predictions are biased."
|
| 167 |
+
},
|
| 168 |
+
{
|
| 169 |
+
"type": "text",
|
| 170 |
+
"bbox": [
|
| 171 |
+
0.508,
|
| 172 |
+
0.611,
|
| 173 |
+
0.916,
|
| 174 |
+
0.777
|
| 175 |
+
],
|
| 176 |
+
"angle": 0,
|
| 177 |
+
"content": "Fortunately, we actually have ground-truth labels in hand to quantify our confidence about teacher predictions and then filter out low-quality predictions for better student training. To this end, we propose Confidence-Aware Multi-teacher Knowledge Distillation (CA-MKD) to learn sample-wise weights by taking the prediction confidence of teachers into consideration for adaptive knowledge integration. The confidence is obtained based on the cross entropy loss between prediction distributions and ground-truth labels. Compared with previous label-free weighting strategies, our technique enables the student to learn from a relatively correct direction."
|
| 178 |
+
},
|
| 179 |
+
{
|
| 180 |
+
"type": "text",
|
| 181 |
+
"bbox": [
|
| 182 |
+
0.508,
|
| 183 |
+
0.778,
|
| 184 |
+
0.916,
|
| 185 |
+
0.913
|
| 186 |
+
],
|
| 187 |
+
"angle": 0,
|
| 188 |
+
"content": "Note that our confidence-aware mechanism not only is able to adaptively weight different teacher predictions based on their sample-wise confidence, but also can be extended to the student-teacher feature pairs in intermediate layers. With the help of our generated flexible and effective weights, we could avoid those poor teacher predictions dominating the knowledge transfer process and considerably improve the student performance on eight teacher-student architecture combinations (as shown in Table 1 and 3)."
|
| 189 |
+
},
|
| 190 |
+
{
|
| 191 |
+
"type": "page_footnote",
|
| 192 |
+
"bbox": [
|
| 193 |
+
0.109,
|
| 194 |
+
0.853,
|
| 195 |
+
0.235,
|
| 196 |
+
0.865
|
| 197 |
+
],
|
| 198 |
+
"angle": 0,
|
| 199 |
+
"content": "*Corresponding author"
|
| 200 |
+
},
|
| 201 |
+
{
|
| 202 |
+
"type": "page_footnote",
|
| 203 |
+
"bbox": [
|
| 204 |
+
0.086,
|
| 205 |
+
0.865,
|
| 206 |
+
0.488,
|
| 207 |
+
0.913
|
| 208 |
+
],
|
| 209 |
+
"angle": 0,
|
| 210 |
+
"content": "This work is supported by National Key R&D Program of China (Grant No: 2019YFB1600700), the Starry Night Science Fund of Zhejiang University Shanghai Institute for Advanced Study (Grant No: SN-ZJU-SIAS-001) and National Natural Science Foundation of China (Grant No: U1866602)."
|
| 211 |
+
}
|
| 212 |
+
],
|
| 213 |
+
[
|
| 214 |
+
{
|
| 215 |
+
"type": "title",
|
| 216 |
+
"bbox": [
|
| 217 |
+
0.206,
|
| 218 |
+
0.092,
|
| 219 |
+
0.367,
|
| 220 |
+
0.105
|
| 221 |
+
],
|
| 222 |
+
"angle": 0,
|
| 223 |
+
"content": "2. RELATED WORK"
|
| 224 |
+
},
|
| 225 |
+
{
|
| 226 |
+
"type": "text",
|
| 227 |
+
"bbox": [
|
| 228 |
+
0.082,
|
| 229 |
+
0.12,
|
| 230 |
+
0.489,
|
| 231 |
+
0.286
|
| 232 |
+
],
|
| 233 |
+
"angle": 0,
|
| 234 |
+
"content": "Knowledge Distillation. Vanilla KD aims to transfer knowledge from a complex network (teacher) to a simple network (student) with the KL divergence minimization between their softened outputs [13, 4]. Mimicking the teacher representations from intermediate layers was latter proposed to explore more knowledge forms [5, 6, 14, 15, 7]. Compared to these methods that require pre-training a teacher, some works simultaneously train multiple students and encourage them to learn from each other instead [16, 17]. Our technique differs from these online KD methods since we attempt to distill knowledge from multiple pre-trained teachers."
|
| 235 |
+
},
|
| 236 |
+
{
|
| 237 |
+
"type": "text",
|
| 238 |
+
"bbox": [
|
| 239 |
+
0.082,
|
| 240 |
+
0.286,
|
| 241 |
+
0.489,
|
| 242 |
+
0.513
|
| 243 |
+
],
|
| 244 |
+
"angle": 0,
|
| 245 |
+
"content": "Multi-teacher Knowledge Distillation. Rather than employing a single teacher, MKD boosts the effectiveness of distillation by integrating predictions from multiple teachers. A bunch of methods are proposed, such as simply assigning average or other fixed weights for different teachers [8, 9, 10], and calculating the weights based on entropy [12], latent factor [18] or multi-objective optimization in the gradient space [11]. However, these label-free strategies may mislead the student training in the presence of low-quality predictions. For instance, entropy-based strategy will prefer models with blind faith since it favors predictions with low variance [12]; optimization-based strategy favors majority opinion and will be easily misled by noisy data [11]. In contrast, our CA-MKD quantifies the teacher predictions based on ground-truth labels and further improves the student performance."
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"type": "title",
|
| 249 |
+
"bbox": [
|
| 250 |
+
0.205,
|
| 251 |
+
0.532,
|
| 252 |
+
0.367,
|
| 253 |
+
0.544
|
| 254 |
+
],
|
| 255 |
+
"angle": 0,
|
| 256 |
+
"content": "3. METHODOLOGY"
|
| 257 |
+
},
|
| 258 |
+
{
|
| 259 |
+
"type": "text",
|
| 260 |
+
"bbox": [
|
| 261 |
+
0.084,
|
| 262 |
+
0.559,
|
| 263 |
+
0.489,
|
| 264 |
+
0.67
|
| 265 |
+
],
|
| 266 |
+
"angle": 0,
|
| 267 |
+
"content": "We denote \\(\\mathcal{D} = \\{\\pmb{x}_i,\\pmb{y}_i\\}_i^N\\) as a labeled training set, \\(N\\) is the number of samples, \\(K\\) is the number of teachers. \\(F\\in \\mathbb{R}^{h\\times w\\times c}\\) is the output of the last network block. We denote \\(\\pmb {z} = [z^1,\\dots,z^C ]\\) as the logits output, where \\(C\\) is the category number. The final model prediction is obtained by a softmax function \\(\\sigma (z^c) = \\frac{\\exp(z^c / \\tau)}{\\sum_j\\exp(z^j / \\tau)}\\) with temperature \\(\\tau\\). In the following sections, we will introduce our CA-MKD in detail."
|
| 268 |
+
},
|
| 269 |
+
{
|
| 270 |
+
"type": "title",
|
| 271 |
+
"bbox": [
|
| 272 |
+
0.084,
|
| 273 |
+
0.688,
|
| 274 |
+
0.347,
|
| 275 |
+
0.702
|
| 276 |
+
],
|
| 277 |
+
"angle": 0,
|
| 278 |
+
"content": "3.1. The Loss of Teacher Predictions"
|
| 279 |
+
},
|
| 280 |
+
{
|
| 281 |
+
"type": "text",
|
| 282 |
+
"bbox": [
|
| 283 |
+
0.083,
|
| 284 |
+
0.711,
|
| 285 |
+
0.489,
|
| 286 |
+
0.772
|
| 287 |
+
],
|
| 288 |
+
"angle": 0,
|
| 289 |
+
"content": "To effectively aggregate the prediction distributions of multiple teachers, we assign different weights which reflects their sample-wise confidence by calculating the cross entropy loss between teacher predictions and ground-truth labels"
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"type": "equation",
|
| 293 |
+
"bbox": [
|
| 294 |
+
0.17,
|
| 295 |
+
0.78,
|
| 296 |
+
0.488,
|
| 297 |
+
0.82
|
| 298 |
+
],
|
| 299 |
+
"angle": 0,
|
| 300 |
+
"content": "\\[\n\\mathcal {L} _ {C E _ {K D}} ^ {k} = - \\sum_ {c = 1} ^ {C} y ^ {c} \\log \\left(\\sigma \\left(z _ {T _ {k}} ^ {c}\\right)\\right), \\tag {1}\n\\]"
|
| 301 |
+
},
|
| 302 |
+
{
|
| 303 |
+
"type": "equation",
|
| 304 |
+
"bbox": [
|
| 305 |
+
0.134,
|
| 306 |
+
0.828,
|
| 307 |
+
0.488,
|
| 308 |
+
0.878
|
| 309 |
+
],
|
| 310 |
+
"angle": 0,
|
| 311 |
+
"content": "\\[\nw _ {K D} ^ {k} = \\frac {1}{K - 1} \\left(1 - \\frac {\\exp \\left(\\mathcal {L} _ {C E _ {K D}} ^ {k}\\right)}{\\sum_ {j} \\exp \\left(\\mathcal {L} _ {C E _ {K D}} ^ {j}\\right)}\\right), \\tag {2}\n\\]"
|
| 312 |
+
},
|
| 313 |
+
{
|
| 314 |
+
"type": "text",
|
| 315 |
+
"bbox": [
|
| 316 |
+
0.084,
|
| 317 |
+
0.882,
|
| 318 |
+
0.489,
|
| 319 |
+
0.916
|
| 320 |
+
],
|
| 321 |
+
"angle": 0,
|
| 322 |
+
"content": "where \\( T_{k} \\) denotes the \\( k \\)th teacher. The less \\( \\mathcal{L}_{CE_{KD}}^{k} \\) corresponds to the larger \\( w_{KD}^{k} \\). The overall teacher predictions are"
|
| 323 |
+
},
|
| 324 |
+
{
|
| 325 |
+
"type": "image",
|
| 326 |
+
"bbox": [
|
| 327 |
+
0.53,
|
| 328 |
+
0.087,
|
| 329 |
+
0.903,
|
| 330 |
+
0.269
|
| 331 |
+
],
|
| 332 |
+
"angle": 0,
|
| 333 |
+
"content": null
|
| 334 |
+
},
|
| 335 |
+
{
|
| 336 |
+
"type": "image_caption",
|
| 337 |
+
"bbox": [
|
| 338 |
+
0.509,
|
| 339 |
+
0.293,
|
| 340 |
+
0.915,
|
| 341 |
+
0.339
|
| 342 |
+
],
|
| 343 |
+
"angle": 0,
|
| 344 |
+
"content": "Fig. 2. An overview of our CA-MKD. The weight calculation of teacher predictions and intermediate teacher features are depicted as the red lines and green lines, respectively."
|
| 345 |
+
},
|
| 346 |
+
{
|
| 347 |
+
"type": "text",
|
| 348 |
+
"bbox": [
|
| 349 |
+
0.51,
|
| 350 |
+
0.354,
|
| 351 |
+
0.778,
|
| 352 |
+
0.369
|
| 353 |
+
],
|
| 354 |
+
"angle": 0,
|
| 355 |
+
"content": "then aggregated with calculated weights"
|
| 356 |
+
},
|
| 357 |
+
{
|
| 358 |
+
"type": "equation",
|
| 359 |
+
"bbox": [
|
| 360 |
+
0.573,
|
| 361 |
+
0.38,
|
| 362 |
+
0.913,
|
| 363 |
+
0.422
|
| 364 |
+
],
|
| 365 |
+
"angle": 0,
|
| 366 |
+
"content": "\\[\n\\mathcal {L} _ {K D} = - \\sum_ {k = 1} ^ {K} w _ {K D} ^ {k} \\sum_ {c = 1} ^ {C} z _ {T _ {k}} ^ {c} \\log \\left(\\sigma \\left(z _ {S} ^ {c}\\right)\\right). \\tag {3}\n\\]"
|
| 367 |
+
},
|
| 368 |
+
{
|
| 369 |
+
"type": "text",
|
| 370 |
+
"bbox": [
|
| 371 |
+
0.509,
|
| 372 |
+
0.434,
|
| 373 |
+
0.915,
|
| 374 |
+
0.584
|
| 375 |
+
],
|
| 376 |
+
"angle": 0,
|
| 377 |
+
"content": "According to the above formulas, the teacher whose prediction is closer to ground-truth labels will be assigned larger weight \\( w_{KD}^{k} \\), since it has enough confidence to make accurate judgement for correct guidance. In contrast, if we simply acquire the weights by calculating the entropy of teacher predictions [12], the weight will become large when the output distribution is sharp regardless of whether the highest probability category is correct. In this case, those biased targets may misguide the student training and further hurt its distillation performance."
|
| 378 |
+
},
|
| 379 |
+
{
|
| 380 |
+
"type": "title",
|
| 381 |
+
"bbox": [
|
| 382 |
+
0.51,
|
| 383 |
+
0.605,
|
| 384 |
+
0.849,
|
| 385 |
+
0.619
|
| 386 |
+
],
|
| 387 |
+
"angle": 0,
|
| 388 |
+
"content": "3.2. The Loss of Intermediate Teacher Features"
|
| 389 |
+
},
|
| 390 |
+
{
|
| 391 |
+
"type": "text",
|
| 392 |
+
"bbox": [
|
| 393 |
+
0.509,
|
| 394 |
+
0.629,
|
| 395 |
+
0.915,
|
| 396 |
+
0.705
|
| 397 |
+
],
|
| 398 |
+
"angle": 0,
|
| 399 |
+
"content": "In addition to KD Loss, inspired by FitNets [5], we believe that the intermediate layers are also beneficial for learning structural knowledge, and thus extend our method to intermediate layers for mining more information. The calculation of intermediate feature matching is presented as follows"
|
| 400 |
+
},
|
| 401 |
+
{
|
| 402 |
+
"type": "equation",
|
| 403 |
+
"bbox": [
|
| 404 |
+
0.65,
|
| 405 |
+
0.718,
|
| 406 |
+
0.913,
|
| 407 |
+
0.734
|
| 408 |
+
],
|
| 409 |
+
"angle": 0,
|
| 410 |
+
"content": "\\[\nz _ {S \\rightarrow T _ {k}} = W _ {T _ {k}} h _ {S}, \\tag {4}\n\\]"
|
| 411 |
+
},
|
| 412 |
+
{
|
| 413 |
+
"type": "equation",
|
| 414 |
+
"bbox": [
|
| 415 |
+
0.58,
|
| 416 |
+
0.747,
|
| 417 |
+
0.913,
|
| 418 |
+
0.787
|
| 419 |
+
],
|
| 420 |
+
"angle": 0,
|
| 421 |
+
"content": "\\[\n\\mathcal {L} _ {C E _ {\\text {i n t e r}}} ^ {k} = - \\sum_ {c = 1} ^ {C} y ^ {c} \\log \\left(\\sigma \\left(z _ {S \\rightarrow T _ {k}} ^ {c}\\right)\\right), \\tag {5}\n\\]"
|
| 422 |
+
},
|
| 423 |
+
{
|
| 424 |
+
"type": "equation",
|
| 425 |
+
"bbox": [
|
| 426 |
+
0.552,
|
| 427 |
+
0.796,
|
| 428 |
+
0.913,
|
| 429 |
+
0.846
|
| 430 |
+
],
|
| 431 |
+
"angle": 0,
|
| 432 |
+
"content": "\\[\nw _ {i n t e r} ^ {k} = \\frac {1}{K - 1} \\left(1 - \\frac {\\exp \\left(\\mathcal {L} _ {C E _ {i n t e r}} ^ {k}\\right)}{\\sum_ {j} \\exp \\left(\\mathcal {L} _ {C E _ {i n t e r}} ^ {j}\\right)}\\right). \\tag {6}\n\\]"
|
| 433 |
+
},
|
| 434 |
+
{
|
| 435 |
+
"type": "text",
|
| 436 |
+
"bbox": [
|
| 437 |
+
0.509,
|
| 438 |
+
0.852,
|
| 439 |
+
0.915,
|
| 440 |
+
0.916
|
| 441 |
+
],
|
| 442 |
+
"angle": 0,
|
| 443 |
+
"content": "where \\(W_{T_k}\\) is the final classifier of the \\(k\\)th teacher. \\(h_S \\in \\mathbb{R}^c\\) is the last student feature vector, i.e., \\(h_S = \\mathrm{AvgPooling}(F_S)\\). \\(\\mathcal{L}_{CE_{inter}}^k\\) is obtained by passing \\(h_S\\) through each teacher classifier. The calculation of \\(w_{inter}^k\\) is similar to that of \\(w_{KD}^k\\)."
|
| 444 |
+
}
|
| 445 |
+
],
|
| 446 |
+
[
|
| 447 |
+
{
|
| 448 |
+
"type": "table_caption",
|
| 449 |
+
"bbox": [
|
| 450 |
+
0.094,
|
| 451 |
+
0.1,
|
| 452 |
+
0.907,
|
| 453 |
+
0.115
|
| 454 |
+
],
|
| 455 |
+
"angle": 0,
|
| 456 |
+
"content": "Table 1. Top-1 test accuracy of MKD methods by distilling the knowledge on multiple teachers with the same architectures."
|
| 457 |
+
},
|
| 458 |
+
{
|
| 459 |
+
"type": "table",
|
| 460 |
+
"bbox": [
|
| 461 |
+
0.098,
|
| 462 |
+
0.116,
|
| 463 |
+
0.907,
|
| 464 |
+
0.277
|
| 465 |
+
],
|
| 466 |
+
"angle": 0,
|
| 467 |
+
"content": "<table><tr><td>Teacher Ensemble</td><td>WRN40-2 76.62±0.26 79.62</td><td>ResNet56 73.28±0.30 76.00</td><td>VGG13 75.17±0.18 77.07</td><td>VGG13 75.17±0.18 77.07</td><td>ResNet32x4 79.31±0.14 81.16</td><td>ResNet32x4 79.31±0.14 81.16</td><td>ResNet32x4 79.31±0.14 81.16</td></tr><tr><td>Student</td><td>ShuffleNetV1 71.70±0.43</td><td>MobileNetV2 65.64±0.19</td><td>VGG8 70.74±0.40</td><td>MobileNetV2 65.64±0.19</td><td>ResNet8x4 72.79±0.14</td><td>ShuffleNetV2 72.94±0.24</td><td>VGG8 70.74±0.40</td></tr><tr><td>AVER [8]</td><td>76.30±0.25</td><td>70.21±0.10</td><td>74.07±0.23</td><td>68.91±0.35</td><td>74.99±0.24</td><td>75.87±0.19</td><td>73.26±0.39</td></tr><tr><td>FitNet-MKD [5]</td><td>76.59±0.17</td><td>70.69±0.56</td><td>73.97±0.22</td><td>68.48±0.07</td><td>74.86±0.21</td><td>76.09±0.13</td><td>73.27±0.19</td></tr><tr><td>EBKD [12]</td><td>76.61±0.14</td><td>70.91±0.22</td><td>74.10±0.27</td><td>68.24±0.82</td><td>75.59±0.15</td><td>76.41±0.12</td><td>73.60±0.22</td></tr><tr><td>AEKD [11]</td><td>76.34±0.24</td><td>70.47±0.15</td><td>73.78±0.03</td><td>68.39±0.50</td><td>74.75±0.28</td><td>75.95±0.20</td><td>73.11±0.27</td></tr><tr><td>CA-MKD</td><td>77.94±0.31</td><td>71.38±0.02</td><td>74.30±0.16</td><td>69.41±0.20</td><td>75.90±0.13</td><td>77.41±0.14</td><td>75.26±0.32</td></tr></table>"
|
| 468 |
+
},
|
| 469 |
+
{
|
| 470 |
+
"type": "table_caption",
|
| 471 |
+
"bbox": [
|
| 472 |
+
0.084,
|
| 473 |
+
0.31,
|
| 474 |
+
0.489,
|
| 475 |
+
0.34
|
| 476 |
+
],
|
| 477 |
+
"angle": 0,
|
| 478 |
+
"content": "Table 2. Top-1 test accuracy of CA-MKD compared to single-teacher knowledge distillation methods."
|
| 479 |
+
},
|
| 480 |
+
{
|
| 481 |
+
"type": "table",
|
| 482 |
+
"bbox": [
|
| 483 |
+
0.093,
|
| 484 |
+
0.34,
|
| 485 |
+
0.486,
|
| 486 |
+
0.512
|
| 487 |
+
],
|
| 488 |
+
"angle": 0,
|
| 489 |
+
"content": "<table><tr><td>Teacher</td><td>WRN40-2 76.62±0.26</td><td>ResNet32x4 79.31±0.14</td><td>ResNet56 73.28±0.30</td></tr><tr><td>Student</td><td>ShuffleNetV1 71.70±0.19</td><td>VGG8 70.74±0.40</td><td>MobileNetV2 65.64±0.43</td></tr><tr><td>KD [4]</td><td>75.77±0.14</td><td>72.90±0.34</td><td>69.96±0.14</td></tr><tr><td>FitNet [5]</td><td>76.22±0.21</td><td>72.55±0.66</td><td>69.02±0.28</td></tr><tr><td>AT [6]</td><td>76.44±0.38</td><td>72.16±0.12</td><td>69.79±0.26</td></tr><tr><td>VID [14]</td><td>76.32±0.08</td><td>73.09±0.29</td><td>69.45±0.17</td></tr><tr><td>CRD [15]</td><td>76.58±0.23</td><td>73.57±0.25</td><td>71.15±0.44</td></tr><tr><td>CA-MKD</td><td>77.94±0.31</td><td>75.26±0.13</td><td>71.38±0.02</td></tr></table>"
|
| 490 |
+
},
|
| 491 |
+
{
|
| 492 |
+
"type": "text",
|
| 493 |
+
"bbox": [
|
| 494 |
+
0.083,
|
| 495 |
+
0.537,
|
| 496 |
+
0.489,
|
| 497 |
+
0.645
|
| 498 |
+
],
|
| 499 |
+
"angle": 0,
|
| 500 |
+
"content": "To stable the knowledge transfer process, we design the student to be more focused on imitating the teacher with a similar feature space and \\( w_{inter}^{k} \\) indeed serves as such a similarity measure representing the discriminability of a teacher classifier in the student feature space. The ablation study also shows that utilizing \\( w_{inter}^{k} \\) instead of \\( w_{KD}^{k} \\) for the knowledge aggregation in intermediate layers is more effective."
|
| 501 |
+
},
|
| 502 |
+
{
|
| 503 |
+
"type": "equation",
|
| 504 |
+
"bbox": [
|
| 505 |
+
0.156,
|
| 506 |
+
0.654,
|
| 507 |
+
0.488,
|
| 508 |
+
0.695
|
| 509 |
+
],
|
| 510 |
+
"angle": 0,
|
| 511 |
+
"content": "\\[\n\\mathcal {L} _ {i n t e r} = \\sum_ {k = 1} ^ {K} w _ {i n t e r} ^ {k} \\left\\| F _ {T _ {k}} - r \\left(F _ {S}\\right) \\right\\| _ {2} ^ {2}, \\tag {7}\n\\]"
|
| 512 |
+
},
|
| 513 |
+
{
|
| 514 |
+
"type": "text",
|
| 515 |
+
"bbox": [
|
| 516 |
+
0.083,
|
| 517 |
+
0.701,
|
| 518 |
+
0.489,
|
| 519 |
+
0.763
|
| 520 |
+
],
|
| 521 |
+
"angle": 0,
|
| 522 |
+
"content": "where \\( r(\\cdot) \\) is a function for aligning the student and teacher feature dimensions. The \\( \\ell_2 \\) loss function is used as distance measure of intermediate features. Finally, the overall training loss between feature pairs will be aggregated by \\( w_{inter}^k \\)."
|
| 523 |
+
},
|
| 524 |
+
{
|
| 525 |
+
"type": "text",
|
| 526 |
+
"bbox": [
|
| 527 |
+
0.084,
|
| 528 |
+
0.763,
|
| 529 |
+
0.489,
|
| 530 |
+
0.792
|
| 531 |
+
],
|
| 532 |
+
"angle": 0,
|
| 533 |
+
"content": "In our work, only the output features of the last block are adopted to avoid incurring too much computational cost."
|
| 534 |
+
},
|
| 535 |
+
{
|
| 536 |
+
"type": "title",
|
| 537 |
+
"bbox": [
|
| 538 |
+
0.084,
|
| 539 |
+
0.81,
|
| 540 |
+
0.312,
|
| 541 |
+
0.825
|
| 542 |
+
],
|
| 543 |
+
"angle": 0,
|
| 544 |
+
"content": "3.3. The Overall Loss Function"
|
| 545 |
+
},
|
| 546 |
+
{
|
| 547 |
+
"type": "text",
|
| 548 |
+
"bbox": [
|
| 549 |
+
0.083,
|
| 550 |
+
0.834,
|
| 551 |
+
0.489,
|
| 552 |
+
0.865
|
| 553 |
+
],
|
| 554 |
+
"angle": 0,
|
| 555 |
+
"content": "In addition to the aforementioned two losses, a regular cross entropy with the ground-truth labels is calculated"
|
| 556 |
+
},
|
| 557 |
+
{
|
| 558 |
+
"type": "equation",
|
| 559 |
+
"bbox": [
|
| 560 |
+
0.185,
|
| 561 |
+
0.876,
|
| 562 |
+
0.488,
|
| 563 |
+
0.918
|
| 564 |
+
],
|
| 565 |
+
"angle": 0,
|
| 566 |
+
"content": "\\[\n\\mathcal {L} _ {C E} = - \\sum_ {c = 1} ^ {C} y ^ {c} \\log \\left(\\sigma \\left(z _ {S} ^ {c}\\right)\\right). \\tag {8}\n\\]"
|
| 567 |
+
},
|
| 568 |
+
{
|
| 569 |
+
"type": "text",
|
| 570 |
+
"bbox": [
|
| 571 |
+
0.51,
|
| 572 |
+
0.301,
|
| 573 |
+
0.9,
|
| 574 |
+
0.316
|
| 575 |
+
],
|
| 576 |
+
"angle": 0,
|
| 577 |
+
"content": "The overall loss function of our CA-MKD is summarized as"
|
| 578 |
+
},
|
| 579 |
+
{
|
| 580 |
+
"type": "equation",
|
| 581 |
+
"bbox": [
|
| 582 |
+
0.606,
|
| 583 |
+
0.329,
|
| 584 |
+
0.914,
|
| 585 |
+
0.345
|
| 586 |
+
],
|
| 587 |
+
"angle": 0,
|
| 588 |
+
"content": "\\[\n\\mathcal {L} = \\mathcal {L} _ {C E} + \\alpha \\mathcal {L} _ {K D} + \\beta \\mathcal {L} _ {\\text {i n t e r}}, \\tag {9}\n\\]"
|
| 589 |
+
},
|
| 590 |
+
{
|
| 591 |
+
"type": "text",
|
| 592 |
+
"bbox": [
|
| 593 |
+
0.509,
|
| 594 |
+
0.356,
|
| 595 |
+
0.916,
|
| 596 |
+
0.387
|
| 597 |
+
],
|
| 598 |
+
"angle": 0,
|
| 599 |
+
"content": "where \\(\\alpha\\) and \\(\\beta\\) are hyper-parameters to balance the effect of knowledge distillation and standard cross entropy losses."
|
| 600 |
+
},
|
| 601 |
+
{
|
| 602 |
+
"type": "title",
|
| 603 |
+
"bbox": [
|
| 604 |
+
0.643,
|
| 605 |
+
0.406,
|
| 606 |
+
0.783,
|
| 607 |
+
0.42
|
| 608 |
+
],
|
| 609 |
+
"angle": 0,
|
| 610 |
+
"content": "4. EXPERIMENT"
|
| 611 |
+
},
|
| 612 |
+
{
|
| 613 |
+
"type": "text",
|
| 614 |
+
"bbox": [
|
| 615 |
+
0.509,
|
| 616 |
+
0.434,
|
| 617 |
+
0.916,
|
| 618 |
+
0.525
|
| 619 |
+
],
|
| 620 |
+
"angle": 0,
|
| 621 |
+
"content": "In this section, we conduct extensive experiments on CIFAR-100 dataset [19] to verify the effectiveness of our proposed CA-MKD. We adopt eight different teacher-student combinations based on popular neural network architectures. All compared multi-teacher knowledge distillation (MKD) methods use three teachers except for special declarations."
|
| 622 |
+
},
|
| 623 |
+
{
|
| 624 |
+
"type": "text",
|
| 625 |
+
"bbox": [
|
| 626 |
+
0.508,
|
| 627 |
+
0.525,
|
| 628 |
+
0.916,
|
| 629 |
+
0.66
|
| 630 |
+
],
|
| 631 |
+
"angle": 0,
|
| 632 |
+
"content": "Compared Methods. Besides the naïve AVER [8], we reimplement a single-teacher based method FitNet [5] on multiple teachers and denote it as FitNet-MKD. FitNet-MKD will leverage extra information coming from averaged intermediate teacher features. We also reimplement an entropy-based MKD method [12], which has achieved remarkable results in acoustic experiments, on our image classification task and we denote it as EBKD. As for AEKD, we adopt its logits-based version with the author provided code [11]."
|
| 633 |
+
},
|
| 634 |
+
{
|
| 635 |
+
"type": "text",
|
| 636 |
+
"bbox": [
|
| 637 |
+
0.508,
|
| 638 |
+
0.661,
|
| 639 |
+
0.917,
|
| 640 |
+
0.827
|
| 641 |
+
],
|
| 642 |
+
"angle": 0,
|
| 643 |
+
"content": "Hyper-parameters. All neural networks are optimized by stochastic gradient descent with momentum 0.9, weight decay 0.0001. The batch size is set to 64. As the previous works do [15, 7], the initial learning rate is set to 0.1, except MobileNetV2, ShuffleNetV1 and ShuffleNetV2 are set to 0.05. The learning rate is multiplied by 0.1 at 150, 180 and 210 of the total 240 training epochs. For the sake of fairness, the temperature \\(\\tau\\) is set to 4 and the \\(\\alpha\\) is set to 1 in all methods. Furthermore, we set the \\(\\beta\\) of our CA-MKD to 50 throughout the experiments. All results are reported in means and standard deviations over 3 runs with different random seeds."
|
| 644 |
+
},
|
| 645 |
+
{
|
| 646 |
+
"type": "title",
|
| 647 |
+
"bbox": [
|
| 648 |
+
0.509,
|
| 649 |
+
0.845,
|
| 650 |
+
0.849,
|
| 651 |
+
0.86
|
| 652 |
+
],
|
| 653 |
+
"angle": 0,
|
| 654 |
+
"content": "4.1. Results on the Same Teacher Architectures"
|
| 655 |
+
},
|
| 656 |
+
{
|
| 657 |
+
"type": "text",
|
| 658 |
+
"bbox": [
|
| 659 |
+
0.508,
|
| 660 |
+
0.869,
|
| 661 |
+
0.916,
|
| 662 |
+
0.915
|
| 663 |
+
],
|
| 664 |
+
"angle": 0,
|
| 665 |
+
"content": "Table 1 shows the top-1 accuracy comparison on CIFAR-100. We also include the results of teacher ensemble with the majority voting strategy. We can find that CA-MKD surpasses"
|
| 666 |
+
}
|
| 667 |
+
],
|
| 668 |
+
[
|
| 669 |
+
{
|
| 670 |
+
"type": "table_caption",
|
| 671 |
+
"bbox": [
|
| 672 |
+
0.085,
|
| 673 |
+
0.1,
|
| 674 |
+
0.914,
|
| 675 |
+
0.115
|
| 676 |
+
],
|
| 677 |
+
"angle": 0,
|
| 678 |
+
"content": "Table 3. Top-1 test accuracy of MKD approaches by distilling the knowledge on multiple teachers with different architectures."
|
| 679 |
+
},
|
| 680 |
+
{
|
| 681 |
+
"type": "table",
|
| 682 |
+
"bbox": [
|
| 683 |
+
0.099,
|
| 684 |
+
0.117,
|
| 685 |
+
0.907,
|
| 686 |
+
0.151
|
| 687 |
+
],
|
| 688 |
+
"angle": 0,
|
| 689 |
+
"content": "<table><tr><td>VGG8</td><td>AVER</td><td>FitNet-MKD</td><td>EBKD</td><td>AEKD</td><td>CA-MKD</td><td>ResNet8x4</td><td>ResNet20x4</td><td>ResNet32x4</td></tr><tr><td>70.74±0.40</td><td>74.55±0.24</td><td>74.47±0.21</td><td>74.07±0.17</td><td>74.69±0.29</td><td>75.96±0.05</td><td>72.79</td><td>78.39</td><td>79.31</td></tr></table>"
|
| 690 |
+
},
|
| 691 |
+
{
|
| 692 |
+
"type": "image",
|
| 693 |
+
"bbox": [
|
| 694 |
+
0.096,
|
| 695 |
+
0.182,
|
| 696 |
+
0.262,
|
| 697 |
+
0.3
|
| 698 |
+
],
|
| 699 |
+
"angle": 0,
|
| 700 |
+
"content": null
|
| 701 |
+
},
|
| 702 |
+
{
|
| 703 |
+
"type": "image_caption",
|
| 704 |
+
"bbox": [
|
| 705 |
+
0.131,
|
| 706 |
+
0.306,
|
| 707 |
+
0.248,
|
| 708 |
+
0.318
|
| 709 |
+
],
|
| 710 |
+
"angle": 0,
|
| 711 |
+
"content": "(a) prediction weights"
|
| 712 |
+
},
|
| 713 |
+
{
|
| 714 |
+
"type": "image",
|
| 715 |
+
"bbox": [
|
| 716 |
+
0.28,
|
| 717 |
+
0.177,
|
| 718 |
+
0.484,
|
| 719 |
+
0.298
|
| 720 |
+
],
|
| 721 |
+
"angle": 0,
|
| 722 |
+
"content": null
|
| 723 |
+
},
|
| 724 |
+
{
|
| 725 |
+
"type": "image_caption",
|
| 726 |
+
"bbox": [
|
| 727 |
+
0.324,
|
| 728 |
+
0.307,
|
| 729 |
+
0.426,
|
| 730 |
+
0.318
|
| 731 |
+
],
|
| 732 |
+
"angle": 0,
|
| 733 |
+
"content": "(b) feature weights"
|
| 734 |
+
},
|
| 735 |
+
{
|
| 736 |
+
"type": "image_caption",
|
| 737 |
+
"bbox": [
|
| 738 |
+
0.083,
|
| 739 |
+
0.337,
|
| 740 |
+
0.489,
|
| 741 |
+
0.368
|
| 742 |
+
],
|
| 743 |
+
"angle": 0,
|
| 744 |
+
"content": "Fig. 3. The visualization results of learned weights by CA-MKD on each training sample."
|
| 745 |
+
},
|
| 746 |
+
{
|
| 747 |
+
"type": "text",
|
| 748 |
+
"bbox": [
|
| 749 |
+
0.083,
|
| 750 |
+
0.382,
|
| 751 |
+
0.489,
|
| 752 |
+
0.442
|
| 753 |
+
],
|
| 754 |
+
"angle": 0,
|
| 755 |
+
"content": "all competitors cross various architectures. Specifically, compared to the second best method (EBKD), CA-MKD outperforms it with \\(0.81\\%\\) average improvement<sup>1</sup>, and achieves \\(1.66\\%\\) absolute accuracy improvement in the best case."
|
| 756 |
+
},
|
| 757 |
+
{
|
| 758 |
+
"type": "text",
|
| 759 |
+
"bbox": [
|
| 760 |
+
0.083,
|
| 761 |
+
0.443,
|
| 762 |
+
0.489,
|
| 763 |
+
0.533
|
| 764 |
+
],
|
| 765 |
+
"angle": 0,
|
| 766 |
+
"content": "To verify the benefits of diverse information brought by multiple teachers, we compare CA-MKD with some excellent single-teacher based methods. The results in Table 6 show the student indeed has the potential to learn knowledge from multiple teachers, and its accuracy is further improved compared with the single-teacher methods to a certain extent."
|
| 767 |
+
},
|
| 768 |
+
{
|
| 769 |
+
"type": "title",
|
| 770 |
+
"bbox": [
|
| 771 |
+
0.084,
|
| 772 |
+
0.552,
|
| 773 |
+
0.447,
|
| 774 |
+
0.566
|
| 775 |
+
],
|
| 776 |
+
"angle": 0,
|
| 777 |
+
"content": "4.2. Results on the Different Teacher Architectures"
|
| 778 |
+
},
|
| 779 |
+
{
|
| 780 |
+
"type": "text",
|
| 781 |
+
"bbox": [
|
| 782 |
+
0.083,
|
| 783 |
+
0.576,
|
| 784 |
+
0.489,
|
| 785 |
+
0.667
|
| 786 |
+
],
|
| 787 |
+
"angle": 0,
|
| 788 |
+
"content": "Table 3 shows the results of training a student (VGG8) with three different teacher architectures, i.e., ResNet8x4, ResNet20x4 and ResNet32x4. We find the student accuracy becomes even higher than that of training with three ResNet32x4 teachers, which may be attributed to that the knowledge diversity is enlarged in different architectures."
|
| 789 |
+
},
|
| 790 |
+
{
|
| 791 |
+
"type": "text",
|
| 792 |
+
"bbox": [
|
| 793 |
+
0.083,
|
| 794 |
+
0.668,
|
| 795 |
+
0.489,
|
| 796 |
+
0.788
|
| 797 |
+
],
|
| 798 |
+
"angle": 0,
|
| 799 |
+
"content": "Since the performance of ResNet20x4/ResNet32x4 is better than that of ResNet8x4, we could reasonably believe that for most training samples, the student will put larger weights on predictions from the former two rather than the latter one, which is verified in Figure 3. Moreover, our CA-MKD can capture those samples on which the predictions are more confident by ResNet8x4, and assign them dynamic weights to help the student model achieve better performance."
|
| 800 |
+
},
|
| 801 |
+
{
|
| 802 |
+
"type": "title",
|
| 803 |
+
"bbox": [
|
| 804 |
+
0.084,
|
| 805 |
+
0.807,
|
| 806 |
+
0.34,
|
| 807 |
+
0.822
|
| 808 |
+
],
|
| 809 |
+
"angle": 0,
|
| 810 |
+
"content": "4.3. Impact of the Teacher Number"
|
| 811 |
+
},
|
| 812 |
+
{
|
| 813 |
+
"type": "text",
|
| 814 |
+
"bbox": [
|
| 815 |
+
0.084,
|
| 816 |
+
0.831,
|
| 817 |
+
0.489,
|
| 818 |
+
0.862
|
| 819 |
+
],
|
| 820 |
+
"angle": 0,
|
| 821 |
+
"content": "As shown in Figure 4, the student model trained with CA-MKD generally achieves satisfactory results. For example,"
|
| 822 |
+
},
|
| 823 |
+
{
|
| 824 |
+
"type": "page_footnote",
|
| 825 |
+
"bbox": [
|
| 826 |
+
0.084,
|
| 827 |
+
0.871,
|
| 828 |
+
0.489,
|
| 829 |
+
0.916
|
| 830 |
+
],
|
| 831 |
+
"angle": 0,
|
| 832 |
+
"content": "1 Average Improvement \\(= \\frac{1}{n}\\sum_{i}^{n}\\left(Acc_{\\mathrm{CA - MKD}}^{i} - Acc_{\\mathrm{EBKD}}^{i}\\right)\\), where the accuracies of CA-MKD, EBKD in the \\(i\\)-th teacher-student combination are denoted as \\(Acc_{\\mathrm{CA - MKD}}^{i}\\), \\(Acc_{\\mathrm{EBKD}}^{i}\\), respectively."
|
| 833 |
+
},
|
| 834 |
+
{
|
| 835 |
+
"type": "image",
|
| 836 |
+
"bbox": [
|
| 837 |
+
0.517,
|
| 838 |
+
0.178,
|
| 839 |
+
0.907,
|
| 840 |
+
0.289
|
| 841 |
+
],
|
| 842 |
+
"angle": 0,
|
| 843 |
+
"content": null
|
| 844 |
+
},
|
| 845 |
+
{
|
| 846 |
+
"type": "image_caption",
|
| 847 |
+
"bbox": [
|
| 848 |
+
0.564,
|
| 849 |
+
0.296,
|
| 850 |
+
0.693,
|
| 851 |
+
0.306
|
| 852 |
+
],
|
| 853 |
+
"angle": 0,
|
| 854 |
+
"content": "(a) ResNet56 & MobileNetV2"
|
| 855 |
+
},
|
| 856 |
+
{
|
| 857 |
+
"type": "image_caption",
|
| 858 |
+
"bbox": [
|
| 859 |
+
0.753,
|
| 860 |
+
0.296,
|
| 861 |
+
0.895,
|
| 862 |
+
0.306
|
| 863 |
+
],
|
| 864 |
+
"angle": 0,
|
| 865 |
+
"content": "(b) ResNet32x4 & ShuffleNetV2"
|
| 866 |
+
},
|
| 867 |
+
{
|
| 868 |
+
"type": "image_caption",
|
| 869 |
+
"bbox": [
|
| 870 |
+
0.556,
|
| 871 |
+
0.325,
|
| 872 |
+
0.868,
|
| 873 |
+
0.34
|
| 874 |
+
],
|
| 875 |
+
"angle": 0,
|
| 876 |
+
"content": "Fig. 4. The effect of different teacher numbers."
|
| 877 |
+
},
|
| 878 |
+
{
|
| 879 |
+
"type": "table_caption",
|
| 880 |
+
"bbox": [
|
| 881 |
+
0.53,
|
| 882 |
+
0.354,
|
| 883 |
+
0.895,
|
| 884 |
+
0.368
|
| 885 |
+
],
|
| 886 |
+
"angle": 0,
|
| 887 |
+
"content": "Table 4. Ablation study with VGG13 & MobileNetV2."
|
| 888 |
+
},
|
| 889 |
+
{
|
| 890 |
+
"type": "table",
|
| 891 |
+
"bbox": [
|
| 892 |
+
0.52,
|
| 893 |
+
0.369,
|
| 894 |
+
0.911,
|
| 895 |
+
0.414
|
| 896 |
+
],
|
| 897 |
+
"angle": 0,
|
| 898 |
+
"content": "<table><tr><td>avg weight</td><td>w/o Linter</td><td>w/o wkinter</td><td>CA-MKD</td></tr><tr><td>67.74±0.87</td><td>68.11±0.02</td><td>68.82±0.63</td><td>69.41±0.20</td></tr></table>"
|
| 899 |
+
},
|
| 900 |
+
{
|
| 901 |
+
"type": "text",
|
| 902 |
+
"bbox": [
|
| 903 |
+
0.508,
|
| 904 |
+
0.44,
|
| 905 |
+
0.915,
|
| 906 |
+
0.501
|
| 907 |
+
],
|
| 908 |
+
"angle": 0,
|
| 909 |
+
"content": "on the \"ResNet56 & MobileNetV2\" setting, the accuracy of CA-MKD increases continually as the number of teachers increases and it surpasses the competitors with three teachers even those competitors are trained with more teachers."
|
| 910 |
+
},
|
| 911 |
+
{
|
| 912 |
+
"type": "title",
|
| 913 |
+
"bbox": [
|
| 914 |
+
0.51,
|
| 915 |
+
0.521,
|
| 916 |
+
0.655,
|
| 917 |
+
0.536
|
| 918 |
+
],
|
| 919 |
+
"angle": 0,
|
| 920 |
+
"content": "4.4. Ablation Study"
|
| 921 |
+
},
|
| 922 |
+
{
|
| 923 |
+
"type": "text",
|
| 924 |
+
"bbox": [
|
| 925 |
+
0.51,
|
| 926 |
+
0.545,
|
| 927 |
+
0.885,
|
| 928 |
+
0.559
|
| 929 |
+
],
|
| 930 |
+
"angle": 0,
|
| 931 |
+
"content": "We summarize the observations from Table 4 as follows:"
|
| 932 |
+
},
|
| 933 |
+
{
|
| 934 |
+
"type": "text",
|
| 935 |
+
"bbox": [
|
| 936 |
+
0.509,
|
| 937 |
+
0.561,
|
| 938 |
+
0.915,
|
| 939 |
+
0.606
|
| 940 |
+
],
|
| 941 |
+
"angle": 0,
|
| 942 |
+
"content": "(1) avg weight. Simply averaging multiple teachers will cause \\(1.67\\%\\) accuracy drop, which confirms the necessity of treating different teachers based on their specific quality."
|
| 943 |
+
},
|
| 944 |
+
{
|
| 945 |
+
"type": "text",
|
| 946 |
+
"bbox": [
|
| 947 |
+
0.509,
|
| 948 |
+
0.607,
|
| 949 |
+
0.915,
|
| 950 |
+
0.651
|
| 951 |
+
],
|
| 952 |
+
"angle": 0,
|
| 953 |
+
"content": "(2) w/o \\(\\mathcal{L}_{inter}\\). The accuracy is considerably reduced when we remove Equation (7), demonstrating that the intermediate layer contains useful information for distillation."
|
| 954 |
+
},
|
| 955 |
+
{
|
| 956 |
+
"type": "text",
|
| 957 |
+
"bbox": [
|
| 958 |
+
0.509,
|
| 959 |
+
0.652,
|
| 960 |
+
0.915,
|
| 961 |
+
0.712
|
| 962 |
+
],
|
| 963 |
+
"angle": 0,
|
| 964 |
+
"content": "(3) \\( \\mathrm{w / o} \\) \\( w_{inter}^{k} \\). We directly use the \\( w_{KD}^{k} \\) obtained from the last layer to integrate intermediate features. The lower result indicates the benefits of designing a separate way of calculating weights for the intermediate layer."
|
| 965 |
+
},
|
| 966 |
+
{
|
| 967 |
+
"type": "list",
|
| 968 |
+
"bbox": [
|
| 969 |
+
0.509,
|
| 970 |
+
0.561,
|
| 971 |
+
0.915,
|
| 972 |
+
0.712
|
| 973 |
+
],
|
| 974 |
+
"angle": 0,
|
| 975 |
+
"content": null
|
| 976 |
+
},
|
| 977 |
+
{
|
| 978 |
+
"type": "title",
|
| 979 |
+
"bbox": [
|
| 980 |
+
0.644,
|
| 981 |
+
0.734,
|
| 982 |
+
0.782,
|
| 983 |
+
0.747
|
| 984 |
+
],
|
| 985 |
+
"angle": 0,
|
| 986 |
+
"content": "5. CONCLUSION"
|
| 987 |
+
},
|
| 988 |
+
{
|
| 989 |
+
"type": "text",
|
| 990 |
+
"bbox": [
|
| 991 |
+
0.508,
|
| 992 |
+
0.763,
|
| 993 |
+
0.915,
|
| 994 |
+
0.913
|
| 995 |
+
],
|
| 996 |
+
"angle": 0,
|
| 997 |
+
"content": "In this paper, we introduce confidence-aware mechanism on both predictions and intermediate features for multi-teacher knowledge distillation. The confidence of teachers is calculated based on the closeness between their predictions or features and the ground-truth labels for the reliability identification on each training sample. With the guidance of labels, our technique effectively integrates diverse knowledge from multiple teachers for the student training. Extensive empirical results show that our method outperforms all competitors in various teacher-student architectures."
|
| 998 |
+
}
|
| 999 |
+
],
|
| 1000 |
+
[
|
| 1001 |
+
{
|
| 1002 |
+
"type": "title",
|
| 1003 |
+
"bbox": [
|
| 1004 |
+
0.218,
|
| 1005 |
+
0.091,
|
| 1006 |
+
0.356,
|
| 1007 |
+
0.106
|
| 1008 |
+
],
|
| 1009 |
+
"angle": 0,
|
| 1010 |
+
"content": "6. REFERENCES"
|
| 1011 |
+
},
|
| 1012 |
+
{
|
| 1013 |
+
"type": "ref_text",
|
| 1014 |
+
"bbox": [
|
| 1015 |
+
0.096,
|
| 1016 |
+
0.12,
|
| 1017 |
+
0.489,
|
| 1018 |
+
0.18
|
| 1019 |
+
],
|
| 1020 |
+
"angle": 0,
|
| 1021 |
+
"content": "[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, “Deep residual learning for image recognition,” in Proceedings of the IEEE conference on computer vision and pattern recognition, 2016, pp. 770–778."
|
| 1022 |
+
},
|
| 1023 |
+
{
|
| 1024 |
+
"type": "ref_text",
|
| 1025 |
+
"bbox": [
|
| 1026 |
+
0.096,
|
| 1027 |
+
0.19,
|
| 1028 |
+
0.487,
|
| 1029 |
+
0.265
|
| 1030 |
+
],
|
| 1031 |
+
"angle": 0,
|
| 1032 |
+
"content": "[2] David Silver, Julian Schrittwieser, Karen Simonyan, Ioannis Antonoglou, Aja Huang, Arthur Guez, Thomas Hubert, Lucas Baker, Matthew Lai, Adrian Bolton, et al., \"Mastering the game of go without human knowledge,\" Nature, vol. 550, no. 7676, pp. 354-359, 2017."
|
| 1033 |
+
},
|
| 1034 |
+
{
|
| 1035 |
+
"type": "ref_text",
|
| 1036 |
+
"bbox": [
|
| 1037 |
+
0.096,
|
| 1038 |
+
0.275,
|
| 1039 |
+
0.487,
|
| 1040 |
+
0.364
|
| 1041 |
+
],
|
| 1042 |
+
"angle": 0,
|
| 1043 |
+
"content": "[3] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova, “BERT: pre-training of deep bidirectional transformers for language understanding,” in North American Chapter of the Association for Computational Linguistics: Human Language Technologies, 2019, pp. 4171–4186."
|
| 1044 |
+
},
|
| 1045 |
+
{
|
| 1046 |
+
"type": "ref_text",
|
| 1047 |
+
"bbox": [
|
| 1048 |
+
0.096,
|
| 1049 |
+
0.375,
|
| 1050 |
+
0.487,
|
| 1051 |
+
0.418
|
| 1052 |
+
],
|
| 1053 |
+
"angle": 0,
|
| 1054 |
+
"content": "[4] Geoffrey Hinton, Oriol Vinyals, and Jeff Dean, “Distilling the knowledge in a neural network,” arXiv preprint arXiv:1503.02531, 2015."
|
| 1055 |
+
},
|
| 1056 |
+
{
|
| 1057 |
+
"type": "ref_text",
|
| 1058 |
+
"bbox": [
|
| 1059 |
+
0.096,
|
| 1060 |
+
0.43,
|
| 1061 |
+
0.487,
|
| 1062 |
+
0.489
|
| 1063 |
+
],
|
| 1064 |
+
"angle": 0,
|
| 1065 |
+
"content": "[5] Adriana Romero, Nicolas Ballas, Samira Ebrahimi Kahou, Antoine Chassang, Carlo Gatta, and Yoshua Bengio, “FitNets: Hints for thin deep nets,” in International Conference on Learning Representations, 2015."
|
| 1066 |
+
},
|
| 1067 |
+
{
|
| 1068 |
+
"type": "ref_text",
|
| 1069 |
+
"bbox": [
|
| 1070 |
+
0.096,
|
| 1071 |
+
0.5,
|
| 1072 |
+
0.487,
|
| 1073 |
+
0.572
|
| 1074 |
+
],
|
| 1075 |
+
"angle": 0,
|
| 1076 |
+
"content": "[6] Sergey Zagoruyko and Nikos Komodakis, “Paying more attention to attention: improving the performance of convolutional neural networks via attention transfer,” in International Conference on Learning Representations, 2017."
|
| 1077 |
+
},
|
| 1078 |
+
{
|
| 1079 |
+
"type": "ref_text",
|
| 1080 |
+
"bbox": [
|
| 1081 |
+
0.096,
|
| 1082 |
+
0.584,
|
| 1083 |
+
0.487,
|
| 1084 |
+
0.658
|
| 1085 |
+
],
|
| 1086 |
+
"angle": 0,
|
| 1087 |
+
"content": "[7] Defang Chen, Jian-Ping Mei, Yuan Zhang, Can Wang, Zhe Wang, Yan Feng, and Chun Chen, “Cross-layer distillation with semantic calibration,” in Proceedings of the AAAI Conference on Artificial Intelligence, 2021, vol. 35, pp. 7028–7036."
|
| 1088 |
+
},
|
| 1089 |
+
{
|
| 1090 |
+
"type": "ref_text",
|
| 1091 |
+
"bbox": [
|
| 1092 |
+
0.096,
|
| 1093 |
+
0.67,
|
| 1094 |
+
0.487,
|
| 1095 |
+
0.744
|
| 1096 |
+
],
|
| 1097 |
+
"angle": 0,
|
| 1098 |
+
"content": "[8] Shan You, Chang Xu, Chao Xu, and Dacheng Tao, \"Learning from multiple teacher networks,\" in Proceedings of the 23rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, 2017, pp. 1285-1294."
|
| 1099 |
+
},
|
| 1100 |
+
{
|
| 1101 |
+
"type": "ref_text",
|
| 1102 |
+
"bbox": [
|
| 1103 |
+
0.096,
|
| 1104 |
+
0.755,
|
| 1105 |
+
0.487,
|
| 1106 |
+
0.813
|
| 1107 |
+
],
|
| 1108 |
+
"angle": 0,
|
| 1109 |
+
"content": "[9] Takashi Fukuda, Masayuki Suzuki, Gakuto Kurata, Samuel Thomas, Jia Cui, and Bhuvana Ramabhadran, \"Efficient knowledge distillation from an ensemble of teachers,\" in Interspeech, 2017, pp. 3697-3701."
|
| 1110 |
+
},
|
| 1111 |
+
{
|
| 1112 |
+
"type": "ref_text",
|
| 1113 |
+
"bbox": [
|
| 1114 |
+
0.088,
|
| 1115 |
+
0.824,
|
| 1116 |
+
0.487,
|
| 1117 |
+
0.914
|
| 1118 |
+
],
|
| 1119 |
+
"angle": 0,
|
| 1120 |
+
"content": "[10] Meng-Chieh Wu, Ching-Te Chiu, and Kun-Hsuan Wu, \"Multi-teacher knowledge distillation for compressed video action recognition on deep neural networks,\" in ICASSP 2019-2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2019, pp. 2202-2206."
|
| 1121 |
+
},
|
| 1122 |
+
{
|
| 1123 |
+
"type": "list",
|
| 1124 |
+
"bbox": [
|
| 1125 |
+
0.088,
|
| 1126 |
+
0.12,
|
| 1127 |
+
0.489,
|
| 1128 |
+
0.914
|
| 1129 |
+
],
|
| 1130 |
+
"angle": 0,
|
| 1131 |
+
"content": null
|
| 1132 |
+
},
|
| 1133 |
+
{
|
| 1134 |
+
"type": "ref_text",
|
| 1135 |
+
"bbox": [
|
| 1136 |
+
0.513,
|
| 1137 |
+
0.092,
|
| 1138 |
+
0.914,
|
| 1139 |
+
0.166
|
| 1140 |
+
],
|
| 1141 |
+
"angle": 0,
|
| 1142 |
+
"content": "[11] Shangchen Du, Shan You, Xiaojie Li, Jianlong Wu, Fei Wang, Chen Qian, and Changshui Zhang, “Agree to disagree: Adaptive ensemble knowledge distillation in gradient space,” Advances in Neural Information Processing Systems, vol. 33, 2020."
|
| 1143 |
+
},
|
| 1144 |
+
{
|
| 1145 |
+
"type": "ref_text",
|
| 1146 |
+
"bbox": [
|
| 1147 |
+
0.513,
|
| 1148 |
+
0.178,
|
| 1149 |
+
0.914,
|
| 1150 |
+
0.252
|
| 1151 |
+
],
|
| 1152 |
+
"angle": 0,
|
| 1153 |
+
"content": "[12] Kisoo Kwon, Hwidong Na, Hoshik Lee, and Nam Soo Kim, “Adaptive knowledge distillation based on entropy,” in ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2020, pp. 7409-7413."
|
| 1154 |
+
},
|
| 1155 |
+
{
|
| 1156 |
+
"type": "ref_text",
|
| 1157 |
+
"bbox": [
|
| 1158 |
+
0.513,
|
| 1159 |
+
0.264,
|
| 1160 |
+
0.913,
|
| 1161 |
+
0.307
|
| 1162 |
+
],
|
| 1163 |
+
"angle": 0,
|
| 1164 |
+
"content": "[13] Jimmy Ba and Rich Caruana, “Do deep nets really need to be deep?,” in Advances in Neural Information Processing Systems, 2014, pp. 2654-2662."
|
| 1165 |
+
},
|
| 1166 |
+
{
|
| 1167 |
+
"type": "ref_text",
|
| 1168 |
+
"bbox": [
|
| 1169 |
+
0.513,
|
| 1170 |
+
0.319,
|
| 1171 |
+
0.914,
|
| 1172 |
+
0.393
|
| 1173 |
+
],
|
| 1174 |
+
"angle": 0,
|
| 1175 |
+
"content": "[14] Sungsoo Ahn, Shell Xu Hu, Andreas Damianou, Neil D Lawrence, and Zhenwen Dai, “Variational information distillation for knowledge transfer,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2019, pp. 9163–9171."
|
| 1176 |
+
},
|
| 1177 |
+
{
|
| 1178 |
+
"type": "ref_text",
|
| 1179 |
+
"bbox": [
|
| 1180 |
+
0.513,
|
| 1181 |
+
0.405,
|
| 1182 |
+
0.913,
|
| 1183 |
+
0.448
|
| 1184 |
+
],
|
| 1185 |
+
"angle": 0,
|
| 1186 |
+
"content": "[15] Yonglong Tian, Dilip Krishnan, and Phillip Isola, “Contrastive representation distillation,” in International Conference on Learning Representations, 2020."
|
| 1187 |
+
},
|
| 1188 |
+
{
|
| 1189 |
+
"type": "ref_text",
|
| 1190 |
+
"bbox": [
|
| 1191 |
+
0.513,
|
| 1192 |
+
0.46,
|
| 1193 |
+
0.913,
|
| 1194 |
+
0.503
|
| 1195 |
+
],
|
| 1196 |
+
"angle": 0,
|
| 1197 |
+
"content": "[16] Xu Lan, Xiatian Zhu, and Shaogang Gong, “Knowledge distillation by on-the-fly native ensemble,” arXiv preprint arXiv:1806.04606, 2018."
|
| 1198 |
+
},
|
| 1199 |
+
{
|
| 1200 |
+
"type": "ref_text",
|
| 1201 |
+
"bbox": [
|
| 1202 |
+
0.513,
|
| 1203 |
+
0.515,
|
| 1204 |
+
0.913,
|
| 1205 |
+
0.574
|
| 1206 |
+
],
|
| 1207 |
+
"angle": 0,
|
| 1208 |
+
"content": "[17] Defang Chen, Jian-Ping Mei, Can Wang, Yan Feng, and Chun Chen, \"Online knowledge distillation with diverse peers,\" in Proceedings of the AAAI Conference on Artificial Intelligence, 2020, pp. 3430-3437."
|
| 1209 |
+
},
|
| 1210 |
+
{
|
| 1211 |
+
"type": "ref_text",
|
| 1212 |
+
"bbox": [
|
| 1213 |
+
0.513,
|
| 1214 |
+
0.585,
|
| 1215 |
+
0.913,
|
| 1216 |
+
0.63
|
| 1217 |
+
],
|
| 1218 |
+
"angle": 0,
|
| 1219 |
+
"content": "[18] Yang Liu, Wei Zhang, and Jun Wang, \"Adaptive multiteacher multi-level knowledge distillation,\" Neurocomputing, vol. 415, pp. 106-113, 2020."
|
| 1220 |
+
},
|
| 1221 |
+
{
|
| 1222 |
+
"type": "ref_text",
|
| 1223 |
+
"bbox": [
|
| 1224 |
+
0.513,
|
| 1225 |
+
0.641,
|
| 1226 |
+
0.913,
|
| 1227 |
+
0.685
|
| 1228 |
+
],
|
| 1229 |
+
"angle": 0,
|
| 1230 |
+
"content": "[19] Alex Krizhevsky and Geoffrey Hinton, “Learning multiple layers of features from tiny images,” Technical Report, 2009."
|
| 1231 |
+
},
|
| 1232 |
+
{
|
| 1233 |
+
"type": "ref_text",
|
| 1234 |
+
"bbox": [
|
| 1235 |
+
0.513,
|
| 1236 |
+
0.696,
|
| 1237 |
+
0.913,
|
| 1238 |
+
0.77
|
| 1239 |
+
],
|
| 1240 |
+
"angle": 0,
|
| 1241 |
+
"content": "[20] Sukmin Yun, Jongjin Park, Kimin Lee, and Jinwoo Shin, \"Regularizing class-wise predictions via self-knowledge distillation,\" in Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2020, pp. 13876-13885."
|
| 1242 |
+
},
|
| 1243 |
+
{
|
| 1244 |
+
"type": "list",
|
| 1245 |
+
"bbox": [
|
| 1246 |
+
0.513,
|
| 1247 |
+
0.092,
|
| 1248 |
+
0.914,
|
| 1249 |
+
0.77
|
| 1250 |
+
],
|
| 1251 |
+
"angle": 0,
|
| 1252 |
+
"content": null
|
| 1253 |
+
}
|
| 1254 |
+
],
|
| 1255 |
+
[
|
| 1256 |
+
{
|
| 1257 |
+
"type": "title",
|
| 1258 |
+
"bbox": [
|
| 1259 |
+
0.085,
|
| 1260 |
+
0.089,
|
| 1261 |
+
0.188,
|
| 1262 |
+
0.109
|
| 1263 |
+
],
|
| 1264 |
+
"angle": 0,
|
| 1265 |
+
"content": "Appendix"
|
| 1266 |
+
},
|
| 1267 |
+
{
|
| 1268 |
+
"type": "title",
|
| 1269 |
+
"bbox": [
|
| 1270 |
+
0.085,
|
| 1271 |
+
0.119,
|
| 1272 |
+
0.361,
|
| 1273 |
+
0.138
|
| 1274 |
+
],
|
| 1275 |
+
"angle": 0,
|
| 1276 |
+
"content": "1.1. The Detailed Description of \\( w_{inter}^{k} \\)"
|
| 1277 |
+
},
|
| 1278 |
+
{
|
| 1279 |
+
"type": "text",
|
| 1280 |
+
"bbox": [
|
| 1281 |
+
0.083,
|
| 1282 |
+
0.145,
|
| 1283 |
+
0.488,
|
| 1284 |
+
0.22
|
| 1285 |
+
],
|
| 1286 |
+
"angle": 0,
|
| 1287 |
+
"content": "To stabilize the knowledge transfer process, we design the student to be more focused on imitating the teacher with a similar feature space and \\( w_{inter}^{k} \\) indeed serves as such a similarity measure representing the discriminability of a teacher classifier in the student feature space."
|
| 1288 |
+
},
|
| 1289 |
+
{
|
| 1290 |
+
"type": "text",
|
| 1291 |
+
"bbox": [
|
| 1292 |
+
0.082,
|
| 1293 |
+
0.221,
|
| 1294 |
+
0.488,
|
| 1295 |
+
0.327
|
| 1296 |
+
],
|
| 1297 |
+
"angle": 0,
|
| 1298 |
+
"content": "A more detailed discussion is presented in the following paragraphs. As shown in Figure 5, samples belonging to class-1 and class-2 are depicted as circles and triangles, respectively. Although the decision surfaces of teacher-1 (in Figure 5(b)) and teacher-2 (in Figure 5(c)) correctly classify these samples in their own feature spaces, their discriminability in the student feature space is different (in Figure 5(a))."
|
| 1299 |
+
},
|
| 1300 |
+
{
|
| 1301 |
+
"type": "text",
|
| 1302 |
+
"bbox": [
|
| 1303 |
+
0.082,
|
| 1304 |
+
0.328,
|
| 1305 |
+
0.488,
|
| 1306 |
+
0.417
|
| 1307 |
+
],
|
| 1308 |
+
"angle": 0,
|
| 1309 |
+
"content": "In order to stabilize the whole knowledge transfer process, we expect the student to pay more attention to mimicking the teacher with a similar feature space. In this sense, we conclude that teacher-1 for the student is more suitable since its decision surface performs better compared to that of teacher-2 in the student feature space, as shown in Figure 5(a)."
|
| 1310 |
+
},
|
| 1311 |
+
{
|
| 1312 |
+
"type": "text",
|
| 1313 |
+
"bbox": [
|
| 1314 |
+
0.082,
|
| 1315 |
+
0.418,
|
| 1316 |
+
0.489,
|
| 1317 |
+
0.569
|
| 1318 |
+
],
|
| 1319 |
+
"angle": 0,
|
| 1320 |
+
"content": "Suppose the point A, B, C are the extracted features of the same sample in the feature space of student, teacher-1 and teacher-2, respectively. If we move the student feature (point A) towards the feature from teacher-1 (point B), point A will be correctly classified by the student's own classifier with only minor or even no adjustment. But if we move the student feature (point A) towards the feature from teacher-2 (point C), it will become even harder to be correctly classified by the student, which may disrupt the training of the student classifier and slow down the model convergence."
|
| 1321 |
+
},
|
| 1322 |
+
{
|
| 1323 |
+
"type": "image",
|
| 1324 |
+
"bbox": [
|
| 1325 |
+
0.088,
|
| 1326 |
+
0.58,
|
| 1327 |
+
0.46,
|
| 1328 |
+
0.717
|
| 1329 |
+
],
|
| 1330 |
+
"angle": 0,
|
| 1331 |
+
"content": null
|
| 1332 |
+
},
|
| 1333 |
+
{
|
| 1334 |
+
"type": "image_caption",
|
| 1335 |
+
"bbox": [
|
| 1336 |
+
0.084,
|
| 1337 |
+
0.731,
|
| 1338 |
+
0.488,
|
| 1339 |
+
0.747
|
| 1340 |
+
],
|
| 1341 |
+
"angle": 0,
|
| 1342 |
+
"content": "Fig. 5. The comparison of teacher-1 and teacher-2 classifiers."
|
| 1343 |
+
},
|
| 1344 |
+
{
|
| 1345 |
+
"type": "title",
|
| 1346 |
+
"bbox": [
|
| 1347 |
+
0.084,
|
| 1348 |
+
0.783,
|
| 1349 |
+
0.345,
|
| 1350 |
+
0.799
|
| 1351 |
+
],
|
| 1352 |
+
"angle": 0,
|
| 1353 |
+
"content": "2.2. Additional Dataset experiments"
|
| 1354 |
+
},
|
| 1355 |
+
{
|
| 1356 |
+
"type": "text",
|
| 1357 |
+
"bbox": [
|
| 1358 |
+
0.083,
|
| 1359 |
+
0.808,
|
| 1360 |
+
0.488,
|
| 1361 |
+
0.868
|
| 1362 |
+
],
|
| 1363 |
+
"angle": 0,
|
| 1364 |
+
"content": "We add more experiments on Dogs and Tinyimagenet datasets to further verify the effectiveness of our proposed CA-MKD. Table 5 and Table 6 show that our CA-MKD can consistently surpass all the competitors in two more challenging datasets."
|
| 1365 |
+
},
|
| 1366 |
+
{
|
| 1367 |
+
"type": "text",
|
| 1368 |
+
"bbox": [
|
| 1369 |
+
0.084,
|
| 1370 |
+
0.869,
|
| 1371 |
+
0.489,
|
| 1372 |
+
0.915
|
| 1373 |
+
],
|
| 1374 |
+
"angle": 0,
|
| 1375 |
+
"content": "The hyper-parameters for the TinyImagenet dataset are exactly the same as those of CIFAR-100 in our submission. Another dataset (Dogs) contains fine-grained images with"
|
| 1376 |
+
},
|
| 1377 |
+
{
|
| 1378 |
+
"type": "text",
|
| 1379 |
+
"bbox": [
|
| 1380 |
+
0.509,
|
| 1381 |
+
0.092,
|
| 1382 |
+
0.914,
|
| 1383 |
+
0.122
|
| 1384 |
+
],
|
| 1385 |
+
"angle": 0,
|
| 1386 |
+
"content": "larger resolutions, which requires a different training procedure. We follow the setting of a previous work [20]."
|
| 1387 |
+
},
|
| 1388 |
+
{
|
| 1389 |
+
"type": "table_caption",
|
| 1390 |
+
"bbox": [
|
| 1391 |
+
0.509,
|
| 1392 |
+
0.149,
|
| 1393 |
+
0.915,
|
| 1394 |
+
0.179
|
| 1395 |
+
],
|
| 1396 |
+
"angle": 0,
|
| 1397 |
+
"content": "Table 5. Top-1 test accuracy of CA-MKD compared to multiple-teacher knowledge distillation methods."
|
| 1398 |
+
},
|
| 1399 |
+
{
|
| 1400 |
+
"type": "table",
|
| 1401 |
+
"bbox": [
|
| 1402 |
+
0.536,
|
| 1403 |
+
0.18,
|
| 1404 |
+
0.892,
|
| 1405 |
+
0.366
|
| 1406 |
+
],
|
| 1407 |
+
"angle": 0,
|
| 1408 |
+
"content": "<table><tr><td>Dataset</td><td>Dogs</td><td>Tinyimagenet</td></tr><tr><td rowspan=\"2\">Teacher</td><td>ResNet34</td><td>ResNet32x4</td></tr><tr><td>64.76±1.06</td><td>53.38±0.11</td></tr><tr><td rowspan=\"2\">Student</td><td>ShuffleNetV2x0.5</td><td>VGG8</td></tr><tr><td>59.36±0.73</td><td>44.40±0.15</td></tr><tr><td>AVER</td><td>64.49±0.16</td><td>47.82±0.15</td></tr><tr><td>FitNet-MKD</td><td>64.11±0.80</td><td>47.82±0.05</td></tr><tr><td>EBKD</td><td>64.32±0.23</td><td>47.20±0.10</td></tr><tr><td>AEKD</td><td>64.19±0.34</td><td>47.62±0.38</td></tr><tr><td>CA-MKD</td><td>65.19±0.23</td><td>49.55±0.12</td></tr></table>"
|
| 1409 |
+
},
|
| 1410 |
+
{
|
| 1411 |
+
"type": "table_caption",
|
| 1412 |
+
"bbox": [
|
| 1413 |
+
0.509,
|
| 1414 |
+
0.393,
|
| 1415 |
+
0.915,
|
| 1416 |
+
0.422
|
| 1417 |
+
],
|
| 1418 |
+
"angle": 0,
|
| 1419 |
+
"content": "Table 6. Top-1 test accuracy of CA-MKD compared to single-teacher knowledge distillation methods."
|
| 1420 |
+
},
|
| 1421 |
+
{
|
| 1422 |
+
"type": "table",
|
| 1423 |
+
"bbox": [
|
| 1424 |
+
0.544,
|
| 1425 |
+
0.423,
|
| 1426 |
+
0.882,
|
| 1427 |
+
0.624
|
| 1428 |
+
],
|
| 1429 |
+
"angle": 0,
|
| 1430 |
+
"content": "<table><tr><td>Dataset</td><td>Dogs</td><td>Tinyimagenet</td></tr><tr><td rowspan=\"2\">Teacher</td><td>ResNet34</td><td>ResNet32x4</td></tr><tr><td>65.97</td><td>53.45</td></tr><tr><td rowspan=\"2\">Student</td><td>ShuffleNetV2x0.5</td><td>VGG8</td></tr><tr><td>59.36±0.73</td><td>44.40±0.15</td></tr><tr><td>KD</td><td>63.90±0.08</td><td>47.42±0.07</td></tr><tr><td>FitNet</td><td>62.45±0.61</td><td>47.24±0.28</td></tr><tr><td>AT</td><td>63.48±0.60</td><td>45.73±0.05</td></tr><tr><td>VID</td><td>64.45±0.23</td><td>47.76±0.08</td></tr><tr><td>CRD</td><td>64.61±0.17</td><td>48.11±0.07</td></tr><tr><td>CA-MKD</td><td>65.19±0.23</td><td>49.55±0.12</td></tr></table>"
|
| 1431 |
+
}
|
| 1432 |
+
]
|
| 1433 |
+
]
|
2201.00xxx/2201.00007/8b96971b-1059-4fc8-881b-f19173e04430_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:90bba09e17ad4fe23c95729cf3ac0b170502903af0b0fef593dc2833115033ca
|
| 3 |
+
size 1571141
|
2201.00xxx/2201.00007/full.md
ADDED
|
@@ -0,0 +1,232 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# CONFIDENCE-AWARE MULTI-TEACHER KNOWLEDGE DISTILLATION
|
| 2 |
+
|
| 3 |
+
Hailin Zhang
|
| 4 |
+
|
| 5 |
+
Defang Chen
|
| 6 |
+
|
| 7 |
+
Can Wang\*
|
| 8 |
+
|
| 9 |
+
Zhejiang University, China; ZJU-Bangsun Joint Research Center.
|
| 10 |
+
|
| 11 |
+
{zzzhl, defchern, wcan} @ zju.edu.cn
|
| 12 |
+
|
| 13 |
+
# ABSTRACT
|
| 14 |
+
|
| 15 |
+
Knowledge distillation is initially introduced to utilize additional supervision from a single teacher model for the student model training. To boost the student performance, some recent variants attempt to exploit diverse knowledge sources from multiple teachers. However, existing studies mainly integrate knowledge from diverse sources by averaging over multiple teacher predictions or combining them using other label-free strategies, which may mislead the student in the presence of low-quality teacher predictions. To tackle this problem, we propose Confidence-Aware Multi-teacher Knowledge Distillation (CA-MKD), which adaptively assigns sample-wise reliability for each teacher prediction with the help of ground-truth labels, with those teacher predictions close to one-hot labels assigned large weights. Besides, CA-MKD incorporates features in intermediate layers to stabilize the knowledge transfer process. Extensive experiments show our CA-MKD consistently outperforms all compared state-of-the-art methods across various teacher-student architectures. Code is available: https://github.com/Rorozhl/CA-MKD.
|
| 16 |
+
|
| 17 |
+
Index Terms— knowledge distillation, multiple teachers, confidence-aware weighting
|
| 18 |
+
|
| 19 |
+
# 1. INTRODUCTION
|
| 20 |
+
|
| 21 |
+
Nowadays, deep neural networks have achieved unprecedented success in various applications [1, 2, 3]. However, these complex models requiring huge memory footprint and computational resources are difficult to be applied on embedded devices. Knowledge distillation (KD) is thus proposed as a model compression technique to resolve this issue, which improves the accuracy of a lightweight student model by distilling the knowledge from a pre-trained cumbersome teacher model [4]. The transferred knowledge was originally formalized as softmax outputs (soft targets) of the teacher model [4] and later extended to the intermediate teacher layers for achieving more promising performance [5, 6, 7].
|
| 22 |
+
|
| 23 |
+

|
| 24 |
+
Fig. 1. Comparison of the previous average direction (green line) and our proposed confidence-aware direction (red line).
|
| 25 |
+
|
| 26 |
+
As the wisdom of the masses exceeds that of the wisest individual, some multi-teacher knowledge distillation (MKD) methods are proposed and have been proven to be beneficial [8, 9, 10, 11, 12]. Basically, they combine predictions from multiple teachers with the fixed weight assignment [8, 9, 10] or other various label-free schemes, such as calculating weights based on an optimization problem or entropy criterion [11, 12], etc. However, fixed weights fail to differentiate high-quality teachers from low-quality ones [8, 9, 10], and the other schemes may mislead the student in the presence of low-quality teacher predictions [11, 12]. Figure 1 provides an intuitive illustration on this issue, where the student trained with the average weighting strategy might deviate from the correct direction once most teacher predictions are biased.
|
| 27 |
+
|
| 28 |
+
Fortunately, we actually have ground-truth labels in hand to quantify our confidence about teacher predictions and then filter out low-quality predictions for better student training. To this end, we propose Confidence-Aware Multi-teacher Knowledge Distillation (CA-MKD) to learn sample-wise weights by taking the prediction confidence of teachers into consideration for adaptive knowledge integration. The confidence is obtained based on the cross entropy loss between prediction distributions and ground-truth labels. Compared with previous label-free weighting strategies, our technique enables the student to learn from a relatively correct direction.
|
| 29 |
+
|
| 30 |
+
Note that our confidence-aware mechanism not only is able to adaptively weight different teacher predictions based on their sample-wise confidence, but also can be extended to the student-teacher feature pairs in intermediate layers. With the help of our generated flexible and effective weights, we could avoid those poor teacher predictions dominating the knowledge transfer process and considerably improve the student performance on eight teacher-student architecture combinations (as shown in Table 1 and 3).
|
| 31 |
+
|
| 32 |
+
# 2. RELATED WORK
|
| 33 |
+
|
| 34 |
+
Knowledge Distillation. Vanilla KD aims to transfer knowledge from a complex network (teacher) to a simple network (student) with the KL divergence minimization between their softened outputs [13, 4]. Mimicking the teacher representations from intermediate layers was later proposed to explore more knowledge forms [5, 6, 14, 15, 7]. Compared to these methods that require pre-training a teacher, some works simultaneously train multiple students and encourage them to learn from each other instead [16, 17]. Our technique differs from these online KD methods since we attempt to distill knowledge from multiple pre-trained teachers.
|
| 35 |
+
|
| 36 |
+
Multi-teacher Knowledge Distillation. Rather than employing a single teacher, MKD boosts the effectiveness of distillation by integrating predictions from multiple teachers. A bunch of methods are proposed, such as simply assigning average or other fixed weights for different teachers [8, 9, 10], and calculating the weights based on entropy [12], latent factor [18] or multi-objective optimization in the gradient space [11]. However, these label-free strategies may mislead the student training in the presence of low-quality predictions. For instance, entropy-based strategy will prefer models with blind faith since it favors predictions with low variance [12]; optimization-based strategy favors majority opinion and will be easily misled by noisy data [11]. In contrast, our CA-MKD quantifies the teacher predictions based on ground-truth labels and further improves the student performance.
|
| 37 |
+
|
| 38 |
+
# 3. METHODOLOGY
|
| 39 |
+
|
| 40 |
+
We denote $\mathcal{D} = \{\pmb{x}_i,\pmb{y}_i\}_i^N$ as a labeled training set, $N$ is the number of samples, $K$ is the number of teachers. $F\in \mathbb{R}^{h\times w\times c}$ is the output of the last network block. We denote $\pmb {z} = [z^1,\dots,z^C ]$ as the logits output, where $C$ is the category number. The final model prediction is obtained by a softmax function $\sigma (z^c) = \frac{\exp(z^c / \tau)}{\sum_j\exp(z^j / \tau)}$ with temperature $\tau$ . In the following sections, we will introduce our CA-MKD in detail.
|
| 41 |
+
|
| 42 |
+
# 3.1. The Loss of Teacher Predictions
|
| 43 |
+
|
| 44 |
+
To effectively aggregate the prediction distributions of multiple teachers, we assign different weights which reflects their sample-wise confidence by calculating the cross entropy loss between teacher predictions and ground-truth labels
|
| 45 |
+
|
| 46 |
+
$$
|
| 47 |
+
\mathcal {L} _ {C E _ {K D}} ^ {k} = - \sum_ {c = 1} ^ {C} y ^ {c} \log \left(\sigma \left(z _ {T _ {k}} ^ {c}\right)\right), \tag {1}
|
| 48 |
+
$$
|
| 49 |
+
|
| 50 |
+
$$
|
| 51 |
+
w _ {K D} ^ {k} = \frac {1}{K - 1} \left(1 - \frac {\exp \left(\mathcal {L} _ {C E _ {K D}} ^ {k}\right)}{\sum_ {j} \exp \left(\mathcal {L} _ {C E _ {K D}} ^ {j}\right)}\right), \tag {2}
|
| 52 |
+
$$
|
| 53 |
+
|
| 54 |
+
where $T_{k}$ denotes the $k$ th teacher. The less $\mathcal{L}_{CE_{KD}}^{k}$ corresponds to the larger $w_{KD}^{k}$ . The overall teacher predictions are
|
| 55 |
+
|
| 56 |
+

|
| 57 |
+
Fig. 2. An overview of our CA-MKD. The weight calculation of teacher predictions and intermediate teacher features are depicted as the red lines and green lines, respectively.
|
| 58 |
+
|
| 59 |
+
then aggregated with calculated weights
|
| 60 |
+
|
| 61 |
+
$$
|
| 62 |
+
\mathcal {L} _ {K D} = - \sum_ {k = 1} ^ {K} w _ {K D} ^ {k} \sum_ {c = 1} ^ {C} z _ {T _ {k}} ^ {c} \log \left(\sigma \left(z _ {S} ^ {c}\right)\right). \tag {3}
|
| 63 |
+
$$
|
| 64 |
+
|
| 65 |
+
According to the above formulas, the teacher whose prediction is closer to ground-truth labels will be assigned larger weight $w_{KD}^{k}$ , since it has enough confidence to make accurate judgement for correct guidance. In contrast, if we simply acquire the weights by calculating the entropy of teacher predictions [12], the weight will become large when the output distribution is sharp regardless of whether the highest probability category is correct. In this case, those biased targets may misguide the student training and further hurt its distillation performance.
|
| 66 |
+
|
| 67 |
+
# 3.2. The Loss of Intermediate Teacher Features
|
| 68 |
+
|
| 69 |
+
In addition to KD Loss, inspired by FitNets [5], we believe that the intermediate layers are also beneficial for learning structural knowledge, and thus extend our method to intermediate layers for mining more information. The calculation of intermediate feature matching is presented as follows
|
| 70 |
+
|
| 71 |
+
$$
|
| 72 |
+
z _ {S \rightarrow T _ {k}} = W _ {T _ {k}} h _ {S}, \tag {4}
|
| 73 |
+
$$
|
| 74 |
+
|
| 75 |
+
$$
|
| 76 |
+
\mathcal {L} _ {C E _ {\text {i n t e r}}} ^ {k} = - \sum_ {c = 1} ^ {C} y ^ {c} \log \left(\sigma \left(z _ {S \rightarrow T _ {k}} ^ {c}\right)\right), \tag {5}
|
| 77 |
+
$$
|
| 78 |
+
|
| 79 |
+
$$
|
| 80 |
+
w _ {i n t e r} ^ {k} = \frac {1}{K - 1} \left(1 - \frac {\exp \left(\mathcal {L} _ {C E _ {i n t e r}} ^ {k}\right)}{\sum_ {j} \exp \left(\mathcal {L} _ {C E _ {i n t e r}} ^ {j}\right)}\right). \tag {6}
|
| 81 |
+
$$
|
| 82 |
+
|
| 83 |
+
where $W_{T_k}$ is the final classifier of the $k$ th teacher. $h_S \in \mathbb{R}^c$ is the last student feature vector, i.e., $h_S = \mathrm{AvgPooling}(F_S)$ . $\mathcal{L}_{CE_{inter}}^k$ is obtained by passing $h_S$ through each teacher classifier. The calculation of $w_{inter}^k$ is similar to that of $w_{KD}^k$ .
|
| 84 |
+
|
| 85 |
+
Table 1. Top-1 test accuracy of MKD methods by distilling the knowledge on multiple teachers with the same architectures.
|
| 86 |
+
|
| 87 |
+
<table><tr><td>Teacher Ensemble</td><td>WRN40-2 76.62±0.26 79.62</td><td>ResNet56 73.28±0.30 76.00</td><td>VGG13 75.17±0.18 77.07</td><td>VGG13 75.17±0.18 77.07</td><td>ResNet32x4 79.31±0.14 81.16</td><td>ResNet32x4 79.31±0.14 81.16</td><td>ResNet32x4 79.31±0.14 81.16</td></tr><tr><td>Student</td><td>ShuffleNetV1 71.70±0.43</td><td>MobileNetV2 65.64±0.19</td><td>VGG8 70.74±0.40</td><td>MobileNetV2 65.64±0.19</td><td>ResNet8x4 72.79±0.14</td><td>ShuffleNetV2 72.94±0.24</td><td>VGG8 70.74±0.40</td></tr><tr><td>AVER [8]</td><td>76.30±0.25</td><td>70.21±0.10</td><td>74.07±0.23</td><td>68.91±0.35</td><td>74.99±0.24</td><td>75.87±0.19</td><td>73.26±0.39</td></tr><tr><td>FitNet-MKD [5]</td><td>76.59±0.17</td><td>70.69±0.56</td><td>73.97±0.22</td><td>68.48±0.07</td><td>74.86±0.21</td><td>76.09±0.13</td><td>73.27±0.19</td></tr><tr><td>EBKD [12]</td><td>76.61±0.14</td><td>70.91±0.22</td><td>74.10±0.27</td><td>68.24±0.82</td><td>75.59±0.15</td><td>76.41±0.12</td><td>73.60±0.22</td></tr><tr><td>AEKD [11]</td><td>76.34±0.24</td><td>70.47±0.15</td><td>73.78±0.03</td><td>68.39±0.50</td><td>74.75±0.28</td><td>75.95±0.20</td><td>73.11±0.27</td></tr><tr><td>CA-MKD</td><td>77.94±0.31</td><td>71.38±0.02</td><td>74.30±0.16</td><td>69.41±0.20</td><td>75.90±0.13</td><td>77.41±0.14</td><td>75.26±0.32</td></tr></table>
|
| 88 |
+
|
| 89 |
+
Table 2. Top-1 test accuracy of CA-MKD compared to single-teacher knowledge distillation methods.
|
| 90 |
+
|
| 91 |
+
<table><tr><td>Teacher</td><td>WRN40-2 76.62±0.26</td><td>ResNet32x4 79.31±0.14</td><td>ResNet56 73.28±0.30</td></tr><tr><td>Student</td><td>ShuffleNetV1 71.70±0.19</td><td>VGG8 70.74±0.40</td><td>MobileNetV2 65.64±0.43</td></tr><tr><td>KD [4]</td><td>75.77±0.14</td><td>72.90±0.34</td><td>69.96±0.14</td></tr><tr><td>FitNet [5]</td><td>76.22±0.21</td><td>72.55±0.66</td><td>69.02±0.28</td></tr><tr><td>AT [6]</td><td>76.44±0.38</td><td>72.16±0.12</td><td>69.79±0.26</td></tr><tr><td>VID [14]</td><td>76.32±0.08</td><td>73.09±0.29</td><td>69.45±0.17</td></tr><tr><td>CRD [15]</td><td>76.58±0.23</td><td>73.57±0.25</td><td>71.15±0.44</td></tr><tr><td>CA-MKD</td><td>77.94±0.31</td><td>75.26±0.13</td><td>71.38±0.02</td></tr></table>
|
| 92 |
+
|
| 93 |
+
To stabilize the knowledge transfer process, we design the student to be more focused on imitating the teacher with a similar feature space and $w_{inter}^{k}$ indeed serves as such a similarity measure representing the discriminability of a teacher classifier in the student feature space. The ablation study also shows that utilizing $w_{inter}^{k}$ instead of $w_{KD}^{k}$ for the knowledge aggregation in intermediate layers is more effective.
|
| 94 |
+
|
| 95 |
+
$$
|
| 96 |
+
\mathcal {L} _ {i n t e r} = \sum_ {k = 1} ^ {K} w _ {i n t e r} ^ {k} \left\| F _ {T _ {k}} - r \left(F _ {S}\right) \right\| _ {2} ^ {2}, \tag {7}
|
| 97 |
+
$$
|
| 98 |
+
|
| 99 |
+
where $r(\cdot)$ is a function for aligning the student and teacher feature dimensions. The $\ell_2$ loss function is used as distance measure of intermediate features. Finally, the overall training loss between feature pairs will be aggregated by $w_{inter}^k$ .
|
| 100 |
+
|
| 101 |
+
In our work, only the output features of the last block are adopted to avoid incurring too much computational cost.
|
| 102 |
+
|
| 103 |
+
# 3.3. The Overall Loss Function
|
| 104 |
+
|
| 105 |
+
In addition to the aforementioned two losses, a regular cross entropy with the ground-truth labels is calculated
|
| 106 |
+
|
| 107 |
+
$$
|
| 108 |
+
\mathcal {L} _ {C E} = - \sum_ {c = 1} ^ {C} y ^ {c} \log \left(\sigma \left(z _ {S} ^ {c}\right)\right). \tag {8}
|
| 109 |
+
$$
|
| 110 |
+
|
| 111 |
+
The overall loss function of our CA-MKD is summarized as
|
| 112 |
+
|
| 113 |
+
$$
|
| 114 |
+
\mathcal {L} = \mathcal {L} _ {C E} + \alpha \mathcal {L} _ {K D} + \beta \mathcal {L} _ {\text {i n t e r}}, \tag {9}
|
| 115 |
+
$$
|
| 116 |
+
|
| 117 |
+
where $\alpha$ and $\beta$ are hyper-parameters to balance the effect of knowledge distillation and standard cross entropy losses.
|
| 118 |
+
|
| 119 |
+
# 4. EXPERIMENT
|
| 120 |
+
|
| 121 |
+
In this section, we conduct extensive experiments on CIFAR-100 dataset [19] to verify the effectiveness of our proposed CA-MKD. We adopt eight different teacher-student combinations based on popular neural network architectures. All compared multi-teacher knowledge distillation (MKD) methods use three teachers except for special declarations.
|
| 122 |
+
|
| 123 |
+
Compared Methods. Besides the naïve AVER [8], we reimplement a single-teacher based method FitNet [5] on multiple teachers and denote it as FitNet-MKD. FitNet-MKD will leverage extra information coming from averaged intermediate teacher features. We also reimplement an entropy-based MKD method [12], which has achieved remarkable results in acoustic experiments, on our image classification task and we denote it as EBKD. As for AEKD, we adopt its logits-based version with the author provided code [11].
|
| 124 |
+
|
| 125 |
+
Hyper-parameters. All neural networks are optimized by stochastic gradient descent with momentum 0.9, weight decay 0.0001. The batch size is set to 64. As the previous works do [15, 7], the initial learning rate is set to 0.1, except MobileNetV2, ShuffleNetV1 and ShuffleNetV2 are set to 0.05. The learning rate is multiplied by 0.1 at 150, 180 and 210 of the total 240 training epochs. For the sake of fairness, the temperature $\tau$ is set to 4 and the $\alpha$ is set to 1 in all methods. Furthermore, we set the $\beta$ of our CA-MKD to 50 throughout the experiments. All results are reported in means and standard deviations over 3 runs with different random seeds.
|
| 126 |
+
|
| 127 |
+
# 4.1. Results on the Same Teacher Architectures
|
| 128 |
+
|
| 129 |
+
Table 1 shows the top-1 accuracy comparison on CIFAR-100. We also include the results of teacher ensemble with the majority voting strategy. We can find that CA-MKD surpasses
|
| 130 |
+
|
| 131 |
+
Table 3. Top-1 test accuracy of MKD approaches by distilling the knowledge on multiple teachers with different architectures.
|
| 132 |
+
|
| 133 |
+
<table><tr><td>VGG8</td><td>AVER</td><td>FitNet-MKD</td><td>EBKD</td><td>AEKD</td><td>CA-MKD</td><td>ResNet8x4</td><td>ResNet20x4</td><td>ResNet32x4</td></tr><tr><td>70.74±0.40</td><td>74.55±0.24</td><td>74.47±0.21</td><td>74.07±0.17</td><td>74.69±0.29</td><td>75.96±0.05</td><td>72.79</td><td>78.39</td><td>79.31</td></tr></table>
|
| 134 |
+
|
| 135 |
+

|
| 136 |
+
(a) prediction weights
|
| 137 |
+
|
| 138 |
+

|
| 139 |
+
(b) feature weights
|
| 140 |
+
|
| 141 |
+
all competitors across various architectures. Specifically, compared to the second best method (EBKD), CA-MKD outperforms it with $0.81\%$ average improvement<sup>1</sup>, and achieves $1.66\%$ absolute accuracy improvement in the best case.
|
| 142 |
+
|
| 143 |
+
To verify the benefits of diverse information brought by multiple teachers, we compare CA-MKD with some excellent single-teacher based methods. The results in Table 6 show the student indeed has the potential to learn knowledge from multiple teachers, and its accuracy is further improved compared with the single-teacher methods to a certain extent.
|
| 144 |
+
|
| 145 |
+
# 4.2. Results on the Different Teacher Architectures
|
| 146 |
+
|
| 147 |
+
Table 3 shows the results of training a student (VGG8) with three different teacher architectures, i.e., ResNet8x4, ResNet20x4 and ResNet32x4. We find the student accuracy becomes even higher than that of training with three ResNet32x4 teachers, which may be attributed to that the knowledge diversity is enlarged in different architectures.
|
| 148 |
+
|
| 149 |
+
Since the performance of ResNet20x4/ResNet32x4 is better than that of ResNet8x4, we could reasonably believe that for most training samples, the student will put larger weights on predictions from the former two rather than the latter one, which is verified in Figure 3. Moreover, our CA-MKD can capture those samples on which the predictions are more confident by ResNet8x4, and assign them dynamic weights to help the student model achieve better performance.
|
| 150 |
+
|
| 151 |
+
# 4.3. Impact of the Teacher Number
|
| 152 |
+
|
| 153 |
+
As shown in Figure 4, the student model trained with CA-MKD generally achieves satisfactory results. For example,
|
| 154 |
+
|
| 155 |
+

|
| 156 |
+
Fig. 3. The visualization results of learned weights by CA-MKD on each training sample.
|
| 157 |
+
(a) ResNet56 & MobileNetV2
|
| 158 |
+
(b) ResNet32x4 & ShuffleNetV2
|
| 159 |
+
Fig. 4. The effect of different teacher numbers.
|
| 160 |
+
|
| 161 |
+
Table 4. Ablation study with VGG13 & MobileNetV2.
|
| 162 |
+
|
| 163 |
+
<table><tr><td>avg weight</td><td>w/o Linter</td><td>w/o wkinter</td><td>CA-MKD</td></tr><tr><td>67.74±0.87</td><td>68.11±0.02</td><td>68.82±0.63</td><td>69.41±0.20</td></tr></table>
|
| 164 |
+
|
| 165 |
+
on the "ResNet56 & MobileNetV2" setting, the accuracy of CA-MKD increases continually as the number of teachers increases and it surpasses the competitors with three teachers even those competitors are trained with more teachers.
|
| 166 |
+
|
| 167 |
+
# 4.4. Ablation Study
|
| 168 |
+
|
| 169 |
+
We summarize the observations from Table 4 as follows:
|
| 170 |
+
|
| 171 |
+
(1) avg weight. Simply averaging multiple teachers will cause $1.67\%$ accuracy drop, which confirms the necessity of treating different teachers based on their specific quality.
|
| 172 |
+
(2) w/o $\mathcal{L}_{inter}$ . The accuracy shows a considerable reduction when we remove Equation (7), demonstrating that the intermediate layer contains useful information for distillation.
|
| 173 |
+
(3) $\mathrm{w / o}$ $w_{inter}^{k}$ . we directly use the $w_{KD}^{k}$ obtained from the last layer to integrate intermediate features. The lower result indicates the benefits of designing a separate way of calculating weights for the intermediate layer.
|
| 174 |
+
|
| 175 |
+
# 5. CONCLUSION
|
| 176 |
+
|
| 177 |
+
In this paper, we introduce confidence-aware mechanism on both predictions and intermediate features for multi-teacher knowledge distillation. The confidence of teachers is calculated based on the closeness between their predictions or features and the ground-truth labels for the reliability identification on each training sample. With the guidance of labels, our technique effectively integrates diverse knowledge from multiple teachers for the student training. Extensive empirical results show that our method outperforms all competitors in various teacher-student architectures.
|
| 178 |
+
|
| 179 |
+
# 6. REFERENCES
|
| 180 |
+
|
| 181 |
+
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, “Deep residual learning for image recognition,” in Proceedings of the IEEE conference on computer vision and pattern recognition, 2016, pp. 770–778.
|
| 182 |
+
[2] David Silver, Julian Schrittwieser, Karen Simonyan, Ioannis Antonoglou, Aja Huang, Arthur Guez, Thomas Hubert, Lucas Baker, Matthew Lai, Adrian Bolton, et al., "Mastering the game of go without human knowledge," Nature, vol. 550, no. 7676, pp. 354-359, 2017.
|
| 183 |
+
[3] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova, “BERT: pre-training of deep bidirectional transformers for language understanding,” in North American Chapter of the Association for Computational Linguistics: Human Language Technologies, 2019, pp. 4171–4186.
|
| 184 |
+
[4] Geoffrey Hinton, Oriol Vinyals, and Jeff Dean, “Distilling the knowledge in a neural network,” arXiv preprint arXiv:1503.02531, 2015.
|
| 185 |
+
[5] Adriana Romero, Nicolas Ballas, Samira Ebrahimi Kahou, Antoine Chassang, Carlo Gatta, and Yoshua Bengio, “FitNets: Hints for thin deep nets,” in International Conference on Learning Representations, 2015.
|
| 186 |
+
[6] Sergey Zagoruyko and Nikos Komodakis, “Paying more attention to attention: improving the performance of convolutional neural networks via attention transfer,” in International Conference on Learning Representations, 2017.
|
| 187 |
+
[7] Defang Chen, Jian-Ping Mei, Yuan Zhang, Can Wang, Zhe Wang, Yan Feng, and Chun Chen, “Cross-layer distillation with semantic calibration,” in Proceedings of the AAAI Conference on Artificial Intelligence, 2021, vol. 35, pp. 7028–7036.
|
| 188 |
+
[8] Shan You, Chang Xu, Chao Xu, and Dacheng Tao, "Learning from multiple teacher networks," in Proceedings of the 23rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, 2017, pp. 1285-1294.
|
| 189 |
+
[9] Takashi Fukuda, Masayuki Suzuki, Gakuto Kurata, Samuel Thomas, Jia Cui, and Bhuvana Ramabhadran, "Efficient knowledge distillation from an ensemble of teachers," in Interspeech, 2017, pp. 3697-3701.
|
| 190 |
+
[10] Meng-Chieh Wu, Ching-Te Chiu, and Kun-Hsuan Wu, "Multi-teacher knowledge distillation for compressed video action recognition on deep neural networks," in ICASSP 2019-2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2019, pp. 2202-2206.
|
| 191 |
+
|
| 192 |
+
[11] Shangchen Du, Shan You, Xiaojie Li, Jianlong Wu, Fei Wang, Chen Qian, and Changshui Zhang, “Agree to disagree: Adaptive ensemble knowledge distillation in gradient space,” Advances in Neural Information Processing Systems, vol. 33, 2020.
|
| 193 |
+
[12] Kisoo Kwon, Hwidong Na, Hoshik Lee, and Nam Soo Kim, “Adaptive knowledge distillation based on entropy,” in ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2020, pp. 7409-7413.
|
| 194 |
+
[13] Jimmy Ba and Rich Caruana, “Do deep nets really need to be deep?,” in Advances in Neural Information Processing Systems, 2014, pp. 2654-2662.
|
| 195 |
+
[14] Sungsoo Ahn, Shell Xu Hu, Andreas Damianou, Neil D Lawrence, and Zhenwen Dai, “Variational information distillation for knowledge transfer,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2019, pp. 9163–9171.
|
| 196 |
+
[15] Yonglong Tian, Dilip Krishnan, and Phillip Isola, “Contrastive representation distillation,” in International Conference on Learning Representations, 2020.
|
| 197 |
+
[16] Xu Lan, Xiatian Zhu, and Shaogang Gong, “Knowledge distillation by on-the-fly native ensemble,” arXiv preprint arXiv:1806.04606, 2018.
|
| 198 |
+
[17] Defang Chen, Jian-Ping Mei, Can Wang, Yan Feng, and Chun Chen, "Online knowledge distillation with diverse peers," in Proceedings of the AAAI Conference on Artificial Intelligence, 2020, pp. 3430-3437.
|
| 199 |
+
[18] Yang Liu, Wei Zhang, and Jun Wang, "Adaptive multiteacher multi-level knowledge distillation," Neurocomputing, vol. 415, pp. 106-113, 2020.
|
| 200 |
+
[19] Alex Krizhevsky and Geoffrey Hinton, “Learning multiple layers of features from tiny images,” Technical Report, 2009.
|
| 201 |
+
[20] Sukmin Yun, Jongjin Park, Kimin Lee, and Jinwoo Shin, "Regularizing class-wise predictions via self-knowledge distillation," in Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2020, pp. 13876-13885.
|
| 202 |
+
|
| 203 |
+
# Appendix
|
| 204 |
+
|
| 205 |
+
# 1.1. The Detailed Description of $w_{inter}^{k}$
|
| 206 |
+
|
| 207 |
+
To stabilize the knowledge transfer process, we design the student to be more focused on imitating the teacher with a similar feature space and $w_{inter}^{k}$ indeed serves as such a similarity measure representing the discriminability of a teacher classifier in the student feature space.
|
| 208 |
+
|
| 209 |
+
A more detailed discussion is presented in the following paragraphs. As shown in Figure 5, samples belonging to class-1 and class-2 are depicted as circles and triangles, respectively. Although the decision surfaces of teacher-1 (in Figure 5(b)) and teacher-2 (in Figure 5(c)) correctly classify these samples in their own feature spaces, their discriminability in the student feature space is different (in Figure 5(a)).
|
| 210 |
+
|
| 211 |
+
In order to stabilize the whole knowledge transfer process, we expect the student to pay more attention to mimicking the teacher with a similar feature space. In this sense, we conclude that teacher-1 for the student is more suitable since its decision surface performs better compared to that of teacher-2 in the student feature space, as shown in Figure 5(a).
|
| 212 |
+
|
| 213 |
+
Suppose the point A, B, C are the extracted features of the same sample in the feature space of student, teacher-1 and teacher-2, respectively. If we move the student feature (point A) towards the feature from teacher-1 (point B), point A will be correctly classified by the student's own classifier with only minor or even no adjustment. But if we move the student feature (point A) towards the feature from teacher-2 (point C), it will become even harder to be correctly classified by the student, which may disrupt the training of the student classifier and slow down the model convergence.
|
| 214 |
+
|
| 215 |
+

|
| 216 |
+
Fig. 5. The comparison of teacher-1 and teacher-2 classifiers.
|
| 217 |
+
|
| 218 |
+
# 2.2. Additional Dataset experiments
|
| 219 |
+
|
| 220 |
+
We add more experiments on Dogs and Tinyimagenet datasets to further verify the effectiveness of our proposed CA-MKD. Table 5 and Table 6 show that our CA-MKD can consistently surpass all the competitors on two more challenging datasets.
|
| 221 |
+
|
| 222 |
+
The hyper-parameters for the TinyImagenet dataset are exactly the same as those of CIFAR-100 in our submission. Another dataset (Dogs) contains fine-grained images with
|
| 223 |
+
|
| 224 |
+
larger resolutions, which requires a different training procedure. We follow the setting of a previous work [20].
|
| 225 |
+
|
| 226 |
+
Table 5. Top-1 test accuracy of CA-MKD compared to multiple-teacher knowledge distillation methods.
|
| 227 |
+
|
| 228 |
+
<table><tr><td>Dataset</td><td>Dogs</td><td>Tinyimagenet</td></tr><tr><td rowspan="2">Teacher</td><td>ResNet34</td><td>ResNet32x4</td></tr><tr><td>64.76±1.06</td><td>53.38±0.11</td></tr><tr><td rowspan="2">Student</td><td>ShuffleNetV2x0.5</td><td>VGG8</td></tr><tr><td>59.36±0.73</td><td>44.40±0.15</td></tr><tr><td>AVER</td><td>64.49±0.16</td><td>47.82±0.15</td></tr><tr><td>FitNet-MKD</td><td>64.11±0.80</td><td>47.82±0.05</td></tr><tr><td>EBKD</td><td>64.32±0.23</td><td>47.20±0.10</td></tr><tr><td>AEKD</td><td>64.19±0.34</td><td>47.62±0.38</td></tr><tr><td>CA-MKD</td><td>65.19±0.23</td><td>49.55±0.12</td></tr></table>
|
| 229 |
+
|
| 230 |
+
Table 6. Top-1 test accuracy of CA-MKD compared to single-teacher knowledge distillation methods.
|
| 231 |
+
|
| 232 |
+
<table><tr><td>Dataset</td><td>Dogs</td><td>Tinyimagenet</td></tr><tr><td rowspan="2">Teacher</td><td>ResNet34</td><td>ResNet32x4</td></tr><tr><td>65.97</td><td>53.45</td></tr><tr><td rowspan="2">Student</td><td>ShuffleNetV2x0.5</td><td>VGG8</td></tr><tr><td>59.36±0.73</td><td>44.40±0.15</td></tr><tr><td>KD</td><td>63.90±0.08</td><td>47.42±0.07</td></tr><tr><td>FitNet</td><td>62.45±0.61</td><td>47.24±0.28</td></tr><tr><td>AT</td><td>63.48±0.60</td><td>45.73±0.05</td></tr><tr><td>VID</td><td>64.45±0.23</td><td>47.76±0.08</td></tr><tr><td>CRD</td><td>64.61±0.17</td><td>48.11±0.07</td></tr><tr><td>CA-MKD</td><td>65.19±0.23</td><td>49.55±0.12</td></tr></table>
|
2201.00xxx/2201.00007/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:506d344a0ca3e98089f6f092272a7012ef5e86211d440d3f69e6f079388c102b
|
| 3 |
+
size 438864
|
2201.00xxx/2201.00007/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2201.00xxx/2201.00011/cf9a0028-3213-43e4-a02b-f411f6ccdabf_content_list.json
ADDED
|
@@ -0,0 +1,1903 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "An Efficient Federated Distillation Learning System for Multi-task Time Series Classification",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
81,
|
| 8 |
+
65,
|
| 9 |
+
913,
|
| 10 |
+
133
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Huanlai Xing, Member, IEEE, Zhiwen Xiao, Rong Qu, Senior Member, IEEE, Zonghai Zhu, and Bowen Zhao",
|
| 17 |
+
"bbox": [
|
| 18 |
+
124,
|
| 19 |
+
150,
|
| 20 |
+
870,
|
| 21 |
+
184
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "Abstract—This paper proposes an efficient federated distillation learning system (EFDLS) for multi-task time series classification (TSC). EFDLS consists of a central server and multiple mobile users, where different users may run different TSC tasks. EFDLS has two novel components, namely a feature-based student-teacher (FBST) framework and a distance-based weights matching (DBWM) scheme. Within each user, the FBST framework transfers knowledge from its teacher's hidden layers to its student's hidden layers via knowledge distillation, with the teacher and student having identical network structure. For each connected user, its student model's hidden layers' weights are uploaded to the EFDLS server periodically. The DBWM scheme is deployed on the server, with the least square distance used to measure the similarity between the weights of two given models. This scheme finds a partner for each connected user such that the user's and its partner's weights are the closest among all the weights uploaded. The server exchanges and sends back the user's and its partner's weights to these two users which then load the received weights to their teachers' hidden layers. Experimental results show that the proposed EFDLS achieves excellent performance on a set of selected UCR2018 datasets regarding top-1 accuracy.",
|
| 28 |
+
"bbox": [
|
| 29 |
+
104,
|
| 30 |
+
205,
|
| 31 |
+
880,
|
| 32 |
+
349
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "Index Terms—Deep Learning, Data Mining, Federated Learning, Knowledge Distillation, Time Series Classification.",
|
| 39 |
+
"bbox": [
|
| 40 |
+
104,
|
| 41 |
+
362,
|
| 42 |
+
781,
|
| 43 |
+
376
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "1 INTRODUCTION",
|
| 50 |
+
"text_level": 1,
|
| 51 |
+
"bbox": [
|
| 52 |
+
75,
|
| 53 |
+
419,
|
| 54 |
+
228,
|
| 55 |
+
434
|
| 56 |
+
],
|
| 57 |
+
"page_idx": 0
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"type": "text",
|
| 61 |
+
"text": "TIME series data is a series of time-ordered data points associated with one or more time-dependent variables and has been successfully applied to areas such as anomaly detection [1], [2], traffic flow forecasting [3], service matching [4], stock prediction [5], electroencephalogram (ECG) detection [6] and parking behavior prediction [7]. A significant amount of research attention has been dedicated to time series classification (TSC) [8]. For example, Wang et al. [9] introduced a fully convolutional network (FCN) for local feature extraction. Zhang et al. [10] devised an attentional prototype network (TapNet) to capture rich representations from the input. Karim et al. [11] proposed a long short-term memory (LSTM) fully convolutional network (FCN-LSTM) for multivariate TSC. A robust temporal feature network (RTFN) hybridizing temporal feature network and LSTM-based attention network was applied to extracting both the local and global patterns of data [12]. Li et al. [13] put forward a shapelet-neural network approach to mine highly-diversified representative shapelets from the input. Lee et al. [14] designed a learnable dynamic temporal pooling method to reduce the temporal pooling size of the hidden representations obtained.",
|
| 62 |
+
"bbox": [
|
| 63 |
+
71,
|
| 64 |
+
446,
|
| 65 |
+
491,
|
| 66 |
+
766
|
| 67 |
+
],
|
| 68 |
+
"page_idx": 0
|
| 69 |
+
},
|
| 70 |
+
{
|
| 71 |
+
"type": "text",
|
| 72 |
+
"text": "TSC algorithms are usually data-driven, where data",
|
| 73 |
+
"bbox": [
|
| 74 |
+
94,
|
| 75 |
+
767,
|
| 76 |
+
488,
|
| 77 |
+
781
|
| 78 |
+
],
|
| 79 |
+
"page_idx": 0
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"type": "list",
|
| 83 |
+
"sub_type": "text",
|
| 84 |
+
"list_items": [
|
| 85 |
+
"- H. Xing, Z. Zhu, and B. Zhao are with the School of Computing and Artificial Intelligence, Southwest Jiaotong University, Chengdu 611756, China (Emails: hxx@home.swjtu.edu.cn; zhu@swjtu.edu.cn; cn16bz@icloud.com).",
|
| 86 |
+
"Z. Xiao is with Southwest Jiaotong University, Chengdu 611756, China, and Chengdu University of Information Technology, Chengdu 610103, China (Email: xiao1994zw@163.com).",
|
| 87 |
+
"R. Qu is with the School of Computer Science, University of Nottingham, Nottingham NG7 2RD 455356, UK (Email: rong.qu@nottingham.ac.uk)"
|
| 88 |
+
],
|
| 89 |
+
"bbox": [
|
| 90 |
+
71,
|
| 91 |
+
797,
|
| 92 |
+
488,
|
| 93 |
+
912
|
| 94 |
+
],
|
| 95 |
+
"page_idx": 0
|
| 96 |
+
},
|
| 97 |
+
{
|
| 98 |
+
"type": "text",
|
| 99 |
+
"text": "Manuscript received XX, XX; revised XX, XX (Corresponding author: Zhiwen Xiao).",
|
| 100 |
+
"bbox": [
|
| 101 |
+
71,
|
| 102 |
+
917,
|
| 103 |
+
488,
|
| 104 |
+
941
|
| 105 |
+
],
|
| 106 |
+
"page_idx": 0
|
| 107 |
+
},
|
| 108 |
+
{
|
| 109 |
+
"type": "text",
|
| 110 |
+
"text": "comes from various application domains. Some data may contain private and sensitive information, such as bank account and ECG. However, traditional data collection operations could not well protect such information, easily resulting in users' privacy leakage during the data collection and distribution processes involved in model training. To overcome the problem above, Google [15], [16], [17] invented federated learning (FL). FL allows users to collectively harvest the advantages of shared models trained from their local data without sending original data to others. FederatedAveraging (FedAvg), federated transfer learning (FTL) and federated knowledge distillation (FKD) are the three mainstream research directions.",
|
| 111 |
+
"bbox": [
|
| 112 |
+
501,
|
| 113 |
+
446,
|
| 114 |
+
924,
|
| 115 |
+
635
|
| 116 |
+
],
|
| 117 |
+
"page_idx": 0
|
| 118 |
+
},
|
| 119 |
+
{
|
| 120 |
+
"type": "text",
|
| 121 |
+
"text": "FedAvg calculates the average weights of the models of all users and shares the weights with each user in the FL system [18]. For instance, Ma et al. [19] devised a communication-efficient federated generalized tensor factorization for electronic health records. Liu et al. [20] used a federated adaptation framework to leverage the sparsity property of neural networks for generating privacy-preserving representations. A hierarchical personalized FL method aggregated heterogeneous user models, with privacy heterogeneity and model heterogeneity considered [21]. Yang et al. [22] modified the FedAvg method using partial networks for COVID-19 detection.",
|
| 122 |
+
"bbox": [
|
| 123 |
+
501,
|
| 124 |
+
635,
|
| 125 |
+
924,
|
| 126 |
+
810
|
| 127 |
+
],
|
| 128 |
+
"page_idx": 0
|
| 129 |
+
},
|
| 130 |
+
{
|
| 131 |
+
"type": "text",
|
| 132 |
+
"text": "FTL introduces transfer learning techniques to promote knowledge transfer between different users, increasing system accuracy [23]. For example, Yang et al. [24] developed an FTL framework, FedSteg, for secure image steganalysis. An FTL method with dynamic gradient aggregation was proposed to weight the local gradients during the aggregation step when handling speech recognition tasks [25]. Majeed et al. [26] proposed an FTL-based structure to address traffic classification problems.",
|
| 133 |
+
"bbox": [
|
| 134 |
+
501,
|
| 135 |
+
811,
|
| 136 |
+
924,
|
| 137 |
+
944
|
| 138 |
+
],
|
| 139 |
+
"page_idx": 0
|
| 140 |
+
},
|
| 141 |
+
{
|
| 142 |
+
"type": "header",
|
| 143 |
+
"text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015",
|
| 144 |
+
"bbox": [
|
| 145 |
+
73,
|
| 146 |
+
31,
|
| 147 |
+
421,
|
| 148 |
+
44
|
| 149 |
+
],
|
| 150 |
+
"page_idx": 0
|
| 151 |
+
},
|
| 152 |
+
{
|
| 153 |
+
"type": "page_number",
|
| 154 |
+
"text": "1",
|
| 155 |
+
"bbox": [
|
| 156 |
+
911,
|
| 157 |
+
32,
|
| 158 |
+
921,
|
| 159 |
+
42
|
| 160 |
+
],
|
| 161 |
+
"page_idx": 0
|
| 162 |
+
},
|
| 163 |
+
{
|
| 164 |
+
"type": "aside_text",
|
| 165 |
+
"text": "arXiv:2201.00011v1 [cs.LG] 30 Dec 2021",
|
| 166 |
+
"bbox": [
|
| 167 |
+
22,
|
| 168 |
+
263,
|
| 169 |
+
57,
|
| 170 |
+
708
|
| 171 |
+
],
|
| 172 |
+
"page_idx": 0
|
| 173 |
+
},
|
| 174 |
+
{
|
| 175 |
+
"type": "text",
|
| 176 |
+
"text": "Unlike FedAvg and FTL, FKD takes the average of all users' weights as the weights for all teachers and transfers each teacher's knowledge to its corresponding student via knowledge distillation (KD) [27]. A group knowledge transfer training algorithm was adopted to train small convolutional neural networks (CNNs) and transfer their knowledge to a prominent server-side CNN [28]. Mishra et al. [29] proposed a resource-aware FKD approach for network resource allocation. Sohei et al. [30] devised a distillation-based semi-supervised FL framework for communication-efficient collaborative training with private data. Nowadays, FKD is attracting increasingly more research attention.",
|
| 177 |
+
"bbox": [
|
| 178 |
+
71,
|
| 179 |
+
51,
|
| 180 |
+
491,
|
| 181 |
+
227
|
| 182 |
+
],
|
| 183 |
+
"page_idx": 1
|
| 184 |
+
},
|
| 185 |
+
{
|
| 186 |
+
"type": "text",
|
| 187 |
+
"text": "In addition, there is a variety of FL-based algorithms in the literature. For instance, Chen et al. [31] applied asynchronous learning and temporally weighted aggregation to enhancing system's performance. Sattler et al. [32] presented a sparse ternary compression method to meet various requirements of FL environment. A cooperative game involving a gradient algorithm was designed to tackle image classification and speech recognition tasks [33]. An ensemble FL system used a randomly selected subset of clients to learn multiple global models against malicious clients [34]. Hong et al. [35] combined adversarial learning and FL to produce federated adversarial debiasing for fair and transferable representations. Zhou et al. [36] proposed a privacy-preserving distributed contextual federated online learning framework with big data support for social recommender systems. Pan et al. [37] put forward a multi-granular federated neural architecture search framework to enable the automation of model architecture search in a federated and privacy-preserved setting.",
|
| 188 |
+
"bbox": [
|
| 189 |
+
71,
|
| 190 |
+
227,
|
| 191 |
+
491,
|
| 192 |
+
505
|
| 193 |
+
],
|
| 194 |
+
"page_idx": 1
|
| 195 |
+
},
|
| 196 |
+
{
|
| 197 |
+
"type": "text",
|
| 198 |
+
"text": "Most FL algorithms are developed around single-task problems, where multiple users work together to complete a task, e.g., COVID-19 detection [22], traffic classification [26] or speech recognition [25]. It is quite challenging to directly apply these algorithms to multi-task problems unless efficient knowledge sharing among different tasks is enabled. Unfortunately, TSC is usually multi-task-oriented. Time series data is collected from various application domains, such as ECG, traffic flow, human activity recognition. Each time series dataset has specific characteristics, e.g., length and variance, which may differ significantly from others. Thus, time series data is highly imbalanced and strongly non-independent, and identically distributed (Non-I.I.D.). In multi-task learning, it is commonly recognized that knowledge sharing among different tasks helps increase the efficiency and accuracy of each task [38]. For most TSC algorithms, how to securely share knowledge of similar expertise among different tasks is still challenging. In other words, user privacy and knowledge sharing are two critical issues that need to be carefully addressed when devising practical multi-task TSC algorithms. To the best of our knowledge, FL for multi-task TSC has not received sufficient research attention.",
|
| 199 |
+
"bbox": [
|
| 200 |
+
71,
|
| 201 |
+
505,
|
| 202 |
+
491,
|
| 203 |
+
824
|
| 204 |
+
],
|
| 205 |
+
"page_idx": 1
|
| 206 |
+
},
|
| 207 |
+
{
|
| 208 |
+
"type": "text",
|
| 209 |
+
"text": "We present an efficient federated distillation learning system (EFDLS) for multi-task TSC. This system consists of a central server and a number of mobile users running various TSC tasks simultaneously. Given two arbitrary users, they run either different tasks (e.g., ECG and motion) or the same task with different data sources to mimic real-world applications. EFDLS is characterized by a feature-based student-teacher (FBST) framework and a distance-based",
|
| 210 |
+
"bbox": [
|
| 211 |
+
71,
|
| 212 |
+
825,
|
| 213 |
+
491,
|
| 214 |
+
941
|
| 215 |
+
],
|
| 216 |
+
"page_idx": 1
|
| 217 |
+
},
|
| 218 |
+
{
|
| 219 |
+
"type": "text",
|
| 220 |
+
"text": "weights matching (DBWM) scheme. The FBST framework is deployed on each user, where the student and teacher models have identical network structure. Within each user, its teacher's hidden layers' knowledge is transferred to its student's hidden layers, helping the student mine high-quality features from the data. The DBWM scheme is deployed on the EFDLS server, where the least square distance (LSD) is used to measure the similarity between the weights of two models. When all connected users' weights are uploaded completely, for an arbitrary connected user, the DBWM scheme finds the one with the most similar weights among all connected users. After that, the server sends the connected user's weights to the found one that then loads the weights to its teacher model's hidden layers.",
|
| 221 |
+
"bbox": [
|
| 222 |
+
501,
|
| 223 |
+
53,
|
| 224 |
+
924,
|
| 225 |
+
257
|
| 226 |
+
],
|
| 227 |
+
"page_idx": 1
|
| 228 |
+
},
|
| 229 |
+
{
|
| 230 |
+
"type": "text",
|
| 231 |
+
"text": "Our main contributions are summarized below.",
|
| 232 |
+
"bbox": [
|
| 233 |
+
527,
|
| 234 |
+
257,
|
| 235 |
+
861,
|
| 236 |
+
271
|
| 237 |
+
],
|
| 238 |
+
"page_idx": 1
|
| 239 |
+
},
|
| 240 |
+
{
|
| 241 |
+
"type": "list",
|
| 242 |
+
"sub_type": "text",
|
| 243 |
+
"list_items": [
|
| 244 |
+
"- We propose EFDLS for multi-task TSC, where each user runs one TSC task at a time and different users may run different TSC tasks. The data generated on different users is different. EFDLS aims at providing secure knowledge sharing of similar expertise among different tasks. This problem has not attracted enough research attention.",
|
| 245 |
+
"- In EFDLS, feature-based knowledge distillation is used for knowledge transfer within each user. Unlike the traditional FKD that adopts the average weights of all users to supervise the feature extraction process in each user, EFDLS finds the one with the most similar expertise (i.e., a partner) for each user according to LSD and offers knowledge sharing between the user and its partner.",
|
| 246 |
+
"Experimental results demonstrate that EFDLS outperforms six state-of-the-art FL algorithms considering 44 well-known datasets selected in the UCR 2018 archive regarding the mean accuracy, 'win'/'tie'/'lose' measure, and AVG_rank, which are all based on the top-1 accuracy. That shows the effectiveness of EFDLS in addressing TSC problems."
|
| 247 |
+
],
|
| 248 |
+
"bbox": [
|
| 249 |
+
527,
|
| 250 |
+
277,
|
| 251 |
+
921,
|
| 252 |
+
599
|
| 253 |
+
],
|
| 254 |
+
"page_idx": 1
|
| 255 |
+
},
|
| 256 |
+
{
|
| 257 |
+
"type": "text",
|
| 258 |
+
"text": "The rest of the paper is organized below. Section 2 reviews the existing TSC algorithms. Section 3 overviews the architecture of EFDLS and describes its key components. Section 4 provides and analyzes the experimental results, and conclusion is drawn in Section 5.",
|
| 259 |
+
"bbox": [
|
| 260 |
+
503,
|
| 261 |
+
604,
|
| 262 |
+
924,
|
| 263 |
+
679
|
| 264 |
+
],
|
| 265 |
+
"page_idx": 1
|
| 266 |
+
},
|
| 267 |
+
{
|
| 268 |
+
"type": "text",
|
| 269 |
+
"text": "2 RELATED WORK",
|
| 270 |
+
"text_level": 1,
|
| 271 |
+
"bbox": [
|
| 272 |
+
504,
|
| 273 |
+
696,
|
| 274 |
+
671,
|
| 275 |
+
710
|
| 276 |
+
],
|
| 277 |
+
"page_idx": 1
|
| 278 |
+
},
|
| 279 |
+
{
|
| 280 |
+
"type": "text",
|
| 281 |
+
"text": "A large number of traditional and deep learning algorithms have been developed for TSC.",
|
| 282 |
+
"bbox": [
|
| 283 |
+
503,
|
| 284 |
+
715,
|
| 285 |
+
923,
|
| 286 |
+
746
|
| 287 |
+
],
|
| 288 |
+
"page_idx": 1
|
| 289 |
+
},
|
| 290 |
+
{
|
| 291 |
+
"type": "text",
|
| 292 |
+
"text": "2.1 Traditional Algorithms",
|
| 293 |
+
"text_level": 1,
|
| 294 |
+
"bbox": [
|
| 295 |
+
504,
|
| 296 |
+
762,
|
| 297 |
+
712,
|
| 298 |
+
777
|
| 299 |
+
],
|
| 300 |
+
"page_idx": 1
|
| 301 |
+
},
|
| 302 |
+
{
|
| 303 |
+
"type": "text",
|
| 304 |
+
"text": "Two representative streams of algorithms are distance- and feature-based. For distance-based algorithms, it is quite common to combine the dynamic time warping (DTW) and nearest neighbor (NN), e.g., $DTW_{A}$ , $DTW_{I}$ and $DTW_{D}$ [39]. Besides, a significant number of DTW-NN-based ensemble algorithms taking advantage of DTW and NN have been proposed in the community. For example, Line et al. [40] presented an elastic ensemble (EE) algorithm for feature extraction, with 11 types of 1-NN-based elastic distance considered. A collective of the transformation-based ensemble (COTE) with 37 NN-based classifiers was adopted to",
|
| 305 |
+
"bbox": [
|
| 306 |
+
501,
|
| 307 |
+
781,
|
| 308 |
+
924,
|
| 309 |
+
944
|
| 310 |
+
],
|
| 311 |
+
"page_idx": 1
|
| 312 |
+
},
|
| 313 |
+
{
|
| 314 |
+
"type": "header",
|
| 315 |
+
"text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015",
|
| 316 |
+
"bbox": [
|
| 317 |
+
73,
|
| 318 |
+
31,
|
| 319 |
+
421,
|
| 320 |
+
44
|
| 321 |
+
],
|
| 322 |
+
"page_idx": 1
|
| 323 |
+
},
|
| 324 |
+
{
|
| 325 |
+
"type": "page_number",
|
| 326 |
+
"text": "2",
|
| 327 |
+
"bbox": [
|
| 328 |
+
911,
|
| 329 |
+
32,
|
| 330 |
+
923,
|
| 331 |
+
42
|
| 332 |
+
],
|
| 333 |
+
"page_idx": 1
|
| 334 |
+
},
|
| 335 |
+
{
|
| 336 |
+
"type": "image",
|
| 337 |
+
"img_path": "images/20eba29cb041037b36d4654061077667ea8edeef94067a63e1f39e9351c2f737.jpg",
|
| 338 |
+
"image_caption": [
|
| 339 |
+
"Fig. 1. The schematic diagram of EFDLS. Note that 'FBST Framework' and 'DBWM Scheme' denote the feature-based student-teacher framework deployed on each user and the distance-based weights matching scheme run on the server. 'Conv x 9 128' represents a 1-dimensional convolutional neural network, where its filter size and channel sizes are set to 9 and 128. 'BN' is a batch normalization module, and 'ReLU' is the rectified linear unit activation function."
|
| 340 |
+
],
|
| 341 |
+
"image_footnote": [],
|
| 342 |
+
"bbox": [
|
| 343 |
+
76,
|
| 344 |
+
55,
|
| 345 |
+
924,
|
| 346 |
+
424
|
| 347 |
+
],
|
| 348 |
+
"page_idx": 2
|
| 349 |
+
},
|
| 350 |
+
{
|
| 351 |
+
"type": "text",
|
| 352 |
+
"text": "address various TSC problems [41]. The hierarchical vote collective of transformation-based ensembles (HIVE-COTE) [42] and local cascade ensemble [43] are two representative ensemble algorithms in the literature.",
|
| 353 |
+
"bbox": [
|
| 354 |
+
71,
|
| 355 |
+
520,
|
| 356 |
+
491,
|
| 357 |
+
579
|
| 358 |
+
],
|
| 359 |
+
"page_idx": 2
|
| 360 |
+
},
|
| 361 |
+
{
|
| 362 |
+
"type": "text",
|
| 363 |
+
"text": "For feature-based algorithms, their aim is to capture sufficient discriminate features from the given data. For instance, Line et al. [44] introduced a shapelet transformation method to find representative shapelets that reflected the trend of raw data. A bag-of-features representation framework was applied to extracting the information at different locations of sequences [45]. Dempster et al. [46] applied minimally random convolutional kernel transform to exploring the transformed features from data. In addition, the learned pattern similarity [47], bag of symbolic Fourier approximation symbols [48], hidden-unit logistic model [49], time series forest [50], and multi-feature dictionary representation and ensemble learning [51] are also representative algorithms.",
|
| 364 |
+
"bbox": [
|
| 365 |
+
71,
|
| 366 |
+
579,
|
| 367 |
+
490,
|
| 368 |
+
785
|
| 369 |
+
],
|
| 370 |
+
"page_idx": 2
|
| 371 |
+
},
|
| 372 |
+
{
|
| 373 |
+
"type": "text",
|
| 374 |
+
"text": "2.2 Deep Learning Algorithms",
|
| 375 |
+
"text_level": 1,
|
| 376 |
+
"bbox": [
|
| 377 |
+
71,
|
| 378 |
+
805,
|
| 379 |
+
310,
|
| 380 |
+
821
|
| 381 |
+
],
|
| 382 |
+
"page_idx": 2
|
| 383 |
+
},
|
| 384 |
+
{
|
| 385 |
+
"type": "text",
|
| 386 |
+
"text": "By unfolding the internal representation hierarchy of data, deep learning algorithms focus on extracting the intrinsic connections among representations. Most of the existing deep learning models are either single-network- or dual-network-based [12]. A single-network-based model captures the significant correlations within the representation hierarchy of data by one (usually hybridized) network, e.g., FCN [9], ResNet [9], shapelet-neural network [13], InceptionTime",
|
| 387 |
+
"bbox": [
|
| 388 |
+
71,
|
| 389 |
+
825,
|
| 390 |
+
491,
|
| 391 |
+
944
|
| 392 |
+
],
|
| 393 |
+
"page_idx": 2
|
| 394 |
+
},
|
| 395 |
+
{
|
| 396 |
+
"type": "text",
|
| 397 |
+
"text": "[52], dynamic temporal pooling [14], multi-process collaborative architecture [53], and multi-scale attention convolutional neural network [54]. In contrast, a dual-network-based model usually consists of two parallel networks, i.e., local-feature extraction network (LFN) and global-relation extraction network (GRN), such as FCN-LSTM [11], RTFN [12], ResNet-Transformer [55], RNTS [56], and TapNet [10].",
|
| 398 |
+
"bbox": [
|
| 399 |
+
501,
|
| 400 |
+
520,
|
| 401 |
+
921,
|
| 402 |
+
623
|
| 403 |
+
],
|
| 404 |
+
"page_idx": 2
|
| 405 |
+
},
|
| 406 |
+
{
|
| 407 |
+
"type": "text",
|
| 408 |
+
"text": "Almost all algorithms above emphasized single-task TSC, e.g., traffic or gesture classification. However, TSC usually involves multiple tasks in real-world scenarios, like various applications with different TSC tasks run on different mobile devices in a mobile computing environment. Enabling efficient knowledge sharing of similar expertise among different tasks helps increase the average accuracy of these tasks. Nevertheless, sharing knowledge among different TSC tasks securely and efficiently is still a challenge. That is what FL aims for.",
|
| 409 |
+
"bbox": [
|
| 410 |
+
503,
|
| 411 |
+
623,
|
| 412 |
+
921,
|
| 413 |
+
768
|
| 414 |
+
],
|
| 415 |
+
"page_idx": 2
|
| 416 |
+
},
|
| 417 |
+
{
|
| 418 |
+
"type": "text",
|
| 419 |
+
"text": "3 EFDLS",
|
| 420 |
+
"text_level": 1,
|
| 421 |
+
"bbox": [
|
| 422 |
+
504,
|
| 423 |
+
791,
|
| 424 |
+
602,
|
| 425 |
+
806
|
| 426 |
+
],
|
| 427 |
+
"page_idx": 2
|
| 428 |
+
},
|
| 429 |
+
{
|
| 430 |
+
"type": "text",
|
| 431 |
+
"text": "This section first overviews the architecture of EFDLS. Then, it introduces the feature-based student-teacher framework, distance-based weights matching scheme, and communication overhead.",
|
| 432 |
+
"bbox": [
|
| 433 |
+
503,
|
| 434 |
+
813,
|
| 435 |
+
921,
|
| 436 |
+
871
|
| 437 |
+
],
|
| 438 |
+
"page_idx": 2
|
| 439 |
+
},
|
| 440 |
+
{
|
| 441 |
+
"type": "text",
|
| 442 |
+
"text": "3.1 System Overview",
|
| 443 |
+
"text_level": 1,
|
| 444 |
+
"bbox": [
|
| 445 |
+
504,
|
| 446 |
+
893,
|
| 447 |
+
676,
|
| 448 |
+
907
|
| 449 |
+
],
|
| 450 |
+
"page_idx": 2
|
| 451 |
+
},
|
| 452 |
+
{
|
| 453 |
+
"type": "text",
|
| 454 |
+
"text": "EFDLS is a secure distributed system for multi-task TSC. There is a central server and multiple mobile users. Let $N_{tot}$",
|
| 455 |
+
"bbox": [
|
| 456 |
+
503,
|
| 457 |
+
912,
|
| 458 |
+
921,
|
| 459 |
+
943
|
| 460 |
+
],
|
| 461 |
+
"page_idx": 2
|
| 462 |
+
},
|
| 463 |
+
{
|
| 464 |
+
"type": "header",
|
| 465 |
+
"text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015",
|
| 466 |
+
"bbox": [
|
| 467 |
+
73,
|
| 468 |
+
32,
|
| 469 |
+
421,
|
| 470 |
+
44
|
| 471 |
+
],
|
| 472 |
+
"page_idx": 2
|
| 473 |
+
},
|
| 474 |
+
{
|
| 475 |
+
"type": "page_number",
|
| 476 |
+
"text": "3",
|
| 477 |
+
"bbox": [
|
| 478 |
+
911,
|
| 479 |
+
32,
|
| 480 |
+
921,
|
| 481 |
+
42
|
| 482 |
+
],
|
| 483 |
+
"page_idx": 2
|
| 484 |
+
},
|
| 485 |
+
{
|
| 486 |
+
"type": "text",
|
| 487 |
+
"text": "and $N_{conn}$ denote the numbers of total and connected users in the system, respectively, where $N_{conn} \\leq N_{tot}$ . Each user runs one TSC task at a time and different users might run different TSC tasks. For two arbitrary users, they run two different tasks, such as gesture and ECG classification, or the same task with different data sources.",
|
| 488 |
+
"bbox": [
|
| 489 |
+
71,
|
| 490 |
+
53,
|
| 491 |
+
490,
|
| 492 |
+
140
|
| 493 |
+
],
|
| 494 |
+
"page_idx": 3
|
| 495 |
+
},
|
| 496 |
+
{
|
| 497 |
+
"type": "text",
|
| 498 |
+
"text": "The overview of EFDLS is shown in Fig. 1. In the system, users train their models locally based on knowledge distillation and share their model weights with users with similar expertise via the server. We propose FBST, a feature-based student-teacher framework that is deployed on each user as its learning model. Within each user, its teacher's hidden layers' knowledge is transferred to its student's hidden layers. For each connected user, its student model's hidden layers' weights are uploaded to the EFDLS server periodically. We propose DBWM, a distance-based weights matching scheme deployed on the server, with the LSD adopted to measure the similarity between the weights of two given models. After the weights of all connected users are uploaded completely, for each connected user, the DBWM scheme is launched to find the one with the most similar weights among all connected users. In this way, every user has a partner to match with. For each connected user, its uploaded weights are sent to its partner that then loads these weights to its teacher model's hidden layers. The server's role looks like a telecom-network switch. The EFDLS system allows users to benefit from knowledge sharing without sacrificing security and privacy.",
|
| 499 |
+
"bbox": [
|
| 500 |
+
76,
|
| 501 |
+
142,
|
| 502 |
+
491,
|
| 503 |
+
465
|
| 504 |
+
],
|
| 505 |
+
"page_idx": 3
|
| 506 |
+
},
|
| 507 |
+
{
|
| 508 |
+
"type": "text",
|
| 509 |
+
"text": "3.2 Feature-based Student-Teacher Framework",
|
| 510 |
+
"text_level": 1,
|
| 511 |
+
"bbox": [
|
| 512 |
+
73,
|
| 513 |
+
501,
|
| 514 |
+
434,
|
| 515 |
+
516
|
| 516 |
+
],
|
| 517 |
+
"page_idx": 3
|
| 518 |
+
},
|
| 519 |
+
{
|
| 520 |
+
"type": "text",
|
| 521 |
+
"text": "In the FBST framework, the student and teacher models have identical network structure. Within each user, feature-based KD promotes knowledge transfer from the teacher's hidden layers to its student's hidden layers, helping the student capture rich and valuable representations from the input data.",
|
| 522 |
+
"bbox": [
|
| 523 |
+
71,
|
| 524 |
+
527,
|
| 525 |
+
490,
|
| 526 |
+
617
|
| 527 |
+
],
|
| 528 |
+
"page_idx": 3
|
| 529 |
+
},
|
| 530 |
+
{
|
| 531 |
+
"type": "text",
|
| 532 |
+
"text": "3.2.1 Feature Extractor",
|
| 533 |
+
"text_level": 1,
|
| 534 |
+
"bbox": [
|
| 535 |
+
73,
|
| 536 |
+
643,
|
| 537 |
+
250,
|
| 538 |
+
657
|
| 539 |
+
],
|
| 540 |
+
"page_idx": 3
|
| 541 |
+
},
|
| 542 |
+
{
|
| 543 |
+
"type": "text",
|
| 544 |
+
"text": "The feature extractor contains multiple hidden layers and a classifier, as shown in Fig. 1. The hidden layers are responsible for local-feature extraction, including three Convolutional Blocks (i.e., ConvBlock1, ConvBlock2, and ConvBlock3), an average pooling layer, and a dense (i.e., fully-connected) layer. Each ConvBlock consists of a 1-dimensional CNN (Conv) module, a batch normalization (BN) module, and a rectified linear unit activation (ReLU) function, defined as:",
|
| 545 |
+
"bbox": [
|
| 546 |
+
71,
|
| 547 |
+
669,
|
| 548 |
+
490,
|
| 549 |
+
800
|
| 550 |
+
],
|
| 551 |
+
"page_idx": 3
|
| 552 |
+
},
|
| 553 |
+
{
|
| 554 |
+
"type": "equation",
|
| 555 |
+
"text": "\n$$\nf _ {\\text {c o n v b l o c k}} (x) = f _ {\\text {r e l u}} \\left(f _ {\\text {b n}} \\left(W _ {\\text {c o n v}} \\otimes x + b _ {\\text {c o n v}}\\right)\\right) \\tag {1}\n$$\n",
|
| 556 |
+
"text_format": "latex",
|
| 557 |
+
"bbox": [
|
| 558 |
+
119,
|
| 559 |
+
818,
|
| 560 |
+
488,
|
| 561 |
+
835
|
| 562 |
+
],
|
| 563 |
+
"page_idx": 3
|
| 564 |
+
},
|
| 565 |
+
{
|
| 566 |
+
"type": "text",
|
| 567 |
+
"text": "where, $W_{conv}$ and $b_{conv}$ are the weight and bias matrices of CNN, respectively. $\\otimes$ represents the convolutional computation operation. $f_{bn}$ and $f_{relu}$ denote the batch normalization and ReLU functions, respectively.",
|
| 568 |
+
"bbox": [
|
| 569 |
+
71,
|
| 570 |
+
852,
|
| 571 |
+
491,
|
| 572 |
+
911
|
| 573 |
+
],
|
| 574 |
+
"page_idx": 3
|
| 575 |
+
},
|
| 576 |
+
{
|
| 577 |
+
"type": "text",
|
| 578 |
+
"text": "Let $x_{bn} = \\{x_1, x_2, \\dots, x_{N_{bn}}\\}$ denote the input of batch normalization (BN), where $x_i$ and $N_{bn}$ stand for the $i$ -th",
|
| 579 |
+
"bbox": [
|
| 580 |
+
71,
|
| 581 |
+
912,
|
| 582 |
+
491,
|
| 583 |
+
941
|
| 584 |
+
],
|
| 585 |
+
"page_idx": 3
|
| 586 |
+
},
|
| 587 |
+
{
|
| 588 |
+
"type": "text",
|
| 589 |
+
"text": "instance and batch size, respectively. $f_{bn}(x_{bn})$ is defined in Eq. (2)",
|
| 590 |
+
"bbox": [
|
| 591 |
+
503,
|
| 592 |
+
53,
|
| 593 |
+
921,
|
| 594 |
+
82
|
| 595 |
+
],
|
| 596 |
+
"page_idx": 3
|
| 597 |
+
},
|
| 598 |
+
{
|
| 599 |
+
"type": "equation",
|
| 600 |
+
"text": "\n$$\n\\begin{array}{l} f _ {b n} \\left(x _ {b n}\\right) = f _ {b n} \\left(x _ {1}, x _ {2}, \\dots , x _ {N _ {b n}}\\right) \\\\ = (\\alpha \\frac {x _ {1} - \\mu}{\\delta + \\zeta} + \\beta , \\alpha \\frac {x _ {2} - \\mu}{\\delta + \\zeta} + \\beta , \\dots , \\alpha \\frac {x _ {N _ {b n}} - \\mu}{\\delta + \\zeta} + \\beta) \\\\ \\end{array}\n$$\n",
|
| 601 |
+
"text_format": "latex",
|
| 602 |
+
"bbox": [
|
| 603 |
+
504,
|
| 604 |
+
88,
|
| 605 |
+
926,
|
| 606 |
+
137
|
| 607 |
+
],
|
| 608 |
+
"page_idx": 3
|
| 609 |
+
},
|
| 610 |
+
{
|
| 611 |
+
"type": "equation",
|
| 612 |
+
"text": "\n$$\n\\mu = \\frac {1}{N _ {b n}} \\sum_ {i = 1} ^ {N _ {b n}} x _ {i}\n$$\n",
|
| 613 |
+
"text_format": "latex",
|
| 614 |
+
"bbox": [
|
| 615 |
+
553,
|
| 616 |
+
138,
|
| 617 |
+
666,
|
| 618 |
+
176
|
| 619 |
+
],
|
| 620 |
+
"page_idx": 3
|
| 621 |
+
},
|
| 622 |
+
{
|
| 623 |
+
"type": "equation",
|
| 624 |
+
"text": "\n$$\n\\delta = \\sqrt {\\sum_ {i = 1} ^ {N _ {b n}} \\left(x _ {i} - \\mu\\right) ^ {2}} \\tag {2}\n$$\n",
|
| 625 |
+
"text_format": "latex",
|
| 626 |
+
"bbox": [
|
| 627 |
+
555,
|
| 628 |
+
179,
|
| 629 |
+
921,
|
| 630 |
+
236
|
| 631 |
+
],
|
| 632 |
+
"page_idx": 3
|
| 633 |
+
},
|
| 634 |
+
{
|
| 635 |
+
"type": "text",
|
| 636 |
+
"text": "where, $\\alpha \\in \\mathbb{R}^{+}$ and $\\beta \\in \\mathbb{R}$ are the parameters to be learned during training. $\\zeta > 0$ is an arbitrarily small number.",
|
| 637 |
+
"bbox": [
|
| 638 |
+
503,
|
| 639 |
+
236,
|
| 640 |
+
921,
|
| 641 |
+
265
|
| 642 |
+
],
|
| 643 |
+
"page_idx": 3
|
| 644 |
+
},
|
| 645 |
+
{
|
| 646 |
+
"type": "text",
|
| 647 |
+
"text": "The classifier is composed of a dense layer and a Softmax function, mapping high-level features extracted from the hidden layers to the corresponding label.",
|
| 648 |
+
"bbox": [
|
| 649 |
+
503,
|
| 650 |
+
266,
|
| 651 |
+
921,
|
| 652 |
+
310
|
| 653 |
+
],
|
| 654 |
+
"page_idx": 3
|
| 655 |
+
},
|
| 656 |
+
{
|
| 657 |
+
"type": "text",
|
| 658 |
+
"text": "3.2.2 Knowledge Distillation",
|
| 659 |
+
"text_level": 1,
|
| 660 |
+
"bbox": [
|
| 661 |
+
504,
|
| 662 |
+
320,
|
| 663 |
+
712,
|
| 664 |
+
335
|
| 665 |
+
],
|
| 666 |
+
"page_idx": 3
|
| 667 |
+
},
|
| 668 |
+
{
|
| 669 |
+
"type": "text",
|
| 670 |
+
"text": "Feature-based KD regularizes a student model by transferring knowledge from the corresponding teacher's hidden layers to the student's hidden layers [57]. For an arbitrary user, its student model captures sufficient discriminate representations from the data under its teacher model's supervision.",
|
| 671 |
+
"bbox": [
|
| 672 |
+
503,
|
| 673 |
+
338,
|
| 674 |
+
921,
|
| 675 |
+
422
|
| 676 |
+
],
|
| 677 |
+
"page_idx": 3
|
| 678 |
+
},
|
| 679 |
+
{
|
| 680 |
+
"type": "text",
|
| 681 |
+
"text": "Let $O_{i}^{T,1}$ , $O_{i}^{T,2}$ , $O_{i}^{T,3}$ , and $O_{i}^{T,4}$ be the outputs of ConvBlock 1, ConvBlock 2, ConvBlock 3, and the dense layer of the teacher's hidden layers. Let $O_{i}^{S,1}$ , $O_{i}^{S,2}$ , $O_{i}^{S,3}$ , and $O_{i}^{S,4}$ be the outputs of ConvBlock 1, ConvBlock 2, ConvBlock 3, and the dense layer of the student's hidden layers. Following the previous work [28], we define the KD loss, $\\mathcal{L}_i^{KD}$ , of $U_{i}$ as:",
|
| 682 |
+
"bbox": [
|
| 683 |
+
503,
|
| 684 |
+
422,
|
| 685 |
+
921,
|
| 686 |
+
527
|
| 687 |
+
],
|
| 688 |
+
"page_idx": 3
|
| 689 |
+
},
|
| 690 |
+
{
|
| 691 |
+
"type": "equation",
|
| 692 |
+
"text": "\n$$\n\\mathcal {L} _ {i} ^ {K D} = \\sum_ {m = 1} ^ {4} \\left| \\left| O _ {i} ^ {T, m} - O _ {i} ^ {S, m} \\right| \\right| ^ {2} \\tag {3}\n$$\n",
|
| 693 |
+
"text_format": "latex",
|
| 694 |
+
"bbox": [
|
| 695 |
+
606,
|
| 696 |
+
527,
|
| 697 |
+
921,
|
| 698 |
+
565
|
| 699 |
+
],
|
| 700 |
+
"page_idx": 3
|
| 701 |
+
},
|
| 702 |
+
{
|
| 703 |
+
"type": "text",
|
| 704 |
+
"text": "For $U_{i}$ , its total loss, $\\mathcal{L}_i$ , consists of KD loss, $\\mathcal{L}_i^{KD}$ , and supervised loss, $\\mathcal{L}_i^{Sup}$ . As the previous studies in [10], [11], [12] suggested, $\\mathcal{L}_i^{Sup}$ uses the cross-entropy function to measure the average difference between the ground truth labels and their prediction vectors, as shown in Eq. (4).",
|
| 705 |
+
"bbox": [
|
| 706 |
+
503,
|
| 707 |
+
570,
|
| 708 |
+
921,
|
| 709 |
+
646
|
| 710 |
+
],
|
| 711 |
+
"page_idx": 3
|
| 712 |
+
},
|
| 713 |
+
{
|
| 714 |
+
"type": "equation",
|
| 715 |
+
"text": "\n$$\n\\mathcal {L} _ {i} ^ {S u p} = - \\frac {1}{N _ {s e g}} \\sum_ {j = 1} ^ {N _ {s e g}} y _ {j} \\log \\left(\\hat {y} _ {j}\\right) \\tag {4}\n$$\n",
|
| 716 |
+
"text_format": "latex",
|
| 717 |
+
"bbox": [
|
| 718 |
+
607,
|
| 719 |
+
652,
|
| 720 |
+
921,
|
| 721 |
+
694
|
| 722 |
+
],
|
| 723 |
+
"page_idx": 3
|
| 724 |
+
},
|
| 725 |
+
{
|
| 726 |
+
"type": "text",
|
| 727 |
+
"text": "where, $N_{seg}$ is the number of input vectors, and $y_{i}$ and $\\hat{y}_j$ are the ground label and prediction vector of the $j$ -th input vector, respectively.",
|
| 728 |
+
"bbox": [
|
| 729 |
+
503,
|
| 730 |
+
699,
|
| 731 |
+
921,
|
| 732 |
+
743
|
| 733 |
+
],
|
| 734 |
+
"page_idx": 3
|
| 735 |
+
},
|
| 736 |
+
{
|
| 737 |
+
"type": "text",
|
| 738 |
+
"text": "The total loss, $\\mathcal{L}_i$ , is defined as:",
|
| 739 |
+
"bbox": [
|
| 740 |
+
527,
|
| 741 |
+
743,
|
| 742 |
+
746,
|
| 743 |
+
757
|
| 744 |
+
],
|
| 745 |
+
"page_idx": 3
|
| 746 |
+
},
|
| 747 |
+
{
|
| 748 |
+
"type": "equation",
|
| 749 |
+
"text": "\n$$\n\\mathcal {L} _ {i} = \\epsilon \\times \\mathcal {L} _ {i} ^ {S u p} + (1 - \\epsilon) \\times \\mathcal {L} _ {i} ^ {K D} \\tag {5}\n$$\n",
|
| 750 |
+
"text_format": "latex",
|
| 751 |
+
"bbox": [
|
| 752 |
+
599,
|
| 753 |
+
763,
|
| 754 |
+
921,
|
| 755 |
+
782
|
| 756 |
+
],
|
| 757 |
+
"page_idx": 3
|
| 758 |
+
},
|
| 759 |
+
{
|
| 760 |
+
"type": "text",
|
| 761 |
+
"text": "where, $\\epsilon \\in (0,1)$ is a coefficient to balance $\\mathcal{L}_i^{Sup}$ and $\\mathcal{L}_i^{KD}$ . In this paper, we set $\\epsilon = 0.9$ (More details can be found in Section 4.3).",
|
| 762 |
+
"bbox": [
|
| 763 |
+
503,
|
| 764 |
+
787,
|
| 765 |
+
921,
|
| 766 |
+
833
|
| 767 |
+
],
|
| 768 |
+
"page_idx": 3
|
| 769 |
+
},
|
| 770 |
+
{
|
| 771 |
+
"type": "text",
|
| 772 |
+
"text": "3.3 Distance-based Weights Matching",
|
| 773 |
+
"text_level": 1,
|
| 774 |
+
"bbox": [
|
| 775 |
+
503,
|
| 776 |
+
849,
|
| 777 |
+
799,
|
| 778 |
+
866
|
| 779 |
+
],
|
| 780 |
+
"page_idx": 3
|
| 781 |
+
},
|
| 782 |
+
{
|
| 783 |
+
"type": "text",
|
| 784 |
+
"text": "The least square distance (LSD) is used to calculate the similarity between the weights of two given models. When the weights uploaded by all the connected users are received, the DBWM scheme immediately launches the weights matching process to find a partner for each connected user.",
|
| 785 |
+
"bbox": [
|
| 786 |
+
501,
|
| 787 |
+
869,
|
| 788 |
+
921,
|
| 789 |
+
941
|
| 790 |
+
],
|
| 791 |
+
"page_idx": 3
|
| 792 |
+
},
|
| 793 |
+
{
|
| 794 |
+
"type": "header",
|
| 795 |
+
"text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015",
|
| 796 |
+
"bbox": [
|
| 797 |
+
73,
|
| 798 |
+
31,
|
| 799 |
+
421,
|
| 800 |
+
44
|
| 801 |
+
],
|
| 802 |
+
"page_idx": 3
|
| 803 |
+
},
|
| 804 |
+
{
|
| 805 |
+
"type": "page_number",
|
| 806 |
+
"text": "4",
|
| 807 |
+
"bbox": [
|
| 808 |
+
911,
|
| 809 |
+
32,
|
| 810 |
+
921,
|
| 811 |
+
42
|
| 812 |
+
],
|
| 813 |
+
"page_idx": 3
|
| 814 |
+
},
|
| 815 |
+
{
|
| 816 |
+
"type": "text",
|
| 817 |
+
"text": "3.3.1 Least Square Distance",
|
| 818 |
+
"text_level": 1,
|
| 819 |
+
"bbox": [
|
| 820 |
+
73,
|
| 821 |
+
51,
|
| 822 |
+
287,
|
| 823 |
+
66
|
| 824 |
+
],
|
| 825 |
+
"page_idx": 4
|
| 826 |
+
},
|
| 827 |
+
{
|
| 828 |
+
"type": "text",
|
| 829 |
+
"text": "Let $FLEs$ denote the maximum number of federated learning epochs. Let $W_{i}^{S,k}$ and $W_{i}^{T,k}$ be the weights of the student and teacher models of $U_{i}$ at the $k$ -th federated learning epoch, $k = 1,2,\\dots,FLEs$ . Denote the hidden layers' weights of the student and teacher models of $U_{i}$ by $W_{i}^{S_{hidden},k} \\subset W_{i}^{S,k}$ and $W_{i}^{T_{hidden},k} \\subset W_{i}^{T,k}$ , respectively. To be specific, $W_{i}^{S_{hidden},k}$ consists of the weights of ConvBlock 1, ConvBlock 2, ConvBlock 3, and the dense layer, namely, $W_{i}^{S_{1},k}$ , $W_{i}^{S_{2},k}$ , $W_{i}^{S_{3},k}$ , and $W_{i}^{S_{4},k}$ . So, we have $W_{i}^{S_{hidden},k} = \\{W_{i}^{S_{1},k}, W_{i}^{S_{2},k}, W_{i}^{S_{3},k}, W_{i}^{S_{4},k}\\}$ .",
|
| 830 |
+
"bbox": [
|
| 831 |
+
71,
|
| 832 |
+
70,
|
| 833 |
+
490,
|
| 834 |
+
220
|
| 835 |
+
],
|
| 836 |
+
"page_idx": 4
|
| 837 |
+
},
|
| 838 |
+
{
|
| 839 |
+
"type": "text",
|
| 840 |
+
"text": "At the $k$ -th federated learning epoch, user $U_{i}, i = 1,2,\\dots,N_{conn}$ , uploads its student model's hidden layers' weights, $W_{i}^{S_{hidden},k}$ , to the server. The server stores the uploaded weights in the weight set $\\mathbf{W}$ defined in Eq. (6).",
|
| 841 |
+
"bbox": [
|
| 842 |
+
71,
|
| 843 |
+
220,
|
| 844 |
+
491,
|
| 845 |
+
280
|
| 846 |
+
],
|
| 847 |
+
"page_idx": 4
|
| 848 |
+
},
|
| 849 |
+
{
|
| 850 |
+
"type": "equation",
|
| 851 |
+
"text": "\n$$\n\\mathbf {W} = \\left[ W _ {1} ^ {S _ {\\text {h i d d e n}}, k}, W _ {2} ^ {S _ {\\text {h i d d e n}}, k}, \\dots , W _ {N _ {\\text {c o n n}}} ^ {S _ {\\text {h i d d e n}}, k} \\right] \\tag {6}\n$$\n",
|
| 852 |
+
"text_format": "latex",
|
| 853 |
+
"bbox": [
|
| 854 |
+
127,
|
| 855 |
+
284,
|
| 856 |
+
490,
|
| 857 |
+
306
|
| 858 |
+
],
|
| 859 |
+
"page_idx": 4
|
| 860 |
+
},
|
| 861 |
+
{
|
| 862 |
+
"type": "text",
|
| 863 |
+
"text": "The server then calculates the weights' square distance set, $d$ , based on $\\mathbf{W}$ . $d$ is defined as:",
|
| 864 |
+
"bbox": [
|
| 865 |
+
71,
|
| 866 |
+
310,
|
| 867 |
+
491,
|
| 868 |
+
339
|
| 869 |
+
],
|
| 870 |
+
"page_idx": 4
|
| 871 |
+
},
|
| 872 |
+
{
|
| 873 |
+
"type": "equation",
|
| 874 |
+
"text": "\n$$\nd = \\left[ \\begin{array}{c} d _ {1} \\\\ d _ {2} \\\\ \\dots \\\\ d _ {N _ {\\text {c o n n}}} \\end{array} \\right] = \\left[ \\begin{array}{c c c} d _ {1, 2} & \\dots & d _ {1, N _ {\\text {c o n n}}} \\\\ d _ {2, 1} & \\dots & d _ {2, N _ {\\text {c o n n}}} \\\\ \\dots & \\dots & \\dots \\\\ d _ {N _ {\\text {c o n n}}, 1} & \\dots & d _ {N _ {\\text {c o n n}}, N _ {\\text {c o n n}} - 1} \\end{array} \\right] \\tag {7}\n$$\n",
|
| 875 |
+
"text_format": "latex",
|
| 876 |
+
"bbox": [
|
| 877 |
+
80,
|
| 878 |
+
343,
|
| 879 |
+
490,
|
| 880 |
+
406
|
| 881 |
+
],
|
| 882 |
+
"page_idx": 4
|
| 883 |
+
},
|
| 884 |
+
{
|
| 885 |
+
"type": "text",
|
| 886 |
+
"text": "where, $d_{i,j}$ $(i,j\\in 1,\\dots,N_{conn},i\\neq j)$ is the square distance between $W_{i}^{S_{hidden},k}$ and $W_{j}^{S_{hidden},k}$ , as defined in Eq. (8).",
|
| 887 |
+
"bbox": [
|
| 888 |
+
71,
|
| 889 |
+
409,
|
| 890 |
+
491,
|
| 891 |
+
444
|
| 892 |
+
],
|
| 893 |
+
"page_idx": 4
|
| 894 |
+
},
|
| 895 |
+
{
|
| 896 |
+
"type": "equation",
|
| 897 |
+
"text": "\n$$\n\\begin{array}{l} d _ {i, j} = \\left\\| W _ {i} ^ {S _ {\\text {h i d d e n}}, k} - W _ {j} ^ {S _ {\\text {h i d d e n}}, k} \\right\\| ^ {2} \\\\ = \\sum_ {m = 1} ^ {4} \\left| \\left| W _ {i} ^ {S _ {m}, k} - W _ {j} ^ {S _ {m}, k} \\right| \\right| ^ {2} \\tag {8} \\\\ \\end{array}\n$$\n",
|
| 898 |
+
"text_format": "latex",
|
| 899 |
+
"bbox": [
|
| 900 |
+
158,
|
| 901 |
+
446,
|
| 902 |
+
488,
|
| 903 |
+
508
|
| 904 |
+
],
|
| 905 |
+
"page_idx": 4
|
| 906 |
+
},
|
| 907 |
+
{
|
| 908 |
+
"type": "text",
|
| 909 |
+
"text": "We adopt the argmin function to return the index of the smallest distance for each row in $d$ and obtain the index set, ID. ID is defined in Eq. (9).",
|
| 910 |
+
"bbox": [
|
| 911 |
+
71,
|
| 912 |
+
513,
|
| 913 |
+
491,
|
| 914 |
+
558
|
| 915 |
+
],
|
| 916 |
+
"page_idx": 4
|
| 917 |
+
},
|
| 918 |
+
{
|
| 919 |
+
"type": "equation",
|
| 920 |
+
"text": "\n$$\n\\mathbf {I D} = \\operatorname {a r g m i n} (d) = \\left[ I D _ {1}, I D _ {2}, \\dots , I D _ {N _ {\\text {c o n n}}} \\right] \\tag {9}\n$$\n",
|
| 921 |
+
"text_format": "latex",
|
| 922 |
+
"bbox": [
|
| 923 |
+
129,
|
| 924 |
+
564,
|
| 925 |
+
488,
|
| 926 |
+
582
|
| 927 |
+
],
|
| 928 |
+
"page_idx": 4
|
| 929 |
+
},
|
| 930 |
+
{
|
| 931 |
+
"type": "text",
|
| 932 |
+
"text": "where, $ID_{i}$ is the index of the smallest distance for $U_{i}$ .",
|
| 933 |
+
"bbox": [
|
| 934 |
+
71,
|
| 935 |
+
587,
|
| 936 |
+
455,
|
| 937 |
+
601
|
| 938 |
+
],
|
| 939 |
+
"page_idx": 4
|
| 940 |
+
},
|
| 941 |
+
{
|
| 942 |
+
"type": "text",
|
| 943 |
+
"text": "Based on $\\mathbf{ID}$ , we easily obtain the LSD weight set, $\\mathbf{W}^{LSD}$ from $\\mathbf{W}$ . $\\mathbf{W}^{LSD}$ is defined in Eq. (10).",
|
| 944 |
+
"bbox": [
|
| 945 |
+
71,
|
| 946 |
+
601,
|
| 947 |
+
490,
|
| 948 |
+
632
|
| 949 |
+
],
|
| 950 |
+
"page_idx": 4
|
| 951 |
+
},
|
| 952 |
+
{
|
| 953 |
+
"type": "equation",
|
| 954 |
+
"text": "\n$$\n\\begin{array}{l} \\mathbf {W} ^ {L S D} = \\left[ W _ {1} ^ {L S D, k}, W _ {2} ^ {L S D, k},..., W _ {N _ {\\text {c o n n}}} ^ {L S D, k} \\right] \\tag {10} \\\\ = \\left[ \\mathbf {W} (I D _ {1}), \\mathbf {W} (I D _ {2}), \\dots , \\mathbf {W} (I D _ {N _ {\\text {c o n n}}}) \\right] \\\\ \\end{array}\n$$\n",
|
| 955 |
+
"text_format": "latex",
|
| 956 |
+
"bbox": [
|
| 957 |
+
109,
|
| 958 |
+
636,
|
| 959 |
+
488,
|
| 960 |
+
676
|
| 961 |
+
],
|
| 962 |
+
"page_idx": 4
|
| 963 |
+
},
|
| 964 |
+
{
|
| 965 |
+
"type": "text",
|
| 966 |
+
"text": "where, $W_{i}^{LSD,k}$ are the weights matched with those of $U_{i}$ at the $k$ -th federated learning epoch.",
|
| 967 |
+
"bbox": [
|
| 968 |
+
71,
|
| 969 |
+
681,
|
| 970 |
+
491,
|
| 971 |
+
713
|
| 972 |
+
],
|
| 973 |
+
"page_idx": 4
|
| 974 |
+
},
|
| 975 |
+
{
|
| 976 |
+
"type": "text",
|
| 977 |
+
"text": "Once $U_{i}$ receives $W_{i}^{LSD,k}$ from the server, $U_{i}$ loads these weights to its teacher's hidden layers at the beginning of the next federated learning epoch, as defined in Eq. (11).",
|
| 978 |
+
"bbox": [
|
| 979 |
+
71,
|
| 980 |
+
713,
|
| 981 |
+
491,
|
| 982 |
+
758
|
| 983 |
+
],
|
| 984 |
+
"page_idx": 4
|
| 985 |
+
},
|
| 986 |
+
{
|
| 987 |
+
"type": "equation",
|
| 988 |
+
"text": "\n$$\nW _ {i} ^ {T _ {\\text {h i d d e n}}, k + 1} \\leftarrow W _ {i} ^ {L S D, k} \\tag {11}\n$$\n",
|
| 989 |
+
"text_format": "latex",
|
| 990 |
+
"bbox": [
|
| 991 |
+
192,
|
| 992 |
+
763,
|
| 993 |
+
488,
|
| 994 |
+
782
|
| 995 |
+
],
|
| 996 |
+
"page_idx": 4
|
| 997 |
+
},
|
| 998 |
+
{
|
| 999 |
+
"type": "text",
|
| 1000 |
+
"text": "Alg. 1 and Alg. 2 show the user and server implementation procedures, respectively.",
|
| 1001 |
+
"bbox": [
|
| 1002 |
+
71,
|
| 1003 |
+
787,
|
| 1004 |
+
491,
|
| 1005 |
+
819
|
| 1006 |
+
],
|
| 1007 |
+
"page_idx": 4
|
| 1008 |
+
},
|
| 1009 |
+
{
|
| 1010 |
+
"type": "text",
|
| 1011 |
+
"text": "3.4 Communication Overhead",
|
| 1012 |
+
"text_level": 1,
|
| 1013 |
+
"bbox": [
|
| 1014 |
+
71,
|
| 1015 |
+
835,
|
| 1016 |
+
308,
|
| 1017 |
+
849
|
| 1018 |
+
],
|
| 1019 |
+
"page_idx": 4
|
| 1020 |
+
},
|
| 1021 |
+
{
|
| 1022 |
+
"type": "text",
|
| 1023 |
+
"text": "EFDLS does not launch the DBWM scheme unless the weights from all the $N_{conn}$ connected users are received. It helps reduce the interaction between the server and users, promoting the system's service efficiency. For user $U_{i}, i = 1,2,\\dots,N_{conn}$ , we analyze the communication overhead of uploading and downloading its weights. Denote the",
|
| 1024 |
+
"bbox": [
|
| 1025 |
+
71,
|
| 1026 |
+
854,
|
| 1027 |
+
491,
|
| 1028 |
+
943
|
| 1029 |
+
],
|
| 1030 |
+
"page_idx": 4
|
| 1031 |
+
},
|
| 1032 |
+
{
|
| 1033 |
+
"type": "code",
|
| 1034 |
+
"sub_type": "algorithm",
|
| 1035 |
+
"code_caption": [
|
| 1036 |
+
"Algorithm 1 EFDLS User Implementation Procedure"
|
| 1037 |
+
],
|
| 1038 |
+
"code_body": "1: procedure USERPROCEDURE $(U_{i},FLEs)$ \n2: Initialize all global variables; \n3: for $k = 1$ to FLEs do \n4: if $k = 1$ then \n5: // The student model is trained alone \n6: Obtain $W_{i}^{S,k}$ after the initial local training; \n7: //Upload its hidden layers' weights to server \n8: Upload $W_{i}^{S_{hidden},k}\\subset W_{i}^{S,k}$ . \n9: else \n10: if receiveServer(Active)=1 then \n11: // Connect to the EFDLS server \n12: Receive $W_{i}^{LSD,k}$ . \n13: Load $W_{i}^{LSD,k}$ to the teacher model; \n14: Compute $\\mathcal{L}_i$ by Eq. (5); \n15: Update $W_{i}^{S,k + 1}$ using the gradient decent; \n16: Upload $W_{i}^{S_{hidden},k + 1}\\subset W_{i}^{S,k + 1}$ . \n17: else \n18: Disconnect from the EFDLS server. \n19: end if \n20: end if \n21: end for \n22: end procedure",
|
| 1039 |
+
"bbox": [
|
| 1040 |
+
508,
|
| 1041 |
+
68,
|
| 1042 |
+
923,
|
| 1043 |
+
398
|
| 1044 |
+
],
|
| 1045 |
+
"page_idx": 4
|
| 1046 |
+
},
|
| 1047 |
+
{
|
| 1048 |
+
"type": "code",
|
| 1049 |
+
"sub_type": "algorithm",
|
| 1050 |
+
"code_caption": [
|
| 1051 |
+
"Algorithm 2 EFDLS Server Implementation Procedure"
|
| 1052 |
+
],
|
| 1053 |
+
"code_body": "1: procedure SERVERPROCEDURE $(N_{tot},N_{conn},FLEs)$ \n2: Initialize all global variables; \n3: Set $\\mathbf{W} = \\varnothing$ . \n4: for $k = 1$ to FLEs do \n5: // Run on the server; \n6: Clear and initialize W; \n7: for $i = 1$ to $N_{conn}$ do \n8: //Receive model weights from users; \n9: Receive $W_{i}^{S_{hidden},k}$ . \n10: Include $W_{i}^{S_{hidden},k}$ in W. \n11: end for \n12: for $i = 1$ to $N_{conn}$ do \n13: Obtain $W_{i}^{LSD,k}$ based on W by Eqs. (6)-(10); \n14: Send $W_{i}^{LSD,k}$ to $U_{i}$ \n15: end for \n16: end for \n17: end procedure",
|
| 1054 |
+
"bbox": [
|
| 1055 |
+
508,
|
| 1056 |
+
449,
|
| 1057 |
+
919,
|
| 1058 |
+
704
|
| 1059 |
+
],
|
| 1060 |
+
"page_idx": 4
|
| 1061 |
+
},
|
| 1062 |
+
{
|
| 1063 |
+
"type": "text",
|
| 1064 |
+
"text": "bandwidth requirement for uploading the student model's hidden layers' weights of $U_{i}$ once by $BW$ . Clearly, the bandwidth requirement for downloading the student model's hidden layers' weights from the server once is also $BW$ . That is because, for an arbitrary connected user, the weights uploaded to and those downloaded from the server are of the same size, given that each user has exactly the same model structure. At each federated learning epoch, the bandwidth requirement for user $U_{i}, i = 1,2,\\dots,N_{conn}$ is estimated as $BW + BW = 2BW$ . For $U_{i}$ , its total communication overhead is in proportion to $2BW \\cdot FLEs$ . Hence, the total communication overhead is proportional to $2BW \\cdot FLEs \\cdot N_{conn}$ .",
|
| 1065 |
+
"bbox": [
|
| 1066 |
+
501,
|
| 1067 |
+
752,
|
| 1068 |
+
923,
|
| 1069 |
+
943
|
| 1070 |
+
],
|
| 1071 |
+
"page_idx": 4
|
| 1072 |
+
},
|
| 1073 |
+
{
|
| 1074 |
+
"type": "header",
|
| 1075 |
+
"text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015",
|
| 1076 |
+
"bbox": [
|
| 1077 |
+
73,
|
| 1078 |
+
31,
|
| 1079 |
+
421,
|
| 1080 |
+
44
|
| 1081 |
+
],
|
| 1082 |
+
"page_idx": 4
|
| 1083 |
+
},
|
| 1084 |
+
{
|
| 1085 |
+
"type": "page_number",
|
| 1086 |
+
"text": "5",
|
| 1087 |
+
"bbox": [
|
| 1088 |
+
911,
|
| 1089 |
+
32,
|
| 1090 |
+
921,
|
| 1091 |
+
42
|
| 1092 |
+
],
|
| 1093 |
+
"page_idx": 4
|
| 1094 |
+
},
|
| 1095 |
+
{
|
| 1096 |
+
"type": "table",
|
| 1097 |
+
"img_path": "images/d05b5ed651360662a8330093d808ef8bfbcb6941df438996cfbe9c3924688606.jpg",
|
| 1098 |
+
"table_caption": [
|
| 1099 |
+
"TABLE 1 Details of 44 selected datasets from the UCR 2018."
|
| 1100 |
+
],
|
| 1101 |
+
"table_footnote": [],
|
| 1102 |
+
"table_body": "<table><tr><td>Scale</td><td>Dataset</td><td>Train</td><td>Test</td><td>Class</td><td>SeriesLength</td><td>Type</td></tr><tr><td rowspan=\"11\">Short</td><td>Chinatown</td><td>20</td><td>345</td><td>2</td><td>24</td><td>Traffic</td></tr><tr><td>MelbournePedestrian</td><td>1194</td><td>2439</td><td>10</td><td>24</td><td>Traffic</td></tr><tr><td>SonyAIBORobotSur.2</td><td>27</td><td>953</td><td>2</td><td>65</td><td>Sensor</td></tr><tr><td>SonyAIBORobotSur.1</td><td>20</td><td>601</td><td>2</td><td>70</td><td>Sensor</td></tr><tr><td>DistalPhalanxO.A.G</td><td>400</td><td>139</td><td>3</td><td>80</td><td>Image</td></tr><tr><td>DistalPhalanxO.C.</td><td>600</td><td>276</td><td>2</td><td>80</td><td>Image</td></tr><tr><td>DistalPhalanxTW</td><td>400</td><td>139</td><td>6</td><td>80</td><td>Image</td></tr><tr><td>TwoLeadECG</td><td>23</td><td>1139</td><td>2</td><td>82</td><td>ECG</td></tr><tr><td>MoteStrain</td><td>20</td><td>1252</td><td>2</td><td>84</td><td>Sensor</td></tr><tr><td>ECG200</td><td>100</td><td>100</td><td>2</td><td>96</td><td>ECG</td></tr><tr><td>CBF</td><td>30</td><td>900</td><td>3</td><td>128</td><td>Simulated</td></tr><tr><td 
rowspan=\"11\">Medium</td><td>DodgerLoopDay</td><td>78</td><td>80</td><td>7</td><td>288</td><td>Sensor</td></tr><tr><td>DodgerLoopGame</td><td>20</td><td>138</td><td>2</td><td>288</td><td>Sensor</td></tr><tr><td>DodgerLoopWeekend</td><td>20</td><td>138</td><td>2</td><td>288</td><td>Sensor</td></tr><tr><td>CricketX</td><td>390</td><td>390</td><td>12</td><td>300</td><td>Motion</td></tr><tr><td>CricketY</td><td>390</td><td>390</td><td>12</td><td>300</td><td>Motion</td></tr><tr><td>CricketZ</td><td>390</td><td>390</td><td>12</td><td>300</td><td>Motion</td></tr><tr><td>FaceFour</td><td>24</td><td>88</td><td>4</td><td>350</td><td>Image</td></tr><tr><td>Ham</td><td>109</td><td>105</td><td>2</td><td>431</td><td>Spectro</td></tr><tr><td>Meat</td><td>60</td><td>60</td><td>3</td><td>448</td><td>Spectro</td></tr><tr><td>Fish</td><td>175</td><td>175</td><td>7</td><td>463</td><td>Image</td></tr><tr><td>Beef</td><td>30</td><td>30</td><td>5</td><td>470</td><td>Spectro</td></tr><tr><td rowspan=\"11\">Long</td><td>OliveOil</td><td>30</td><td>30</td><td>4</td><td>570</td><td>Spectro</td></tr><tr><td>Car</td><td>60</td><td>60</td><td>4</td><td>577</td><td>Sensor</td></tr><tr><td>Lightning2</td><td>60</td><td>61</td><td>2</td><td>637</td><td>Sensor</td></tr><tr><td>Computers</td><td>250</td><td>250</td><td>2</td><td>720</td><td>Device</td></tr><tr><td>Mallat</td><td>55</td><td>2345</td><td>8</td><td>1024</td><td>Simulated</td></tr><tr><td>Phoneme</td><td>214</td><td>1896</td><td>39</td><td>1024</td><td>Sensor</td></tr><tr><td>StarLightCurves</td><td>1000</td><td>8236</td><td>3</td><td>1024</td><td>Sensor</td></tr><tr><td>MixedShapesRegularT.</td><td>500</td><td>2425</td><td>5</td><td>1024</td><td>Image</td></tr><tr><td>MixedShapesSmallT.</td><td>100</td><td>2425</td><td>5</td><td>1024</td><td>Image</td></tr><tr><td>ACSF1</td><td>100</td><td>100</td><td>10</td><td>1460</td><td>Device</td></tr><tr><td>SemgHandG.Ch2</td><td>300</td><td>600</td><td>2</td><td>1500</td><td>Spectrum</td></tr><
tr><td rowspan=\"11\">Vary</td><td>AllGestureWiimoteX</td><td>300</td><td>700</td><td>10</td><td>Vary</td><td>Sensor</td></tr><tr><td>AllGestureWiimoteY</td><td>300</td><td>700</td><td>10</td><td>Vary</td><td>Sensor</td></tr><tr><td>AllGestureWiimoteZ</td><td>300</td><td>700</td><td>10</td><td>Vary</td><td>Sensor</td></tr><tr><td>GestureMidAirD1</td><td>208</td><td>130</td><td>26</td><td>Vary</td><td>Trajectory</td></tr><tr><td>GestureMidAirD2</td><td>208</td><td>130</td><td>26</td><td>Vary</td><td>Trajectory</td></tr><tr><td>GestureMidAirD3</td><td>208</td><td>130</td><td>26</td><td>Vary</td><td>Trajectory</td></tr><tr><td>GesturePebbleZ1</td><td>132</td><td>172</td><td>6</td><td>Vary</td><td>Sensor</td></tr><tr><td>GesturePebbleZ2</td><td>146</td><td>158</td><td>6</td><td>Vary</td><td>Sensor</td></tr><tr><td>PickupGestureW.Z</td><td>50</td><td>50</td><td>10</td><td>Vary</td><td>Sensor</td></tr><tr><td>PLAID</td><td>537</td><td>537</td><td>11</td><td>Vary</td><td>Device</td></tr><tr><td>ShakeGestureW.Z</td><td>50</td><td>50</td><td>10</td><td>Vary</td><td>Sensor</td></tr></table>",
|
| 1103 |
+
"bbox": [
|
| 1104 |
+
199,
|
| 1105 |
+
88,
|
| 1106 |
+
797,
|
| 1107 |
+
660
|
| 1108 |
+
],
|
| 1109 |
+
"page_idx": 5
|
| 1110 |
+
},
|
| 1111 |
+
{
|
| 1112 |
+
"type": "text",
|
| 1113 |
+
"text": "4 PERFORMANCE EVALUATION",
|
| 1114 |
+
"text_level": 1,
|
| 1115 |
+
"bbox": [
|
| 1116 |
+
76,
|
| 1117 |
+
684,
|
| 1118 |
+
336,
|
| 1119 |
+
698
|
| 1120 |
+
],
|
| 1121 |
+
"page_idx": 5
|
| 1122 |
+
},
|
| 1123 |
+
{
|
| 1124 |
+
"type": "text",
|
| 1125 |
+
"text": "This section first introduces the experimental setup and performance metrics and then focuses on the ablation study. Finally, the performance of EFDLS is evaluated.",
|
| 1126 |
+
"bbox": [
|
| 1127 |
+
76,
|
| 1128 |
+
705,
|
| 1129 |
+
488,
|
| 1130 |
+
750
|
| 1131 |
+
],
|
| 1132 |
+
"page_idx": 5
|
| 1133 |
+
},
|
| 1134 |
+
{
|
| 1135 |
+
"type": "text",
|
| 1136 |
+
"text": "4.1 Experimental Setup",
|
| 1137 |
+
"text_level": 1,
|
| 1138 |
+
"bbox": [
|
| 1139 |
+
76,
|
| 1140 |
+
772,
|
| 1141 |
+
259,
|
| 1142 |
+
787
|
| 1143 |
+
],
|
| 1144 |
+
"page_idx": 5
|
| 1145 |
+
},
|
| 1146 |
+
{
|
| 1147 |
+
"type": "text",
|
| 1148 |
+
"text": "4.1.1 Data Description",
|
| 1149 |
+
"text_level": 1,
|
| 1150 |
+
"bbox": [
|
| 1151 |
+
76,
|
| 1152 |
+
792,
|
| 1153 |
+
243,
|
| 1154 |
+
806
|
| 1155 |
+
],
|
| 1156 |
+
"page_idx": 5
|
| 1157 |
+
},
|
| 1158 |
+
{
|
| 1159 |
+
"type": "text",
|
| 1160 |
+
"text": "The UCR 2018 archive is one of the most popular time series repositories with 128 datasets of different lengths in various application domains [58]. Following the previous work [53], we divide the UCR 2018 archive into 4 categories with respect to dataset length, namely, 'short', 'medium', 'long', and 'vary'. The length of a 'short' dataset is no more than 200. That of a 'medium' one varies from 200 to 500. A 'long' one has a length of over 500 while a 'vary' one has an indefinite length. The 128 datasets are composed of 41 'short', 32",
|
| 1161 |
+
"bbox": [
|
| 1162 |
+
76,
|
| 1163 |
+
811,
|
| 1164 |
+
488,
|
| 1165 |
+
941
|
| 1166 |
+
],
|
| 1167 |
+
"page_idx": 5
|
| 1168 |
+
},
|
| 1169 |
+
{
|
| 1170 |
+
"type": "text",
|
| 1171 |
+
"text": "'medium', 44 'long', and 11 'vary' datasets. Unfortunately, our limited computing resources do not allow us to consider the whole 128 datasets (detailed hardware specification can be found in Subsection Implementation Details). There are seven algorithms for performance comparison and the average training time on the 128 datasets costed more than 32 hours for a single federated learning epoch. So, we select 11 datasets from each category, resulting in 44 datasets selected. More details are found in Table 1.",
|
| 1172 |
+
"bbox": [
|
| 1173 |
+
506,
|
| 1174 |
+
685,
|
| 1175 |
+
919,
|
| 1176 |
+
815
|
| 1177 |
+
],
|
| 1178 |
+
"page_idx": 5
|
| 1179 |
+
},
|
| 1180 |
+
{
|
| 1181 |
+
"type": "text",
|
| 1182 |
+
"text": "4.1.2 Implementation Details",
|
| 1183 |
+
"text_level": 1,
|
| 1184 |
+
"bbox": [
|
| 1185 |
+
508,
|
| 1186 |
+
828,
|
| 1187 |
+
718,
|
| 1188 |
+
840
|
| 1189 |
+
],
|
| 1190 |
+
"page_idx": 5
|
| 1191 |
+
},
|
| 1192 |
+
{
|
| 1193 |
+
"type": "text",
|
| 1194 |
+
"text": "Following previous studies [8], [9], [10], [11], [53], we set the decay value of batch normalization to 0.9. We use the $L_{2}$ regularization to avoid overfitting during the training process. Meanwhile, we adopt the AdamOptimizer with Pytorch<sup>1</sup>, where the initial learning rate is set to 0.0001.",
|
| 1195 |
+
"bbox": [
|
| 1196 |
+
506,
|
| 1197 |
+
845,
|
| 1198 |
+
919,
|
| 1199 |
+
917
|
| 1200 |
+
],
|
| 1201 |
+
"page_idx": 5
|
| 1202 |
+
},
|
| 1203 |
+
{
|
| 1204 |
+
"type": "header",
|
| 1205 |
+
"text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015",
|
| 1206 |
+
"bbox": [
|
| 1207 |
+
76,
|
| 1208 |
+
32,
|
| 1209 |
+
419,
|
| 1210 |
+
44
|
| 1211 |
+
],
|
| 1212 |
+
"page_idx": 5
|
| 1213 |
+
},
|
| 1214 |
+
{
|
| 1215 |
+
"type": "page_number",
|
| 1216 |
+
"text": "6",
|
| 1217 |
+
"bbox": [
|
| 1218 |
+
911,
|
| 1219 |
+
32,
|
| 1220 |
+
921,
|
| 1221 |
+
42
|
| 1222 |
+
],
|
| 1223 |
+
"page_idx": 5
|
| 1224 |
+
},
|
| 1225 |
+
{
|
| 1226 |
+
"type": "footer",
|
| 1227 |
+
"text": "1. https://pytorch.org/",
|
| 1228 |
+
"bbox": [
|
| 1229 |
+
521,
|
| 1230 |
+
929,
|
| 1231 |
+
656,
|
| 1232 |
+
941
|
| 1233 |
+
],
|
| 1234 |
+
"page_idx": 5
|
| 1235 |
+
},
|
| 1236 |
+
{
|
| 1237 |
+
"type": "table",
|
| 1238 |
+
"img_path": "images/54a02a6e00d4878ed485c951adbb82f484512993a16253486c890017cc03428e.jpg",
|
| 1239 |
+
"table_caption": [
|
| 1240 |
+
"TABLE 2 Experimental results of different algorithms on 44 datasets when $N_{conn} = 44$ and $N_{tot} = 44$"
|
| 1241 |
+
],
|
| 1242 |
+
"table_footnote": [],
|
| 1243 |
+
"table_body": "<table><tr><td>Dataset</td><td>Baseline</td><td>FedAvg</td><td>FedAvgM</td><td>FedGrad</td><td>FTL</td><td>FTLS</td><td>FKD</td><td>EFDLS</td></tr><tr><td>Chinatown</td><td>0.9623</td><td>0.2754</td><td>0.2754</td><td>0.9623</td><td>0.9665</td><td>0.9537</td><td>0.9275</td><td>0.9478</td></tr><tr><td>MelbournePedestrian</td><td>0.9139</td><td>0.1</td><td>0.1</td><td>0.7784</td><td>0.8486</td><td>0.8922</td><td>0.9379</td><td>0.9453</td></tr><tr><td>SonyAIBORobotSur.2</td><td>0.8961</td><td>0.383</td><td>0.383</td><td>0.8363</td><td>0.8688</td><td>0.9035</td><td>0.915</td><td>0.8961</td></tr><tr><td>SonyAIBORobotSur.1</td><td>0.8652</td><td>0.5707</td><td>0.6619</td><td>0.7887</td><td>0.8236</td><td>0.8702</td><td>0.8369</td><td>0.8819</td></tr><tr><td>DistalPhalanxO.A.G</td><td>0.6763</td><td>0.1079</td><td>0.1079</td><td>0.6187</td><td>0.6259</td><td>0.6475</td><td>0.6691</td><td>0.6475</td></tr><tr><td>DistalPhalanxO.C.</td><td>0.75</td><td>0.417</td><td>0.6619</td><td>0.6776</td><td>0.7464</td><td>0.7465</td><td>0.7536</td><td>0.7428</td></tr><tr><td>DistalPhalanxTW</td><td>0.6547</td><td>0.1295</td><td>0.1295</td><td>0.554</td><td>0.6259</td><td>0.6547</td><td>0.6835</td><td>0.6403</td></tr><tr><td>TwoLeadECG</td><td>0.7463</td><td>0.4996</td><td>0.4996</td><td>0.7305</td><td>0.7287</td><td>0.7278</td><td>0.8112</td><td>0.7665</td></tr><tr><td>MoteStrain</td><td>0.7788</td><td>0.5391</td><td>0.5391</td><td>0.6933</td><td>0.7923</td><td>0.8283</td><td>0.8163</td><td>0.8203</td></tr><tr><td>ECG200</td><td>0.86</td><td>0.36</td><td>0.36</td><td>0.8</td><td>0.84</td><td>0.85</td><td>0.87</td><td>0.85</td></tr><tr><td>CBF</td><td>0.987</td><td>0.3333</td><td>0.5911</td><td>0.5911</td><td>0.973</td><td>0.9922</td><td>0.9922</td><td>0.9956</td></tr><tr><td>DodgerLoopDay</td><td>0.575</td><td>0.15</td><td>0.15</td><td>0.3875</td><td>0.55</td><td>0.525</td><td>0.5125</td><td>0.5375</td></tr><tr><td>DodgerLoopGame</td><td>0.6884</td><td>0.5217</td><td>0.521
7</td><td>0.6232</td><td>0.7826</td><td>0.7609</td><td>0.7609</td><td>0.7464</td></tr><tr><td>DodgerLoopWeekend</td><td>0.8261</td><td>0.7391</td><td>0.7391</td><td>0.7319</td><td>0.8841</td><td>0.8913</td><td>0.913</td><td>0.9203</td></tr><tr><td>CricketX</td><td>0.5897</td><td>0.0692</td><td>0.1371</td><td>0.2256</td><td>0.5667</td><td>0.6128</td><td>0.659</td><td>0.6718</td></tr><tr><td>CricketY</td><td>0.5051</td><td>0.0949</td><td>0.1357</td><td>0.1949</td><td>0.5</td><td>0.4949</td><td>0.5538</td><td>0.5974</td></tr><tr><td>CricketZ</td><td>0.6205</td><td>0.0846</td><td>0.0846</td><td>0.2256</td><td>0.5692</td><td>0.6</td><td>0.6692</td><td>0.7256</td></tr><tr><td>FaceFour</td><td>0.6477</td><td>0.1591</td><td>0.1591</td><td>0.4659</td><td>0.6591</td><td>0.6932</td><td>0.6932</td><td>0.6818</td></tr><tr><td>Ham</td><td>0.7143</td><td>0.4857</td><td>0.4857</td><td>0.6762</td><td>0.7048</td><td>0.7143</td><td>0.7048</td><td>0.6952</td></tr><tr><td>Meat</td><td>0.8667</td><td>0.3333</td><td>0.3333</td><td>0.7333</td><td>0.8333</td><td>0.8333</td><td>0.9</td><td>0.917</td></tr><tr><td>Fish</td><td>0.5657</td><td>0.1371</td><td>0.1371</td><td>0.2857</td><td>0.5771</td><td>0.6</td><td>0.6</td><td>0.6229</td></tr><tr><td>Beef</td><td>0.7667</td><td>0.2</td><td>0.2</td><td>0.5667</td><td>0.7</td><td>0.7</td><td>0.7</td><td>0.7667</td></tr><tr><td>OliveOil</td><td>0.8333</td><td>0.167</td><td>0.167</td><td>0.7</td><td>0.8667</td><td>0.8667</td><td>0.8333</td><td>0.8333</td></tr><tr><td>Car</td><td>0.5833</td><td>0.233</td><td>0.233</td><td>0.5</td><td>0.5667</td><td>0.5833</td><td>0.5667</td><td>0.6333</td></tr><tr><td>Lightning2</td><td>0.7869</td><td>0.459</td><td>0.459</td><td>0.7705</td><td>0.7869</td><td>0.8033</td><td>0.7541</td><td>0.7869</td></tr><tr><td>Computers</td><td>0.78</td><td>0.5</td><td>0.5</td><td>0.584</td><td>0.688</td><td>0.748</td><td>0.788</td><td>0.804</td></tr><tr><td>Mallat</td><td>0.7446</td><td>0.1254</td><td>0.1254</td><td>0.4141</td><td>0
.7638</td><td>0.7539</td><td>0.7906</td><td>0.8299</td></tr><tr><td>Phoneme</td><td>0.2231</td><td>0.02</td><td>0.02</td><td>0.1108</td><td>0.2147</td><td>0.2247</td><td>0.2859</td><td>0.2954</td></tr><tr><td>StarLightCurves</td><td>0.9534</td><td>0.1429</td><td>0.1429</td><td>0.5062</td><td>0.9519</td><td>0.9584</td><td>0.9571</td><td>0.9582</td></tr><tr><td>MixedShapesRegularT.</td><td>0.8586</td><td>0.1889</td><td>0.1889</td><td>0.2223</td><td>0.8384</td><td>0.8598</td><td>0.8643</td><td>0.8907</td></tr><tr><td>MixedShapesSmallT.</td><td>0.8029</td><td>0.1889</td><td>0.1889</td><td>0.2421</td><td>0.7942</td><td>0.8062</td><td>0.8318</td><td>0.8388</td></tr><tr><td>ACSIF1</td><td>0.77</td><td>0.1</td><td>0.19</td><td>0.19</td><td>0.82</td><td>0.89</td><td>0.87</td><td>0.88</td></tr><tr><td>SengHandG.Ch2</td><td>0.7067</td><td>0.65</td><td>0.65</td><td>0.555</td><td>0.72</td><td>0.7383</td><td>0.6867</td><td>0.72</td></tr><tr><td>AllGestureWiimoteX</td><td>0.2643</td><td>0.1</td><td>0.1</td><td>0.1371</td><td>0.2729</td><td>0.3043</td><td>0.2929</td><td>0.2914</td></tr><tr><td>AllGestureWiimoteY</td><td>0.2585</td><td>0.1</td><td>0.1</td><td>0.1357</td><td>0.3186</td><td>0.3029</td><td>0.2529</td><td>0.2829</td></tr><tr><td>AllGestureWiimoteZ</td><td>0.2886</td><td>0.1</td><td>0.1</td><td>0.1343</td><td>0.2671</td><td>0.29</td><td>0.4014</td><td>0.3786</td></tr><tr><td>GestureMidAirD1</td><td>0.5538</td><td>0.0384</td><td>0.0384</td><td>0.0923</td><td>0.5462</td><td>0.5538</td><td>0.4615</td><td>0.5769</td></tr><tr><td>GestureMidAirD2</td><td>0.4231</td><td>0.0384</td><td>0.0384</td><td>0.0923</td><td>0.4154</td><td>0.4462</td><td>0.4692</td><td>0.5308</td></tr><tr><td>GestureMidAirD3</td><td>0.3</td><td>0.0384</td><td>0.0384</td><td>0.0923</td><td>0.2693</td><td>0.2615</td><td>0.2231</td><td>0.2769</td></tr><tr><td>GesturePebbleZ1</td><td>0.4419</td><td>0.1628</td><td>0.1628</td><td>0.2558</td><td>0.4767</td><td>0.4826</td><td>0.5</td><td>0.4883</td></tr><tr><td>G
esturePebbleZ2</td><td>0.4241</td><td>0.1519</td><td>0.1519</td><td>0.2722</td><td>0.5126</td><td>0.557</td><td>0.6013</td><td>0.5886</td></tr><tr><td>PickupGestureW.Z</td><td>0.56</td><td>0.1</td><td>0.1</td><td>0.24</td><td>0.62</td><td>0.6</td><td>0.7</td><td>0.74</td></tr><tr><td>PLAID</td><td>0.203</td><td>0.0615</td><td>0.0615</td><td>0.0615</td><td>0.2198</td><td>0.2253</td><td>0.2924</td><td>0.2589</td></tr><tr><td>ShakeGestureW.Z</td><td>0.92</td><td>0.1</td><td>0.1</td><td>0.1</td><td>0.96</td><td>0.92</td><td>0.96</td><td>0.96</td></tr><tr><td>Win</td><td>4</td><td>0</td><td>0</td><td>0</td><td>3</td><td>7</td><td>10</td><td>18</td></tr><tr><td>Tie</td><td>1</td><td>0</td><td>0</td><td>0</td><td>2</td><td>1</td><td>1</td><td>2</td></tr><tr><td>Lose</td><td>39</td><td>44</td><td>44</td><td>44</td><td>39</td><td>36</td><td>33</td><td>24</td></tr><tr><td>Best</td><td>5</td><td>0</td><td>0</td><td>0</td><td>5</td><td>8</td><td>11</td><td>20</td></tr><tr><td>MeanACC</td><td>0.6622</td><td>0.2377</td><td>0.2557</td><td>0.4445</td><td>0.6604</td><td>0.6743</td><td>0.6878</td><td>0.7014</td></tr><tr><td>AVG_rank</td><td>3.5455</td><td>7.5</td><td>7.3409</td><td>6.0113</td><td>3.9204</td><td>2.8977</td><td>2.6364</td><td>2.1478</td></tr></table>",
|
| 1244 |
+
"bbox": [
|
| 1245 |
+
153,
|
| 1246 |
+
90,
|
| 1247 |
+
844,
|
| 1248 |
+
763
|
| 1249 |
+
],
|
| 1250 |
+
"page_idx": 6
|
| 1251 |
+
},
|
| 1252 |
+
{
|
| 1253 |
+
"type": "text",
|
| 1254 |
+
"text": "All experiments were conducted on a desktop with an Nvidia GTX 1080Ti GPU with 11GB memory, and an AMD R5 1400 CPU with 16G RAM under the Ubuntu 18.04 OS.",
|
| 1255 |
+
"bbox": [
|
| 1256 |
+
71,
|
| 1257 |
+
786,
|
| 1258 |
+
490,
|
| 1259 |
+
830
|
| 1260 |
+
],
|
| 1261 |
+
"page_idx": 6
|
| 1262 |
+
},
|
| 1263 |
+
{
|
| 1264 |
+
"type": "text",
|
| 1265 |
+
"text": "4.2 Performance Metrics",
|
| 1266 |
+
"text_level": 1,
|
| 1267 |
+
"bbox": [
|
| 1268 |
+
73,
|
| 1269 |
+
849,
|
| 1270 |
+
269,
|
| 1271 |
+
864
|
| 1272 |
+
],
|
| 1273 |
+
"page_idx": 6
|
| 1274 |
+
},
|
| 1275 |
+
{
|
| 1276 |
+
"type": "text",
|
| 1277 |
+
"text": "To evaluate FL algorithms' performance, we use three well-known metrics: 'win'/'tie'/'lose', mean accuracy (MeanACC), and AVG_rank, all based on the top-1 accuracy. For an arbitrary algorithm, its 'win', 'tie', and 'lose' values indicate on how many datasets it is better than, equal to,",
|
| 1278 |
+
"bbox": [
|
| 1279 |
+
71,
|
| 1280 |
+
869,
|
| 1281 |
+
490,
|
| 1282 |
+
944
|
| 1283 |
+
],
|
| 1284 |
+
"page_idx": 6
|
| 1285 |
+
},
|
| 1286 |
+
{
|
| 1287 |
+
"type": "text",
|
| 1288 |
+
"text": "and worse than the others, respectively; its 'best' value is the summation of the corresponding 'win' and 'tie' values. The AVG_rank score reflects the average difference between the accuracy values of a model and the best accuracy values among all models [9], [10], [11], [12], [56].",
|
| 1289 |
+
"bbox": [
|
| 1290 |
+
503,
|
| 1291 |
+
786,
|
| 1292 |
+
923,
|
| 1293 |
+
861
|
| 1294 |
+
],
|
| 1295 |
+
"page_idx": 6
|
| 1296 |
+
},
|
| 1297 |
+
{
|
| 1298 |
+
"type": "text",
|
| 1299 |
+
"text": "4.3 Ablation Study",
|
| 1300 |
+
"text_level": 1,
|
| 1301 |
+
"bbox": [
|
| 1302 |
+
504,
|
| 1303 |
+
878,
|
| 1304 |
+
656,
|
| 1305 |
+
893
|
| 1306 |
+
],
|
| 1307 |
+
"page_idx": 6
|
| 1308 |
+
},
|
| 1309 |
+
{
|
| 1310 |
+
"type": "text",
|
| 1311 |
+
"text": "We use the 44 UCR2018 datasets above to study the impact of parameter settings on the performance of EFDLS. Assume there are 44 users in the system, i.e., $N_{tot} = 44$ . Each user",
|
| 1312 |
+
"bbox": [
|
| 1313 |
+
503,
|
| 1314 |
+
898,
|
| 1315 |
+
923,
|
| 1316 |
+
941
|
| 1317 |
+
],
|
| 1318 |
+
"page_idx": 6
|
| 1319 |
+
},
|
| 1320 |
+
{
|
| 1321 |
+
"type": "header",
|
| 1322 |
+
"text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015",
|
| 1323 |
+
"bbox": [
|
| 1324 |
+
73,
|
| 1325 |
+
31,
|
| 1326 |
+
419,
|
| 1327 |
+
44
|
| 1328 |
+
],
|
| 1329 |
+
"page_idx": 6
|
| 1330 |
+
},
|
| 1331 |
+
{
|
| 1332 |
+
"type": "page_number",
|
| 1333 |
+
"text": "7",
|
| 1334 |
+
"bbox": [
|
| 1335 |
+
911,
|
| 1336 |
+
32,
|
| 1337 |
+
921,
|
| 1338 |
+
42
|
| 1339 |
+
],
|
| 1340 |
+
"page_idx": 6
|
| 1341 |
+
},
|
| 1342 |
+
{
|
| 1343 |
+
"type": "image",
|
| 1344 |
+
"img_path": "images/619b0b52bd6730ace90930fe13fd2a6e1ffd7f9bab336e0c31220df8f03cce7a.jpg",
|
| 1345 |
+
"image_caption": [
|
| 1346 |
+
"Fig. 2. MeanACC results obtained by EFDLS with different ratios of $N_{conn}$ to $N_{tot}$ on 44 datasets when $N_{tot} = 44$ ."
|
| 1347 |
+
],
|
| 1348 |
+
"image_footnote": [],
|
| 1349 |
+
"bbox": [
|
| 1350 |
+
86,
|
| 1351 |
+
73,
|
| 1352 |
+
455,
|
| 1353 |
+
239
|
| 1354 |
+
],
|
| 1355 |
+
"page_idx": 7
|
| 1356 |
+
},
|
| 1357 |
+
{
|
| 1358 |
+
"type": "image",
|
| 1359 |
+
"img_path": "images/cd2d5134f238c2dfc49f0ab7807142d9d33320b8b7a78d7cb0b7b844e831fe19.jpg",
|
| 1360 |
+
"image_caption": [
|
| 1361 |
+
"Fig. 3. MeanACC results with different $\\epsilon$ values on 44 datasets when $N_{conn} = 44$ and $N_{tot} = 44$ ."
|
| 1362 |
+
],
|
| 1363 |
+
"image_footnote": [],
|
| 1364 |
+
"bbox": [
|
| 1365 |
+
84,
|
| 1366 |
+
325,
|
| 1367 |
+
455,
|
| 1368 |
+
494
|
| 1369 |
+
],
|
| 1370 |
+
"page_idx": 7
|
| 1371 |
+
},
|
| 1372 |
+
{
|
| 1373 |
+
"type": "text",
|
| 1374 |
+
"text": "runs a TSC task with data coming from a specific dataset. For any two users, if they run identical tasks, e.g., motion recognition, their data sources come from different datasets, e.g., CricketX and CricketY. In the experiments, each user's data comes from one of the 44 datasets.",
|
| 1375 |
+
"bbox": [
|
| 1376 |
+
71,
|
| 1377 |
+
570,
|
| 1378 |
+
490,
|
| 1379 |
+
643
|
| 1380 |
+
],
|
| 1381 |
+
"page_idx": 7
|
| 1382 |
+
},
|
| 1383 |
+
{
|
| 1384 |
+
"type": "text",
|
| 1385 |
+
"text": "4.3.1 Impact of $N_{conn}$",
|
| 1386 |
+
"text_level": 1,
|
| 1387 |
+
"bbox": [
|
| 1388 |
+
73,
|
| 1389 |
+
657,
|
| 1390 |
+
238,
|
| 1391 |
+
674
|
| 1392 |
+
],
|
| 1393 |
+
"page_idx": 7
|
| 1394 |
+
},
|
| 1395 |
+
{
|
| 1396 |
+
"type": "text",
|
| 1397 |
+
"text": "To investigate the impact of $N_{conn}$ on the EFDLS's performance, we select four ratios of $N_{conn}$ to $N_{tot}$ , namely $40\\%$ , $60\\%$ , $80\\%$ , and $100\\%$ . For example, $40\\%$ means there are 18 connected users for weights uploading, given $N_{tot} = 44$ . The MeanACC results obtained by EFDLS with different $N_{conn}$ values on 44 datasets are shown in Fig. 2. One can easily observe that a larger $N_{conn}$ tends to result in a higher MeanACC value. That is because as $N_{conn}$ increases, more amount of time series data is made use of by the system and thus more discriminate representations are captured.",
|
| 1398 |
+
"bbox": [
|
| 1399 |
+
71,
|
| 1400 |
+
676,
|
| 1401 |
+
491,
|
| 1402 |
+
823
|
| 1403 |
+
],
|
| 1404 |
+
"page_idx": 7
|
| 1405 |
+
},
|
| 1406 |
+
{
|
| 1407 |
+
"type": "text",
|
| 1408 |
+
"text": "4.3.2 Impact of $\\epsilon$",
|
| 1409 |
+
"text_level": 1,
|
| 1410 |
+
"bbox": [
|
| 1411 |
+
73,
|
| 1412 |
+
835,
|
| 1413 |
+
204,
|
| 1414 |
+
851
|
| 1415 |
+
],
|
| 1416 |
+
"page_idx": 7
|
| 1417 |
+
},
|
| 1418 |
+
{
|
| 1419 |
+
"type": "text",
|
| 1420 |
+
"text": "$\\epsilon$ is a coefficient to balance each connected user's supervised and KD losses in EFDLS. Fig. 3 shows the MeanACC results with different $\\epsilon$ values when $N_{conn} = 44$ and $N_{tot} = 44$ . It is seen that $\\epsilon = 0.90$ results in the highest MeanACC score, i.e., 0.7014. That means $\\epsilon = 0.90$ is appropriate to reduce each user's entropy on its data during training.",
|
| 1421 |
+
"bbox": [
|
| 1422 |
+
71,
|
| 1423 |
+
854,
|
| 1424 |
+
491,
|
| 1425 |
+
944
|
| 1426 |
+
],
|
| 1427 |
+
"page_idx": 7
|
| 1428 |
+
},
|
| 1429 |
+
{
|
| 1430 |
+
"type": "text",
|
| 1431 |
+
"text": "4.4 Experimental Analysis",
|
| 1432 |
+
"text_level": 1,
|
| 1433 |
+
"bbox": [
|
| 1434 |
+
504,
|
| 1435 |
+
53,
|
| 1436 |
+
714,
|
| 1437 |
+
68
|
| 1438 |
+
],
|
| 1439 |
+
"page_idx": 7
|
| 1440 |
+
},
|
| 1441 |
+
{
|
| 1442 |
+
"type": "text",
|
| 1443 |
+
"text": "To evaluate the overall performance of EFDLS, we compare it with seven benchmark algorithms listed below against 'Win'/'Lose'/'Tie', MeanACC, and AVG_rank.",
|
| 1444 |
+
"bbox": [
|
| 1445 |
+
503,
|
| 1446 |
+
71,
|
| 1447 |
+
923,
|
| 1448 |
+
114
|
| 1449 |
+
],
|
| 1450 |
+
"page_idx": 7
|
| 1451 |
+
},
|
| 1452 |
+
{
|
| 1453 |
+
"type": "list",
|
| 1454 |
+
"sub_type": "text",
|
| 1455 |
+
"list_items": [
|
| 1456 |
+
"- Baseline: the single-task TSC algorithm with the feature extractor in Fig. 1 deployed on each user. Note that each user has a unique dataset to run and knowledge sharing among them is disabled.",
|
| 1457 |
+
"- FedAvg: the FederatedAveraging method using the feature extractor in Fig. 1 [18].",
|
| 1458 |
+
"- FedAvgM: the modified FedAvg using the feature extractor in Fig. 1 [27].",
|
| 1459 |
+
"FedGrad: the federated gradient method using the feature extractor in Fig. 1 [16].",
|
| 1460 |
+
"- FTL: the federated transfer learning method using the feature extractor in Fig. 1 [23].",
|
| 1461 |
+
"FTLS: FTL [23] based on the DBWM scheme using the feature extractor in Fig. 1.",
|
| 1462 |
+
"- FKD: the federated knowledge distillation using the feature extractor in Fig. 1 [27], [28]. For fair comparison, FKD uses the same student-teacher network structure as EFDLS."
|
| 1463 |
+
],
|
| 1464 |
+
"bbox": [
|
| 1465 |
+
527,
|
| 1466 |
+
119,
|
| 1467 |
+
921,
|
| 1468 |
+
382
|
| 1469 |
+
],
|
| 1470 |
+
"page_idx": 7
|
| 1471 |
+
},
|
| 1472 |
+
{
|
| 1473 |
+
"type": "text",
|
| 1474 |
+
"text": "Table 2 shows the top-1 accuracy results with various algorithms on 44 UCR2018 datasets when $N_{conn} = 44$ and $N_{tot} = 44$ . To visualize the differences between EFDLS and the others, Fig. 4 depicts the accuracy plots of EFDLS against each of the remaining algorithms on 44 datasets. In addition, the AVG_rank results are shown in Fig. 5.",
|
| 1475 |
+
"bbox": [
|
| 1476 |
+
503,
|
| 1477 |
+
388,
|
| 1478 |
+
921,
|
| 1479 |
+
476
|
| 1480 |
+
],
|
| 1481 |
+
"page_idx": 7
|
| 1482 |
+
},
|
| 1483 |
+
{
|
| 1484 |
+
"type": "text",
|
| 1485 |
+
"text": "First of all, we study the effectiveness of knowledge sharing among users by comparing EFDLS with Baseline. One can observe that EFDLS beats Baseline in every aspect, including 'Win'/'Lose'/'Tie', MeanACC, and AVG_rank. For example, the former wins 18 out of 44 datasets while the latter wins only 4. The accuracy plot of EFDLS vs. Baseline in Fig. 4(a) also supports the finding above. The main difference between EFDLS and Baseline is that the latter only uses standalone feature extractors which do not share the locally collected knowledge with each other. On the other hand, with sufficient knowledge sharing of similar expertise among users enabled, EFDLS improves the system's generalization ability and thus achieves promising multi-task TSC performance.",
|
| 1486 |
+
"bbox": [
|
| 1487 |
+
501,
|
| 1488 |
+
476,
|
| 1489 |
+
923,
|
| 1490 |
+
679
|
| 1491 |
+
],
|
| 1492 |
+
"page_idx": 7
|
| 1493 |
+
},
|
| 1494 |
+
{
|
| 1495 |
+
"type": "text",
|
| 1496 |
+
"text": "Secondly, we study the effectiveness of the FBST framework by comparing EFDLS with FTLS. It is easily seen that EFDLS outperforms FTLS regarding the 'best', MeanACC, and AVG_rank values. The accuracy plot of EFDLS vs. FTLS in Fig. 4(f) also supports this. The FBST framework allows efficient knowledge transfer from teacher to student, helping the student capture sufficient discriminate representations from the input data. On the contrary, the FTLS's learning model lacks of self-generalization, leading to deteriorated performance during knowledge sharing.",
|
| 1497 |
+
"bbox": [
|
| 1498 |
+
501,
|
| 1499 |
+
680,
|
| 1500 |
+
921,
|
| 1501 |
+
825
|
| 1502 |
+
],
|
| 1503 |
+
"page_idx": 7
|
| 1504 |
+
},
|
| 1505 |
+
{
|
| 1506 |
+
"type": "text",
|
| 1507 |
+
"text": "Thirdly, we study the effectiveness of the DBWM scheme by comparing EFDLS with FKD. Apparently, EFDLS overweighs FKD with respect to 'best', MeanACC, and AVG_rank. It is backed by the accuracy plot of EFDLS vs. FTLS in Fig. 4(g). As mentioned before, at each federated learning epoch, the DBWM scheme finds a partner for each user and then EFDLS offers weights exchange between each pair of connected users, which realizes knowledge sharing",
|
| 1508 |
+
"bbox": [
|
| 1509 |
+
501,
|
| 1510 |
+
825,
|
| 1511 |
+
923,
|
| 1512 |
+
944
|
| 1513 |
+
],
|
| 1514 |
+
"page_idx": 7
|
| 1515 |
+
},
|
| 1516 |
+
{
|
| 1517 |
+
"type": "header",
|
| 1518 |
+
"text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015",
|
| 1519 |
+
"bbox": [
|
| 1520 |
+
73,
|
| 1521 |
+
31,
|
| 1522 |
+
421,
|
| 1523 |
+
44
|
| 1524 |
+
],
|
| 1525 |
+
"page_idx": 7
|
| 1526 |
+
},
|
| 1527 |
+
{
|
| 1528 |
+
"type": "page_number",
|
| 1529 |
+
"text": "8",
|
| 1530 |
+
"bbox": [
|
| 1531 |
+
911,
|
| 1532 |
+
32,
|
| 1533 |
+
921,
|
| 1534 |
+
42
|
| 1535 |
+
],
|
| 1536 |
+
"page_idx": 7
|
| 1537 |
+
},
|
| 1538 |
+
{
|
| 1539 |
+
"type": "image",
|
| 1540 |
+
"img_path": "images/b5dbaa657d5a9da1b671bed6753d46ed561d1035e3773d98319733216841da30.jpg",
|
| 1541 |
+
"image_caption": [
|
| 1542 |
+
"(a)"
|
| 1543 |
+
],
|
| 1544 |
+
"image_footnote": [],
|
| 1545 |
+
"bbox": [
|
| 1546 |
+
75,
|
| 1547 |
+
55,
|
| 1548 |
+
272,
|
| 1549 |
+
176
|
| 1550 |
+
],
|
| 1551 |
+
"page_idx": 8
|
| 1552 |
+
},
|
| 1553 |
+
{
|
| 1554 |
+
"type": "image",
|
| 1555 |
+
"img_path": "images/1a1951ed3e2bd453649d31a11a29c1094a11c0f4d590115774da80076d750b28.jpg",
|
| 1556 |
+
"image_caption": [
|
| 1557 |
+
"(b)"
|
| 1558 |
+
],
|
| 1559 |
+
"image_footnote": [],
|
| 1560 |
+
"bbox": [
|
| 1561 |
+
277,
|
| 1562 |
+
55,
|
| 1563 |
+
495,
|
| 1564 |
+
176
|
| 1565 |
+
],
|
| 1566 |
+
"page_idx": 8
|
| 1567 |
+
},
|
| 1568 |
+
{
|
| 1569 |
+
"type": "image",
|
| 1570 |
+
"img_path": "images/6e1414698032c623e8fbba065b0a82cf07a8bd946177d4a96d67c55efb457e56.jpg",
|
| 1571 |
+
"image_caption": [
|
| 1572 |
+
"(c)"
|
| 1573 |
+
],
|
| 1574 |
+
"image_footnote": [],
|
| 1575 |
+
"bbox": [
|
| 1576 |
+
504,
|
| 1577 |
+
56,
|
| 1578 |
+
712,
|
| 1579 |
+
178
|
| 1580 |
+
],
|
| 1581 |
+
"page_idx": 8
|
| 1582 |
+
},
|
| 1583 |
+
{
|
| 1584 |
+
"type": "image",
|
| 1585 |
+
"img_path": "images/27ef68a45306d29412d94985eab200eb5cc1bcd76e194216b20ddbbbfb171a3d.jpg",
|
| 1586 |
+
"image_caption": [
|
| 1587 |
+
"(d)"
|
| 1588 |
+
],
|
| 1589 |
+
"image_footnote": [],
|
| 1590 |
+
"bbox": [
|
| 1591 |
+
715,
|
| 1592 |
+
56,
|
| 1593 |
+
919,
|
| 1594 |
+
178
|
| 1595 |
+
],
|
| 1596 |
+
"page_idx": 8
|
| 1597 |
+
},
|
| 1598 |
+
{
|
| 1599 |
+
"type": "image",
|
| 1600 |
+
"img_path": "images/432c3a691d530c164ecc506e3cd2129fcd3f726eb797083113f1d44dd312c3f7.jpg",
|
| 1601 |
+
"image_caption": [
|
| 1602 |
+
"(e)"
|
| 1603 |
+
],
|
| 1604 |
+
"image_footnote": [],
|
| 1605 |
+
"bbox": [
|
| 1606 |
+
75,
|
| 1607 |
+
199,
|
| 1608 |
+
282,
|
| 1609 |
+
321
|
| 1610 |
+
],
|
| 1611 |
+
"page_idx": 8
|
| 1612 |
+
},
|
| 1613 |
+
{
|
| 1614 |
+
"type": "image",
|
| 1615 |
+
"img_path": "images/e8941f2df05b765635f025b651994c37e06f9dc7f216379d1205394d520d1238.jpg",
|
| 1616 |
+
"image_caption": [
|
| 1617 |
+
"(f)"
|
| 1618 |
+
],
|
| 1619 |
+
"image_footnote": [],
|
| 1620 |
+
"bbox": [
|
| 1621 |
+
393,
|
| 1622 |
+
199,
|
| 1623 |
+
602,
|
| 1624 |
+
321
|
| 1625 |
+
],
|
| 1626 |
+
"page_idx": 8
|
| 1627 |
+
},
|
| 1628 |
+
{
|
| 1629 |
+
"type": "image",
|
| 1630 |
+
"img_path": "images/5e0adb0f154a93ce54eb46e9d5b123c01135c01ebeb1be0246c51522da77ce56.jpg",
|
| 1631 |
+
"image_caption": [
|
| 1632 |
+
"(g)",
|
| 1633 |
+
"Fig. 4. Accuracy plot results showing the performance difference between two given algorithms. (a) Baseline vs. EFDLS; (b) FedAvg vs. EFDLS; (c) FedAvgM vs. EFDLS; (d) FedGrad vs. EFDLS; (e) FTL vs. EFDLS; (f) FTLS vs. EFDLS; (g) FKD vs. EFDLS."
|
| 1634 |
+
],
|
| 1635 |
+
"image_footnote": [],
|
| 1636 |
+
"bbox": [
|
| 1637 |
+
710,
|
| 1638 |
+
199,
|
| 1639 |
+
919,
|
| 1640 |
+
321
|
| 1641 |
+
],
|
| 1642 |
+
"page_idx": 8
|
| 1643 |
+
},
|
| 1644 |
+
{
|
| 1645 |
+
"type": "text",
|
| 1646 |
+
"text": "of similar expertise among different users. In contrast, FKD adopts the average weights to supervise the feature extraction process in each user. It is likely to lead to catastrophic forgetting in a user whose weights significantly differ from the average weights.",
|
| 1647 |
+
"bbox": [
|
| 1648 |
+
71,
|
| 1649 |
+
417,
|
| 1650 |
+
490,
|
| 1651 |
+
491
|
| 1652 |
+
],
|
| 1653 |
+
"page_idx": 8
|
| 1654 |
+
},
|
| 1655 |
+
{
|
| 1656 |
+
"type": "text",
|
| 1657 |
+
"text": "Last but not least, we compare EFDLS with all the seven algorithms. One can easily observe that our EFDLS is no doubt the best among all algorithms for comparison since ours obtains the highest MeanACC and 'best' values, namely 0.7014 and 20, and the smallest AVG_rank value, namely 2.1478. The FKD takes the second position when considering its 'best', MeanACC, and AVG_rank values, namely, 11, 0.6878, and 2.6364. On the other hand, FedAvg and its variant, FedAvgM, are the two worst algorithms. The following explains the reasons behind the findings above. When faced with the multi-task TSC problem, each user runs one TSC task, and different users may run different TSC tasks. The FBST framework and the DBWM scheme help EFDLS to realize fine-grained knowledge sharing between any pair of users with the most similar expertise. FKD uses the average of all users' weights to guide each user to capture valuable features from the data, promoting coarse-grained knowledge sharing among users. On the other hand, FedAvg and FedAvgM simply take the average weights of all users as each user's weights, which may cause catastrophic forgetting and hence poor performance on multi-task TSC.",
|
| 1658 |
+
"bbox": [
|
| 1659 |
+
71,
|
| 1660 |
+
491,
|
| 1661 |
+
491,
|
| 1662 |
+
811
|
| 1663 |
+
],
|
| 1664 |
+
"page_idx": 8
|
| 1665 |
+
},
|
| 1666 |
+
{
|
| 1667 |
+
"type": "text",
|
| 1668 |
+
"text": "5 CONCLUSION",
|
| 1669 |
+
"text_level": 1,
|
| 1670 |
+
"bbox": [
|
| 1671 |
+
73,
|
| 1672 |
+
833,
|
| 1673 |
+
215,
|
| 1674 |
+
848
|
| 1675 |
+
],
|
| 1676 |
+
"page_idx": 8
|
| 1677 |
+
},
|
| 1678 |
+
{
|
| 1679 |
+
"type": "text",
|
| 1680 |
+
"text": "The FBST framework promotes knowledge transfer from a teacher's to its student's hidden layers, helping the student capture instance-level representations from the input. The DBWM scheme finds a partner for each user in terms of similarity between their uploaded weights, enabling knowledge sharing of similar expertise among different users.",
|
| 1681 |
+
"bbox": [
|
| 1682 |
+
71,
|
| 1683 |
+
854,
|
| 1684 |
+
491,
|
| 1685 |
+
944
|
| 1686 |
+
],
|
| 1687 |
+
"page_idx": 8
|
| 1688 |
+
},
|
| 1689 |
+
{
|
| 1690 |
+
"type": "text",
|
| 1691 |
+
"text": "With FBST and DBWM, the proposed EFDLS securely shares knowledge of similar expertise among different tasks for multi-task time series classification. Experimental results show that compared with six benchmark FL algorithms, EFDLS is a winner on 44 datasets with respect to the MeanACC and AVG_rank metrics and on 20 datasets in terms of the 'best' measure. In particular, compared with single-task Baseline, EFDLS obtains $32/4/8$ regarding the 'win'/'tie'/'lose' metric. That reflects the potential of EFDLS to be applied to multi-task TSC problems in various real-world domains.",
|
| 1692 |
+
"bbox": [
|
| 1693 |
+
501,
|
| 1694 |
+
417,
|
| 1695 |
+
921,
|
| 1696 |
+
578
|
| 1697 |
+
],
|
| 1698 |
+
"page_idx": 8
|
| 1699 |
+
},
|
| 1700 |
+
{
|
| 1701 |
+
"type": "text",
|
| 1702 |
+
"text": "REFERENCES",
|
| 1703 |
+
"text_level": 1,
|
| 1704 |
+
"bbox": [
|
| 1705 |
+
504,
|
| 1706 |
+
599,
|
| 1707 |
+
619,
|
| 1708 |
+
614
|
| 1709 |
+
],
|
| 1710 |
+
"page_idx": 8
|
| 1711 |
+
},
|
| 1712 |
+
{
|
| 1713 |
+
"type": "list",
|
| 1714 |
+
"sub_type": "ref_text",
|
| 1715 |
+
"list_items": [
|
| 1716 |
+
"[1] G. Pang and C. Aggarwal, \"Toward explainable deep anomaly detection,\" In Proc. ACM KDD'21, p. 4056-4057, 2021.",
|
| 1717 |
+
"[2] J. Li, H. He, H. He, L. Li, and Y. Xiang, \"An end-to-end framework with multisource monitoring data for bridge health anomaly identification,\" IEEE Trans. Instrum. Meas., vol. 70, pp. 1-9, 2021.",
|
| 1718 |
+
"[3] X. Ma, J. Wu, S. Xue, J. Yang, C. Zhou, Q. Sheng, H. Xiong, and L. Akoglu, \"A comprehensive survey on graph anomaly detection with deep learning,\" IEEE Trans. Knowl. Data Eng., pp. 1-1, 2021.",
|
| 1719 |
+
"[4] H. Tong and J. Zhu, \"New peer effect-based approach for service matching in cloud manufacturing under uncertain preferences,\" Appl. Soft Comput., vol. 94, pp. 1-17, 2020.",
|
| 1720 |
+
"[5] L. Shi, Z. Teng, L. Wang, Y. Zhang, and A. Binder, \"Deepclue: Visual interpretation of text-based deep stock prediction,\" IEEE Trans Knowl. Data Eng., vol. 31, no. 6, pp. 1094-1108, 2019.",
|
| 1721 |
+
"[6] D. Nahmias and K. Kontson, \"Easy perturbation eeg algorithm for spectral importance (easypeasi): A simple method to identify important spectral features of eeg in deep learning models,\" In Proc. ACM KDD'21, p. 2398-2406, 2021.",
|
| 1722 |
+
"[7] F. Zhang, Y. Liu, N. Feng, C. Yang, J. Zhai, S. Zhang, B. He, J. Lin, X. Zhang, and X. Du, \"Periodic weather-aware LSTM with event mechanism for parking behavior prediction,\" IEEE Trans. Knowl. Data Eng., pp. 1-1, 2021.",
|
| 1723 |
+
"[8] H. Fawaz, G. Forestier, J. Weber, L. Idoumghar, and P.-A. Muller, \"Deep learning for time series classification: a review,\" Data Min. Knowl. Disc., vol. 33, pp. 917-963, 2019.",
|
| 1724 |
+
"[9] ——, \"Time series classification from scratch with deep neural networks: A strong baseline,\" In Proc. IEEE IJCNN 2017, pp. 1578-1585, 2017."
|
| 1725 |
+
],
|
| 1726 |
+
"bbox": [
|
| 1727 |
+
504,
|
| 1728 |
+
622,
|
| 1729 |
+
923,
|
| 1730 |
+
941
|
| 1731 |
+
],
|
| 1732 |
+
"page_idx": 8
|
| 1733 |
+
},
|
| 1734 |
+
{
|
| 1735 |
+
"type": "header",
|
| 1736 |
+
"text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015",
|
| 1737 |
+
"bbox": [
|
| 1738 |
+
73,
|
| 1739 |
+
31,
|
| 1740 |
+
421,
|
| 1741 |
+
44
|
| 1742 |
+
],
|
| 1743 |
+
"page_idx": 8
|
| 1744 |
+
},
|
| 1745 |
+
{
|
| 1746 |
+
"type": "page_number",
|
| 1747 |
+
"text": "9",
|
| 1748 |
+
"bbox": [
|
| 1749 |
+
911,
|
| 1750 |
+
32,
|
| 1751 |
+
921,
|
| 1752 |
+
42
|
| 1753 |
+
],
|
| 1754 |
+
"page_idx": 8
|
| 1755 |
+
},
|
| 1756 |
+
{
|
| 1757 |
+
"type": "image",
|
| 1758 |
+
"img_path": "images/404ed6599b8a9f1cfbc300c044b67729eb04480a73851e0f14caaa55a89d26ae.jpg",
|
| 1759 |
+
"image_caption": [
|
| 1760 |
+
"Fig. 5. Critical difference diagram of the average ranks of various FL algorithms on 44 datasets."
|
| 1761 |
+
],
|
| 1762 |
+
"image_footnote": [],
|
| 1763 |
+
"bbox": [
|
| 1764 |
+
138,
|
| 1765 |
+
70,
|
| 1766 |
+
866,
|
| 1767 |
+
141
|
| 1768 |
+
],
|
| 1769 |
+
"page_idx": 9
|
| 1770 |
+
},
|
| 1771 |
+
{
|
| 1772 |
+
"type": "list",
|
| 1773 |
+
"sub_type": "ref_text",
|
| 1774 |
+
"list_items": [
|
| 1775 |
+
"[10] X. Zhang, Y. Gao, J. Lin, and C.-T. Lu, \"Tapnet: Multivariate time series classification with attentional prototypical network,\" In Proc. AAAI 2020, pp. 6845-6852, 2020.",
|
| 1776 |
+
"[11] F. Karim, S. Majumdar, H. Darabi, and S. Harford, \"Multivariate LSTM-fcns for time series classification,\" Neural Networks, vol. 116, pp. 237-245, 2019.",
|
| 1777 |
+
"[12] Z. Xiao, X. Xu, H. Xing, S. Luo, P. Dai, and D. Zhan, \"Rtfn: A robust temporal feature network for time series classification.\" Inform. Sciences, vol. 571, pp. 65-86, 2021.",
|
| 1778 |
+
"[13] G. Li, B. Choi, J. Xu, S. Bhowmick, K.-P. Chun, and G. Wong, \"Shapenet: A shapelet-neural network approach for multivariate time series classification,\" In Proc. AAAI 2021, vol. 35, no. 9, pp. 8375-8383, 2021.",
|
| 1779 |
+
"[14] D. Lee, S. Lee, and H. Yu, \"Learnable dynamic temporal pooling for time series classification,\" In Proc. AAAI 2021, vol. 35, no. 9, pp. 8288-8296, 2021.",
|
| 1780 |
+
"[15] B. Arcas, G. Bacon, K. Bonawitz, and et al., \"Federated learning: Collaborative machine learning without centralized training data,\" https://ai.googleblog.com/2017/04/federated-learning-collaborative.html, 2017.",
|
| 1781 |
+
"[16] Q. Yang, Y. Liu, T. Chen, and Y. Tong, \"Federated machine learning: Concept and applications,\" ACM Trans. Intell. Syst. Technol., vol. 10, no. 2, pp. 1-19, 2019.",
|
| 1782 |
+
"[17] Q. Li, Z. Wen, Z. Wu, S. Hu, N. Wang, Y. Li, X. Liu, and B. He, \"A survey on federated learning systems: Vision, hype and reality for data privacy and protection,\" IEEE Trans. Knowl. Data Eng., pp. 1-1, 2021.",
|
| 1783 |
+
"[18] M. McMahan, E. Moore, D. Ramage, S. Hampson, and B. Arcas, \"Communication-efficient learning of deep networks from decentralized data,\" In Proc. AISTATS 2017, pp. 1-11, 2017.",
|
| 1784 |
+
"[19] J. Ma, Q. Zhang, J. Lou, L. Xiong, and J. Ho, \"Communication efficient federated generalized tensor factorization for collaborative health data analytics,\" In Proc. 30th The Web Conference 2021, 2021.",
|
| 1785 |
+
"[20] B. Liu, Y. Guo, and X. Chen, \"Pfa: Privacy-preserving federated adaptation for effective model personalization,\" In Proc. 30th The Web Conference 2021, 2021.",
|
| 1786 |
+
"[21] J. Wu, Z. Huang, Y. Ning, H. Wang, E. Chen, J. Yi, and B. Zhou, \"Hierarchical personalized federated learning for user modeling,\" In Proc. 30th The Web Conference 2021, 2021.",
|
| 1787 |
+
"[22] Q. Yang, J. Zhang, W. Hao, G. Spell, and L. Carin, \"Flop: Federated learning on medical datasets using partial networks,\" In Proc. ACM KDD'21, 2021.",
|
| 1788 |
+
"[23] Y. Liu, Y. Kang, C. Xing, T. Chen, and Q. Yang, \"A secure federated transfer learning framework,\" IEEE Intell. Syst., vol. 35, no. 4, pp. 70-82, 2020.",
|
| 1789 |
+
"[24] H. Yang, H. He, W. Zhang, and X. Cao, \"Fedsteg: A federated transfer learning framework for secure image steganalysis,\" IEEE Trans. Netw. Sci. Eng., vol. 8, no. 2, pp. 1084-1094, 2021.",
|
| 1790 |
+
"[25] D. Dimitriadis, K. Kumatani, R. Gmyr, Y. Gaur, and S. Eskimez, \"Federated transfer learning with dynamic gradient aggregation,\" arXiv preprint arXiv:2008.02452, 2020.",
|
| 1791 |
+
"[26] U. Majeed, S. Hassan, and C. Hong, \"Cross-silo model-based secure federated transfer learning for flow-based traffic classification,\" In Proc. ICOIN 2021, 2021.",
|
| 1792 |
+
"[27] H. Seo, J. Park, S. Oh, M. Bennis, and S.-L. Kim, \"Federated knowledge distillation,\" arXiv preprint arXiv:2011.02367, 2020.",
|
| 1793 |
+
"[28] C. He, M. Annavaram, and S. Avestimehr, \"Group knowledge transfer: Federated learning of large cnns at the edge,\" In Proc. NeurIPS 2020, 2020.",
|
| 1794 |
+
"[29] R. Mishra, H. Gupta, and T. Dutta, \"A network resource aware federated learning approach using knowledge distillation,\" In Proc. INFOCOM 2021, 2021."
|
| 1795 |
+
],
|
| 1796 |
+
"bbox": [
|
| 1797 |
+
75,
|
| 1798 |
+
234,
|
| 1799 |
+
491,
|
| 1800 |
+
940
|
| 1801 |
+
],
|
| 1802 |
+
"page_idx": 9
|
| 1803 |
+
},
|
| 1804 |
+
{
|
| 1805 |
+
"type": "list",
|
| 1806 |
+
"sub_type": "ref_text",
|
| 1807 |
+
"list_items": [
|
| 1808 |
+
"[30] S. Itahara, T. Nishio, Y. Koda, M. Morikura, and K. Yamamoto, \"Distillation-based semi-supervised federated learning for communication-efficient collaborative training with non-iid private data,\" IEEE Trans. Mobile Comput., pp. 1-1, 2021.",
|
| 1809 |
+
"[31] Y. Chen, X. Sun, and Y. Jin, \"Communication-efficient federated deep learning with layer-wise asynchronous model update and temporally weighted aggregation,\" IEEE Trans. Neur. Net. Learn. Sys., vol. 31, no. 10, pp. 4229-4238, 2020.",
|
| 1810 |
+
"[32] F. Sattler, S. Wiedemann, K.-R. Müller, and W. Samek, \"Robust and communication-efficient federated learning from non-i.i.d. data,\" IEEE Trans. Neur. Net. Learn. Sys., vol. 31, no. 9, pp. 3400-3413, 2020.",
|
| 1811 |
+
"[33] L. Nagalapatti and R. Narayanam, \"Game of gradients: Mitigating irrelevant clients in federated learning,\" In Proc. AAAI 2021, vol. 35, no. 10, pp. 9046-9054, 2021.",
|
| 1812 |
+
"[34] X. Cao, J. Jia, and N. Gong, \"Provably secure federated learning against malicious clients,\" In Proc. AAAI 2020, vol. 35, no. 8, pp. 6885-6893, 2020.",
|
| 1813 |
+
"[35] J. Hong, Z. Zhu, S. Yu, Z. Wang, H. Dodge, and J. Zhou, \"Federated adversarial debiasing for fair and transferable representations,\" In Proc. ACM KDD'21, vol. 1, no. 1, August 2021.",
|
| 1814 |
+
"[36] P. Zhou, L. Wang, L. Guo, S. Gong, and B. Zheng, \"A privacy-preserving distributed contextual federated online learning framework with big data support in social recommender systems,\" IEEE Trans. Knowl. Data Eng., vol. 33, no. 3, pp. 824-838, 2021.",
|
| 1815 |
+
"[37] Z. Pan, L. Hu, W. Tang, J. Li, Y. He, and Z. Liu, \"Privacy-preserving multi-granular federated neural architecture search a general framework,\" IEEE Trans. Knowl. Data Eng., pp. 1-1, 2021.",
|
| 1816 |
+
"[38] M. Crawshaw, \"Multi-task learning with deep neural networks: A survey,\" arXiv preprint arXiv: 2009.09796, 2020.",
|
| 1817 |
+
"[39] A. Ruiz, M. Flynn, and A. Bagnall, \"Benchmarking multivariate time series classification algorithms,\" arXiv preprint arXiv: 2007.13156, 2020.",
|
| 1818 |
+
"[40] J. Lines and A. Bagnall, \"Time series classification with ensembles of elastic distance measures,\" Data Min. Knowl. Disc., vol. 29, p. 565-592, 2015.",
|
| 1819 |
+
"[41] A. Bagnall, J. Lines, J. Hills, and A. Bostrom, \"Time series classification with cote: the collective of transformation-based ensembles,\" In Proc. ICDE 2016, pp. 1548-1549, 2016.",
|
| 1820 |
+
"[42] J. Lines, S. Taylor, and A. Bagnall, \"Time series classification with hive-cote: the hierarchical of transformation-based ensembles,\" ACM Trans. Knowl. Discov. D, vol. 21, no. 52, pp. 1-35, 2018.",
|
| 1821 |
+
"[43] K. Fauvel, E. Fromont, V. Masson, P. Faverdin, and A. Termier, \"Local cascade ensemble for multivariate data classification,\" arXiv preprint arXiv:2005.03645, 2020.",
|
| 1822 |
+
"[44] J. Lines, L. Davis, J. Hills, and A. Bagnall, \"A shapelet transform for time series classification,\" In Proc. ACM KDD'12, 2012.",
|
| 1823 |
+
"[45] M. Baydogan, G. Runger, and E. Tuv, \"A bag-of-features framework to classify time series,\" IEEE Trans. Pattern Anal., vol. 35, no. 11, pp. 2796-2802, 2013.",
|
| 1824 |
+
"[46] A. Dempster, D. Schmidt, and G. Webb, \"Minirocket: A very fast (almost) deterministic transform for time series classification,\" In Proc. ACM KDD'21, 2021.",
|
| 1825 |
+
"[47] M. Baydogan and G. Runger, \"Time series representation and similarity based on local auto patterns,\" Data Min. Knowl. Disc., vol. 30, p. 476-509, 2016.",
|
| 1826 |
+
"[48] J. Large, A. Bagnall, S. Malinowski, and R. Tavenard, \"From bop to boss and beyond: time series classification with dictionary based classifier,\" arXiv preprint arXiv:1809.06751, 2018.",
|
| 1827 |
+
"[49] W. Pei, H. Dibeklioglu, D. Tax, and L. van der Maaten, \"Multivariate time-series classification using the hidden-unit logistic model,\" IEEE Trans. Neur. Net. Lear., vol. 29, no. 4, pp. 920-931, 2018."
|
| 1828 |
+
],
|
| 1829 |
+
"bbox": [
|
| 1830 |
+
506,
|
| 1831 |
+
234,
|
| 1832 |
+
921,
|
| 1833 |
+
941
|
| 1834 |
+
],
|
| 1835 |
+
"page_idx": 9
|
| 1836 |
+
},
|
| 1837 |
+
{
|
| 1838 |
+
"type": "header",
|
| 1839 |
+
"text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015",
|
| 1840 |
+
"bbox": [
|
| 1841 |
+
73,
|
| 1842 |
+
31,
|
| 1843 |
+
421,
|
| 1844 |
+
44
|
| 1845 |
+
],
|
| 1846 |
+
"page_idx": 9
|
| 1847 |
+
},
|
| 1848 |
+
{
|
| 1849 |
+
"type": "page_number",
|
| 1850 |
+
"text": "10",
|
| 1851 |
+
"bbox": [
|
| 1852 |
+
906,
|
| 1853 |
+
32,
|
| 1854 |
+
921,
|
| 1855 |
+
42
|
| 1856 |
+
],
|
| 1857 |
+
"page_idx": 9
|
| 1858 |
+
},
|
| 1859 |
+
{
|
| 1860 |
+
"type": "list",
|
| 1861 |
+
"sub_type": "ref_text",
|
| 1862 |
+
"list_items": [
|
| 1863 |
+
"[50] H. Deng, G. Runger, E. Tuv, and M. Vladimir, \"A time series forest for classification and feature extraction,\" Inform. Sciences, vol. 239, p. 142-153, 2013.",
|
| 1864 |
+
"[51] B. Bai, G. Li, S. Wang, Z. Wu, and W. Yan, \"Time series classification based on multi-feature dictionary representation and ensemble learning,\" Expert Syst. Appl., vol. 169, pp. 1-10, 2021.",
|
| 1865 |
+
"[52] H. Fawaz, B. Lucas, G. Forestier, C. Pelletier, D. Schmidt, J. Weber, G. Webb, L. Idoumghar, P.-A. Muller, and F. Petitjean, \"Inceptiontime: finding alexnet for time series classification,\" Data Min. Knowl. Disc., vol. 34, p. 1936-1962, 2020.",
|
| 1866 |
+
"[53] Z. Xiao, X. Xu, H. Zhang, and E. Szczerbicki, \"A new multi-process collaborative architecture for time series classification,\" Knowl.-Based Syst., vol. 220, pp. 1-11, 2021.",
|
| 1867 |
+
"[54] W. Chen and K. Shi, \"Multi-scale attention convolutional neural network for time series classification,\" Neural Networks, vol. 136, pp. 126-140, 2021.",
|
| 1868 |
+
"[55] S. Huang, L. Xu, and C. Jiang, \"Artificial intelligence and advanced time series classification: Residual attention net for cross-domain modeling,\" Fintech with Artificial Intelligence, Big Data, and Blockchain, Blockchain Technologies, 2021.",
|
| 1869 |
+
"[56] Z. Xiao, X. Xu, H. Xing, R. Qu, F. Song, and B. Zhao, \"Rnts: Robust neural temporal search for time series classification,\" In Proc. IJCNN 2021, 2021.",
|
| 1870 |
+
"[57] J. Guo, B. Yu, S. Maybank, and D. Tao, \"Knowledge distillation: A survey,\" arXiv preprint arXiv: 2006.05525, 2020.",
|
| 1871 |
+
"[58] H. Dau, A. Bagnall, C.-C. M. Yeh, Y. Zhu, S. Gharghabi, C. Ratanamahatana, and E. Keogh, \"The ucr time series archive,\" IEEE/CAA Journal of Automatica Sinica, vol. 6, no. 6, pp. 1293-1305, 2019."
|
| 1872 |
+
],
|
| 1873 |
+
"bbox": [
|
| 1874 |
+
75,
|
| 1875 |
+
54,
|
| 1876 |
+
491,
|
| 1877 |
+
385
|
| 1878 |
+
],
|
| 1879 |
+
"page_idx": 10
|
| 1880 |
+
},
|
| 1881 |
+
{
|
| 1882 |
+
"type": "header",
|
| 1883 |
+
"text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015",
|
| 1884 |
+
"bbox": [
|
| 1885 |
+
73,
|
| 1886 |
+
32,
|
| 1887 |
+
419,
|
| 1888 |
+
44
|
| 1889 |
+
],
|
| 1890 |
+
"page_idx": 10
|
| 1891 |
+
},
|
| 1892 |
+
{
|
| 1893 |
+
"type": "page_number",
|
| 1894 |
+
"text": "11",
|
| 1895 |
+
"bbox": [
|
| 1896 |
+
906,
|
| 1897 |
+
32,
|
| 1898 |
+
919,
|
| 1899 |
+
42
|
| 1900 |
+
],
|
| 1901 |
+
"page_idx": 10
|
| 1902 |
+
}
|
| 1903 |
+
]
|
2201.00xxx/2201.00011/cf9a0028-3213-43e4-a02b-f411f6ccdabf_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2201.00xxx/2201.00011/cf9a0028-3213-43e4-a02b-f411f6ccdabf_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:75e826cff634d9782dd7ffdd2a8a794f718be68d30a18b2d160482d29558395b
|
| 3 |
+
size 6984053
|
2201.00xxx/2201.00011/full.md
ADDED
|
@@ -0,0 +1,408 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# An Efficient Federated Distillation Learning System for Multi-task Time Series Classification
|
| 2 |
+
|
| 3 |
+
Huanlai Xing, Member, IEEE, Zhiwen Xiao, Rong Qu, Senior Member, IEEE, Zonghai Zhu, and Bowen Zhao
|
| 4 |
+
|
| 5 |
+
Abstract—This paper proposes an efficient federated distillation learning system (EFDLS) for multi-task time series classification (TSC). EFDLS consists of a central server and multiple mobile users, where different users may run different TSC tasks. EFDLS has two novel components, namely a feature-based student-teacher (FBST) framework and a distance-based weights matching (DBWM) scheme. Within each user, the FBST framework transfers knowledge from its teacher's hidden layers to its student's hidden layers via knowledge distillation, with the teacher and student having identical network structure. For each connected user, its student model's hidden layers' weights are uploaded to the EFDLS server periodically. The DBWM scheme is deployed on the server, with the least square distance used to measure the similarity between the weights of two given models. This scheme finds a partner for each connected user such that the user's and its partner's weights are the closest among all the weights uploaded. The server exchanges and sends back the user's and its partner's weights to these two users which then load the received weights to their teachers' hidden layers. Experimental results show that the proposed EFDLS achieves excellent performance on a set of selected UCR2018 datasets regarding top-1 accuracy.
|
| 6 |
+
|
| 7 |
+
Index Terms—Deep Learning, Data Mining, Federated Learning, Knowledge Distillation, Time Series Classification.
|
| 8 |
+
|
| 9 |
+
# 1 INTRODUCTION
|
| 10 |
+
|
| 11 |
+
TIME series data is a series of time-ordered data points associated with one or more time-dependent variables and has been successfully applied to areas such as anomaly detection [1], [2], traffic flow forecasting [3], service matching [4], stock prediction [5], electroencephalogram (EEG) detection [6] and parking behavior prediction [7]. A significant amount of research attention has been dedicated to time series classification (TSC) [8]. For example, Wang et al. [9] introduced a fully convolutional network (FCN) for local feature extraction. Zhang et al. [10] devised an attentional prototype network (TapNet) to capture rich representations from the input. Karim et al. [11] proposed a long short-term memory (LSTM) fully convolutional network (FCN-LSTM) for multivariate TSC. A robust temporal feature network (RTFN) hybridizing temporal feature network and LSTM-based attention network was applied to extracting both the local and global patterns of data [12]. Li et al. [13] put forward a shapelet-neural network approach to mine highly-diversified representative shapelets from the input. Lee et al. [14] designed a learnable dynamic temporal pooling method to reduce the temporal pooling size of the hidden representations obtained.
|
| 12 |
+
|
| 13 |
+
TSC algorithms are usually data-driven, where data
|
| 14 |
+
|
| 15 |
+
- H. Xing, Z. Zhu, and B. Zhao are with the School of Computing and Artificial Intelligence, Southwest Jiaotong University, Chengdu 611756, China (Emails: hxx@home.swjtu.edu.cn; zhu@swjtu.edu.cn; cn16bz@icloud.com).
|
| 16 |
+
Z. Xiao is with Southwest Jiaotong University, Chengdu 611756, China, and Chengdu University of Information Technology, Chengdu 610103, China (Email: xiao1994zw@163.com).
|
| 17 |
+
R. Qu is with the School of Computer Science, University of Nottingham, Nottingham NG7 2RD, UK (Email: rong.qu@nottingham.ac.uk)
|
| 18 |
+
|
| 19 |
+
Manuscript received XX, XX; revised XX, XX (Corresponding author: Zhiwen Xiao).
|
| 20 |
+
|
| 21 |
+
comes from various application domains. Some data may contain private and sensitive information, such as bank account and ECG. However, traditional data collection operations could not well protect such information, easily resulting in users' privacy leakage during the data collection and distribution processes involved in model training. To overcome the problem above, Google [15], [16], [17] invented federated learning (FL). FL allows users to collectively harvest the advantages of shared models trained from their local data without sending original data to others. FederatedAveraging (FedAvg), federated transfer learning (FTL) and federated knowledge distillation (FKD) are the three mainstream research directions.
|
| 22 |
+
|
| 23 |
+
FedAvg calculates the average weights of the models of all users and shares the weights with each user in the FL system [18]. For instance, Ma et al. [19] devised a communication-efficient federated generalized tensor factorization for electronic health records. Liu et al. [20] used a federated adaptation framework to leverage the sparsity property of neural networks for generating privacy-preserving representations. A hierarchical personalized FL method aggregated heterogeneous user models, with privacy heterogeneity and model heterogeneity considered [21]. Yang et al. [22] modified the FedAvg method using partial networks for COVID-19 detection.
|
| 24 |
+
|
| 25 |
+
FTL introduces transfer learning techniques to promote knowledge transfer between different users, increasing system accuracy [23]. For example, Yang et al. [24] developed an FTL framework, FedSteg, for secure image steganalysis. An FTL method with dynamic gradient aggregation was proposed to weight the local gradients during the aggregation step when handling speech recognition tasks [25]. Majeed et al. [26] proposed an FTL-based structure to address traffic classification problems.
|
| 26 |
+
|
| 27 |
+
Unlike FedAvg and FTL, FKD takes the average of all users' weights as the weights for all teachers and transfers each teacher's knowledge to its corresponding student via knowledge distillation (KD) [27]. A group knowledge transfer training algorithm was adopted to train small convolutional neural networks (CNNs) and transfer their knowledge to a prominent server-side CNN [28]. Mishra et al. [29] proposed a resource-aware FKD approach for network resource allocation. Itahara et al. [30] devised a distillation-based semi-supervised FL framework for communication-efficient collaborative training with private data. Nowadays, FKD is attracting increasingly more research attention.
|
| 28 |
+
|
| 29 |
+
In addition, there is a variety of FL-based algorithms in the literature. For instance, Chen et al. [31] applied asynchronous learning and temporally weighted aggregation to enhancing system's performance. Sattler et al. [32] presented a sparse ternary compression method to meet various requirements of FL environment. A cooperative game involving a gradient algorithm was designed to tackle image classification and speech recognition tasks [33]. An ensemble FL system used a randomly selected subset of clients to learn multiple global models against malicious clients [34]. Hong et al. [35] combined adversarial learning and FL to produce federated adversarial debiasing for fair and transferable representations. Zhou et al. [36] proposed a privacy-preserving distributed contextual federated online learning framework with big data support for social recommender systems. Pan et al. [37] put forward a multi-granular federated neural architecture search framework to enable the automation of model architecture search in a federated and privacy-preserved setting.
|
| 30 |
+
|
| 31 |
+
Most FL algorithms are developed around single-task problems, where multiple users work together to complete a task, e.g., COVID-19 detection [22], traffic classification [26] or speech recognition [25]. It is quite challenging to directly apply these algorithms to multi-task problems unless efficient knowledge sharing among different tasks is enabled. Unfortunately, TSC is usually multi-task-oriented. Time series data is collected from various application domains, such as ECG, traffic flow, human activity recognition. Each time series dataset has specific characteristics, e.g., length and variance, which may differ significantly from others. Thus, time series data is highly imbalanced and strongly non-independent, and identically distributed (Non-I.I.D.). In multi-task learning, it is commonly recognized that knowledge sharing among different tasks helps increase the efficiency and accuracy of each task [38]. For most TSC algorithms, how to securely share knowledge of similar expertise among different tasks is still challenging. In other words, user privacy and knowledge sharing are two critical issues that need to be carefully addressed when devising practical multi-task TSC algorithms. To the best of our knowledge, FL for multi-task TSC has not received sufficient research attention.
|
| 32 |
+
|
| 33 |
+
We present an efficient federated distillation learning system (EFDLS) for multi-task TSC. This system consists of a central server and a number of mobile users running various TSC tasks simultaneously. Given two arbitrary users, they run either different tasks (e.g., ECG and motion) or the same task with different data sources to mimic real-world applications. EFDLS is characterized by a feature-based student-teacher (FBST) framework and a distance-based
|
| 34 |
+
|
| 35 |
+
weights matching (DBWM) scheme. The FBST framework is deployed on each user, where the student and teacher models have identical network structure. Within each user, its teacher's hidden layers' knowledge is transferred to its student's hidden layers, helping the student mine high-quality features from the data. The DBWM scheme is deployed on the EFDLS server, where the least square distance (LSD) is used to measure the similarity between the weights of two models. When all connected users' weights are uploaded completely, for an arbitrary connected user, the DBWM scheme finds the one with the most similar weights among all connected users. After that, the server sends the connected user's weights to the found one that then loads the weights to its teacher model's hidden layers.
|
| 36 |
+
|
| 37 |
+
Our main contributions are summarized below.
|
| 38 |
+
|
| 39 |
+
- We propose EFDLS for multi-task TSC, where each user runs one TSC task at a time and different users may run different TSC tasks. The data generated on different users is different. EFDLS aims at providing secure knowledge sharing of similar expertise among different tasks. This problem has not attracted enough research attention.
|
| 40 |
+
- In EFDLS, feature-based knowledge distillation is used for knowledge transfer within each user. Unlike the traditional FKD that adopts the average weights of all users to supervise the feature extraction process in each user, EFDLS finds the one with the most similar expertise (i.e., a partner) for each user according to LSD and offers knowledge sharing between the user and its partner.
|
| 41 |
+
Experimental results demonstrate that EFDLS outperforms six state-of-the-art FL algorithms considering 44 well-known datasets selected in the UCR 2018 archive regarding the mean accuracy, 'win'/'tie'/'lose' measure, and AVG_rank, which are all based on the top-1 accuracy. That shows the effectiveness of EFDLS in addressing TSC problems.
|
| 42 |
+
|
| 43 |
+
The rest of the paper is organized below. Section 2 reviews the existing TSC algorithms. Section 3 overviews the architecture of EFDLS and describes its key components. Section 4 provides and analyzes the experimental results, and conclusion is drawn in Section 5.
|
| 44 |
+
|
| 45 |
+
# 2 RELATED WORK
|
| 46 |
+
|
| 47 |
+
A large number of traditional and deep learning algorithms have been developed for TSC.
|
| 48 |
+
|
| 49 |
+
# 2.1 Traditional Algorithms
|
| 50 |
+
|
| 51 |
+
Two representative streams of algorithms are distance- and feature-based. For distance-based algorithms, it is quite common to combine the dynamic time warping (DTW) and nearest neighbor (NN), e.g., $DTW_{A}$ , $DTW_{I}$ and $DTW_{D}$ [39]. Besides, a significant number of DTW-NN-based ensemble algorithms taking advantage of DTW and NN have been proposed in the community. For example, Lines et al. [40] presented an elastic ensemble (EE) algorithm for feature extraction, with 11 types of 1-NN-based elastic distance considered. A collective of the transformation-based ensemble (COTE) with 37 NN-based classifiers was adopted to
|
| 52 |
+
|
| 53 |
+

|
| 54 |
+
Fig. 1. The schematic diagram of EFDLS. Note that 'FBST Framework' and 'DBWM Scheme' denote the feature-based student-teacher framework deployed on each user and the distance-based weights matching scheme run on the server. 'Conv x 9 128' represents a 1-dimensional convolutional neural network, where its filter size and channel sizes are set to 9 and 128. 'BN' is a batch normalization module, and 'ReLU' is the rectified linear unit activation function.
|
| 55 |
+
|
| 56 |
+
address various TSC problems [41]. The hierarchical vote collective of transformation-based ensembles (HIVE-COTE) [42] and local cascade ensemble [43] are two representative ensemble algorithms in the literature.
|
| 57 |
+
|
| 58 |
+
For feature-based algorithms, their aim is to capture sufficient discriminative features from the given data. For instance, Lines et al. [44] introduced a shapelet transformation method to find representative shapelets that reflected the trend of raw data. A bag-of-features representation framework was applied to extracting the information at different locations of sequences [45]. Dempster et al. [46] applied minimally random convolutional kernel transform to exploring the transformed features from data. In addition, the learned pattern similarity [47], bag of symbolic Fourier approximation symbols [48], hidden-unit logistic model [49], time series forest [50], and multi-feature dictionary representation and ensemble learning [51] are also representative algorithms.
|
| 59 |
+
|
| 60 |
+
# 2.2 Deep Learning Algorithms
|
| 61 |
+
|
| 62 |
+
By unfolding the internal representation hierarchy of data, deep learning algorithms focus on extracting the intrinsic connections among representations. Most of the existing deep learning models are either single-network- or dual-network-based [12]. A single-network-based model captures the significant correlations within the representation hierarchy of data by one (usually hybridized) network, e.g., FCN [9], ResNet [9], shapelet-neural network [13], InceptionTime
|
| 63 |
+
|
| 64 |
+
[52], dynamic temporal pooling [14], multi-process collaborative architecture [53], and multi-scale attention convolutional neural network [54]. In contrast, a dual-network-based model usually consists of two parallel networks, i.e., local-feature extraction network (LFN) and global-relation extraction network (GRN), such as FCN-LSTM [11], RTFN [12], ResNet-Transformer [55], RNTS [56], and TapNet [10].
|
| 65 |
+
|
| 66 |
+
Almost all algorithms above emphasized single-task TSC, e.g., traffic or gesture classification. However, TSC usually involves multiple tasks in real-world scenarios, like various applications with different TSC tasks run on different mobile devices in a mobile computing environment. Enabling efficient knowledge sharing of similar expertise among different tasks helps increase the average accuracy of these tasks. Nevertheless, sharing knowledge among different TSC tasks securely and efficiently is still a challenge. That is what FL aims for.
|
| 67 |
+
|
| 68 |
+
# 3 EFDLS
|
| 69 |
+
|
| 70 |
+
This section first overviews the architecture of EFDLS. Then, it introduces the feature-based student-teacher framework, distance-based weights matching scheme, and communication overhead.
|
| 71 |
+
|
| 72 |
+
# 3.1 System Overview
|
| 73 |
+
|
| 74 |
+
EFDLS is a secure distributed system for multi-task TSC. There is a central server and multiple mobile users. Let $N_{tot}$
|
| 75 |
+
|
| 76 |
+
and $N_{conn}$ denote the numbers of total and connected users in the system, respectively, where $N_{conn} \leq N_{tot}$ . Each user runs one TSC task at a time and different users might run different TSC tasks. For two arbitrary users, they run two different tasks, such as gesture and ECG classification, or the same task with different data sources.
|
| 77 |
+
|
| 78 |
+
The overview of EFDLS is shown in Fig. 1. In the system, users train their models locally based on knowledge distillation and share their model weights with users with similar expertise via the server. We propose FBST, a feature-based student-teacher framework that is deployed on each user as its learning model. Within each user, its teacher's hidden layers' knowledge is transferred to its student's hidden layers. For each connected user, its student model's hidden layers' weights are uploaded to the EFDLS server periodically. We propose DBWM, a distance-based weights matching scheme deployed on the server, with the LSD adopted to measure the similarity between the weights of two given models. After the weights of all connected users are uploaded completely, for each connected user, the DBWM scheme is launched to find the one with the most similar weights among all connected users. In this way, every user has a partner to match with. For each connected user, its uploaded weights are sent to its partner that then loads these weights to its teacher model's hidden layers. The server's role looks like a telecom-network switch. The EFDLS system allows users to benefit from knowledge sharing without sacrificing security and privacy.
|
| 79 |
+
|
| 80 |
+
# 3.2 Feature-based Student-Teacher Framework
|
| 81 |
+
|
| 82 |
+
In the FBST framework, the student and teacher models have identical network structure. Within each user, feature-based KD promotes knowledge transfer from the teacher's hidden layers to its student's hidden layers, helping the student capture rich and valuable representations from the input data.
|
| 83 |
+
|
| 84 |
+
# 3.2.1 Feature Extractor
|
| 85 |
+
|
| 86 |
+
The feature extractor contains multiple hidden layers and a classifier, as shown in Fig. 1. The hidden layers are responsible for local-feature extraction, including three Convolutional Blocks (i.e., ConvBlock1, ConvBlock2, and ConvBlock3), an average pooling layer, and a dense (i.e., fully-connected) layer. Each ConvBlock consists of a 1-dimensional CNN (Conv) module, a batch normalization (BN) module, and a rectified linear unit activation (ReLU) function, defined as:
|
| 87 |
+
|
| 88 |
+
$$
|
| 89 |
+
f _ {\text {c o n v b l o c k}} (x) = f _ {\text {r e l u}} \left(f _ {\text {b n}} \left(W _ {\text {c o n v}} \otimes x + b _ {\text {c o n v}}\right)\right) \tag {1}
|
| 90 |
+
$$
|
| 91 |
+
|
| 92 |
+
where, $W_{conv}$ and $b_{conv}$ are the weight and bias matrices of CNN, respectively. $\otimes$ represents the convolutional computation operation. $f_{bn}$ and $f_{relu}$ denote the batch normalization and ReLU functions, respectively.
|
| 93 |
+
|
| 94 |
+
Let $x_{bn} = \{x_1, x_2, \dots, x_{N_{bn}}\}$ denote the input of batch normalization (BN), where $x_i$ and $N_{bn}$ stand for the $i$ -th
|
| 95 |
+
|
| 96 |
+
instance and batch size, respectively. $f_{bn}(x_{bn})$ is defined in Eq. (2)
|
| 97 |
+
|
| 98 |
+
$$
|
| 99 |
+
\begin{array}{l} f _ {b n} \left(x _ {b n}\right) = f _ {b n} \left(x _ {1}, x _ {2}, \dots , x _ {N _ {b n}}\right) \\ = (\alpha \frac {x _ {1} - \mu}{\delta + \zeta} + \beta , \alpha \frac {x _ {2} - \mu}{\delta + \zeta} + \beta , \dots , \alpha \frac {x _ {N _ {b n}} - \mu}{\delta + \zeta} + \beta) \\ \end{array}
|
| 100 |
+
$$
|
| 101 |
+
|
| 102 |
+
$$
|
| 103 |
+
\mu = \frac {1}{N _ {b n}} \sum_ {i = 1} ^ {N _ {b n}} x _ {i}
|
| 104 |
+
$$
|
| 105 |
+
|
| 106 |
+
$$
|
| 107 |
+
\delta = \sqrt {\frac {1}{N _ {b n}} \sum_ {i = 1} ^ {N _ {b n}} \left(x _ {i} - \mu\right) ^ {2}} \tag {2}
|
| 108 |
+
$$
|
| 109 |
+
|
| 110 |
+
where, $\alpha \in \mathbb{R}^{+}$ and $\beta \in \mathbb{R}$ are the parameters to be learned during training. $\zeta > 0$ is an arbitrarily small number.
|
| 111 |
+
|
| 112 |
+
The classifier is composed of a dense layer and a Softmax function, mapping high-level features extracted from the hidden layers to the corresponding label.
|
| 113 |
+
|
| 114 |
+
# 3.2.2 Knowledge Distillation
|
| 115 |
+
|
| 116 |
+
Feature-based KD regularizes a student model by transferring knowledge from the corresponding teacher's hidden layers to the student's hidden layers [57]. For an arbitrary user, its student model captures sufficient discriminative representations from the data under its teacher model's supervision.
|
| 117 |
+
|
| 118 |
+
Let $O_{i}^{T,1}$ , $O_{i}^{T,2}$ , $O_{i}^{T,3}$ , and $O_{i}^{T,4}$ be the outputs of ConvBlock 1, ConvBlock 2, ConvBlock 3, and the dense layer of the teacher's hidden layers. Let $O_{i}^{S,1}$ , $O_{i}^{S,2}$ , $O_{i}^{S,3}$ , and $O_{i}^{S,4}$ be the outputs of ConvBlock 1, ConvBlock 2, ConvBlock 3, and the dense layer of the student's hidden layers. Following the previous work [28], we define the KD loss, $\mathcal{L}_i^{KD}$ , of $U_{i}$ as:
|
| 119 |
+
|
| 120 |
+
$$
|
| 121 |
+
\mathcal {L} _ {i} ^ {K D} = \sum_ {m = 1} ^ {4} \left| \left| O _ {i} ^ {T, m} - O _ {i} ^ {S, m} \right| \right| ^ {2} \tag {3}
|
| 122 |
+
$$
|
| 123 |
+
|
| 124 |
+
For $U_{i}$ , its total loss, $\mathcal{L}_i$ , consists of KD loss, $\mathcal{L}_i^{KD}$ , and supervised loss, $\mathcal{L}_i^{Sup}$ . As the previous studies in [10], [11], [12] suggested, $\mathcal{L}_i^{Sup}$ uses the cross-entropy function to measure the average difference between the ground truth labels and their prediction vectors, as shown in Eq. (4).
|
| 125 |
+
|
| 126 |
+
$$
|
| 127 |
+
\mathcal {L} _ {i} ^ {S u p} = - \frac {1}{N _ {s e g}} \sum_ {j = 1} ^ {N _ {s e g}} y _ {j} \log \left(\hat {y} _ {j}\right) \tag {4}
|
| 128 |
+
$$
|
| 129 |
+
|
| 130 |
+
where, $N_{seg}$ is the number of input vectors, and $y_{j}$ and $\hat{y}_j$ are the ground truth label and prediction vector of the $j$ -th input vector, respectively.
|
| 131 |
+
|
| 132 |
+
The total loss, $\mathcal{L}_i$ , is defined as:
|
| 133 |
+
|
| 134 |
+
$$
|
| 135 |
+
\mathcal {L} _ {i} = \epsilon \times \mathcal {L} _ {i} ^ {S u p} + (1 - \epsilon) \times \mathcal {L} _ {i} ^ {K D} \tag {5}
|
| 136 |
+
$$
|
| 137 |
+
|
| 138 |
+
where, $\epsilon \in (0,1)$ is a coefficient to balance $\mathcal{L}_i^{Sup}$ and $\mathcal{L}_i^{KD}$ . In this paper, we set $\epsilon = 0.9$ (More details can be found in Section 4.3).
|
| 139 |
+
|
| 140 |
+
# 3.3 Distance-based Weights Matching
|
| 141 |
+
|
| 142 |
+
The least square distance (LSD) is used to calculate the similarity between the weights of two given models. When the weights uploaded by all the connected users are received, the DBWM scheme immediately launches the weights matching process to find a partner for each connected user.
|
| 143 |
+
|
| 144 |
+
# 3.3.1 Least Square Distance
|
| 145 |
+
|
| 146 |
+
Let $FLEs$ denote the maximum number of federated learning epochs. Let $W_{i}^{S,k}$ and $W_{i}^{T,k}$ be the weights of the student and teacher models of $U_{i}$ at the $k$ -th federated learning epoch, $k = 1,2,\dots,FLEs$ . Denote the hidden layers' weights of the student and teacher models of $U_{i}$ by $W_{i}^{S_{hidden},k} \subset W_{i}^{S,k}$ and $W_{i}^{T_{hidden},k} \subset W_{i}^{T,k}$ , respectively. To be specific, $W_{i}^{S_{hidden},k}$ consists of the weights of ConvBlock 1, ConvBlock 2, ConvBlock 3, and the dense layer, namely, $W_{i}^{S_{1},k}$ , $W_{i}^{S_{2},k}$ , $W_{i}^{S_{3},k}$ , and $W_{i}^{S_{4},k}$ . So, we have $W_{i}^{S_{hidden},k} = \{W_{i}^{S_{1},k}, W_{i}^{S_{2},k}, W_{i}^{S_{3},k}, W_{i}^{S_{4},k}\}$ .
|
| 147 |
+
|
| 148 |
+
At the $k$ -th federated learning epoch, user $U_{i}, i = 1,2,\dots,N_{conn}$ , uploads its student model's hidden layers' weights, $W_{i}^{S_{hidden},k}$ , to the server. The server stores the uploaded weights in the weight set $\mathbf{W}$ defined in Eq. (6).
|
| 149 |
+
|
| 150 |
+
$$
|
| 151 |
+
\mathbf {W} = \left[ W _ {1} ^ {S _ {\text {h i d d e n}}, k}, W _ {2} ^ {S _ {\text {h i d d e n}}, k}, \dots , W _ {N _ {\text {c o n n}}} ^ {S _ {\text {h i d d e n}}, k} \right] \tag {6}
|
| 152 |
+
$$
|
| 153 |
+
|
| 154 |
+
The server then calculates the weights' square distance set, $d$ , based on $\mathbf{W}$ . $d$ is defined as:
|
| 155 |
+
|
| 156 |
+
$$
|
| 157 |
+
d = \left[ \begin{array}{c} d _ {1} \\ d _ {2} \\ \dots \\ d _ {N _ {\text {c o n n}}} \end{array} \right] = \left[ \begin{array}{c c c} d _ {1, 2} & \dots & d _ {1, N _ {\text {c o n n}}} \\ d _ {2, 1} & \dots & d _ {2, N _ {\text {c o n n}}} \\ \dots & \dots & \dots \\ d _ {N _ {\text {c o n n}}, 1} & \dots & d _ {N _ {\text {c o n n}}, N _ {\text {c o n n}} - 1} \end{array} \right] \tag {7}
|
| 158 |
+
$$
|
| 159 |
+
|
| 160 |
+
where, $d_{i,j}$ $(i,j\in \{1,\dots,N_{conn}\},i\neq j)$ is the square distance between $W_{i}^{S_{hidden},k}$ and $W_{j}^{S_{hidden},k}$ , as defined in Eq. (8).
|
| 161 |
+
|
| 162 |
+
$$
|
| 163 |
+
\begin{array}{l} d _ {i, j} = \left\| W _ {i} ^ {S _ {\text {h i d d e n}}, k} - W _ {j} ^ {S _ {\text {h i d d e n}}, k} \right\| ^ {2} \\ = \sum_ {m = 1} ^ {4} \left| \left| W _ {i} ^ {S _ {m}, k} - W _ {j} ^ {S _ {m}, k} \right| \right| ^ {2} \tag {8} \\ \end{array}
|
| 164 |
+
$$
|
| 165 |
+
|
| 166 |
+
We adopt the argmin function to return the index of the smallest distance for each row in $d$ and obtain the index set, ID. ID is defined in Eq. (9).
|
| 167 |
+
|
| 168 |
+
$$
|
| 169 |
+
\mathbf {I D} = \operatorname {a r g m i n} (d) = \left[ I D _ {1}, I D _ {2}, \dots , I D _ {N _ {\text {c o n n}}} \right] \tag {9}
|
| 170 |
+
$$
|
| 171 |
+
|
| 172 |
+
where, $ID_{i}$ is the index of the smallest distance for $U_{i}$ .
|
| 173 |
+
|
| 174 |
+
Based on $\mathbf{ID}$ , we easily obtain the LSD weight set, $\mathbf{W}^{LSD}$ from $\mathbf{W}$ . $\mathbf{W}^{LSD}$ is defined in Eq. (10).
|
| 175 |
+
|
| 176 |
+
$$
|
| 177 |
+
\begin{array}{l} \mathbf {W} ^ {L S D} = \left[ W _ {1} ^ {L S D, k}, W _ {2} ^ {L S D, k},..., W _ {N _ {\text {c o n n}}} ^ {L S D, k} \right] \tag {10} \\ = \left[ \mathbf {W} (I D _ {1}), \mathbf {W} (I D _ {2}), \dots , \mathbf {W} (I D _ {N _ {\text {c o n n}}}) \right] \\ \end{array}
|
| 178 |
+
$$
|
| 179 |
+
|
| 180 |
+
where, $W_{i}^{LSD,k}$ are the weights matched with those of $U_{i}$ at the $k$ -th federated learning epoch.
|
| 181 |
+
|
| 182 |
+
Once $U_{i}$ receives $W_{i}^{LSD,k}$ from the server, $U_{i}$ loads these weights to its teacher's hidden layers at the beginning of the next federated learning epoch, as defined in Eq. (11).
|
| 183 |
+
|
| 184 |
+
$$
|
| 185 |
+
W _ {i} ^ {T _ {\text {h i d d e n}}, k + 1} \leftarrow W _ {i} ^ {L S D, k} \tag {11}
|
| 186 |
+
$$
|
| 187 |
+
|
| 188 |
+
Alg. 1 and Alg. 2 show the user and server implementation procedures, respectively.
|
| 189 |
+
|
| 190 |
+
# 3.4 Communication Overhead
|
| 191 |
+
|
| 192 |
+
EFDLS does not launch the DBWM scheme unless the weights from all the $N_{conn}$ connected users are received. It helps reduce the interaction between the server and users, promoting the system's service efficiency. For user $U_{i}, i = 1,2,\dots,N_{conn}$ , we analyze the communication overhead of uploading and downloading its weights. Denote the
|
| 193 |
+
|
| 194 |
+
Algorithm 1 EFDLS User Implementation Procedure
|
| 195 |
+
1: procedure USERPROCEDURE $(U_{i},FLEs)$
|
| 196 |
+
2: Initialize all global variables;
|
| 197 |
+
3: for $k = 1$ to FLEs do
|
| 198 |
+
4: if $k = 1$ then
|
| 199 |
+
5: // The student model is trained alone
|
| 200 |
+
6: Obtain $W_{i}^{S,k}$ after the initial local training;
|
| 201 |
+
7: //Upload its hidden layers' weights to server
|
| 202 |
+
8: Upload $W_{i}^{S_{hidden},k}\subset W_{i}^{S,k}$ .
|
| 203 |
+
9: else
|
| 204 |
+
10: if receiveServer(Active)=1 then
|
| 205 |
+
11: // Connect to the EFDLS server
|
| 206 |
+
12: Receive $W_{i}^{LSD,k}$ .
|
| 207 |
+
13: Load $W_{i}^{LSD,k}$ to the teacher model;
|
| 208 |
+
14: Compute $\mathcal{L}_i$ by Eq. (5);
|
| 209 |
+
15: Update $W_{i}^{S,k + 1}$ using the gradient decent;
|
| 210 |
+
16: Upload $W_{i}^{S_{hidden},k + 1}\subset W_{i}^{S,k + 1}$ .
|
| 211 |
+
17: else
|
| 212 |
+
18: Disconnect from the EFDLS server.
|
| 213 |
+
19: end if
|
| 214 |
+
20: end if
|
| 215 |
+
21: end for
|
| 216 |
+
22: end procedure
|
| 217 |
+
|
| 218 |
+
Algorithm 2 EFDLS Server Implementation Procedure
|
| 219 |
+
1: procedure SERVERPROCEDURE $(N_{tot},N_{conn},FLEs)$
|
| 220 |
+
2: Initialize all global variables;
|
| 221 |
+
3: Set $\mathbf{W} = \varnothing$ .
|
| 222 |
+
4: for $k = 1$ to FLEs do
|
| 223 |
+
5: // Run on the server;
|
| 224 |
+
6: Clear and initialize W;
|
| 225 |
+
7: for $i = 1$ to $N_{conn}$ do
|
| 226 |
+
8: //Receive model weights from users;
|
| 227 |
+
9: Receive $W_{i}^{S_{hidden},k}$ .
|
| 228 |
+
10: Include $W_{i}^{S_{hidden},k}$ in W.
|
| 229 |
+
11: end for
|
| 230 |
+
12: for $i = 1$ to $N_{conn}$ do
|
| 231 |
+
13: Obtain $W_{i}^{LSD,k}$ based on W by Eqs. (6)-(10);
|
| 232 |
+
14: Send $W_{i}^{LSD,k}$ to $U_{i}$
|
| 233 |
+
15: end for
|
| 234 |
+
16: end for
|
| 235 |
+
17: end procedure
|
| 236 |
+
|
| 237 |
+
bandwidth requirement for uploading the student model's hidden layers' weights of $U_{i}$ once by $BW$ . Clearly, the bandwidth requirement for downloading the student model's hidden layers' weights from the server once is also $BW$ . That is because, for an arbitrary connected user, the weights uploaded to and those downloaded from the server are of the same size, given that each user has exactly the same model structure. At each federated learning epoch, the bandwidth requirement for user $U_{i}, i = 1,2,\dots,N_{conn}$ is estimated as $BW + BW = 2BW$ . For $U_{i}$ , its total communication overhead is in proportion to $2BW \cdot FLEs$ . Hence, the total communication overhead is proportional to $2BW \cdot FLEs \cdot N_{conn}$ .
|
| 238 |
+
|
| 239 |
+
TABLE 1 Details of 44 selected datasets from the UCR 2018.
|
| 240 |
+
|
| 241 |
+
<table><tr><td>Scale</td><td>Dataset</td><td>Train</td><td>Test</td><td>Class</td><td>SeriesLength</td><td>Type</td></tr><tr><td rowspan="11">Short</td><td>Chinatown</td><td>20</td><td>345</td><td>2</td><td>24</td><td>Traffic</td></tr><tr><td>MelbournePedestrian</td><td>1194</td><td>2439</td><td>10</td><td>24</td><td>Traffic</td></tr><tr><td>SonyAIBORobotSur.2</td><td>27</td><td>953</td><td>2</td><td>65</td><td>Sensor</td></tr><tr><td>SonyAIBORobotSur.1</td><td>20</td><td>601</td><td>2</td><td>70</td><td>Sensor</td></tr><tr><td>DistalPhalanxO.A.G</td><td>400</td><td>139</td><td>3</td><td>80</td><td>Image</td></tr><tr><td>DistalPhalanxO.C.</td><td>600</td><td>276</td><td>2</td><td>80</td><td>Image</td></tr><tr><td>DistalPhalanxTW</td><td>400</td><td>139</td><td>6</td><td>80</td><td>Image</td></tr><tr><td>TwoLeadECG</td><td>23</td><td>1139</td><td>2</td><td>82</td><td>ECG</td></tr><tr><td>MoteStrain</td><td>20</td><td>1252</td><td>2</td><td>84</td><td>Sensor</td></tr><tr><td>ECG200</td><td>100</td><td>100</td><td>2</td><td>96</td><td>ECG</td></tr><tr><td>CBF</td><td>30</td><td>900</td><td>3</td><td>128</td><td>Simulated</td></tr><tr><td 
rowspan="11">Medium</td><td>DodgerLoopDay</td><td>78</td><td>80</td><td>7</td><td>288</td><td>Sensor</td></tr><tr><td>DodgerLoopGame</td><td>20</td><td>138</td><td>2</td><td>288</td><td>Sensor</td></tr><tr><td>DodgerLoopWeekend</td><td>20</td><td>138</td><td>2</td><td>288</td><td>Sensor</td></tr><tr><td>CricketX</td><td>390</td><td>390</td><td>12</td><td>300</td><td>Motion</td></tr><tr><td>CricketY</td><td>390</td><td>390</td><td>12</td><td>300</td><td>Motion</td></tr><tr><td>CricketZ</td><td>390</td><td>390</td><td>12</td><td>300</td><td>Motion</td></tr><tr><td>FaceFour</td><td>24</td><td>88</td><td>4</td><td>350</td><td>Image</td></tr><tr><td>Ham</td><td>109</td><td>105</td><td>2</td><td>431</td><td>Spectro</td></tr><tr><td>Meat</td><td>60</td><td>60</td><td>3</td><td>448</td><td>Spectro</td></tr><tr><td>Fish</td><td>175</td><td>175</td><td>7</td><td>463</td><td>Image</td></tr><tr><td>Beef</td><td>30</td><td>30</td><td>5</td><td>470</td><td>Spectro</td></tr><tr><td rowspan="11">Long</td><td>OliveOil</td><td>30</td><td>30</td><td>4</td><td>570</td><td>Spectro</td></tr><tr><td>Car</td><td>60</td><td>60</td><td>4</td><td>577</td><td>Sensor</td></tr><tr><td>Lightning2</td><td>60</td><td>61</td><td>2</td><td>637</td><td>Sensor</td></tr><tr><td>Computers</td><td>250</td><td>250</td><td>2</td><td>720</td><td>Device</td></tr><tr><td>Mallat</td><td>55</td><td>2345</td><td>8</td><td>1024</td><td>Simulated</td></tr><tr><td>Phoneme</td><td>214</td><td>1896</td><td>39</td><td>1024</td><td>Sensor</td></tr><tr><td>StarLightCurves</td><td>1000</td><td>8236</td><td>3</td><td>1024</td><td>Sensor</td></tr><tr><td>MixedShapesRegularT.</td><td>500</td><td>2425</td><td>5</td><td>1024</td><td>Image</td></tr><tr><td>MixedShapesSmallT.</td><td>100</td><td>2425</td><td>5</td><td>1024</td><td>Image</td></tr><tr><td>ACSF1</td><td>100</td><td>100</td><td>10</td><td>1460</td><td>Device</td></tr><tr><td>SemgHandG.Ch2</td><td>300</td><td>600</td><td>2</td><td>1500</td><td>Spectrum</td></tr><tr><
td rowspan="11">Vary</td><td>AllGestureWiimoteX</td><td>300</td><td>700</td><td>10</td><td>Vary</td><td>Sensor</td></tr><tr><td>AllGestureWiimoteY</td><td>300</td><td>700</td><td>10</td><td>Vary</td><td>Sensor</td></tr><tr><td>AllGestureWiimoteZ</td><td>300</td><td>700</td><td>10</td><td>Vary</td><td>Sensor</td></tr><tr><td>GestureMidAirD1</td><td>208</td><td>130</td><td>26</td><td>Vary</td><td>Trajectory</td></tr><tr><td>GestureMidAirD2</td><td>208</td><td>130</td><td>26</td><td>Vary</td><td>Trajectory</td></tr><tr><td>GestureMidAirD3</td><td>208</td><td>130</td><td>26</td><td>Vary</td><td>Trajectory</td></tr><tr><td>GesturePebbleZ1</td><td>132</td><td>172</td><td>6</td><td>Vary</td><td>Sensor</td></tr><tr><td>GesturePebbleZ2</td><td>146</td><td>158</td><td>6</td><td>Vary</td><td>Sensor</td></tr><tr><td>PickupGestureW.Z</td><td>50</td><td>50</td><td>10</td><td>Vary</td><td>Sensor</td></tr><tr><td>PLAID</td><td>537</td><td>537</td><td>11</td><td>Vary</td><td>Device</td></tr><tr><td>ShakeGestureW.Z</td><td>50</td><td>50</td><td>10</td><td>Vary</td><td>Sensor</td></tr></table>
|
| 242 |
+
|
| 243 |
+
# 4 PERFORMANCE EVALUATION
|
| 244 |
+
|
| 245 |
+
This section first introduces the experimental setup and performance metrics and then focuses on the ablation study. Finally, the performance of EFDLS is evaluated.
|
| 246 |
+
|
| 247 |
+
# 4.1 Experimental Setup
|
| 248 |
+
|
| 249 |
+
# 4.1.1 Data Description
|
| 250 |
+
|
| 251 |
+
The UCR 2018 archive is one of the most popular time series repositories with 128 datasets of different lengths in various application domains [58]. Following the previous work [53], we divide the UCR 2018 archive into 4 categories with respect to dataset length, namely, 'short', 'medium', 'long', and 'vary'. The length of a 'short' dataset is no more than 200. That of a 'medium' one varies from 200 to 500. A 'long' one has a length of over 500 while a 'vary' one has an indefinite length. The 128 datasets are composed of 41 'short', 32
|
| 252 |
+
|
| 253 |
+
'medium', 44 'long', and 11 'vary' datasets. Unfortunately, our limited computing resources do not allow us to consider the whole 128 datasets (detailed hardware specification can be found in Subsection Implementation Details). There are seven algorithms for performance comparison and the average training time on the 128 datasets cost more than 32 hours for a single federated learning epoch. So, we select 11 datasets from each category, resulting in 44 datasets selected. More details are found in Table 1.
|
| 254 |
+
|
| 255 |
+
# 4.1.2 Implementation Details
|
| 256 |
+
|
| 257 |
+
Following previous studies [8], [9], [10], [11], [53], we set the decay value of batch normalization to 0.9. We use the $L_{2}$ regularization to avoid overfitting during the training process. Meanwhile, we adopt the AdamOptimizer with Pytorch<sup>1</sup>, where the initial learning rate is set to 0.0001.
|
| 258 |
+
|
| 259 |
+
TABLE 2 Experimental results of different algorithms on 44 datasets when $N_{conn} = 44$ and $N_{tot} = 44$
|
| 260 |
+
|
| 261 |
+
<table><tr><td>Dataset</td><td>Baseline</td><td>FedAvg</td><td>FedAvgM</td><td>FedGrad</td><td>FTL</td><td>FTLS</td><td>FKD</td><td>EFDLS</td></tr><tr><td>Chinatown</td><td>0.9623</td><td>0.2754</td><td>0.2754</td><td>0.9623</td><td>0.9665</td><td>0.9537</td><td>0.9275</td><td>0.9478</td></tr><tr><td>MelbournePedestrian</td><td>0.9139</td><td>0.1</td><td>0.1</td><td>0.7784</td><td>0.8486</td><td>0.8922</td><td>0.9379</td><td>0.9453</td></tr><tr><td>SonyAIBORobotSur.2</td><td>0.8961</td><td>0.383</td><td>0.383</td><td>0.8363</td><td>0.8688</td><td>0.9035</td><td>0.915</td><td>0.8961</td></tr><tr><td>SonyAIBORobotSur.1</td><td>0.8652</td><td>0.5707</td><td>0.6619</td><td>0.7887</td><td>0.8236</td><td>0.8702</td><td>0.8369</td><td>0.8819</td></tr><tr><td>DistalPhalanxO.A.G</td><td>0.6763</td><td>0.1079</td><td>0.1079</td><td>0.6187</td><td>0.6259</td><td>0.6475</td><td>0.6691</td><td>0.6475</td></tr><tr><td>DistalPhalanxO.C.</td><td>0.75</td><td>0.417</td><td>0.6619</td><td>0.6776</td><td>0.7464</td><td>0.7465</td><td>0.7536</td><td>0.7428</td></tr><tr><td>DistalPhalanxTW</td><td>0.6547</td><td>0.1295</td><td>0.1295</td><td>0.554</td><td>0.6259</td><td>0.6547</td><td>0.6835</td><td>0.6403</td></tr><tr><td>TwoLeadECG</td><td>0.7463</td><td>0.4996</td><td>0.4996</td><td>0.7305</td><td>0.7287</td><td>0.7278</td><td>0.8112</td><td>0.7665</td></tr><tr><td>MoteStrain</td><td>0.7788</td><td>0.5391</td><td>0.5391</td><td>0.6933</td><td>0.7923</td><td>0.8283</td><td>0.8163</td><td>0.8203</td></tr><tr><td>ECG200</td><td>0.86</td><td>0.36</td><td>0.36</td><td>0.8</td><td>0.84</td><td>0.85</td><td>0.87</td><td>0.85</td></tr><tr><td>CBF</td><td>0.987</td><td>0.3333</td><td>0.5911</td><td>0.5911</td><td>0.973</td><td>0.9922</td><td>0.9922</td><td>0.9956</td></tr><tr><td>DodgerLoopDay</td><td>0.575</td><td>0.15</td><td>0.15</td><td>0.3875</td><td>0.55</td><td>0.525</td><td>0.5125</td><td>0.5375</td></tr><tr><td>DodgerLoopGame</td><td>0.6884</td><td>0.5217</td><td>0.5217</td><td>0.623
2</td><td>0.7826</td><td>0.7609</td><td>0.7609</td><td>0.7464</td></tr><tr><td>DodgerLoopWeekend</td><td>0.8261</td><td>0.7391</td><td>0.7391</td><td>0.7319</td><td>0.8841</td><td>0.8913</td><td>0.913</td><td>0.9203</td></tr><tr><td>CricketX</td><td>0.5897</td><td>0.0692</td><td>0.1371</td><td>0.2256</td><td>0.5667</td><td>0.6128</td><td>0.659</td><td>0.6718</td></tr><tr><td>CricketY</td><td>0.5051</td><td>0.0949</td><td>0.1357</td><td>0.1949</td><td>0.5</td><td>0.4949</td><td>0.5538</td><td>0.5974</td></tr><tr><td>CricketZ</td><td>0.6205</td><td>0.0846</td><td>0.0846</td><td>0.2256</td><td>0.5692</td><td>0.6</td><td>0.6692</td><td>0.7256</td></tr><tr><td>FaceFour</td><td>0.6477</td><td>0.1591</td><td>0.1591</td><td>0.4659</td><td>0.6591</td><td>0.6932</td><td>0.6932</td><td>0.6818</td></tr><tr><td>Ham</td><td>0.7143</td><td>0.4857</td><td>0.4857</td><td>0.6762</td><td>0.7048</td><td>0.7143</td><td>0.7048</td><td>0.6952</td></tr><tr><td>Meat</td><td>0.8667</td><td>0.3333</td><td>0.3333</td><td>0.7333</td><td>0.8333</td><td>0.8333</td><td>0.9</td><td>0.917</td></tr><tr><td>Fish</td><td>0.5657</td><td>0.1371</td><td>0.1371</td><td>0.2857</td><td>0.5771</td><td>0.6</td><td>0.6</td><td>0.6229</td></tr><tr><td>Beef</td><td>0.7667</td><td>0.2</td><td>0.2</td><td>0.5667</td><td>0.7</td><td>0.7</td><td>0.7</td><td>0.7667</td></tr><tr><td>OliveOil</td><td>0.8333</td><td>0.167</td><td>0.167</td><td>0.7</td><td>0.8667</td><td>0.8667</td><td>0.8333</td><td>0.8333</td></tr><tr><td>Car</td><td>0.5833</td><td>0.233</td><td>0.233</td><td>0.5</td><td>0.5667</td><td>0.5833</td><td>0.5667</td><td>0.6333</td></tr><tr><td>Lightning2</td><td>0.7869</td><td>0.459</td><td>0.459</td><td>0.7705</td><td>0.7869</td><td>0.8033</td><td>0.7541</td><td>0.7869</td></tr><tr><td>Computers</td><td>0.78</td><td>0.5</td><td>0.5</td><td>0.584</td><td>0.688</td><td>0.748</td><td>0.788</td><td>0.804</td></tr><tr><td>Mallat</td><td>0.7446</td><td>0.1254</td><td>0.1254</td><td>0.4141</td><td>0.7638</td><td>0
.7539</td><td>0.7906</td><td>0.8299</td></tr><tr><td>Phoneme</td><td>0.2231</td><td>0.02</td><td>0.02</td><td>0.1108</td><td>0.2147</td><td>0.2247</td><td>0.2859</td><td>0.2954</td></tr><tr><td>StarLightCurves</td><td>0.9534</td><td>0.1429</td><td>0.1429</td><td>0.5062</td><td>0.9519</td><td>0.9584</td><td>0.9571</td><td>0.9582</td></tr><tr><td>MixedShapesRegularT.</td><td>0.8586</td><td>0.1889</td><td>0.1889</td><td>0.2223</td><td>0.8384</td><td>0.8598</td><td>0.8643</td><td>0.8907</td></tr><tr><td>MixedShapesSmallT.</td><td>0.8029</td><td>0.1889</td><td>0.1889</td><td>0.2421</td><td>0.7942</td><td>0.8062</td><td>0.8318</td><td>0.8388</td></tr><tr><td>ACSIF1</td><td>0.77</td><td>0.1</td><td>0.19</td><td>0.19</td><td>0.82</td><td>0.89</td><td>0.87</td><td>0.88</td></tr><tr><td>SengHandG.Ch2</td><td>0.7067</td><td>0.65</td><td>0.65</td><td>0.555</td><td>0.72</td><td>0.7383</td><td>0.6867</td><td>0.72</td></tr><tr><td>AllGestureWiimoteX</td><td>0.2643</td><td>0.1</td><td>0.1</td><td>0.1371</td><td>0.2729</td><td>0.3043</td><td>0.2929</td><td>0.2914</td></tr><tr><td>AllGestureWiimoteY</td><td>0.2585</td><td>0.1</td><td>0.1</td><td>0.1357</td><td>0.3186</td><td>0.3029</td><td>0.2529</td><td>0.2829</td></tr><tr><td>AllGestureWiimoteZ</td><td>0.2886</td><td>0.1</td><td>0.1</td><td>0.1343</td><td>0.2671</td><td>0.29</td><td>0.4014</td><td>0.3786</td></tr><tr><td>GestureMidAirD1</td><td>0.5538</td><td>0.0384</td><td>0.0384</td><td>0.0923</td><td>0.5462</td><td>0.5538</td><td>0.4615</td><td>0.5769</td></tr><tr><td>GestureMidAirD2</td><td>0.4231</td><td>0.0384</td><td>0.0384</td><td>0.0923</td><td>0.4154</td><td>0.4462</td><td>0.4692</td><td>0.5308</td></tr><tr><td>GestureMidAirD3</td><td>0.3</td><td>0.0384</td><td>0.0384</td><td>0.0923</td><td>0.2693</td><td>0.2615</td><td>0.2231</td><td>0.2769</td></tr><tr><td>GesturePebbleZ1</td><td>0.4419</td><td>0.1628</td><td>0.1628</td><td>0.2558</td><td>0.4767</td><td>0.4826</td><td>0.5</td><td>0.4883</td></tr><tr><td>GesturePebbleZ2<
/td><td>0.4241</td><td>0.1519</td><td>0.1519</td><td>0.2722</td><td>0.5126</td><td>0.557</td><td>0.6013</td><td>0.5886</td></tr><tr><td>PickupGestureW.Z</td><td>0.56</td><td>0.1</td><td>0.1</td><td>0.24</td><td>0.62</td><td>0.6</td><td>0.7</td><td>0.74</td></tr><tr><td>PLAID</td><td>0.203</td><td>0.0615</td><td>0.0615</td><td>0.0615</td><td>0.2198</td><td>0.2253</td><td>0.2924</td><td>0.2589</td></tr><tr><td>ShakeGestureW.Z</td><td>0.92</td><td>0.1</td><td>0.1</td><td>0.1</td><td>0.96</td><td>0.92</td><td>0.96</td><td>0.96</td></tr><tr><td>Win</td><td>4</td><td>0</td><td>0</td><td>0</td><td>3</td><td>7</td><td>10</td><td>18</td></tr><tr><td>Tie</td><td>1</td><td>0</td><td>0</td><td>0</td><td>2</td><td>1</td><td>1</td><td>2</td></tr><tr><td>Lose</td><td>39</td><td>44</td><td>44</td><td>44</td><td>39</td><td>36</td><td>33</td><td>24</td></tr><tr><td>Best</td><td>5</td><td>0</td><td>0</td><td>0</td><td>5</td><td>8</td><td>11</td><td>20</td></tr><tr><td>MeanACC</td><td>0.6622</td><td>0.2377</td><td>0.2557</td><td>0.4445</td><td>0.6604</td><td>0.6743</td><td>0.6878</td><td>0.7014</td></tr><tr><td>AVG_rank</td><td>3.5455</td><td>7.5</td><td>7.3409</td><td>6.0113</td><td>3.9204</td><td>2.8977</td><td>2.6364</td><td>2.1478</td></tr></table>
|
| 262 |
+
|
| 263 |
+
All experiments were conducted on a desktop with an Nvidia GTX 1080Ti GPU with 11GB memory, and an AMD R5 1400 CPU with 16G RAM under the Ubuntu 18.04 OS.
|
| 264 |
+
|
| 265 |
+
# 4.2 Performance Metrics
|
| 266 |
+
|
| 267 |
+
To evaluate FL algorithms' performance, we use three well-known metrics: 'win'/'tie'/'lose', mean accuracy (MeanACC), and AVG_rank, all based on the top-1 accuracy. For an arbitrary algorithm, its 'win', 'tie', and 'lose' values indicate on how many datasets it is better than, equal to,
|
| 268 |
+
|
| 269 |
+
and worse than the others, respectively; its 'best' value is the summation of the corresponding 'win' and 'tie' values. The AVG_rank score reflects the average difference between the accuracy values of a model and the best accuracy values among all models [9], [10], [11], [12], [56].
|
| 270 |
+
|
| 271 |
+
# 4.3 Ablation Study
|
| 272 |
+
|
| 273 |
+
We use the 44 UCR2018 datasets above to study the impact of parameter settings on the performance of EFDLS. Assume there are 44 users in the system, i.e., $N_{tot} = 44$ . Each user
|
| 274 |
+
|
| 275 |
+

|
| 276 |
+
Fig. 2. MeanACC results obtained by EFDLS with different ratios of $N_{conn}$ to $N_{tot}$ on 44 datasets when $N_{tot} = 44$ .
|
| 277 |
+
|
| 278 |
+

|
| 279 |
+
Fig. 3. MeanACC results with different $\epsilon$ values on 44 datasets when $N_{conn} = 44$ and $N_{tot} = 44$ .
|
| 280 |
+
|
| 281 |
+
runs a TSC task with data coming from a specific dataset. For any two users, if they run identical tasks, e.g., motion recognition, their data sources come from different datasets, e.g., CricketX and CricketY. In the experiments, each user's data comes from one of the 44 datasets.
|
| 282 |
+
|
| 283 |
+
# 4.3.1 Impact of $N_{conn}$
|
| 284 |
+
|
| 285 |
+
To investigate the impact of $N_{conn}$ on the EFDLS's performance, we select four ratios of $N_{conn}$ to $N_{tot}$ , namely $40\%$ , $60\%$ , $80\%$ , and $100\%$ . For example, $40\%$ means there are 18 connected users for weights uploading, given $N_{tot} = 44$ . The MeanACC results obtained by EFDLS with different $N_{conn}$ values on 44 datasets are shown in Fig. 2. One can easily observe that a larger $N_{conn}$ tends to result in a higher MeanACC value. That is because as $N_{conn}$ increases, a larger amount of time series data is utilized by the system and thus more discriminative representations are captured.
|
| 286 |
+
|
| 287 |
+
# 4.3.2 Impact of $\epsilon$
|
| 288 |
+
|
| 289 |
+
$\epsilon$ is a coefficient to balance each connected user's supervised and KD losses in EFDLS. Fig. 3 shows the MeanACC results with different $\epsilon$ values when $N_{conn} = 44$ and $N_{tot} = 44$ . It is seen that $\epsilon = 0.90$ results in the highest MeanACC score, i.e., 0.7014. That means $\epsilon = 0.90$ is appropriate to reduce each user's entropy on its data during training.
|
| 290 |
+
|
| 291 |
+
# 4.4 Experimental Analysis
|
| 292 |
+
|
| 293 |
+
To evaluate the overall performance of EFDLS, we compare it with seven benchmark algorithms listed below against 'Win'/'Lose'/'Tie', MeanACC, and AVG_rank.
|
| 294 |
+
|
| 295 |
+
- Baseline: the single-task TSC algorithm with the feature extractor in Fig. 1 deployed on each user. Note that each user has a unique dataset to run and knowledge sharing among them is disabled.
|
| 296 |
+
- FedAvg: the FederatedAveraging method using the feature extractor in Fig. 1 [18].
|
| 297 |
+
- FedAvgM: the modified FedAvg using the feature extractor in Fig. 1 [27].
|
| 298 |
+
- FedGrad: the federated gradient method using the feature extractor in Fig. 1 [16].
|
| 299 |
+
- FTL: the federated transfer learning method using the feature extractor in Fig. 1 [23].
|
| 300 |
+
- FTLS: FTL [23] based on the DBWM scheme using the feature extractor in Fig. 1.
|
| 301 |
+
- FKD: the federated knowledge distillation using the feature extractor in Fig. 1 [27], [28]. For fair comparison, FKD uses the same student-teacher network structure as EFDLS.
|
| 302 |
+
|
| 303 |
+
Table 2 shows the top-1 accuracy results with various algorithms on 44 UCR2018 datasets when $N_{conn} = 44$ and $N_{tot} = 44$ . To visualize the differences between EFDLS and the others, Fig. 4 depicts the accuracy plots of EFDLS against each of the remaining algorithms on 44 datasets. In addition, the AVG_rank results are shown in Fig. 5.
|
| 304 |
+
|
| 305 |
+
First of all, we study the effectiveness of knowledge sharing among users by comparing EFDLS with Baseline. One can observe that EFDLS beats Baseline in every aspect, including 'Win'/'Lose'/'Tie', MeanACC, and AVG_rank. For example, the former wins 18 out of 44 datasets while the latter wins only 4. The accuracy plot of EFDLS vs. Baseline in Fig. 4(a) also supports the finding above. The main difference between EFDLS and Baseline is that the latter only uses standalone feature extractors which do not share the locally collected knowledge with each other. On the other hand, with sufficient knowledge sharing of similar expertise among users enabled, EFDLS improves the system's generalization ability and thus achieves promising multi-task TSC performance.
|
| 306 |
+
|
| 307 |
+
Secondly, we study the effectiveness of the FBST framework by comparing EFDLS with FTLS. It is easily seen that EFDLS outperforms FTLS regarding the 'best', MeanACC, and AVG_rank values. The accuracy plot of EFDLS vs. FTLS in Fig. 4(f) also supports this. The FBST framework allows efficient knowledge transfer from teacher to student, helping the student capture sufficient discriminative representations from the input data. On the contrary, the FTLS's learning model lacks self-generalization, leading to deteriorated performance during knowledge sharing.
|
| 308 |
+
|
| 309 |
+
Thirdly, we study the effectiveness of the DBWM scheme by comparing EFDLS with FKD. Apparently, EFDLS outweighs FKD with respect to 'best', MeanACC, and AVG_rank. It is backed by the accuracy plot of EFDLS vs. FKD in Fig. 4(g). As mentioned before, at each federated learning epoch, the DBWM scheme finds a partner for each user and then EFDLS offers weights exchange between each pair of connected users, which realizes knowledge sharing
|
| 310 |
+
|
| 311 |
+

|
| 312 |
+
(a)
|
| 313 |
+
|
| 314 |
+

|
| 315 |
+
(b)
|
| 316 |
+
|
| 317 |
+

|
| 318 |
+
(c)
|
| 319 |
+
|
| 320 |
+

|
| 321 |
+
(d)
|
| 322 |
+
|
| 323 |
+

|
| 324 |
+
(e)
|
| 325 |
+
|
| 326 |
+

|
| 327 |
+
(f)
|
| 328 |
+
|
| 329 |
+

|
| 330 |
+
(g)
|
| 331 |
+
Fig. 4. Accuracy plot results showing the performance difference between two given algorithms. (a) Baseline vs. EFDLS; (b) FedAvg vs. EFDLS; (c) FedAvgM vs. EFDLS; (d) FedGrad vs. EFDLS; (e) FTL vs. EFDLS; (f) FTLS vs. EFDLS; (g) FKD vs. EFDLS.
|
| 332 |
+
|
| 333 |
+
of similar expertise among different users. In contrast, FKD adopts the average weights to supervise the feature extraction process in each user. It is likely to lead to catastrophic forgetting in a user whose weights significantly differ from the average weights.
|
| 334 |
+
|
| 335 |
+
Last but not least, we compare EFDLS with all the seven algorithms. One can easily observe that our EFDLS is no doubt the best among all algorithms for comparison since ours obtains the highest MeanACC and 'best' values, namely 0.7014 and 20, and the smallest AVG_rank value, namely 2.1478. The FKD takes the second position when considering its 'best', MeanACC, and AVG_rank values, namely, 11, 0.6878, and 2.6364. On the other hand, FedAvg and its variant, FedAvgM, are the two worst algorithms. The following explains the reasons behind the findings above. When faced with the multi-task TSC problem, each user runs one TSC task, and different users may run different TSC tasks. The FBST framework and the DBWM scheme help EFDLS to realize fine-grained knowledge sharing between any pair of users with the most similar expertise. FKD uses the average of all users' weights to guide each user to capture valuable features from the data, promoting coarse-grained knowledge sharing among users. On the other hand, FedAvg and FedAvgM simply take the average weights of all users as each user's weights, which may cause catastrophic forgetting and hence poor performance on multi-task TSC.
|
| 336 |
+
|
| 337 |
+
# 5 CONCLUSION
|
| 338 |
+
|
| 339 |
+
The FBST framework promotes knowledge transfer from a teacher's to its student's hidden layers, helping the student capture instance-level representations from the input. The DBWM scheme finds a partner for each user in terms of similarity between their uploaded weights, enabling knowledge sharing of similar expertise among different users.
|
| 340 |
+
|
| 341 |
+
With FBST and DBWM, the proposed EFDLS securely shares knowledge of similar expertise among different tasks for multi-task time series classification. Experimental results show that compared with six benchmark FL algorithms, EFDLS is a winner on 44 datasets with respect to the MeanACC and AVG_rank metrics and on 20 datasets in terms of the 'best' measure. In particular, compared with single-task Baseline, EFDLS obtains $32/4/8$ regarding the 'win'/'tie'/'lose' metric. That reflects the potential of EFDLS to be applied to multi-task TSC problems in various real-world domains.
|
| 342 |
+
|
| 343 |
+
# REFERENCES
|
| 344 |
+
|
| 345 |
+
[1] G. Pang and C. Aggarwal, "Toward explainable deep anomaly detection," In Proc. ACM KDD'21, p. 4056-4057, 2021.
|
| 346 |
+
[2] J. Li, H. He, H. He, L. Li, and Y. Xiang, "An end-to-end framework with multisource monitoring data for bridge health anomaly identification," IEEE Trans. Instrum. Meas., vol. 70, pp. 1-9, 2021.
|
| 347 |
+
[3] X. Ma, J. Wu, S. Xue, J. Yang, C. Zhou, Q. Sheng, H. Xiong, and L. Akoglu, "A comprehensive survey on graph anomaly detection with deep learning," IEEE Trans. Knowl. Data Eng., pp. 1-1, 2021.
|
| 348 |
+
[4] H. Tong and J. Zhu, "New peer effect-based approach for service matching in cloud manufacturing under uncertain preferences," Appl. Soft Comput., vol. 94, pp. 1-17, 2020.
|
| 349 |
+
[5] L. Shi, Z. Teng, L. Wang, Y. Zhang, and A. Binder, "Deepclue: Visual interpretation of text-based deep stock prediction," IEEE Trans Knowl. Data Eng., vol. 31, no. 6, pp. 1094-1108, 2019.
|
| 350 |
+
[6] D. Nahmias and K. Kontson, "Easy perturbation eeg algorithm for spectral importance (easypeasi): A simple method to identify important spectral features of eeg in deep learning models," In Proc. ACM KDD'21, p. 2398-2406, 2021.
|
| 351 |
+
[7] F. Zhang, Y. Liu, N. Feng, C. Yang, J. Zhai, S. Zhang, B. He, J. Lin, X. Zhang, and X. Du, "Periodic weather-aware LSTM with event mechanism for parking behavior prediction," IEEE Trans. Knowl. Data Eng., pp. 1-1, 2021.
|
| 352 |
+
[8] H. Fawaz, G. Forestier, J. Weber, L. Idoumghar, and P.-A. Muller, "Deep learning for time series classification: a review," Data Min. Knowl. Disc., vol. 33, pp. 917-963, 2019.
|
| 353 |
+
[9] Z. Wang, W. Yan, and T. Oates, "Time series classification from scratch with deep neural networks: A strong baseline," In Proc. IEEE IJCNN 2017, pp. 1578-1585, 2017.
|
| 354 |
+
|
| 355 |
+

|
| 356 |
+
Fig. 5. Critical difference diagram of the average ranks of various FL algorithms on 44 datasets.
|
| 357 |
+
|
| 358 |
+
[10] X. Zhang, Y. Gao, J. Lin, and C.-T. Lu, "Tapnet: Multivariate time series classification with attentional prototypical network," In Proc. AAAI 2020, pp. 6845-6852, 2020.
|
| 359 |
+
[11] F. Karim, S. Majumdar, H. Darabi, and S. Harford, "Multivariate LSTM-fcns for time series classification," Neural Networks, vol. 116, pp. 237-245, 2019.
|
| 360 |
+
[12] Z. Xiao, X. Xu, H. Xing, S. Luo, P. Dai, and D. Zhan, "Rtfn: A robust temporal feature network for time series classification." Inform. Sciences, vol. 571, pp. 65-86, 2021.
|
| 361 |
+
[13] G. Li, B. Choi, J. Xu, S. Bhowmick, K.-P. Chun, and G. Wong, "Shapenet: A shapelet-neural network approach for multivariate time series classification," In Proc. AAAI 2021, vol. 35, no. 9, pp. 8375-8383, 2021.
|
| 362 |
+
[14] D. Lee, S. Lee, and H. Yu, "Learnable dynamic temporal pooling for time series classification," In Proc. AAAI 2021, vol. 35, no. 9, pp. 8288-8296, 2021.
|
| 363 |
+
[15] B. Arcas, G. Bacon, K. Bonawitz, and et al., "Federated learning: Collaborative machine learning without centralized training data," https://ai.googleblog.com/2017/04/federated-learning-collaborative.html, 2017.
|
| 364 |
+
[16] Q. Yang, Y. Liu, T. Chen, and Y. Tong, "Federated machine learning: Concept and applications," ACM Trans. Intell. Syst. Technol., vol. 10, no. 2, pp. 1-19, 2019.
|
| 365 |
+
[17] Q. Li, Z. Wen, Z. Wu, S. Hu, N. Wang, Y. Li, X. Liu, and B. He, "A survey on federated learning systems: Vision, hype and reality for data privacy and protection," IEEE Trans. Knowl. Data Eng., pp. 1-1, 2021.
|
| 366 |
+
[18] M. McMahan, E. Moore, D. Ramage, S. Hampson, and B. Arcas, "Communication-efficient learning of deep networks from decentralized data," In Proc. AISTATS 2017, pp. 1-11, 2017.
|
| 367 |
+
[19] J. Ma, Q. Zhang, J. Lou, L. Xiong, and J. Ho, "Communication efficient federated generalized tensor factorization for collaborative health data analytics," In Proc. 30th The Web Conference 2021, 2021.
|
| 368 |
+
[20] B. Liu, Y. Guo, and X. Chen, "Pfa: Privacy-preserving federated adaptation for effective model personalization," In Proc. 30th The Web Conference 2021, 2021.
|
| 369 |
+
[21] J. Wu, Z. Huang, Y. Ning, H. Wang, E. Chen, J. Yi, and B. Zhou, "Hierarchical personalized federated learning for user modeling," In Proc. 30th The Web Conference 2021, 2021.
|
| 370 |
+
[22] Q. Yang, J. Zhang, W. Hao, G. Spell, and L. Carin, "Flop: Federated learning on medical datasets using partial networks," In Proc. ACM KDD'21, 2021.
|
| 371 |
+
[23] Y. Liu, Y. Kang, C. Xing, T. Chen, and Q. Yang, "A secure federated transfer learning framework," IEEE Intell. Syst., vol. 35, no. 4, pp. 70-82, 2020.
|
| 372 |
+
[24] H. Yang, H. He, W. Zhang, and X. Cao, "Fedsteg: A federated transfer learning framework for secure image steganalysis," IEEE Trans. Netw. Sci. Eng., vol. 8, no. 2, pp. 1084-1094, 2021.
|
| 373 |
+
[25] D. Dimitriadis, K. Kumatani, R. Gmyr, Y. Gaur, and S. Eskimez, "Federated transfer learning with dynamic gradient aggregation," arXiv preprint arXiv:2008.02452, 2020.
|
| 374 |
+
[26] U. Majeed, S. Hassan, and C. Hong, "Cross-silo model-based secure federated transfer learning for flow-based traffic classification," In Proc. ICOIN 2021, 2021.
|
| 375 |
+
[27] H. Seo, J. Park, S. Oh, M. Bennis, and S.-L. Kim, "Federated knowledge distillation," arXiv preprint arXiv:2011.02367, 2020.
|
| 376 |
+
[28] C. He, M. Annavaram, and S. Avestimehr, "Group knowledge transfer: Federated learning of large cnns at the edge," In Proc. NeurIPS 2020, 2020.
|
| 377 |
+
[29] R. Mishra, H. Gupta, and T. Dutta, "A network resource aware federated learning approach using knowledge distillation," In Proc. INFOCOM 2021, 2021.
|
| 378 |
+
|
| 379 |
+
[30] S. Itahara, T. Nishio, Y. Koda, M. Morikura, and K. Yamamoto, "Distillation-based semi-supervised federated learning for communication-efficient collaborative training with non-iid private data," IEEE Trans. Mobile Comput., pp. 1-1, 2021.
|
| 380 |
+
[31] Y. Chen, X. Sun, and Y. Jin, "Communication-efficient federated deep learning with layer-wise asynchronous model update and temporally weighted aggregation," IEEE Trans. Neur. Net. Learn. Sys., vol. 31, no. 10, pp. 4229-4238, 2020.
|
| 381 |
+
[32] F. Sattler, S. Wiedemann, K.-R. Müller, and W. Samek, "Robust and communication-efficient federated learning from non-i.i.d. data," IEEE Trans. Neur. Net. Learn. Sys., vol. 31, no. 9, pp. 3400-3413, 2020.
|
| 382 |
+
[33] L. Nagalapatti and R. Narayanam, "Game of gradients: Mitigating irrelevant clients in federated learning," In Proc. AAAI 2021, vol. 35, no. 10, pp. 9046-9054, 2021.
|
| 383 |
+
[34] X. Cao, J. Jia, and N. Gong, "Provably secure federated learning against malicious clients," In Proc. AAAI 2020, vol. 35, no. 8, pp. 6885-6893, 2020.
|
| 384 |
+
[35] J. Hong, Z. Zhu, S. Yu, Z. Wang, H. Dodge, and J. Zhou, "Federated adversarial debiasing for fair and transferable representations," In Proc. ACM KDD'21, vol. 1, no. 1, August 2021.
|
| 385 |
+
[36] P. Zhou, L. Wang, L. Guo, S. Gong, and B. Zheng, "A privacy-preserving distributed contextual federated online learning framework with big data support in social recommender systems," IEEE Trans. Knowl. Data Eng., vol. 33, no. 3, pp. 824-838, 2021.
|
| 386 |
+
[37] Z. Pan, L. Hu, W. Tang, J. Li, Y. He, and Z. Liu, "Privacy-preserving multi-granular federated neural architecture search a general framework," IEEE Trans. Knowl. Data Eng., pp. 1-1, 2021.
|
| 387 |
+
[38] M. Crawshaw, "Multi-task learning with deep neural networks: A survey," arXiv preprint arXiv: 2009.09796, 2020.
|
| 388 |
+
[39] A. Ruiz, M. Flynn, and A. Bagnall, "Benchmarking multivariate time series classification algorithms," arXiv preprint arXiv: 2007.13156, 2020.
|
| 389 |
+
[40] J. Lines and A. Bagnall, "Time series classification with ensembles of elastic distance measures," Data Min. Knowl. Disc., vol. 29, p. 565-592, 2015.
|
| 390 |
+
[41] A. Bagnall, J. Lines, J. Hills, and A. Bostrom, "Time series classification with cote: the collective of transformation-based ensembles," In Proc. ICDE 2016, pp. 1548-1549, 2016.
|
| 391 |
+
[42] J. Lines, S. Taylor, and A. Bagnall, "Time series classification with hive-cote: the hierarchical vote collective of transformation-based ensembles," ACM Trans. Knowl. Discov. Data, vol. 21, no. 52, pp. 1-35, 2018.
|
| 392 |
+
[43] K. Fauvel, E. Fromont, V. Masson, P. Faverdin, and A. Termier, "Local cascade ensemble for multivariate data classification," arXiv preprint arXiv:2005.03645, 2020.
|
| 393 |
+
[44] J. Lines, L. Davis, J. Hills, and A. Bagnall, "A shapelet transform for time series classification," In Proc. ACM KDD'12, 2012.
|
| 394 |
+
[45] M. Baydogan, G. Runger, and E. Tuv, "A bag-of-features framework to classify time series," IEEE Trans. Pattern Anal., vol. 35, no. 11, pp. 2796-2802, 2013.
|
| 395 |
+
[46] A. Dempster, D. Schmidt, and G. Webb, "Minirocket: A very fast (almost) deterministic transform for time series classification," In Proc. ACM KDD'21, 2021.
|
| 396 |
+
[47] M. Baydogan and G. Runger, "Time series representation and similarity based on local auto patterns," Data Min. Knowl. Disc., vol. 30, p. 476-509, 2016.
|
| 397 |
+
[48] J. Large, A. Bagnall, S. Malinowski, and R. Tavenard, "From bop to boss and beyond: time series classification with dictionary based classifier," arXiv preprint arXiv:1809.06751, 2018.
|
| 398 |
+
[49] W. Pei, H. Dibeklioglu, D. Tax, and L. van der Maaten, "Multivariate time-series classification using the hidden-unit logistic model," IEEE Trans. Neur. Net. Lear., vol. 29, no. 4, pp. 920-931, 2018.
|
| 399 |
+
|
| 400 |
+
[50] H. Deng, G. Runger, E. Tuv, and M. Vladimir, "A time series forest for classification and feature extraction," Inform. Sciences, vol. 239, p. 142-153, 2013.
|
| 401 |
+
[51] B. Bai, G. Li, S. Wang, Z. Wu, and W. Yan, "Time series classification based on multi-feature dictionary representation and ensemble learning," Expert Syst. Appl., vol. 169, pp. 1-10, 2021.
|
| 402 |
+
[52] H. Fawaz, B. Lucas, G. Forestier, C. Pelletier, D. Schmidt, J. Weber, G. Webb, L. Idoumghar, P.-A. Muller, and F. Petitjean, "Inceptiontime: finding alexnet for time series classification," Data Min. Knowl. Disc., vol. 34, p. 1936-1962, 2020.
|
| 403 |
+
[53] Z. Xiao, X. Xu, H. Zhang, and E. Szczerbicki, "A new multi-process collaborative architecture for time series classification," Knowl.-Based Syst., vol. 220, pp. 1-11, 2021.
|
| 404 |
+
[54] W. Chen and K. Shi, "Multi-scale attention convolutional neural network for time series classification," Neural Networks, vol. 136, pp. 126-140, 2021.
|
| 405 |
+
[55] S. Huang, L. Xu, and C. Jiang, "Artificial intelligence and advanced time series classification: Residual attention net for cross-domain modeling," Fintech with Artificial Intelligence, Big Data, and Blockchain, Blockchain Technologies, 2021.
|
| 406 |
+
[56] Z. Xiao, X. Xu, H. Xing, R. Qu, F. Song, and B. Zhao, "Rnts: Robust neural temporal search for time series classification," In Proc. IJCNN 2021, 2021.
|
| 407 |
+
[57] J. Guo, B. Yu, S. Maybank, and D. Tao, "Knowledge distillation: A survey," arXiv preprint arXiv: 2006.05525, 2020.
|
| 408 |
+
[58] H. Dau, A. Bagnall, C.-C. M. Yeh, Y. Zhu, S. Gharghabi, C. Ratanamahatana, and E. Keogh, "The ucr time series archive," IEEE/CAA Journal of Automatica Sinica, vol. 6, no. 6, pp. 1293-1305, 2019.
|
2201.00xxx/2201.00011/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:49b808b33df7f32d771f1e6b354039322bf34279e07659b9e18ec968fa19bc3d
|
| 3 |
+
size 874840
|
2201.00xxx/2201.00011/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2201.00xxx/2201.00042/6031260c-1066-414e-836c-08a9e723b8bf_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2201.00xxx/2201.00042/6031260c-1066-414e-836c-08a9e723b8bf_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2201.00xxx/2201.00042/6031260c-1066-414e-836c-08a9e723b8bf_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:24e056e8ca4cda70f9fc4311f7470dfb83f339ee81a9db2a0f91fa82362541ba
|
| 3 |
+
size 8898910
|
2201.00xxx/2201.00042/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2201.00xxx/2201.00042/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0fcb720f8735a35a86af013cda2b35cb5fcdf2349dbc3e2fa214d0e326afd7bc
|
| 3 |
+
size 1162015
|
2201.00xxx/2201.00042/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2201.00xxx/2201.00044/b5c93617-34e7-4877-9247-c8a8bb698f50_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2201.00xxx/2201.00044/b5c93617-34e7-4877-9247-c8a8bb698f50_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2201.00xxx/2201.00044/b5c93617-34e7-4877-9247-c8a8bb698f50_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9c6ceb50e6d74ba278d650bb52913ce918404d98a768511a97fa9ac8319d033c
|
| 3 |
+
size 5505434
|
2201.00xxx/2201.00044/full.md
ADDED
|
@@ -0,0 +1,607 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# TRANSFORMER EMBEDDINGS OF IRREGULARLY SPACED EVENTS AND THEIR PARTICIPANTS
|
| 2 |
+
|
| 3 |
+
# Chenghao Yang
|
| 4 |
+
|
| 5 |
+
Dept. of Computer Science Columbia University
|
| 6 |
+
|
| 7 |
+
yangalan1996@gmail.com
|
| 8 |
+
|
| 9 |
+
# Hongyuan Mei
|
| 10 |
+
|
| 11 |
+
Toyota Tech. Institute at Chicago hongyuan@ttic.edu
|
| 12 |
+
|
| 13 |
+
# Jason Eisner
|
| 14 |
+
|
| 15 |
+
Dept. of Computer Science Johns Hopkins University jason@cs.jhu.edu
|
| 16 |
+
|
| 17 |
+
# ABSTRACT
|
| 18 |
+
|
| 19 |
+
The neural Hawkes process (Mei & Eisner, 2017) is a generative model of irregularly spaced sequences of discrete events. To handle complex domains with many event types, Mei et al. (2020a) further consider a setting in which each event in the sequence updates a deductive database of facts (via domain-specific pattern-matching rules); future events are then conditioned on the database contents. They show how to convert such a symbolic system into a neuro-symbolic continuous-time generative model, in which each database fact and possible event has a time-varying embedding that is derived from its symbolic provenance.
|
| 20 |
+
|
| 21 |
+
In this paper, we modify both models, replacing their recurrent LSTM-based architectures with flatter attention-based architectures (Vaswani et al., 2017), which are simpler and more parallelizable. This does not appear to hurt our accuracy, which is comparable to or better than that of the original models as well as (where applicable) previous attention-based methods (Zuo et al., 2020; Zhang et al., 2020a).
|
| 22 |
+
|
| 23 |
+
# 1 INTRODUCTION
|
| 24 |
+
|
| 25 |
+
It has recently become common to model event sequences by embedding each event into $\mathbb{R}^D$ . Event sequences are ubiquitous in real-world applications, such as healthcare, finance, education, commerce, gaming, audio, news, security, and social media. Event embeddings could be used in a variety of downstream applied tasks, similar to word token embeddings in BERT (Devlin et al., 2018).
|
| 26 |
+
|
| 27 |
+
In this paper, we embed each event using attention over the previous events, using continuous-time positional encodings so as to consider their timing. To build a left-to-right generative model, we also embed possible events at future times in exactly the same way, and use their embeddings to predict their instantaneous probabilities at those times.
|
| 28 |
+
|
| 29 |
+
Attention-based models (Vaswani et al., 2017) have already become extremely popular for generative modeling of discrete-time sequences, such as natural-language documents (Radford et al., 2019; Brown et al., 2020) and proteins (Rao et al., 2021). As we confirm here, they are also effective for modeling sequences that are irregularly spaced in continuous time, even in lower-data regimes.
|
| 30 |
+
|
| 31 |
+
Our past work on modeling event sequences (Mei & Eisner, 2017; Mei et al., 2019; 2020a,b) used neural architectures based on LSTMs (Hochreiter & Schmidhuber, 1997). That is, predictions at time $t$ were derived from a recurrent encoding of the sequence of timestamped events at times $< t$ . However, an attention-based (Transformer-style) architecture has three potential advantages:
|
| 32 |
+
|
| 33 |
+
① A Transformer does not summarize the past. Our predictions at time $t$ can examine an unboundedly large representation of the past (embeddings in $\mathbb{R}^d$ of every event before $t$ ), not merely a fixed-dimensional summary that was computed greedily from left to right (an LSTM's state at time $t$ ).
|
| 34 |
+
$②$ A Transformer's computation graph is broader and shallower. The breadth makes it easier to learn long-distance influences. The shallowness does make it impossible to represent inherently deep concepts such as parity (Hahn, 2020), but it enables greater parallelism: the layer- $\ell$ embeddings can be computed in parallel during training, as they depend only on layer $\ell -1$ and not on one another.
|
| 35 |
+
③ The Transformer architecture is simpler and arguably more natural, while remaining competitive in our experiments. To model the temporal distribution of the next event, all of our models posit embeddings of possible future events that depend on the future event's time $t$ . To accomplish this,
|
| 36 |
+
|
| 37 |
+

|
| 38 |
+
(a) A-NHP
|
| 39 |
+
|
| 40 |
+

|
| 41 |
+
(b) A-NDTT
|
| 42 |
+
Figure 1: These figures show how embeddings in the model flow through layers (bottom to top) and through time (left to right). There are two possible event types, e and f, which represent email messages. At the upper right corner of each figure, we obtain their modeled intensities at a certain time $t$ , $\lambda_{\mathrm{e}}(t)$ and $\lambda_{\mathrm{f}}(t)$ , based on the embeddings of the three previous, irregularly spaced observed events. This requires embedding e and f at time $t$ as if they were observed. If either one actually occurs at time $t$ , we will keep its embeddings, which will then affect embeddings of events at times $> t$ . Figure (a) shows the basic model of section 3, in which each event's embedding at layer $\ell$ depends $(\longrightarrow)$ on all preceding events at layer $\ell - 1$ . (The dashed arrows $\dashrightarrow$ reflect residual connections.) Section 4 explains that the $\underline{\mathrm{e}} \longrightarrow \underline{\mathrm{f}}$ influences can be prevented by dropping the rule $\underline{\mathrm{f}} \gets \underline{\mathrm{e}}$ . Figure (b) shows an A-NDTT model from section 5, in which the company forum's embedding at layer $\ell$ depends $(\longrightarrow)$ on all preceding events at layer $\ell - 1$ (via $\gets$ rules). The events or possible events at layer $\ell$ do not depend directly on preceding events; instead, their embeddings at time $t$ are derived $(\longrightarrow)$ from the forum's embedding at time $t$ (via $\dashrightarrow$ rules).
|
| 43 |
+
|
| 44 |
+
Mei & Eisner (2017) had to stipulate an arbitrary family of parametric decay functions on $t$ , and the neuro-symbolic framework of Mei et al. (2020a) required a complex method for pooling the parameters of these decay functions. But in our present method, no decay functions are required to allow embeddings and probabilities to drift over time. The embeddings are constructed "from scratch" at each time $t$ simply by attending to the set of previous events, using $t$ -specific query vectors that include a continuous positional embedding of $t$ . As $t$ increases, the attention weights over the previous events vary continuously with $t$ , so the embeddings and probabilities do so as well.
|
| 45 |
+
|
| 46 |
+
We present a series of increasingly sophisticated methods. Section 2 explains how to embed events in context (like continuous-time BERT). Section 3 turns this into a generative point process model that can predict the time and type of the next event (like continuous-time GPT). In section 4, we allow a domain expert to write simple rules that control attention, constraining which events can "see" which previous events and with what parameters. Finally, section 5 allows the domain expert to write more complex rules, using our previously published Datalog through time formalism (Mei et al., 2020a). These rules allow events to interact with a symbolic deductive database that tracks facts over time so that the neural architecture does not have to learn how to do so. As in Mei et al. (2020a), we define time-varying embeddings for all facts in the database and all events that are possible given those facts, using parameters associated with the rules that established the facts and possible events.
|
| 47 |
+
|
| 48 |
+
In the end, we arrive at attention-based versions of the NHP (Mei & Eisner, 2017) and NDTT (Mei et al., 2020a) frameworks, which we refer to as A-NHP (section 3) and A-NDTT (section 5). We evaluate them in section 7, showing comparable or better accuracy. We release our code.
|
| 49 |
+
|
| 50 |
+
# 2 CONTINUOUS-TIME TRANSFORMER FOR EMBEDDING EVENTS
|
| 51 |
+
|
| 52 |
+
Suppose we observe $I$ events over a fixed time interval $[0, T)$ . Each event is denoted mnemonically as $e@t$ (i.e., "type $e$ at time $t$") where $e \in \mathcal{E}$ is the type of event (drawn from a finite set $\mathcal{E}$ ). The observed event sequence is $e_1@t_1, e_2@t_2, \ldots, e_I@t_I$ , where $0 < t_1 < t_2 < \ldots < t_I < T$ .
|
| 53 |
+
|
| 54 |
+
For any event $e@t$ , we can compute an embedding $\llbracket e\rrbracket (t)\in \mathbb{R}^D$ by attending to its history $\mathcal{H}(e@t)$ a set of relevant events. (For the moment, imagine that $\mathcal{H}(e@t)$ consists of all the observed events $e_i@t_i$ .) More precisely, $\llbracket e\rrbracket (t)$ is the concatenation of layer-wise embeddings $\llbracket e\rrbracket^{(0)}(t),\llbracket e\rrbracket^{(1)}(t),\ldots ,\llbracket e\rrbracket^{(L)}(t)$ . For $\ell >0$ , the layer- $\ell$ embedding of $e@t$ is computed as
|
| 55 |
+
|
| 56 |
+
$$
|
| 57 |
+
\llbracket e \rrbracket^{(\ell)}(t) \stackrel{\text{def}}{=} \underbrace{\llbracket e \rrbracket^{(\ell - 1)}(t)}_{\text{residual connection}} + \tanh \left( \sum_{f@s \in \mathcal{H}(e@t)} \frac{\mathbf{v}^{(\ell)}(f@s) \, \alpha^{(\ell)}(f@s, e@t)}{1 + \sum_{f@s \in \mathcal{H}(e@t)} \alpha^{(\ell)}(f@s, e@t)} \right) \tag{1}
|
| 58 |
+
$$
|
| 59 |
+
|
| 60 |
+
where the unnormalized attention weight on each relevant event $f@s \in \mathcal{H}(e@t)$ is
|
| 61 |
+
|
| 62 |
+
$$
|
| 63 |
+
\alpha^{(\ell)}(f@s, e@t) \stackrel{\text{def}}{=} \exp\left(\frac{1}{\sqrt{D}}\, \mathbf{k}^{(\ell)}(f@s)^{\top} \mathbf{q}^{(\ell)}(e@t)\right) \in \mathbb{R} \tag{2}
|
| 64 |
+
$$
|
| 65 |
+
|
| 66 |
+
In layer $\ell$ , $\mathbf{v}^{(\ell)}$ , $\mathbf{k}^{(\ell)}$ , and $\mathbf{q}^{(\ell)}$ are known as the value, key, and query vectors and are extracted from the layer- $(\ell-1)$ event embeddings using learned layer-specific matrices $\mathbf{V}^{(\ell)}$ , $\mathbf{K}^{(\ell)}$ , $\mathbf{Q}^{(\ell)}$ :
|
| 67 |
+
|
| 68 |
+
$$
|
| 69 |
+
\mathbf{v}^{(\ell)}(e@t) \stackrel{\text{def}}{=} \mathbf{V}^{(\ell)} \left[ 1; \llbracket t \rrbracket; \llbracket e \rrbracket^{(\ell-1)}(t) \right] \tag{3a}
|
| 70 |
+
$$
|
| 71 |
+
|
| 72 |
+
$$
|
| 73 |
+
\mathbf{k}^{(\ell)}(e@t) \stackrel{\text{def}}{=} \mathbf{K}^{(\ell)} \left[ 1; \llbracket t \rrbracket; \llbracket e \rrbracket^{(\ell-1)}(t) \right] \tag{3b}
|
| 74 |
+
$$
|
| 75 |
+
|
| 76 |
+
$$
|
| 77 |
+
\mathbf{q}^{(\ell)}(e@t) \stackrel{\text{def}}{=} \mathbf{Q}^{(\ell)} \left[ 1; \llbracket t \rrbracket; \llbracket e \rrbracket^{(\ell-1)}(t) \right] \tag{3c}
|
| 78 |
+
$$
|
| 79 |
+
|
| 80 |
+
As the base case, $\llbracket e\rrbracket^{(0)}(t)\stackrel {\mathrm{def}}{=}\llbracket e\rrbracket^{(0)}$ is a learned embedding of the event type $e$ . $\llbracket t\rrbracket$ denotes an embedding of the time $t$ . We cannot learn absolute embeddings for all real numbers, so we fix
|
| 81 |
+
|
| 82 |
+
$$
|
| 83 |
+
\llbracket t \rrbracket_{d} = \sin\left(t / \left(m \cdot \left(\tfrac{5M}{m}\right)^{\frac{d}{D}}\right)\right) \text{ if } d \text{ is even} \quad \llbracket t \rrbracket_{d} = \cos\left(t / \left(m \cdot \left(\tfrac{5M}{m}\right)^{\frac{d-1}{D}}\right)\right) \text{ if } d \text{ is odd} \tag{4}
|
| 84 |
+
$$
|
| 85 |
+
|
| 86 |
+
where $0 \leq d < D$ are the dimensions and our choices of $m, M$ are explained in Appendix A.
|
| 87 |
+
|
| 88 |
+
Crucially, to compute the layer- $\ell$ embedding of an event, equations (1)-(3) need only the layer- $(\ell - 1)$ embeddings of the relevant events in its history. This lets us compute the layer- $\ell$ embeddings of all events in parallel. Note that equations (1)-(3) are simplifications of the traditional Transformer, since this ablation performed equally well in our pilot experiments (see Appendix A).
|
| 89 |
+
|
| 90 |
+
The set of relevant events $\mathcal{H}(e@t)$ could be defined in a task-specific way. For example, to pretrain BERT-like embeddings (Devlin et al., 2018), we might use a corrupted version of $\{e_1@t_1,\ldots ,e_I@t_I\}$ in which some $e_i@t_i$ have been removed or replaced with $\text{mask} @ t_{i}$ . Such embeddings could be pretrained with a BERT-like objective and then fine-tuned to predict properties of the observed events.
|
| 91 |
+
|
| 92 |
+
# 3 GENERATIVE MODELING OF CONTINUOUS-TIME EVENT SEQUENCES
|
| 93 |
+
|
| 94 |
+
In this paper, we focus on the task of predicting future events given past ones. At any time $t$ , we would like to know what will happen at that time, given the actual events that happened before $t$ . Our generative model is analogous to a Transformer language model (Radford et al., 2019; Brown et al., 2020), which, at each time $t \in \mathbb{N}$ , defines a probability distribution over the words in the vocabulary.
|
| 95 |
+
|
| 96 |
+
In our setting, however, $t \in \mathbb{R}$ . With probability 1, nothing happens at time $t$ . Each possible event $e$ in our vocabulary has only an infinitesimal probability of occurring at time $t$ . We write this probability as $\lambda_{e}(t)\mathrm{d}t$ where $\lambda_{e}(t) \in \mathbb{R}^{+}$ is called the (Poisson) intensity of type- $e$ events at time $t$ . More formally, the probability of such an event occurring during $[t,t + \epsilon)$ approaches $\lambda_{e}(t)\epsilon$ as $\epsilon \to 0^{+}$ .
|
| 97 |
+
|
| 98 |
+
Thus, our modeling task is to model $\lambda_{e}(t)$ (as in, e.g., Hawkes, 1971; Du et al., 2016; Mei & Eisner, 2017). We model $\lambda_{e}(t)$ as a function of the top-layer embedding of the possible event $e@t$ :
|
| 99 |
+
|
| 100 |
+
$$
|
| 101 |
+
\lambda_{e}(t) \stackrel{\text{def}}{=} \operatorname{softplus}\left(\mathbf{w}_{e}^{\top} [1; \llbracket e \rrbracket^{(L)}(t)], \tau_{e}\right) \quad \text{where } \operatorname{softplus}(x, \tau) = \tau \log(1 + \exp(x/\tau)) > 0 \tag{5}
|
| 102 |
+
$$
|
| 103 |
+
|
| 104 |
+
with learnable parameters $\mathbf{w}_e$ and $\tau_e > 0$ . We do this separately for each possible $e@t$ , computing the embedding $\llbracket e\rrbracket^{(L)}(t)$ using equations (1)-(3). The softplus transfer function is inherited from the neural Hawkes process (Mei & Eisner, 2017). To ensure that our model is generative, we compute $\llbracket e\rrbracket (t)$ from only previous events. That is, $\mathcal{H}(e@t)$ in equation (1) may contain any or all of the previously generated events $e_i@t_i$ for $t_i < t$ , but it may not contain any for which $t_i > t$ . We call this model the attentive Neural Hawkes process, or A-NHP, and evaluate it in section 7.
|
| 105 |
+
|
| 106 |
+
Our model's log-likelihood has the same form as for any autoregressive multivariate point process:
|
| 107 |
+
|
| 108 |
+
$$
|
| 109 |
+
\sum_ {i = 1} ^ {I} \log \lambda_ {e _ {i}} (t _ {i}) - \int_ {t = 0} ^ {T} \sum_ {e = 1} ^ {E} \lambda_ {e} (t) \mathrm {d} t \tag {6}
|
| 110 |
+
$$
|
| 111 |
+
|
| 112 |
+
Derivations of this formula can be found in previous work (e.g., Hawkes, 1971; Liniger, 2009; Mei & Eisner, 2017). We can estimate the parameters by locally maximizing the log-likelihood (6) by
|
| 113 |
+
|
| 114 |
+
any stochastic gradient method. Intuitively, each $\log \lambda_{e_i}(t_i)$ is increased to explain why the observed event $e_i$ happened at time $t_i$ , while $\int_{t=0}^{T} \sum_{e=1}^{E} \lambda_e(t) \, \mathrm{d}t$ is decreased to explain why no event of any possible type $e \in \{1, \ldots, E\}$ ever happened at other times.
|
| 115 |
+
|
| 116 |
+
Appendix D gives training details, including Monte Carlo approximations to the integral in equation (6), as well as noting alternative training objectives. Given the learned parameters, we may wish to sample from the model given the past history, or make a minimum Bayes risk prediction about the next event. Recipes can be found in Appendix E.
|
| 117 |
+
|
| 118 |
+
Notice that equation (5) is rather expensive compared to previous work, since it computes a deep embedding of the possible event $e@t$ just for the purpose of finding its intensity—and the algorithms of Appendices D–E require computing the intensities of many possible events. Appendix A offers a speedup that shares embeddings among similar events, but it also explains why different events may sometimes have to be embedded differently to support the selective attention in sections 4–5 below.
|
| 119 |
+
|
| 120 |
+
# 4 MULTI-HEAD SELECTIVE ATTENTION
|
| 121 |
+
|
| 122 |
+
We now present a simple initial version of selective attention. As in a graphical model, not all events should be able to influence one another directly. Consider a scenario with two event types: $\underline{\mathbf{e}}$ means that Eve emails Adam, while $\underline{\mathbf{f}}$ means that Frank emails Eve. As Frank does not know when Eve emailed Adam, past events of type $\underline{\mathbf{e}}$ cannot influence his behavior. Therefore, $\mathcal{H}(\underline{\mathbf{f}}@t)$ should include past events of type $\underline{\mathbf{f}}$ but not $\underline{\mathbf{e}}$ , so that the embedding of $\underline{\mathbf{f}}@t$ and hence the intensity function $\lambda_{\underline{\mathbf{f}}}(t)$ pay zero attention to $\underline{\mathbf{e}}$ events. In contrast, $\mathcal{H}(\underline{\mathbf{e}}@t)$ should still include past events of both types, since both are visible to Eve and can influence her behavior.
|
| 123 |
+
|
| 124 |
+
We describe this situation with the edges $\underline{\mathsf{f}}\gets \underline{\mathsf{f}},\underline{\mathsf{e}}\gets \underline{\mathsf{e}},\underline{\mathsf{e}}\gets \underline{\mathsf{f}}$ . These are akin to the edges in a directed graphical model. They specify the sparsity pattern of the influence matrix (or Granger causality matrix) that describes which past events can influence which future events. There is a long history of estimating this matrix from observed sequence data (e.g., Xu et al., 2016; Zhang et al., 2021), even with neural influence models (Zhang et al., 2020b). In the present paper, however, we do not attempt to estimate this sparsity pattern, but assume it is provided by a human domain expert. Incorporating such domain knowledge into the model can reduce the amount of training data that is needed. Edges like $\underline{\mathsf{e}}\gets \underline{\mathsf{f}}$ can be regarded as simple cases of the NDTT rules in section 5 below.
|
| 125 |
+
|
| 126 |
+
Such rules also affect how we apply attention. When Eve decides whether to email Adam $(\underline{\mathsf{e}}@t)$ , we may reasonably suppose that she separately considers the embeddings of the past $\underline{\mathbf{e}}$ events (e.g., "when were my last relevant emails to Adam?") versus the past $\underline{\mathbf{f}}$ events (e.g., "what have I heard recently from Frank?"). Hence, we associate different attention heads with the two rules that affect $\underline{\mathbf{e}}$ , namely $\underline{\mathbf{e}}\gets \underline{\mathbf{e}}$ and $\underline{\mathbf{e}}\gets \underline{\mathbf{f}}$ . These heads may have different parameters, so that they seek out and obtain different information from the past via different queries, keys, and values. In general, we replace equation (1) with
|
| 127 |
+
|
| 128 |
+
$$
|
| 129 |
+
\llbracket e \rrbracket^{(\ell)}(t) \stackrel{\text{def}}{=} \llbracket e \rrbracket^{(\ell-1)}(t) + \tanh\left(\sum_{r} \boxed{e}_{r}^{(\ell)}(t)\right) \tag{7}
|
| 130 |
+
$$
|
| 131 |
+
|
| 132 |
+
$$
|
| 133 |
+
\boxed{e}_{r}^{(\ell)}(t) \stackrel{\text{def}}{=} \sum_{f@s \in \mathcal{H}_{r}(e@t)} \frac{\mathbf{v}_{r}^{(\ell)}(f@s)\, \alpha_{r}^{(\ell)}(f@s, e@t)}{1 + \sum_{f@s \in \mathcal{H}_{r}(e@t)} \alpha_{r}^{(\ell)}(f@s, e@t)} \tag{8}
|
| 134 |
+
$$
|
| 135 |
+
|
| 136 |
+
where $r$ in the summation ranges over rules $e \gets \dots$ . The history $\mathcal{H}_r(e@t)$ contains only those past events $f@s$ that rule $r$ makes visible to $e$ . If there are no such events, or they have small attention weights (are only weakly relevant to $e@t$ ) as discussed in Appendix A, then rule $r$ will contribute little or nothing to the sum in equation (7). The attention weights $\alpha_r$ and vectors $\mathbf{v}_r$ are defined using versions of equations (2)-(3) with $r$ -specific parameters.
|
| 137 |
+
|
| 138 |
+
In short, each rule looks at the context separately, through its own attention weights determined by its own parameters. The rule already specifies symbolically which past events can get nonzero attention in the first place, so it makes sense for the rule to also provide the parameters that determine the attention weights and value projections. Further discussion is given in Appendix A.
|
| 139 |
+
|
| 140 |
+
# 5 ATTENTIVE NEURAL DATALOG THROUGH TIME
|
| 141 |
+
|
| 142 |
+
Edges such as $\underline{\mathbf{e}} \gets \underline{\mathbf{f}}$ can be regarded as simple examples of rules in an NDTT program (Mei et al., 2020a, section 2). We briefly review this formalism and then extend our approach from section 4 to handle all NDTT programs.
|
| 143 |
+
|
| 144 |
+
A Datalog through time (DTT) program describes possible sequences of events, much as a regular expression describes legal sequences of characters. A DTT program for a particular domain specifies how each event automatically updates a database, adding or removing facts. In this way, the past events $e_1, \ldots, e_i$ sequentially construct a database. This database then determines which event types (if any) can happen next: the next event $e_{i+1}$ can be $\underline{\mathsf{f}}$ only if $\underline{\mathsf{f}}$ is currently a fact in the database.
|
| 145 |
+
|
| 146 |
+
Thus, an event may appear in the database as a fact, meaning that the event is possible. We use variables $e, f$ to range over events, but variables $g, h$ to range over any facts (both events and non-events). Literal examples of facts are shown in orange if they are events (e.g., $\underline{\mathbf{f}}$ ), and in blue otherwise.
|
| 147 |
+
|
| 148 |
+
A neural Datalog through time (NDTT) program is a DTT program augmented with some dimensionality declarations (Appendix C). A rule that adds a fact to the database now also computes a vector embedding of that fact, or updates the existing embedding if the fact was already in the database.
|
| 149 |
+
|
| 150 |
+
Notice that the dimensionality of the embedded database changes as the database grows and shrinks over time. Nonetheless, the model has a fixed number of parameters associated with the fixed set of rules of the NDTT program. As we will see, rules can contain variables, allowing a small set of rules to model a large set of event types (i.e., parameter sharing).
|
| 151 |
+
|
| 152 |
+
If $\underline{\mathbf{f}}$ is a fact in the database at time $t$ , meaning that event $\underline{\mathbf{f}}$ is possible at time $t$ , then its embedding $\llbracket \underline{\mathbf{f}}\rrbracket^{(L)}(t)$ determines its intensity $\lambda_{\underline{\mathbf{f}}}(t)$ via equation (5), as before. Thus, where a DTT program only describes which event sequences are possible, an NDTT program also describes how probable they are.
|
| 153 |
+
|
| 154 |
+
Although the set of database facts is modified only when an event occurs, the facts' embeddings are time-sensitive and evolve as the events that added them to the database recede into the past. This allows event intensities such as $\lambda_{\underline{\mathsf{f}}}(t)$ to wax and wane continuously as time elapses.
|
| 155 |
+
|
| 156 |
+
Datalog. We now give details. We begin with Datalog (Ceri et al., 1989), a traditional formalism for deductive databases. A deductive database holds both extensional facts, which are placed there by some external process, and intensional facts, which are transitively deduced from the extensional facts. A Datalog program is simply a set of rules that govern these deductions:
|
| 157 |
+
|
| 158 |
+
- $h \mathbin{:\!-} g_1, \ldots, g_n$ says to deduce $h$ at any time $t$ when $g_1, \ldots, g_n$ are all true (in the database).
|
| 159 |
+
|
| 160 |
+
A single rule can license many deductions. That is because the facts can have structured names, and $h, g_1, \ldots, g_n$ can be patterns that match against those names, using capitalized identifiers as variables. A model of filesystem properties might have a rule like open(U,D) :- user(U), group(G), document(D), member(U,G), readable(D,G). In English, this says that U can open D at any time when user U is a member of some group G such that document D is readable by G.
|
| 161 |
+
|
| 162 |
+
Datalog through time. Whenever extensional facts are added or removed, the intensional facts are instantly recomputed according to the deductive rules. DTT is an extension in which extensional facts are automatically added and removed when the database is notified of the occurrence of events. This behavior is governed by two additional rule types:
|
| 163 |
+
|
| 164 |
+
- $h \gets f, g_1, \ldots, g_n$ says to **add** $h$ at any time $s$ when event $f$ occurs and the $g_i$ are all true.
|
| 165 |
+
- $!h \gets f, g_1, \ldots, g_n$ says to remove $h$ at any time $s$ satisfying the same conditions.
|
| 166 |
+
|
| 167 |
+
Thus, the proposition $h$ is true at time $t$ (i.e., appears as a fact in the database at time $t$ ) iff either (1) $h$ is deduced at time $t$ , or (2) $h$ was added at some time $s < t$ and never removed at any time in $(s, t)$ .
|
| 168 |
+
|
| 169 |
+
In our previous example, editing(U,D) $\leftarrow$ open(U,D), member(U,G), writeable(U,G) records in the database that user U is editing D, once they have opened it with appropriate permissions. (As a result, edit events might become possible via a deductive rule edit(U,D) :- editing(U,D).)
|
| 170 |
+
|
| 171 |
+
Neural Datalog through time. It would be difficult to train a neural architecture to encode thousands or millions of structured boolean facts about the world in its state and to systematically keep those
|
| 172 |
+
|
| 173 |
+
facts up to date in response to possibly rare events. As a neuro-symbolic method, NDTT delegates that task to a symbolic database governed by manually specified DTT rules. However, it also augments the database: iff a proposition $h$ appears as a fact in the NDTT database at time $t$ , it will be associated not only with the simple truth value true but also with an embedding $[[h]](t)$ . This embedding is a learned representation of that fact at that time, and can be trained to be useful in downstream tasks. It captures details of when and how that fact was established (the fact's provenance), since it is computed using learned parameters associated with the rules that deduced and/or added it.
|
| 174 |
+
|
| 175 |
+
For example, a user's embedding might be constructed using attention over all the past events that have affected the user, via rules of the form $\text{user}(\text{U}) \leftarrow \cdots$ . This summarizes the user's state. Similarly, a document's embedding might be constructed using attention over all the edits to it, considering the editing user's state at the time of the edit: $\text{document}(\text{D}) \leftarrow \text{edit}(\text{U}, \text{D})$ , $\text{user}(\text{U})$ .
|
| 176 |
+
|
| 177 |
+
Embeddings from NDTT rules. Our goal is to provide new formulas for the embeddings $\llbracket h\rrbracket (t)$ based on Transformer-style attention rather than LSTM-style recurrence. We call this attentive NDTT, or A-NDTT. This gives a new way to map an NDTT program to a neural architecture. The potential advantages for accuracy, efficiency, and simplicity were explained in section 1.
|
| 178 |
+
|
| 179 |
+
Intuitively, the $\leftarrow$ rules will govern the "horizontal" flow of information through time (by defining attentional connections as we saw in section 4), while the :- rules will govern the "vertical" flow of information at a given time (by defining feed-forward connections). These are, of course, the two major mechanisms in Transformer architectures.
|
| 180 |
+
|
| 181 |
+
Under A-NDTT, the layer- $\ell$ embedding of $h@t$ is
|
| 182 |
+
|
| 183 |
+
$$
|
| 184 |
+
\llbracket h \rrbracket^{(\ell)}(t) \stackrel{\text{def}}{=} \llbracket h \rrbracket^{(\ell-1)}(t) + \tanh\left([h]^{(\ell)}(t) + \sum_{r} \boxed{h}_{r}^{(\ell)}(t)\right) \tag{9}
|
| 185 |
+
$$
|
| 186 |
+
|
| 187 |
+
which is an augmented version of equation (7). Suppose $h$ is true at time $t$ because it was added by rule $r$ (i.e., condition (2)). Then the summand $\boxed{h}_r^{(\ell)}(t)$ exists and is computed much as in equation (8), now with attention over all "add times" $s$ . In other words, $\mathcal{H}_r(h@t)$ in equation (8) includes just those past events $f@s$ such that $f$ added $h$ via $r$ at some time $s < t$ and $h$ was never removed at any time in $(s,t)$ .
|
| 188 |
+
|
| 189 |
+
More precisely, when the rule $h \gets f$ , $g_1, \ldots, g_n$ causes $h@t$ to attend to the specific past event $f@s$ , we actually want attention to consider the embedding at time $s$ not just of $f$ , but of the entire add condition $f, g_1, \ldots, g_n$ . Thus, we replace $f@s$ with $(f, g_1, \ldots, g_n)@s$ throughout equation (8). The attention key of this add condition is defined as $\mathbf{k}^{(\ell)}((f, g_1, \ldots, g_n)@s) \stackrel{\mathrm{def}}{=} \mathbf{K}_r^{(\ell)}\left[1; \llbracket s \rrbracket; \llbracket f \rrbracket^{(\ell-1)}(s); \llbracket g_1 \rrbracket^{(\ell-1)}(s); \ldots; \llbracket g_n \rrbracket^{(\ell-1)}(s)\right]$ (compare equation (3b)). Its attention value $\mathbf{v}^{(\ell)}((f, g_1, \ldots, g_n)@s)$ is defined analogously, using a different matrix $\mathbf{V}_r^{(\ell)}$ .
|
| 190 |
+
|
| 191 |
+
The above handles the $\leftarrow$ rules. As for the :- rules, the vector $[h]^{(\ell)}(t)$ in equation (9) sums over all the ways that $h$ can be deduced at time $t$ (i.e., condition (1)). This does not involve attention, so we exactly follow Mei et al. (2020a, equations (3)-(6)):
|
| 192 |
+
|
| 193 |
+
$$
|
| 194 |
+
[ h ] ^ {(\ell)} (t) = \sum_ {r} \bigoplus_ {g _ {1}, \dots , g _ {n}} ^ {\beta_ {r}} \mathbf {W} _ {r} [ 1; \llbracket g _ {1} \rrbracket (t); \dots ; \llbracket g _ {n} \rrbracket (t) ] \tag {10}
|
| 195 |
+
$$
|
| 196 |
+
|
| 197 |
+
where $r$ ranges over rules, and $(g_1, \ldots, g_n)$ ranges over all tuples of facts at time $t$ such that $h \mathbin{:\!-} g_1, \ldots, g_n$ matches rule $r$ (and thus deduces $h$ at time $t$ ). The operator $\bigoplus^{\beta_r}$ is a softmax-pooling operator with a learned inverse temperature $\beta_r$ . If $h$ is not deduced at time $t$ by any instantiation of $r$ , then $r$ has no effect on the sum (10), since pooling the empty set with $\bigoplus^{\beta_r}$ returns 0.
|
| 198 |
+
|
| 199 |
+
Example. Mei et al. (2020a) give many examples of NDTT programs. Here is a simple example to illustrate the use of :- rules. e means that Eve posts a message to the company forum, while f means that Frank does so. Once the forum is created by a create event, its existence is a fact (called forum) whose embedding (called [forum]) always reflects all messages posted so far to the forum. Until the forum is destroyed, Eve and Frank can post to it, and the embeddings and intensities of their messages depend on the current state of the forum:
|
| 200 |
+
|
| 201 |
+
$$
|
| 202 |
+
\begin{array}{l} 1 \quad \text{forum} \leftarrow \underline{\text{create}}. \\ 2 \quad !\text{forum} \leftarrow \underline{\text{destroy}}. \end{array}
|
| 203 |
+
$$
|
| 204 |
+
|
| 205 |
+
$$
|
| 206 |
+
\begin{array}{l} 3 \quad \text{forum} \leftarrow \underline{\mathrm{e}}. \\ 4 \quad \text{forum} \leftarrow \underline{\mathrm{f}}. \end{array}
|
| 207 |
+
$$
|
| 208 |
+
|
| 209 |
+
$$
|
| 210 |
+
\begin{array}{l} 5 \quad \underline{\mathrm{e}} \mathbin{:\!-} \text{forum}. \\ 6 \quad \underline{\mathrm{f}} \mathbin{:\!-} \text{forum}. \end{array}
|
| 211 |
+
$$
|
| 212 |
+
|
| 213 |
+
The resulting neural architecture is drawn in Figure 1b. If the company grows from 2 to $K$ employees, then the program needs $O(K)$ rules and hence $O(K)$ parameters, which define how each employee's messages affect the forum and vice-versa. Without the :- rules, we would have to list out $O(K^2)$ rules such as $\underline{\mathrm{e}} \gets \underline{\mathrm{f}}$ and hence would need $O(K^2)$ parameters, which define how each employee's messages affect every other employee's messages directly; this case is drawn in Figure 1a.
|
| 214 |
+
|
| 215 |
+
Appendix B and Figure 4 spell out an enhanced version of this example that makes use of variables, so that all $K$ employees can be governed by a constant $(O(1))$ number of rules.
|
| 216 |
+
|
| 217 |
+
Discussion. NDTT rules enrich the notion of "influence matrix" from section 4. Events traditionally influence the intensities of subsequent events, but NDTT $\leftarrow$ rules more generally let them influence the embeddings of subsequent facts (and hence the intensities of any events among those facts). Furthermore, NDTT :- rules let facts influence the embeddings of contemporaneous facts.
|
| 218 |
+
|
| 219 |
+
Each $\leftarrow$ rule $r$ can be seen as defining the fixed sparsity pattern of a large influence matrix, along with parameters for computing its nonzero entries from context at each attention layer. The size of this matrix is determined by the number of ways to instantiate the variables in the rule. The entries of the matrix are normalized versions of the attention weights $\alpha_{r}$ . The influences of different $\leftarrow$ rules $r$ are combined by equation (9) and are modulated by nonlinearities.
|
| 220 |
+
|
| 221 |
+
Overall, (A-)NDTT models learn representations, much like pretrained language models (Peters et al., 2018; Radford et al., 2019). They learn continuous embeddings of the facts in a discrete database, using a neural architecture that is derived from the symbolic rules that deduce these facts and update them in response to events. The facts change at discrete times but their embeddings change continuously. We train the model so that the embeddings of possible events predict how likely they are to occur.
|
| 222 |
+
|
| 223 |
+
# 6 RELATED WORK
|
| 224 |
+
|
| 225 |
+
Multivariate point processes have been widely used in real-world applications, including document stream modeling (He et al., 2015; Du et al., 2015a), learning Granger causality (Xu et al., 2016; Zhang et al., 2020b; 2021), network analysis (Choi et al., 2015; Etesami et al., 2016), recommendation systems (Du et al., 2015b), and social network analysis (Guo et al., 2015; Lukasik et al., 2016).
|
| 226 |
+
|
| 227 |
+
Over the recent years, various neural models have been proposed to expand the expressiveness of point processes. They mostly use recurrent neural networks, or LSTMs (Hochreiter & Schmidhuber, 1997): in particular Du et al. (2016); Mei & Eisner (2017); Xiao et al. (2017a,b); Omi et al. (2019); Shchur et al. (2020); Mei et al. (2020a); Boyd et al. (2020). Models of this kind enjoy continuous and infinite state spaces, as well as flexible transition functions, thus achieving superior performance on many real-world datasets, compared to classical models such as the Hawkes process (Hawkes, 1971).
|
| 228 |
+
|
| 229 |
+
The Transformer Hawkes process (Zuo et al., 2020) and self-attentive Hawkes process (Zhang et al., 2020a) were the first papers to adapt generative Transformers (Vaswani et al., 2017; Radford et al., 2019; Brown et al., 2020) to point processes. The Transformer architecture allows their models to enjoy unboundedly large representations of histories, as well as great parallelism during training (see 1 and 2 in section 1). As section 3 discussed, both models—as well as subsequent attention-based models (Enguehard et al., 2020; Sharma et al., 2021)—derive the intensity $\lambda_{e}(t)$ from $\llbracket f\rrbracket(s)$ where $f@s$ is the latest actual event before $t$ . (The THP takes $\lambda_{e}(t)$ to be a softplus function of $\mathbf{w}_{e}^{\top}[1;t/s;\llbracket f\rrbracket(s)]$ . The SAHP defines $\lambda_{e}(\cdot)$ as a function that exponentially decays toward an asymptote, computing the 3 parameters of this function from $e$ and $\llbracket f\rrbracket(s)$ .) In contrast (see 3 in section 1), our model derives $\lambda_{e}(t)$ from $\llbracket e\rrbracket(t)$ —the embedding of the possible event $e@t$ which is computed using $e$ - and $t$ -specific attention over all past events. Zhu et al. (2021, section 3.1) independently proposed this approach but did not evaluate it experimentally.
|
| 230 |
+
|
| 231 |
+
# 7 EXPERIMENTS
|
| 232 |
+
|
| 233 |
+
On several synthetic and real-world datasets, we evaluate our model's held-out log-likelihood, and its success at predicting the time and type of the next event. We compare with multiple strong competitors. Experimental details not given in this section can be found in Appendix F.
|
| 234 |
+
|
| 235 |
+
We implemented our A-NDTT framework using PyTorch (Paszke et al., 2017) and pyDatalog (Carbonell et al., 2016), borrowing substantially from the public implementation of NDTT (Mei
|
| 236 |
+
|
| 237 |
+
et al., 2020a). We also built a faster, GPU-friendly PyTorch implementation of our more restricted A-NHP model (see section 7.1 below). Our code and datasets are available at https://github.com/yangalan123/anhp-andtt.
|
| 238 |
+
|
| 239 |
+
For the competing models, we made use of their published implementations.1 References and URLs are provided in Appendix F.2.
|
| 240 |
+
|
| 241 |
+
# 7.1 COMPARISON OF DIFFERENT TRANSFORMER ARCHITECTURES
|
| 242 |
+
|
| 243 |
+
We first verify that our continuous-time Transformer is competitive with three state-of-the-art neural event models. The four models we compare are
|
| 244 |
+
|
| 245 |
+
Transformer Hawkes Process (THP) (Zuo et al., 2020). See section 6.
|
| 246 |
+
|
| 247 |
+
Self-Attentive Hawkes Process (SAHP) (Zhang et al., 2020a). See section 6.
|
| 248 |
+
|
| 249 |
+
Neural Hawkes Process (NHP) (Mei & Eisner, 2017). This is not an attention-based model. At any time $t$ , NHP uses a continuous-time LSTM to summarize the events over $[0, t)$ into a multidimensional state vector, and conditions the intensities $\lambda_{e}(t)$ of all event types on that state.
|
| 250 |
+
|
| 251 |
+
Attentive Neural Hawkes Process (A-NHP) This is our unstructured generative model from section 3. Since this model does not use selective attention, we speed up the intensity computations by defining them in terms of a single coarse event type, as described in Appendix A. Thus, each event intensity $\lambda_{e}(t)$ is computed by attention over all previous events, where the attention weights are independent of $e$ . This parameter-sharing mechanism resembles the NHP, except that we now use a Transformer in place of an LSTM.
|
| 252 |
+
|
| 253 |
+
In a pilot experiment, we drew sequences from randomly initialized models of all 4 types (details in Appendix F.1.1), and then fit all 4 models on each of these 4 synthetic datasets. We find that NHP, SAHP, and A-NHP have very close performance on all 4 datasets (outperforming THP, especially at predicting timing, except perhaps on the THP dataset itself); see Figure 5 in Appendix F.1.1 for results. Thus, A-NHP is still a satisfactory choice even when it is misspecified. This result is reassuring because A-NHP has less capacity in some ways (the circuit depth of a Transformer is fixed, whereas the circuit depth of an LSTM grows with the length of the sequence) and excess capacity in other ways (the Transformer has unbounded memory whereas the LSTM has finite-dimensional memory).
|
| 254 |
+
|
| 255 |
+

|
| 256 |
+
|
| 257 |
+

|
| 258 |
+
|
| 259 |
+

|
| 260 |
+
Figure 2: Evaluation results (smaller is better) with $95\%$ bootstrap confidence intervals on the two real-world datasets, comparing THP, SAHP, and NHP with our A-NHP model. RMSE evaluates the predicted time of the next event (root mean squared error), while error rate evaluates its predicted type given its time.
|
| 261 |
+
|
| 262 |
+

|
| 263 |
+
|
| 264 |
+

|
| 265 |
+
(b) StackOverflow
|
| 266 |
+
|
| 267 |
+

|
| 268 |
+
|
| 269 |
+
We then fit all 4 models to the following two benchmark real-world datasets.2
|
| 270 |
+
|
| 271 |
+
MIMIC-II (Lee et al., 2011). This dataset is a collection of de-identified clinical visit records of Intensive Care Unit patients for 7 years. Each patient has a sequence of hospital visit events, and each event records its time stamp and disease diagnosis.
|
| 272 |
+
|
| 273 |
+
StackOverflow (Leskovec & Krevl, 2014). This dataset represents two years of user awards on a question-answering website: each user received a sequence of badges (of 22 different types).
|
| 274 |
+
|
| 275 |
+
On MIMIC-II data (Figure 2a), our A-NHP is always a co-winner on each of these tasks; but the other co-winner (NHP or THP) varies across tasks. On StackOverflow data (Figure 2b), our A-NHP is clearly a winner on 2 out of 3 tasks and is tied with NHP on the third. Compared to NHP, A-NHP also enjoys a computational advantage, as discussed in sections 1 and 2. Empirically, training an A-NHP only took a fraction of the time that was needed to train an NHP, when sequences are reasonably long. Details can be found in Table 2 of Appendix F.3.
|
| 276 |
+
|
| 277 |
+
# 7.2 A-NDTT vs. NDTT
|
| 278 |
+
|
| 279 |
+
Now we turn to the structured modeling approach presented in section 5. We compare A-NDTT with NDTT on the RoboCup dataset and IPTV dataset proposed by Mei et al. (2020a). In both cases, we used the NDTT program written by Mei et al. (2020a). The rules are unchanged; the only difference is that our A-NDTT has the new continuous-time Transformer in lieu of the LSTM architecture. We also evaluated the unstructured NHP and A-NHP models on these datasets.
|
| 280 |
+
|
| 281 |
+
RoboCup (Chen & Mooney, 2008). This dataset logs the actions (e.g., kick, pass) of robot soccer players in the RoboCup Finals 2001-2004. The ball is frequently transferred between players (by passing or stealing), and this dynamically changes the set of possible event types (e.g., only the ball possessor can kick or pass). There are $K = 528$ event types over all time, but only about 20 of them are possible at any given time. For each prefix of each held-out event sequence, we used minimum Bayes risk to predict the next event's time, and to predict its participant(s) given its time and action type.
|
| 282 |
+
|
| 283 |
+
IPTV (Xu et al., 2018). This dataset contains records of 1000 users watching 49 TV programs over the first 11 months of 2012. Each event has the form watch(U,P). Given each prefix of the test event sequence, we attempted to predict the next test event's time $t$ , and to predict its program P given its actual time $t$ and user U.
|
| 284 |
+
|
| 285 |
+

|
| 286 |
+
|
| 287 |
+

|
| 288 |
+
|
| 289 |
+

|
| 290 |
+
Figure 3: Evaluation results with $95\%$ bootstrap confidence intervals on the RoboCup and IPTV datasets. Evaluation methods are the same as in Figure 2. Note that the training objective was log-likelihood.
|
| 291 |
+
|
| 292 |
+

|
| 293 |
+
|
| 294 |
+

|
| 295 |
+
(b) IPTV
|
| 296 |
+
|
| 297 |
+

|
| 298 |
+
|
| 299 |
+
The Robocup results are shown in Figure 3a. As in section 7.1, we find that A-NHP performs better than NHP on all the evaluation metrics; on log-likelihood and event type prediction, A-NHP is significantly better (paired permutation test, $p < 0.05$ ). We now inject domain knowledge into both the LSTM and Transformer approaches, by deriving architectures based on the RoboCup NDTT program (which specifies, for example, that only the ball possessor can kick or pass). The resulting models—NDTT and A-NDTT—are substantial and significant improvements, considerably reducing both the log-likelihood and the very high error rate on event type prediction. NDTT and A-NDTT are not significantly different from each other: since NDTT already knows which past events might be relevant, perhaps it is not sorely in need of the Transformer's ability to scan an unbounded history for relevant events.<sup>3</sup> Appendix F.5 includes more results of A-NDTT vs. NDTT broken down by action types.
|
| 300 |
+
|
| 301 |
+
Additionally, while A-NDTT does not improve the overall accuracy for this particular NDTT program and dataset, it does achieve overall comparable accuracy with a simpler and shallower architecture (2-3 in section 1). Like other Transformers, the A-NDTT architecture could be trained on a GPU with parallelism, as outlined in Appendix F.4 (future work).
|
| 302 |
+
|
| 303 |
+
The IPTV results are shown in Figure 3b. In this case, the log-likelihood of NHP can be substantially and significantly improved either by using rules (as for Robocup) or by using attention, or both. The error rate on predicting the next event type is again very high for NHP, and is substantially and significantly reduced by using rules—although not as much under the A-NDTT architecture as under the original NDTT architecture.
|
| 304 |
+
|
| 305 |
+
# 8 CONCLUSION
|
| 306 |
+
|
| 307 |
+
We showed how to generalize the Transformer architecture to sequences of discrete events in continuous time. Our architecture builds up rich embeddings of actual and possible events at any time $t$ , from lower-level representations of those events and their contexts. We usually train the model so that the embedding of a possible event predicts its intensity, yielding a flexible generative model that supports parallel computation of log-likelihood. We showed in section 7.1 that it outperforms other Transformer-based models on multiple real-world datasets and also beats or ties them on multiple synthetic datasets.
|
| 308 |
+
|
| 309 |
+
We also showed how to integrate this architecture with NDTT, a neural-symbolic framework that automatically derives neural models from logic programs. Our attention-based modification of NDTT has shown competitive performance, despite having a simpler and shallower architecture. Our code and datasets are available at https://github.com/yangalan123/anhp-andtt.
|
| 310 |
+
|
| 311 |
+
# ACKNOWLEDGMENTS
|
| 312 |
+
|
| 313 |
+
This work was supported in part by the National Science Foundation under Grant No. 1718846. We thank Bloomberg for a Data Science Ph.D. Fellowship to the second author. We thank Minjie Xu for the suggestion of developing a Transformer version of NDTT. We thank the anonymous ICLR 2022 reviewers for discussion and for pointing out additional related work.
|
| 314 |
+
|
| 315 |
+
# REFERENCES
|
| 316 |
+
|
| 317 |
+
Ba, J. L., Kiros, J. R., and Hinton, G. E. Layer normalization. arXiv preprint arXiv:1607.06450, 2016.
|
| 318 |
+
Boyd, A., Bamler, R., Mandt, S., and Smyth, P. User-dependent neural sequence models for continuous-time event data. In Advances in Neural Information Processing Systems (NeurIPS), 2020.
|
| 319 |
+
Brown, T. B., Mann, B., Ryder, N., Subbiah, M., Kaplan, J., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al. Language models are few-shot learners. In Advances in Neural Information Processing Systems (NeurIPS), 2020.
|
| 320 |
+
Carbonell, P., jcdouet, Alves, H. C., and Tim, A. pyDatalog, 2016.
|
| 321 |
+
Ceri, S., Gottlob, G., and Tanca, L. What you always wanted to know about Datalog (and never dared to ask). IEEE Transactions on Knowledge and Data Engineering, 1989.
|
| 322 |
+
Chen, D. L. and Mooney, R. J. Learning to sportscast: A test of grounded language acquisition. In Proceedings of the International Conference on Machine Learning (ICML), 2008.
|
| 323 |
+
Choi, E., Du, N., Chen, R., Song, L., and Sun, J. Constructing disease network and temporal progression model via context-sensitive Hawkes process. In IEEE International Conference on Data Mining (ICDM), 2015.
|
| 324 |
+
Devlin, J., Chang, M.-W., Lee, K., and Toutanova, K. BERT: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.
|
| 325 |
+
|
| 326 |
+
Du, N., Farajtabar, M., Ahmed, A., Smola, A. J., and Song, L. Dirichlet-Hawkes processes with applications to clustering continuous-time document streams. In Proceedings of the ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, 2015a.
|
| 327 |
+
Du, N., Wang, Y., He, N., Sun, J., and Song, L. Time-sensitive recommendation from recurrent user activities. In Advances in Neural Information Processing Systems (NeurIPS), 2015b.
|
| 328 |
+
Du, N., Dai, H., Trivedi, R., Upadhyay, U., Gomez-Rodriguez, M., and Song, L. Recurrent marked temporal point processes: Embedding event history to vector. In Proceedings of the ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, 2016.
|
| 329 |
+
Enguehard, J., Busbridge, D., Bozson, A., Woodcock, C., and Hammerla, N. Neural temporal point processes [for] modelling electronic health records. In Proceedings of Machine Learning Research, volume 136, pp. 85-113, 2020. NeurIPS 2020 Workshop on Machine Learning for Health (ML4H).
|
| 330 |
+
Etesami, J., Kiyavash, N., Zhang, K., and Singhal, K. Learning network of multivariate Hawkes processes: A time series approach. arXiv preprint arXiv:1603.04319, 2016.
|
| 331 |
+
Guo, F., Blundell, C., Wallach, H., and Heller, K. The Bayesian echo chamber: Modeling social influence via linguistic accommodation. In Proceedings of the Eighteenth International Conference on Artificial Intelligence and Statistics, 2015.
|
| 332 |
+
Hahn, M. Theoretical limitations of self-attention in neural sequence models. Transactions of the Association for Computational Linguistics, 8, 2020.
|
| 333 |
+
Hawkes, A. G. Spectra of some self-exciting and mutually exciting point processes. Biometrika, 1971.
|
| 334 |
+
He, P., Liu, X., Gao, J., and Chen, W. DeBERTa: Decoding-enhanced bert with disentangled attention. arXiv preprint arXiv:2006.03654, 2020.
|
| 335 |
+
He, X., Rekatsinas, T., Foulds, J., Getoor, L., and Liu, Y. HawkesTopic: A joint model for network inference and topic modeling from text-based cascades. In Proceedings of the International Conference on Machine Learning (ICML), 2015.
|
| 336 |
+
Hochreiter, S. and Schmidhuber, J. Long short-term memory. Neural Computation, 1997.
|
| 337 |
+
Kingma, D. and Ba, J. Adam: A method for stochastic optimization. In Proceedings of the International Conference on Learning Representations (ICLR), 2015.
|
| 338 |
+
Kitaev, N. and Klein, D. Constituency parsing with a self-attentive encoder. In Proceedings of the Association for Computational Linguistics, 2018.
|
| 339 |
+
Lee, J., Scott, D. J., Villarroel, M., Clifford, G. D., Saeed, M., and Mark, R. G. Open-access MIMIC-II database for intensive care research. In 2011 Annual International Conference of the IEEE Engineering in Medicine and Biology Society, 2011.
|
| 340 |
+
Leskovec, J. and Krevl, A. SNAP Datasets: Stanford large network dataset collection, 2014.
|
| 341 |
+
Lewis, P. A. and Shedler, G. S. Simulation of nonhomogeneous Poisson processes by thinning. Naval Research Logistics Quarterly, 1979.
|
| 342 |
+
Liniger, T. J. Multivariate Hawkes processes. Diss., Eidgenössische Technische Hochschule ETH Zürich, Nr. 18403, 2009.
|
| 343 |
+
Lukasik, M., Srijith, P. K., Vu, D., Bontcheva, K., Zubiaga, A., and Cohn, T. Hawkes processes for continuous time sequence classification: An application to rumour stance classification in Twitter. In Proceedings of the Annual Meeting of the Association for Computational Linguistics (ACL), 2016.
|
| 344 |
+
Mei, H. and Eisner, J. The neural Hawkes process: A neurally self-modulating multivariate point process. In Advances in Neural Information Processing Systems (NeurIPS), 2017.
|
| 345 |
+
Mei, H., Qin, G., and Eisner, J. Imputing missing events in continuous-time event streams. In Proceedings of the International Conference on Machine Learning (ICML), 2019.
|
| 346 |
+
|
| 347 |
+
Mei, H., Qin, G., Xu, M., and Eisner, J. Neural Datalog through time: Informed temporal modeling via logical specification. In Proceedings of the International Conference on Machine Learning (ICML), 2020a.
|
| 348 |
+
Mei, H., Wan, T., and Eisner, J. Noise-contrastive estimation for multivariate point processes. In Advances in Neural Information Processing Systems (NeurIPS), 2020b.
|
| 349 |
+
Mikolov, T., Karafiát, M., Burget, L., Černocký, J., and Khudanpur, S. Recurrent neural network-based language model. In Proceedings of the Annual Conference of the International Speech Communication Association (INTERSPEECH), 2010.
|
| 350 |
+
Omi, T., Ueda, N., and Aihara, K. Fully neural network based model for general temporal point processes. In Advances in Neural Information Processing Systems (NeurIPS), 2019.
|
| 351 |
+
Paszke, A., Gross, S., Chintala, S., Chanan, G., Yang, E., DeVito, Z., Lin, Z., Desmaison, A., Antiga, L., and Lerer, A. Automatic differentiation in PyTorch. 2017.
|
| 352 |
+
Peters, M., Neumann, M., Iyyer, M., Gardner, M., Clark, C., Lee, K., and Zettlemoyer, L. Deep contextualized word representations. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), 2018.
|
| 353 |
+
Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., and Sutskever, I. Language models are unsupervised multitask learners. 2019.
|
| 354 |
+
Rao, R., Meier, J., Sercu, T., Ovchinnikov, S., and Rives, A. Transformer protein language models are unsupervised structure learners. In Proceedings of the International Conference on Learning Representations (ICLR), 2021.
|
| 355 |
+
Sharma, K., Zhang, Y., Ferrara, E., and Liu, Y. Identifying coordinated accounts on social media through hidden influence and group behaviours. In Proceedings of the ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, 2021.
|
| 356 |
+
Shchur, O., Biloš, M., and Günnemann, S. Intensity-free learning of temporal point processes. In Proceedings of the International Conference on Learning Representations (ICLR), 2020.
|
| 357 |
+
Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L., and Polosukhin, I. Attention is all you need. In Advances in Neural Information Processing Systems (NeurIPS), 2017.
|
| 358 |
+
Xiao, S., Yan, J., Farajtabar, M., Song, L., Yang, X., and Zha, H. Joint modeling of event sequence and time series with attentional twin recurrent neural networks. arXiv preprint arXiv:1703.08524, 2017a.
|
| 359 |
+
Xiao, S., Yan, J., Yang, X., Zha, H., and Chu, S. Modeling the intensity function of point process via recurrent neural networks. In Proceedings of the AAAI Conference on Artificial Intelligence, 2017b.
|
| 360 |
+
Xu, H., Farajtabar, M., and Zha, H. Learning Granger causality for Hawkes processes. In Proceedings of the International Conference on Machine Learning (ICML), 2016.
|
| 361 |
+
Xu, H., Luo, D., and Carin, L. Online continuous-time tensor factorization based on pairwise interactive point processes. In Proceedings of the International Joint Conference on Artificial Intelligence (IJCAI), 2018.
|
| 362 |
+
Zhang, Q., Lipani, A., Kirnap, O., and Yilmaz, E. Self-attentive Hawkes process. In Proceedings of the International Conference on Machine Learning (ICML), 2020a.
|
| 363 |
+
Zhang, Q., Lipani, A., and Yilmaz, E. Learning neural point processes with latent graphs. In Proceedings of the International World Wide Web Conference (WWW), pp. 1495-1505, 2021.
|
| 364 |
+
Zhang, W., Panum, T. K., Jha, S., Chalasani, P., and Page, D. CAUSE: Learning Granger causality from event sequences using attribution methods. In Proceedings of the International Conference on Machine Learning (ICML), 2020b.
|
| 365 |
+
|
| 366 |
+
Zhu, S., Zhang, M., Ding, R., and Xie, Y. Deep Fourier kernel for self-attentive point processes. In International Conference on Artificial Intelligence and Statistics, 2021.
|
| 367 |
+
Zuo, S., Jiang, H., Li, Z., Zhao, T., and Zha, H. Transformer Hawkes process. In International Conference on Machine Learning, pp. 11692-11702. PMLR, 2020.
|
| 368 |
+
|
| 369 |
+
# Appendices
|
| 370 |
+
|
| 371 |
+
# A DISCUSSION OF ARCHITECTURAL DETAILS
|
| 372 |
+
|
| 373 |
+
Simplification. Equation (1) is a simplification of the original Transformer architecture (Vaswani et al., 2017). In the original architecture, $\llbracket e\rrbracket^{(\ell)}(t)$ would be obtained as LayerNorm $(\mathbf{x} + \mathrm{FFN}^{(\ell)}(\mathbf{x}))$ where $\mathbf{x}$ is the LayerNorm transformation (Ba et al., 2016) of the right-hand side of equation (1), and the nonlinear transform $\mathrm{FFN}^{(\ell)}$ is computed by a learned two-layer feed-forward network.
|
| 374 |
+
|
| 375 |
+
In our preliminary experiments, we found that the LayerNorm and FFN steps did not help, so for simplicity and speed, we omitted them from equation (1) and from the remaining experiments. However, it is possible that they might help on other domains or with larger training datasets, so our code supports them via command-line arguments.
|
| 376 |
+
|
| 377 |
+
Graceful degradation. Another change to equation (1) (and equation (8)) is that when normalizing the attention weights, we included an additional summand of 1 in the denominator.<sup>4</sup> We do this so that when the history $\mathcal{H}(e@t)$ is "rather irrelevant" to $e@t$ , the architecture behaves roughly as if $\mathcal{H}(e@t)$ were the empty set. In equation (1), this means that $\llbracket e\rrbracket^{(\ell)}(t)$ will then be close to $\llbracket e\rrbracket^{(\ell -1)}(t)$ . Similarly, equation (7) will not be much influenced by rule $r$ if rule $r$ finds only events $\mathcal{H}_r(e@t)$ that it considers to be "rather irrelevant" to $e@t$ .
|
| 378 |
+
|
| 379 |
+
A "rather irrelevant" history is one for which the unnormalized attention weights are small in toto, so that the denominator is dominated by the 1 summand. This may occur, for example, if events in the distant past tend to have small attention weights, and the history consists only of old events (and not too many of them). When the history is rather irrelevant, the argument to tanh in equation (1) and the summand $\llbracket e\rrbracket_r^{(\ell)}(t)$ in equation (7) are close to $\mathbf{0}$ ; when $\mathcal{H}(e@t) = \emptyset$ , they are exactly $\mathbf{0}$ .
|
| 380 |
+
|
| 381 |
+
Direct access to time embeddings. Another difference from Vaswani et al. (2017)—perhaps not an important one—is that in equation (3), we chose to concatenate $\llbracket t\rrbracket$ to the rest of the embedding rather than add it (cf. Kitaev & Klein, 2018; He et al., 2020). Furthermore we did so at every layer and not just layer 0. The intuition here is that the extraction of good key and query vectors at each layer may benefit from "direct access" to $\llbracket t\rrbracket$ . For example, this should make it easy to learn keys and queries such that the attention weight is highest when $s\approx t - \Delta$ (since for every $\Delta \in \mathbb{R}$ , there exists a sparse linear operator that transforms $\llbracket t\rrbracket \mapsto \llbracket t - \Delta \rrbracket$ ).
|
| 382 |
+
|
| 383 |
+
Range of wavelengths for time embeddings. Our time embedding $\llbracket t\rrbracket$ in (4) uses dimensions that are sinusoidal in $t$ , with wavelengths forming a geometric progression from $2\pi m$ to $2\pi (5M)$ . Setting $m = 1$ , $M = 2000$ would recover the standard scheme of Vaswani et al. (2017) (previously used in continuous time by Zuo et al. (2020)).
|
| 384 |
+
|
| 385 |
+
We instead set $m$ and $M$ from data so that we are robust to datasets of different time scales. Part of the intuition behind using sinusoidal embeddings is that nearby times can be distinguished by different values in their short-wavelength dimensions, whereas the long-wavelength dimensions make it easy to inspect and compare faraway times, since those dimensions are nearly linear on $t \in [0, M]$ . We therefore take $m$ to be the shortest gap between any two events in the same history,
|
| 386 |
+
|
| 387 |
+
$$
|
| 388 |
+
m = \min _ {e @ t} \min _ {f @ s, f ^ {\prime} @ s ^ {\prime} \in \mathcal {H} (e @ t)} | s - s ^ {\prime} |, \tag {11}
|
| 389 |
+
$$
|
| 390 |
+
|
| 391 |
+
as computed over training data, and take $M$ greater than all $T$ in training data (where each observed sequence is observed over an interval $[0,T]$).
|
| 392 |
+
|
| 393 |
+
If we were modeling sequences of words as in Brown et al. (2020), our procedure would indeed recover the values $m = 1$ and $M = 2000$ that they used to model text documents. Multiplying or dividing all $t$ values in the dataset by 1000 (e.g., switching between second and millisecond units) would have no effect on the time embeddings, as it would scale $m$ and $M$ in the same way.
|
| 394 |
+
|
| 395 |
+
Coarse event embeddings for speed. As noted at the end of section 3, the intensity model equation (5) involves a full embedding of each $e@t$ . This may be expressive, but it is also expensive. The
|
| 396 |
+
|
| 397 |
+
attention weight vectors $\alpha^{(1)},\ldots ,\alpha^{(L)}$ used to compute this embedding must be computed from scratch for each $e$ and $t$ . Why is this necessary?
|
| 398 |
+
|
| 399 |
+
Like other neural sequence models—both RNN-based and Transformer-based—we derive the probability that the next sequence element is $e$ from an inner product of the form $\mathbf{w}_e^\top [1; \llbracket\mathcal{H}(e@t)\rrbracket]$ , where in our equation (5), the role of the history embedding $\llbracket\mathcal{H}(e@t)\rrbracket$ is played by $\llbracket e\rrbracket^{(L)}(t)$ . However, for many previous models, the history embedding does not depend on $e$ , so it can be computed once at each time $t$ and reused across all $e$ .
|
| 400 |
+
|
| 401 |
+
- In neural language models, typically all previous events are taken to be relevant. $\llbracket\mathcal{H}(e@t)\rrbracket$ can then be defined as an RNN encoding of the sequence of all past events (Mikolov et al., 2010), or alternatively a Transformer embedding of the single most recent past event (which depends on the entire sequence of past events). This does not depend on $e$ .
|
| 402 |
+
- When modeling irregularly spaced events, $t$ is not necessarily an integer, and the past events in $\mathcal{H}(e@t)$ do not necessarily take place at $1,2,\dots,t - 1$ . Thus, the encoding $\llbracket\mathcal{H}(e@t)\rrbracket$ must somehow be improved to also consider the elapsed time $t - t_i$ since the most recent past event (Du et al., 2016; Mei & Eisner, 2017; Zuo et al., 2020; Zhang et al., 2020a). So now $\llbracket\mathcal{H}(e@t)\rrbracket$ must look at $t$ , but it is still independent of $e$ .
|
| 403 |
+
- In contrast, in sections 4-5, we will allow the more general case where $\llbracket\mathcal{H}(e@t)\rrbracket$ varies with $e$ as well, since NDTT rules determine which past events should be attended to by $e$ . The original NDTT paper essentially defined $\llbracket\mathcal{H}(e@t)\rrbracket$ as the state at time $t$ of an $e$ -specific continuous-time LSTM, which is updated by just the events that are relevant to $e$ . In our attention-based approach, we instead define it to be $\llbracket e\rrbracket^{(L)}(t)$ , yielding equation (5).
|
| 404 |
+
|
| 405 |
+
To reduce this computational cost, we can associate each event type $e$ with a coarse event type $\bar{e}$ that is guaranteed to have the same set of relevant past events, and replace $\llbracket e\rrbracket^{(L)}(t)$ with $\llbracket\bar{e}\rrbracket^{(L)}(t)$ in equation (5). (However, equation (5) still uses the fine-grained $\mathbf{w}_{e}$ .) Now to compute $\lambda_{e}(t)$ , we only have to embed $\bar{e}@t$ , which is a speedup if many of the possible event types $e$ are associated with the same $\bar{e}$ . In the case where we do not use selective attention, we can get away with using only a single coarse event type for the whole model—saving a runtime factor of $|\mathcal{E}|$ as in the cheaper approach. Note that the history $\mathcal{H}$ still uses fine-grained embeddings, so if $e@t$ actually occurs, we must then compute $\llbracket e\rrbracket^{(0)}(t), \ldots, \llbracket e\rrbracket^{(L)}(t)$ .
|
| 406 |
+
|
| 407 |
+
Concatenation vs. summation. Equation (7) uses summation to combine the outputs (8) of different attention heads $r$ . Vaswani et al. (2017) instead combined such outputs by projecting their concatenation, but that becomes trickier in our setting: different events $e$ would need to concatenate different numbers of attention heads $r$ (for just the rules $r$ that can take the form $e \leftarrow \cdots$ ), resulting in projection matrices of different dimensionalities. Especially when NDTT rules can contain variables (section 5 below), it is not immediately obvious how one should construct these matrices or share parameters among them. These presentational problems vanish with our simpler summation approach.
|
| 408 |
+
|
| 409 |
+
Our approach loses no expressive power: projecting a concatenation of $\llbracket e\rrbracket_{r}^{(\ell)}(t)$ values, as Vaswani et al. would suggest, is equivalent to summing up an $r$ -specific projection of $\llbracket e\rrbracket_{r}^{(\ell)}(t)$ for each $r$ , as we do, where our projection of $\llbracket e\rrbracket_{r}^{(\ell)}(t)$ has implicitly been incorporated into the projection (3a) that produces $\mathbf{v}_r^{(\ell)}$ . That is, if we can learn $\mathbf{V}_r^{(\ell)} = \mathbf{V}$ in equation (3a), then we can also learn $\mathbf{V}_r^{(\ell)} = \mathbf{PV}$ , where $\mathbf{P}$ is the desired projection matrix for rule $r$ . To make our method fully equivalent to Vaswani et al.'s, we would have to explicitly parameterize $\mathbf{V}_r^{(\ell)}$ as a matrix product of the form $\mathbf{PV}$ , forcing it to be low-rank.
|
| 410 |
+
|
| 411 |
+
# B NDTT EXAMPLE WITH VARIABLES
|
| 412 |
+
|
| 413 |
+
The company message forum program in section 5 had only 2 users and 1 forum. However, if the company employs many persons P and has a forum for each team T, NDTT rules can use capitalized variables to define the whole system concisely, using only $O(1)$ rules and $O(1)$ parameters. Here the possible facts and events have structured names like message(eve,sales,joke), which denotes an event in which employee eve posts a joke to the sales team's forum.
|
| 414 |
+
|
| 415 |
+
$$
|
| 416 |
+
\begin{array}{l@{\qquad}l} 7 \mid \text{forum}(T) \leftarrow \text{create}(T). & 9 \mid \text{message}(P,T,C) \mathrel{:\!-} \text{empl}(P), \text{forum}(T), \text{content}(C). \\ 8 \mid \text{forum}(T) \leftarrow \text{message}(P,T,C). & 10 \mid {!}\text{forum}(T) \leftarrow \text{destroy}(T). \end{array}
|
| 417 |
+
$$
|
| 418 |
+
|
| 419 |
+

|
| 420 |
+
Figure 4: The solid green arrows correspond to instantiations of the attentional $\leftarrow$ rules 8 and 13. They can capture the real-world property that thanks to Eve's joke at time $t_5$ , the sales forum still feels more humorous at time $t$ and Frank, another member of that forum, is still in a good mood. This raises the probability $\lambda_e(t)\mathrm{d}t$ that Frank posts his own joke at time $t$ , where $e = \text{message}(\text{frank}, \text{sales}, \text{joke})$ . More formally, $\lambda_e(t)$ is determined by the layer- $L$ embedding of Frank's possible message $e@t$ . In general, the layer- $\ell$ embedding of this message reflects the layer- $(\ell - 1)$ embeddings of both Frank and the forum at time $t$ , as well as the fact that the possible message is a joke. If the message is actually sent (i.e., the possible event actually happens), its layer- $\ell$ embedding would in turn affect the layer- $(\ell + 1)$ embeddings of the forum and its readers at times $> t$ . An arrow with multiple inputs means that the input embeddings are concatenated before being transformed into a contribution to the output embedding. Other visual conventions are as in Figure 1. Not all facts, events, or arrows are shown in this drawing.
|
| 421 |
+
|
| 422 |
+
This generalizes the previous program, allowing multiple forums and saying that any employee (not just Eve and Frank) can post any type of message to any forum, affecting the embedding of that forum. We could modify rule 9 by adding an additional condition member $(\mathrm{P},\mathrm{T})$ , so that employees can only post to forums of which they are members. Membership could be established and tracked by
|
| 423 |
+
|
| 424 |
+
11 join(P,T) :← empl(P), forum(T).
|
| 425 |
+
12 member(P,T) <- join(P,T).
|
| 426 |
+
13 empl(P) $\leftarrow$ message(P2,T,C), member(P,T).
|
| 427 |
+
|
| 428 |
+
Rules 8 and 13 ensure that a message to a forum affects the subsequent embedding of that forum and also the subsequent embeddings of all employees who were members of that forum when the message was sent. This may affect which employees join which forums in future, and what they post to the forums, as drawn in Figure 4 in the appendices. For further examples, see the full presentation of NDTT in Mei et al. (2020a).
|
| 429 |
+
|
| 430 |
+
How are variables treated in the computation of embeddings? In equations (7)-(8), $r$ refers to a rule with variables. However, $e$ refers to a specific fact, without variables. An instantiation of $r$ is a copy of $r$ in which each variable has been consistently replaced by an actual value. In our modified version of equation (8), the summations range over all values of $(f, g_1, \ldots, g_n)@s$ such that $e \leftarrow f$ , $g_1$ , $\ldots$ , $g_n$ is an instantiation of $r$ that added $e$ at time $s$ (i.e., an instantiation of $r$ such that $f$ occurred at time $s$ and $g_1, \ldots, g_n$ were all true at time $s$ ). Thus, the attentional competition may consider $(f, g_1, \ldots, g_n)@s$ values that are derived from many different instantiations of $r$ . Their attentional weights $\alpha_r^{(\ell)}$ are all obtained using the shared parameters associated with rule $r$ . The summation in equation (7) ranges only over rules $r$ with at least one instantiation that adds $e@t$ , so it skips rules that are irrelevant to $e$ .
|
| 431 |
+
|
| 432 |
+
# C PARAMETER DIMENSIONALITY SPECIFICATION FOR A-NDTT
|
| 433 |
+
|
| 434 |
+
In this section we discuss the dimensionality of the fact embeddings $\llbracket h\rrbracket (t)$ in section 5.
|
| 435 |
+
|
| 436 |
+
As in the original NDTT paper Mei et al. (2020a), the type of a fact in the database is given by its functor (forum, member, create, etc.). All facts of the same type have embedding vectors of the same dimensionality, and these dimensionalities are declared by the NDTT program.
|
| 437 |
+
|
| 438 |
+
This is enough to determine the dimensions of the parameter matrices associated with the deduction rules (Mei et al., 2020a). How about the add rules, however? The form of equation (9) implies that the value vectors $\mathbf{v}_r^{(\ell)}$ for add rule $r$ have the same dimensionality as the embedding of the head of $r$ . The key and query vectors for rule $r$ (as used in equation (2)) can share this dimensionality by default, although we are free to override this and specify a different dimensionality for them. The foregoing choices determine the dimensions of the parameter matrices $\mathbf{V}_r^{(\ell)}, \mathbf{K}_r^{(\ell)}, \mathbf{Q}_r^{(\ell)}$ associated with rule $r$ .
|
| 439 |
+
|
| 440 |
+
# D LIKELIHOOD COMPUTATION DETAILS
|
| 441 |
+
|
| 442 |
+
In this section we discuss the log-likelihood formulas in section 3.
|
| 443 |
+
|
| 444 |
+
Derivations of the log-likelihood formula (6) can be found in previous work (e.g., Hawkes, 1971; Liniger, 2009; Mei & Eisner, 2017). Intuitively, when training to increase the log-likelihood (6), each $\log \lambda_{e_i}(t_i)$ is increased to explain why the observed event $e_i$ happened at time $t_i$ , while $\int_{t=0}^{T} \sum_{e=1}^{E} \lambda_e(t) \mathrm{d}t$ is decreased to explain why no event of any possible type $e \in \{1, \dots, E\}$ ever happened at other times. Note that there is no log under the integral in equation (6). Why? The probability that there was not an event of any type in the infinitesimally wide interval $[t, t + \mathrm{d}t)$ is $1 - \lambda(t) \mathrm{d}t$ , whose $\log$ is $-\lambda(t) \mathrm{d}t$ .
|
| 445 |
+
|
| 446 |
+
The integral term in equation (6) is computed using the Monte Carlo approximation given by Mei & Eisner (2017, Algorithm 1), which samples times $t$ . This yields an unbiased stochastic gradient. For the number of Monte Carlo samples, we follow the practice of Mei & Eisner (2017): namely, at training time, we match the number of samples to the number of observed events at training time, a reasonable and fast choice, but to estimate log-likelihood when tuning hyperparameters or reporting final results, we take 10 times as many samples. The small remaining variance in this procedure is shown in our error bars, as explained in footnote 6.
|
| 447 |
+
|
| 448 |
+
At each sampled time $t$ , the Monte Carlo method still requires a summation over all events to obtain $\lambda(t)$ . This summation can be expensive when there are many event types. This is not a serious problem for our standalone A-NHP implementation since it can leverage GPU parallelism. But for the general A-NDTT implementation, it is hard to parallelize the $\lambda_k(t)$ computation over $k$ and $t$ . In that case, we use the downsampling trick detailed in Appendix D of Mei et al. (2020a).
|
| 449 |
+
|
| 450 |
+
An alternative would be to replace maximum-likelihood estimation with noise-contrastive estimation, which is quite effective at training NHP and NDTT models (Mei et al., 2020b).
|
| 451 |
+
|
| 452 |
+
# E HOW TO PREDICT EVENTS
|
| 453 |
+
|
| 454 |
+
It is possible to sample event sequences exactly from an A-NHP or A-NDTT model, using the thinning algorithm that is traditionally used for autoregressive point processes (Lewis & Shedler, 1979; Liniger, 2009). In general, to apply the thinning algorithm to sample the next event at time $\geq t_0$ , it is necessary to have an upper bound on $\{\lambda_e(t):t\in [t_0,\infty)\}$ for each event type $e$ . An explicit construction for the NHP (or NDTT) model was given by Mei & Eisner (2017, Appendix B.3). For A-NHP and A-NDTT, observe that $\lambda_{e}(t)$ is a continuous real-valued function of $[t]$ (the particular function depends on $e$ and the history of events at times $< t_0$ ). Since $[t]$ falls in the compact set $[-1,1]^d$ (thanks to the sinusoidal embedding (4)), it follows that $\lambda_{e}(t)$ is indeed bounded. Actual numerical bounds can be computed using interval arithmetic. That is, we can apply our continuous function not to a particular value of $[t]$ but to all of $[-1,1]^d$ , where for any elementary continuous function $f:\mathbb{R}\to \mathbb{R}$ , we have defined $f([x_{\mathrm{lo}},x_{\mathrm{hi}}])$ to return some bounded interval that contains $f(x)$ for all $x\in [x_{\mathrm{lo}},x_{\mathrm{hi}}]$ . The result will be a bounded interval that contains $\lambda_{e}(t)$ for all $t\in [t_0,\infty)$ .
|
| 455 |
+
|
| 456 |
+
<table><tr><td rowspan="2">DATASET</td><td rowspan="2">K</td><td colspan="3"># OF EVENT TOKENS</td><td colspan="3">SEQUENCE LENGTH</td></tr><tr><td>TRAIN</td><td>DEV</td><td>TEST</td><td>MIN</td><td>MEAN</td><td>MAX</td></tr><tr><td>SYNTHETIC</td><td>10</td><td>59904</td><td>7425</td><td>7505</td><td>49</td><td>75</td><td>99</td></tr><tr><td>MIMIC-II</td><td>75</td><td>1930</td><td>252</td><td>237</td><td>2</td><td>4</td><td>33</td></tr><tr><td>STACKOVERFLOW</td><td>22</td><td>345116</td><td>38065</td><td>97233</td><td>41</td><td>72</td><td>736</td></tr><tr><td>ROBOCUP</td><td>528</td><td>2195</td><td>817</td><td>780</td><td>780</td><td>948</td><td>1336</td></tr></table>
|
| 457 |
+
|
| 458 |
+
Table 1: Statistics of each dataset.
|
| 459 |
+
|
| 460 |
+
Section 7 includes a task-based evaluation where we try to predict the time and type of just the next event. More precisely, for each event in each held-out sequence, we attempt to predict its time given only the preceding events, as well as its type given both its true time and the preceding events.
|
| 461 |
+
|
| 462 |
+
We evaluate the time prediction with average $\mathrm{L}_2$ loss (yielding a root-mean-squared error, or RMSE) and evaluate the type prediction with average 0-1 loss (yielding an error rate).
|
| 463 |
+
|
| 464 |
+
Following Mei & Eisner (2017), we use the minimum Bayes risk (MBR) principle to predict the time and type with lowest expected loss. For completeness, we repeat the general recipe in this section.
|
| 465 |
+
|
| 466 |
+
For the $i^{\mathrm{th}}$ event, its time $t_i$ has density $p_i(t) = \lambda (t)\exp (-\int_{t_{i - 1}}^t\lambda (t')\mathrm{d}t')$ . We choose $\int_{t_{i - 1}}^{\infty}tp_i(t)\mathrm{d}t$ as the time prediction because it has the lowest expected $\mathrm{L}_2$ loss. The integral can be estimated using i.i.d. samples of $t_i$ drawn from $p_i(t)$ by the thinning algorithm.
|
| 467 |
+
|
| 468 |
+
Given the next event time $t_i$ , we choose the most probable type $\arg \max_e \lambda_e(t_i)$ as the type prediction because it minimizes expected 0-1 loss. In some circumstances, one might also like to predict the most probable type out of a restricted set $\mathcal{E}' \subsetneq \{1, \dots, E\}$ . This allows one to answer questions like "If we know that some event of the form message(eve, T) happened at time $t_i$ , then what was the forum T, given all past events?" The answer will simply be $\arg \max_{e \in \mathcal{E}'} \lambda_e(t_i)$ .
|
| 469 |
+
|
| 470 |
+
# F EXPERIMENTAL DETAILS
|
| 471 |
+
|
| 472 |
+
# F.1 DATASET DETAILS
|
| 473 |
+
|
| 474 |
+
Table 1 shows statistics about each dataset that we use in this paper.
|
| 475 |
+
|
| 476 |
+
# F.1.1 PILOT EXPERIMENTS ON SIMULATED DATA
|
| 477 |
+
|
| 478 |
+
In this experiment, we draw data from randomly initialized NHP, A-NHP, SAHP, and THP. For all of them, we take the number of event types to be $E = 10$ . For NHP, the dimensions of event embeddings and hidden states are all 32; for A-NHP, the number of layers ( $L$ in our paper) is 2, and the dimensions of time embeddings and event embeddings are 32; for SAHP, the number of layers is 4, and the dimension of hidden states is 32; for THP, the number of layers is 7, and the dimension of hidden states is 32.
|
| 479 |
+
|
| 480 |
+
For each model, we draw 800, 100, and 100 sequences for training, validation and testing, respectively. For each sequence, the sequence length $I$ is drawn from Uniform(49, 99). We take the maximum observation time $T$ to be $t_I + 1$ , one time step after the final event.
|
| 481 |
+
|
| 482 |
+
We fit all 4 models on each of these 4 synthetic datasets. The results are graphed in Figure 5 and show that NHP, SAHP, and A-NHP have very close performance on all 4 datasets (outperforming THP, especially at predicting timing, except perhaps on the THP dataset itself). Notably, THP fits the time intervals poorly when it is misspecified, perhaps because its family of intensity functions (section 6) is not a good match for real data: THP requires that the intensity of $e$ between events changes more slowly later in the event sequence, and that if it increases over time, it approaches linear growth rather than an asymptote.
|
| 483 |
+
|
| 484 |
+
Log-likelihood per event (the training objective) of the whole test event sequence
|
| 485 |
+
|
| 486 |
+

|
| 487 |
+
|
| 488 |
+

|
| 489 |
+
|
| 490 |
+

|
| 491 |
+
|
| 492 |
+

|
| 493 |
+
|
| 494 |
+
Log-likelihood per event of the event times only
|
| 495 |
+
|
| 496 |
+

|
| 497 |
+
|
| 498 |
+

|
| 499 |
+
|
| 500 |
+

|
| 501 |
+
|
| 502 |
+

|
| 503 |
+
|
| 504 |
+
Log-likelihood per event of the event types only
|
| 505 |
+
|
| 506 |
+

|
| 507 |
+
(a) THP Data
|
| 508 |
+
|
| 509 |
+

|
| 510 |
+
(b) SAHP Data
|
| 511 |
+
|
| 512 |
+

|
| 513 |
+
(c) NHP Data
|
| 514 |
+
|
| 515 |
+

|
| 516 |
+
(d) A-NHP Data
|
| 517 |
+
Figure 5: Log-likelihood on held-out data (in nats, with $95\%$ bootstrap confidence intervals $^6$ ). Larger values are better. Each column is a different experiment, on a single synthetic dataset generated from a different distribution family (shown at the bottom of the column). Within each column, the red dashed horizontal line represents the log-likelihood of the true distribution that generated the data. Within each column, we train and test 4 models: THP, SAHP, NHP, and A-NHP (from left to right). The model from the correct family is shown in red; compare this to our A-NHP model (the rightmost model). Other models are shown in lighter ink. Note that log-likelihood for continuous variables can be positive (as in the second row), since it uses the log of a probability density that may be $>1$ .
|
| 518 |
+
|
| 519 |
+
# F.1.2 OTHER DATA DETAILS
|
| 520 |
+
|
| 521 |
+
For MIMIC-II and StackOverflow, we used the version processed by Du et al. (2016); more details (e.g., about processing) can be found in their paper.
|
| 522 |
+
|
| 523 |
+
For RoboCup, we used the version processed by Chen & Mooney (2008); please refer to their paper for more details (e.g., data description, processing method, etc.).
|
| 524 |
+
|
| 525 |
+
# F.2 IMPLEMENTATION DETAILS
|
| 526 |
+
|
| 527 |
+
For NHP, our implementation is based on the public Github repositories at https://github.com/HMEIatJHU/neurawkes (Mei & Eisner (2017), with MIT License) and https://github.com/HMEIatJHU/neural-hawkes-particle-smoothing (Mei et al. (2019), with BSD 3-Clause "New" or "Revised" License). We made a considerable amount of modifications to their code (e.g., model, thinning algorithm), in order to migrate it to PyTorch 1.7. We built the standalone GPU implementation of A-NHP upon our NHP code.
|
| 528 |
+
|
| 529 |
+
For NDTT, we used the public Github repository at https://github.com/HMEIatJHU/neural-datalog-through-time (Mei et al. (2020a), with MIT License). We built A-NDTT upon NDTT.
|
| 530 |
+
|
| 531 |
+
For THP, we used the public Github repository at https://github.com/SimiaoZuo/Transformer-Hawkes-Process (Zuo et al. (2020), no license specified).
|
| 532 |
+
|
| 533 |
+
For SAHP, we used the public Github repository at https://github.com/QiangAIResearcher/sahp (Zhang et al. (2020a), no license specified).
|
| 534 |
+
|
| 535 |
+
# F.3 TRAINING DETAILS
|
| 536 |
+
|
| 537 |
+
For each model in section 7, we had to specify various dimensionalities. For simplicity, we used a single hyperparameter $D$ and took all vectors to be in $\mathbb{R}^D$ . This includes the state vectors of NHP,
|
| 538 |
+
|
| 539 |
+
<table><tr><td>DATASET</td><td colspan="2">TRAINING TIME (MILLISECONDS) / SEQUENCE</td></tr><tr><td></td><td>NHP</td><td>A-NHP</td></tr><tr><td>SYNTHETIC</td><td>208.7</td><td>56.3</td></tr><tr><td>MIMIC-II</td><td>2.9</td><td>32.6</td></tr><tr><td>STACKOVERFLOW</td><td>156.6</td><td>65.7</td></tr></table>
|
| 540 |
+
|
| 541 |
+
Table 2: Training time of NHP and A-NHP for experiments in section 7.1.
|
| 542 |
+
|
| 543 |
+
the fact embeddings of NDTT and A-NDTT, and the query, key, and value vectors for the models with attention mechanisms (THP, SAHP, A-NHP, and A-NDTT). For the models with attention mechanisms, we also had to choose the number of layers $L$ .
|
| 544 |
+
|
| 545 |
+
We tuned these hyperparameters for each combination of model, dataset, and training size (e.g., each bar in Figures 2, 3a and 5), always choosing the combination of $D$ and $L$ that achieved the best performance on the dev set. Our search spaces were $D \in \{4,8,16,32,64,128\}$ and $L \in \{1,2,3,4,5,6\}$ . In practice, the optimal $D$ for a model was usually 32 or 64; the optimal $L$ was usually 1, 2, or 3.
|
| 546 |
+
|
| 547 |
+
To train the parameters for a given model, we used the Adam algorithm (Kingma & Ba, 2015) with its default settings. We performed early stopping based on log-likelihood on the held-out dev set.
|
| 548 |
+
|
| 549 |
+
For the experiments in section 7.1, we used the standalone PyTorch implementations for NHP and A-NHP, which are GPU-friendly. We trained each model on an NVIDIA K80 GPU. Table 2 shows their training time per sequence on each dataset.
|
| 550 |
+
|
| 551 |
+
For section 7.2, we run our NDTT and A-NDTT models only on CPUs. This follows Mei et al. (2020a), who did not find an efficient method to leverage GPU parallelism for training NDTT models. The machines we used for NDTT and A-NDTT are 6-core Haswell architectures. On RoboCup, the training time of NDTT and A-NDTT was 62 and 149 seconds per sequence, respectively. See Appendix F.4 for future work on improving the latter time by exploiting GPU parallelism.
|
| 552 |
+
|
| 553 |
+
For the NHP and A-NHP models in section 7.2, we ran the specialized code for these models on CPU as well, rather than on GPU as in section 7.1, since the RoboCup sequences were too long to fit in the memory of our K80 GPU. The training time was 66 and 95 seconds per sequence for NHP and A-NHP, respectively.
|
| 554 |
+
|
| 555 |
+
# F.4 TRAINING PARALLELISM
|
| 556 |
+
|
| 557 |
+
We point out that in the future, GPU parallelism could be exploited through the following procedure, given a GPU with enough memory to handle long training sequences. (The layers can be partitioned across multiple GPUs if needed.)
|
| 558 |
+
|
| 559 |
+
For each training minibatch, the first step is to play each event sequence $e_1@t_1$ , $e_2@t_2$ , ..., $e_I@t_I$ forward to determine the contents of the database on each interval $(0, t_1], (t_1, t_2], \ldots, (t_{I-1}, t_I], (t_I, T]$ . This step runs on CPU, and computes only the boolean facts ("Datalog through time") without their embeddings ("neural Datalog through time").
|
| 560 |
+
|
| 561 |
+
Let $\mathcal{F}$ be the set of facts that ever appeared in the database during this minibatch and let $\mathcal{R}$ be the set of rules that were ever used to deduce or add them (section 5). Furthermore, let $\mathcal{T}$ be the set of times consisting of $\{t_1,\dots ,t_I\}$ together with the times $t$ that are sampled for the Monte Carlo integral (Appendix D).
|
| 562 |
+
|
| 563 |
+
A computation graph of size $O(|\mathcal{R}| \cdot I)$ can now be constructed, as illustrated in Figure 1b, to compute the embeddings $[[h]](t)$ of all facts $h \in \mathcal{F}$ at all times $t \in \mathcal{T}$ . The layer- $\ell$ embeddings at time $t \in \mathcal{T}$ depend on the layer- $(\ell - 1)$ embeddings at times $t_i \leq t$ , according to the add rules in $\mathcal{R}$ . The layer- $\ell$ embedding of a fact that is deduced at time $t$ also depends on the layer- $\ell$ embeddings at time $t$ of the facts that it is deduced from, according to the deduction rules in $\mathcal{R}$ ; this further increases the depth of the computation graph.
|
| 564 |
+
|
| 565 |
+
For a given fact $h \in \mathcal{F}$ , $[[h]]^{(\ell)}(t)$ can be computed in parallel for all event sequences and all times $t \in \mathcal{T}$ (even times $t$ when $h$ is not true, although those embeddings will not be used). Multiple facts that are governed by the same NDTT rule $r \in \mathcal{R}$ can also be handled in parallel, since they use the same $r$ -specific parameters. Thus, a GPU can be effective for this phase. The computation of $\underline{h}_r^{(\ell)}(t)$ in equation (9) must take care to limit its attention to just those earlier times when an event occurred that added $h$ via rule $r$ , and the computation of $[h]^{(\ell)}(t)$ in equation (9) must take care to consider only rules $r$ that in fact deduce $h$ at time $t$ because their conditions are true at time $t$ . This masks unwanted parts of the computation, rendering parts of the GPU idle. GPU parallelism will still be worthwhile if a substantial fraction of the computation remains unmasked—which is true for relatively homogeneous settings where most facts in $\mathcal{F}$ hold true for a large portion of the observed interval $[0,T)$ , even if their embeddings fluctuate.
|
| 566 |
+
|
| 567 |
+
# F.5 MORE RESULTS
|
| 568 |
+
|
| 569 |
+
The performance of A-NDTT and NDTT is not always comparable for specific action types, as shown in Figure 6. In terms of data fitting (left figure), A-NDTT is significantly better at the kickoff events while NDTT is better at the others. For time prediction (middle figure), A-NDTT is significantly better at the goal, kickoff, and pass events, but the differences for the other action types are not significant. For action participant prediction (right figure), A-NDTT is significantly better at the kickoff events while there is no difference for the others; both do perfectly well at the goal and kick events such that their dots overlap at the origin.
|
| 570 |
+
|
| 571 |
+

|
| 572 |
+
Figure 6: Results of NDTT and A-NDTT in Figure 3a broken down by action types, with horizontal and vertical error bars, respectively.
|
| 573 |
+
|
| 574 |
+

|
| 575 |
+
|
| 576 |
+

|
| 577 |
+
|
| 578 |
+
In Figure 7, we show that Figure 6 does not change qualitatively when re-run with different random seeds.
|
| 579 |
+
|
| 580 |
+

|
| 581 |
+
|
| 582 |
+

|
| 583 |
+
(a)
|
| 584 |
+
|
| 585 |
+

|
| 586 |
+
|
| 587 |
+

|
| 588 |
+
|
| 589 |
+

|
| 590 |
+
(b)
|
| 591 |
+
|
| 592 |
+

|
| 593 |
+
|
| 594 |
+

|
| 595 |
+
|
| 596 |
+

|
| 597 |
+
(c)
|
| 598 |
+
|
| 599 |
+

|
| 600 |
+
|
| 601 |
+

|
| 602 |
+
Figure 7: Replications of Figure 6 (one per row) with different random seeds used during training.
|
| 603 |
+
|
| 604 |
+

|
| 605 |
+
(d)
|
| 606 |
+
|
| 607 |
+

|
2201.00xxx/2201.00044/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0e99f101505d42e75fd341e9421cf12f13fdba859e02cc044803fff54aad2ec8
|
| 3 |
+
size 534738
|
2201.00xxx/2201.00044/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2201.00xxx/2201.00057/dd8a9590-2d6f-4bed-9fa6-d919d0779366_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2201.00xxx/2201.00057/dd8a9590-2d6f-4bed-9fa6-d919d0779366_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2201.00xxx/2201.00057/dd8a9590-2d6f-4bed-9fa6-d919d0779366_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:306b07b1d2e0a590fe7b44988af7736461a8a97bc9de413bd416189755dc471a
|
| 3 |
+
size 1568915
|
2201.00xxx/2201.00057/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2201.00xxx/2201.00057/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2f0e901876435e21933e8864c6685b6ab440ba99e68aecaa7144163815b5b431
|
| 3 |
+
size 1455572
|
2201.00xxx/2201.00057/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2201.00xxx/2201.00058/82c82297-b486-438e-899b-046bfa0819f0_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2201.00xxx/2201.00058/82c82297-b486-438e-899b-046bfa0819f0_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2201.00xxx/2201.00058/82c82297-b486-438e-899b-046bfa0819f0_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:95b624379b97fab1a269b3437174c589e444ca3ec17f1bf436ff79e9b9ee68e4
|
| 3 |
+
size 8206544
|
2201.00xxx/2201.00058/full.md
ADDED
|
@@ -0,0 +1,726 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Representation Topology Divergence: a Method for Comparing Neural Network Representations
|
| 2 |
+
|
| 3 |
+
Serguei Barannikov<sup>12</sup> Ilya Trofimov<sup>1</sup> Nikita Balabin<sup>1</sup> Evgeny Burnaev<sup>13</sup>
|
| 4 |
+
|
| 5 |
+
# Abstract
|
| 6 |
+
|
| 7 |
+
Comparison of data representations is a complex multi-aspect problem that has no complete solution yet. We propose a method for comparing two data representations. We introduce the Representation Topology Divergence (RTD) which measures the dissimilarity in multi-scale topology between two point clouds of equal size with a one-to-one correspondence between points. The data point clouds are allowed to lie in different ambient spaces. The RTD is one of the few practical methods based on Topological Data Analysis (TDA) applicable to real machine learning datasets. Experiments show the proposed RTD agrees with the intuitive assessment of data representation similarity and is sensitive to its topological structure. We apply RTD to gain insights into neural network representations in computer vision and NLP domains for various problems: training dynamics analysis, data distribution shift, transfer learning, ensemble learning.
|
| 8 |
+
|
| 9 |
+
# 1. Introduction
|
| 10 |
+
|
| 11 |
+
Representations of objects are the essential component learnt by deep neural networks. In opposite to the distance in the original space, the similarity of representations is proved to be semantically meaningful. Despite the significant practical success of deep neural networks, many aspects of their behavior are poorly understood. Only a few methods study neural representations without relying on their quality on a specific downstream task. In this work, we focus on the comparison of representations from neural networks.
|
| 12 |
+
|
| 13 |
+
Comparison of representations is an ill-posed problem without a "ground truth" answer. Early studies were based on variants of Canonical Correlation Analysis (CCA): SVCCA,
|
| 14 |
+
|
| 15 |
+
$^{1}$ Skolkovo Institute of Science and Technology, Moscow, Russia $^{2}$ CNRS, Université Paris Cité, France $^{3}$ Artificial Intelligence Research Institute (AIRI), Moscow, Russia. Correspondence to: Serguei Barannikov <S.Barannikov@skoltech.ru>.
|
| 16 |
+
|
| 17 |
+
Proceedings of the $39^{th}$ International Conference on Machine Learning, Baltimore, Maryland, USA, PMLR 162, 2022. Copyright 2022 by the author(s).
|
| 18 |
+
|
| 19 |
+
(Raghu et al., 2017), PWCCA (Morcos et al., 2018). However, CCA-like measures define similarity too loosely since they are invariant under any invertible linear transformation. The Centered Kernel Alignment (CKA), (Kornblith et al., 2019) is the statistical test to measure the independence of two sets of variables. (Kornblith et al., 2019) proved CKA to be more consistent with the intuitive similarity of representations. Particularly, neural networks learn similar representations from different seeds as evaluated by CKA. Another line of work is concerned with the alignment between groups of neurons (Li et al., 2015), (Wang et al., 2018). The similarity of representations is also a topic of study in neuroscience (Edelman, 1998; Kriegeskorte et al., 2008; Connolly et al., 2012).
|
| 20 |
+
|
| 21 |
+
Representational similarity metrics like CKA and CCA were used to gain insights on representations obtained in meta-learning (Raghu et al., 2020), to compare representations from different layers of language models (Voita et al., 2019), and to study the effect of fine-tuning (Wu et al., 2020). Finally, (Nguyen et al., 2021) used CKA to study the phenomenon of a "block structure" emerging in wide and deep networks in computer vision and compare their representations.
|
| 22 |
+
|
| 23 |
+
In this paper, we take a topological perspective on the comparison of neural network representations. We propose the Representation Topology Divergence (RTD) score, which measures dissimilarity between two point clouds of equal size with a one-to-one correspondence between points. Point clouds are allowed to lie in different ambient spaces. Existing geometrical and topological methods are dedicated to other problems: they are either too general and do not incorporate the one-to-one correspondence requirement (Khrulkov & Oseledets, 2018), (Tsitsulin et al., 2020), or they restrict point clouds to lie in the same ambient space (Kynkänniemi et al., 2019), (Barannikov et al., 2021). Most of these methods are applied to the evaluation of GANs. Recently, (Moor et al., 2020) proposed a loss term to compare the topology of data in original and latent spaces and applied the term as a part of the Topological Autoencoder.
|
| 24 |
+
|
| 25 |
+
In this work, we make the following contributions:
|
| 26 |
+
|
| 27 |
+

|
| 28 |
+
Figure 1: Comparison of representations after the $i$ th epoch and the final one done by RTD, 1-CKA, and disagreement of predictions. All the measures are normalized by division to their maximal values. Strikingly, RTD highly correlates with the disagreement of models' predictions.
|
| 29 |
+
|
| 30 |
+

|
| 31 |
+
|
| 32 |
+
1. We propose a topologically-inspired approach for comparison of neural network representations;
|
| 33 |
+
2. We introduce the $R$ -Cross-Barcode $(P, \tilde{P})$ , a tool based on Topological Data Analysis (TDA), which measures the differences in the multi-scale topology of two point clouds $P, \tilde{P}$ with a one-to-one correspondence between points;
|
| 34 |
+
3. Based on the $R$ -Cross-Barcode $(P, \tilde{P})$ , we define the Representation Topology Divergence (RTD), the quantity measuring the multi-scale topological dissimilarity between two representations;
|
| 35 |
+
4. Our computational experiments show that RTD agrees with an intuitive notion of neural network representations similarity. In contrast to most existing approaches, RTD is sensitive to differences in topological structures (clusters, voids, cavities, tunnels, etc.) of the representations and enjoys a very good correlation with disagreement of models predictions. We apply RTD to compare representations in computer vision and NLP domains and various problems: training dynamics analysis, data distribution shift, transfer learning, ensemble learning, and disentanglement. Experiments show that RTD outperforms CKA, IMD, and SVCCA.
|
| 36 |
+
|
| 37 |
+
The source code is publicly available: https://github.com/IlyaTrofimov/RTD.
|
| 38 |
+
|
| 39 |
+
# 2. Comparing Neural Network Representations
|
| 40 |
+
|
| 41 |
+
Our starting point is the geometric perspective on representation learning through the lens of the manifold hypothesis (Goodfellow et al., 2016), according to which real-world data presented in a high-dimensional space are expected to concentrate in the vicinity of a manifold of much lower dimension. The low-dimensional manifold $M_{\mathcal{P}}$ underlying
|
| 42 |
+
|
| 43 |
+
the given data representation $\mathcal{P}$ can be accessed in general only through discrete sets of samples. The standard approach to recover the manifold $M_{\mathcal{P}}$ is to take a sample $P$ and to approximate $M_{\mathcal{P}}$ by a set of simplexes with vertices from $P$ . Commonly, to select the simplexes approximating $M_{\mathcal{P}}$ one has to fix a threshold $\alpha > 0$ and consider the simplexes with edge lengths not exceeding $\alpha$ (Niyogi et al., 2008; Belkin & Niyogi, 2001). It is difficult to guess the correct value of the threshold, and hence a reasonable approach is to study all thresholds at once.
|
| 44 |
+
|
| 45 |
+
Given two representations, we consider two corresponding graphs with distance-like weights and compare the difference in the multiscale topology of the two graphs.
|
| 46 |
+
|
| 47 |
+
Let $\mathcal{P},\tilde{\mathcal{P}}$ be two representations giving two embeddings of the same data $\nu$ . The two embeddings $\mathcal{P},\tilde{\mathcal{P}}$ belong in general to different ambient spaces and have the natural one-to-one correspondence between points in $\mathcal{P}$ and $\tilde{\mathcal{P}}$ . Given a sample of data $V\subseteq \mathcal{V}$ , the two representations $P = \mathcal{P}(V)$ , $\tilde{P} = \tilde{\mathcal{P}} (V)$ define two weighted graphs $\mathcal{G}^w,\mathcal{G}^{\tilde{w}}$ with the same vertex set $V$ . The weights $w_{AB}$ , $\tilde{w}_{AB}$ of an edge $AB$ are given by the distances $w_{AB} = \mathrm{dist}(P(A),P(B))$ , $\tilde{w}_{AB} = \mathrm{dist}(\tilde{P} (A),\tilde{P} (B))$ .
|
| 48 |
+
|
| 49 |
+
The simplicial approximation to the manifold $M_{\mathcal{P}}$ at threshold $\alpha$ consists of simplexes whose edges in $\mathcal{G}^w$ have weights not exceeding $\alpha$ . Let $\mathcal{G}^{w\leq \alpha}$ denote the graph with the vertex set $V$ and the edges with weights not exceeding $\alpha$ . To compare the simplicial approximations to the manifolds $M_{\mathcal{P}}$ and $M_{\tilde{\mathcal{P}}}$ described by the graphs $\mathcal{G}^{w\leq \alpha}$ and $\mathcal{G}^{\tilde{w}\leq \alpha}$ , we compare each of the two simplicial approximations with the union of simplices formed by edges present in at least one of the two graphs. The graph $\mathcal{G}^{\min (w,\tilde{w})\leq \alpha}$ contains an edge between vertices $A$ and $B$ iff the distance between the points $A$ and $B$ is smaller than $\alpha$ in at least one of the representations $P$ , $\tilde{P}$ . The set of edges of the graph $\mathcal{G}^{\min (w,\tilde{w})\leq \alpha}$ is the union of sets of edges of $\mathcal{G}^{w\leq \alpha}$ and $\mathcal{G}^{\tilde{w}\leq \alpha}$ . The similarity of manifolds $M_{\mathcal{P}}$ and $M_{\tilde{\mathcal{P}}}$ can be measured by the degrees of
|
| 50 |
+
|
| 51 |
+
similarities of the graph $\mathcal{G}^{\min (w,\tilde{w})\leq \alpha}$ with the graph $\mathcal{G}^{w\leq \alpha}$ and the graph $\mathcal{G}^{\tilde{w}\leq \alpha}$ .
|
| 52 |
+
|
| 53 |
+

|
| 54 |
+
Figure 2: Graphs $\mathcal{G}^{w\leq \alpha}$ , $\mathcal{G}^{\tilde{w}\leq \alpha}$ and $\mathcal{G}^{\min (w,\tilde{w})\leq \alpha}$ with edges not in $\mathcal{G}^{w\leq \alpha}$ colored in green.
|
| 55 |
+
|
| 56 |
+
# 2.1. Topological features for a pair of weighted graphs
|
| 57 |
+
|
| 58 |
+
One way to measure the discrepancy between the graphs $\mathcal{G}^{w\leq \alpha}$ and $\mathcal{G}^{\min (w,\tilde{w})\leq \alpha}$ is to count the graph $\mathcal{G}^{w\leq \alpha}$ connected components merged together in the graph $\mathcal{G}^{\min (w,\tilde{w})\leq \alpha}$ . We show an example of this situation in Figure 2 right, see also Figure 10, where three graphs $\mathcal{G}^{w\leq \alpha}$ , $\mathcal{G}^{\tilde{w}\leq \alpha}$ and $\mathcal{G}^{\min (w,\tilde{w})\leq \alpha}$ are shown, with edges of the graph $\mathcal{G}^{\min (w,\tilde{w})\leq \alpha}$ not in $\mathcal{G}^{w\leq \alpha}$ colored in green. Each merging is represented by a class of green paths in $\mathcal{G}^{\min (w,\tilde{w})\leq \alpha}$ joining two blue clusters. The significance of the discrepancy constituted by the green path is measured by the difference $\alpha_{d} - \alpha_{b}$ in the smallest thresholds $\alpha_{b},\alpha_{d}$ at which the two clusters are merged in $\mathcal{G}^{\min (w,\tilde{w})\leq \alpha_{b}}$ and $\mathcal{G}^{w\leq \alpha_{d}}$ . Homology is the tool that permits counting such topological features. Because of the space limit, we gather the definitions and necessary properties of homology in Appendix A, see also (Hatcher, 2005). The number of these simplest topological features is the dimension of the kernel of the linear map $H_0(\mathcal{G}^{w\leq \alpha})\to H_0(\mathcal{G}^{\min (w,\tilde{w})\leq \alpha})$ , as basis elements of the vector space $H_{0}$ correspond to the graph connected components. It may also happen that a non-trivial merging happens between two distant parts of the same $\mathcal{G}^{w\leq \alpha}$ cluster or between two $\mathcal{G}^{w\leq \alpha}$ clusters already connected via a chain of merging, as in Figure 9. The number of these features is the dimension of the cokernel of the map $H_{1}(\mathcal{G}^{w\leq \alpha})\to H_{1}(\mathcal{G}^{\min (w,\tilde{w})\leq \alpha})$ . 
Hence the number of non-trivial mergings is the sum of the two numbers. We are interested in these numbers for all possible thresholds $\alpha$ . When the threshold $\alpha$ is increased then more green and blue edges appear, and also certain green edges become blue. Using an auxiliary graph and the barcodes algorithm, we calculate the numbers of such topological features for all values of $\alpha$ at once.
|
| 59 |
+
|
| 60 |
+
# 2.2. R-Cross-Barcode
|
| 61 |
+
|
| 62 |
+
Recall that the Vietoris-Rips filtered complex of a graph $\mathcal{G}$ equipped with edge weights' matrix $m$ is the collection of $k$ -simplexes, $k \geq 0$ , which are $(k + 1)$ -element subsets of the set of vertices of $\mathcal{G}$ , with the filtration threshold of a simplex defined by the maximal weight on the edges:
|
| 63 |
+
|
| 64 |
+
$$
|
| 65 |
+
R _ {\alpha} (\mathcal {G} ^ {m}) = \left\{\left\{A _ {i _ {0}}, \dots , A _ {i _ {k}} \right\}, A _ {i} \in \operatorname {V e r t} (\mathcal {G}) | m _ {A _ {i} A _ {j}} \leq \alpha \right\}
|
| 66 |
+
$$
|
| 67 |
+
|
| 68 |
+
Our simplicial approximation to the manifold $M_{\mathcal{P}}$ at threshold $\alpha$ is the union of all simplexes from the simplicial complex $R_{\alpha}(\mathcal{G}^{w})$ , and similarly the approximation to $M_{\tilde{\mathcal{P}}}$ is the union of all simplexes from $R_{\alpha}(\mathcal{G}^{\tilde{w}})$ .
|
| 69 |
+
|
| 70 |
+
The dissimilarity between the filtered simplicial complexes $R_{\alpha}(\mathcal{G}^w)$ and $R_{\alpha}(\mathcal{G}^{\tilde{w}})$ can be quantified using the homological methods. The relevant tools here are homology, barcodes and homology exact sequences. We describe our construction below and, because of space limitations, we sketch further explanation of the construction in Appendix, Section A.2.
|
| 71 |
+
|
| 72 |
+
Concretely, to compare the multi-scale topology of the two weighted graphs $\mathcal{G}^w$ and $\mathcal{G}^{\tilde{w}}$ we introduce the weighted graph $\hat{\mathcal{G}}^{w,\tilde{w}}$ with doubled set of vertices and with the edge weights defined as follows. For convenience, fix a numbering of vertices $\operatorname {Vert}(\mathcal{G}) = \{A_1,\dots ,A_N\}$ . For each vertex $A\in \operatorname {Vert}(\mathcal{G})$ we add the extra vertex $A^\prime$ together with $A$ to $\hat{\mathcal{G}}$ , plus the unique additional vertex $O$ , and define the distance-like edge weights in $\hat{\mathcal{G}}^{w,\tilde{w}}$ as:
|
| 73 |
+
|
| 74 |
+
$$
|
| 75 |
+
d _ {A _ {i} ^ {\prime} A _ {j} ^ {\prime}} = \min (w _ {A _ {i} A _ {j}}, \tilde {w} _ {A _ {i} A _ {j}}), d _ {A _ {i} A _ {j} ^ {\prime}} = d _ {A _ {i} A _ {j}} = w _ {A _ {i} A _ {j}},
|
| 76 |
+
$$
|
| 77 |
+
|
| 78 |
+
$$
|
| 79 |
+
d _ {A _ {i} A _ {i} ^ {\prime}} = d _ {O A _ {i}} = 0, d _ {A _ {j} A _ {i} ^ {\prime}} = d _ {O A _ {i} ^ {\prime}} = + \infty \tag {1}
|
| 80 |
+
$$
|
| 81 |
+
|
| 82 |
+
where $i < j$ and $O \in \mathrm{Vert}(\hat{\mathcal{G}}^{w,\tilde{w}})$ is the additional vertex. In practice, for the calculation of RTD described below, the distance matrix can be taken in a slightly simpler form $m = \left( \begin{array}{cc}0 & (w_{+})^{\intercal}\\ w_{+} & \min (w,\tilde{w}) \end{array} \right)$ , where $w$ and $\tilde{w}$ are the edge weight matrices of $\mathcal{G}^w$ and $\mathcal{G}^{\tilde{w}}$ , and $w_{+}$ , respectively $(w_{+})^{\intercal}$ , is the matrix $w$ with upper-(respectively, lower-)triangular part replaced by $+\infty$ .
|
| 83 |
+
|
| 84 |
+
Next, we construct the Vietoris-Rips filtered simplicial complex of the graph $\hat{\mathcal{G}}^{w,\tilde{w}}$ and take its barcode. The doubling of vertices in $\hat{\mathcal{G}}^{w,\tilde{w}}$ creates triangles $OA_{i}A_{j}$ , $A_{i}A_{j}A_{j}^{\prime}$ , $A_{i}A_{i}^{\prime}A_{j}^{\prime}$ at the threshold $\alpha = w_{A_iA_j}$ . These triangles "kill" the edge $A_{i}^{\prime}A_{j}^{\prime}$ becoming blue at this threshold. Intuitively, the $i$ -th barcode of $R_{\alpha}(\hat{\mathcal{G}}^{w,\tilde{w}})$ records the $i$ -dimensional topological features that are born in $\mathcal{G}^{\min (w,\tilde{w})\leq \alpha}$ but are not yet born near the same place in $\mathcal{G}^{w\leq \alpha}$ and the $(i - 1)$ -dimensional topological features that are dead in $\mathcal{G}^{\min (w,\tilde{w})\leq \alpha}$ but are not yet dead at the same place in $\mathcal{G}^{w\leq \alpha}$ , see Theorem 2.1 below.
|
| 85 |
+
|
| 86 |
+
Definition. The $R$ -Cross-Barcode $_i(P, \tilde{P})$ is the set of intervals recording the "births" and "deaths" of $i$ -dimensional
|
| 87 |
+
|
| 88 |
+
Algorithm 1 R-Cross-Barcode $_i(P, \tilde{P})$
|
| 89 |
+
Input: $w$ , $\tilde{w}$ : matrices of pairwise distances within point clouds $P$ , $\tilde{P}$
|
| 90 |
+
|
| 91 |
+
Require: $\operatorname{vr}(m)$ : function computing filtered complex from pairwise distances matrix $m$
|
| 92 |
+
|
| 93 |
+
Require: $\mathrm{B}(C,i)$ : function computing persistence intervals of filtered complex $C$ in dimension $i$

$w,\tilde{w}\gets w,\tilde{w}$ divided by their 0.9 quantiles
|
| 94 |
+
|
| 95 |
+
$$
|
| 96 |
+
m \leftarrow \left( \begin{array}{c c c} w & (w_{+})^{\intercal} & 0 \\ w_{+} & \min (w, \tilde{w}) & +\infty \\ 0 & +\infty & 0 \end{array} \right), \quad R\text{-Cross-Barcode}_{i} \leftarrow \mathrm{B}(\mathrm{vr}(m), i)
|
| 97 |
+
$$
|
| 98 |
+
|
| 99 |
+
Return: intervals list $R$ -Cross-Barcode $_i(P, \tilde{P})$ representing "births" and "deaths" of topological discrepancies between $P$ and $\tilde{P}$ .
|
| 100 |
+
|
| 101 |
+
topological features in the filtered simplicial complex $R_{\alpha}(\hat{\mathcal{G}}^{w,\tilde{w}})$
|
| 102 |
+
|
| 103 |
+
The $R$ -Cross-Barcode $_{*}(P, \tilde{P})$ (for Representations' Cross-Barcode) records the differences in the multiscale topology of the two embeddings. The topological features with longer lifespans indicate in general the essential features.
|
| 104 |
+
|
| 105 |
+
Theorem 2.1. Basic properties of R-Cross-Barcode $_{*}(P, \tilde{P})$ :
|
| 106 |
+
|
| 107 |
+
- if $P(A) = \tilde{P}(A)$ for any object $A \in V$ , then $R$ -Cross-Barcode $_*(P, \tilde{P}) = \emptyset$ ;
|
| 108 |
+
- if all distances within $\tilde{P}(V)$ are zero i.e. all objects are represented by the same point in $\tilde{P}$ , then for all $k \geq 0$ : $R$ -Cross-Barcode $_{k+1}(P, \tilde{P}) = \text{Barcode}_k(P)$ the standard barcode of the point cloud $P$ ;
|
| 109 |
+
- for any value of threshold $\alpha$ , the following sequence of natural linear maps of homology groups
|
| 110 |
+
|
| 111 |
+
$$
|
| 112 |
+
\begin{array}{l} \xrightarrow {r _ {3 i + 3}} H _ {i} \left(R _ {\alpha} \left(\mathcal {G} ^ {w}\right)\right) \xrightarrow {r _ {3 i + 2}} H _ {i} \left(R _ {\alpha} \left(\mathcal {G} ^ {\min (w, \tilde {w})}\right)\right) \xrightarrow {r _ {3 i + 1}} \\ \xrightarrow {r _ {3 i + 1}} H _ {i} \big (R _ {\alpha} (\hat {\mathcal {G}} ^ {w, \tilde {w}}) \big) \xrightarrow {r _ {3 i}} H _ {i - 1} \big (R _ {\alpha} (\mathcal {G} ^ {w}) \big) \xrightarrow {r _ {3 i - 1}} \\ \xrightarrow {r _ {3 i - 1}} \dots \xrightarrow {r _ {1}} H _ {0} \left(R _ {\alpha} \left(\mathcal {G} ^ {\min (w, \tilde {w})}\right)\right) \xrightarrow {r _ {0}} 0 \tag {2} \\ \end{array}
|
| 113 |
+
$$
|
| 114 |
+
|
| 115 |
+
is exact, i.e. for any $j$ the kernel of the map $r_j$ is the image of the map $r_{j + 1}$
|
| 116 |
+
|
| 117 |
+
The proof of the first two properties is immediate and the third property follows from the properties of distinguished triangles of complexes, see Appendix A for more details. The exactness of the sequence (2) for $j = 1,2,3$ implies that the calculation of the topological features from Section 2.1 for all $\alpha$ is reduced to the calculation of $H_{1}(R_{\alpha}(\hat{\mathcal{G}}^{w,\tilde{w}}))$ for all $\alpha$ , i.e. to the calculation of $R$ -Cross-Barcode $_{1}(P,\tilde{P})$ .
|
| 118 |
+
|
| 119 |
+
# 2.3. Representation Topology Divergence.
|
| 120 |
+
|
| 121 |
+
The $R$ -Cross-Barcode $_{*}(P, \tilde{P})$ is by itself, in our opinion, a precise and intuitive tool for understanding discrepancies
|
| 122 |
+
|
| 123 |
+
Input: $\mathcal{P} \in \mathbb{R}^{|\mathcal{V}| \times D}$ , $\tilde{\mathcal{P}} \in \mathbb{R}^{|\mathcal{V}| \times \tilde{D}}$ : data representations
|
| 124 |
+
|
| 125 |
+
for $j = 1$ to $n$ do
|
| 126 |
+
|
| 127 |
+
$$
|
| 128 |
+
V_{j} \leftarrow \text{random choice}(\mathcal{V}, b)
|
| 129 |
+
$$
|
| 130 |
+
|
| 131 |
+
$$
|
| 132 |
+
P_{j}, \tilde{P}_{j} \leftarrow \mathcal{P}(V_{j}), \tilde{\mathcal{P}}(V_{j})
|
| 133 |
+
$$
|
| 134 |
+
|
| 135 |
+
$$
|
| 136 |
+
\mathcal{B}_{j} \leftarrow R\text{-Cross-Barcode}_{1}\left(P_{j}, \tilde{P}_{j}\right) \quad \text{(intervals list calculated by Algorithm 1)}
|
| 137 |
+
$$
|
| 138 |
+
|
| 139 |
+
$$
|
| 140 |
+
rtd_{j} \leftarrow \text{sum of lengths of all intervals in } \mathcal{B}_{j}
|
| 141 |
+
$$
|
| 142 |
+
|
| 143 |
+
end for
|
| 144 |
+
|
| 145 |
+
$$
|
| 146 |
+
RTD_{1}(\mathcal{P}, \tilde{\mathcal{P}}) \leftarrow \operatorname{mean}(rtd)
|
| 147 |
+
$$
|
| 148 |
+
|
| 149 |
+
Return: number $RTD_{1}(\mathcal{P},\tilde{\mathcal{P}})$ representing discrepancy between the representations $\mathcal{P},\tilde{\mathcal{P}}$
|
| 150 |
+
|
| 151 |
+
between two representations. There are several numerical characteristics measuring the non-emptiness of $R$ -Cross-Barcode. Based on experiments and on the relation of the sum of bars' lengths with the Earth Mover's Distance (Barannikov et al., 2021), we define the sum of lengths of the bars in $R$ -Cross-Barcode $_i(P, \tilde{P})$ , denoted $RTD_i(P, \tilde{P})$ , as the scalar characterizing the degree of topological discrepancy between the representations $P, \tilde{P}$ . We most often use the average of $RTD_1(P, \tilde{P})$ and $RTD_1(\tilde{P}, P)$ , denoted $RTD$ score, in our computations below.
|
| 152 |
+
|
| 153 |
+
Proposition 2.2. If $RTD_{i}(P, \tilde{P}) = RTD_{i}(\tilde{P}, P) = 0$ for all $i \geq 1$ , then the barcodes of the weighted graphs $\mathcal{G}^w$ and $\mathcal{G}^{\tilde{w}}$ are the same in any degree. Moreover, in this case the topological features are located in the same places: the inclusions $R_{\alpha}(\mathcal{G}^w) \subseteq R_{\alpha}(\mathcal{G}^{\min(w, \tilde{w})}), R_{\alpha}(\mathcal{G}^{\tilde{w}}) \subseteq R_{\alpha}(\mathcal{G}^{\min(w, \tilde{w})})$ induce homology isomorphisms for any threshold $\alpha$ .
|
| 154 |
+
|
| 155 |
+
# 2.4. Algorithm
|
| 156 |
+
|
| 157 |
+
First we compute the $R$ -Cross-Barcode $_1(P, \tilde{P})$ on two representations $P, \tilde{P}$ of a sample $V$ . For this we calculate the matrices of pairwise distances $w, \tilde{w}$ within the point clouds $P, \tilde{P}$ . We assume that the metrics in the ambient spaces of representations are normalized so that the two point clouds are of comparable size, namely their 0.9 quantile of pairwise distances coincide. This ensures that our score has scaling invariance, the reasonable property of a good representation similarity measure, as argued in e.g. (Kornblith et al., 2019). Next, the algorithm builds the Vietoris-Rips complex from the matrix $m$ defined in Equation 1. Then the 1-dimensional barcode, see (Barannikov, 2021), of the built filtered simplicial complex is calculated. The last two steps can be done using scripts that are optimized for GPU acceleration (Zhang et al., 2020). Then we sum the lengths of bars in $R$ -Cross-Barcode $_1(P, \tilde{P})$ . To get the symmetric measure we usually take the half-sum with the similar sum
|
| 158 |
+
|
| 159 |
+

|
| 160 |
+
|
| 161 |
+

|
| 162 |
+
(a) Point clouds used in "clusters" experiment.
|
| 163 |
+
|
| 164 |
+

|
| 165 |
+
(b) Representations' comparison measures. Ideally, the measure should change monotonically with the increase of topological discrepancy.
|
| 166 |
+
(c) R-Cross-Barcode $(P, \tilde{P})$ for the "clusters" experiments. $\tilde{P}$ - is the point cloud having one cluster, $P - 2, 3, 4, 5, 6, 10, 12$ clusters.
|
| 167 |
+
Figure 3: RTD perfectly detects cluster structures, while rival measures fail. One cluster is compared with 2-12 clusters.
|
| 168 |
+
|
| 169 |
+
of bars in $R$ -Cross-Barcode $_1(\tilde{P}, P)$ . The computation is repeated a sufficient number of times to obtain the mean of the chosen characteristics. We have observed experimentally that about 10 times is usually sufficient for common datasets. The main steps of the computation are summarized in Algorithms 1 and 2.
|
| 170 |
+
|
| 171 |
+
Complexity. Algorithm 1 starts with computation of the two matrices of pairwise distances $w$ , $\tilde{w}$ for a pair of representations of a sample $V$ : $P \in \mathbb{R}^{b \times D}$ , $\tilde{P} \in \mathbb{R}^{b \times \tilde{D}}$ involving $O(|V|^2 (D + \tilde{D}))$ operations. Next, persistent intervals of the filtered complex must be computed. Given the distance matrix $m$ , the complexity of their computation does not depend on the dimensions $D$ , $\tilde{D}$ of the data representations. Generally, the barcode computation is at worst cubic in the number of simplexes involved. In practice, the computation is quite fast since the boundary matrix is typically sparse for real datasets. For R-Cross-Barcodes' calculation, we used GPU-optimized software. Thus, the computation of R-Cross-Barcode takes a similar time as in the previous step even on datasets of high dimensionality. Since only the dissimilarities in representation topology are calculated, the results are quite robust and a rather low number of iterations is needed to obtain accurate results.
|
| 172 |
+
|
| 173 |
+
# 3. Experiments
|
| 174 |
+
|
| 175 |
+
In the experimental section, we study the ability of the proposed R-Cross-Barcodes and RTD to detect changes in topological structures with the use of synthetic point clouds;
|
| 176 |
+
|
| 177 |
+
we demonstrate the superiority of RTD over CKA, SVCCA, IMD (Section 3.1). RTD meaningfully compares representations from UMAP with different parameters (Section 3.2). By comparing representations from various architectures (Section 3.3), layers, epochs, ensembles and after data distribution shift (Section 3.4) we show that RTD is in line with natural notion of representational similarity. A high correlation between RTD and disagreement of neural network predictions is an interesting empirical finding.
|
| 178 |
+
|
| 179 |
+
# 3.1. Experiments with synthetic point clouds
|
| 180 |
+
|
| 181 |
+
We start with small-scale experiments with synthetic point clouds: "clusters" and "rings". For the "clusters" experiment (Figure 3, top), the initial point cloud consists of 300 points randomly sampled from the 2-dimensional normal distribution having mean $(0,0)$ . Next, we split it into 2,3...12 parts (clusters) and move them to the circle of radius 10. Then, we compare the initial point cloud (having one cluster) with the split ones.
|
| 182 |
+
|
| 183 |
+
We compared these point clouds by calculating: RTD, CKA (Kornblith et al., 2019), IMD (Tsitsulin et al., 2020) and SVCCA (Raghu et al., 2017). We calculated linear CKA since (Kornblith et al., 2019) concluded that it provides the same performance as the RBF kernel, but does not require selecting a kernel width. For SVCCA, we calculated average correlation $\bar{\rho}$ for the truncation threshold 0.99, as recommended in (Raghu et al., 2017). The IMD score (Tsitsulin et al., 2020) was very noisy and we averaged it over 100 runs.
|
| 184 |
+
|
| 185 |
+

|
| 186 |
+
(a) 2D representations of MNIST with n_neighbors $= 10, 50, 200$
|
| 187 |
+
|
| 188 |
+

|
| 189 |
+
Figure 4: Comparing representations of MNIST by UMAP with varying n_neighbors.
|
| 190 |
+
|
| 191 |
+

|
| 192 |
+
|
| 193 |
+

|
| 194 |
+
(b) 1-CKA
|
| 195 |
+
|
| 196 |
+

|
| 197 |
+
|
| 198 |
+

|
| 199 |
+
(c) RTD
|
| 200 |
+
|
| 201 |
+
Figure 3b presents the results: RTD perfectly tracks the change of the topological complexity while the alternative measures mostly fail. The Kendall-tau rank correlations of the measures with the number of clusters are: RTD: 1.0, CKA: 0.23, IMD: 0.43, SVCCA: 0.14. We also note that RTD does not have any tunable parameters, unlike SVCCA, and does not require averaging over as many runs as IMD. Figure 3c shows the $H_{1}$ R-Cross-Barcodes calculated while comparing clusters. In accordance with the definition of RTD, $H_{0}$ barcodes are absent. The sum of the lengths of the segments increases with increasing differences in topology. Running times and all of the R-Cross-Barcodes are shown in Appendix C. Additional representation similarity measures were evaluated in Appendix I.
|
| 202 |
+
|
| 203 |
+
In the "rings" experiment, we compared synthetic point clouds consisting of a variable number of rings, see Figure 12a in Appendix D. Initially, there are 500 points uniformly distributed over the unit circle. Then, the points are moved onto circles with radii varying from 0.5 to 1.5. Finally, we compare the point cloud having 5 rings with other ones. Figure 12b in Appendix D presents the results. RTD almost ideally reflects the change of the topological complexity while the alternative measures mostly fail. The Kendall-tau rank correlations of the measures with the number of rings are: RTD: 0.8, CKA: -0.2, IMD: 0.8, SVCCA: -0.2.
|
| 204 |
+
|
| 205 |
+
In the next sections, we compare RTD only with CKA, since it is the most popular method for comparing neural representations (Kornblith et al., 2019; Nguyen et al., 2021).
|
| 206 |
+
|
| 207 |
+
# 3.2. Comparing representations from UMAP
|
| 208 |
+
|
| 209 |
+
UMAP (McInnes et al., 2018) is the state-of-the-art method for visualizing high-dimensional datasets by obtaining their 2D/3D representations. We apply UMAP to the MNIST dataset to get 2D representations. We vary the number of neighbors in UMAP in the range (10, 20, 50, 100, 200), see Figure 4a (all of the figures are in Appendix H). This parameter affects the cluster structure: for low values, the algorithm focuses on the local structure and clusters are crisp; for high values, the algorithm pays more attention to the global structure, and clusters were found to often
|
| 210 |
+
|
| 211 |
+
overlap. Then, we perform the pairwise comparison of all the variants of 2D representations by RTD and CKA, see Figure 4. RTD reveals a nice monotonic pattern w.r.t. a number of neighbors, while values of CKA are quite chaotic.
|
| 212 |
+
|
| 213 |
+
# 3.3. Experiments with NAS-Bench-NLP
|
| 214 |
+
|
| 215 |
+

|
| 216 |
+
Figure 5: Multi-dimensional scaling of 90 architectures selected randomly from NAS-Bench-NLP. Color depicts log. perplexity.
|
| 217 |
+
|
| 218 |
+
Recently, neural architecture search has attracted a lot of attention in the machine learning community (Liu et al., 2019; Dong & Yang, 2019; Chen et al., 2021). NAS-Bench-NLP (Klyuchnikov et al., 2020) is a benchmark for neural architecture search which is a collection of 14,322 recurrent architectures; all of the architectures were trained on the PTB dataset. We took 90 randomly selected architectures and compared word embeddings by RTD: each architecture contains 400-dimensional embeddings of 10,000 words. Then, we evaluated all the pairwise similarities between embeddings<sup>1</sup> from the architectures and visualized them via multi-dimensional scaling, see Fig. 5, where color depicts log. perplexity. According to common sense, architectures having similar embeddings have a similar log. perplexity. Also, we checked that RTD is approximately a metric for this particular case since it satisfies the triangle inequality for $97\%$ of triplets of architectures from NAS-Bench-NLP.
|
| 219 |
+
|
| 220 |
+
Table 1: The correlation of metrics with Disagreement in the training dynamics experiment
|
| 221 |
+
|
| 222 |
+
<table><tr><td></td><td>RTD</td><td>1-CKA</td></tr><tr><td>VGG-11</td><td>0.976 ± 0.003</td><td>0.818 ± 0.010</td></tr><tr><td>ResNet-20</td><td>0.971 ± 0.001</td><td>0.924 ± 0.008</td></tr></table>
|
| 223 |
+
|
| 224 |
+
# 3.4. Experiments with convolutional neural networks
|
| 225 |
+
|
| 226 |
+
To demonstrate the abilities of RTD to work with image representations, we train ResNet-20 (He et al., 2016) and VGG-11 (Simonyan & Zisserman, 2014) networks on CIFAR (Krizhevsky et al., 2009) datasets. In experiments, we compare RTD with CKA and disagreement of predictions. For a more intuitive comparison, we consider 1-CKA instead of CKA. As a measure of the difference in predictions, we use Disagreement (Kuncheva & Whitaker, 2003; Wen et al., 2020), the fraction of mismatched predictions calculated as $\frac{1}{N}\sum_{n=1}^{N}[f_{\theta_1}(x_n)\neq f_{\theta_2}(x_n)]$ , where $f_{\theta}(x)$ denotes the class label predicted by the network for input $x$ . As discussed in (Fort et al., 2019), the lower the accuracy of predictions, the higher its potential mismatch due to the possibility of the wrong answers being random, and then we normalize the Disagreement by $(1 - a)$ , where $a$ is the mean accuracy of the predictions. To calculate the final metrics, we averaged the values for five random batches of 500 representations from the test dataset.
|
| 227 |
+
|
| 228 |
+
# 3.4.1. TRAINING DYNAMICS
|
| 229 |
+
|
| 230 |
+
In the first experiment, we analyze the training dynamics of neural networks. On each epoch, we collect the outputs of the convolutional part that extract the representations. To compare dynamics properly, we scaled the metrics by their maximum value. Fig. 1 shows the dynamics of the differences with the final representations. The results coincide with the intuition: the representations on each epoch become more similar to the final one. Moreover, RTD demonstrates the same behavior as disagreement of predictions. RTD better correlates with the Disagreement, see Table 1.
|
| 231 |
+
|
| 232 |
+
# 3.4.2. LAYERS
|
| 233 |
+
|
| 234 |
+
In the next experiment, we compare the outputs of layer blocks within the trained network. For VGG-11, the block has the form $\mathrm{Conv}\rightarrow \mathrm{BN}\rightarrow \mathrm{Activation}\rightarrow (\mathrm{Pooling})$ , and for ResNet-20, we take the output of the first $\mathrm{Conv}\rightarrow \mathrm{BN}\rightarrow \mathrm{Activation}$ block, and then the outputs of each residual block. In Figure 6, we see that both RTD and 1-CKA show similar results, including the slight difference between adjacent layers. We see that both metrics reveal the significant changes in the outputs of the ResNet-20 last block. In Figure 18, we performed similar experiment with ResNet-50 and ConvNeXt-tiny (Liu et al., 2022) architectures pre-trained on ImageNet-1k dataset (Deng et al., 2009).
|
| 235 |
+
|
| 236 |
+

|
| 237 |
+
|
| 238 |
+

|
| 239 |
+
|
| 240 |
+

|
| 241 |
+
Figure 6: The representation differences between the layer blocks within trained networks. The columns correspond to the architecture, and the rows, to the metric.
|
| 242 |
+
|
| 243 |
+

|
| 244 |
+
|
| 245 |
+
Table 2: Analysis of ResNet-20 representations under different data distribution shifts. The correlation of metrics with Disagreement.
|
| 246 |
+
|
| 247 |
+
<table><tr><td></td><td>RTD</td><td>1-CKA</td></tr><tr><td>Noise</td><td>0.966 ± 0.001</td><td>0.927 ± 0.006</td></tr><tr><td>Gaussian blur</td><td>0.982 ± 0.004</td><td>0.913 ± 0.011</td></tr><tr><td>Grayscale</td><td>0.990 ± 0.004</td><td>0.928 ± 0.040</td></tr><tr><td>Hue</td><td>0.978 ± 0.008</td><td>0.927 ± 0.017</td></tr></table>
|
| 248 |
+
|
| 249 |
+
# 3.4.3. DATA DISTRIBUTION SHIFT
|
| 250 |
+
|
| 251 |
+
Here, we apply the data distribution shift to test the RTD. As a shift, we consider different image transformations: noising, blurring, grayscaling, and hue changing. For each transformation, we analyze the metric dynamics as the strength of a transformation increases. Figure 7 confirms our sanity check of the monotonicity of RTD and other metrics with respect to data distribution shift. Moreover, Table 2 shows that RTD has a higher correlation with disagreement of predictions.
|
| 252 |
+
|
| 253 |
+
# 3.4.4. ENSEMBLES
|
| 254 |
+
|
| 255 |
+
It is known that an ensemble of neural networks performs better than a single network and can estimate the uncertainty of the predictions. It is shown in (Lee et al., 2015; Opitz et al., 1996) that the diverse ensembles work better. Thus, measuring ensembles' diversity is important. The disagreement is a good example of such a measure. To show that RTD can measure the diversity as well as disagreement, we learn two types of ensembles: the classical ensemble, when we learn the networks from different random initializations, and the Fast Geometric Ensemble (FGE) (Garipov et al., 2018), which is known to have lower diversity. We learn four models for each type of ensemble and average the metrics among all pairs. The results in Table 3 confirm that RTD is capable of measuring the diversity on the same scale as the disagreement of predictions.
|
| 256 |
+
|
| 257 |
+

|
| 258 |
+
(a) Noise
|
| 259 |
+
|
| 260 |
+

|
| 261 |
+
(b) Gaussian blur
|
| 262 |
+
Figure 7: Analysis of ResNet-20 representations under different data distribution shifts. The dynamics of scaled metrics with the monotonic transformations of images.
|
| 263 |
+
|
| 264 |
+

|
| 265 |
+
(c) Grayscale
|
| 266 |
+
|
| 267 |
+

|
| 268 |
+
(d) Hue
|
| 269 |
+
|
| 270 |
+
Table 3: The averaged metric among all pairs of ensemble members with a ResNet-20 architecture, and the relative difference between the types of ensemble.
|
| 271 |
+
|
| 272 |
+
<table><tr><td></td><td>Class. Ensemble</td><td>FGE</td><td>Diff. %</td></tr><tr><td>RTD</td><td>15.27 ± 0.12</td><td>10.45 ± 0.32</td><td>31.6</td></tr><tr><td>1-CKA</td><td>0.094 ± 0.02</td><td>0.033 ± 0.003</td><td>64.9</td></tr><tr><td>Disagreement</td><td>0.915 ± 0.05</td><td>0.607 ± 0.03</td><td>33.6</td></tr></table>
|
| 273 |
+
|
| 274 |
+
Table 4: The correlation of metrics with Disagreement in the transfer learning experiment
|
| 275 |
+
|
| 276 |
+
<table><tr><td></td><td>RTD</td><td>1-CKA</td></tr><tr><td>CIFAR-100</td><td>0.98 ± 0.01</td><td>0.93 ± 0.02</td></tr><tr><td>CIFAR-10</td><td>0.91 ± 0.01</td><td>0.89 ± 0.02</td></tr></table>
|
| 277 |
+
|
| 278 |
+
# 3.4.5. TRANSFER LEARNING
|
| 279 |
+
|
| 280 |
+
Another possible application is the measure of changes in representations after transferring the pre-trained model to a new task. In this experiment, we conduct the transfer learning from CIFAR-100 to the CIFAR-10 dataset. We perform full fine-tuning with the small learning rate for the convolutional part. In Fig. 8, we demonstrate the dynamics for both dataset representations. The results again coincide with the intuition about the difference during the learning steps, and here RTD also has a high correlation with Disagreement, see Table 4. Also, we note that RTD can be applied to the continual learning task, where catastrophic forgetting appears, and thus it is crucial to track the changes in network representations.
|
| 281 |
+
|
| 282 |
+
# 3.5. Additional experiments
|
| 283 |
+
|
| 284 |
+
We describe how RTD can be used to evaluate a disentanglement of generative models in Appendix G. Comparisons of BigGAN's internal representations by RTD agree with those of images by FID, see Appendix E.
|
| 285 |
+
|
| 286 |
+
# 4. Conclusions
|
| 287 |
+
|
| 288 |
+
In this paper, we have proposed a topologically-inspired approach to compare neural network representations. The most widely used methods for this problem are statistical: Canonical Correlation Analysis (CCA) and Centered Kernel Alignment (CKA). But the problem itself is a geometric one: the comparison of two neural representations of the same objects is de-facto the comparison of two point clouds from different spaces. The natural way is to compare their geometrical and topological features with due account of their localization — that is exactly what was done by the R-Cross-Barcode and RTD. We demonstrated that RTD coincides with the natural assessment of representations similarity. We used the RTD to gain insights into neural network representations in computer vision and NLP domains for various problems: training dynamics analysis, data distribution shift, transfer learning, ensemble learning, and disentanglement assessment.
|
| 289 |
+
|
| 290 |
+
RTD correlates strikingly well with the disagreement of models' predictions; this is an intriguing topic for further research. Finally, R-Cross-Barcode and RTD are general tools that are not limited to the comparison of representations only. They could be applied to other problems involving comparison of two point clouds with one-to-one correspondence, for example, in 3D computer vision.
|
| 291 |
+
|
| 292 |
+
Acknowledgements. The work was supported by the Analytical center under the RF Government (subsidy agreement 000000D730321P5Q0002, Grant No. 70-2021-00145 02.11.2021).
|
| 293 |
+
|
| 294 |
+
# References
|
| 295 |
+
|
| 296 |
+
Barannikov, S. Framed Morse complexes and its invariants. Adv. Soviet Math., 22:93-115, 1994.
|
| 297 |
+
Barannikov, S. Canonical Forms = Persistence Diagrams. Tutorial. In European Workshop on Computational Geometry (EuroCG 2021), 2021.
|
| 298 |
+
Barannikov, S., Trofimov, I., Sotnikov, G., Trimbach, E., Korotin, A., Filippov, A., and Burnaev, E. Manifold
|
| 299 |
+
|
| 300 |
+

|
| 301 |
+
Figure 8: Scaled metrics demonstrating the difference between representations of CIFAR-100 and CIFAR-10 datasets during fine-tune process.
|
| 302 |
+
|
| 303 |
+

|
| 304 |
+
|
| 305 |
+
Topology Divergence: a framework for comparing data manifolds. In Proceedings of the 35th International Conference on Neural Information Processing Systems, NeurIPS'21, arXiv:2106.04024, 2021.
|
| 306 |
+
Barannikov, S., Trofimov, I., Trimbach, E., Wang, J., and Burnaev, E. Homological assessment of data representations. In Fourteenth International Conference on Machine Vision (ICMV 2021), volume 12084, pp. 86-90. SPIE, 2022.
|
| 307 |
+
Belkin, M. and Niyogi, P. Laplacian eigenmaps and spectral techniques for embedding and clustering. In Proceedings of the 14th International Conference on Neural Information Processing Systems: Natural and Synthetic, pp. 585-591, 2001.
|
| 308 |
+
Brock, A., Donahue, J., and Simonyan, K. Large scale gan training for high fidelity natural image synthesis. arXiv preprint arXiv:1809.11096, 2018.
|
| 309 |
+
Chen, W., Gong, X., and Wang, Z. Neural architecture search on ImageNet in four GPU hours: A theoretically inspired perspective. International Conference on Learning Representations, 2021.
|
| 310 |
+
Connolly, A. C., Guntupalli, J. S., Gors, J., Hanke, M., Halchenko, Y. O., Wu, Y.-C., Abdi, H., and Haxby, J. V. The representation of biological classes in the human brain. Journal of Neuroscience, 32(8):2608-2618, 2012.
|
| 311 |
+
Deng, J., Dong, W., Socher, R., Li, L.-J., Li, K., and Fei-Fei, L. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pp. 248-255. IEEE, 2009.
|
| 312 |
+
Dong, X. and Yang, Y. Searching for a robust neural architecture in four GPU hours. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1761-1770, 2019.
|
| 313 |
+
Edelman, S. Representation is representation of similarities. Behavioral and brain sciences, 21(4):449-467, 1998.
|
| 314 |
+
|
| 315 |
+
Fort, S., Hu, H., and Lakshminarayanan, B. Deep ensembles: A loss landscape perspective. arXiv preprint arXiv:1912.02757, 2019.
|
| 316 |
+
Garipov, T., Izmailov, P., Podoprikhin, D., Vetrov, D., and Wilson, A. G. Loss surfaces, mode connectivity, and fast ensembling of dnns. In Proceedings of the 32nd International Conference on Neural Information Processing Systems, pp. 8803-8812, 2018.
|
| 317 |
+
Gelfand, S. I. and Manin, Y. I. Methods of homological algebra. Springer Science & Business Media, 2002.
|
| 318 |
+
Goodfellow, I., Bengio, Y., Courville, A., and Bengio, Y. Deep learning, volume 1. MIT press Cambridge, 2016.
|
| 319 |
+
Gretton, A., Bousquet, O., Smola, A., and Schölkopf, B. Measuring statistical dependence with hilbert-schmidt norms. In International conference on algorithmic learning theory, pp. 63-77. Springer, 2005.
|
| 320 |
+
Hatcher, A. Algebraic topology. 2005.
|
| 321 |
+
He, K., Zhang, X., Ren, S., and Sun, J. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016.
|
| 322 |
+
Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., and Hochreiter, S. Gans trained by a two time-scale update rule converge to a local Nash equilibrium. Advances in neural information processing systems, 30, 2017.
|
| 323 |
+
Khrulkov, V. and Oseledets, I. Geometry score: A method for comparing generative adversarial networks. In International Conference on Machine Learning, pp. 2621-2629. PMLR, 2018.
|
| 324 |
+
Klyuchnikov, N., Trofimov, I., Artemova, E., Salnikov, M., Fedorov, M., and Burnaev, E. Nas-bench-nlp: neural architecture search benchmark for natural language processing. arXiv preprint arXiv:2006.07116, 2020.
|
| 325 |
+
|
| 326 |
+
Kornblith, S., Norouzi, M., Lee, H., and Hinton, G. Similarity of neural network representations revisited. In International Conference on Machine Learning, pp. 3519-3529. PMLR, 2019.
|
| 327 |
+
Kriegeskorte, N., Mur, M., and Bandettini, P. A. Representational similarity analysis-connecting the branches of systems neuroscience. Frontiers in systems neuroscience, 2:4, 2008.
|
| 328 |
+
Krizhevsky, A., Hinton, G., et al. Learning multiple layers of features from tiny images. 2009.
|
| 329 |
+
Kuncheva, L. I. and Whitaker, C. J. Measures of diversity in classifier ensembles and their relationship with the ensemble accuracy. Machine learning, 51(2):181-207, 2003.
|
| 330 |
+
Kynkäänniemi, T., Karras, T., Laine, S., Lehtinen, J., and Aila, T. Improved precision and recall metric for assessing generative models. In 33rd Conference on Neural Information Processing Systems (NeurIPS 2019), 2019.
|
| 331 |
+
Le Peutrec, D., Nier, F., and Viterbo, C. Precise Arrhenius law for p-forms: The Witten Laplacian and Morse-Barannikov complex. Annales Henri Poincaré, 14(3): 567-610, Apr 2013. ISSN 1424-0661. doi: 10.1007/s00023-012-0193-9.
|
| 332 |
+
Lee, S., Purushwalkam, S., Cogswell, M., Crandall, D., and Batra, D. Why m heads are better than one: Training a diverse ensemble of deep networks. arXiv preprint arXiv:1511.06314, 2015.
|
| 333 |
+
Li, Y., Yosinski, J., Clune, J., Lipson, H., Hopcroft, J. E., et al. Convergent learning: Do different neural networks learn the same representations? In FE@ NIPS, pp. 196-212, 2015.
|
| 334 |
+
Liu, H., Simonyan, K., and Yang, Y. Darts: Differentiable architecture search. International Conference on Learning Representations, 2019.
|
| 335 |
+
Liu, Z., Mao, H., Wu, C.-Y., Feichtenhofer, C., Darrell, T., and Xie, S. A convnet for the 2020s. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 11976-11986, 2022.
|
| 336 |
+
McInnes, L., Healy, J., and Melville, J. Umap: Uniform manifold approximation and projection for dimension reduction. arXiv preprint arXiv:1802.03426, 2018.
|
| 337 |
+
Moor, M., Horn, M., Rieck, B., and Borgwardt, K. Topological autoencoders. In International Conference on Machine Learning, pp. 7045-7054. PMLR, 2020.
|
| 338 |
+
Morcos, A. S., Raghu, M., and Bengio, S. Insights on representational similarity in neural networks with canonical correlation. arXiv preprint arXiv:1806.05759, 2018.
|
| 339 |
+
|
| 340 |
+
Nguyen, T., Raghu, M., and Kornblith, S. Do wide and deep networks learn the same things? uncovering how neural network representations vary with width and depth. International Conference on Learning Representations, 2021.
|
| 341 |
+
Niyogi, P., Smale, S., and Weinberger, S. Finding the homology of submanifolds with high confidence from random samples. Discrete & Computational Geometry, 39(1-3): 419-441, 2008.
|
| 342 |
+
Opitz, D. W., Shavlik, J. W., et al. Generating accurate and diverse members of a neural-network ensemble. Advances in neural information processing systems, pp. 535-541, 1996.
|
| 343 |
+
Raghu, A., Raghu, M., Bengio, S., and Vinyals, O. Rapid learning or feature reuse? towards understanding the effectiveness of maml. International Conference on Learning Representations, 2020.
|
| 344 |
+
Raghu, M., Gilmer, J., Yosinski, J., and Sohl-Dickstein, J. Svcca: Singular vector canonical correlation analysis for deep learning dynamics and interpretability. arXiv preprint arXiv:1706.05806, 2017.
|
| 345 |
+
Simonyan, K. and Zisserman, A. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014.
|
| 346 |
+
Tsitsulin, A., Munkhoeva, M., Mottin, D., Karras, P., Bronstein, A., Oseledets, I., and Mueller, E. The shape of data: Intrinsic distance for data distributions. In International Conference on Learning Representations, 2020.
|
| 347 |
+
Voita, E., Sennrich, R., and Titov, I. The bottom-up evolution of representations in the transformer: A study with machine translation and language modeling objectives. EMNLP, 2019.
|
| 348 |
+
Wang, L., Hu, L., Gu, J., Wu, Y., Hu, Z., He, K., and Hopcroft, J. Towards understanding learning representations: To what extent do different neural networks learn the same representation. 32nd Conference on Neural Information Processing Systems (NeurIPS), 2018.
|
| 349 |
+
Wen, Y., Tran, D., and Ba, J. Batchensemble: An alternative approach to efficient ensemble and lifelong learning. *ArXiv*, abs/2002.06715, 2020.
|
| 350 |
+
Whitehead, G. W. Elements of homotopy theory, volume 61. Springer Science & Business Media, 1968.
|
| 351 |
+
Wu, J. M., Belinkov, Y., Sajjad, H., Durrani, N., Dalvi, F., and Glass, J. Similarity analysis of contextual word representation models. Proceedings of ACL, 2020.
|
| 352 |
+
|
| 353 |
+
Zhang, S., Xiao, M., and Wang, H. GPU-accelerated computation of Vietoris-Rips persistence barcodes. In 36th International Symposium on Computational Geometry (SoCG 2020). Schloss Dagstuhl-Leibniz-Zentrum für Informatik, 2020.
|
| 354 |
+
Zhou, S., Zelikman, E., Lu, F., Ng, A. Y., Carlsson, G. E., and Ermon, S. Evaluating the disentanglement of deep generative models through manifold topology. In ICLR, 2021.
|
| 355 |
+
Zomorodian, A. J. Computing and comprehending topology: Persistence and hierarchical Morse complexes (Ph.D. Thesis). University of Illinois at Urbana-Champaign, 2001.
|
| 356 |
+
|
| 357 |
+
# A. Background on Simplicial Complexes. Barcodes
|
| 358 |
+
|
| 359 |
+
The simplicial complex is a combinatorial object that can be thought of as a higher-dimensional generalization of a graph.
|
| 360 |
+
|
| 361 |
+
A simplex is defined via the set of its vertices. Given a finite set $V$ , a $k$ -simplex is a finite $(k + 1)$ -element subset in $V$ . Simplicial complex $S$ is a collection of $k$ -simplexes, $k \geq 0$ , which satisfies the natural condition that for each $\sigma \in S$ , $\sigma' \subset \sigma$ implies $\sigma' \in S$ . A simplicial complex consisting only of $0-$ and $1$ -simplexes is a graph.
|
| 362 |
+
|
| 363 |
+
Denote via $C_k(S)$ the vector space over the field $\mathbb{Z} / 2\mathbb{Z} = \{0,1\}$ whose basis elements are $k$ -simplexes from $S$ . The boundary linear operator $\partial_k: C_k(S) \to C_{k-1}(S)$ is defined on $\sigma = \{A_0, \ldots, A_k\}$ as
|
| 364 |
+
|
| 365 |
+
$$
|
| 366 |
+
\partial_ {k} \sigma = \sum_ {j = 0} ^ {k} \left\{A _ {0}, \dots , A _ {j - 1}, A _ {j + 1}, \dots , A _ {k} \right\}.
|
| 367 |
+
$$
|
| 368 |
+
|
| 369 |
+
The $k$ th homology group $H_{k}(S)$ is the factor vector space $\ker \partial_k / \operatorname{im} \partial_{k+1}$ . The elements $c \in \ker \partial_k$ are called cycles. The elements of $H_{k}(S)$ represent various $k$ -dimensional topological features in $S$ . A basis in $H_{k}(S)$ corresponds to a set of basic topological features.
|
| 370 |
+
|
| 371 |
+
For example, the vector space $H_0$ has the basis whose elements are in one-to-one correspondence with equivalence classes of vertices, connected by paths of 1-simplices (edges), i.e. with connected components of $S$ . The basis elements of the vector space $H_1$ correspond to basic equivalence classes of nontrivial closed paths of 1-simplices. Two closed paths, also named 1-cycles, are equivalent if they are connected by a chain of modifications by boundaries of triangles (2-simplices).
|
| 372 |
+
|
| 373 |
+
A map $S_1 \to S_2$ , e.g. $\mathcal{G}^{w \leq \alpha} \to \mathcal{G}^{\min(w, \tilde{w}) \leq \alpha}$ see section 2.1, defines the maps $H_k(S_1) \to H_k(S_2)$ . The kernel of the linear map $H_0(S_1) \to H_0(S_2)$ is spanned by the pairs of $S_1$ clusters merged together in $S_2$ . The cokernel of the linear map $H_1(S_1) \to H_1(S_2)$ consists of 1-cycles in $S_2$ which are not from $S_1$ , i.e. it consists of equivalence classes of closed paths in $S_2$ , which cannot be modified by boundaries of triangles into images of 1-cycles from $S_1$ .
|
| 374 |
+
|
| 375 |
+
In applications, the simplicial complexes are often built via consequential adding of simplexes one after another in increasing order of some numerical characteristics. Mathematically this corresponds to a filtration on the simplicial complex. It is defined as a family of simplicial complexes $S_{\alpha}$ , indexed by a finite set of real numbers, with nested collections of simplexes: for $\alpha_{1} < \alpha_{2}$ all simplexes of $S_{\alpha_{1}}$ are also in $S_{\alpha_{2}}$ . An example of a filtered simplicial complex is the Vietoris-Rips simplicial complex from Section 2.2.
|
| 376 |
+
|
| 377 |
+
The inclusions $S_{\alpha} \subseteq S_{\beta}$ induce the maps on homology $H_{k}(S_{\alpha}) \to H_{k}(S_{\beta})$ . The evolution of cycles across the nested family of simplicial complexes $S_{\alpha_i}$ is described by the principal persistent homology theorem (Barannikov, 1994; Zomorodian, 2001; Le Peutrec et al., 2013), according to which for each dimension there exists a choice of a set of basic topological features across all nested simplicial complexes $S_{\alpha}$ so that each basic feature $c$ appears in $H_{k}(S_{\alpha})$ at specific time $\alpha = b_{c}$ and disappears at specific time $\alpha = d_{c}$ . The barcode of the filtered complex is the record of the appearance, or "birth" time, and the disappearance, or "death" time, of all these basic topological features.
|
| 378 |
+
|
| 379 |
+
# A.1. Exact sequence and topological features
|
| 380 |
+
|
| 381 |
+
A sequence of vector spaces and linear maps
|
| 382 |
+
|
| 383 |
+
$$
|
| 384 |
+
A _ {5} \xrightarrow {r _ {4}} A _ {4} \xrightarrow {r _ {3}} A _ {3} \xrightarrow {r _ {2}} A _ {2} \xrightarrow {r _ {1}} A _ {1} \tag {3}
|
| 385 |
+
$$
|
| 386 |
+
|
| 387 |
+
is exact at $A_{j}$ if the kernel of the linear map $r_{j - 1}$ coincides with the image of the previous map $r_{j}$ .
|
| 388 |
+
|
| 389 |
+
Proposition A.1. If the sequence (3) is exact at $A_{2},A_{3},A_{4}$ then $A_{3}\simeq \mathrm{Ker}(r_{1})\oplus \mathrm{Coker}(r_{4})$
|
| 390 |
+
|
| 391 |
+
Proof. Since $A / \operatorname{Ker}(r) \simeq \operatorname{Image}(r)$ for any linear map $r : A \to A'$ , therefore $A_3 \simeq \operatorname{Image}(r_2) \oplus \operatorname{Ker}(r_2)$ . If the sequence is exact at $A_2$ , then $\operatorname{Image}(r_2) \simeq \operatorname{Ker}(r_1)$ . Exactness at $A_3$ , $A_4$ gives $\operatorname{Ker}(r_2) \simeq \operatorname{Image}(r_3)$ , $\operatorname{Ker}(r_3) \simeq \operatorname{Image}(r_4)$ . Then $\operatorname{Image}(r_3) \simeq A_4 / \operatorname{Ker}(r_3)$ imply that $\operatorname{Ker}(r_2) \simeq A_4 / \operatorname{Image}(r_4)$ , which equals $\operatorname{Coker}(r_4)$ , the cokernel of the linear map $r_4$ . Hence $A_3 \simeq \operatorname{Ker}(r_1) \oplus \operatorname{Coker}(r_4)$ .
|
| 392 |
+
|
| 393 |
+
Therefore the exact sequence from Theorem 2.1 implies that the calculation of the topological features from Section 2.1 for all $\alpha$ is reduced to the calculation of $H_{1}(R_{\alpha}(\hat{\mathcal{G}}^{w,\tilde{w}}))$ for all $\alpha$ , i.e. to the calculation of $R$ -Cross-Barcode $_1(P, \tilde{P})$ .
|
| 394 |
+
|
| 395 |
+
# A.2. Construction of R-Cross-Barcode
|
| 396 |
+
|
| 397 |
+
Here we gather some intuition behind the construction of the graph $\hat{\mathcal{G}}^{w,\tilde{w}}$ and the $R$ -Cross-Barcode.
|
| 398 |
+
|
| 399 |
+
The Vietoris-Rips complex $R_{\alpha}(\mathcal{G}^{\min(w, \tilde{w})})$ is the union of simplexes whose edges connect data points with distance less than $\alpha$ in at least one of representations $\mathcal{P}, \tilde{\mathcal{P}}$ . An inclusion of simplicial complexes $S \subset R$ is an equivalence in homotopy category, if and only if the induced map on homology is an isomorphism (Whitehead, 1968). The maps on homology induced by the inclusions of filtered simplicial complexes
|
| 400 |
+
|
| 401 |
+
$$
|
| 402 |
+
R _ {\alpha} \left(\mathcal {G} ^ {w}\right) \subseteq R _ {\alpha} \left(\mathcal {G} ^ {\min (w, \tilde {w})}\right), R _ {\alpha} \left(\mathcal {G} ^ {\tilde {w}}\right) \subseteq R _ {\alpha} \left(\mathcal {G} ^ {\min (w, \tilde {w})}\right) \tag {4}
|
| 403 |
+
$$
|
| 404 |
+
|
| 405 |
+
should therefore be as close as possible to isomorphisms, in order that the approximations at threshold $\alpha$ to the manifolds $M_{\mathcal{P}}$ and $M_{\tilde{\mathcal{P}}}$ have essentially the same geometric features located at the same places. It follows from the exact sequence from Theorem 2.1 that the R-Cross-Barcode $_{*}(P, \tilde{P})$ is exactly the list of topological features describing the failure of the maps induced on homology by inclusions (4) to be isomorphisms.
|
| 406 |
+
|
| 407 |
+
Introduce the weighted graph $\hat{\mathcal{G}}^w$ with doubled set of vertices and with the edge weights defined as follows. We fix the numbering of vertices $\operatorname{Vert}(\mathcal{G}) = \{A_1, \dots, A_N\}$ . Let us add the extra vertex $A'$ together with $A$ to $\hat{\mathcal{G}}^w$ for each vertex $A \in \operatorname{Vert}(\mathcal{G})$ , plus the two additional vertexes $O, O'$ , and define the distance-like edge weights in $\hat{\mathcal{G}}^w$ as:
|
| 408 |
+
|
| 409 |
+
$$
|
| 410 |
+
d _ {A _ {i} A _ {j}} = d _ {A _ {i} A _ {j} ^ {\prime}} = w _ {A _ {i} A _ {j}},
|
| 411 |
+
$$
|
| 412 |
+
|
| 413 |
+
$$
|
| 414 |
+
d _ {A _ {i} ^ {\prime} A _ {j} ^ {\prime}} = d _ {A _ {i} A _ {i} ^ {\prime}} = d _ {O ^ {\prime} A _ {i} ^ {\prime}} = d _ {O A _ {i}} = 0, d _ {A _ {j} ^ {\prime} A _ {i}} = d _ {O ^ {\prime} A _ {i}} = d _ {O A _ {i} ^ {\prime}} = d _ {O O ^ {\prime}} = + \infty \tag {5}
|
| 415 |
+
$$
|
| 416 |
+
|
| 417 |
+
where $i < j$ and $O, O' \in \mathrm{Vert}(\hat{\mathcal{G}}^w)$ are the two additional vertexes.
|
| 418 |
+
|
| 419 |
+
The suspension $C[-1]$ of chain complex $C$ denotes the same chain complex with degree shifted by 1, $C[-1]_n = C_{n-1}$ , so that the $n$ th chains of $R_\alpha(\mathcal{G}^w)[-1]$ are linear combinations of $(n-1)$ -dimensional simplexes from $R_\alpha(\mathcal{G}^w)$ . We denote via $A_{i_1} \ldots A_{i_n}[-1]$ the element from $C_n(R_\alpha(\mathcal{G}^w)[-1])$ corresponding to the simplex $A_{i_1} \ldots A_{i_n}$ .
|
| 420 |
+
|
| 421 |
+
A chain map $f$ between two chain complexes $(C, d_C)$ and $(B, d_B)$ is a sequence of linear maps $f_n: C_n \to B_n$ that commutes with the boundary operators: $d_{B,n} \circ f_n = f_{n-1} \circ d_{C,n}$ . The cone of a chain map $f$ is the chain complex $\mathrm{Cone}(f) = C[-1] \oplus B$ with differential $d_{\mathrm{Cone}(f)} = \begin{pmatrix} d_{C[-1]} & 0 \\ f[-1] & d_B \end{pmatrix}$ . A homotopy equivalence is a pair of chain maps $f: C \to B, g: B \to C$ , and a pair of maps $h_{C,n}: C_n \to C_{n+1}, h_{B,n}: B_n \to B_{n+1}$ , such that $g \circ f = \operatorname{Id} + [h_C, d_C]$ and $f \circ g = \operatorname{Id} + [h_B, d_B]$ . We assume that the Vietoris-Rips complexes are augmented with $C_{-1} = \mathbb{Z}/2\mathbb{Z}$ and $\partial_0\{A_i\} = 1$ .
|
| 422 |
+
|
| 423 |
+
The proof of the exact homology sequence from Theorem 2.1 follows from the following two propositions.
|
| 424 |
+
|
| 425 |
+
Proposition A.2. There are homotopy equivalences of chain complexes:
|
| 426 |
+
|
| 427 |
+
$$
|
| 428 |
+
R _ {\alpha} \left(\mathcal {G} ^ {w}\right) [ - 1 ] \sim R _ {\alpha} \left(\hat {\mathcal {G}} ^ {w}\right) \tag {6}
|
| 429 |
+
$$
|
| 430 |
+
|
| 431 |
+
$$
|
| 432 |
+
\operatorname {C o n e} \left(R _ {\alpha} \left(\mathcal {G} ^ {w}\right)\rightarrow R _ {\alpha} \left(\mathcal {G} ^ {\min (w, \tilde {w})}\right)\right) \sim R _ {\alpha} \left(\hat {\mathcal {G}} ^ {w, \tilde {w}}\right). \tag {7}
|
| 433 |
+
$$
|
| 434 |
+
|
| 435 |
+
Proof. The simplexes of the chain complex $R_{\alpha}(\hat{\mathcal{G}}^w)$ are of four types: $A_{i_1}\ldots A_{i_k}A_{i_k}'\ldots A_{i_n}'$ , $A_{i_1}\ldots A_{i_k}A_{i_{k+1}}' \ldots A_{i_n}'$ , $OA_{i_1}\ldots A_{i_n}$ and $O'A_{i_1}'\ldots A_{i_n}'$ where $A_{i_k} \in \mathrm{Vert}(\mathcal{G})$ , $i_1 < \ldots < i_k < i_{k+1} < \ldots < i_n$ , with edge weights satisfying $w_{A_{i_r}A_{i_s}} < \alpha$ for $r \leq k$ . Define the map $\phi : R_{\alpha}(\mathcal{G}^w)[-1] \to R_{\alpha}(\hat{\mathcal{G}}^w)$
|
| 436 |
+
|
| 437 |
+
$$
|
| 438 |
+
\phi : A _ {i _ {1}} \dots A _ {i _ {n}} [ - 1 ] \mapsto O A _ {i _ {1}} \dots A _ {i _ {n}} + O ^ {\prime} A _ {i _ {1}} ^ {\prime} \dots A _ {i _ {n}} ^ {\prime} + \sum_ {k = 1} ^ {n} A _ {i _ {1}} \dots A _ {i _ {k}} A _ {i _ {k}} ^ {\prime} \dots A _ {i _ {n}} ^ {\prime} \tag {8}
|
| 439 |
+
$$
|
| 440 |
+
|
| 441 |
+
The map $\phi$ together with the map $\tilde{\phi}:R_{\alpha}(\hat{\mathcal{G}}^{w})\to R_{\alpha}(\mathcal{G}^{w})[-1]$
|
| 442 |
+
|
| 443 |
+
$$
|
| 444 |
+
\tilde{\phi}: O A_{i_1} \dots A_{i_n} \mapsto A_{i_1} \dots A_{i_n}[-1], \quad \tilde{\phi}(\Delta) = 0 \text{ for any other simplex } \Delta, \tag{9}
|
| 445 |
+
$$
|
| 446 |
+
|
| 447 |
+
gives a homotopy equivalence, $\tilde{\phi} \circ \phi = \mathrm{Id}$ , $\phi \circ \tilde{\phi} = \mathrm{Id} + [h, \partial]$ , where the homotopy $h$ is given by
|
| 448 |
+
|
| 449 |
+
$$
|
| 450 |
+
h: A _ {i _ {1}} \dots A _ {i _ {k}} A _ {i _ {k + 1}} ^ {\prime} \dots A _ {i _ {n}} ^ {\prime} \mapsto \sum_ {l = 1} ^ {k} A _ {i _ {1}} \dots A _ {i _ {l}} A _ {i _ {l}} ^ {\prime} \dots A _ {i _ {n}} ^ {\prime} + O ^ {\prime} A _ {i _ {1}} ^ {\prime} \dots A _ {i _ {n}} ^ {\prime} \tag {10}
|
| 451 |
+
$$
|
| 452 |
+
|
| 453 |
+
$h:A_{i_1}'\ldots A_{i_n}'\mapsto O'A_{i_1}'\ldots A_{i_n}', h(\Delta) = 0$ for any other simplex $\Delta$
|
| 454 |
+
|
| 455 |
+
Simplexes of the chain complex $R_{\alpha}(\hat{\mathcal{G}}^{w,\tilde{w}})$ are of three types. The first type: $A_{i_1}\ldots A_{i_k}A_{i_k}'\ldots A_{i_n}'$ with edge weights satisfying $w_{A_{i_r}A_{i_s}} < \alpha$ for $r \leq k$ and $\min (w_{A_{i_r}A_{i_s}},\tilde{w}_{A_{i_r}A_{i_s}}) < \alpha$ for $r,s > k$ ; the second type: $A_{i_1}\ldots A_{i_{k - 1}}A_{i_k}'\ldots A_{i_n}'$ with edge weights satisfying $w_{A_{i_r}A_{i_s}} < \alpha$ for $r < k$ , and $\min (w_{A_{i_r}A_{i_s}},\tilde{w}_{A_{i_r}A_{i_s}}) < \alpha$ for $r,s \geq k$ ; and the third type: $OA_{i_1}\ldots A_{i_n}$ with edge weights satisfying $w_{A_{i_r}A_{i_s}} < \alpha$ for all $r,s$ . Define the map $\psi : \operatorname{Cone}\left(R_{\alpha}(\mathcal{G}^w)\to R_{\alpha}(\mathcal{G}^{\min (w,\tilde{w})})\right)\to R_{\alpha}(\hat{\mathcal{G}}^{w,\tilde{w}})$
|
| 456 |
+
|
| 457 |
+
$$
|
| 458 |
+
\psi : A _ {i _ {1}} \dots A _ {i _ {n}} [ - 1 ] \mapsto O A _ {i _ {1}} \dots A _ {i _ {n}} + \sum_ {k = 1} ^ {n} A _ {i _ {1}} \dots A _ {i _ {k}} A _ {i _ {k}} ^ {\prime} \dots A _ {i _ {n}} ^ {\prime} \tag {11}
|
| 459 |
+
$$
|
| 460 |
+
|
| 461 |
+
for $A_{i_1}\ldots A_{i_n}[-1]\in R_\alpha (\mathcal{G}^w)[-1]$
|
| 462 |
+
|
| 463 |
+
$$
|
| 464 |
+
\psi : A _ {i _ {1}} \dots A _ {i _ {n}} \mapsto A _ {i _ {1}} ^ {\prime} \dots A _ {i _ {n}} ^ {\prime} \tag {12}
|
| 465 |
+
$$
|
| 466 |
+
|
| 467 |
+
for $A_{i_1}\ldots A_{i_n}\in R_\alpha (\mathcal{G}^{\min (w,\tilde{w})})$ . The map $\psi$ together with the map $\tilde{\psi}:R_{\alpha}(\hat{\mathcal{G}}^{w,\tilde{w}})\to \mathrm{Cone}\left(R_{\alpha}(\mathcal{G}^{w})\to R_{\alpha}(\mathcal{G}^{\min (w,\tilde{w})})\right)$
|
| 468 |
+
|
| 469 |
+
$$
|
| 470 |
+
\tilde {\psi}: O A _ {i _ {1}} \dots A _ {i _ {n}} \mapsto A _ {i _ {1}} \dots A _ {i _ {n}} [ - 1 ], A _ {i _ {1}} \dots A _ {i _ {n}} [ - 1 ] \in R _ {\alpha} (\mathcal {G} ^ {w}) [ - 1 ],
|
| 471 |
+
$$
|
| 472 |
+
|
| 473 |
+
$$
|
| 474 |
+
A _ {i _ {1}} ^ {\prime} \dots A _ {i _ {n}} ^ {\prime} \mapsto A _ {i _ {1}} \dots A _ {i _ {n}}, A _ {i _ {1}} \dots A _ {i _ {n}} \in R _ {\alpha} \left(\mathcal {G} ^ {\min (w, \tilde {w})}\right), \tag {13}
|
| 475 |
+
$$
|
| 476 |
+
|
| 477 |
+
$$
|
| 478 |
+
\tilde{\psi}(\Delta) = 0 \text{ for any other simplex } \Delta,
|
| 479 |
+
$$
|
| 480 |
+
|
| 481 |
+
gives a homotopy equivalence, $\tilde{\psi} \circ \psi = \mathrm{Id}$ , $\psi \circ \tilde{\psi} = \mathrm{Id} + [H, \partial]$ , where the homotopy $H$ is given by
|
| 482 |
+
|
| 483 |
+
$$
|
| 484 |
+
H: A _ {i _ {1}} \dots A _ {i _ {k}} A _ {i _ {k + 1}} ^ {\prime} \dots A _ {i _ {n}} ^ {\prime} \mapsto \sum_ {l = 1} ^ {k} A _ {i _ {1}} \dots A _ {i _ {l}} A _ {i _ {l}} ^ {\prime} \dots A _ {i _ {n}} ^ {\prime}, 1 \leq k \leq n \tag {14}
|
| 485 |
+
$$
|
| 486 |
+
|
| 487 |
+
$$
|
| 488 |
+
H(\Delta) = 0 \text{ for any other simplex } \Delta.
|
| 489 |
+
$$
|
| 490 |
+
|
| 491 |
+
The long exact sequences such as (2) arise from distinguished triangles in the homotopy category of chain complexes. A distinguished triangle is a diagram isomorphic in this category to a diagram $A \xrightarrow{f} B \to \operatorname{Cone}(f) \to A[-1]$ .
|
| 492 |
+
|
| 493 |
+
Proposition A.3. The embeddings of graphs $\mathcal{G}^{w\leq \alpha}\subseteq \mathcal{G}^{\min (w,\tilde{w})\leq \alpha}\subset \hat{\mathcal{G}}^{w,\tilde{w}\leq \alpha}$ give distinguished triangles, see (Gelfand & Manin, 2002), in the homotopy category of chain complexes:
|
| 494 |
+
|
| 495 |
+
$$
|
| 496 |
+
R _ {\alpha} \left(\mathcal {G} ^ {w}\right)\rightarrow R _ {\alpha} \left(\mathcal {G} ^ {\min (w, \tilde {w})}\right)\rightarrow R _ {\alpha} \left(\hat {\mathcal {G}} ^ {w, \tilde {w}}\right)\rightarrow R _ {\alpha} \left(\mathcal {G} ^ {w}\right) [ - 1 ]. \tag {15}
|
| 497 |
+
$$
|
| 498 |
+
|
| 499 |
+
Proof. Taken together the homotopy equivalences (8)-(14) define an isomorphism of (15) with the distinguished triangle
|
| 500 |
+
|
| 501 |
+
$$
|
| 502 |
+
R _ {\alpha} \left(\mathcal {G} ^ {w}\right)\rightarrow R _ {\alpha} \left(\mathcal {G} ^ {\min \left(w, \tilde {w}\right)}\right)\rightarrow \operatorname {C o n e} \left(R _ {\alpha} \left(\mathcal {G} ^ {w}\right)\rightarrow R _ {\alpha} \left(\mathcal {G} ^ {\min \left(w, \tilde {w}\right)}\right)\right)\rightarrow R _ {\alpha} \left(\mathcal {G} ^ {w}\right) [ - 1 ]. \tag {16}
|
| 503 |
+
$$
|
| 504 |
+
|
| 505 |
+
Comparison with Cross-Barcode and Geometry Score. The Cross-Barcode from (Barannikov et al., 2021) compares two data manifolds lying in the same ambient space. It does not use the information that can be provided by a one-to-one correspondence between points of the two data clouds. To compare the locations of topological features the Cross-Barcode from loc.cit. uses instead the proximity information inferred from the pairwise distances between points from different clouds lying in the same ambient space. Geometry score from (Khrulkov & Oseledets, 2018) is based on a comparison of standard barcodes for each cloud and is insensitive to the location of topological features, for example, it does not detect any difference when similar topological features are located geometrically in distant places of the two clouds.
|
| 506 |
+
|
| 507 |
+

|
| 508 |
+
|
| 509 |
+

|
| 510 |
+
Figure 9: Merging between clusters already connected via a chain of mergings.
|
| 511 |
+
Figure 10: Merging of three clusters into two clusters. Graphs $\mathcal{G}^{w\leq \alpha}$ , $\mathcal{G}^{\tilde{w}\leq \alpha}$ and $\mathcal{G}^{\min (w,\tilde{w})\leq \alpha}$ are shown. Edges of $\mathcal{G}^{\min (w,\tilde{w})\leq \alpha}$ not in $\mathcal{G}^{w\leq \alpha}$ are colored in green. In this example there are exactly four different weights (13), (14), (23), (24) in the graphs $\mathcal{G}^{w\leq \alpha}$ and $\mathcal{G}^{\min (w,\tilde{w})\leq \alpha}$ . The unique topological feature in $R$ -Cross-Barcode $_1(P, \tilde{P})$ in this case is born at the threshold $\tilde{w}_{24}$ when the difference in the cluster structures of the two graphs arises, as the points 2 and 4 are in the same cluster at this threshold in $\mathcal{G}^{\min (w,\tilde{w})}$ and not in $\mathcal{G}^w$ . This feature dies at the threshold $w_{23}$ since the clusters containing 2 and 4 are merged at this threshold in $\mathcal{G}^w$ .
|
| 512 |
+
|
| 513 |
+
# B. Discussion of CKA
|
| 514 |
+
|
| 515 |
+
Given two series of equal size $x_{i} \in \mathbb{R}^{n_{x}}$ , $y_{i} \in \mathbb{R}^{n_{y}}$ , $i = 1 \dots n$ , the CKA (Kornblith et al., 2019) is defined as
|
| 516 |
+
|
| 517 |
+
$$
|
| 518 |
+
\operatorname {C K A} (K, L) = \frac {\operatorname {H S I C} (K , L)}{\sqrt {\operatorname {H S I C} (K , K) \operatorname {H S I C} (L , L)}}
|
| 519 |
+
$$
|
| 520 |
+
|
| 521 |
+
where $\mathrm{HSIC}(K,L)$ is a Hilbert-Schmidt Independence Criterion (Gretton et al., 2005), $K_{i,j} = k(x_i,x_j)$ , $L_{i,j} = l(y_i,y_j)$ , $\mathrm{HSIC}(K,L) = (n-1)^{-2}\operatorname{tr}(KHLH)$ with the centering matrix $H = E - n^{-1}\mathbf{1}\mathbf{1}^{\top}$ , where $k(\cdot ,\cdot)$ , $l(\cdot ,\cdot)$ are kernels. HSIC itself is an empirical estimate of the Hilbert-Schmidt norm of the cross-covariance operator. HSIC is equivalent to maximum mean discrepancy between the joint distribution $P(X,Y)$ and the product of the marginal distributions $P(X)P(Y)$ ; HSIC $= 0$ implies independence of $X$ and $Y$ if the associated kernel is universal.
|
| 522 |
+
|
| 523 |
+
However, CKA is sometimes applied to measure similarity between representations from different layers of a neural network. In this case $Y = f(X)$ . $X$ and $Y$ are tightly dependent and the joint distribution can always be factorized as
|
| 524 |
+
|
| 525 |
+
$P(X,Y) = P(Y|X)P(X)$ . Thus, the application of CKA to the comparison of representations from different layers is questionable.
|
| 526 |
+
|
| 527 |
+
# C. Details on experiments with synthetic point clouds
|
| 528 |
+
|
| 529 |
+

|
| 530 |
+
|
| 531 |
+

|
| 532 |
+
|
| 533 |
+

|
| 534 |
+
|
| 535 |
+

|
| 536 |
+
|
| 537 |
+

|
| 538 |
+
|
| 539 |
+

|
| 540 |
+
|
| 541 |
+

|
| 542 |
+
|
| 543 |
+

|
| 544 |
+
Figure 11: R-Cross-Barcodes for the "clusters" experiments. Top: R-Cross-Barcode $(\tilde{P}, P)$ , Bottom: R-Cross-Barcode $(P, \tilde{P})$ ; $\tilde{P}$ is the point cloud having one cluster; $P$ — $2, 3, 4, 5, 6, 10, 12$ clusters.
|
| 545 |
+
|
| 546 |
+

|
| 547 |
+
|
| 548 |
+

|
| 549 |
+
|
| 550 |
+

|
| 551 |
+
|
| 552 |
+

|
| 553 |
+
|
| 554 |
+

|
| 555 |
+
|
| 556 |
+

|
| 557 |
+
|
| 558 |
+
Runtime comparison. Here we present the total wall time of the experiments with synthetic point clouds:
|
| 559 |
+
|
| 560 |
+
"Clusters experiment": RTD: 19.7 s, CKA: 0.07 s, IMD: 83 s, SVCCA: 0.03 s.
|
| 561 |
+
|
| 562 |
+
"Rings experiment": RTD: 144 s, CKA: 0.7 s, IMD: 91 s, SVCCA: 0.6 s.
|
| 563 |
+
|
| 564 |
+
# D. Details on the "rings" experiment
|
| 565 |
+
|
| 566 |
+

|
| 567 |
+
|
| 568 |
+

|
| 569 |
+
|
| 570 |
+

|
| 571 |
+
|
| 572 |
+

|
| 573 |
+
|
| 574 |
+

|
| 575 |
+
|
| 576 |
+
(a) Point clouds used in "rings" experiment.
|
| 577 |
+
|
| 578 |
+

|
| 579 |
+
(b) Representations' comparison measures. Ideally, the measure should change monotonically with the increase of topological discrepancy.
|
| 580 |
+
|
| 581 |
+

|
| 582 |
+
Figure 12: RTD perfectly detects changes in topology, while rival measures fail. Five rings are compared with 5,4,3,2,1 rings.
|
| 583 |
+
|
| 584 |
+

|
| 585 |
+
|
| 586 |
+

|
| 587 |
+
|
| 588 |
+
# E. Experiment with BigGAN
|
| 589 |
+
|
| 590 |
+
In this experiment, we applied RTD and CKA for comparison of internal representations in BigGAN (Brock et al., 2018) $^2$ . Initially, we generated a set of $k = 100$ random latent codes $Z_{0} = \{z_{0,j}\}_{j=1}^{k}$ and derived sets $Z_{1}, \ldots, Z_{n}$ by adding to $Z_{0}$ a Gaussian noise of increasing strength $z_{i,j} = z_{0,j} + \epsilon_{i,j}$ , where $\epsilon_{i,j} \sim N(0, \sigma_{i})$ . The noise standard deviation $\sigma_{i}$ grows from 0.001 to 0.25 by a logarithmic scale and the difference between $Z_{0}$ and $Z_{i}$ tends to increase when $i$ increases.
|
| 591 |
+
|
| 592 |
+

|
| 593 |
+
|
| 594 |
+

|
| 595 |
+
|
| 596 |
+

|
| 597 |
+
|
| 598 |
+

|
| 599 |
+
|
| 600 |
+

|
| 601 |
+
Figure 13: R-Cross-Barcodes for the "rings" experiments. Top: R-Cross-Barcode $(P, \tilde{P})$ , Bottom: R-Cross-Barcode $(\tilde{P}, P)$ . $P$ is the point cloud having 5 rings; $\tilde{P}$ — $4, 3, 2, 1$ rings.
|
| 602 |
+
|
| 603 |
+

|
| 604 |
+
|
| 605 |
+

|
| 606 |
+
|
| 607 |
+

|
| 608 |
+
|
| 609 |
+

|
| 610 |
+
Figure 14: Comparison of normalized RTD, CKA (computed for sets of internal representations) vs. FID (computed for sets of images).
|
| 611 |
+
|
| 612 |
+
Then, we pass sets of latent codes $Z_0, \ldots, Z_n$ together with vector encoding of the "husky" class through the BigGAN and save internal representations $R_i$ for one of the top layers (results were quite similar for other layers). Also, we get sets of images $I_0, \ldots, I_n$ . To compare these sets we used the state-of-the-art measure FID (Heusel et al., 2017) which is often applied for GAN evaluation.
|
| 613 |
+
|
| 614 |
+
It is natural to assume that the difference between sets of internal representations $R_0$ and $R_i$ should have a good correlation with the difference between sets of images $I_0$ and $I_i$ . To check this hypothesis, we calculated $\mathrm{RTD}(R_0, R_i)$ , $\mathrm{CKA}(R_0, R_i)$ and compared them with $\mathrm{FID}(I_0, I_i)$ , for $i = 1, \dots, n$ . Figure 14 shows the results. We conclude that RTD enjoys higher correlation with FID: 0.97, while the correlation of CKA and FID is lower: 0.79.
|
| 615 |
+
|
| 616 |
+
# F. Details on experiments with convolutional networks
|
| 617 |
+
|
| 618 |
+
<table><tr><td colspan="2">Metrics to correlate</td><td>Noise</td><td>Gaussian Blur</td><td>Grayscale</td><td>Hue</td></tr><tr><td rowspan="2">Disagreement</td><td>RTD</td><td>0.966 ± 0.001</td><td>0.982 ± 0.004</td><td>0.990 ± 0.004</td><td>0.978 ± 0.008</td></tr><tr><td>1-CKA</td><td>0.927 ± 0.006</td><td>0.913 ± 0.011</td><td>0.928 ± 0.040</td><td>0.927 ± 0.017</td></tr><tr><td rowspan="2">Error rate</td><td>RTD</td><td>0.982 ± 0.002</td><td>0.963 ± 0.007</td><td>0.856 ± 0.052</td><td>0.935 ± 0.030</td></tr><tr><td>1-CKA</td><td>0.966 ± 0.007</td><td>0.999 ± 0.001</td><td>0.958 ± 0.018</td><td>0.944 ± 0.033</td></tr></table>
|
| 619 |
+
|
| 620 |
+
Table 5: Analysis of ResNet-20 representations under different data distribution shifts. The correlation of RTD and 1-CKA with Disagreement and Error rate.
|
| 621 |
+
|
| 622 |
+

|
| 623 |
+
(a) Zoom
|
| 624 |
+
|
| 625 |
+

|
| 626 |
+
(b) Contrast
|
| 627 |
+
Figure 15: Analysis of ResNet-20 representations under different data distribution shifts. The dynamics of scaled metrics with the monotonic application of various types of image transformations.
|
| 628 |
+
|
| 629 |
+

|
| 630 |
+
(c) Brightness
|
| 631 |
+
|
| 632 |
+

|
| 633 |
+
(d) Rotation
|
| 634 |
+
|
| 635 |
+
<table><tr><td colspan="2">Metrics to correlate</td><td>Zoom</td><td>Brightness</td><td>Contrast</td><td>Rotation</td></tr><tr><td rowspan="2">Disagreement</td><td>RTD</td><td>0.950 ± 0.006</td><td>0.975 ± 0.002</td><td>0.936 ± 0.010</td><td>0.955 ± 0.015</td></tr><tr><td>1-CKA</td><td>0.886 ± 0.010</td><td>0.854 ± 0.024</td><td>0.851 ± 0.021</td><td>0.857 ± 0.020</td></tr><tr><td rowspan="2">Error rate</td><td>RTD</td><td>0.946 ± 0.006</td><td>0.921 ± 0.011</td><td>0.937 ± 0.005</td><td>0.940 ± 0.009</td></tr><tr><td>1-CKA</td><td>0.994 ± 0.002</td><td>0.997 ± 0.001</td><td>0.998 ± 0.001</td><td>0.981 ± 0.005</td></tr></table>
|
| 636 |
+
|
| 637 |
+
# G. Experiments with disentanglement
|
| 638 |
+
|
| 639 |
+
Learning disentangled representations is a fundamental problem for improving the generalization, robustness, and interpretability of generative models. Let $Z$ be a latent space, $X$ - a space of objects, $g: Z \to X$ - a generator. The disentanglement of generative models can be evaluated by comparing the topology of data manifold slices $X_{v} = g(Z|_{z_{i} = v})$ for different values of $v$ (Zhou et al., 2021; Barannikov et al., 2022). If the direction $z_{i}$ corresponds to an interpretable factor, then $X_{v}$ must be topologically similar for different $v$ .
|
| 640 |
+
|
| 641 |
+
We use the following experimental design. $Z_{v,n} = \{z\in Z\mid (z,n) = v\}$ - a slice in a latent space orthogonal to a unit vector $n$ . We take a finite random sample $Z_{1}\subset Z_{v,n}$ and a shifted sample $Z_{2} = \{z_{i} + \delta n\}_{i = 1}^{|Z_1|}$ for small $\delta$ . By definition, $Z_{1}$ and $Z_{2}$ have natural point-wise mapping and we can estimate homological similarity of $g(Z_{1})$ and $g(Z_{2})$ by RTD.
|
| 642 |
+
|
| 643 |
+
In this experiment, we use dSprites<sup>3</sup> for the evaluation of disentanglement. dSprites is a dataset of procedurally generated 2D shapes from 5 ground truth independent latent factors: shape, scale, rotation, x-position, and y-position of a sprite. Thus, the latent space is disentangled and fully factorized. Particularly, we compare the slices orthogonal to axis-aligned vectors and orthogonal to random vectors, see Table 10. Except for the first axis, the topological dissimilarity estimated by RTD is significantly less than for a random direction. The first axis corresponds to a categorical factor - shape for which the aforementioned approach is arguably not applicable. The dSprites dataset is quite simple and RTD was calculated for point clouds in the pixel space. However, the same technique can be straightforwardly applied to evaluate the disentanglement of image representations for more complex datasets.
|
| 644 |
+
|
| 645 |
+
Table 6: Analysis of ResNet-20 representations under different data distribution shifts. The correlation of RTD and 1-CKA with Disagreement and Error rate.
|
| 646 |
+
|
| 647 |
+
<table><tr><td></td><td>RTD</td><td>1-CKA</td></tr><tr><td>Disagreement</td><td>0.98 ± 0.01</td><td>0.93 ± 0.02</td></tr><tr><td>Error rate</td><td>0.9 ± 0.03</td><td>0.99 ± 0.01</td></tr></table>
|
| 648 |
+
|
| 649 |
+
(a) CIFAR-100
|
| 650 |
+
|
| 651 |
+
<table><tr><td></td><td>RTD</td><td>1-CKA</td></tr><tr><td>Disagreement</td><td>0.91 ± 0.01</td><td>0.89 ± 0.02</td></tr><tr><td>Error rate</td><td>0.60 ± 0.02</td><td>0.73 ± 0.01</td></tr></table>
|
| 652 |
+
|
| 653 |
+
(b) CIFAR-10
|
| 654 |
+
|
| 655 |
+
Table 7: The correlation of metric dynamics when transferring the ResNet-20 network from CIFAR-100 to CIFAR-10 dataset.
|
| 656 |
+
|
| 657 |
+
<table><tr><td></td><td>VGG-11</td><td>ResNet-20</td></tr><tr><td>Number of epochs</td><td colspan="2">100</td></tr><tr><td>Optimizer</td><td colspan="2">SGD, momentum=0.9</td></tr><tr><td>Learning rate (initial)</td><td colspan="2">0.1</td></tr><tr><td rowspan="3">Scheduler</td><td colspan="2"><50%: 0.1</td></tr><tr><td colspan="2">50-90%: 0.1-0.001 (linear)</td></tr><tr><td colspan="2">>90%: 0.001</td></tr><tr><td>Batch size</td><td colspan="2">128</td></tr></table>
|
| 658 |
+
|
| 659 |
+
Table 8: Details on learning the neural networks from random initialization on CIFAR datasets.
|
| 660 |
+
|
| 661 |
+
<table><tr><td></td><td colspan="2">Encoder part</td><td>Classifier part</td></tr><tr><td>Number of epochs</td><td colspan="2">50</td><td></td></tr><tr><td>Optimizer</td><td colspan="2">SGD, momentum=0.9</td><td></td></tr><tr><td>Learning rate (initial)</td><td>0.001</td><td></td><td>0.1</td></tr><tr><td></td><td></td><td><50%: 0.1</td><td></td></tr><tr><td>Scheduler</td><td>None</td><td>50-90%: 0.1-0.001 (linear)</td><td></td></tr><tr><td></td><td></td><td>>90%: 0.001</td><td></td></tr><tr><td>Batch size</td><td colspan="2">128</td><td></td></tr></table>
|
| 662 |
+
|
| 663 |
+
Table 9: Details on fine-tuning the ResNet-20 from CIFAR-100 to CIFAR-10 dataset.
|
| 664 |
+
|
| 665 |
+
# H. Details on dimensionality reduction of MNIST with UMAP
|
| 666 |
+
|
| 667 |
+
Visual inspection of Figure 17 reveals apparent incoherences of CKA. Denote by $U(n)$ representations obtained by UMAP with the number of neighbors $n$ . According to CKA (Figure 4b), $U(10)$ is closer to $U(200)$ than to $U(20)$ ; also $U(200)$ is closer to $U(10)$ than to $U(100)$ .
|
| 668 |
+
|
| 669 |
+
# I. Additional experiments
|
| 670 |
+
|
| 671 |
+
For the "clusters" experiments, we did additional comparisons of point clouds with alternative similarity measures. Firstly, we calculated CKA with the RBF kernel for 3 bandwidths equal to 0.2, 0.4, 0.8 of median pairwise distances (as proposed in (Kornblith et al., 2019)). The performance as measured by Kendall-tau correlation with the true ordering was 0.23, 0.04, 0.14 - not better than for CKA with the linear kernel. Secondly, we applied the topological loss term from (Moor et al., 2020). The performance as measured by Kendall-tau correlation with the true ordering was poor: -0.52.
|
| 672 |
+
|
| 673 |
+
# J. Internal similarity of Neural Network layers
|
| 674 |
+
|
| 675 |
+
Here we compare the outputs of layer blocks within the trained network. We consider ResNet-50 and ConvNeXt-tiny (Liu et al., 2022) architectures pre-trained on ImageNet-1k dataset (Deng et al., 2009). We calculate RTD, CKA and SVCCA within outputs after each Bottleneck Residual Block or ConvNeXt's block respectively. In Fig. 18, we plot similarity
|
| 676 |
+
|
| 677 |
+
Table 10: Evaluation of the disentanglement for various directions in the latent space of dSprites.
|
| 678 |
+
|
| 679 |
+
<table><tr><td>axis</td><td>RTD</td></tr><tr><td>axis 1</td><td>148.1</td></tr><tr><td>axis 2</td><td>71.3</td></tr><tr><td>axis 3</td><td>53.4</td></tr><tr><td>axis 4</td><td>41.2</td></tr><tr><td>axis 5</td><td>40.5</td></tr><tr><td>random</td><td>162.8 ± 18.6</td></tr></table>
|
| 680 |
+
|
| 681 |
+

|
| 682 |
+
|
| 683 |
+

|
| 684 |
+
|
| 685 |
+

|
| 686 |
+
|
| 687 |
+

|
| 688 |
+
|
| 689 |
+

|
| 690 |
+
|
| 691 |
+

|
| 692 |
+
Figure 16: dSprites generated across directions in the latent space, top: random direction, bottom: axis-aligned direction, corresponds to an interpretable factor of variation.
|
| 693 |
+
|
| 694 |
+

|
| 695 |
+
|
| 696 |
+

|
| 697 |
+
|
| 698 |
+

|
| 699 |
+
|
| 700 |
+

|
| 701 |
+
|
| 702 |
+

|
| 703 |
+
Figure 17: 2D representations of MNIST produced by UMAP, n_neighbors $\in$ (10, 20, 50, 100, 200)
|
| 704 |
+
|
| 705 |
+

|
| 706 |
+
|
| 707 |
+

|
| 708 |
+
|
| 709 |
+

|
| 710 |
+
|
| 711 |
+

|
| 712 |
+
|
| 713 |
+
between layers within each architecture. We observe that RTD catches the architecture's block structure better than CKA and SVCCA. The ResNet-50 architecture has a sequence of blocks in the form [3, 4, 6, 3], and it can be seen that RTD highlights it with sub-squares of corresponding sizes.
|
| 714 |
+
|
| 715 |
+

|
| 716 |
+
|
| 717 |
+

|
| 718 |
+
|
| 719 |
+

|
| 720 |
+
|
| 721 |
+

|
| 722 |
+
Figure 18: The representation differences between the layer blocks within trained networks, ImageNet-1k dataset. The columns correspond to the metrics, and the rows - to the architectures.
|
| 723 |
+
|
| 724 |
+

|
| 725 |
+
|
| 726 |
+

|
2201.00xxx/2201.00058/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5637bcc9a3af3a4035fec4ad5aee3797a73f1d47672536685a6c65f809b872a5
|
| 3 |
+
size 1136803
|
2201.00xxx/2201.00058/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2201.00xxx/2201.00107/734bfe8b-4420-43f1-9421-124c4f83332f_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2201.00xxx/2201.00107/734bfe8b-4420-43f1-9421-124c4f83332f_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2201.00xxx/2201.00107/734bfe8b-4420-43f1-9421-124c4f83332f_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6a36dabb7b5f691287ea82ccc69fab0de61132766df10d17758cd816c86281d6
|
| 3 |
+
size 715012
|
2201.00xxx/2201.00107/full.md
ADDED
|
@@ -0,0 +1,531 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Quality-aware Part Models for Occluded Person Re-identification
|
| 2 |
+
|
| 3 |
+
Pengfei Wang, Student Member, IEEE, Changxing Ding, Member, IEEE, Zhiyin Shao, Zhibin Hong, Shengli Zhang, Member, IEEE, and Dacheng Tao, Fellow, IEEE
|
| 4 |
+
|
| 5 |
+
Abstract—Occlusion poses a major challenge for person re-identification (ReID). Existing approaches typically rely on outside tools to infer visible body parts, which may be suboptimal in terms of both computational efficiency and ReID accuracy. In particular, they may fail when facing complex occlusions, such as those between pedestrians. Accordingly, in this paper, we propose a novel method named Quality-aware Part Models (QPM) for occlusion-robust ReID. First, we propose to jointly learn part features and predict part quality scores. As no quality annotation is available, we introduce a strategy that automatically assigns low scores to occluded body parts, thereby weakening the impact of occluded body parts on ReID results. Second, based on the predicted part quality scores, we propose a novel identity-aware spatial attention (ISA) module. In this module, a coarse identity-aware feature is utilized to highlight pixels of the target pedestrian, so as to handle the occlusion between pedestrians. Third, we design an adaptive and efficient approach for generating global features from common non-occluded regions with respect to each image pair. This design is crucial, but is often ignored by existing methods. QPM has three key advantages: 1) it does not rely on any outside tools in either the training or inference stages; 2) it handles occlusions caused by both objects and other pedestrians; 3) it is highly computationally efficient. Experimental results on four popular databases for occluded ReID demonstrate that QPM consistently outperforms state-of-the-art methods by significant margins. The code of QPM will be released.
|
| 6 |
+
|
| 7 |
+
Index Terms—Person Re-identification, Occlusion, Attention Models
|
| 8 |
+
|
| 9 |
+
# I. INTRODUCTION
|
| 10 |
+
|
| 11 |
+
PERSON re-identification (ReID) involves spotting a specific person of interest, e.g. a missing child, across disjoint camera views. Due to the widespread deployment of modern surveillance systems, ReID has attracted increasing attention from both academia and industry [1]–[13]. Most existing ReID approaches assume that the pedestrian's entire body is visible, and tend to ignore the more challenging occlusion situations. However, in real-world applications, pedestrians are very often occluded by objects or other pedestrians.
|
| 12 |
+
|
| 13 |
+
Occlusion poses a major challenge for ReID, as it affects the appearance of pedestrian. As illustrated in Fig. 1(a), similar
|
| 14 |
+
|
| 15 |
+
Pengfei Wang, Changxing Ding, and Zhiyin Shao are with the School of Electronic and Information Engineering, South China University of Technology, 381 Wushan Road, Tianhe District, Guangzhou 510000, P.R. China (e-mail: eepengfei.wang@mail.scut.edu.cn; chxding@scut.edu.cn; eezyshao@mail.scut.edu.cn).
|
| 16 |
+
|
| 17 |
+
Zhibin Hong is with department of Computer Vision Technology (VIS), Baidu Inc, ShenZhen 518000, China (e-mail: hongzhibin@baidu.com).
|
| 18 |
+
|
| 19 |
+
Shengli Zhang is with the College of Electronic and Information Engineering, Shenzhen University, Shenzhen 518052, China (e-mail: zsl@szu.edu.cn).
|
| 20 |
+
|
| 21 |
+
Dacheng Tao is with JD Explore Academy, JD.com, Beijing 100176, China (e-mail: taodacheng@jd.com).
|
| 22 |
+
|
| 23 |
+

|
| 24 |
+
(a)
|
| 25 |
+
|
| 26 |
+

|
| 27 |
+
|
| 28 |
+

|
| 29 |
+
|
| 30 |
+

|
| 31 |
+
|
| 32 |
+

|
| 33 |
+
|
| 34 |
+

|
| 35 |
+
|
| 36 |
+

|
| 37 |
+
Fig. 1. Example images that illustrate the challenges for occluded ReID: (a) Similar occlusion reduces inter-class distance. (b) Dissimilar occlusion enlarges intra-class variation. (c) Both human parsing and pose estimation tools may fail when facing complex occlusions, e.g. occlusion between pedestrians. (d) Both tools may ignore discriminative accessories, such as backpacks or handbags. (Best viewed in color.)
|
| 38 |
+
|
| 39 |
+

|
| 40 |
+
(c)
|
| 41 |
+
|
| 42 |
+

|
| 43 |
+
|
| 44 |
+

|
| 45 |
+
|
| 46 |
+

|
| 47 |
+
(d)
|
| 48 |
+
|
| 49 |
+

|
| 50 |
+
|
| 51 |
+

|
| 52 |
+
|
| 53 |
+
occlusion reduces inter-class distance, which indicates that images of different identities may have similar visual features. Moreover, as shown in Fig. 1(b), different occlusions enlarge intra-class distance, meaning that two images of the same pedestrian may be quite different in terms of their appearance. This is because occlusions may differ as regards their location and content; therefore, occlusion tends to result in incorrect retrieval results.
|
| 54 |
+
|
| 55 |
+
Since occlusion changes pedestrian appearance, one intuitive solution is to only utilize visible body parts for ReID. Most existing methods adopt this strategy [14]–[16]. However, they usually rely on outside tools to acquire the visibility cues of body parts (e.g. the prediction confidence of pose estimation models). Beside extra computational cost, this strategy may not be robust to complex occlusions, such as occlusions between pedestrians. As illustrated in Fig. 1(c), both human parsing and pose estimation tools may fail when facing occlusions between pedestrians. Moreover, visibility is not equivalent to discriminative power. On the one hand, one visible part may look quite similar across different pedestrians. For example, nearly all pedestrians' forearms are not covered by any clothes in images captured in summer. On the other hand, invisible body parts may be occluded by discriminative accessories, such as backpacks and bags, as shown in Fig. 1(d). These accessories are critical for ReID, but tend to be ignored by outside tools. It is therefore reasonable to seek out a robust and easy-to-use method that automatically infers and utilizes discriminative body parts to handle the occlusion problem.
|
| 56 |
+
|
| 57 |
+
Accordingly, in this paper, we propose a novel framework named QPM for occluded ReID. QPM includes a part branch and a global branch. The part branch automatically infers part-specific quality scores rather than visibility scores. More specifically, it jointly learns discriminative part features and predicts part quality scores in an end-to-end fashion. This is achieved by including both pair-wise part distances and pairwise part quality scores in the triplet loss [17]; as a result, the part branch can automatically assign low quality scores to poor-quality body parts in order to weaken their influence. Another key benefit of our approach is that it is independent from any outside tools and does not require annotations of quality scores. However, when a pedestrian is occluded by other persons, the model may predict a relatively high score for occluded regions; this is because the model cannot well differentiate body parts of different pedestrians. We solve this problem in the global branch.
|
| 58 |
+
|
| 59 |
+
The global branch includes two main components. First, we propose an identity-aware spatial attention (ISA) module based on the predicted part quality scores. In this module, a coarse identity-aware feature is processed by a simple two-layer network and optimized using the cross-entropy loss function. Then, it is utilized to suppress noisy responses and highlight responses from the body region of the pedestrian needing to be identified. Therefore, it can be used to handle occlusions between pedestrians. Second, we design an adaptive and efficient approach to generate global features from the common nonoccluded regions with respect to each image pair. By contrast, existing works typically extract fixed global features, ignoring the difference in occlusion locations between a pair of images.
|
| 60 |
+
|
| 61 |
+
In the inference stage, both the part and global features are utilized to calculate the similarity score between each pair of images. The weighted average of both scores represents the overall similarity of an image pair. We conduct extensive experiments on four popular datasets for occluded ReID, i.e. Partial-iLIDS [18], Partial-REID [19], Occluded-Duke [14], and P-DukeMTMC [20]. The results show that our simple QPM model consistently outperforms existing approaches by significant margins. Moreover, our approach enjoys further advantages of being robust and easy to use.
|
| 62 |
+
|
| 63 |
+
In conclusion, the main contributions of this paper are summarized as follows:
|
| 64 |
+
|
| 65 |
+
- We propose an end-to-end framework that jointly learns discriminative features and predicts part quality scores. Compared with existing works, it does not rely on any outside tools in either the training or inference stages.
|
| 66 |
+
- We propose a novel identity-aware spatial attention (ISA) approach that efficiently handles the occlusion between pedestrians. Experimental results prove that it outperforms existing spatial attention methods.
|
| 67 |
+
- We introduce an Adaptive Global Feature Extraction (AGFE) module that extracts global features from the commonly non-occluded regions for each image pair, which significantly promotes ReID performance.
|
| 68 |
+
|
| 69 |
+
The remainder of this paper is organized as follows. We first review the related works in Section II. Then, we describe the proposed QPM in more detail in Section III. Extensive experimental results on four benchmarks are reported and
|
| 70 |
+
|
| 71 |
+
analyzed in Section IV, after which the conclusions of the present work are outlined in Section V.
|
| 72 |
+
|
| 73 |
+
# II. RELATED WORK
|
| 74 |
+
|
| 75 |
+
# A. Occluded Person ReID Models
|
| 76 |
+
|
| 77 |
+
One main challenge for occluded ReID is to identify visible body regions. Most existing works utilize visibility cues provided by outside tools [6], [21]–[25]. Here, we divide occluded ReID methods into two categories depending on whether or not outside tools are required during training and testing.
|
| 78 |
+
|
| 79 |
+
The first category of methods employs outside tools in both the training and testing stages [14]–[16]. For example, Miao et al. [14] utilized pose landmarks to identify visible local patches and only adopt commonly visible local patches of one image pair for matching. Wang et al. [15] utilized pose landmarks to learn high-order relation and topology information of the visible local features, so as to better match probe with gallery images. Gao et al. [16] employed graph matching and utilized pose landmarks to self-mine part visibility scores. They then match probe and gallery images by calculating the part-to-part distances in visible regions. However, in addition to its extra computational cost, another key downside of this approach is that external tools may not be reliable when encountering complex occlusions, as illustrated in Fig. 1(c-d).
|
| 80 |
+
|
| 81 |
+
The second category of methods avoids using outside tools in the testing stage [26]–[29]. For example, under the guidance of human masks, He et al. [26] designed an occlusion-sensitive foreground probability generator that enables the model to focus on non-occluded human body parts. He et al. [27] further combined pose landmarks and human masks to generate spatial attention maps that guide discriminative feature learning. These approaches can reduce the impact of occlusions on the extracted features. Despite the convenience this affords during testing, this approach still relies on the visibility information of body part for each training image.
|
| 82 |
+
|
| 83 |
+
Moreover, two very recent works [30], [31] try to predict visible scores without outside tools. VPM [31] classifies each pixel in the image into one body part. However, it is designed for the partial ReID problem and cannot be directly used to solve the occluded ReID problem. Specifically, VPM classifies each pixel in the partial image into one body part, assuming there is no occlusion in the partial image. ISP [30] performs cascaded clustering on feature maps to generate the pseudolabels of human parts for each pixel. However, ISP contains no strategy to handle the occlusion problem between pedestrians. In addition, the clustering process is time-consuming.
|
| 84 |
+
|
| 85 |
+
In a departure from existing works, our approach aims to predict the part quality rather than visibility in an end-to-end framework. And we do not employ any outside tools or require any quality annotations in either the training or inference stages. Moreover, our approach is robust to complex occlusions. In conclusion, QPM is a powerful and efficient model.
|
| 86 |
+
|
| 87 |
+
# B. Part-based Person ReID Models
|
| 88 |
+
|
| 89 |
+
Due to their powerful representation ability, part-based methods are popular for ReID. Depending on the way to
|
| 90 |
+
|
| 91 |
+

|
| 92 |
+
Fig. 2. Model architecture of QPM in the training stage. Based on ResNet-50, QPM builds a part branch and a global branch. The part branch jointly extracts part features and predicts part quality scores. The quality scores are utilized to weaken the impact of occluded body parts. The global branch first adopts a novel Identity-aware Spatial Attention module to handle occlusions between pedestrians. Then, it adaptively and efficiently extracts global features from the common non-occluded regions for each image pair. In the testing stage, the global features, part features, and part quality scores are utilized together for occluded ReID. (Best viewed in color.)
|
| 93 |
+
|
| 94 |
+
obtain body part locations, we divide existing works into three categories.
|
| 95 |
+
|
| 96 |
+
Fixed Location-based Methods. Methods in this category typically split the output feature maps of one backbone model into several stripes in fixed locations [3], [32], [33]. Part features are then respectively extracted from the stripes. For example, Sun et al. [3] uniformly divide the output feature maps into 6 horizontal stripes to represent different part-level features. Wang et al. [32] also partition one image into horizontal stripes. The main advantage of this strategy lies in its efficiency.
|
| 97 |
+
|
| 98 |
+
Outside Tools-based Methods. These methods utilize outside tools, e.g. pose estimation [23], [25], [34], [35] and human parsing models [6], [21], to detect body parts. They then extract part features from the detected body parts. There are two key downsides of these approaches: first, they require additional computational cost; second, ReID performance is vulnerable to the reliability of outside tools.
|
| 99 |
+
|
| 100 |
+
Attention-based Methods. These methods predict body part locations based on the feature maps produced by ReID models [2], [36]–[38]. For example, Zhao et al. [36] proposed to predict a set of masks and perform element-wise multiplication between one mask and each channel of the feature maps to produce part-specific features. In comparison, Li et al. [2] designed a hard regional attention model that can predict bounding boxes for each body part. However, the lack of explicit supervision for part alignment may cause difficulty in the optimization of attention models.
|
| 101 |
+
|
| 102 |
+
In part-based ReID methods, it is a common practice to concatenate part features as the final representation [3], [23], [33], [36], [39]–[42]. However, it is less effective for occluded ReID, as it ignores the impact of features from occluded parts.
|
| 103 |
+
|
| 104 |
+
In this paper, we accordingly propose to jointly learn part features and predict part quality. For simplicity, we extract part features from fixed part locations. If part locations are provided by outside tools or attention modules, the performance of our approach can be further promoted.
|
| 105 |
+
|
| 106 |
+
# III. METHODS
|
| 107 |
+
|
| 108 |
+
The architecture of QPM is illustrated in Fig. 2. It consists of a part feature learning branch and a global feature learning branch. The part branch outputs $K$ part-level features, as well as $K$ quality scores that indicate the discriminative power of each body part. The global branch generates global features that are adaptively and efficiently extracted from the common non-occluded regions for each gallery-query image pair. In the following, we will introduce three key designs in the two branches individually.
|
| 109 |
+
|
| 110 |
+
# A. Joint Learning Part Feature and Quality Scores
|
| 111 |
+
|
| 112 |
+
Part Feature Extractor. Following [3], we adopt ResNet-50 as backbone and remove its last spatial down-sampling operation to increase the size of the output feature maps. The output feature maps are denoted as $\mathbf{F}$ for simplicity. To obtain the part features, $\mathbf{F}$ is first uniformly split into $K$ parts in the vertical orientation. Following [3], we set $K$ as 6 in this work. Next, the feature maps for each part are processed by a Region Average Pooling (RAP) operation and one $1\times 1$ Conv layer. The parameters of the Conv layer are not shared between parts. For the $k$ -th part, the feature vectors before and after the $1\times 1$ Conv layer are denoted as $\mathbf{z}_k$ and $\mathbf{f}_k$ , respectively.
|
| 113 |
+
|
| 114 |
+
$\mathbf{f}_k$ is utilized as the final part feature and is optimized by the cross-entropy loss:
|
| 115 |
+
|
| 116 |
+
$$
|
| 117 |
+
\mathcal {L} _ {\text {p a r t}} ^ {i d} = \frac {1}{N} \sum_ {l = 1} ^ {N} \sum_ {k = 1} ^ {K} \mathcal {L} _ {c e} \left(\mathbf {W} _ {k} ^ {p} \mathbf {f} _ {k} ^ {l}\right), \tag {1}
|
| 118 |
+
$$
|
| 119 |
+
|
| 120 |
+
where $N$ denotes the batch size. $\mathbf{f}_k^l\in \mathbb{R}^d$ represents the $k$ -th part-level feature for the $l$ -th image in a batch. $\mathbf{W}_k^p$ stands for the parameters of the classification layer for the $k$ -th part feature. $\mathcal{L}_{ce}$ stands for the cross-entropy loss function.
|
| 121 |
+
|
| 122 |
+
Part Quality Predictor. A key challenge for occluded ReID is to identify visible body parts. Recent works [14], [16] typically rely on outside tools to infer whether or not one part is visible. However, as argued in Section I, visibility is not equivalent to discriminative power. We accordingly propose an efficient method to predict part feature quality rather than part visibility. As shown in Fig. 2, we feed $\mathbf{z}_k$ into the part quality predictor module. This module comprises one $1 \times 1$ Conv layer, one batch normalization (BN) [43] layer, and one sigmoid activation layer. The output of the sigmoid layer is the quality score of the $k$ -th part. The parameters of the predictor are not shared between parts.
|
| 123 |
+
|
| 124 |
+
# Joint Optimization.
|
| 125 |
+
|
| 126 |
+
As there is no annotation of part quality scores provided, we cannot impose a direct supervision signal on the predicted part quality. Recent works for video-based recognition have revealed that framewise features and quality scores can be jointly optimized with the same identification [44], [45] or metric learning loss [46]. In these works, e.g., QAN [46] and MG-RAFA [44], the quality scores are utilized to aggregate multiple frame-level features into a single video-level feature. However, this approach cannot be directly used in image-based ReID, as recognition is based on each single image. Inspired by these works, we propose to jointly optimize part features and part quality scores using triplet loss. Accordingly, the quality scores in QPM are imposed on part distances instead of frame features. Moreover, both pair-wise part distances and pair part quality scores are included in the triplet loss, instead of using a sample's own quality score only. In this way, our approach optimizes pair-wise part distance and pair part quality scores for occluded ReID in an end-to-end manner.
|
| 127 |
+
|
| 128 |
+
Specifically, given a probe image $I^p$ and a gallery image $I^g$ to be compared, we first calculate their part-wise cosine distances $d_k^{pg}$ ( $1 \leq k \leq K$ ). Next, the part-wise distances are summed via weighted average, as follows:
|
| 129 |
+
|
| 130 |
+
$$
|
| 131 |
+
D_{part}^{pg} = \frac{\sum_{k = 1}^{K} q_{k}^{p} q_{k}^{g} d_{k}^{pg}}{\sum_{k = 1}^{K} q_{k}^{p} q_{k}^{g}}, \tag{2}
|
| 132 |
+
$$
|
| 133 |
+
|
| 134 |
+
where $q_{k}^{p}$ and $q_{k}^{g}$ are the quality score of $k$ -th part for probe image $I^{p}$ and gallery image $I^{g}$ , respectively. In this way, body parts with low quality scores will contribute less to $D_{\text{part}}^{pg}$ , which weakens the impact of occlusion.
|
| 135 |
+
|
| 136 |
+
To sample sufficient triplets during training, we randomly sample $A$ images in each of $P$ random identities to create a
|
| 137 |
+
|
| 138 |
+
mini-batch, the batch size $N$ of which is equal to $P\times A$ . The triplet loss is formulated as $\mathcal{L}_{part}^{tp} =$
|
| 139 |
+
|
| 140 |
+
$$
|
| 141 |
+
\frac{1}{N_{tp}}\sum_{i = 1}^{P}\sum_{a = 1}^{A}\left[\alpha + \max_{p = 1\dots A} D_{part}^{a_i p_i} - \min_{\substack{n = 1\dots A\\ j = 1\dots P\\ j\neq i}} D_{part}^{a_i n_j}\right]_+, \tag{3}
|
| 142 |
+
$$
|
| 143 |
+
|
| 144 |
+
where $\alpha$ is the margin of the triplet constraint, while $N_{tp}$ is the number of triplets in a batch that violate the triplet constraint and $[\cdot ]_{+}$ denotes the hinge loss. $D_{part}^{a_i p_i}$ and $D_{part}^{a_i n_j}$ are calculated by Eq. 2 and denote the distances of the positive and negative image pairs in a triplet, respectively. In order to reduce triplet loss, the part quality predictors have to predict lower scores to occluded parts.
|
| 145 |
+
|
| 146 |
+
Discussion. With the help of the above constraints, the model can predict the quality scores of the part features without using any external tools. However, this approach still has limitations. Take the predicted quality scores in Fig. 4 as an example. It can be seen that the model works well for occlusions caused by objects (e.g. cars, trees, and boxes). In these situations, the quality scores of occluded body parts are very low. However, when a pedestrian is occluded by other persons, the model may predict a relatively high score for occluded regions; this is because the model cannot well differentiate body parts of different pedestrians. In the following, we handle the above problem in the global feature learning branch. Most existing approaches [14], [15], [29] simply extract global features from visible regions for each image. However, this strategy suffers from two problems. First, as explained above, visibility or quality prediction of image regions may be interfered by occlusions between pedestrians. Second, as shown in Fig. 1(b), two images may differ in occlusion locations, meaning that they are not directly comparable even if features are extracted from visible regions for each image. In the following, we propose an Identity-aware Spatial Attention (ISA) approach and an adaptive global feature extraction approach to handle each of these two problems, respectively.
|
| 147 |
+
|
| 148 |
+
# B. Identity-aware Spatial Attention
|
| 149 |
+
|
| 150 |
+
Fig. 3 illustrates the structure of ISA. ISA makes use of a coarse identity-aware feature to generate spatial attention for each image. This attention suppresses occlusions caused by objects and other pedestrians, meaning that only features of spatial regions relevant to the target pedestrian are highlighted.
|
| 151 |
+
|
| 152 |
+
In more detail, we feed $\mathbf{F}$ into another $1\times 1$ Conv layer and obtain the feature maps $\mathbf{G}$ . Global features are then extracted based on $\mathbf{G}$ . Similar to the part branch, we uniformly partition $\mathbf{G}$ into $K$ parts and obtain $K$ part-level features that are denoted as $\mathbf{g}_k$ ( $1\leq k\leq K$ ) via RAP operations. We then obtain a coarse identity-aware global feature $\mathbf{h}$ by fusing $\mathbf{g}_k$ via weighted averaging as follows:
|
| 153 |
+
|
| 154 |
+
$$
|
| 155 |
+
\mathbf {h} = \sum_ {k = 1} ^ {K} \hat {q} _ {k} \mathbf {g} _ {k}, \tag {4}
|
| 156 |
+
$$
|
| 157 |
+
|
| 158 |
+
where $\hat{q}_k$ is the normalized part quality score produced in the part branch in Section III-A. Formally,
|
| 159 |
+
|
| 160 |
+
$$
|
| 161 |
+
\hat {q} _ {k} = \frac {q _ {k}}{\sum_ {i = 1} ^ {K} q _ {i}}. \tag {5}
|
| 162 |
+
$$
|
| 163 |
+
|
| 164 |
+
To further reduce the impact of occlusions, we process $\mathbf{h}$ using a simple two-layer network inspired by the squeeze-and-excitation network [47]. As illustrated in Fig. 3, the dimension of $\mathbf{h}$ is first reduced via a $1\times 1$ Conv layer which is followed by a ReLU layer, then recovered to the original dimension by means of another $1\times 1$ Conv layer. The reduction ratio of the first Conv layer is set to 4; the output of the two Conv layers are denoted as $\hat{\mathbf{h}}$ and $\widetilde{\mathbf{h}}$ , respectively.
|
| 165 |
+
|
| 166 |
+
Moreover, to ensure that noisy elements in $\mathbf{h}$ are suppressed via the reduction operation, we impose a cross-entropy loss on $\hat{\mathbf{h}}$ as follows:
|
| 167 |
+
|
| 168 |
+
$$
|
| 169 |
+
\mathcal {L} _ {\text {g l o b a l}} ^ {i d} = \frac {1}{N} \sum_ {l = 1} ^ {N} \mathcal {L} _ {c e} \left(\mathbf {W} ^ {g} \hat {\mathbf {h}} ^ {l}\right), \tag {6}
|
| 170 |
+
$$
|
| 171 |
+
|
| 172 |
+
where $\hat{\mathbf{h}}^l$ represents the feature produced by the first Conv layer for the $l$ -th image in a batch, while $\mathbf{W}^g$ denotes the parameters of the classification layers.
|
| 173 |
+
|
| 174 |
+
By adopting this approach, information in $\tilde{\mathbf{h}}$ is identity-aware and noise-free. Accordingly, we employ $\tilde{\mathbf{h}}$ to generate the spatial attention map $\mathbf{M}$ for the feature maps $\mathbf{G}$ . Formally:
|
| 175 |
+
|
| 176 |
+
$$
|
| 177 |
+
\mathbf {M} = \sigma (\mathbf {G} * \widetilde {\mathbf {h}}), \tag {7}
|
| 178 |
+
$$
|
| 179 |
+
|
| 180 |
+
where $\sigma$ is a sigmoid function, while $*$ represents the inner product between $\widetilde{\mathbf{h}}$ and the feature vector of each pixel in $\mathbf{G}$ . Accordingly, $\mathbf{M}$ is a matrix with the same height and width as $\mathbf{G}$ . As identity-relevant pixels obtain a high response value in $\mathbf{M}$ , we apply $\mathbf{M}$ to weigh $\mathbf{G}$ and produce new feature maps denoted as $\widetilde{\mathbf{G}}$ . The above process can be summarized as follows:
|
| 181 |
+
|
| 182 |
+
$$
|
| 183 |
+
\widetilde {\mathbf {G}} = \mathbf {M} \odot \mathbf {G} + \mathbf {G}, \tag {8}
|
| 184 |
+
$$
|
| 185 |
+
|
| 186 |
+
where $\odot$ signifies the element-wise multiplication between $\mathbf{M}$ and each channel of $\mathbf{G}$ .
|
| 187 |
+
|
| 188 |
+
Discussion. To the best of our knowledge, ISA is one of the first efficient methods to address occlusions between pedestrians in occluded ReID. A few most recent works [48], [49] can potentially solve this problem. These approaches adopt a co-attention mechanism and attempt to search for pixel-level correspondence between each pair of images, enabling features to be extracted from semantically corresponding regions. However, during the training and inference stages, these approaches adopt computationally expensive matrix multiplication to infer semantically corresponding pixels for each pair of query and gallery images. Obviously, their computational cost is significantly higher than that of ISA. Therefore, our method has obvious advantages in efficiency for ReID.
|
| 189 |
+
|
| 190 |
+
# C. Adaptive Global Feature Extraction
|
| 191 |
+
|
| 192 |
+
As indicated in Fig. 5, the responses on $\widetilde{\mathbf{G}}$ focus primarily on the body of the pedestrian to identify after the processing of ISA. However, this does not mean that it is reasonable to extract global-level features directly from $\widetilde{\mathbf{G}}$ ; this is because the two images being compared may differ in terms of their occlusion locations. To ensure semantic consistency, it is essential to adaptively extract global features from the common non-occluded regions for each image pair. We designed the
|
| 193 |
+
|
| 194 |
+
Adaptive Global Feature Extraction (AGFE) module with the help of the part quality score to achieve this goal.
|
| 195 |
+
|
| 196 |
+
For example, given a probe image $I^p$ and a gallery image $I^g$ , we first obtain their feature maps $\widetilde{\mathbf{G}}^p$ and $\widetilde{\mathbf{G}}^g$ from the output of the ISA module. In the next step, we equally partition each of them into $K$ parts and apply the RAP operation on each divided feature map. In this way, we obtain a set of $K$ feature vectors for $I^p$ and $I^g$ , denoted as $\widetilde{\mathbf{g}}_k^p$ and $\widetilde{\mathbf{g}}_k^g$ respectively. We then adopt the part quality scores from the part branch to aggregate $\widetilde{\mathbf{g}}_k^p$ and $\widetilde{\mathbf{g}}_k^g$ and obtain the global-level features $\mathbf{h}_g^p$ and $\mathbf{h}_p^g$ for $I^p$ and $I^g$ , respectively. More specifically,
|
| 197 |
+
|
| 198 |
+
$$
|
| 199 |
+
\mathbf {h} _ {g} ^ {p} = \sum_ {k = 1} ^ {K} \widetilde {q} _ {k} \widetilde {\mathbf {g}} _ {k} ^ {p}, \tag {9}
|
| 200 |
+
$$
|
| 201 |
+
|
| 202 |
+
$$
|
| 203 |
+
\mathbf {h} _ {p} ^ {g} = \sum_ {k = 1} ^ {K} \widetilde {q} _ {k} \widetilde {\mathbf {g}} _ {k} ^ {g}, \tag {10}
|
| 204 |
+
$$
|
| 205 |
+
|
| 206 |
+
where $\widetilde{q}_k$ denotes the weight for $\widetilde{\mathbf{g}}_k^p$ and $\widetilde{\mathbf{g}}_k^g$ . $\widetilde{q}_k$ is computed as follows:
|
| 207 |
+
|
| 208 |
+
$$
|
| 209 |
+
\widetilde {q} _ {k} = \frac {q _ {k} ^ {p} q _ {k} ^ {g}}{\sum_ {i = 1} ^ {K} q _ {i} ^ {p} q _ {i} ^ {g}}. \tag {11}
|
| 210 |
+
$$
|
| 211 |
+
|
| 212 |
+
The classification loss for the final global representations can thus be formulated as follows:
|
| 213 |
+
|
| 214 |
+
$$
|
| 215 |
+
\mathcal{L}_{sg}^{id} = \frac{1}{N^{2}} \sum_{g = 1}^{N} \sum_{p = 1}^{N} \left(\mathcal{L}_{ce}\left(\mathbf{W}^{s} \mathbf{h}_{g}^{p}\right) + \mathcal{L}_{ce}\left(\mathbf{W}^{s} \mathbf{h}_{p}^{g}\right)\right). \tag{12}
|
| 216 |
+
$$
|
| 217 |
+
|
| 218 |
+
We also apply the triplet loss to ensure that the intra-class distances are smaller than the inter-class distances. This triplet loss is similar to Eq. 3:
|
| 219 |
+
|
| 220 |
+
$$
|
| 221 |
+
\mathcal {L} _ {\text {g l o b a l}} ^ {t p} = \frac {1}{N _ {t p}} \sum_ {i = 1} ^ {P} \sum_ {a = 1} ^ {A} [ \alpha + \max _ {p = 1 \dots A} D _ {\text {g l o b a l}} ^ {a _ {i} p _ {i}} - \min _ {\substack {n = 1 \dots A \\ j = 1 \dots P \\ j \neq i}} D _ {\text {g l o b a l}} ^ {a _ {i} n _ {j}} ] _ {+}, \tag{13}
|
| 222 |
+
$$
|
| 223 |
+
|
| 224 |
+
where $D_{global}^{a_i p_i}$ and $D_{global}^{a_i n_j}$ represent the cosine distances of global features which are obtained by Eq. 9 and Eq. 10 for the positive and negative image pairs in a triplet, respectively.
|
| 225 |
+
|
| 226 |
+
Obtaining global features from the common non-occluded regions of each image pair is usually ignored by existing works. As shown in Table V, the AGFE module significantly promotes performance for occluded ReID. This is because the AGFE module extracts semantically aligned global features.
|
| 227 |
+
|
| 228 |
+
# D. Occluded ReID via QPM
|
| 229 |
+
|
| 230 |
+
During training, the overall objective function of QPM can be written as follows:
|
| 231 |
+
|
| 232 |
+
$$
|
| 233 |
+
\mathcal {L} = \mathcal {L} _ {\text {p a r t}} ^ {i d} + \mathcal {L} _ {\text {p a r t}} ^ {t p} + \mathcal {L} _ {\text {g l o b a l}} ^ {i d} + \mathcal {L} _ {\text {s g}} ^ {i d} + \mathcal {L} _ {\text {g l o b a l}} ^ {t p}. \tag {14}
|
| 234 |
+
$$
|
| 235 |
+
|
| 236 |
+
During the training process, the overall loss is optimized as a whole: the parameters of the network, including those of the fully connected layers, are updated jointly via gradient descent.
|
| 237 |
+
|
| 238 |
+
Many existing works design local and global branches [14], [15], [50], [51] for multi-source structural information integration. Similar to [14], there are two parts in QPM that make up the final distance between one pair of query
|
| 239 |
+
|
| 240 |
+

|
| 241 |
+
Fig. 3. Structure of the Identity-aware Spatial Attention (ISA) module. It first adopts a coarse identity-aware global feature vector $\mathbf{h}$ and a lightweight two-layer network to generate a spatial attention map $\mathbf{M}$ . $\mathbf{M}$ is applied on feature maps, i.e. $\mathbf{G}$ , to suppress responses of occluded pixels. As $\mathbf{M}$ is identity aware, occlusions between pedestrians can be suppressed. (Best viewed in color.)
|
| 242 |
+
|
| 243 |
+
and gallery images: namely, the distance between part-level features and the distance between global-level features. In our approach, the distance between the part-level features is computed according to Eq. 2. The global-level features with respect to the image pair are obtained according to Eq. 9 and Eq. 10. Formally,
|
| 244 |
+
|
| 245 |
+
$$
|
| 246 |
+
D ^ {p q} = \gamma D _ {\text {p a r t}} ^ {p q} + (1 - \gamma) D _ {\text {g l o b a l}} ^ {p q}, \tag {15}
|
| 247 |
+
$$
|
| 248 |
+
|
| 249 |
+
where $\gamma$ is the weight that balances the contributions from $D_{part}^{pq}$ and $D_{global}^{pq}$ . $\gamma$ is consistently set to 0.6 in this work.
|
| 250 |
+
|
| 251 |
+
In the inference stage, we first compute the distance between a query image and each of the gallery images using the part features according to Eq. 2. The body parts with low quality scores will contribute less to the distance, which weakens the impact of occlusion. In this way, we efficiently obtain the top $n$ nearest neighbors for the query image. Then, we compute the final distance according to Eq. 15 between the query image and each of the $n$ nearest neighbors. Therefore, the AGFE module hardly increases the inference time cost.
|
| 252 |
+
|
| 253 |
+
# IV. EXPERIMENTS
|
| 254 |
+
|
| 255 |
+
# A. Datasets and Settings
|
| 256 |
+
|
| 257 |
+
Datasets. We conduct experiments on four popular databases for occluded ReID, i.e. Partial-iLIDS [18], Partial-REID [19], Occluded-Duke [14] and P-DukeMTMC [20].
|
| 258 |
+
|
| 259 |
+
Partial-iLIDS [18] was constructed based on the iLIDS [52] dataset. It contains 238 images of 119 identities, all of which were captured in an airport. Some images in the dataset contain people occluded by other individuals or luggage. Each pedestrian has 1 full-body image and 1 occluded image. All probe images are occluded person images, while all gallery images are holistic images.
|
| 260 |
+
|
| 261 |
+
Partial-REID [19] was collected at a university campus and includes 600 images of 60 pedestrians. Each person has 5 full-body images and 5 occluded images. These images are collected from different viewpoints, backgrounds, and different
|
| 262 |
+
|
| 263 |
+
TABLE I PERFORMANCE COMPARISONS ON OCCLUDED-DUKE UNDER SUPERVISED SETTING. * INDICATES THAT A DIFFERENT BACKBONE IS USED. + REPRESENTS THE EXTENDED VERSION OF THE CONFERENCE PAPER.
|
| 264 |
+
|
| 265 |
+
<table><tr><td>Method</td><td>Rank-1</td><td>Rank-5</td><td>Rank-10</td><td>mAP</td></tr><tr><td>Part-Aligned [36]</td><td>28.8</td><td>44.6</td><td>51.0</td><td>20.2</td></tr><tr><td>Part-Bilinear [56]</td><td>36.9</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Random Erasing [57]</td><td>40.5</td><td>59.6</td><td>66.8</td><td>30.0</td></tr><tr><td>DSR [18]</td><td>40.8</td><td>58.2</td><td>65.2</td><td>30.4</td></tr><tr><td>SFR [28]</td><td>42.3</td><td>60.3</td><td>67.3</td><td>32.0</td></tr><tr><td>PCB [3]</td><td>42.6</td><td>57.1</td><td>62.9</td><td>33.7</td></tr><tr><td>Adver Occluded [58]</td><td>44.5</td><td>-</td><td>-</td><td>32.2</td></tr><tr><td>PVPM [16]</td><td>47.0</td><td>-</td><td>-</td><td>37.7</td></tr><tr><td>PGFA [14]</td><td>51.4</td><td>68.6</td><td>74.9</td><td>37.3</td></tr><tr><td>HOReID [15]</td><td>55.1</td><td>-</td><td>-</td><td>43.8</td></tr><tr><td>PGFA+ [59]</td><td>56.3</td><td>72.4</td><td>78.0</td><td>43.5</td></tr><tr><td>ISP* [30]</td><td>62.8</td><td>78.1</td><td>82.9</td><td>52.3</td></tr><tr><td>QPM</td><td>64.4</td><td>79.3</td><td>84.2</td><td>49.7</td></tr><tr><td>QPM*</td><td>66.7</td><td>80.2</td><td>84.4</td><td>53.3</td></tr></table>
|
| 266 |
+
|
| 267 |
+
types of severe occlusion. All probe images are occluded person images, while all gallery images are holistic images.
|
| 268 |
+
|
| 269 |
+
Occluded-Duke [14] was constructed based on the DukeMTMC [53] database. It is composed of 15,618 training images of 702 identities, 2,210 occluded query images of 519 identities, and 17,661 gallery images. There are rich variations in Occluded-Duke, including different viewpoints and a large variety of obstacles, such as cars, bicycles, trees, and other persons. Occluded-Duke is a more difficult and practical dataset since both probe and gallery images have occlusions.
|
| 270 |
+
|
| 271 |
+
P-DukeMTMC [20] is another subset of DukeMTMC [53]. There are 12,927 training images of 665 identities, 2,163 query images of 634 identities, and 9,053 gallery images. These images are occluded by different types of occlusion in public, e.g., people, luggage, cars and guideboards.
|
| 272 |
+
|
| 273 |
+
Implementation Details. We conduct experiments using the Pytorch framework. We set both $P$ and $A$ to 8; therefore, the batch size is 64. We adopt random erasing to simulate occlusion. All images are resized to $384 \times 128$ pixels and augmented via random horizontal flipping. The number of body parts, i.e. $K$ , is set to 6. The margin $\alpha$ for the triplet loss is set to 0.3. The number of the nearest neighbors, i.e., $n$ , is set as 30. The SGD optimizer is utilized for model optimization. Following [3], [32], [41], we do not use weight regularization in the SGD optimizer. Fine-tuned from the IDE model [54], the QPM is trained in an end-to-end fashion for 70 epochs. The initial learning rate is set to 0.01 and is multiplied by 0.1 every 20 epochs.
|
| 274 |
+
|
| 275 |
+
Evaluation Protocols. We report the Cumulated Matching Characteristics (CMC) and mean Average Precision (mAP) value for the proposed approach. The evaluation package is provided by [55], and all the experimental results are obtained in the single query setting.
|
| 276 |
+
|
| 277 |
+
Moreover, we provide stability analysis on the performance of QPM in the supplementary material.
|
| 278 |
+
|
| 279 |
+
# B. Performance under Supervised Setting
|
| 280 |
+
|
| 281 |
+
For the two large-scale datasets, i.e. Occluded-Duke [14] and P-DukeMTMC [20], we train QPM using their own training sets, respectively.
|
| 282 |
+
|
| 283 |
+

|
| 284 |
+
Fig. 4. Example images with predicted part quality scores. (a) Occlusion caused by billboards. (b) Occlusion caused by cars. (c) Occlusion caused by buildings. (d) Occlusion caused by pedestrians.
|
| 285 |
+
|
| 286 |
+
TABLE II PERFORMANCE COMPARISONS ON P-DUKEMTMC UNDER SUPERVISED SETTING.
|
| 287 |
+
|
| 288 |
+
<table><tr><td>Method</td><td>Rank-1</td><td>Rank-5</td><td>Rank-10</td><td>mAP</td></tr><tr><td>Teacher-S [29]</td><td>51.4</td><td>50.9</td><td>-</td><td>-</td></tr><tr><td>PCB [3]</td><td>79.4</td><td>87.1</td><td>90.0</td><td>63.9</td></tr><tr><td>IDE [54]</td><td>82.9</td><td>89.4</td><td>91.5</td><td>65.9</td></tr><tr><td>PVPM [16]</td><td>85.1</td><td>91.3</td><td>93.3</td><td>69.9</td></tr><tr><td>PGFA+ [59]</td><td>85.7</td><td>92.0</td><td>94.2</td><td>72.4</td></tr><tr><td>ISP* [30]</td><td>89.0</td><td>94.1</td><td>95.3</td><td>74.7</td></tr><tr><td>QPM</td><td>89.4</td><td>93.9</td><td>95.6</td><td>74.4</td></tr><tr><td>QPM*</td><td>90.7</td><td>94.4</td><td>95.9</td><td>75.3</td></tr></table>
|
| 289 |
+
|
| 290 |
+
Results on Occluded-Duke. The performance of QPM and state-of-the-art methods on Occluded-Duke are tabulated in Table I. Some recent methods have achieved competitive performance with the help of pose landmarks: for example, HOReID [15] learns high-order relation and topology information for local features of visible landmarks, facilitating better match between probe and gallery images, and achieves $55.1\%$ Rank-1 accuracy and $43.8\%$ mAP. In comparison, QPM significantly outperforms HOReID by $9.3\%$ and $5.9\%$ in terms of Rank-1 accuracy and mAP, respectively. Moreover, QPM does not depend on pose estimation tools in either training or testing. This remarkable performance improvement clearly demonstrates the effectiveness of QPM.
|
| 291 |
+
|
| 292 |
+
One recent method $\mathrm{ISP^{*}}$ [30] achieves strong performance with a deeper backbone model, i.e. HRNet-W32 [60]. With the same backbone model, QPM\* achieves higher Rank-1 and mAP performance than $\mathrm{ISP^{*}}$ .
|
| 293 |
+
|
| 294 |
+
Results on P-DukeMTMC. Comparison results on P-DukeMTMC are summarized in Table II. As the table shows, QPM achieves $89.4\%$ Rank-1 accuracy and $74.4\%$ mAP, surpassing the best previous method PVPM [16] by $4.3\%$ and
|
| 295 |
+
|
| 296 |
+
$4.5\%$ in terms of Rank-1 accuracy and mAP, respectively. The above comparison results are consistent with those obtained on the Occluded-Duke database. These experimental results further demonstrate that our method can effectively solve the occlusion problem for ReID.
|
| 297 |
+
|
| 298 |
+
# C. Performance under Transfer Setting
|
| 299 |
+
|
| 300 |
+
Following [15], [16], [18], [31], ReID models in this setting are trained using the Market-1501 database [61]. Then, the models are directly evaluated on the Partial-REID, Partial-iLIDS, and P-DukeMTMC databases.
|
| 301 |
+
|
| 302 |
+
Results on Partial-REID and Partial-iLIDS. Each of the two databases contains two types of testing data: namely, partial images from which occluded regions have been manually removed, and the original occluded images. Similarly, depending on the testing data used, existing methods can be roughly divided into two categories: those using partial images and those using the original occluded images. The performance of QPM and state-of-the-art methods are tabulated in Table III.
|
| 303 |
+
|
| 304 |
+
As is evident from the table, for Partial-iLIDS, it can be seen that QPM outperforms all other methods that also evaluate on the original occluded images. For example, QPM beats PGFA [14] by $8.2\%$ and $4.8\%$ in terms of Rank-1 and Rank-3 accuracy respectively. It also outperforms one of the most recent methods using partial data, i.e. HOReID [15], by $4.7\%$ in terms of Rank-1 accuracy. For Partial-REID, QPM outperforms all other methods using the original occluded images: for example, QPM beats PVPM [16] by $3.4\%$ in terms of Rank-1 accuracy. While its performance is slightly lower than HOReID [15] (which evaluates on partial data), it should be noted that unlike HOReID, our method does not require either the manual removal of occluded regions during testing or any additional tools. $\mathrm{ISP^{*}}$ [30] achieves $65.7\%$ Rank-1 and $75.3\%$ Rank-3 accuracy on Partial-REID, which is lower than those by QPM. This is because the image quality of Partial-REID is poor and the image resolution is relatively low. In this case, ISP is prone to errors in pixel classification. In comparison, our method is more robust.
|
| 305 |
+
|
| 306 |
+
Results on P-DukeMTMC. Finally, we evaluate the performance of QPM on P-DukeMTMC under transfer setting. The results presented in Table IV show that QPM achieves state-of-the-art performance under all metrics. For example, QPM outperforms one of the most recent methods, i.e. PVPM [16], by $5.8\%$ and $1.9\%$ in terms of Rank-1 accuracy and mAP, respectively. Experiments on this database further justifies the effectiveness of QPM.
|
| 307 |
+
|
| 308 |
+
# D. Ablation Study
|
| 309 |
+
|
| 310 |
+
Ablation studies are conducted on two large-scale datasets, i.e. Occluded-Duke [14] and P-DukeMTMC [20]. The experimental results on the two reported benchmarks are shown in Table V. It should be noted that Table V lists the results on Occluded-Duke under the supervised setting and the results on P-DukeMTMC under the transfer setting. These results show the robustness and effectiveness of our method under different experimental settings.
|
| 311 |
+
|
| 312 |
+
TABLE III PERFORMANCE COMPARISONS ON PARTIAL-ILIDS AND PARTIAL-REID UNDER TRANSFER SETTING. $^+$ REPRESENTS THE EXTENDED VERSION OF THE CONFERENCE PAPER.
|
| 313 |
+
|
| 314 |
+
<table><tr><td rowspan="2" colspan="2">Methods</td><td colspan="2">Partial-iLIDS</td><td colspan="2">Partial-REID</td></tr><tr><td>R-1</td><td>R-3</td><td>R-1</td><td>R-3</td></tr><tr><td rowspan="8">partial</td><td>MTRC [62]</td><td>17.7</td><td>26.1</td><td>23.7</td><td>27.3</td></tr><tr><td>AMC+SWM [19]</td><td>21.0</td><td>32.8</td><td>37.3</td><td>46.0</td></tr><tr><td>DSR [18]</td><td>58.8</td><td>67.2</td><td>50.7</td><td>70.0</td></tr><tr><td>DCR [63]</td><td>60.5</td><td>69.7</td><td>52.0</td><td>67.5</td></tr><tr><td>STNReID [64]</td><td>54.6</td><td>71.3</td><td>66.7</td><td>80.3</td></tr><tr><td>SFR [28]</td><td>63.9</td><td>74.8</td><td>56.9</td><td>78.5</td></tr><tr><td>VPM [31]</td><td>65.5</td><td>74.8</td><td>67.7</td><td>81.9</td></tr><tr><td>HOReID [15]</td><td>72.6</td><td>86.4</td><td>85.3</td><td>91.0</td></tr><tr><td rowspan="7">original</td><td>MaskReID [22]</td><td>33.0</td><td>-</td><td>28.7</td><td>-</td></tr><tr><td>PCB [3]</td><td>46.8</td><td>-</td><td>56.3</td><td>-</td></tr><tr><td>ISP* [30]</td><td>66.4</td><td>80.7</td><td>65.7</td><td>75.3</td></tr><tr><td>PGFA [14]</td><td>69.1</td><td>80.9</td><td>68.0</td><td>80.0</td></tr><tr><td>PVPM [16]</td><td>-</td><td>-</td><td>78.3</td><td>-</td></tr><tr><td>PGFA+ [59]</td><td>70.6</td><td>81.3</td><td>72.5</td><td>83.0</td></tr><tr><td>QPM</td><td>77.3</td><td>85.7</td><td>81.7</td><td>88.0</td></tr></table>
|
| 315 |
+
|
| 316 |
+
TABLE IV PERFORMANCE COMPARISONS ON P-DUKEMTMC UNDER TRANSFER SETTING.
|
| 317 |
+
|
| 318 |
+
<table><tr><td>Methods</td><td>Rank-1</td><td>Rank-5</td><td>Rank-10</td><td>mAP</td></tr><tr><td>HACNN [2]</td><td>30.4</td><td>42.1</td><td>49.0</td><td>17.0</td></tr><tr><td>MLFN [65]</td><td>31.3</td><td>43.6</td><td>49.6</td><td>18.1</td></tr><tr><td>OsNet [66]</td><td>33.7</td><td>46.5</td><td>54.0</td><td>20.1</td></tr><tr><td>IDE [54]</td><td>36.0</td><td>49.3</td><td>55.2</td><td>19.7</td></tr><tr><td>Part Bilinear [56]</td><td>39.2</td><td>50.6</td><td>56.4</td><td>25.4</td></tr><tr><td>PCB [3]</td><td>43.6</td><td>57.1</td><td>63.3</td><td>24.7</td></tr><tr><td>PGFA [14]</td><td>44.2</td><td>56.7</td><td>63.0</td><td>23.1</td></tr><tr><td>ISP* [30]</td><td>46.3</td><td>56.9</td><td>60.8</td><td>26.4</td></tr><tr><td>PGFA+ [59]</td><td>48.2</td><td>59.6</td><td>65.8</td><td>26.8</td></tr><tr><td>PVPM [16]</td><td>51.5</td><td>64.4</td><td>69.6</td><td>29.2</td></tr><tr><td>QPM</td><td>57.3</td><td>69.9</td><td>75.5</td><td>31.1</td></tr></table>
|
| 319 |
+
|
| 320 |
+
$$
|
| 321 |
+
\mathcal {L} = \mathcal {L} _ {\text {p a r t}} ^ {i d} + \mathcal {L} _ {\text {p a r t}} ^ {t p} + \mathcal {L} _ {\text {g l o b a l}} ^ {i d} + \mathcal {L} _ {\text {s g}} ^ {i d} + \mathcal {L} _ {\text {g l o b a l}} ^ {t p}. \tag {16}
|
| 322 |
+
$$
|
| 323 |
+
|
| 324 |
+
Effectiveness of the quality scores. In Table V, 'Baseline' refers to the PCB model [3]. 'Baseline(+triplet)' equips PCB with the triplet loss in Eq. 3, while all part quality scores are set to 1. 'Part branch' means that we adopt the part feature learning branch only in QPM for ReID. As shown in Table V, 'Baseline(+triplet)' slightly improves the performance relative to the baseline. In comparison, the 'Part branch' brings in significant performance promotion, suggesting that part quality scores considerably benefit the occluded ReID task.
|
| 325 |
+
|
| 326 |
+
Effectiveness of the ISA module. In Table V, 'GAP global' means that we perform GAP on the feature maps $\mathbf{G}$ to obtain the global feature for each image. 'AGFE global' means that we utilize adaptive global feature extraction module. When the ISA module is equipped, performance of both types of global features is promoted. In particular, ISA promotes the Rank-1 accuracy of 'AGFE global' by $6.0\%$ and $4.6\%$ , as well as mAP by $3.0\%$ and $1.8\%$ , on the two databases, respectively.
|
| 327 |
+
|
| 328 |
+
Effectiveness of the AGFE module. Compared with 'GAP global' and 'GAP global(+ISA)', the AGFE module consistently brings about significant performance gains. For example, 'AGFE global(+ISA)' outperforms 'GAP global(+ISA)' in the
|
| 329 |
+
|
| 330 |
+
TABLE V ABLATION STUDY ON EACH KEY COMPONENT OF QPM.
|
| 331 |
+
|
| 332 |
+
<table><tr><td rowspan="2">Method</td><td colspan="2">Occluded-Duke</td><td colspan="2">P-DukeMTMC</td></tr><tr><td>Rank-1</td><td>mAP</td><td>Rank-1</td><td>mAP</td></tr><tr><td>Baseline</td><td>42.1</td><td>34.8</td><td>47.2</td><td>22.9</td></tr><tr><td>Baseline(+triplet)</td><td>43.7</td><td>37.0</td><td>48.5</td><td>26.4</td></tr><tr><td>Part branch</td><td>58.3</td><td>46.7</td><td>55.1</td><td>28.8</td></tr><tr><td>GAP global</td><td>40.8</td><td>31.7</td><td>26.0</td><td>14.7</td></tr><tr><td>GAP global(+ISA)</td><td>42.7</td><td>33.0</td><td>27.0</td><td>15.5</td></tr><tr><td>AGFE global</td><td>56.6</td><td>42.2</td><td>41.4</td><td>23.3</td></tr><tr><td>AGFE global(+ISA)</td><td>62.6</td><td>45.2</td><td>46.0</td><td>25.1</td></tr><tr><td>QPM</td><td>64.4</td><td>49.7</td><td>57.3</td><td>31.1</td></tr></table>
|
| 333 |
+
|
| 334 |
+
TABLE VI PERFORMANCE COMPARISON BETWEEN DIFFERENT TYPES OF GLOBAL FEATURES.
|
| 335 |
+
|
| 336 |
+
<table><tr><td rowspan="2">Method</td><td colspan="2">Occluded-Duke</td><td colspan="2">P-DukeMTMC</td></tr><tr><td>Rank-1</td><td>mAP</td><td>Rank-1</td><td>mAP</td></tr><tr><td>GAP global</td><td>40.8</td><td>31.7</td><td>26.0</td><td>14.7</td></tr><tr><td>SI global</td><td>38.5</td><td>26.6</td><td>26.5</td><td>13.1</td></tr><tr><td>AGFE global</td><td>56.6</td><td>42.2</td><td>41.4</td><td>23.3</td></tr></table>
|
| 337 |
+
|
| 338 |
+
Rank-1 accuracy by as much as $19.9\%$ on Occluded-Duke. These experimental results indicate that it is vital to adaptively extract global features from the common non-occluded regions for each image pair.
|
| 339 |
+
|
| 340 |
+
Moreover, we compare the performance of 'AGFE global' with 'SI global' in Table VI. 'SI global' means that we obtain global features for each single image (SI) using Eq. 4, without considering the difference in occlusion locations for each image pair. To facilitate a fair comparison, 'SI global' adopts the same loss functions as 'AGFE global', i.e., the cross-entropy loss and triplet loss. It is shown that the performance of 'SI global' drops dramatically compared with 'AGFE global', which suggests that Eq. 4 alone cannot promote ReID performance. This experimental result indicates that it is vital to extract semantically consistent global features for each image pair.
|
| 341 |
+
|
| 342 |
+
Effectiveness of the combination. With both ISA and AGFE modules, the quality of global features is promoted significantly. For example, 'AGFE global(+ISA)' outperforms 'GAP global' in the Rank-1 accuracy by as much as $21.8\%$ and $20.0\%$ on Occluded-Duke and P-DukeMTMC databases, respectively.
|
| 343 |
+
|
| 344 |
+
Finally, the combination of the part branch and the global branch, which is denoted as QPM in Table V, achieves better performance than using either one branch alone. The above comparisons justify the effectiveness of each key component in QPM.
|
| 345 |
+
|
| 346 |
+
ISA vs. Other Approaches. To facilitate fair comparison, all experiments are based on the 'AGFE global' model. We equip the 'AGFE global' model with different spatial attention modules respectively and summarize their performance in Table VII. It is shown that ISA significantly outperforms all other methods by at least $3.3\%$ and $2.9\%$ in terms of Rank-1 accuracy on Occluded-Duke and P-DukeMTMC, respectively. This is because ISA is identity-aware; therefore, it effectively handles the occlusions between pedestrians.
|
| 347 |
+
|
| 348 |
+
TABLE VII PERFORMANCE COMPARISONS OF DIFFERENT ATTENTION MODELS.
|
| 349 |
+
|
| 350 |
+
<table><tr><td rowspan="2">Methods</td><td colspan="2">Occluded-Duke</td><td colspan="2">P-DukeMTMC</td></tr><tr><td>Rank-1</td><td>mAP</td><td>Rank-1</td><td>mAP</td></tr><tr><td>AGFE global</td><td>56.6</td><td>42.2</td><td>41.4</td><td>23.3</td></tr><tr><td>+FC-SA [67]</td><td>56.9</td><td>42.0</td><td>42.8</td><td>23.8</td></tr><tr><td>+RGA-SA [68]</td><td>57.6</td><td>41.2</td><td>40.9</td><td>23.1</td></tr><tr><td>+RA-SA [69]</td><td>58.1</td><td>42.7</td><td>41.3</td><td>23.3</td></tr><tr><td>+CBAM-SA [70]</td><td>59.3</td><td>43.9</td><td>43.1</td><td>24.5</td></tr><tr><td>+ISA</td><td>62.6</td><td>45.2</td><td>46.0</td><td>25.1</td></tr></table>
|
| 351 |
+
|
| 352 |
+

|
| 353 |
+
|
| 354 |
+

|
| 355 |
+
|
| 356 |
+

|
| 357 |
+
|
| 358 |
+

|
| 359 |
+
(a)
|
| 360 |
+
|
| 361 |
+

|
| 362 |
+
|
| 363 |
+

|
| 364 |
+
|
| 365 |
+

|
| 366 |
+
|
| 367 |
+

|
| 368 |
+
(b)
|
| 369 |
+
|
| 370 |
+

|
| 371 |
+
|
| 372 |
+

|
| 373 |
+
|
| 374 |
+

|
| 375 |
+
|
| 376 |
+

|
| 377 |
+
(c)
|
| 378 |
+
|
| 379 |
+

|
| 380 |
+
|
| 381 |
+

|
| 382 |
+
|
| 383 |
+

|
| 384 |
+
|
| 385 |
+

|
| 386 |
+
(d)
|
| 387 |
+
|
| 388 |
+

|
| 389 |
+
|
| 390 |
+

|
| 391 |
+
|
| 392 |
+

|
| 393 |
+
|
| 394 |
+

|
| 395 |
+
(e)
|
| 396 |
+
|
| 397 |
+

|
| 398 |
+
|
| 399 |
+

|
| 400 |
+
|
| 401 |
+

|
| 402 |
+
|
| 403 |
+

|
| 404 |
+
(f)
|
| 405 |
+
Fig. 5. Illustration of heat maps for feature maps after processing by different attention models. (a) The original image. (b) w/o attention. (c) FC-SA. (d) RGA-SA. (e) RA-SA. (f) CBAM-SA. (g) ISA (ours). (Best viewed in color.)
|
| 406 |
+
|
| 407 |
+

|
| 408 |
+
|
| 409 |
+

|
| 410 |
+
|
| 411 |
+

|
| 412 |
+
|
| 413 |
+

|
| 414 |
+
(g)
|
| 415 |
+
|
| 416 |
+
Moreover, we illustrate the heat maps for feature maps after the processing of different attention modules in Fig. 5. We have the following observations. First, heat maps for the baseline model have high responses on both occluded and non-occluded regions; second, existing popular spatial attention models [67]–[70] handle occlusions between pedestrians poorly; third, with the identity-aware guidance, our ISA module can well differentiate discriminative body parts from the occluded ones by both objects and other pedestrians. The above comparisons further demonstrate the effectiveness of ISA.
|
| 417 |
+
|
| 418 |
+
Comparisons of Model Complexity. In this experiment, we demonstrate that QPM not only achieves superior performance in terms of occluded ReID accuracy, but also offers advantages in terms of both its time and space complexities.
|
| 419 |
+
|
| 420 |
+
Three recent powerful occluded ReID approaches are compared: PGFA [14], HOReID [15], and $\mathrm{PGFA}^{+}$ [59]. To facilitate fair comparison, all experimental settings are consistent with the paper description. Following [14], [15], [59], the input
|
| 421 |
+
|
| 422 |
+
TABLE VIII COMPARISONS OF MODEL COMPLEXITY ON THE OCCLUDED-DUKE DATABASE. + REPRESENTS THE EXTENDED VERSION OF THE CONFERENCE PAPER.
|
| 423 |
+
|
| 424 |
+
<table><tr><td>Method</td><td>Train</td><td>Inference</td><td>Params</td><td>Rank-1</td></tr><tr><td>PGFA [14]</td><td>0.82s/iter</td><td>0.78s/img</td><td>115.5M</td><td>51.4</td></tr><tr><td>HOReID [15]</td><td>1.34s/iter</td><td>1.94s/img</td><td>117.5M</td><td>55.1</td></tr><tr><td>PGFA+ [59]</td><td>0.88s/iter</td><td>0.79s/img</td><td>115.5M</td><td>56.3</td></tr><tr><td>QPM</td><td>0.68s/iter</td><td>0.47s/img</td><td>29.0M</td><td>64.4</td></tr></table>
|
| 425 |
+
|
| 426 |
+
TABLE IX PERFORMANCE OF QPM WITH DIFFERENT VALUES OF $K$
|
| 427 |
+
|
| 428 |
+
<table><tr><td rowspan="2">Part number K</td><td colspan="2">Occluded-Duke</td><td colspan="2">P-DukeMTMC</td></tr><tr><td>Rank-1</td><td>mAP</td><td>Rank-1</td><td>mAP</td></tr><tr><td>2</td><td>61.7</td><td>49.1</td><td>44.2</td><td>25.6</td></tr><tr><td>3</td><td>61.9</td><td>49.4</td><td>49.1</td><td>26.8</td></tr><tr><td>4</td><td>62.7</td><td>49.4</td><td>50.5</td><td>28.4</td></tr><tr><td>6</td><td>64.4</td><td>49.7</td><td>57.3</td><td>31.1</td></tr><tr><td>8</td><td>62.7</td><td>49.5</td><td>53.7</td><td>28.8</td></tr></table>
|
| 429 |
+
|
| 430 |
+
images are resized to $256 \times 128$ pixels for HOReID and $384 \times 128$ pixels for PGFA, $\mathrm{PGFA}^{+}$ and QPM. The batch size is set to 64 uniformly for all methods. Comparisons are conducted on a Titan V GPU, and results are summarized in Table VIII. The inference time cost in Table VIII includes the feature extraction of the query image and the matching time of all gallery images.
|
| 431 |
+
|
| 432 |
+
As is evident from the Table VIII, the model size of QPM is much smaller since it does not need an additional detection model. In addition, QPM has a faster training and testing speed. Specifically, our test time is only $60\%$ and $24\%$ of PGFA [14] and HOReID [15], respectively. This is because QPM does not require time-consuming human key point extraction. Although QPM uses less additional information during training and testing than PGFA [14] and HOReID [15], it still has significant performance advantages. Accordingly, the above comparisons demonstrate that the proposed QPM model is both compact and efficient.
|
| 433 |
+
|
| 434 |
+
# E. Parameter Analysis
|
| 435 |
+
|
| 436 |
+
The Impact of Part Number $K$ . In this experiment, we analyze the impact of the part number $K$ . Experimental results are summarized in Table IX. It is shown that the optimal value of $K$ is 6, which is consistent with the conclusion in [3]. Therefore, we consistently set $K$ to 6 in this work.
|
| 437 |
+
|
| 438 |
+
The Impact of the Feature Dimension $d$ . Table X shows the ReID performance with different feature dimension $d$ . QPM
|
| 439 |
+
|
| 440 |
+
TABLE X PERFORMANCE OF QPM WITH DIFFERENT VALUES OF $d$
|
| 441 |
+
|
| 442 |
+
<table><tr><td rowspan="2">Feature dimension d</td><td colspan="2">Occluded-Duke</td><td colspan="2">P-DukeMTMC</td></tr><tr><td>rank-1</td><td>mAP</td><td>rank-1</td><td>mAP</td></tr><tr><td>128</td><td>61.0</td><td>48.7</td><td>52.0</td><td>28.8</td></tr><tr><td>256</td><td>62.9</td><td>49.5</td><td>55.2</td><td>29.3</td></tr><tr><td>512</td><td>63.5</td><td>49.7</td><td>56.3</td><td>30.8</td></tr><tr><td>1024</td><td>64.4</td><td>49.7</td><td>57.3</td><td>31.1</td></tr></table>
|
| 443 |
+
|
| 444 |
+
TABLE XI PERFORMANCE OF QPM WITH DIFFERENT VALUES OF $\gamma$
|
| 445 |
+
|
| 446 |
+
<table><tr><td rowspan="2">Weight γ</td><td colspan="2">Occluded-Duke</td><td colspan="2">P-DukeMTMC</td></tr><tr><td>rank-1</td><td>mAP</td><td>rank-1</td><td>mAP</td></tr><tr><td>0.5</td><td>64.0</td><td>49.3</td><td>57.0</td><td>30.7</td></tr><tr><td>0.6</td><td>64.4</td><td>49.7</td><td>57.3</td><td>31.1</td></tr><tr><td>0.7</td><td>63.6</td><td>49.3</td><td>57.1</td><td>30.6</td></tr><tr><td>0.8</td><td>62.9</td><td>49.3</td><td>56.6</td><td>30.1</td></tr><tr><td>0.9</td><td>61.4</td><td>48.7</td><td>56.2</td><td>29.6</td></tr><tr><td>1.0</td><td>58.3</td><td>46.7</td><td>55.1</td><td>28.8</td></tr></table>
|
| 447 |
+
|
| 448 |
+
consistently achieves state-of-the-art performance when $d$ is set to 256, 512, and 1024, with the best result achieved when $d$ is set to 1024.
|
| 449 |
+
|
| 450 |
+
The Impact of the weight $\gamma$ . Table XI shows the ReID performance with different values of $\gamma$ . It is shown that the optimal value of $\gamma$ is 0.6. Therefore, we consistently set $\gamma$ to 0.6 in this work.
|
| 451 |
+
|
| 452 |
+
# V. CONCLUSION
|
| 453 |
+
|
| 454 |
+
In this paper, we propose a novel framework named QPM to handle the occluded person ReID problem. Unlike most existing methods, which depend on visibility cues from outside tools, QPM jointly learns part features and predicts part quality in an end-to-end framework without using any annotations or outside tools. Moreover, based on the predicted part quality scores, we propose a novel identity-aware spatial attention (ISA) model to handle occlusion between pedestrians. We further design a novel approach that adaptively generates global features from common non-occluded regions for each image pair. Finally, extensive experiments on four popular datasets demonstrate the effectiveness of QPM.
|
| 455 |
+
|
| 456 |
+
# REFERENCES
|
| 457 |
+
|
| 458 |
+
[1] Z. Zhang, C. Lan, W. Zeng, and Z. Chen, “Densely semantically aligned person re-identification,” in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2019, pp. 667–676.
|
| 459 |
+
[2] W. Li, X. Zhu, and S. Gong, "Harmonious attention network for person re-identification," in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2018, pp. 2285-2294.
|
| 460 |
+
[3] Y. Sun, L. Zheng, Y. Yang, Q. Tian, and S. Wang, "Beyond part models: Person retrieval with refined part pooling (and a strong convolutional baseline)," in Proc. Eur. Conf. Comput. Vis., 2018, pp. 480-496.
|
| 461 |
+
[4] J. Xu, R. Zhao, F. Zhu, H. Wang, and W. Ouyang, “Attention-aware compositional network for person re-identification,” in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2018, pp. 2119–2128.
|
| 462 |
+
[5] Y. Chen, X. Zhu, and S. Gong, "Person re-identification by deep learning multi-scale representations," in Proc. IEEE Int. Conf. Comput. Vis., 2017, pp. 2590–2600.
|
| 463 |
+
[6] C. Song, Y. Huang, W. Ouyang, and L. Wang, "Mask-guided contrastive attention model for person re-identification," in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2018, pp. 1179-1188.
|
| 464 |
+
[7] M. Tian, S. Yi, H. Li, S. Li, X. Zhang, J. Shi, J. Yan, and X. Wang, "Eliminating background-bias for robust person re-identification," in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2018, pp. 5794-5803.
|
| 465 |
+
[8] A. Wu, W.-S. Zheng, and J.-H. Lai, “Robust depth-based person re-identification,” IEEE Trans. Image Process, vol. 26, no. 6, pp. 2588–2603, 2017.
|
| 466 |
+
[9] H. Luo, W. Jiang, Y. Gu, F. Liu, X. Liao, S. Lai, and J. Gu, "A strong baseline and batch normalization neck for deep person re-identification," IEEE Trans. Multimedia, vol. 22, no. 10, pp. 2597-2609, 2019.
|
| 467 |
+
[10] W. J. Scheirer, P. J. Flynn, C. Ding, G. Guo, V. Struc, M. Al Jazaery, K. Grm, S. Dobrisek, D. Tao, Y. Zhu et al., "Report on the BTAS 2016 video person recognition evaluation," in IEEE 8th International Conference on Biometrics Theory, Applications and Systems (BTAS), 2016.
|
| 468 |
+
|
| 469 |
+
[11] L. Wei, S. Zhang, H. Yao, W. Gao, and Q. Tian, “Glad: Global-local-alignment descriptor for scalable person re-identification,” IEEE Trans. Multimedia, vol. 21, no. 4, pp. 986–999, 2018.
|
| 470 |
+
[12] C. Yan, G. Pang, X. Bai, C. Liu, N. Xin, L. Gu, and J. Zhou, "Beyond triplet loss: person re-identification with fine-grained difference-aware pairwise loss," IEEE Trans. Multimedia, 2021.
|
| 471 |
+
[13] B. Jiang, X. Wang, A. Zheng, J. Tang, and B. Luo, "Ph-gcn: Person retrieval with part-based hierarchical graph convolutional network," IEEE Trans. Multimedia, 2021.
|
| 472 |
+
[14] J. Miao, Y. Wu, P. Liu, Y. Ding, and Y. Yang, “Pose-guided feature alignment for occluded person re-identification,” in Proc. IEEE Int. Conf. Comput. Vis., 2019, pp. 542–551.
|
| 473 |
+
[15] G. Wang, S. Yang, H. Liu, Z. Wang, Y. Yang, S. Wang, G. Yu, E. Zhou, and J. Sun, “High-order information matters: Learning relation and topology for occluded person re-identification,” in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., June 2020.
|
| 474 |
+
[16] S. Gao, J. Wang, H. Lu, and Z. Liu, “Pose-guided visible part matching for occluded person Reid,” in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., June 2020.
|
| 475 |
+
[17] A. Hermans, L. Beyer, and B. Leibe, "In defense of the triplet loss for person re-identification," arXiv preprint arXiv:1703.07737, 2017.
|
| 476 |
+
[18] L. He, J. Liang, H. Li, and Z. Sun, “Deep spatial feature reconstruction for partial person re-identification: Alignment-free approach,” in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2018, pp. 7073–7082.
|
| 477 |
+
[19] W.-S. Zheng, X. Li, T. Xiang, S. Liao, J. Lai, and S. Gong, “Partial person re-identification,” in Proc. IEEE Int. Conf. Comput. Vis., 2015, pp. 4678–4686.
|
| 478 |
+
[20] J. Zhuo, Z. Chen, J. Lai, and G. Wang, “Occluded person re-identification,” in 2018 IEEE Int. Conf. Multimedia Expo, 2018, pp. 1–6.
|
| 479 |
+
[21] M. M. Kalayeh, E. Basaran, M. Gökmen, M. E. Kamasak, and M. Shah, "Human semantic parsing for person re-identification," in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2018, pp. 1062-1071.
|
| 480 |
+
[22] L. Qi, J. Huo, L. Wang, Y. Shi, and Y. Gao, "Maskreid: A mask based deep ranking neural network for person re-identification," arXiv preprint arXiv:1804.03864, 2018.
|
| 481 |
+
[23] L. Zheng, Y. Huang, H. Lu, and Y. Yang, "Pose-invariant embedding for deep person re-identification," IEEE Trans. Image Process, vol. 28, no. 9, pp. 4500-4509, 2019.
|
| 482 |
+
[24] J. Liu, B. Ni, Y. Yan, P. Zhou, S. Cheng, and J. Hu, “Pose transferrable person re-identification,” in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2018, pp. 4099–4108.
|
| 483 |
+
[25] C. Su, J. Li, S. Zhang, J. Xing, W. Gao, and Q. Tian, “Pose-driven deep convolutional model for person re-identification,” in Proc. IEEE Int. Conf. Comput. Vis., 2017, pp. 3960–3969.
|
| 484 |
+
[26] L. He, Y. Wang, W. Liu, X. Liao, H. Zhao, Z. Sun, and J. Feng, "Foreground-aware pyramid reconstruction for alignment-free occluded person re-identification." Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2019.
|
| 485 |
+
[27] L. He and W. Liu, "Guided saliency feature learning for person re-identification in crowded scenes," in Proc. Eur. Conf. Comput. Vis., 2020, pp. 357-373.
|
| 486 |
+
[28] L. He, Z. Sun, Y. Zhu, and Y. Wang, "Recognizing partial biometric patterns," arXiv preprint arXiv:1810.07399, 2018.
|
| 487 |
+
[29] J. Zhuo, J. Lai, and P. Chen, “A novel teacher-student learning framework for occluded person re-identification,” arXiv preprint arXiv:1907.03253, 2019.
|
| 488 |
+
[30] K. Zhu, H. Guo, Z. Liu, M. Tang, and J. Wang, "Identity-guided human semantic parsing for person re-identification," in Proc. Eur. Conf. Comput. Vis., 2020, pp. 346-363.
|
| 489 |
+
[31] Y. Sun, Q. Xu, Y. Li, C. Zhang, Y. Li, S. Wang, and J. Sun, "Perceive where to focus: Learning visibility-aware part-level features for partial person re-identification," in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2019, pp. 393-402.
|
| 490 |
+
[32] G. Wang, Y. Yuan, X. Chen, J. Li, and X. Zhou, “Learning discriminative features with multiple granularities for person re-identification,” in Proc. ACM Int. Conf. Multimedia, 2018, pp. 274–282.
|
| 491 |
+
[33] F. Zheng, C. Deng, X. Sun, X. Jiang, X. Guo, Z. Yu, F. Huang, and R. Ji, "Pyramidal person re-identification via multi-loss dynamic training," in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2019, pp. 8514-8522.
|
| 492 |
+
[34] M. S. Sarfraz, A. Schumann, A. Eberle, and R. Stiefelhagen, “A pose-sensitive embedding for person re-identification with expanded cross neighborhood re-ranking,” in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2018, pp. 420–429.
|
| 493 |
+
[35] H. Zhao, M. Tian, S. Sun, J. Shao, J. Yan, S. Yi, X. Wang, and X. Tang, "Spindle net: Person re-identification with human body region guided
|
| 494 |
+
|
| 495 |
+
feature decomposition and fusion," in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2017, pp. 1077-1085.
|
| 496 |
+
[36] L. Zhao, X. Li, Y. Zhuang, and J. Wang, “Deeply-learned part-aligned representations for person re-identification,” in Proc. IEEE Int. Conf. Comput. Vis., 2017, pp. 3219–3228.
|
| 497 |
+
[37] D. Li, X. Chen, Z. Zhang, and K. Huang, “Learning deep context-aware features over body and latent parts for person re-identification,” in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2017, pp. 384–393.
|
| 498 |
+
[38] X. Gong, Z. Yao, X. Li, Y. Fan, B. Luo, J. Fan, and B. Lao, "Lag-net: Multi-granularity network for person re-identification via local attention system," IEEE Trans. Multimedia, 2021.
|
| 499 |
+
[39] Y. Li, J. He, T. Zhang, X. Liu, Y. Zhang, and F. Wu, "Diverse part discovery: Occluded person re-identification with part-aware transformer," in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2021, pp. 2898–2907.
|
| 500 |
+
[40] C. Wan, Y. Wu, X. Tian, J. Huang, and X.-S. Hua, “Concentrated local part discovery with fine-grained part representation for person re-identification,” IEEE Trans. Multimedia, vol. 22, no. 6, pp. 1605–1618, 2019.
|
| 501 |
+
[41] C. Ding, K. Wang, P. Wang, and D. Tao, "Multi-task learning with coarse priors for robust part-aware person re-identification," IEEE Trans. Pattern Anal. Mach. Intell, 2020.
|
| 502 |
+
[42] K. Wang, P. Wang, C. Ding, and D. Tao, "Batch coherence-driven network for part-aware person re-identification," IEEE Trans. Image Process, vol. 30, pp. 3405-3418, 2021.
|
| 503 |
+
[43] S. Ioffe and C. Szegedy, "Batch normalization: Accelerating deep network training by reducing internal covariate shift," in International conference on machine learning. PMLR, 2015, pp. 448-456.
|
| 504 |
+
[44] Z. Zhang, C. Lan, W. Zeng, and Z. Chen, “Multi-granularity reference-aided attentive feature aggregation for video-based person re-identification,” in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2020, pp. 10407–10416.
|
| 505 |
+
[45] Z. Zhou, Y. Huang, W. Wang, L. Wang, and T. Tan, "See the forest for the trees: Joint spatial and temporal recurrent neural networks for video-based person re-identification," in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2017, pp. 4747-4756.
|
| 506 |
+
[46] Y. Liu, J. Yan, and W. Ouyang, "Quality aware network for set to set recognition," in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2017, pp. 5790-5799.
|
| 507 |
+
[47] J. Hu, L. Shen, and G. Sun, "Squeeze-and-excitation networks," in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2018, pp. 7132-7141.
|
| 508 |
+
[48] S. Zhao, C. Gao, J. Zhang, H. Cheng, C. Han, X. Jiang, X. Guo, W.-S. Zheng, N. Sang, and X. Sun, "Do not disturb me: Person re-identification under the interference of other pedestrians," in Proc. Eur. Conf. Comput. Vis., 2020, pp. 647-663.
|
| 509 |
+
[49] X. Lu, W. Wang, C. Ma, J. Shen, L. Shao, and F. Porikli, "See more, know more: Unsupervised video object segmentation with co-attention siamese networks," in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2019, pp. 3623-3632.
|
| 510 |
+
[50] M. Zhang, W. Li, R. Tao, H. Li, and Q. Du, "Information fusion for classification of hyperspectral and lidar data using ip-cnn," IEEE Trans. Geosci. Remote. Sens. Lett., 2021.
|
| 511 |
+
[51] P. Xie, M. Zhao, and X. Hu, "Pistrc: Position-informed sign language transformer with content-aware convolution," IEEE Trans. Multimedia, 2021.
|
| 512 |
+
[52] W.-S. Zheng, S. Gong, and T. Xiang, "Person re-identification by probabilistic relative distance comparison," in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2011, pp. 649-656.
|
| 513 |
+
[53] Z. Zheng, L. Zheng, and Y. Yang, "Unlabeled samples generated by gan improve the person re-identification baseline in vitro," in Proc. IEEE Int. Conf. Comput. Vis., 2017, pp. 3754-3762.
|
| 514 |
+
[54] L. Zheng, H. Zhang, S. Sun, M. Chandraker, Y. Yang, and Q. Tian, "Person re-identification in the wild," in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2017, pp. 1367-1376.
|
| 515 |
+
[55] K. Zhou and T. Xiang, "Torchreid: A library for deep learning person re-identification in pytorch," arXiv preprint arXiv:1910.10093, 2019.
|
| 516 |
+
[56] Y. Suh, J. Wang, S. Tang, T. Mei, and K. M. Lee, "Part-aligned bilinear representations for person re-identification," in Proc. Eur. Conf. Comput. Vis., 2018, pp. 418-437.
|
| 517 |
+
[57] Z. Zhong, L. Zheng, G. Kang, S. Li, and Y. Yang, "Random erasing data augmentation," in Proc. AAAI Conf. Artif. Intell., 2020, pp. 13001-13008.
|
| 518 |
+
[58] H. Huang, D. Li, Z. Zhang, X. Chen, and K. Huang, "Adversarially occluded samples for person re-identification," in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2018, pp. 5098-5107.
|
| 519 |
+
|
| 520 |
+
[59] J. Miao, Y. Wu, and Y. Yang, "Identifying visible parts via pose estimation for occluded person re-identification," IEEE Trans. Neural Netw. Learn. Syst., 2021.
|
| 521 |
+
[60] K. Sun, B. Xiao, D. Liu, and J. Wang, "Deep high-resolution representation learning for human pose estimation," in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2019, pp. 5693-5703.
|
| 522 |
+
[61] L. Zheng, L. Shen, L. Tian, S. Wang, J. Wang, and Q. Tian, "Scalable person re-identification: A benchmark," in Proc. IEEE Int. Conf. Comput. Vis., 2015, pp. 1116-1124.
|
| 523 |
+
[62] S. Liao, A. K. Jain, and S. Z. Li, “Partial face recognition: Alignment-free approach,” IEEE Trans. Pattern Anal. Mach. Intell, vol. 35, no. 5, pp. 1193–1205, 2012.
|
| 524 |
+
[63] Z. Gao, H. Zhang, L. Gao, Z. Cheng, R. Hong, and S. Chen, “Dcr: A unified framework for holistic/partial person Reid,” IEEE Trans. Multimedia, 2020.
|
| 525 |
+
[64] H. Luo, W. Jiang, X. Fan, and C. Zhang, "Stnreid: Deep convolutional networks with pairwise spatial transformer networks for partial person re-identification," IEEE Trans. Multimedia, vol. 22, no. 11, pp. 2905–2913, 2020.
|
| 526 |
+
[65] X. Chang, T. M. Hospedales, and T. Xiang, "Multi-level factorisation net for person re-identification," in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2018, pp. 2109-2118.
|
| 527 |
+
[66] K. Zhou, Y. Yang, A. Cavallaro, and T. Xiang, "Omni-scale feature learning for person re-identification," in Proc. IEEE Int. Conf. Comput. Vis., 2019, pp. 3702-3712.
|
| 528 |
+
[67] Y. Liu, Z. Yuan, W. Zhou, and H. Li, "Spatial and temporal mutual promotion for video-based person re-identification," in Proc. AAAI Conf. Artif. Intell., vol. 33, no. 01, 2019, pp. 8786-8793.
|
| 529 |
+
[68] Z. Zhang, C. Lan, W. Zeng, X. Jin, and Z. Chen, “Relation-aware global attention for person re-identification,” in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2020, pp. 3186–3195.
|
| 530 |
+
[69] F. Wang, M. Jiang, C. Qian, S. Yang, C. Li, H. Zhang, X. Wang, and X. Tang, “Residual attention network for image classification,” in Proc. IEEE Conf. Comput. Vis. Pattern Recognit., 2017, pp. 3156-3164.
|
| 531 |
+
[70] S. Woo, J. Park, J.-Y. Lee, and I. So Kweon, “Cbam: Convolutional block attention module,” in Proc. Eur. Conf. Comput. Vis., 2018, pp. 3-19.
|
2201.00xxx/2201.00107/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3038d265e8343dc34eeb2d9e3901135bc3381c9679bbfa41fd6a066819e129e0
|
| 3 |
+
size 835857
|
2201.00xxx/2201.00107/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2201.00xxx/2201.00140/b6725be1-2dfc-4354-9009-f7657b4687af_content_list.json
ADDED
|
@@ -0,0 +1,1598 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "Toward Pareto Efficient Fairness- Utility Trade-off in Recommendation through Reinforcement Learning",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
166,
|
| 8 |
+
99,
|
| 9 |
+
831,
|
| 10 |
+
151
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Yingqiang Ge†, Xiaoting Zhao*, Lucia Yu*, Saurabh Paul*, Diane Hu*, Chu-Cheng Hsieh*, Yongfeng Zhang† \n†Rutgers University * Etsy Inc.",
|
| 17 |
+
"bbox": [
|
| 18 |
+
267,
|
| 19 |
+
162,
|
| 20 |
+
730,
|
| 21 |
+
214
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "yingqiang.ge@rutgers.edu,{xzhao,lyu,spaul,dhu,chsieh}@etsy.com,yongfeng.zhang@rutgers.edu",
|
| 28 |
+
"bbox": [
|
| 29 |
+
173,
|
| 30 |
+
215,
|
| 31 |
+
821,
|
| 32 |
+
229
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "ABSTRACT",
|
| 39 |
+
"text_level": 1,
|
| 40 |
+
"bbox": [
|
| 41 |
+
83,
|
| 42 |
+
239,
|
| 43 |
+
183,
|
| 44 |
+
253
|
| 45 |
+
],
|
| 46 |
+
"page_idx": 0
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"type": "text",
|
| 50 |
+
"text": "The issue of fairness in recommendation is becoming increasingly essential as Recommender Systems (RS) touch and influence more and more people in their daily lives. In fairness-aware recommendation, most of the existing algorithmic approaches mainly aim at solving a constrained optimization problem by imposing a constraint on the level of fairness while optimizing the main recommendation objective, e.g., click through rate (CTR). While this alleviates the impact of unfair recommendations, the expected return of an approach may significantly compromise the recommendation accuracy due to the inherent trade-off between fairness and utility. This motivates us to deal with these conflicting objectives and explore the optimal trade-off between them in recommendation. One conspicuous approach is to seek a Pareto efficient/optimal solution to guarantee optimal compromises between utility and fairness. Moreover, considering the needs of real-world e-commerce platforms, it would be more desirable if we can generalize the whole Pareto Frontier, so that the decision-makers can specify any preference of one objective over another based on their current business needs. Therefore, in this work, we propose a fairness-aware recommendation framework using multi-objective reinforcement learning (MORL), called MoFIR (pronounced \"more fair\"), which is able to learn a single parametric representation for optimal recommendation policies over the space of all possible preferences. Specially, we modify traditional Deep Deterministic Policy Gradient (DDPG) by introducing conditioned network (CN) into it, which conditions the networks directly on these preferences and outputs Q-value-vectors. Experiments on several real-world recommendation datasets verify the superiority of our framework on both fairness metrics and recommendation measures when compared with all other baselines. 
We also extract the approximate Pareto Frontier on real-world datasets generated by MoFIR and compare to state-of-the-art fairness methods.",
|
| 51 |
+
"bbox": [
|
| 52 |
+
81,
|
| 53 |
+
258,
|
| 54 |
+
482,
|
| 55 |
+
686
|
| 56 |
+
],
|
| 57 |
+
"page_idx": 0
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"type": "text",
|
| 61 |
+
"text": "CCS CONCEPTS",
|
| 62 |
+
"text_level": 1,
|
| 63 |
+
"bbox": [
|
| 64 |
+
83,
|
| 65 |
+
703,
|
| 66 |
+
220,
|
| 67 |
+
717
|
| 68 |
+
],
|
| 69 |
+
"page_idx": 0
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"type": "text",
|
| 73 |
+
"text": "- Information systems $\\rightarrow$ Recommender systems; - Computing methodologies $\\rightarrow$ Sequential decision making.",
|
| 74 |
+
"bbox": [
|
| 75 |
+
81,
|
| 76 |
+
722,
|
| 77 |
+
482,
|
| 78 |
+
750
|
| 79 |
+
],
|
| 80 |
+
"page_idx": 0
|
| 81 |
+
},
|
| 82 |
+
{
|
| 83 |
+
"type": "text",
|
| 84 |
+
"text": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than ACM must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.",
|
| 85 |
+
"bbox": [
|
| 86 |
+
81,
|
| 87 |
+
780,
|
| 88 |
+
482,
|
| 89 |
+
852
|
| 90 |
+
],
|
| 91 |
+
"page_idx": 0
|
| 92 |
+
},
|
| 93 |
+
{
|
| 94 |
+
"type": "text",
|
| 95 |
+
"text": "WSDM '22, February 21-25, 2022, Tempe, AZ, USA",
|
| 96 |
+
"bbox": [
|
| 97 |
+
84,
|
| 98 |
+
853,
|
| 99 |
+
316,
|
| 100 |
+
863
|
| 101 |
+
],
|
| 102 |
+
"page_idx": 0
|
| 103 |
+
},
|
| 104 |
+
{
|
| 105 |
+
"type": "text",
|
| 106 |
+
"text": "$\\odot$ 2022 Association for Computing Machinery.",
|
| 107 |
+
"bbox": [
|
| 108 |
+
84,
|
| 109 |
+
864,
|
| 110 |
+
302,
|
| 111 |
+
875
|
| 112 |
+
],
|
| 113 |
+
"page_idx": 0
|
| 114 |
+
},
|
| 115 |
+
{
|
| 116 |
+
"type": "text",
|
| 117 |
+
"text": "ACM ISBN 978-1-4503-9132-0/22/02...$15.00",
|
| 118 |
+
"bbox": [
|
| 119 |
+
84,
|
| 120 |
+
875,
|
| 121 |
+
294,
|
| 122 |
+
883
|
| 123 |
+
],
|
| 124 |
+
"page_idx": 0
|
| 125 |
+
},
|
| 126 |
+
{
|
| 127 |
+
"type": "text",
|
| 128 |
+
"text": "https://doi.org/10.1145/3488560.3498487",
|
| 129 |
+
"bbox": [
|
| 130 |
+
84,
|
| 131 |
+
883,
|
| 132 |
+
272,
|
| 133 |
+
895
|
| 134 |
+
],
|
| 135 |
+
"page_idx": 0
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"type": "text",
|
| 139 |
+
"text": "KEYWORDS",
|
| 140 |
+
"text_level": 1,
|
| 141 |
+
"bbox": [
|
| 142 |
+
514,
|
| 143 |
+
239,
|
| 144 |
+
620,
|
| 145 |
+
253
|
| 146 |
+
],
|
| 147 |
+
"page_idx": 0
|
| 148 |
+
},
|
| 149 |
+
{
|
| 150 |
+
"type": "text",
|
| 151 |
+
"text": "Recommender System; Multi-Objective Reinforcement Learning; Pareto Efficient Fairness; Unbiased Recommendation",
|
| 152 |
+
"bbox": [
|
| 153 |
+
513,
|
| 154 |
+
258,
|
| 155 |
+
913,
|
| 156 |
+
284
|
| 157 |
+
],
|
| 158 |
+
"page_idx": 0
|
| 159 |
+
},
|
| 160 |
+
{
|
| 161 |
+
"type": "text",
|
| 162 |
+
"text": "ACM Reference Format:",
|
| 163 |
+
"text_level": 1,
|
| 164 |
+
"bbox": [
|
| 165 |
+
514,
|
| 166 |
+
291,
|
| 167 |
+
661,
|
| 168 |
+
301
|
| 169 |
+
],
|
| 170 |
+
"page_idx": 0
|
| 171 |
+
},
|
| 172 |
+
{
|
| 173 |
+
"type": "text",
|
| 174 |
+
"text": "Yingqiang Ge, Xiaoting Zhao, Lucia Yu, Saurabh Paul, Diane Hu, Chu-Cheng Hsieh, Yongfeng Zhang. 2022. Toward Pareto Efficient Fairness-Utility Trade-off in Recommendation through Reinforcement Learning. In Proceedings of the Fifteenth ACM International Conference on Web Search and Data Mining (WSDM '22), February 21–25, 2022, Tempe, AZ, USA. ACM, New York, NY, USA, 10 pages. https://doi.org/10.1145/3488560.3498487",
|
| 175 |
+
"bbox": [
|
| 176 |
+
513,
|
| 177 |
+
303,
|
| 178 |
+
913,
|
| 179 |
+
378
|
| 180 |
+
],
|
| 181 |
+
"page_idx": 0
|
| 182 |
+
},
|
| 183 |
+
{
|
| 184 |
+
"type": "text",
|
| 185 |
+
"text": "1 INTRODUCTION",
|
| 186 |
+
"text_level": 1,
|
| 187 |
+
"bbox": [
|
| 188 |
+
514,
|
| 189 |
+
392,
|
| 190 |
+
687,
|
| 191 |
+
406
|
| 192 |
+
],
|
| 193 |
+
"page_idx": 0
|
| 194 |
+
},
|
| 195 |
+
{
|
| 196 |
+
"type": "text",
|
| 197 |
+
"text": "Personalized recommender systems (RS), which are extensively employed in e-commerce platforms, have been acknowledged for their capacity to deliver high-quality services that bridge the gap between products and customers [7, 17, 44, 51]. Despite these huge advantages, several recent studies also raised concerns that RS may be vulnerable to algorithmic bias in several aspects, which may result in detrimental consequences for underrepresented or disadvantaged groups [19, 29, 43, 59]. For example, the \"Matthew Effect\" becomes increasingly evident in RS, which creates a huge disparity in the exposure of the producers/products in real-world recommendation systems [16, 18, 33]. Fortunately, these concerns about algorithmic fairness have resulted in a resurgence of interest to develop fairness-aware recommendation models to ensure such models do not become a source of unfair discrimination in recommendation [13, 15, 26, 28].",
|
| 198 |
+
"bbox": [
|
| 199 |
+
511,
|
| 200 |
+
411,
|
| 201 |
+
913,
|
| 202 |
+
619
|
| 203 |
+
],
|
| 204 |
+
"page_idx": 0
|
| 205 |
+
},
|
| 206 |
+
{
|
| 207 |
+
"type": "text",
|
| 208 |
+
"text": "In the area of fairness-aware recommendation, the methods can be roughly divided into three categories: pre-processing, in-processing and post-processing algorithms [14, 29]. Pre-processing methods usually aim to remove bias in data, e.g., sampling from data to cover items of all groups or balancing data to increase coverage of minority groups. In-processing methods aim at encoding fairness as part of the objective function, while post-processing methods tend to modify the presentations of the results. Even though all of them could successfully alleviate the impact of unfair recommendations to some extent, the expected return of an approach may significantly compromise the recommendation accuracy due to the inherent trade-off between fairness and utility, which has been demonstrated by several recent work both empirically and theoretically [22, 23, 32, 55].",
|
| 209 |
+
"bbox": [
|
| 210 |
+
511,
|
| 211 |
+
619,
|
| 212 |
+
913,
|
| 213 |
+
811
|
| 214 |
+
],
|
| 215 |
+
"page_idx": 0
|
| 216 |
+
},
|
| 217 |
+
{
|
| 218 |
+
"type": "text",
|
| 219 |
+
"text": "In light of the above, one fundamental research questions is asked, RQ1: Can we learn a recommendation model that allows for higher fairness without significantly compromising recommendation accuracy? And a more challenging one is, RQ2: Can we learn a single recommendation model that is able to produce optimal recommendation policies under different levels of fairness-utility trade-off",
|
| 220 |
+
"bbox": [
|
| 221 |
+
513,
|
| 222 |
+
811,
|
| 223 |
+
913,
|
| 224 |
+
896
|
| 225 |
+
],
|
| 226 |
+
"page_idx": 0
|
| 227 |
+
},
|
| 228 |
+
{
|
| 229 |
+
"type": "aside_text",
|
| 230 |
+
"text": "arXiv:2201.00140v1 [cs.IR] 1 Jan 2022",
|
| 231 |
+
"bbox": [
|
| 232 |
+
22,
|
| 233 |
+
282,
|
| 234 |
+
57,
|
| 235 |
+
700
|
| 236 |
+
],
|
| 237 |
+
"page_idx": 0
|
| 238 |
+
},
|
| 239 |
+
{
|
| 240 |
+
"type": "text",
|
| 241 |
+
"text": "so that it would be more desirable for decision-makers of e-commerce platforms to specify any preference of one objective over another based on their current business needs?",
|
| 242 |
+
"bbox": [
|
| 243 |
+
83,
|
| 244 |
+
106,
|
| 245 |
+
480,
|
| 246 |
+
147
|
| 247 |
+
],
|
| 248 |
+
"page_idx": 1
|
| 249 |
+
},
|
| 250 |
+
{
|
| 251 |
+
"type": "text",
|
| 252 |
+
"text": "To deal with RQ1, one conspicuous approach is to seek a Pareto optimal solution to guarantee optimal compromises between utility and fairness, where a Pareto efficient/optimal solution means no single objective can be further improved without hurting the others. To find solutions with different levels of trade-off between utility and fairness (RQ2), we need to generalize their Pareto frontier in the objective space, where Pareto frontier denotes a set, whose elements are all Pareto optimal. Unfortunately, state-of-the-art approaches of fairness-aware recommendation are limited in understanding the fairness-utility trade-off.",
|
| 253 |
+
"bbox": [
|
| 254 |
+
81,
|
| 255 |
+
148,
|
| 256 |
+
480,
|
| 257 |
+
286
|
| 258 |
+
],
|
| 259 |
+
"page_idx": 1
|
| 260 |
+
},
|
| 261 |
+
{
|
| 262 |
+
"type": "text",
|
| 263 |
+
"text": "Therefore, in this work, we aim to address the above problems and propose a fairness-aware recommendation framework using multi-objective reinforcement learning (MORL) with linear preferences, called MoFIR, which aims to learn a single parametric representation for optimal recommendation policies over the space of all possible preferences. Technically, we first formulate the fairness-aware recommendation task as a Multi-Objective Markov Decision Process (MOMDP), with one recommendation objective, e.g., CTR, and one fairness objective, e.g., item exposure fairness (our method is able to generalize to more recommendation objectives as well as more fairness objectives). Second, we modify classic and commonly-used RL algorithm—DDPG [42] by introducing conditioned networks [3] into it, which is a representative method to deal with multi-objective reinforcement learning. Specially, we condition the policy network and the value network directly on the preferences by augmenting them to the feature space. Finally, we utilize the vectorized Q-value functions together with modified loss function to update the parameters. The contributions of this work can be summarized as follows:",
|
| 264 |
+
"bbox": [
|
| 265 |
+
81,
|
| 266 |
+
286,
|
| 267 |
+
482,
|
| 268 |
+
547
|
| 269 |
+
],
|
| 270 |
+
"page_idx": 1
|
| 271 |
+
},
|
| 272 |
+
{
|
| 273 |
+
"type": "list",
|
| 274 |
+
"sub_type": "text",
|
| 275 |
+
"list_items": [
|
| 276 |
+
"- We study the problem of Pareto optimal/efficient fairness-utility trade-off in recommendation and extensively explore their Pareto frontier to better satisfy real-world needs;",
|
| 277 |
+
"- We formulate the problem into a MOMDP and solve it through a MORL framework, MoFIR, which is optimized over the entire space of preferences in a domain, and allows the trained model to produce the optimal policy for any specified preferences;",
|
| 278 |
+
"- Unlike prior methods for fairness-aware recommendation, the proposed framework does not employ any relaxation for objectives in the optimization problem, hence it could achieve state-of-the-art results;",
|
| 279 |
+
"- Experiments on several real-world recommendation datasets verify the superiority of our framework on both fairness measures and recommendation performance when compared with all other baselines."
|
| 280 |
+
],
|
| 281 |
+
"bbox": [
|
| 282 |
+
83,
|
| 283 |
+
553,
|
| 284 |
+
478,
|
| 285 |
+
758
|
| 286 |
+
],
|
| 287 |
+
"page_idx": 1
|
| 288 |
+
},
|
| 289 |
+
{
|
| 290 |
+
"type": "text",
|
| 291 |
+
"text": "2 RELATED WORK",
|
| 292 |
+
"text_level": 1,
|
| 293 |
+
"bbox": [
|
| 294 |
+
83,
|
| 295 |
+
773,
|
| 296 |
+
256,
|
| 297 |
+
787
|
| 298 |
+
],
|
| 299 |
+
"page_idx": 1
|
| 300 |
+
},
|
| 301 |
+
{
|
| 302 |
+
"type": "text",
|
| 303 |
+
"text": "2.1 Fairness in Recommendation",
|
| 304 |
+
"text_level": 1,
|
| 305 |
+
"bbox": [
|
| 306 |
+
83,
|
| 307 |
+
794,
|
| 308 |
+
364,
|
| 309 |
+
808
|
| 310 |
+
],
|
| 311 |
+
"page_idx": 1
|
| 312 |
+
},
|
| 313 |
+
{
|
| 314 |
+
"type": "text",
|
| 315 |
+
"text": "There have been growing concerns on fairness in recommendation as recommender systems touch and influence more and more people in their daily lives. Several recent works have found various types of bias in recommendations, such as gender and race [2, 8], item popularity [15, 16, 59], user feedback [13, 25, 27] and opinion polarity [54]. There are two primary paradigms adopted in recent",
|
| 316 |
+
"bbox": [
|
| 317 |
+
81,
|
| 318 |
+
811,
|
| 319 |
+
480,
|
| 320 |
+
896
|
| 321 |
+
],
|
| 322 |
+
"page_idx": 1
|
| 323 |
+
},
|
| 324 |
+
{
|
| 325 |
+
"type": "text",
|
| 326 |
+
"text": "studies on algorithmic discrimination: individual fairness and group fairness. Individual fairness requires that each similar individual should be treated similarly, while group fairness requires that the protected groups should be treated similarly to the advantaged group or the populations as a whole. Our work focuses on the item popularity fairness from a group level, yet it can be used to solve multiple types of fairness simultaneously by properly defining and adding them as additional objectives.",
|
| 327 |
+
"bbox": [
|
| 328 |
+
511,
|
| 329 |
+
106,
|
| 330 |
+
913,
|
| 331 |
+
217
|
| 332 |
+
],
|
| 333 |
+
"page_idx": 1
|
| 334 |
+
},
|
| 335 |
+
{
|
| 336 |
+
"type": "text",
|
| 337 |
+
"text": "The relevant methods related to fairness in ranking and recommendation can be roughly divided into three categories: preprocessing, in-processing and post-processing algorithms [14, 28, 29]. First of all, pre-processing methods usually aim to minimize the bias in data as bias may arise from the data source. This includes fairness-aware sampling methodologies in the data collection process to cover items of all groups, or balancing methodologies to increase coverage of minority groups, or repairing methodologies to ensure label correctness, remove disparate impact [14]. However, most of the time, we do not have access to the data collection process, but are given the dataset. Secondly, in-processing methods aim at encoding fairness as part of the objective function, typically as a regularizer [1, 4]. Finally, post-processing methods tend to modify the presentations of the results, e.g., re-ranking through linear programming [25, 43, 53] or multi-armed bandit [5]. However, there is no free lunch, imposing fairness constraints to the main learning task introduces a trade-off between these objectives, which have been asserted in several studies [22, 23, 32, 55], e.g., Dutta et al. [12] showed that because of noise on the underrepresented groups the trade-off between accuracy and equality of opportunity exists.",
|
| 338 |
+
"bbox": [
|
| 339 |
+
511,
|
| 340 |
+
217,
|
| 341 |
+
913,
|
| 342 |
+
494
|
| 343 |
+
],
|
| 344 |
+
"page_idx": 1
|
| 345 |
+
},
|
| 346 |
+
{
|
| 347 |
+
"type": "text",
|
| 348 |
+
"text": "Unfortunately, there is very few work of fairness-aware recommendation that can be found to study the fairness-utility trade-off. The closest one to our work is [47], which mainly focused on the trade-off between two-sided fairness in e-commerce recommendation. [47] used a traditional multiple gradient descent algorithm to solve multi-objective optimization problems, meaning that they need to train one network per point on the Pareto frontier, while our MoFIR generates the full Pareto frontier of solutions in a single optimization run. Besides, the authors relaxed all their objectives to get their differentiable approximations, which, to some extent, hurt its performance, as is shown in the experiment part, Fig. 2.",
|
| 349 |
+
"bbox": [
|
| 350 |
+
511,
|
| 351 |
+
494,
|
| 352 |
+
913,
|
| 353 |
+
647
|
| 354 |
+
],
|
| 355 |
+
"page_idx": 1
|
| 356 |
+
},
|
| 357 |
+
{
|
| 358 |
+
"type": "text",
|
| 359 |
+
"text": "2.2 Multi-Objective Recommendation",
|
| 360 |
+
"text_level": 1,
|
| 361 |
+
"bbox": [
|
| 362 |
+
513,
|
| 363 |
+
669,
|
| 364 |
+
834,
|
| 365 |
+
684
|
| 366 |
+
],
|
| 367 |
+
"page_idx": 1
|
| 368 |
+
},
|
| 369 |
+
{
|
| 370 |
+
"type": "text",
|
| 371 |
+
"text": "Recommendation with multiple objectives is a significant but challenging problem, with the core difficulty stemming from the potential conflicts between objectives. In most real-world recommendation systems, recommendation accuracy (e.g., CTR-oriented objectives) is the dominating factor, while some studies believed that other characteristics, such as usability, profitability, usefulness, or diversity should be considered at the same time [20, 21, 36]. When multiple objectives are concerned, it is expected to get a Pareto optimal/efficient recommendation [31, 39, 50].",
|
| 372 |
+
"bbox": [
|
| 373 |
+
511,
|
| 374 |
+
686,
|
| 375 |
+
913,
|
| 376 |
+
811
|
| 377 |
+
],
|
| 378 |
+
"page_idx": 1
|
| 379 |
+
},
|
| 380 |
+
{
|
| 381 |
+
"type": "text",
|
| 382 |
+
"text": "The approaches on recommendation with multiple objectives to achieve Pareto efficiency can be categorized into two groups: evolutionary algorithm [60] and scalarization [31]. Ribeiro et al. [39, 40] jointly considered multiple trained recommendation algorithms with a Pareto-efficient manner, and conducted an evolutionary algorithm to find the appropriate parameters for weighted model",
|
| 383 |
+
"bbox": [
|
| 384 |
+
511,
|
| 385 |
+
813,
|
| 386 |
+
913,
|
| 387 |
+
896
|
| 388 |
+
],
|
| 389 |
+
"page_idx": 1
|
| 390 |
+
},
|
| 391 |
+
{
|
| 392 |
+
"type": "text",
|
| 393 |
+
"text": "combination. Besides, Lin et al. [31] optimized GMV and CTR in e-commerce simultaneously based on multiple-gradient descent algorithm, which combines scalarization with Pareto-efficient SGD, and used a relaxed KKT condition. Our proposed method, MoFIR, belongs to scalarization, however, compared with earlier attempts in multi-objective recommendation [31, 47], our method learns to adapt a single network for all the trade-off combinations of the inputted preference vectors, therefore it is able to approximate all solutions of the Pareto frontier after a single optimization run.",
|
| 394 |
+
"bbox": [
|
| 395 |
+
81,
|
| 396 |
+
106,
|
| 397 |
+
480,
|
| 398 |
+
232
|
| 399 |
+
],
|
| 400 |
+
"page_idx": 2
|
| 401 |
+
},
|
| 402 |
+
{
|
| 403 |
+
"type": "text",
|
| 404 |
+
"text": "2.3 RL for Recommendation",
|
| 405 |
+
"text_level": 1,
|
| 406 |
+
"bbox": [
|
| 407 |
+
83,
|
| 408 |
+
244,
|
| 409 |
+
330,
|
| 410 |
+
258
|
| 411 |
+
],
|
| 412 |
+
"page_idx": 2
|
| 413 |
+
},
|
| 414 |
+
{
|
| 415 |
+
"type": "text",
|
| 416 |
+
"text": "RL-based recommenders have recently become an important and attractive topic, as it is natural to model the recommendation process as a Markov Decision Process (MDP) and use RL agents to capture the dynamics in recommendation scenarios [34, 35, 41, 48, 49, 58]. Generally speaking, RL-based recommendation systems can be further classified into two categories: policy-based [6, 9, 11] or value-based [37, 56, 58] methods. On one hand, policy-based methods aim to learn strategies that generate actions based on state (such as recommending items). These methods are optimized by policy gradient, which can be deterministic approaches [11, 30, 42] or stochastic approaches [6, 9]. On the other hand, value-based methods aims to model the quality (e.g. Q-value) of actions so that the best action corresponds to the one with the highest Q-value. Apart from using RL in general recommendation task, there also existed several works focusing on using RL in explainable recommendation through knowledge graphs [48, 49].",
|
| 417 |
+
"bbox": [
|
| 418 |
+
81,
|
| 419 |
+
262,
|
| 420 |
+
482,
|
| 421 |
+
484
|
| 422 |
+
],
|
| 423 |
+
"page_idx": 2
|
| 424 |
+
},
|
| 425 |
+
{
|
| 426 |
+
"type": "text",
|
| 427 |
+
"text": "Currently, there are very few studies using MORL in recommendation. Xie et al. [50] studied multi-objective recommendation to capture users' objective-level preferences. However, unlike our proposed MoFIR, which learns a single parametric representation for optimal recommendation policies, they conducted a Pareto-oriented RL to generate the personalized objective weights in scalarization for each user, which is a totally different problem formulation.",
|
| 428 |
+
"bbox": [
|
| 429 |
+
81,
|
| 430 |
+
484,
|
| 431 |
+
482,
|
| 432 |
+
580
|
| 433 |
+
],
|
| 434 |
+
"page_idx": 2
|
| 435 |
+
},
|
| 436 |
+
{
|
| 437 |
+
"type": "text",
|
| 438 |
+
"text": "3 PRELIMINARY",
|
| 439 |
+
"text_level": 1,
|
| 440 |
+
"bbox": [
|
| 441 |
+
83,
|
| 442 |
+
593,
|
| 443 |
+
238,
|
| 444 |
+
607
|
| 445 |
+
],
|
| 446 |
+
"page_idx": 2
|
| 447 |
+
},
|
| 448 |
+
{
|
| 449 |
+
"type": "text",
|
| 450 |
+
"text": "3.1 Markov Decision Processes",
|
| 451 |
+
"text_level": 1,
|
| 452 |
+
"bbox": [
|
| 453 |
+
83,
|
| 454 |
+
613,
|
| 455 |
+
346,
|
| 456 |
+
627
|
| 457 |
+
],
|
| 458 |
+
"page_idx": 2
|
| 459 |
+
},
|
| 460 |
+
{
|
| 461 |
+
"type": "text",
|
| 462 |
+
"text": "In reinforcement learning, agents aim at learning to act in an environment in order to maximize their cumulative reward. A popular model for such problems is Markov Decision Processes (MDP), which is a tuple $M = (S, \\mathcal{A}, \\mathcal{P}, \\mathcal{R}, \\mu, \\gamma)$ , where $S$ is a set of $n$ states, $\\mathcal{A}$ is a set of $m$ actions, $\\mathcal{P}: S \\times \\mathcal{A} \\times S \\to [0,1]$ denotes the transition probability function, $\\mathcal{R}: S \\times \\mathcal{A} \\times S \\to \\mathbb{R}$ is the reward function, $\\mu: S \\to [0,1]$ is the starting state distribution, and $\\gamma \\in [0,1)$ is the discount factor. We denote the set of all stationary policies by $\\Pi$ , where a stationary policy $\\pi \\in \\Pi: S \\to P(\\mathcal{A})$ is a map from states to probability distributions over actions, with $\\pi(a|s)$ denoting the probability of selecting action $a$ in state $s$ . We aim to learn a policy $\\pi \\in \\Pi$ , able to maximize a performance measure, $J(\\pi)$ , which is typically taken to be the infinite horizon discounted total return,",
|
| 463 |
+
"bbox": [
|
| 464 |
+
81,
|
| 465 |
+
632,
|
| 466 |
+
482,
|
| 467 |
+
811
|
| 468 |
+
],
|
| 469 |
+
"page_idx": 2
|
| 470 |
+
},
|
| 471 |
+
{
|
| 472 |
+
"type": "equation",
|
| 473 |
+
"text": "\n$$\nJ (\\pi) \\doteq \\underset {\\tau \\sim \\pi} {\\mathrm {E}} \\left[ \\sum_ {t = 0} ^ {\\infty} \\gamma^ {\\top} R \\left(s _ {t}, a _ {t}, s _ {t + 1}\\right) \\right], \\tag {1}\n$$\n",
|
| 474 |
+
"text_format": "latex",
|
| 475 |
+
"bbox": [
|
| 476 |
+
181,
|
| 477 |
+
816,
|
| 478 |
+
480,
|
| 479 |
+
849
|
| 480 |
+
],
|
| 481 |
+
"page_idx": 2
|
| 482 |
+
},
|
| 483 |
+
{
|
| 484 |
+
"type": "text",
|
| 485 |
+
"text": "where $\\tau$ denotes a trajectory, e.g., $\\tau = (s_0, a_0, s_1, a_1, \\ldots)$ , and $\\tau \\sim \\pi$ indicates that the distribution over trajectories depends on $\\pi: s_0 \\sim \\mu, a_t \\sim \\pi (\\cdot | s_t), s_{t+1} \\sim P (\\cdot | s_t, a_t)$ . We denote $R(\\tau)$ as the",
|
| 486 |
+
"bbox": [
|
| 487 |
+
81,
|
| 488 |
+
854,
|
| 489 |
+
482,
|
| 490 |
+
896
|
| 491 |
+
],
|
| 492 |
+
"page_idx": 2
|
| 493 |
+
},
|
| 494 |
+
{
|
| 495 |
+
"type": "text",
|
| 496 |
+
"text": "discounted rewards of a trajectory, the on-policy value function as $V^{\\pi}(s) \\doteq \\mathrm{E}_{\\tau \\sim \\pi} [R(\\tau)|s_0 = s]$ , the on-policy action-value function as $Q^{\\pi}(s,a) \\doteq \\mathrm{E}_{\\tau \\sim \\pi} [R(\\tau)|s_0 = s,a_0 = a]$ , and the advantage function as $A^{\\pi}(s,a) \\doteq Q^{\\pi}(s,a) - V^{\\pi}(s)$ .",
|
| 497 |
+
"bbox": [
|
| 498 |
+
513,
|
| 499 |
+
106,
|
| 500 |
+
911,
|
| 501 |
+
162
|
| 502 |
+
],
|
| 503 |
+
"page_idx": 2
|
| 504 |
+
},
|
| 505 |
+
{
|
| 506 |
+
"type": "text",
|
| 507 |
+
"text": "3.2 Multi-Objective Markov Decision Processes",
|
| 508 |
+
"text_level": 1,
|
| 509 |
+
"bbox": [
|
| 510 |
+
514,
|
| 511 |
+
175,
|
| 512 |
+
910,
|
| 513 |
+
190
|
| 514 |
+
],
|
| 515 |
+
"page_idx": 2
|
| 516 |
+
},
|
| 517 |
+
{
|
| 518 |
+
"type": "text",
|
| 519 |
+
"text": "Multi-Objective Markov Decision Processes (MOMDP) are MDPs with a vector-valued reward function $\\mathbf{r}_t = \\mathbf{R}(s_t, a_t)$ , where each component of $\\mathbf{r}_t$ corresponds to one certain objective. A scalarization function $f$ maps the multi-objective value of a policy $\\pi$ to a scalar value. In this work, we consider the commonly-used class of MOMDPs with linear preference functions, e.g., $f_{\\omega}(\\mathbf{R}(s, a)) = \\boldsymbol{\\omega} \\cdot \\mathbf{R}(s, a)$ . It is worth noting that if $\\boldsymbol{\\omega}$ is fixed to a single value, this MOMDP collapses into a standard MDP. An optimal solution for an MOMDP under linear $f$ is a convex coverage set (CCS), e.g., a set of undominated policies containing at least one optimal policy for any linear scalarization.",
|
| 520 |
+
"bbox": [
|
| 521 |
+
511,
|
| 522 |
+
194,
|
| 523 |
+
913,
|
| 524 |
+
345
|
| 525 |
+
],
|
| 526 |
+
"page_idx": 2
|
| 527 |
+
},
|
| 528 |
+
{
|
| 529 |
+
"type": "text",
|
| 530 |
+
"text": "3.3 Conditioned Network",
|
| 531 |
+
"text_level": 1,
|
| 532 |
+
"bbox": [
|
| 533 |
+
514,
|
| 534 |
+
359,
|
| 535 |
+
735,
|
| 536 |
+
373
|
| 537 |
+
],
|
| 538 |
+
"page_idx": 2
|
| 539 |
+
},
|
| 540 |
+
{
|
| 541 |
+
"type": "text",
|
| 542 |
+
"text": "Abels et al. [3] studied multi-objective reinforcement learning with linear preferences and proposed a novel algorithm for learning a single Q-network that is optimized over the entire space of preferences in a domain. The main idea is called Conditioned Network (CN), in which a Q-Network is augmented to output weight-dependent multi-objective Q-value-vectors, as is shown in the right side of Fig. 1 (Conditioned Critic Network, where action and state representations together with weight vector are inputted to the network). Besides, to promote quick convergence on the new weight vector's policy and to maintain previously learned policies, the authors updated each experience tuple in a mini-batch with respect to the current weight vector and a random previously encountered weight vector. Specially, given a mini-batch of trajectories, they computed the loss for a given trajectory $(s_j,a_j,\\mathbf{r}_j,s_{j + 1})$ as the sum of the loss on the active weight vector $\\omega_{t}$ and on $\\omega_{j}$ randomly sampled from the set of encountered weights.",
|
| 543 |
+
"bbox": [
|
| 544 |
+
511,
|
| 545 |
+
377,
|
| 546 |
+
913,
|
| 547 |
+
599
|
| 548 |
+
],
|
| 549 |
+
"page_idx": 2
|
| 550 |
+
},
|
| 551 |
+
{
|
| 552 |
+
"type": "equation",
|
| 553 |
+
"text": "\n$$\n\\frac {1}{2} \\left[ \\left| \\mathbf {y} _ {\\omega_ {t}} ^ {(j)} - \\mathbf {Q} _ {C N} \\left(a _ {j}, s _ {j}; \\omega_ {t}\\right) \\right| + \\left| \\mathbf {y} _ {\\omega_ {j}} ^ {(j)} - \\mathbf {Q} _ {C N} \\left(a _ {j}, s _ {j}; \\omega_ {j}\\right) \\right| \\right] \\tag {2}\n$$\n",
|
| 554 |
+
"text_format": "latex",
|
| 555 |
+
"bbox": [
|
| 556 |
+
531,
|
| 557 |
+
606,
|
| 558 |
+
911,
|
| 559 |
+
632
|
| 560 |
+
],
|
| 561 |
+
"page_idx": 2
|
| 562 |
+
},
|
| 563 |
+
{
|
| 564 |
+
"type": "equation",
|
| 565 |
+
"text": "\n$$\n\\mathbf {y} _ {\\omega} ^ {(j)} = \\mathbf {r} _ {j} + \\gamma \\mathbf {Q} _ {C N} ^ {-} \\left(\\underset {a \\in A} {\\operatorname {a r g m a x}} \\mathbf {Q} _ {C N} (a, s _ {j + 1}; \\omega) \\cdot \\omega , s _ {j + 1}; \\omega\\right) \\tag {3}\n$$\n",
|
| 566 |
+
"text_format": "latex",
|
| 567 |
+
"bbox": [
|
| 568 |
+
535,
|
| 569 |
+
651,
|
| 570 |
+
911,
|
| 571 |
+
681
|
| 572 |
+
],
|
| 573 |
+
"page_idx": 2
|
| 574 |
+
},
|
| 575 |
+
{
|
| 576 |
+
"type": "text",
|
| 577 |
+
"text": "where $\\mathbf{Q}_{CN}(a,s;\\omega)$ is the network's Q-value-vector for action $a$ in state $s$ and with weight vector $\\omega$ . They claimed that training the same sample on two different weight vectors has the added advantage of forcing the network to identify that different weight vectors can have different Q-values for the same state. A more comprehensive review of MOMDPs and CN can be seen in [3].",
|
| 578 |
+
"bbox": [
|
| 579 |
+
511,
|
| 580 |
+
686,
|
| 581 |
+
913,
|
| 582 |
+
770
|
| 583 |
+
],
|
| 584 |
+
"page_idx": 2
|
| 585 |
+
},
|
| 586 |
+
{
|
| 587 |
+
"type": "text",
|
| 588 |
+
"text": "In the original paper, the authors only proposed an algorithm based on Double DQN with discrete action space, which is not suitable for recommendation scenarios as the action space of recommendation is very large. Therefore, we modify the traditional DDPG [42] by introducing conditioned network into its policy network as well as critic network, and more importantly, we modify the original loss functions for both of them. We choose DDPG as it is a commonly adopted methods in RL, while our modification can be generalized to other reinforcement learning methods, such as trust",
|
| 589 |
+
"bbox": [
|
| 590 |
+
511,
|
| 591 |
+
771,
|
| 592 |
+
913,
|
| 593 |
+
896
|
| 594 |
+
],
|
| 595 |
+
"page_idx": 2
|
| 596 |
+
},
|
| 597 |
+
{
|
| 598 |
+
"type": "text",
|
| 599 |
+
"text": "region policy optimization. More details about our modification will be introduced in Section 5.",
|
| 600 |
+
"bbox": [
|
| 601 |
+
81,
|
| 602 |
+
106,
|
| 603 |
+
480,
|
| 604 |
+
133
|
| 605 |
+
],
|
| 606 |
+
"page_idx": 3
|
| 607 |
+
},
|
| 608 |
+
{
|
| 609 |
+
"type": "text",
|
| 610 |
+
"text": "4 PROBLEM FORMULATION",
|
| 611 |
+
"text_level": 1,
|
| 612 |
+
"bbox": [
|
| 613 |
+
83,
|
| 614 |
+
146,
|
| 615 |
+
336,
|
| 616 |
+
161
|
| 617 |
+
],
|
| 618 |
+
"page_idx": 3
|
| 619 |
+
},
|
| 620 |
+
{
|
| 621 |
+
"type": "text",
|
| 622 |
+
"text": "4.1 MOMDP for Recommendation",
|
| 623 |
+
"text_level": 1,
|
| 624 |
+
"bbox": [
|
| 625 |
+
83,
|
| 626 |
+
167,
|
| 627 |
+
375,
|
| 628 |
+
181
|
| 629 |
+
],
|
| 630 |
+
"page_idx": 3
|
| 631 |
+
},
|
| 632 |
+
{
|
| 633 |
+
"type": "text",
|
| 634 |
+
"text": "The recommendation agent will take the feature representation of the current user and item candidates $\\mathcal{I}$ as input, and generate a list of items $L \\in \\mathcal{I}^K$ to recommend, where $K \\geq 1$ after a user sends a request to it at timestamp $t \\in (t_1, t_2, t_3, t_4, t_5, \\ldots)$ . User $u$ who has received the list of recommended items $L$ will give feedback $B$ via clicking on this set of items, which can be used to measure the recommendation performance. Besides, based on the recommendation results, we will acquire the total number of exposure for each item group $G$ , which can later be used to measure fairness. Thus, the state $s$ can be represented by user features (e.g., user's recent click history), action $a$ is represented by items in $L$ , reward $r$ is the immediate reward vector after taking action $a$ , with each component of $r$ corresponds to one certain objective (e.g., whether user clicks on an item in $L$ for utility objective or whether an item comes from predefined disadvantageous group for fairness objective). The problem formulation is formally presented as follows:",
|
| 635 |
+
"bbox": [
|
| 636 |
+
81,
|
| 637 |
+
185,
|
| 638 |
+
482,
|
| 639 |
+
419
|
| 640 |
+
],
|
| 641 |
+
"page_idx": 3
|
| 642 |
+
},
|
| 643 |
+
{
|
| 644 |
+
"type": "list",
|
| 645 |
+
"sub_type": "text",
|
| 646 |
+
"list_items": [
|
| 647 |
+
"- State $S$ : A state $s_t$ is the representation of user's most recent positive interaction history $H_t$ with the recommendation system, together with his/her demographic information (if exists).",
|
| 648 |
+
"- Action $\\mathcal{A}$ : An action $a_{t} = \\{a_{t}^{1},\\dots,a_{t}^{K}\\}$ is a recommendation list with $K$ items to a user $u$ at time $t$ with current state $s_t$ .",
|
| 649 |
+
"- Vector Reward Function $r$ : A vector-valued reward function $\\mathbf{r}_t = \\mathbf{R}(s_t, a_t)$ , where each component of $\\mathbf{r}_t$ corresponds to one certain objective. In this work, the reward vector includes two elements: utility objective and fairness objective. The details of the definition of our task-specific objectives will be introduced in the following section.",
|
| 650 |
+
"- Scalarization function $f$ : In this paper, we consider the class of MOMDPs with linear preferences functions $f$ , which is a commonly-used scalarization function. Under this setting, each objective is given a weight $\\omega_{i}$ , such that the scalarization function becomes $f_{\\omega}(\\mathbf{R}) = \\boldsymbol{\\omega} \\cdot \\mathbf{R}$ , where each $\\omega_{i} \\in [0,1]$ and $\\sum_{i} \\omega_{i} = 1$ .",
|
| 651 |
+
"- Discount rate $\\gamma$ : $\\gamma \\in [0,1]$ is a discount factor measuring the present value of long-term rewards."
|
| 652 |
+
],
|
| 653 |
+
"bbox": [
|
| 654 |
+
83,
|
| 655 |
+
424,
|
| 656 |
+
482,
|
| 657 |
+
672
|
| 658 |
+
],
|
| 659 |
+
"page_idx": 3
|
| 660 |
+
},
|
| 661 |
+
{
|
| 662 |
+
"type": "text",
|
| 663 |
+
"text": "We aim to learn a policy $\\pi$ , mapping from states to actions, to generate recommendations that achieve the Pareto efficient trade-off between fairness and utility.",
|
| 664 |
+
"bbox": [
|
| 665 |
+
81,
|
| 666 |
+
676,
|
| 667 |
+
482,
|
| 668 |
+
718
|
| 669 |
+
],
|
| 670 |
+
"page_idx": 3
|
| 671 |
+
},
|
| 672 |
+
{
|
| 673 |
+
"type": "text",
|
| 674 |
+
"text": "4.2 Multi-Objectives in Fair Recommendation",
|
| 675 |
+
"text_level": 1,
|
| 676 |
+
"bbox": [
|
| 677 |
+
83,
|
| 678 |
+
731,
|
| 679 |
+
468,
|
| 680 |
+
746
|
| 681 |
+
],
|
| 682 |
+
"page_idx": 3
|
| 683 |
+
},
|
| 684 |
+
{
|
| 685 |
+
"type": "text",
|
| 686 |
+
"text": "The reward vector is designed to measure the recommendation system's gain regarding utility and fairness. While our method is capable of dealing with multiple objectives simultaneously, for simplicity we deliberately select click through rate and item (group) exposure fairness as our two objectives recommendation utility and item exposure fairness respectively.",
|
| 687 |
+
"bbox": [
|
| 688 |
+
81,
|
| 689 |
+
750,
|
| 690 |
+
482,
|
| 691 |
+
833
|
| 692 |
+
],
|
| 693 |
+
"page_idx": 3
|
| 694 |
+
},
|
| 695 |
+
{
|
| 696 |
+
"type": "text",
|
| 697 |
+
"text": "4.2.1 Utility Objective. On one hand, given the recommendation based on the action $a_{t}$ and the user state $s_{t}$ , the user will provide feedback, e.g. click or purchase, etc. The recommender receives immediate reward $R_{u}(s_{t},a_{t})$ according to the user's positive feedback.",
|
| 698 |
+
"bbox": [
|
| 699 |
+
81,
|
| 700 |
+
840,
|
| 701 |
+
482,
|
| 702 |
+
896
|
| 703 |
+
],
|
| 704 |
+
"page_idx": 3
|
| 705 |
+
},
|
| 706 |
+
{
|
| 707 |
+
"type": "text",
|
| 708 |
+
"text": "We also normalize the reward value by dividing $K$ , which is the length of the recommendation list.",
|
| 709 |
+
"bbox": [
|
| 710 |
+
513,
|
| 711 |
+
106,
|
| 712 |
+
911,
|
| 713 |
+
133
|
| 714 |
+
],
|
| 715 |
+
"page_idx": 3
|
| 716 |
+
},
|
| 717 |
+
{
|
| 718 |
+
"type": "equation",
|
| 719 |
+
"text": "\n$$\nR _ {u} \\left(s _ {t}, a _ {t}, s _ {t + 1}\\right) = \\frac {\\sum_ {l = 1} ^ {K} \\mathbb {1} \\left(a _ {t} ^ {l} \\text {g e t s p o s i t i v e f e e d b a c k}\\right)}{K} \\tag {4}\n$$\n",
|
| 720 |
+
"text_format": "latex",
|
| 721 |
+
"bbox": [
|
| 722 |
+
570,
|
| 723 |
+
145,
|
| 724 |
+
911,
|
| 725 |
+
174
|
| 726 |
+
],
|
| 727 |
+
"page_idx": 3
|
| 728 |
+
},
|
| 729 |
+
{
|
| 730 |
+
"type": "text",
|
| 731 |
+
"text": "4.2.2 Fairness Objective. On the other hand, based on the recommendation list $a_{t}$ , the total number of exposure of each item group will be counted and used to measure exposure fairness. Here, we calculate the ratio of items from sensitive group to the total number of recommended items, and use a hinge loss with margin $\\beta$ to punish the abuse of fairness. Usually, we set $\\beta$ to be the ratio of the number of items in sensitive group to the total number of items.",
|
| 732 |
+
"bbox": [
|
| 733 |
+
513,
|
| 734 |
+
179,
|
| 735 |
+
915,
|
| 736 |
+
276
|
| 737 |
+
],
|
| 738 |
+
"page_idx": 3
|
| 739 |
+
},
|
| 740 |
+
{
|
| 741 |
+
"type": "equation",
|
| 742 |
+
"text": "\n$$\nR _ {f} \\left(s _ {t}, a _ {t}, s _ {t + 1}\\right) = \\max \\left(\\frac {\\sum_ {l = 1} ^ {K} \\mathbb {1} \\left(a _ {t} ^ {l} i s i n s e s i t i v e g r o u p\\right)}{K}, \\beta\\right) \\tag {5}\n$$\n",
|
| 743 |
+
"text_format": "latex",
|
| 744 |
+
"bbox": [
|
| 745 |
+
529,
|
| 746 |
+
291,
|
| 747 |
+
911,
|
| 748 |
+
325
|
| 749 |
+
],
|
| 750 |
+
"page_idx": 3
|
| 751 |
+
},
|
| 752 |
+
{
|
| 753 |
+
"type": "text",
|
| 754 |
+
"text": "5 PROPOSED FRAMEWORK",
|
| 755 |
+
"text_level": 1,
|
| 756 |
+
"bbox": [
|
| 757 |
+
514,
|
| 758 |
+
334,
|
| 759 |
+
764,
|
| 760 |
+
348
|
| 761 |
+
],
|
| 762 |
+
"page_idx": 3
|
| 763 |
+
},
|
| 764 |
+
{
|
| 765 |
+
"type": "text",
|
| 766 |
+
"text": "5.1 The Conditioned Actor",
|
| 767 |
+
"text_level": 1,
|
| 768 |
+
"bbox": [
|
| 769 |
+
514,
|
| 770 |
+
354,
|
| 771 |
+
746,
|
| 772 |
+
369
|
| 773 |
+
],
|
| 774 |
+
"page_idx": 3
|
| 775 |
+
},
|
| 776 |
+
{
|
| 777 |
+
"type": "text",
|
| 778 |
+
"text": "The conditioned actor is almost the same as traditional actor except that we condition the predictions of the policy network to the preference vectors. Practically, we concatenate the state representation $s_t$ with the vector $\\omega$ and train a neural network on this joint feature space, which is depicted in Fig. 1 (Conditioned Actor Network). The conditioned actor $\\pi$ parameterized by $\\theta^{\\pi}$ serves as a stochastic policy that samples an action $a_t \\in \\mathcal{I}^K$ given the current state $s_t \\in \\mathbb{R}^m$ of a user and the preference vector $\\omega$ .",
|
| 779 |
+
"bbox": [
|
| 780 |
+
513,
|
| 781 |
+
373,
|
| 782 |
+
915,
|
| 783 |
+
484
|
| 784 |
+
],
|
| 785 |
+
"page_idx": 3
|
| 786 |
+
},
|
| 787 |
+
{
|
| 788 |
+
"type": "text",
|
| 789 |
+
"text": "First of all, we define $s_t$ as the concatenation of the user embedding $\\mathbf{e}_u \\in \\mathbb{R}^d$ and their recent history embedding $\\mathbf{h}_u$ :",
|
| 790 |
+
"bbox": [
|
| 791 |
+
513,
|
| 792 |
+
484,
|
| 793 |
+
915,
|
| 794 |
+
512
|
| 795 |
+
],
|
| 796 |
+
"page_idx": 3
|
| 797 |
+
},
|
| 798 |
+
{
|
| 799 |
+
"type": "equation",
|
| 800 |
+
"text": "\n$$\ns _ {t} = \\left[ \\mathbf {e} _ {u}; \\mathbf {h} _ {u} \\right], \\tag {6}\n$$\n",
|
| 801 |
+
"text_format": "latex",
|
| 802 |
+
"bbox": [
|
| 803 |
+
669,
|
| 804 |
+
518,
|
| 805 |
+
911,
|
| 806 |
+
534
|
| 807 |
+
],
|
| 808 |
+
"page_idx": 3
|
| 809 |
+
},
|
| 810 |
+
{
|
| 811 |
+
"type": "text",
|
| 812 |
+
"text": "where the recent history embedding $\\mathbf{h}_u = \\mathrm{GRU}(H_t)$ is acquired by encoding $N$ item embeddings via Gated Recurrent Units (GRU) [10], and $H_{t} = \\{H_{t}^{1},H_{t}^{2},\\ldots ,H_{t}^{N}\\}$ denotes the most recent $N$ items from user $u$ 's interaction history. We define the user's recent history is organized as a queue with fixed length, and update it only if the recommended item $a_{t}^{l}\\in a_{t}$ receives a positive feedback, which ensures that the state can always represent the user's most recent interests.",
|
| 813 |
+
"bbox": [
|
| 814 |
+
513,
|
| 815 |
+
539,
|
| 816 |
+
913,
|
| 817 |
+
648
|
| 818 |
+
],
|
| 819 |
+
"page_idx": 3
|
| 820 |
+
},
|
| 821 |
+
{
|
| 822 |
+
"type": "equation",
|
| 823 |
+
"text": "\n$$\nH _ {t + 1} = \\left\\{ \\begin{array}{c c} \\left\\{H _ {t} ^ {2}, \\dots , H _ {t} ^ {N}, a _ {t} ^ {l} \\right\\} & a _ {t} ^ {l} \\text {g e t s p o s i t i v e f e e d b a c k} \\\\ H _ {t} & \\text {O t h e r w i s e} \\end{array} \\right. \\tag {7}\n$$\n",
|
| 824 |
+
"text_format": "latex",
|
| 825 |
+
"bbox": [
|
| 826 |
+
549,
|
| 827 |
+
652,
|
| 828 |
+
911,
|
| 829 |
+
680
|
| 830 |
+
],
|
| 831 |
+
"page_idx": 3
|
| 832 |
+
},
|
| 833 |
+
{
|
| 834 |
+
"type": "text",
|
| 835 |
+
"text": "Secondly, we assume that the probability of actions conditioned on states and preferences follows a continuous high-dimensional Gaussian distribution. We also assume it has mean $\\mu \\in \\mathbb{R}^{Kd}$ and covariance matrix $\\Sigma \\in \\mathbb{R}^{Kd\\times Kd}$ (only elements at diagonal are nonzeros and there are actually $Kd$ parameters). In order to achieve better representation ability, we approximate the distribution via a deep neural network, which maps the encoded state $s_t$ and preferences $\\omega$ to $\\mu$ and $\\Sigma$ . Specifically, we adopt a Multi Layer Perceptron (MLP) with $\\tanh (\\cdot)$ as the non-linear activation function,",
|
| 836 |
+
"bbox": [
|
| 837 |
+
513,
|
| 838 |
+
686,
|
| 839 |
+
915,
|
| 840 |
+
811
|
| 841 |
+
],
|
| 842 |
+
"page_idx": 3
|
| 843 |
+
},
|
| 844 |
+
{
|
| 845 |
+
"type": "equation",
|
| 846 |
+
"text": "\n$$\n(\\mu , \\Sigma) = \\operatorname {M L P} (s _ {t}, \\omega). \\tag {8}\n$$\n",
|
| 847 |
+
"text_format": "latex",
|
| 848 |
+
"bbox": [
|
| 849 |
+
650,
|
| 850 |
+
818,
|
| 851 |
+
911,
|
| 852 |
+
833
|
| 853 |
+
],
|
| 854 |
+
"page_idx": 3
|
| 855 |
+
},
|
| 856 |
+
{
|
| 857 |
+
"type": "text",
|
| 858 |
+
"text": "Once received $\\mu$ and $\\Sigma$ , we sample a vector from the acquired Gaussian distribution $\\mathcal{N}(\\mu, \\Sigma)$ and convert it into a proposal matrix $W \\sim \\mathcal{N}(\\mu, \\Sigma) \\in \\mathbb{R}^{K \\times d}$ , whose $k$ -th row, denoted by $W_k \\in \\mathbb{R}^d$ , represents an \"ideal\" embedding of a virtual item.",
|
| 859 |
+
"bbox": [
|
| 860 |
+
513,
|
| 861 |
+
839,
|
| 862 |
+
915,
|
| 863 |
+
896
|
| 864 |
+
],
|
| 865 |
+
"page_idx": 3
|
| 866 |
+
},
|
| 867 |
+
{
|
| 868 |
+
"type": "image",
|
| 869 |
+
"img_path": "images/c93d836d0617838549cc8ffbbbabcc2194d2e7b4c9fd6f61fe888e8c18c3caee.jpg",
|
| 870 |
+
"image_caption": [
|
| 871 |
+
"Figure 1: The architecture of the proposed MoFIR."
|
| 872 |
+
],
|
| 873 |
+
"image_footnote": [],
|
| 874 |
+
"bbox": [
|
| 875 |
+
86,
|
| 876 |
+
101,
|
| 877 |
+
919,
|
| 878 |
+
324
|
| 879 |
+
],
|
| 880 |
+
"page_idx": 4
|
| 881 |
+
},
|
| 882 |
+
{
|
| 883 |
+
"type": "text",
|
| 884 |
+
"text": "Finally, the probability matrix $P \\in \\mathbb{R}^{K \\times |I|}$ of selecting the $k$ -th candidate item is given by $P_{k} = \\mathrm{softmax}(W_{k} \\mathcal{V}^{\\top})$ , $k = 1, \\dots, K$ , where $\\mathcal{V} \\in \\mathbb{R}^{|\\mathcal{I}| \\times d}$ is the embedding matrix of all candidate items. This is equivalent to using dot product to determine similarity between $W_{k}$ and any item. As the result of taking the action at step $t$ , the actor recommends the $k$ -th item as follows:",
|
| 885 |
+
"bbox": [
|
| 886 |
+
81,
|
| 887 |
+
354,
|
| 888 |
+
482,
|
| 889 |
+
438
|
| 890 |
+
],
|
| 891 |
+
"page_idx": 4
|
| 892 |
+
},
|
| 893 |
+
{
|
| 894 |
+
"type": "equation",
|
| 895 |
+
"text": "\n$$\na _ {t} ^ {k} = \\underset {i \\in \\{1, \\dots , | I | \\}} {\\arg \\max } P _ {k, i}, \\forall k = 1, \\dots , K, \\tag {9}\n$$\n",
|
| 896 |
+
"text_format": "latex",
|
| 897 |
+
"bbox": [
|
| 898 |
+
184,
|
| 899 |
+
444,
|
| 900 |
+
480,
|
| 901 |
+
468
|
| 902 |
+
],
|
| 903 |
+
"page_idx": 4
|
| 904 |
+
},
|
| 905 |
+
{
|
| 906 |
+
"type": "text",
|
| 907 |
+
"text": "where $P_{k,i}$ denotes the probability of taking the $i$ -th item at rank $k$ .",
|
| 908 |
+
"bbox": [
|
| 909 |
+
81,
|
| 910 |
+
473,
|
| 911 |
+
482,
|
| 912 |
+
488
|
| 913 |
+
],
|
| 914 |
+
"page_idx": 4
|
| 915 |
+
},
|
| 916 |
+
{
|
| 917 |
+
"type": "text",
|
| 918 |
+
"text": "5.2 The Conditioned Critic",
|
| 919 |
+
"text_level": 1,
|
| 920 |
+
"bbox": [
|
| 921 |
+
83,
|
| 922 |
+
500,
|
| 923 |
+
316,
|
| 924 |
+
513
|
| 925 |
+
],
|
| 926 |
+
"page_idx": 4
|
| 927 |
+
},
|
| 928 |
+
{
|
| 929 |
+
"type": "text",
|
| 930 |
+
"text": "The conditioned critic $\\mu$ also differs from the traditional critic in that we concatenate the state representation $s_t$ with the vector $\\omega$ as well as the embedding of action $a_t$ , and require the output to be a Q-value-vector with the size equal to the number of objectives, which is depicted in Fig. 1 (Conditioned Critic Network). The conditioned critic $\\mu$ is parameterized with $\\theta^{\\mu}$ and is constructed to approximate the true state-action value vector function $\\mathbf{Q}^{\\pi}(s_t,a_t,\\omega)$ and is used in the optimization of the actor. Following Eq. 2 introduced in conditioned network [3], the conditioned critic network is updated according to temporal-difference learning that minimizes the following loss function:",
|
| 931 |
+
"bbox": [
|
| 932 |
+
81,
|
| 933 |
+
518,
|
| 934 |
+
482,
|
| 935 |
+
671
|
| 936 |
+
],
|
| 937 |
+
"page_idx": 4
|
| 938 |
+
},
|
| 939 |
+
{
|
| 940 |
+
"type": "equation",
|
| 941 |
+
"text": "\n$$\n\\mathcal {L} \\left(\\theta^ {\\mu}\\right) = \\mathbb {E} _ {s, a, \\omega} \\left[ \\left\\| \\mathbf {y} _ {t} - \\mathbf {Q} _ {t} (s, a, \\omega ; \\theta^ {\\mu}) \\right\\| _ {2} ^ {2} \\right] \\tag {10}\n$$\n",
|
| 942 |
+
"text_format": "latex",
|
| 943 |
+
"bbox": [
|
| 944 |
+
165,
|
| 945 |
+
676,
|
| 946 |
+
480,
|
| 947 |
+
691
|
| 948 |
+
],
|
| 949 |
+
"page_idx": 4
|
| 950 |
+
},
|
| 951 |
+
{
|
| 952 |
+
"type": "text",
|
| 953 |
+
"text": "where $\\mathbf{y}_t = \\mathbf{r}_t + \\gamma \\mathbf{Q}_{\\omega}(s_{t+1}, a_{t+1}, \\omega; \\theta^\\mu)$ .",
|
| 954 |
+
"bbox": [
|
| 955 |
+
81,
|
| 956 |
+
696,
|
| 957 |
+
318,
|
| 958 |
+
713
|
| 959 |
+
],
|
| 960 |
+
"page_idx": 4
|
| 961 |
+
},
|
| 962 |
+
{
|
| 963 |
+
"type": "text",
|
| 964 |
+
"text": "5.3 Parameters Training Procedure of MoFIR",
|
| 965 |
+
"text_level": 1,
|
| 966 |
+
"bbox": [
|
| 967 |
+
81,
|
| 968 |
+
724,
|
| 969 |
+
464,
|
| 970 |
+
739
|
| 971 |
+
],
|
| 972 |
+
"page_idx": 4
|
| 973 |
+
},
|
| 974 |
+
{
|
| 975 |
+
"type": "text",
|
| 976 |
+
"text": "We present the detailed training procedure of our proposed model, MoFIR, in Algorithm 1 and the model architecture in Fig. 1. As mentioned before, we modify traditional single-objective DDPG into multi-objective DDPG by introducing the conditioned networks to both its actor network and critic network. In each episode, there are two phases - the trajectory generation phase (line 15-20) and model updating phase (line 22-32). In the trajectory generation phase, we sample one linear preference $\\omega_0$ and fix it to generate user-item interaction trajectories. Then in the model updating phase, we sample another $\\mathcal{N}_{\\omega}$ preferences together with $\\omega_0$ to update the conditioned actor network and the conditioned critic network. Here, we do not",
|
| 977 |
+
"bbox": [
|
| 978 |
+
81,
|
| 979 |
+
743,
|
| 980 |
+
482,
|
| 981 |
+
893
|
| 982 |
+
],
|
| 983 |
+
"page_idx": 4
|
| 984 |
+
},
|
| 985 |
+
{
|
| 986 |
+
"type": "text",
|
| 987 |
+
"text": "follow the original setting in [3], which only uses one more random sampled preference vector, as Yang et al. [52] observed that increasing the number of sampled preference vectors can further improve the coverage ratio of RL agent and diminish the adaptation error in their experiments.",
|
| 988 |
+
"bbox": [
|
| 989 |
+
511,
|
| 990 |
+
354,
|
| 991 |
+
913,
|
| 992 |
+
425
|
| 993 |
+
],
|
| 994 |
+
"page_idx": 4
|
| 995 |
+
},
|
| 996 |
+
{
|
| 997 |
+
"type": "text",
|
| 998 |
+
"text": "6 EXPERIMENTS",
|
| 999 |
+
"text_level": 1,
|
| 1000 |
+
"bbox": [
|
| 1001 |
+
514,
|
| 1002 |
+
436,
|
| 1003 |
+
671,
|
| 1004 |
+
450
|
| 1005 |
+
],
|
| 1006 |
+
"page_idx": 4
|
| 1007 |
+
},
|
| 1008 |
+
{
|
| 1009 |
+
"type": "text",
|
| 1010 |
+
"text": "In this section, we first introduce the datasets, the comparison baselines, then discuss and analyse the experimental results.",
|
| 1011 |
+
"bbox": [
|
| 1012 |
+
513,
|
| 1013 |
+
454,
|
| 1014 |
+
911,
|
| 1015 |
+
483
|
| 1016 |
+
],
|
| 1017 |
+
"page_idx": 4
|
| 1018 |
+
},
|
| 1019 |
+
{
|
| 1020 |
+
"type": "text",
|
| 1021 |
+
"text": "6.1 Dataset Description",
|
| 1022 |
+
"text_level": 1,
|
| 1023 |
+
"bbox": [
|
| 1024 |
+
514,
|
| 1025 |
+
494,
|
| 1026 |
+
718,
|
| 1027 |
+
511
|
| 1028 |
+
],
|
| 1029 |
+
"page_idx": 4
|
| 1030 |
+
},
|
| 1031 |
+
{
|
| 1032 |
+
"type": "text",
|
| 1033 |
+
"text": "To evaluate the models under different data scales, data sparsity and application scenarios, we perform experiments on three real-world datasets. Some basic statistics of the experimental datasets are shown in Table 1.",
|
| 1034 |
+
"bbox": [
|
| 1035 |
+
513,
|
| 1036 |
+
513,
|
| 1037 |
+
913,
|
| 1038 |
+
568
|
| 1039 |
+
],
|
| 1040 |
+
"page_idx": 4
|
| 1041 |
+
},
|
| 1042 |
+
{
|
| 1043 |
+
"type": "list",
|
| 1044 |
+
"sub_type": "text",
|
| 1045 |
+
"list_items": [
|
| 1046 |
+
"- Movielens: We choose Movielens100K $^{1}$ , which includes about one hundred thousand user transactions, respectively (user id, item id, rating, timestamp, etc.).",
|
| 1047 |
+
"- Ciao: Ciao was collected by Tang et al. [45] from a popular product review site, Epinions, in the month of May, $2011^{2}$ . For each user, they collected user profiles, user ratings and user trust relations. For each rating, they collected the product name and its category, the rating score, the time point when the rating is created, and the helpfulness of this rating.",
|
| 1048 |
+
"- Etsy: We collect a few weeks of user-item interaction data on a famous e-commerce platform, Etsy. For each record, we collect user id, item id and timestamp. Since the original data is sparse, we filter out users and items with fewer than twenty interactions."
|
| 1049 |
+
],
|
| 1050 |
+
"bbox": [
|
| 1051 |
+
514,
|
| 1052 |
+
570,
|
| 1053 |
+
913,
|
| 1054 |
+
751
|
| 1055 |
+
],
|
| 1056 |
+
"page_idx": 4
|
| 1057 |
+
},
|
| 1058 |
+
{
|
| 1059 |
+
"type": "text",
|
| 1060 |
+
"text": "For each dataset, we first sort the records of each user based on the timestamp, and then split the records into training and testing sets chronologically by 4:1. The last item in the training set of each user is put into the validation set. Since we focus on item exposure fairness, we need to split items into two groups $G_{0}$ and $G_{1}$ based on item popularity. It would be desirable if we have the item impression/listing information and use it to group items, however, since Movielens and Ciao are public dataset and only have",
|
| 1061 |
+
"bbox": [
|
| 1062 |
+
511,
|
| 1063 |
+
753,
|
| 1064 |
+
913,
|
| 1065 |
+
864
|
| 1066 |
+
],
|
| 1067 |
+
"page_idx": 4
|
| 1068 |
+
},
|
| 1069 |
+
{
|
| 1070 |
+
"type": "page_footnote",
|
| 1071 |
+
"text": "$^{1}$ https://grouplens.org/datasets/Movielens/",
|
| 1072 |
+
"bbox": [
|
| 1073 |
+
514,
|
| 1074 |
+
872,
|
| 1075 |
+
718,
|
| 1076 |
+
883
|
| 1077 |
+
],
|
| 1078 |
+
"page_idx": 4
|
| 1079 |
+
},
|
| 1080 |
+
{
|
| 1081 |
+
"type": "page_footnote",
|
| 1082 |
+
"text": "$^{2}$ https://www.cse.msu.edu/tangjili/datasetcode/truststudy.htm",
|
| 1083 |
+
"bbox": [
|
| 1084 |
+
514,
|
| 1085 |
+
883,
|
| 1086 |
+
812,
|
| 1087 |
+
895
|
| 1088 |
+
],
|
| 1089 |
+
"page_idx": 4
|
| 1090 |
+
},
|
| 1091 |
+
{
|
| 1092 |
+
"type": "code",
|
| 1093 |
+
"sub_type": "algorithm",
|
| 1094 |
+
"code_caption": [
|
| 1095 |
+
"Algorithm 1: Multi-Objective DDPG Algorithm"
|
| 1096 |
+
],
|
| 1097 |
+
"code_body": "1 Input: \n2 A preference sampling distribution $D_{\\omega}$ \n3 A multi-objective critic network $\\mu$ parameterized by $\\theta^{\\mu}$ \n4 An actor network $\\pi$ parameterized by $\\theta^{\\pi}$ \n5 Pre-trained user embeddings $\\mathcal{U}$ and item embeddings $\\mathcal{V}$ \n6 Output: \n7 Parameters $\\theta^{\\pi},\\theta^{\\mu}$ of the actor network and critic network. \n8 Initialization: \n9 Randomly initialize $\\theta^{\\pi}$ and $\\theta^{\\mu}$ \n10 Initialize target network $\\mu^\\prime$ and $\\pi^{\\prime}$ with weights $\\theta^{\\pi '}\\gets \\theta^{\\pi},\\theta^{\\mu '}\\gets \\theta^{\\mu};$ \n11 Initialize replay buffer $D$ \nfor Episode $= 1\\dots M$ do \n13 Initialize user state $s_0$ from log data; \n14 Sample a linear preference $\\omega_0\\sim D_\\omega$ . \nfor $t = 1\\dots T$ do Observe current state, represent it as $s_t$ based on Eq. (6); Select an action $a_{t}\\in I^{K}$ using actor network $\\pi$ based on Eq. (9) Calculate utility reward and fairness reward and get the multi-objective reward vector $\\mathbf{r}_t$ according to environment feedback based on Eq. (4) and Eq. (5); Update $s_{t + 1}$ based on Eq. 
(6); Store transition $(s_t,a_t,\\mathbf{r}_t,s_{t + 1})$ in $D$ end if update then Sample minibatch of $\\mathcal{N}$ trajectories $\\mathcal{T}$ from $D$ Sample $\\mathcal{N}_{\\omega}$ preferences $W = \\{\\omega_{1},\\omega_{2},\\dots ,\\omega_{N_{\\omega}}\\} \\sim D_{\\omega}$ Append $\\omega_0$ to $W$ Select an action $a^\\prime \\in I^K$ using actor target network $\\pi '$ Set $\\mathbf{y} = \\mathbf{r} + \\gamma \\mathbf{Q}'(s',a',\\omega ;\\theta^{\\mu '}),\\omega \\in W$ Update critic by minimizing $\\| \\mathbf{y} - \\mathbf{Q}(s,a,\\omega ;\\theta^{\\mu})\\| _2^2$ according to: $\\nabla_{\\theta^{\\mu}}\\mathcal{L}\\approx \\frac{1}{NN_{\\omega}}\\left[(\\mathbf{y} - \\mathbf{Q}(s,a,\\omega;\\theta^{\\mu}))^{T}\\nabla_{\\theta^{\\mu}}\\mathbf{Q}(s,a,\\omega;\\theta^{\\mu})\\right]$ Update the actor using the sampled policy gradient: $\\nabla_{\\theta^{\\pi}}\\pi \\approx \\frac{1}{NN_{\\omega}}\\sum_i\\boldsymbol {\\omega}^T\\nabla_a\\mathbf{Q}(s,a,\\boldsymbol {\\omega};\\theta^{\\mu})\\nabla_{\\theta^{\\pi}}\\pi (s,\\boldsymbol {\\omega})$ Update the critic target networks: $\\theta^{\\mu '}\\gets \\tau \\theta^{\\mu} + (1 - \\tau)\\theta^{\\mu '}$ Update the actor target networks: $\\theta^{\\pi '}\\gets \\tau \\theta^{\\pi} + (1 - \\tau)\\theta^{\\pi '}$ end",
|
| 1098 |
+
"bbox": [
|
| 1099 |
+
84,
|
| 1100 |
+
125,
|
| 1101 |
+
501,
|
| 1102 |
+
747
|
| 1103 |
+
],
|
| 1104 |
+
"page_idx": 5
|
| 1105 |
+
},
|
| 1106 |
+
{
|
| 1107 |
+
"type": "table",
|
| 1108 |
+
"img_path": "images/49b0a3af5980ec68bba1d16006e3947f8c1b07881f25a99dadde72688c68dace.jpg",
|
| 1109 |
+
"table_caption": [
|
| 1110 |
+
"Table 1: Basic statistics of the experimental datasets."
|
| 1111 |
+
],
|
| 1112 |
+
"table_footnote": [],
|
| 1113 |
+
"table_body": "<table><tr><td>Dataset</td><td>#users</td><td>#items</td><td>#act./user</td><td>#act./item</td><td>#act.</td><td>density</td></tr><tr><td>Movielens100K</td><td>943</td><td>1682</td><td>106</td><td>59.45</td><td>100,000</td><td>6.305%</td></tr><tr><td>Ciao</td><td>2248</td><td>16861</td><td>16</td><td>2</td><td>36065</td><td>0.095%</td></tr><tr><td>Etsy</td><td>1030</td><td>945</td><td>47</td><td>51</td><td>48080</td><td>4.940%</td></tr></table>",
|
| 1114 |
+
"bbox": [
|
| 1115 |
+
84,
|
| 1116 |
+
786,
|
| 1117 |
+
480,
|
| 1118 |
+
844
|
| 1119 |
+
],
|
| 1120 |
+
"page_idx": 5
|
| 1121 |
+
},
|
| 1122 |
+
{
|
| 1123 |
+
"type": "text",
|
| 1124 |
+
"text": "interaction data, we use the number of interaction to group items in them. Specifically, for Movielens and Ciao, the top $20\\%$ items in",
|
| 1125 |
+
"bbox": [
|
| 1126 |
+
81,
|
| 1127 |
+
868,
|
| 1128 |
+
480,
|
| 1129 |
+
896
|
| 1130 |
+
],
|
| 1131 |
+
"page_idx": 5
|
| 1132 |
+
},
|
| 1133 |
+
{
|
| 1134 |
+
"type": "text",
|
| 1135 |
+
"text": "terms of number of interactions belong to the popular group $G_0$ , and the remaining $80\\%$ belong to the long-tail group $G_1$ , while for Etsy data, we additionally collect the listing impressions per month for each item and group items based on this.",
|
| 1136 |
+
"bbox": [
|
| 1137 |
+
513,
|
| 1138 |
+
106,
|
| 1139 |
+
913,
|
| 1140 |
+
161
|
| 1141 |
+
],
|
| 1142 |
+
"page_idx": 5
|
| 1143 |
+
},
|
| 1144 |
+
{
|
| 1145 |
+
"type": "text",
|
| 1146 |
+
"text": "Moreover, for RL-based methods, we set the initial state for each user during training as the first five clicked items in the training set, and the initial state during testing as the last five clicked items in the training set. We also set the RL agent recommend ten items to a user each time.",
|
| 1147 |
+
"bbox": [
|
| 1148 |
+
513,
|
| 1149 |
+
162,
|
| 1150 |
+
913,
|
| 1151 |
+
229
|
| 1152 |
+
],
|
| 1153 |
+
"page_idx": 5
|
| 1154 |
+
},
|
| 1155 |
+
{
|
| 1156 |
+
"type": "text",
|
| 1157 |
+
"text": "6.2 Experimental Setup",
|
| 1158 |
+
"text_level": 1,
|
| 1159 |
+
"bbox": [
|
| 1160 |
+
514,
|
| 1161 |
+
244,
|
| 1162 |
+
720,
|
| 1163 |
+
260
|
| 1164 |
+
],
|
| 1165 |
+
"page_idx": 5
|
| 1166 |
+
},
|
| 1167 |
+
{
|
| 1168 |
+
"type": "text",
|
| 1169 |
+
"text": "6.2.1 Baselines: We compare our proposed method with the following baselines, including both traditional and reinforcement learning based recommendation models.",
|
| 1170 |
+
"bbox": [
|
| 1171 |
+
513,
|
| 1172 |
+
262,
|
| 1173 |
+
913,
|
| 1174 |
+
304
|
| 1175 |
+
],
|
| 1176 |
+
"page_idx": 5
|
| 1177 |
+
},
|
| 1178 |
+
{
|
| 1179 |
+
"type": "list",
|
| 1180 |
+
"sub_type": "text",
|
| 1181 |
+
"list_items": [
|
| 1182 |
+
"- MF: Collaborative Filtering based on matrix factorization [24] is a representative method for rating prediction. However, since not all datasets contain rating scores, we turn the rating prediction task into ranking prediction. Specifically, the user and item interaction vectors are considered as the representation vector for each user and item.",
|
| 1183 |
+
"- BPR-MF: Bayesian Personalized Ranking [38] is one of the most widely used ranking methods for top-K recommendation, which models recommendation as a pair-wise ranking problem. In the implementation, we conduct balanced negative sampling on non-purchased items for model learning.",
|
| 1184 |
+
"- NGCF: Neural Graph Collaborative Filtering [46] is a neural network-based recommendation algorithm, which integrates the user-item interactions into the embedding learning process and exploits the graph structure by propagating embeddings on it to model the high-order connectivity.",
|
| 1185 |
+
"- LIRD: The original paper for List-wise recommendation based on deep reinforcement learning (LIRD) [57] utilized the concatenation of item embeddings to represent the user state, and the actor will provide a list of K items as an action."
|
| 1186 |
+
],
|
| 1187 |
+
"bbox": [
|
| 1188 |
+
514,
|
| 1189 |
+
306,
|
| 1190 |
+
913,
|
| 1191 |
+
584
|
| 1192 |
+
],
|
| 1193 |
+
"page_idx": 5
|
| 1194 |
+
},
|
| 1195 |
+
{
|
| 1196 |
+
"type": "text",
|
| 1197 |
+
"text": "We also include two state-of-the-art fairness frameworks to show the fairness performance of our proposed method.",
|
| 1198 |
+
"bbox": [
|
| 1199 |
+
513,
|
| 1200 |
+
587,
|
| 1201 |
+
913,
|
| 1202 |
+
616
|
| 1203 |
+
],
|
| 1204 |
+
"page_idx": 5
|
| 1205 |
+
},
|
| 1206 |
+
{
|
| 1207 |
+
"type": "list",
|
| 1208 |
+
"sub_type": "text",
|
| 1209 |
+
"list_items": [
|
| 1210 |
+
"- FOE: Fairness of Exposure in Ranking (FOE) [43] is a type of post-processing algorithm incorporating a standard linear program and the Birkhoff-von Neumann decomposition. It is originally designed for searching problems, so we follow the same modification method mentioned in [16, 47], and use ranking prediction model such as MF, BPR, and NGCF as the base ranker, where the raw utility is given by the predicted probability of user $i$ clicking item $j$ . In our experiment, we have MF-FOE, BPR-FOE and NGCF-FOE as our fairness baselines. Since FOE assumes independence of items in the list, it cannot be applied to LIRD, which is a sequential model and the order in its recommendation makes a difference.",
|
| 1211 |
+
"- MFR: Multi-FR (MFR) [47] is a generic fairness-aware recommendation framework with multi-objective optimization, which jointly optimizes fairness and utility for two-sided recommendation. In our experiment, we only choose its item popularity fairness. We also modify it as the original fairness considers position bias as well, which is not the same setting as ours. Finally, we have MF-MFR, BPR-MFR and NGCF-MFR. For same reason as FOE, we do not include LIRD as well."
|
| 1212 |
+
],
|
| 1213 |
+
"bbox": [
|
| 1214 |
+
514,
|
| 1215 |
+
618,
|
| 1216 |
+
913,
|
| 1217 |
+
895
|
| 1218 |
+
],
|
| 1219 |
+
"page_idx": 5
|
| 1220 |
+
},
|
| 1221 |
+
{
|
| 1222 |
+
"type": "table",
|
| 1223 |
+
"img_path": "images/87ff2f1ad4cd8bd12e2b9b137b397e33553634f9e6969456b44933054f3734fb.jpg",
|
| 1224 |
+
"table_caption": [
|
| 1225 |
+
"Table 2: Summary of the performance on three datasets. We evaluate for ranking (Recall, F1 and NDCG, in percentage (%), % symbol is omitted in the table for clarity) and fairness (KL Divergence and Popularity Rate, also in % values), while $K$ is the length of recommendation list. Bold scores are used when MoFIR is the best, while underlined scores indicate the strongest baselines. When MoFIR is the best, its improvements against the best baseline are significant at $p < {0.01}$ ."
|
| 1226 |
+
],
|
| 1227 |
+
"table_footnote": [],
|
| 1228 |
+
"table_body": "<table><tr><td rowspan=\"2\">Methods</td><td colspan=\"3\">Recall (%)↑</td><td colspan=\"3\">F1 (%)↑</td><td colspan=\"3\">NDCG (%)↑</td><td colspan=\"3\">KL (%)↓</td><td colspan=\"3\">Popularity Rate (%)↓</td></tr><tr><td>K=5</td><td>K=10</td><td>K=20</td><td>K=5</td><td>K=10</td><td>K=20</td><td>K=5</td><td>K=10</td><td>K=20</td><td>K=5</td><td>K=10</td><td>K=20</td><td>K=5</td><td>K=10</td><td>K=20</td></tr><tr><td colspan=\"16\">Movielens-100K</td></tr><tr><td>MF</td><td>1.422</td><td>2.713</td><td>5.228</td><td>2.019</td><td>3.016</td><td>4.127</td><td>3.561</td><td>3.830</td><td>4.705</td><td>229.124</td><td>224.390</td><td>215.772</td><td>99.745</td><td>99.258</td><td>98.224</td></tr><tr><td>BPR-MF</td><td>1.304</td><td>3.539</td><td>8.093</td><td>1.824</td><td>3.592</td><td>5.409</td><td>3.025</td><td>3.946</td><td>5.787</td><td>230.531</td><td>230.531</td><td>229.464</td><td>99.873</td><td>99.873</td><td>99.777</td></tr><tr><td>NGCF</td><td>1.995</td><td>3.831</td><td>6.983</td><td>2.846</td><td>4.267</td><td>5.383</td><td>5.319</td><td>5.660</td><td>6.510</td><td>232.193</td><td>232.193</td><td>232.193</td><td>100.000</td><td>100.000</td><td>100.000</td></tr><tr><td>LIRD</td><td>2.798</td><td>6.586</td><td>13.711</td><td>3.198</td><td>4.850</td><td>5.855</td><td>4.583</td><td>6.217</td><td>8.840</td><td>209.845</td><td>193.918</td><td>176.644</td><td>97.434</td><td>95.058</td><td>92.121</td></tr><tr><td>MF-FOE</td><td>1.164</td><td>2.247</td><td>4.179</td><td>1.739</td><td>2.730</td><td>3.794</td><td>3.520</td><td>3.796</td><td>4.367</td><td>181.000</td><td>175.355</td><td>170.444</td><td>92.895</td><td>91.888</td><td>90.981</td></tr><tr><td>BPR-FOE</td><td>0.974</td><td>2.053</td><td>4.404</td><td>1.496</td><td>2.568</td><td>3.933</td><td>3.127</td><td>3.514</td><td>4.332</td><td>176.938</td><td>172.465</td><td>168.952</td><td>92.174</td><td>91.357</td><td>90.700</td></tr><tr><td>NGCF-FOE</td><td>1.193</td><td>1.987</td><td>4.251</td><td>1.
759</td><td>2.398</td><td>3.698</td><td>4.033</td><td>3.897</td><td>4.633</td><td>232.193</td><td>232.193</td><td>232.193</td><td>100.000</td><td>100.000</td><td>100.000</td></tr><tr><td>MF-MFR</td><td>1.546</td><td>2.807</td><td>5.422</td><td>2.019</td><td>3.016</td><td>4.127</td><td>3.276</td><td>3.613</td><td>4.571</td><td>100.590</td><td>96.620</td><td>85.420</td><td>74.867</td><td>73.743</td><td>70.419</td></tr><tr><td>BPR-MFR</td><td>1.418</td><td>2.811</td><td>6.155</td><td>2.019</td><td>3.016</td><td>4.127</td><td>3.522</td><td>3.822</td><td>5.047</td><td>165.897</td><td>155.339</td><td>137.663</td><td>90.117</td><td>88.017</td><td>84.205</td></tr><tr><td>NGCF-MFR</td><td>1.456</td><td>2.900</td><td>6.570</td><td>2.846</td><td>4.267</td><td>5.383</td><td>3.041</td><td>3.472</td><td>4.928</td><td>212.497</td><td>202.306</td><td>185.518</td><td>97.794</td><td>96.352</td><td>93.674</td></tr><tr><td>MoFIR-1.0</td><td>6.580</td><td>12.753</td><td>22.843</td><td>5.658</td><td>7.178</td><td>7.858</td><td>8.026</td><td>10.848</td><td>14.683</td><td>232.193</td><td>232.193</td><td>232.193</td><td>100.000</td><td>100.000</td><td>100.000</td></tr><tr><td>MoFIR-0.5</td><td>4.679</td><td>9.520</td><td>19.918</td><td>4.438</td><td>5.808</td><td>7.421</td><td>6.633</td><td>9.300</td><td>14.370</td><td>173.672</td><td>170.303</td><td>162.386</td><td>91.580</td><td>90.954</td><td>89.433</td></tr><tr><td>MoFIR-0.1</td><td>0.323</td><td>0.781</td><td>1.550</td><td>0.521</td><td>1.008</td><td>1.483</td><td>1.251</td><td>1.404</td><td>1.598</td><td>0.795</td><td>0.608</td><td>0.306</td><td>24.305</td><td>23.754</td><td>22.646</td></tr><tr><td 
colspan=\"16\">Ciao</td></tr><tr><td>MF</td><td>0.518</td><td>1.938</td><td>3.100</td><td>0.395</td><td>0.687</td><td>0.599</td><td>0.408</td><td>0.924</td><td>1.264</td><td>81.154</td><td>65.458</td><td>47.848</td><td>69.088</td><td>63.835</td><td>57.098</td></tr><tr><td>BPR-MF</td><td>1.087</td><td>2.204</td><td>4.607</td><td>0.677</td><td>0.770</td><td>0.858</td><td>0.776</td><td>1.181</td><td>1.900</td><td>119.307</td><td>100.884</td><td>82.717</td><td>79.826</td><td>74.949</td><td>69.580</td></tr><tr><td>NGCF</td><td>1.721</td><td>2.816</td><td>4.380</td><td>1.056</td><td>0.958</td><td>0.783</td><td>1.670</td><td>2.027</td><td>2.450</td><td>142.025</td><td>96.789</td><td>59.561</td><td>85.181</td><td>73.792</td><td>61.693</td></tr><tr><td>LIRD</td><td>0.766</td><td>2.448</td><td>3.599</td><td>0.554</td><td>1.082</td><td>0.921</td><td>1.393</td><td>2.638</td><td>3.277</td><td>65.744</td><td>105.507</td><td>64.888</td><td>63.936</td><td>76.223</td><td>63.632</td></tr><tr><td>MF-FOE</td><td>0.685</td><td>1.208</td><td>1.914</td><td>0.458</td><td>0.474</td><td>0.396</td><td>0.475</td><td>0.669</td><td>0.864</td><td>19.720</td><td>11.167</td><td>7.622</td><td>43.068</td><td>37.033</td><td>33.915</td></tr><tr><td>BPR-FOE</td><td>1.442</td><td>2.111</td><td>3.693</td><td>0.812</td><td>0.663</td><td>0.731</td><td>0.934</td><td>1.154</td><td>1.657</td><td>55.999</td><td>46.858</td><td>40.626</td><td>60.347</td><td>56.686</td><td>53.987</td></tr><tr><td>NGCF-FOE</td><td>1.234</td><td>1.907</td><td>2.903</td><td>0.651</td><td>0.583</td><td>0.566</td><td>0.937</td><td>1.156</td><td>1.477</td><td>79.313</td><td>74.038</td><td>71.335</td><td>43.357</td><td>34.226</td><td>30.391</td></tr><tr><td>MF-MFR</td><td>0.307</td><td>0.619</td><td>1.281</td><td>0.395</td><td>0.687</td><td>0.599</td><td>0.237</td><td>0.345</td><td>0.535</td><td>0.185</td><td>0.096</td><td>0.068</td><td>18.003</td><td>18.553</td><td>18.784</td></tr><tr><td>BPR-MFR</td><td>1.146</td><td>1.962</td><td>2.66
7</td><td>0.395</td><td>0.687</td><td>0.599</td><td>1.011</td><td>1.314</td><td>1.534</td><td>4.303</td><td>2.540</td><td>1.454</td><td>30.304</td><td>27.829</td><td>25.868</td></tr><tr><td>NGCF-MFR</td><td>1.284</td><td>2.131</td><td>4.033</td><td>1.056</td><td>0.958</td><td>0.783</td><td>1.014</td><td>1.342</td><td>1.901</td><td>37.133</td><td>20.302</td><td>10.515</td><td>52.388</td><td>43.430</td><td>36.498</td></tr><tr><td>MoFIR-1.0</td><td>2.162</td><td>3.867</td><td>5.866</td><td>1.626</td><td>1.513</td><td>1.323</td><td>4.000</td><td>4.764</td><td>5.813</td><td>181.742</td><td>156.545</td><td>123.213</td><td>93.025</td><td>88.263</td><td>80.796</td></tr><tr><td>MoFIR-0.5</td><td>1.254</td><td>2.665</td><td>4.122</td><td>0.845</td><td>0.971</td><td>0.879</td><td>2.031</td><td>2.724</td><td>3.490</td><td>19.077</td><td>12.750</td><td>8.032</td><td>42.663</td><td>38.278</td><td>34.305</td></tr><tr><td>MoFIR-0.1</td><td>0.892</td><td>1.610</td><td>2.338</td><td>0.557</td><td>0.532</td><td>0.445</td><td>1.054</td><td>1.311</td><td>1.576</td><td>0.054</td><td>0.010</td><td>0.484</td><td>21.100</td><td>19.522</td><td>16.795</td></tr><tr><td 
colspan=\"16\">Etsy</td></tr><tr><td>MF</td><td>2.693</td><td>5.581</td><td>10.348</td><td>2.917</td><td>4.176</td><td>4.912</td><td>3.438</td><td>4.671</td><td>6.681</td><td>190.410</td><td>190.173</td><td>186.243</td><td>94.491</td><td>94.452</td><td>93.797</td></tr><tr><td>BPR-MF</td><td>3.113</td><td>5.850</td><td>11.704</td><td>3.309</td><td>4.320</td><td>5.385</td><td>3.700</td><td>4.880</td><td>7.341</td><td>179.815</td><td>176.447</td><td>169.740</td><td>92.687</td><td>92.085</td><td>90.849</td></tr><tr><td>NGCF</td><td>3.414</td><td>6.026</td><td>11.746</td><td>3.674</td><td>4.498</td><td>5.406</td><td>4.180</td><td>5.238</td><td>7.610</td><td>194.985</td><td>185.756</td><td>175.403</td><td>95.228</td><td>93.715</td><td>91.896</td></tr><tr><td>LIRD</td><td>7.163</td><td>12.176</td><td>24.056</td><td>4.158</td><td>4.493</td><td>4.967</td><td>6.587</td><td>9.289</td><td>13.833</td><td>212.890</td><td>197.336</td><td>166.047</td><td>97.847</td><td>95.597</td><td>90.145</td></tr><tr><td>MF-FOE</td><td>1.382</td><td>2.436</td><td>4.515</td><td>1.641</td><td>2.160</td><td>2.704</td><td>2.111</td><td>2.482</td><td>3.318</td><td>42.682</td><td>29.960</td><td>22.502</td><td>54.898</td><td>48.865</td><td>44.758</td></tr><tr><td>BPR-FOE</td><td>1.503</td><td>2.808</td><td>5.513</td><td>1.802</td><td>2.468</td><td>3.132</td><td>2.328</td><td>2.783</td><td>3.844</td><td>43.394</td><td>30.734</td><td>23.390</td><td>55.209</td><td>49.263</td><td>45.276</td></tr><tr><td>NGCF-FOE</td><td>1.958</td><td>3.135</td><td>5.478</td><td>2.227</td><td>2.593</td><td>2.923</td><td>2.705</td><td>3.106</td><td>4.024</td><td>47.548</td><td>30.829</td><td>21.678</td><td>56.974</td><td>49.311</td><td>44.268</td></tr><tr><td>MF-MFR</td><td>2.482</td><td>5.150</td><td>10.279</td><td>2.917</td><td>4.176</td><td>4.912</td><td>3.265</td><td>4.504</td><td>6.671</td><td>173.889</td><td>158.030</td><td>134.564</td><td>91.620</td><td>88.565</td><td>83.497</td></tr><tr><td>BPR-MFR</td><td>2.510</td>
<td>4.849</td><td>9.711</td><td>2.917</td><td>4.176</td><td>4.912</td><td>3.144</td><td>4.110</td><td>6.206</td><td>136.319</td><td>120.661</td><td>94.153</td><td>83.899</td><td>80.165</td><td>73.031</td></tr><tr><td>NGCF-MFR</td><td>2.325</td><td>4.146</td><td>7.946</td><td>3.558</td><td>4.820</td><td>5.616</td><td>2.994</td><td>3.636</td><td>5.210</td><td>104.348</td><td>91.557</td><td>74.402</td><td>75.907</td><td>72.270</td><td>66.901</td></tr><tr><td>MoFIR-1.0</td><td>6.690</td><td>13.871</td><td>24.728</td><td>4.833</td><td>5.932</td><td>6.238</td><td>9.183</td><td>13.629</td><td>19.822</td><td>139.319</td><td>134.627</td><td>129.318</td><td>84.578</td><td>83.511</td><td>82.270</td></tr><tr><td>MoFIR-0.5</td><td>5.333</td><td>10.342</td><td>19.383</td><td>3.460</td><td>3.979</td><td>4.218</td><td>4.626</td><td>6.614</td><td>9.798</td><td>70.154</td><td>67.961</td><td>64.956</td><td>65.470</td><td>64.714</td><td>63.657</td></tr><tr><td>MoFIR-0.1</td><td>1.340</td><td>2.966</td><td>5.864</td><td>1.151</td><td>1.641</td><td>1.895</td><td>1.778</td><td>2.839</td><td>4.425</td><td>0.569</td><td>0.545</td><td>0.396</td><td>23.628</td><td>23.550</td><td>23.016</td></tr></table>",
|
| 1229 |
+
"bbox": [
|
| 1230 |
+
84,
|
| 1231 |
+
164,
|
| 1232 |
+
916,
|
| 1233 |
+
734
|
| 1234 |
+
],
|
| 1235 |
+
"page_idx": 6
|
| 1236 |
+
},
|
| 1237 |
+
{
|
| 1238 |
+
"type": "text",
|
| 1239 |
+
"text": "We implement MF, BPR-MF, NGCF, MF-FOE, BPR-FOE, NGCF-FOE, MF-MFR BPR-MFR and NGCF-MFR using Pytorch with Adam optimizer. For all of them, we consider latent dimensions $d$ from $\\{16, 32, 64, 128, 256\\}$ , learning rate $lr$ from $\\{1e - 1, 5e - 2, 1e - 2, \\dots, 5e - 4, 1e - 4\\}$ , and the L2 penalty is chosen from $\\{0.01, 0.1, 1\\}$ . We tune the hyper-parameters using the validation set and terminate training when the performance on the validation set does not change within 5 epochs. Further, since the FOE-based methods need to solve a linear programming with size $|\\mathcal{I}| \\times |\\mathcal{I}|$ for each consumer, which brings huge computational costs, we rerank the top-200 items from",
|
| 1240 |
+
"bbox": [
|
| 1241 |
+
81,
|
| 1242 |
+
742,
|
| 1243 |
+
480,
|
| 1244 |
+
881
|
| 1245 |
+
],
|
| 1246 |
+
"page_idx": 6
|
| 1247 |
+
},
|
| 1248 |
+
{
|
| 1249 |
+
"type": "text",
|
| 1250 |
+
"text": "the base model then select the new top-K (K<100) as the final recommendation. Similarly, we implement MoFIR with Pytorch. We first perform basic MF to pretrain 16-dimensional user and item embeddings, and fix them through training and test. We set $|H_{t}| = 5$ , and use two GRU layers to get the state representation $s_t$ . For the actor network and the critic network, we use two hidden layer MLP with $\\tanh(\\cdot)$ as activation function. Finally, we fine-tune MoFIR's hyper-parameters on our validation set. In order to examine the trade-off between performance and fairness, we use different level of preference vectors in test. Since MoFIR is able to approximate all possible solutions of the Pareto frontier, we simply input different",
|
| 1251 |
+
"bbox": [
|
| 1252 |
+
511,
|
| 1253 |
+
742,
|
| 1254 |
+
915,
|
| 1255 |
+
897
|
| 1256 |
+
],
|
| 1257 |
+
"page_idx": 6
|
| 1258 |
+
},
|
| 1259 |
+
{
|
| 1260 |
+
"type": "image",
|
| 1261 |
+
"img_path": "images/a05795ed13f500202266c76a3f28716105a29db4b31f2702565b2776e1cd5cf6.jpg",
|
| 1262 |
+
"image_caption": [
|
| 1263 |
+
"(a) NDCG vs Long-tail Rate on ML100K"
|
| 1264 |
+
],
|
| 1265 |
+
"image_footnote": [],
|
| 1266 |
+
"bbox": [
|
| 1267 |
+
109,
|
| 1268 |
+
114,
|
| 1269 |
+
334,
|
| 1270 |
+
265
|
| 1271 |
+
],
|
| 1272 |
+
"page_idx": 7
|
| 1273 |
+
},
|
| 1274 |
+
{
|
| 1275 |
+
"type": "image",
|
| 1276 |
+
"img_path": "images/9bd70f8dc84fb89ab5b3e766f7249c9924211099fc445d8a053756f6026e5278.jpg",
|
| 1277 |
+
"image_caption": [
|
| 1278 |
+
"(b) NDCG vs Long-tail Rate on Ciao",
|
| 1279 |
+
"Figure 2: Approximate Pareto frontier in three datasets generated by MoFIR and NGCF-MFR, where $x$ -axis represents the Longtail Rate@20 (Longtail Rate equals to one minus Popularity Rate) and $y$ -axis represents the value of NDCG@20."
|
| 1280 |
+
],
|
| 1281 |
+
"image_footnote": [],
|
| 1282 |
+
"bbox": [
|
| 1283 |
+
375,
|
| 1284 |
+
114,
|
| 1285 |
+
602,
|
| 1286 |
+
265
|
| 1287 |
+
],
|
| 1288 |
+
"page_idx": 7
|
| 1289 |
+
},
|
| 1290 |
+
{
|
| 1291 |
+
"type": "image",
|
| 1292 |
+
"img_path": "images/b6b0d276257713232adec054aaa56f4ef171898075cb7bfdcb83230a3bde4045.jpg",
|
| 1293 |
+
"image_caption": [
|
| 1294 |
+
"(c) NDCG vs Long-tail Rate on Etsy"
|
| 1295 |
+
],
|
| 1296 |
+
"image_footnote": [],
|
| 1297 |
+
"bbox": [
|
| 1298 |
+
643,
|
| 1299 |
+
114,
|
| 1300 |
+
866,
|
| 1301 |
+
265
|
| 1302 |
+
],
|
| 1303 |
+
"page_idx": 7
|
| 1304 |
+
},
|
| 1305 |
+
{
|
| 1306 |
+
"type": "text",
|
| 1307 |
+
"text": "preference vectors $\\omega$ into the trained model to get variants of MoFIR and denote the resulting alternatives as MoFIR-1.0, MoFIR-0.5, and MoFIR-0.1, where the scalar is the weight on the recommendation utility objective.",
|
| 1308 |
+
"bbox": [
|
| 1309 |
+
81,
|
| 1310 |
+
345,
|
| 1311 |
+
483,
|
| 1312 |
+
402
|
| 1313 |
+
],
|
| 1314 |
+
"page_idx": 7
|
| 1315 |
+
},
|
| 1316 |
+
{
|
| 1317 |
+
"type": "text",
|
| 1318 |
+
"text": "6.2.2 Evaluation Metrics: We select several most commonly used top-K ranking metrics to evaluate each model's recommendation performance, including Recall, F1 Score, and NDCG. For fairness evaluation, we define Popularity Rate, which simply refers to the ratio of the number of popular items in the recommendation list to the total number of items in the list. We also employ KL-divergence (KL) to compute the expectation of the difference between protected group membership at top-K vs. in the over-all population, where $d_{KL}(D_1||D_2) = \\sum_j D_1(j) \\ln \\frac{D_1(j)}{D_2(j)}$ with $D_1$ represents the true group distribution between $G_0$ and $G_1$ in top-K recommendation list, and $D_2 = \\left[\\frac{|G_0|}{|\\mathcal{I}|}, \\frac{|G_1|}{|\\mathcal{I}|}\\right]$ represents their ideal distribution of the overall population.",
|
| 1319 |
+
"bbox": [
|
| 1320 |
+
81,
|
| 1321 |
+
409,
|
| 1322 |
+
483,
|
| 1323 |
+
583
|
| 1324 |
+
],
|
| 1325 |
+
"page_idx": 7
|
| 1326 |
+
},
|
| 1327 |
+
{
|
| 1328 |
+
"type": "text",
|
| 1329 |
+
"text": "6.3 Experimental Results",
|
| 1330 |
+
"text_level": 1,
|
| 1331 |
+
"bbox": [
|
| 1332 |
+
83,
|
| 1333 |
+
601,
|
| 1334 |
+
303,
|
| 1335 |
+
616
|
| 1336 |
+
],
|
| 1337 |
+
"page_idx": 7
|
| 1338 |
+
},
|
| 1339 |
+
{
|
| 1340 |
+
"type": "text",
|
| 1341 |
+
"text": "The major experimental results are shown in Table 2, besides, we also plot the approximate Pareto frontier between NDCG and Long-tail Rate (namely, 1-Popularity Rate) in Fig. 2. We analyze and discuss the results in terms of the following perspectives.",
|
| 1342 |
+
"bbox": [
|
| 1343 |
+
81,
|
| 1344 |
+
619,
|
| 1345 |
+
482,
|
| 1346 |
+
675
|
| 1347 |
+
],
|
| 1348 |
+
"page_idx": 7
|
| 1349 |
+
},
|
| 1350 |
+
{
|
| 1351 |
+
"type": "text",
|
| 1352 |
+
"text": "6.3.1 Recommendation Performance. For recommendation performance, we compare MoFIR-1.0 with MF, BPR, NGCF, and LIRD based on Recall@k, F1@k and NDCG@k and provide the these results of the recommendation performance in Table 2. Among all the baseline models, we can see that all sequential recommendation methods (LIRD, MoFIR-1.0) are much better than the traditional method, which demonstrates the superiority of sequential recommendation on top-K ranking tasks. Specifically, LIRD is the strongest baseline in all three datasets on all performance metrics: when averaging across recommendation lengths LIRD achieves $41.28\\%$ improvement than MF, $27.08\\%$ improvement than BPR-MF, and $8.97\\%$ improvement than NGCF.",
|
| 1353 |
+
"bbox": [
|
| 1354 |
+
81,
|
| 1355 |
+
688,
|
| 1356 |
+
482,
|
| 1357 |
+
853
|
| 1358 |
+
],
|
| 1359 |
+
"page_idx": 7
|
| 1360 |
+
},
|
| 1361 |
+
{
|
| 1362 |
+
"type": "text",
|
| 1363 |
+
"text": "Our MoFIR approach achieves the best top-K recommendation performance against all baselines on all datasets: when averaging across three recommendation lengths on all performance metrics,",
|
| 1364 |
+
"bbox": [
|
| 1365 |
+
81,
|
| 1366 |
+
854,
|
| 1367 |
+
483,
|
| 1368 |
+
896
|
| 1369 |
+
],
|
| 1370 |
+
"page_idx": 7
|
| 1371 |
+
},
|
| 1372 |
+
{
|
| 1373 |
+
"type": "text",
|
| 1374 |
+
"text": "MoFIR gets $41.40\\%$ improvement than the best baseline on MovieLens100K; MoFIR gets $46.45\\%$ improvement than LIRD on Ciao; and MoFIR gets $18.98\\%$ improvement than LIRD on Etsy. These above observations imply that the proposed method does have the ability to capture the dynamic nature in user-item interactions, which results in better recommendation results. Besides, unlike LIRD, which only concatenates user and item embeddings together, MoFIR uses several GRU layers to better capture the sequential information in user history, which benefits the model performance.",
|
| 1375 |
+
"bbox": [
|
| 1376 |
+
511,
|
| 1377 |
+
345,
|
| 1378 |
+
913,
|
| 1379 |
+
470
|
| 1380 |
+
],
|
| 1381 |
+
"page_idx": 7
|
| 1382 |
+
},
|
| 1383 |
+
{
|
| 1384 |
+
"type": "text",
|
| 1385 |
+
"text": "6.3.2 Fairness Performance. For fairness performance, we compare MoFIRs with FOE-based methods and MFR-based methods based on KL Divergence@k and Popularity Rate@k, which are also shown in Table 2. It is easy to find that there does exist a trade-off between the recommendation performance and the fairness performance, which is understandable, as most of the long-tail items have relatively fewer interactions with users. When comparing the baselines, we can easily find that MFR is able to achieve better trade-off than FOE as it is also a multi-objective optimization method.",
|
| 1386 |
+
"bbox": [
|
| 1387 |
+
511,
|
| 1388 |
+
482,
|
| 1389 |
+
913,
|
| 1390 |
+
607
|
| 1391 |
+
],
|
| 1392 |
+
"page_idx": 7
|
| 1393 |
+
},
|
| 1394 |
+
{
|
| 1395 |
+
"type": "text",
|
| 1396 |
+
"text": "From Table 2, MoFIR is able to adjust the degree of trade-off between utility and fairness through simply modifying the weight of recommendation utility objective. It is worth noting that MoFIR-0.1 can always closely achieve the ideal distribution as its $KLs$ are close to zero. In Table 2, we can find that even MoFIR has the similar performance of fairness with other baselines, it can still achieve much better recommendation performance (for example, BPR-FOE and MoFIR-0.5 in Movielens100k or NGCF-FOE and MoFIR-0.5 in Ciao or MF-MFR and MoFIR-0.5 in Etsy), which indicates its capability of finding better trade-off.",
|
| 1397 |
+
"bbox": [
|
| 1398 |
+
511,
|
| 1399 |
+
607,
|
| 1400 |
+
913,
|
| 1401 |
+
746
|
| 1402 |
+
],
|
| 1403 |
+
"page_idx": 7
|
| 1404 |
+
},
|
| 1405 |
+
{
|
| 1406 |
+
"type": "text",
|
| 1407 |
+
"text": "6.3.3 Fairness-Utility Trade-off. We only compare MoFIR with MFR, since FOE is a post-processing method, which doesn't optimize the fairness-utility trade-off. In order to better illustrate the trade-off between utility and fairness, we fix the length of the recommendation list at 20 and plot NDCG@20 against Longtail Rate in Fig. 2 for all datasets, where Longtail Rate equals to one minus Popularity Rate. Each blue point is generated by simply changing the input weights to the fine-tuned MoFIR, while each orange point is generated by running the entire MFR optimization. The clear margin distance between the blue points' curve (Approximate",
|
| 1408 |
+
"bbox": [
|
| 1409 |
+
511,
|
| 1410 |
+
757,
|
| 1411 |
+
913,
|
| 1412 |
+
896
|
| 1413 |
+
],
|
| 1414 |
+
"page_idx": 7
|
| 1415 |
+
},
|
| 1416 |
+
{
|
| 1417 |
+
"type": "text",
|
| 1418 |
+
"text": "Pareto frontier) and the orange points' curve demonstrates the great effectiveness of MORL compared with traditional multi-objective optimization method in recommendation.",
|
| 1419 |
+
"bbox": [
|
| 1420 |
+
81,
|
| 1421 |
+
107,
|
| 1422 |
+
480,
|
| 1423 |
+
148
|
| 1424 |
+
],
|
| 1425 |
+
"page_idx": 8
|
| 1426 |
+
},
|
| 1427 |
+
{
|
| 1428 |
+
"type": "text",
|
| 1429 |
+
"text": "7 CONCLUSION",
|
| 1430 |
+
"text_level": 1,
|
| 1431 |
+
"bbox": [
|
| 1432 |
+
83,
|
| 1433 |
+
157,
|
| 1434 |
+
232,
|
| 1435 |
+
172
|
| 1436 |
+
],
|
| 1437 |
+
"page_idx": 8
|
| 1438 |
+
},
|
| 1439 |
+
{
|
| 1440 |
+
"type": "text",
|
| 1441 |
+
"text": "In this work, we achieve the approximate Pareto efficient trade-off between fairness and utility in recommendation systems and characterize their Pareto Frontier in the objective space in order to find solutions with different levels of trade-off. We accomplish the task by proposing a fairness-aware recommendation framework using multi-objective reinforcement learning (MORL) with linear preferences, called MoFIR, which aims to learn a single parametric representation for optimal recommendation policies over the space of all possible preferences. Experiments across three different datasets demonstrate the effectiveness of our approach in both fairness measures and recommendation performance.",
|
| 1442 |
+
"bbox": [
|
| 1443 |
+
81,
|
| 1444 |
+
176,
|
| 1445 |
+
482,
|
| 1446 |
+
330
|
| 1447 |
+
],
|
| 1448 |
+
"page_idx": 8
|
| 1449 |
+
},
|
| 1450 |
+
{
|
| 1451 |
+
"type": "text",
|
| 1452 |
+
"text": "ACKNOWLEDGMENTS",
|
| 1453 |
+
"text_level": 1,
|
| 1454 |
+
"bbox": [
|
| 1455 |
+
83,
|
| 1456 |
+
340,
|
| 1457 |
+
281,
|
| 1458 |
+
354
|
| 1459 |
+
],
|
| 1460 |
+
"page_idx": 8
|
| 1461 |
+
},
|
| 1462 |
+
{
|
| 1463 |
+
"type": "text",
|
| 1464 |
+
"text": "We gratefully acknowledge the valuable cooperation of Runzhe Yang from Princeton University and Shuchang Liu from Rutgers University.",
|
| 1465 |
+
"bbox": [
|
| 1466 |
+
81,
|
| 1467 |
+
359,
|
| 1468 |
+
482,
|
| 1469 |
+
402
|
| 1470 |
+
],
|
| 1471 |
+
"page_idx": 8
|
| 1472 |
+
},
|
| 1473 |
+
{
|
| 1474 |
+
"type": "text",
|
| 1475 |
+
"text": "REFERENCES",
|
| 1476 |
+
"text_level": 1,
|
| 1477 |
+
"bbox": [
|
| 1478 |
+
84,
|
| 1479 |
+
412,
|
| 1480 |
+
202,
|
| 1481 |
+
426
|
| 1482 |
+
],
|
| 1483 |
+
"page_idx": 8
|
| 1484 |
+
},
|
| 1485 |
+
{
|
| 1486 |
+
"type": "list",
|
| 1487 |
+
"sub_type": "ref_text",
|
| 1488 |
+
"list_items": [
|
| 1489 |
+
"[1] Himan Abdollahpouri, Robin Burke, and Bamshad Mobasher. 2017. Controlling popularity bias in learning-to-rank recommendation. In Proceedings of the eleventh ACM conference on recommender systems. 42-46.",
|
| 1490 |
+
"[2] Himan Abdollahpouri, Masoud Mansoury, Robin Burke, and Bamshad Mobasher. 2019. The unfairness of popularity bias in recommendation. arXiv preprint arXiv:1907.13286 (2019).",
|
| 1491 |
+
"[3] Axel Abels, Diederik Roijers, Tom Lenaerts, Ann Nowé, and Denis Steckelmacher. 2019. Dynamic weights in multi-objective deep reinforcement learning. In International Conference on Machine Learning. PMLR, 11-20.",
|
| 1492 |
+
"[4] Alex Beutel, Jilin Chen, Tulsee Doshi, Hai Qian, Li Wei, Yi Wu, Lukasz Heldt, Zhe Zhao, Lichan Hong, Ed H Chi, et al. 2019. Fairness in the recommendation ranking through pairwise comparisons. In Proceedings of the 25th ACM SIGKDD.",
|
| 1493 |
+
"[5] L Elisa Celis, Sayash Kapoor, Farnood Salehi, and Nisheeth Vishnoi. 2019. Controlling polarization in personalization: An algorithmic framework. In Proceedings of the conference on fairness, accountability, and transparency. 160-169.",
|
| 1494 |
+
"[6] Haokun Chen, Xinyi Dai, Han Cai, Weinan Zhang, Xuejian Wang, Ruiming Tang, Yuzhou Zhang, and Yong Yu. 2019. Large-scale interactive recommendation with tree-structured policy gradient. In Proceedings of the AAAI, Vol. 33. 3312-3320.",
|
| 1495 |
+
"[7] Hanxiong Chen, Li Yunqi, Shi Shaoyun, Shuchang Liu, He Zhu, and Yongfeng Zhang. 2022. Graph Logic Reasoning for Recommendation and Link Prediction. In Proceedings of the 15th ACM International Conference on Web Search and Data Mining.",
|
| 1496 |
+
"[8] Le Chen, Ruijun Ma, Aniko Hannák, and Christo Wilson. [n.d.]. Investigating the Impact of Gender on Rank in Resume Search Engines. In Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems.",
|
| 1497 |
+
"[9] Minmin Chen, Alex Beutel, Paul Covington, Sagar Jain, Francois Belletti, and Ed H Chi. 2019. Top-k off-policy correction for a REINFORCE recommender system. In Proceedings of the 12th ACM WSDM. 456-464.",
|
| 1498 |
+
"[10] Kyunghyun Cho, Bart van Merrienboer, Dzmitry Bahdanau, and Yoshua Bengio. 2014. On the Properties of Neural Machine Translation: Encoder-Decoder Approaches. in SsST@EMNLP.",
|
| 1499 |
+
"[11] Gabriel Dulac-Arnold, Richard Evans, Peter Sunehag, and Ben Coppin. 2015. Reinforcement Learning in Large Discrete Action Spaces. (2015). arXiv:1512.07679",
|
| 1500 |
+
"[12] Sanghamitra Dutta, Dennis Wei, Hazar Yueksel, Pin-Yu Chen, Sijia Liu, and Kush Varshney. 2020. Is there a trade-off between fairness and accuracy? a perspective using mismatched hypothesis testing. In International Conference on Machine Learning. PMLR, 2803-2813.",
|
| 1501 |
+
"[13] Zuohui Fu, Yikun Xian, Ruoyuan Gao, Jieyu Zhao, Qiaoying Huang, Yingqiang Ge, Shuyuan Xu, Shijie Geng, Chirag Shah, Yongfeng Zhang, et al. 2020. Fairness-aware explainable recommendation over knowledge graphs. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval. 69-78.",
|
| 1502 |
+
"[14] Ruoyuan Gao and Chirag Shah. 2021. Addressing Bias and Fairness in Search Systems. In Proceedings of the 44th International ACM SIGIR (Virtual Event, Canada) (SIGIR '21). 4 pages. https://doi.org/10.1145/3404835.3462807"
|
| 1503 |
+
],
|
| 1504 |
+
"bbox": [
|
| 1505 |
+
86,
|
| 1506 |
+
429,
|
| 1507 |
+
482,
|
| 1508 |
+
883
|
| 1509 |
+
],
|
| 1510 |
+
"page_idx": 8
|
| 1511 |
+
},
|
| 1512 |
+
{
|
| 1513 |
+
"type": "list",
|
| 1514 |
+
"sub_type": "ref_text",
|
| 1515 |
+
"list_items": [
|
| 1516 |
+
"[15] Yingqiang Ge, Shuchang Liu, Ruoyuan Gao, Yikun Xian, Yunqi Li, Xiangyu Zhao, Changhua Pei, Fei Sun, Junfeng Ge, Wenwu Ou, et al. 2021. Towards Long-term Fairness in Recommendation. In Proceedings of the 14th ACM International Conference on Web Search and Data Mining. 445-453.",
|
| 1517 |
+
"[16] Yingqiang Ge, Shuchang Liu, Ruoyuan Gao, Yikun Xian, Yunqi Li, Xiangyu Zhao, Changhua Pei, Fei Sun, Junfeng Ge, Wenwu Ou, and Yongfeng Zhang, 2021. Towards Long-Term Fairness in Recommendation. In Proceedings of the 14th ACM International Conference on Web Search and Data Mining (Virtual Event, Israel) (WSDM '21), 445-453. https://doi.org/10.1145/3437963.3441824",
|
| 1518 |
+
"[17] Yingqiang Ge, Shuyuan Xu, Shuchang Liu, Zuohui Fu, Fei Sun, and Yongfeng Zhang. 2020. Learning Personalized Risk Preferences for Recommendation. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval. 409-418.",
|
| 1519 |
+
"[18] Yingqiang Ge, Shuya Zhao, Honglu Zhou, Changhua Pei, Fei Sun, Wenwu Ou, and Yongfeng Zhang. 2020. Understanding echo chambers in e-commerce recommender systems. In Proceedings of the 43rd international ACM SIGIR conference on research and development in information retrieval. 2261-2270.",
|
| 1520 |
+
"[19] Sahin Cem Geyik, Stuart Ambler, and Krishnamaram Kenthapadi. 2019. Fairness-Aware Ranking in Search & Recommendation Systems with Application to LinkedIn Talent Search. In Proceedings of KDD. ACM, 2221-2231.",
|
| 1521 |
+
"[20] Neil J. Hurley. 2013. Personalised Ranking with Diversity. In Proceedings of the 7th ACM Conference on Recommender Systems (Hong Kong, China) (RecSys '13). 379-382. https://doi.org/10.1145/2507157.2507226",
|
| 1522 |
+
"[21] Tamas Jambor and Jun Wang, 2010. Optimizing multiple objectives in collaborative filtering. In Proceedings of the fourth ACM conference on Recommender systems. 55-62.",
|
| 1523 |
+
"[22] Mohammad Mahdi Kamani, Rana Forsati, James Z Wang, and Mehrdad Mahdavi. 2021. Pareto Efficient Fairness in Supervised Learning: From Extraction to Tracing. arXiv preprint arXiv:2104.01634 (2021).",
|
| 1524 |
+
"[23] Michael Kearns and Aaron Roth. 2019. The ethical algorithm: The science of socially aware algorithm design. Oxford University Press.",
|
| 1525 |
+
"[24] Yehuda Koren, Robert Bell, and Chris Volinsky. 2009. Matrix factorization techniques for recommender systems. Computer 8 (2009), 30-37.",
|
| 1526 |
+
"[25] Yunqi Li, Hanxiong Chen, Zuohui Fu, Yingqiang Ge, and Yongfeng Zhang. 2021. User-oriented Fairness in Recommendation. In Proceedings of the Web Conference 2021, 624-632.",
|
| 1527 |
+
"[26] Yunqi Li, Hanxiong Chen, Shuyuan Xu, Yingqiang Ge, and Yongfeng Zhang. 2021. Towards Personalized Fairness based on Causal Notion. arXiv preprint arXiv:2105.09829 (2021).",
|
| 1528 |
+
"[27] Yunqi Li, Hanxiong Chen, Shuyuan Xu, Yingqiang Ge, and Yongfeng Zhang. 2021. Towards Personalized Fairness Based on Causal Notion. In Proceedings of the 44th International ACM SIGIR (Virtual Event, Canada) (SIGIR '21). 10 pages. https://doi.org/10.1145/3404835.3462966",
|
| 1529 |
+
"[28] Yunqi Li, Yingqiang Ge, and Yongfeng Zhang. 2021. CIKM 2021 Tutorial on Fairness of Machine Learning in Recommender Systems. In Proceedings of the 30th ACM International Conference on Information & Knowledge Management. 4857-4860.",
|
| 1530 |
+
"[29] Yunqi Li, Yingqiang Ge, and Yongfeng Zhang. 2021. Tutorial on Fairness of Machine Learning in Recommender Systems.",
|
| 1531 |
+
"[30] Timothy P. Lillicrap, Jonathan J. Hunt, Alexander Pritzel, Nicolas Manfred Otto Heess, Tom Erez, Yuval Tassa, David Silver, and Daan Wierstra. 2016. Continuous control with deep reinforcement learning. CoRR abs/1509.02971 (2016).",
|
| 1532 |
+
"[31] Xiao Lin, Hongjie Chen, Changhua Pei, Fei Sun, Xuanji Xiao, Hanxiao Sun, Yongfeng Zhang, Wenwu Ou, and Peng Jiang. 2019. A pareto-efficient algorithm for multiple objective optimization in e-commerce recommendation. In Proceedings of the 13th ACM Conference on recommender systems. 20-28.",
|
| 1533 |
+
"[32] Zachary Lipton, Julian McAuley, and Alexandra Chouldechova. 2018. Does mitigating ML's impact disparity require treatment disparity?. In Advances in Neural Information Processing Systems. Curran Associates, Inc.",
|
| 1534 |
+
"[33] Yudan Liu, Kaikai Ge, Xu Zhang, and Leyu Lin. 2019. Real-Time Attention Based Look-Alike Model for Recommender System. In Proceedings of SIGKDD'19 (Anchorage, AK, USA) (KDD '19), 2765-2773.",
|
| 1535 |
+
"[34] Tariq Mahmood and Francesco Ricci. 2007. Learning and adaptivity in interactive recommender systems. In Proceedings of the 9th international conference on Electronic commerce. 75-84.",
|
| 1536 |
+
"[35] Tariq Mahmood and Francesco Ricci. 2009. Improving recommender systems with adaptive conversational strategies. In Proceedings of the 20th ACM conference on Hypertext and hypermedia. 73-82.",
|
| 1537 |
+
"[36] Sean M McNee, John Riedl, and Joseph A Konstan. 2006. Being accurate is not enough: how accuracy metrics have hurt recommender systems. In CHI'06 extended abstracts on Human factors in computing systems. 1097-1101.",
|
| 1538 |
+
"[37] Changhua Pei, Xinru Yang, Qing Cui, Xiao Lin, Fei Sun, Peng Jiang, Wenwu Ou, and Yongfeng Zhang. 2019. Value-aware recommendation based on reinforcement profit maximization. In The World Wide Web Conference. 3123-3129.",
|
| 1539 |
+
"[38] Steffen Rendle, Christoph Freudenthaler, Zeno Gantner, and Lars Schmidt-Thieme. 2009. BPR: Bayesian personalized ranking from implicit feedback. In Proceedings of the 25th conference on uncertainty in artificial intelligence. AUAI Press, 452-461."
|
| 1540 |
+
],
|
| 1541 |
+
"bbox": [
|
| 1542 |
+
516,
|
| 1543 |
+
108,
|
| 1544 |
+
913,
|
| 1545 |
+
885
|
| 1546 |
+
],
|
| 1547 |
+
"page_idx": 8
|
| 1548 |
+
},
|
| 1549 |
+
{
|
| 1550 |
+
"type": "list",
|
| 1551 |
+
"sub_type": "ref_text",
|
| 1552 |
+
"list_items": [
|
| 1553 |
+
"[39] Marco Tulio Ribeiro, Anisio Lacerda, Adriano Veloso, and Nivio Ziviani. 2012. Pareto-efficient hybridization for multi-objective recommender systems. In Proceedings of the sixth ACM conference on Recommender systems. 19-26.",
|
| 1554 |
+
"[40] Marco Tulio Ribeiro, Nivio Ziviani, Edleno Silva De Moura, Itamar Hata, Anisio Lacerda, and Adriano Veloso. 2014. Multiobjective pareto-efficient approaches for recommender systems. ACM Transactions on Intelligent Systems and Technology (TIST) 5, 4 (2014), 1-20.",
|
| 1555 |
+
"[41] Guy Shani, David Heckerman, and Ronen I Brafman. 2005. An MDP-based recommender system. Journal of Machine Learning Research 6, Sep (2005).",
|
| 1556 |
+
"[42] David Silver, Guy Lever, Nicolas Manfred Otto Heess, Thomas Degris, Daan Wierstra, and Martin A. Riedmiller. 2014. Deterministic Policy Gradient Algorithms. In ICML.",
|
| 1557 |
+
"[43] Ashudeep Singh and Thorsten Joachims. 2018. Fairness of Exposure in Rankings. In Proceedings of the 24th ACM SIGKDD (London, United Kingdom).",
|
| 1558 |
+
"[44] Juntao Tan, Shuyuan Xu, Yingqiang Ge, Yunqi Li, Xu Chen, and Yongfeng Zhang. 2021. Counterfactual explainable recommendation. In Proceedings of the 30th ACM International Conference on Information & Knowledge Management. 1784-1793.",
|
| 1559 |
+
"[45] Jiliang Tang, Huiji Gao, Huan Liu, and Atish Das Sarma. 2012. ETrust: Understanding Trust Evolution in an Online World. In Proceedings of the 18th ACM SIGKDD (Beijing, China) (KDD '12). https://doi.org/10.1145/2339530.2339574",
|
| 1560 |
+
"[46] Xiang Wang, Xiangnan He, Meng Wang, Fuli Feng, and Tat-Seng Chua. 2019. Neural graph collaborative filtering. In Proceedings of the 42nd international ACM SIGIR conference on Research and development in Information Retrieval. 165-174.",
|
| 1561 |
+
"[47] Haolun Wu, Chen Ma, Bhaskar Mitra, Fernando Diaz, and Xue Liu. 2021. Multi-FR: A Multi-Objective Optimization Method for Achieving Two-sided Fairness in E-commerce Recommendation. arXiv preprint arXiv:2105.02951 (2021).",
|
| 1562 |
+
"[48] Yikun Xian, Zuohui Fu, S Muthukrishnan, Gerard De Melo, and Yongfeng Zhang. 2019. Reinforcement knowledge graph reasoning for explainable recommendation. In SIGIR.",
|
| 1563 |
+
"[49] Yikun Xian, Zuohui Fu, Handong Zhao, Yingqiang Ge, Xu Chen, Qiaoying Huang, Shijie Geng, Zhou Qin, Gerard De Melo, Shan Muthukrishnan, and Yongfeng"
|
| 1564 |
+
],
|
| 1565 |
+
"bbox": [
|
| 1566 |
+
84,
|
| 1567 |
+
108,
|
| 1568 |
+
483,
|
| 1569 |
+
421
|
| 1570 |
+
],
|
| 1571 |
+
"page_idx": 9
|
| 1572 |
+
},
|
| 1573 |
+
{
|
| 1574 |
+
"type": "list",
|
| 1575 |
+
"sub_type": "ref_text",
|
| 1576 |
+
"list_items": [
|
| 1577 |
+
"Zhang. 2020. CAFE: Coarse-to-fine neural symbolic reasoning for explainable recommendation. In CIKM.",
|
| 1578 |
+
"[50] Ruobing Xie, Yanlei Liu, Shaoliang Zhang, Rui Wang, Feng Xia, and Leyu Lin. 2021. Personalized Approximate Pareto-Efficient Recommendation. In Proceedings of the Web Conference 2021. 3839-3849.",
|
| 1579 |
+
"[51] Shuyuan Xu, Yingqiang Ge, Yunqi Li, Zuohui Fu, Xu Chen, and Yongfeng Zhang. 2021. Causal Collaborative Filtering. arXiv preprint arXiv:2102.01868 (2021).",
|
| 1580 |
+
"[52] Runzhe Yang, Xingyuan Sun, and Karthik Narasimhan. 2019. A Generalized Algorithm for Multi-Objective Reinforcement Learning and Policy Adaptation. Advances in Neural Information Processing Systems 32 (2019), 14636-14647.",
|
| 1581 |
+
"[53] Tao Yang and Qingyao Ai. 2021. Maximizing Marginal Fairness for Dynamic Learning to Rank. In Proceedings of the Web Conference 2021. 137-145.",
|
| 1582 |
+
"[54] Sirui Yao and Bert Huang. 2017. Beyond Parity: Fairness Objectives for Collaborative Filtering. In Advances in Neural Information Processing Systems.",
|
| 1583 |
+
"[55] Muhammad Bilal Zafar, Isabel Valera, Manuel Gomez Rodriguez, Krishna P. Gummadi, and Adrian Weller. 2017. From Parity to Preference-Based Notions of Fairness in Classification. In Proceedings of NIPS'17.",
|
| 1584 |
+
"[56] Xiangyu Zhao, Liang Zhang, Zhuoye Ding, Long Xia, Jiliang Tang, and Dawei Yin. 2018. Recommendations with negative feedback via pairwise deep reinforcement learning. In Proceedings of the 24th ACM SIGKDD. 1040-1048.",
|
| 1585 |
+
"[57] Xiangyu Zhao, Liang Zhang, Zhuoye Ding, Dawei Yin, Yihong Zhao, and Jiliang Tang. 2018. Deep Reinforcement Learning for List-wise Recommendations. CoRR abs/1801.00209 (2018). arXiv:1801.00209",
|
| 1586 |
+
"[58] Guanjie Zheng, Fuzheng Zhang, Zihan Zheng, Yang Xiang, Nicholas Jing Yuan, Xing Xie, and Zhenhui Li. 2018. DRN: A deep reinforcement learning framework for news recommendation. In Proceedings of WWW '18. 167-176.",
|
| 1587 |
+
"[59] Ziwei Zhu, Xia Hu, and James Caverlee. [n.d.]. Fairness-Aware Tensor-Based Recommendation. In Proceedings of CIKM '18 (Torino, Italy) (CIKM '18). 1153-1162.",
|
| 1588 |
+
"[60] Eckart Zitzler, Marco Laumanns, and Lothar Thiele. 2001. SPEA2: Improving the strength Pareto evolutionary algorithm. TIK-report 103 (2001)."
|
| 1589 |
+
],
|
| 1590 |
+
"bbox": [
|
| 1591 |
+
516,
|
| 1592 |
+
108,
|
| 1593 |
+
913,
|
| 1594 |
+
411
|
| 1595 |
+
],
|
| 1596 |
+
"page_idx": 9
|
| 1597 |
+
}
|
| 1598 |
+
]
|
2201.00xxx/2201.00140/b6725be1-2dfc-4354-9009-f7657b4687af_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2201.00xxx/2201.00140/b6725be1-2dfc-4354-9009-f7657b4687af_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:deaa24b74ef621d221be75227c2c3a758d96a3042fb40f25e9af145bb1617d4e
|
| 3 |
+
size 772648
|
2201.00xxx/2201.00140/full.md
ADDED
|
@@ -0,0 +1,368 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Toward Pareto Efficient Fairness- Utility Trade-off in Recommendation through Reinforcement Learning
|
| 2 |
+
|
| 3 |
+
Yingqiang Ge†, Xiaoting Zhao*, Lucia Yu*, Saurabh Paul*, Diane Hu*, Chu-Cheng Hsieh*, Yongfeng Zhang†
|
| 4 |
+
†Rutgers University * Etsy Inc.
|
| 5 |
+
|
| 6 |
+
yingqiang.ge@rutgers.edu,{xzhao,lyu,spaul,dhu,chsieh}@etsy.com,yongfeng.zhang@rutgers.edu
|
| 7 |
+
|
| 8 |
+
# ABSTRACT
|
| 9 |
+
|
| 10 |
+
The issue of fairness in recommendation is becoming increasingly essential as Recommender Systems (RS) touch and influence more and more people in their daily lives. In fairness-aware recommendation, most of the existing algorithmic approaches mainly aim at solving a constrained optimization problem by imposing a constraint on the level of fairness while optimizing the main recommendation objective, e.g., click through rate (CTR). While this alleviates the impact of unfair recommendations, the expected return of an approach may significantly compromise the recommendation accuracy due to the inherent trade-off between fairness and utility. This motivates us to deal with these conflicting objectives and explore the optimal trade-off between them in recommendation. One conspicuous approach is to seek a Pareto efficient/optimal solution to guarantee optimal compromises between utility and fairness. Moreover, considering the needs of real-world e-commerce platforms, it would be more desirable if we can generalize the whole Pareto Frontier, so that the decision-makers can specify any preference of one objective over another based on their current business needs. Therefore, in this work, we propose a fairness-aware recommendation framework using multi-objective reinforcement learning (MORL), called MoFIR (pronounced "more fair"), which is able to learn a single parametric representation for optimal recommendation policies over the space of all possible preferences. Specially, we modify traditional Deep Deterministic Policy Gradient (DDPG) by introducing conditioned network (CN) into it, which conditions the networks directly on these preferences and outputs Q-value-vectors. Experiments on several real-world recommendation datasets verify the superiority of our framework on both fairness metrics and recommendation measures when compared with all other baselines. 
We also extract the approximate Pareto Frontier on real-world datasets generated by MoFIR and compare to state-of-the-art fairness methods.
|
| 11 |
+
|
| 12 |
+
# CCS CONCEPTS
|
| 13 |
+
|
| 14 |
+
- Information systems $\rightarrow$ Recommender systems; - Computing methodologies $\rightarrow$ Sequential decision making.
|
| 15 |
+
|
| 16 |
+
Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than ACM must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.
|
| 17 |
+
|
| 18 |
+
WSDM '22, February 21-25, 2022, Tempe, AZ, USA
|
| 19 |
+
|
| 20 |
+
$\odot$ 2022 Association for Computing Machinery.
|
| 21 |
+
|
| 22 |
+
ACM ISBN 978-1-4503-9132-0/22/02...$15.00
|
| 23 |
+
|
| 24 |
+
https://doi.org/10.1145/3488560.3498487
|
| 25 |
+
|
| 26 |
+
# KEYWORDS
|
| 27 |
+
|
| 28 |
+
Recommender System; Multi-Objective Reinforcement Learning; Pareto Efficient Fairness; Unbiased Recommendation
|
| 29 |
+
|
| 30 |
+
# ACM Reference Format:
|
| 31 |
+
|
| 32 |
+
Yingqiang Ge, Xiaoting Zhao, Lucia Yu, Saurabh Paul, Diane Hu, Chu-Cheng Hsieh, Yongfeng Zhang. 2022. Toward Pareto Efficient Fairness-Utility Trade-off in Recommendation through Reinforcement Learning. In Proceedings of the Fifteenth ACM International Conference on Web Search and Data Mining (WSDM '22), February 21–25, 2022, Tempe, AZ, USA. ACM, New York, NY, USA, 10 pages. https://doi.org/10.1145/3488560.3498487
|
| 33 |
+
|
| 34 |
+
# 1 INTRODUCTION
|
| 35 |
+
|
| 36 |
+
Personalized recommender systems (RS), which are extensively employed in e-commerce platforms, have been acknowledged for their capacity to deliver high-quality services that bridge the gap between products and customers [7, 17, 44, 51]. Despite these huge advantages, several recent studies also raised concerns that RS may be vulnerable to algorithmic bias in several aspects, which may result in detrimental consequences for underrepresented or disadvantaged groups [19, 29, 43, 59]. For example, the "Matthew Effect" becomes increasingly evident in RS, which creates a huge disparity in the exposure of the producers/products in real-world recommendation systems [16, 18, 33]. Fortunately, these concerns about algorithmic fairness have resulted in a resurgence of interest to develop fairness-aware recommendation models to ensure such models do not become a source of unfair discrimination in recommendation [13, 15, 26, 28].
|
| 37 |
+
|
| 38 |
+
In the area of fairness-aware recommendation, the methods can be roughly divided into three categories: pre-processing, in-processing and post-processing algorithms [14, 29]. Pre-processing methods usually aim to remove bias in data, e.g., sampling from data to cover items of all groups or balancing data to increase coverage of minority groups. In-processing methods aim at encoding fairness as part of the objective function, while post-processing methods tend to modify the presentations of the results. Even though all of them could successfully alleviate the impact of unfair recommendations to some extent, the expected return of an approach may significantly compromise the recommendation accuracy due to the inherent trade-off between fairness and utility, which has been demonstrated by several recent work both empirically and theoretically [22, 23, 32, 55].
|
| 39 |
+
|
| 40 |
+
In light of the above, one fundamental research questions is asked, RQ1: Can we learn a recommendation model that allows for higher fairness without significantly compromising recommendation accuracy? And a more challenging one is, RQ2: Can we learn a single recommendation model that is able to produce optimal recommendation policies under different levels of fairness-utility trade-off
|
| 41 |
+
|
| 42 |
+
so that it would be more desirable for decision-makers of e-commerce platforms to specify any preference of one objective over another based on their current business needs?
|
| 43 |
+
|
| 44 |
+
To deal with RQ1, one conspicuous approach is to seek a Pareto optimal solution to guarantee optimal compromises between utility and fairness, where a Pareto efficient/optimal solution means no single objective can be further improved without hurting the others. To find solutions with different levels of trade-off between utility and fairness (RQ2), we need to generalize their Pareto frontier in the objective space, where Pareto frontier denotes a set, whose elements are all Pareto optimal. Unfortunately, state-of-the-art approaches of fairness-aware recommendation are limited in understanding the fairness-utility trade-off.
|
| 45 |
+
|
| 46 |
+
Therefore, in this work, we aim to address the above problems and propose a fairness-aware recommendation framework using multi-objective reinforcement learning (MORL) with linear preferences, called MoFIR, which aims to learn a single parametric representation for optimal recommendation policies over the space of all possible preferences. Technically, we first formulate the fairness-aware recommendation task as a Multi-Objective Markov Decision Process (MOMDP), with one recommendation objective, e.g., CTR, and one fairness objective, e.g., item exposure fairness (our method is able to generalize to more recommendation objectives as well as more fairness objectives). Second, we modify classic and commonly-used RL algorithm—DDPG [42] by introducing conditioned networks [3] into it, which is a representative method to deal with multi-objective reinforcement learning. Specially, we condition the policy network and the value network directly on the preferences by augmenting them to the feature space. Finally, we utilize the vectorized Q-value functions together with modified loss function to update the parameters. The contributions of this work can be summarized as follows:
|
| 47 |
+
|
| 48 |
+
- We study the problem of Pareto optimal/efficient fairness-utility trade-off in recommendation and extensively explore their Pareto frontier to better satisfy real-world needs;
|
| 49 |
+
- We formulate the problem into a MOMDP and solve it through a MORL framework, MoFIR, which is optimized over the entire space of preferences in a domain, and allows the trained model to produce the optimal policy for any specified preferences;
|
| 50 |
+
- Unlike prior methods for fairness-aware recommendation, the proposed framework does not employ any relaxation for objectives in the optimization problem, hence it could achieve state-of-the-art results;
|
| 51 |
+
- Experiments on several real-world recommendation datasets verify the superiority of our framework on both fairness measures and recommendation performance when compared with all other baselines.
|
| 52 |
+
|
| 53 |
+
# 2 RELATED WORK
|
| 54 |
+
|
| 55 |
+
# 2.1 Fairness in Recommendation
|
| 56 |
+
|
| 57 |
+
There have been growing concerns on fairness in recommendation as recommender systems touch and influence more and more people in their daily lives. Several recent works have found various types of bias in recommendations, such as gender and race [2, 8], item popularity [15, 16, 59], user feedback [13, 25, 27] and opinion polarity [54]. There are two primary paradigms adopted in recent
|
| 58 |
+
|
| 59 |
+
studies on algorithmic discrimination: individual fairness and group fairness. Individual fairness requires that each similar individual should be treated similarly, while group fairness requires that the protected groups should be treated similarly to the advantaged group or the populations as a whole. Our work focuses on the item popularity fairness from a group level, yet it can be used to solve multiple types of fairness simultaneously by properly defining and adding them as additional objectives.
|
| 60 |
+
|
| 61 |
+
The relevant methods related to fairness in ranking and recommendation can be roughly divided into three categories: preprocessing, in-processing and post-processing algorithms [14, 28, 29]. First of all, pre-processing methods usually aim to minimize the bias in data as bias may arise from the data source. This includes fairness-aware sampling methodologies in the data collection process to cover items of all groups, or balancing methodologies to increase coverage of minority groups, or repairing methodologies to ensure label correctness, remove disparate impact [14]. However, most of the time, we do not have access to the data collection process, but are given the dataset. Secondly, in-processing methods aim at encoding fairness as part of the objective function, typically as a regularizer [1, 4]. Finally, post-processing methods tend to modify the presentations of the results, e.g., re-ranking through linear programming [25, 43, 53] or multi-armed bandit [5]. However, there is no free lunch, imposing fairness constraints to the main learning task introduces a trade-off between these objectives, which have been asserted in several studies [22, 23, 32, 55], e.g., Dutta et al. [12] showed that because of noise on the underrepresented groups the trade-off between accuracy and equality of opportunity exists.
|
| 62 |
+
|
| 63 |
+
Unfortunately, there is very few work of fairness-aware recommendation that can be found to study the fairness-utility trade-off. The closest one to our work is [47], which mainly focused on the trade-off between two-sided fairness in e-commerce recommendation. [47] used a traditional multiple gradient descent algorithm to solve multi-objective optimization problems, meaning that they need to train one network per point on the Pareto frontier, while our MoFIR generates the full Pareto frontier of solutions in a single optimization run. Besides, the authors relaxed all their objectives to get their differentiable approximations, which, to some extent, hurt its performance, as is shown in the experiment part, Fig. 2.
|
| 64 |
+
|
| 65 |
+
# 2.2 Multi-Objective Recommendation
|
| 66 |
+
|
| 67 |
+
Recommendation with multiple objectives is a significant but challenging problem, with the core difficulty stemming from the potential conflicts between objectives. In most real-world recommendation systems, recommendation accuracy (e.g., CTR-oriented objectives) is the dominating factor, while some studies believed that other characteristics, such as usability, profitability, usefulness, or diversity should be considered at the same time [20, 21, 36]. When multiple objectives are concerned, it is expected to get a Pareto optimal/efficient recommendation [31, 39, 50].
|
| 68 |
+
|
| 69 |
+
The approaches on recommendation with multiple objectives to achieve Pareto efficiency can be categorized into two groups: evolutionary algorithm [60] and scalarization [31]. Ribeiro et al. [39, 40] jointly considered multiple trained recommendation algorithms with a Pareto-efficient manner, and conducted an evolutionary algorithm to find the appropriate parameters for weighted model
|
| 70 |
+
|
| 71 |
+
combination. Besides, Lin et al. [31] optimized GMV and CTR in e-commerce simultaneously based on multiple-gradient descent algorithm, which combines scalarization with Pareto-efficient SGD, and used a relaxed KKT condition. Our proposed method, MoFIR, belongs to scalarization, however, compared with earlier attempts in multi-objective recommendation [31, 47], our method learns to adapt a single network for all the trade-off combinations of the inputted preference vectors, therefore it is able to approximate all solutions of the Pareto frontier after a single optimization run.
|
| 72 |
+
|
| 73 |
+
# 2.3 RL for Recommendation
|
| 74 |
+
|
| 75 |
+
RL-based recommenders have recently become an important and attractive topic, as it is natural to model the recommendation process as a Markov Decision Process (MDP) and use RL agents to capture the dynamics in recommendation scenarios [34, 35, 41, 48, 49, 58]. Generally speaking, RL-based recommendation systems can be further classified into two categories: policy-based [6, 9, 11] or value-based [37, 56, 58] methods. On one hand, policy-based methods aim to learn strategies that generate actions based on state (such as recommending items). These methods are optimized by policy gradient, which can be deterministic approaches [11, 30, 42] or stochastic approaches [6, 9]. On the other hand, value-based methods aims to model the quality (e.g. Q-value) of actions so that the best action corresponds to the one with the highest Q-value. Apart from using RL in general recommendation task, there also existed several works focusing on using RL in explainable recommendation through knowledge graphs [48, 49].
|
| 76 |
+
|
| 77 |
+
Currently, there are very few studies using MORL in recommendation. Xie et al. [50] studied multi-objective recommendation to capture users' objective-level preferences. However, unlike our proposed MoFIR, which learns a single parametric representation for optimal recommendation policies, they conducted a Pareto-oriented RL to generate the personalized objective weights in scalarization for each user, which is a totally different problem formulation.
|
| 78 |
+
|
| 79 |
+
# 3 PRELIMINARY
|
| 80 |
+
|
| 81 |
+
# 3.1 Markov Decision Processes
|
| 82 |
+
|
| 83 |
+
In reinforcement learning, agents aim at learning to act in an environment in order to maximize their cumulative reward. A popular model for such problems is Markov Decision Processes (MDP), which is a tuple $M = (S, \mathcal{A}, \mathcal{P}, \mathcal{R}, \mu, \gamma)$ , where $S$ is a set of $n$ states, $\mathcal{A}$ is a set of $m$ actions, $\mathcal{P}: S \times \mathcal{A} \times S \to [0,1]$ denotes the transition probability function, $\mathcal{R}: S \times \mathcal{A} \times S \to \mathbb{R}$ is the reward function, $\mu: S \to [0,1]$ is the starting state distribution, and $\gamma \in [0,1)$ is the discount factor. We denote the set of all stationary policies by $\Pi$ , where a stationary policy $\pi \in \Pi: S \to P(\mathcal{A})$ is a map from states to probability distributions over actions, with $\pi(a|s)$ denoting the probability of selecting action $a$ in state $s$ . We aim to learn a policy $\pi \in \Pi$ , able to maximize a performance measure, $J(\pi)$ , which is typically taken to be the infinite horizon discounted total return,
|
| 84 |
+
|
| 85 |
+
$$
|
| 86 |
+
J(\pi) \doteq \underset{\tau \sim \pi}{\mathrm{E}} \left[ \sum_{t = 0}^{\infty} \gamma^{t} R\left(s_{t}, a_{t}, s_{t + 1}\right) \right], \tag{1}
|
| 87 |
+
$$
|
| 88 |
+
|
| 89 |
+
where $\tau$ denotes a trajectory, e.g., $\tau = (s_0, a_0, s_1, a_1, \ldots)$ , and $\tau \sim \pi$ indicates that the distribution over trajectories depends on $\pi: s_0 \sim \mu, a_t \sim \pi (\cdot | s_t), s_{t+1} \sim P (\cdot | s_t, a_t)$ . We denote $R(\tau)$ as the
|
| 90 |
+
|
| 91 |
+
discounted rewards of a trajectory, the on-policy value function as $V^{\pi}(s) \doteq \mathrm{E}_{\tau \sim \pi} [R(\tau)|s_0 = s]$ , the on-policy action-value function as $Q^{\pi}(s,a) \doteq \mathrm{E}_{\tau \sim \pi} [R(\tau)|s_0 = s,a_0 = a]$ , and the advantage function as $A^{\pi}(s,a) \doteq Q^{\pi}(s,a) - V^{\pi}(s)$ .
|
| 92 |
+
|
| 93 |
+
# 3.2 Multi-Objective Markov Decision Processes
|
| 94 |
+
|
| 95 |
+
Multi-Objective Markov Decision Processes (MOMDP) are MDPs with a vector-valued reward function $\mathbf{r}_t = \mathbf{R}(s_t, a_t)$ , where each component of $\mathbf{r}_t$ corresponds to one certain objective. A scalarization function $f$ maps the multi-objective value of a policy $\pi$ to a scalar value. In this work, we consider the commonly-used class of MOMDPs with linear preference functions, e.g., $f_{\omega}(\mathbf{R}(s, a)) = \boldsymbol{\omega} \cdot \mathbf{R}(s, a)$ . It is worth noting that if $\boldsymbol{\omega}$ is fixed to a single value, this MOMDP collapses into a standard MDP. An optimal solution for an MOMDP under linear $f$ is a convex coverage set (CCS), e.g., a set of undominated policies containing at least one optimal policy for any linear scalarization.
|
| 96 |
+
|
| 97 |
+
# 3.3 Conditioned Network
|
| 98 |
+
|
| 99 |
+
Abels et al. [3] studied multi-objective reinforcement learning with linear preferences and proposed a novel algorithm for learning a single Q-network that is optimized over the entire space of preferences in a domain. The main idea is called Conditioned Network (CN), in which a Q-Network is augmented to output weight-dependent multi-objective Q-value-vectors, as is shown in the right side of Fig. 1 (Conditioned Critic Network, where action and state representations together with weight vector are inputted to the network). Besides, to promote quick convergence on the new weight vector's policy and to maintain previously learned policies, the authors updated each experience tuple in a mini-batch with respect to the current weight vector and a random previously encountered weight vector. Specially, given a mini-batch of trajectories, they computed the loss for a given trajectory $(s_j,a_j,\mathbf{r}_j,s_{j + 1})$ as the sum of the loss on the active weight vector $\omega_{t}$ and on $\omega_{j}$ randomly sampled from the set of encountered weights.
|
| 100 |
+
|
| 101 |
+
$$
|
| 102 |
+
\frac {1}{2} \left[ \left| \mathbf {y} _ {\omega_ {t}} ^ {(j)} - \mathbf {Q} _ {C N} \left(a _ {j}, s _ {j}; \omega_ {t}\right) \right| + \left| \mathbf {y} _ {\omega_ {j}} ^ {(j)} - \mathbf {Q} _ {C N} \left(a _ {j}, s _ {j}; \omega_ {j}\right) \right| \right] \tag {2}
|
| 103 |
+
$$
|
| 104 |
+
|
| 105 |
+
$$
|
| 106 |
+
\mathbf{y}_{\omega}^{(j)} = \mathbf{r}_{j} + \gamma \mathbf{Q}_{CN}^{-}\left(\underset{a \in A}{\operatorname{argmax}} \, \mathbf{Q}_{CN}(a, s_{j + 1}; \omega) \cdot \omega, s_{j + 1}; \omega\right) \tag{3}
|
| 107 |
+
$$
|
| 108 |
+
|
| 109 |
+
where $\mathbf{Q}_{CN}(a,s;\omega)$ is the network's Q-value-vector for action $a$ in state $s$ and with weight vector $\omega$ . They claimed that training the same sample on two different weight vectors has the added advantage of forcing the network to identify that different weight vectors can have different Q-values for the same state. A more comprehensive review of MOMDPs and CN can be seen in [3].
|
| 110 |
+
|
| 111 |
+
In the original paper, the authors only proposed an algorithm based on Double DQN with discrete action space, which is not suitable for recommendation scenarios as the action space of recommendation is very large. Therefore, we modify the traditional DDPG [42] by introducing conditioned network into its policy network as well as critic network, and more importantly, we modify the original loss functions for both of them. We choose DDPG as it is a commonly adopted methods in RL, while our modification can be generalized to other reinforcement learning methods, such as trust
|
| 112 |
+
|
| 113 |
+
region policy optimization. More details about our modification will be introduced in Section 5.
|
| 114 |
+
|
| 115 |
+
# 4 PROBLEM FORMULATION
|
| 116 |
+
|
| 117 |
+
# 4.1 MOMDP for Recommendation
|
| 118 |
+
|
| 119 |
+
The recommendation agent will take the feature representation of the current user and item candidates $\mathcal{I}$ as input, and generate a list of items $L \in \mathcal{I}^K$ (where $K \geq 1$) to recommend after a user sends a request to it at timestamp $t \in (t_1, t_2, t_3, t_4, t_5, \ldots)$ . User $u$ who has received the list of recommended items $L$ will give feedback $B$ via clicking on this set of items, which can be used to measure the recommendation performance. Besides, based on the recommendation results, we will acquire the total number of exposure for each item group $G$ , which can later be used to measure fairness. Thus, the state $s$ can be represented by user features (e.g., user's recent click history), action $a$ is represented by items in $L$ , and reward $r$ is the immediate reward vector after taking action $a$ , with each component of $r$ corresponding to one certain objective (e.g., whether user clicks on an item in $L$ for utility objective or whether an item comes from predefined disadvantageous group for fairness objective). The problem formulation is formally presented as follows:
|
| 120 |
+
|
| 121 |
+
- State $S$ : A state $s_t$ is the representation of user's most recent positive interaction history $H_t$ with the recommendation system, together with his/her demographic information (if exists).
|
| 122 |
+
- Action $\mathcal{A}$ : An action $a_{t} = \{a_{t}^{1},\dots,a_{t}^{K}\}$ is a recommendation list with $K$ items to a user $u$ at time $t$ with current state $s_t$ .
|
| 123 |
+
- Vector Reward Function $r$ : A vector-valued reward function $\mathbf{r}_t = \mathbf{R}(s_t, a_t)$ , where each component of $\mathbf{r}_t$ corresponds to one certain objective. In this work, the reward vector includes two elements: utility objective and fairness objective. The details of the definition of our task-specific objectives will be introduced in the following section.
|
| 124 |
+
- Scalarization function $f$ : In this paper, we consider the class of MOMDPs with linear preferences functions $f$ , which is a commonly-used scalarization function. Under this setting, each objective is given a weight $\omega_{i}$ , such that the scalarization function becomes $f_{\omega}(\mathbf{R}) = \boldsymbol{\omega} \cdot \mathbf{R}$ , where each $\omega_{i} \in [0,1]$ and $\sum_{i} \omega_{i} = 1$ .
|
| 125 |
+
- Discount rate $\gamma$ : $\gamma \in [0,1]$ is a discount factor measuring the present value of long-term rewards.
|
| 126 |
+
|
| 127 |
+
We aim to learn a policy $\pi$ , mapping from states to actions, to generate recommendations that achieve the Pareto efficient trade-off between fairness and utility.
|
| 128 |
+
|
| 129 |
+
# 4.2 Multi-Objectives in Fair Recommendation
|
| 130 |
+
|
| 131 |
+
The reward vector is designed to measure the recommendation system's gain regarding utility and fairness. While our method is capable of dealing with multiple objectives simultaneously, for simplicity we deliberately select click-through rate and item (group) exposure fairness as our two objectives, measuring recommendation utility and item exposure fairness respectively.
|
| 132 |
+
|
| 133 |
+
4.2.1 Utility Objective. On one hand, given the recommendation based on the action $a_{t}$ and the user state $s_{t}$ , the user will provide feedback, e.g. click or purchase, etc. The recommender receives immediate reward $R_{u}(s_{t},a_{t})$ according to the user's positive feedback.
|
| 134 |
+
|
| 135 |
+
We also normalize the reward value by dividing $K$ , which is the length of the recommendation list.
|
| 136 |
+
|
| 137 |
+
$$
|
| 138 |
+
R_u\left(s_t, a_t, s_{t+1}\right) = \frac{\sum_{l=1}^{K} \mathbb{1}\left(a_t^l \text{ gets positive feedback}\right)}{K} \tag{4}
|
| 139 |
+
$$
|
| 140 |
+
|
| 141 |
+
4.2.2 Fairness Objective. On the other hand, based on the recommendation list $a_{t}$ , the total number of exposure of each item group will be counted and used to measure exposure fairness. Here, we calculate the ratio of items from sensitive group to the total number of recommended items, and use a hinge loss with margin $\beta$ to punish the abuse of fairness. Usually, we set $\beta$ to be the ratio of the number of items in sensitive group to the total number of items.
|
| 142 |
+
|
| 143 |
+
$$
|
| 144 |
+
R_f\left(s_t, a_t, s_{t+1}\right) = \max\left(\frac{\sum_{l=1}^{K} \mathbb{1}\left(a_t^l \text{ is in sensitive group}\right)}{K}, \beta\right) \tag{5}
|
| 145 |
+
$$
|
| 146 |
+
|
| 147 |
+
# 5 PROPOSED FRAMEWORK
|
| 148 |
+
|
| 149 |
+
# 5.1 The Conditioned Actor
|
| 150 |
+
|
| 151 |
+
The conditioned actor is almost the same as traditional actor except that we condition the predictions of the policy network to the preference vectors. Practically, we concatenate the state representation $s_t$ with the vector $\omega$ and train a neural network on this joint feature space, which is depicted in Fig. 1 (Conditioned Actor Network). The conditioned actor $\pi$ parameterized by $\theta^{\pi}$ serves as a stochastic policy that samples an action $a_t \in \mathcal{I}^K$ given the current state $s_t \in \mathbb{R}^m$ of a user and the preference vector $\omega$ .
|
| 152 |
+
|
| 153 |
+
First of all, we define $s_t$ as the concatenation of the user embedding $\mathbf{e}_u \in \mathbb{R}^d$ and their recent history embedding $\mathbf{h}_u$ :
|
| 154 |
+
|
| 155 |
+
$$
|
| 156 |
+
s_t = \left[\mathbf{e}_u; \mathbf{h}_u\right], \tag{6}
|
| 157 |
+
$$
|
| 158 |
+
|
| 159 |
+
where the recent history embedding $\mathbf{h}_u = \mathrm{GRU}(H_t)$ is acquired by encoding $N$ item embeddings via Gated Recurrent Units (GRU) [10], and $H_{t} = \{H_{t}^{1},H_{t}^{2},\ldots ,H_{t}^{N}\}$ denotes the most recent $N$ items from user $u$ 's interaction history. The user's recent history is organized as a queue with fixed length, and updated only if the recommended item $a_{t}^{l}\in a_{t}$ receives a positive feedback, which ensures that the state can always represent the user's most recent interests.
|
| 160 |
+
|
| 161 |
+
$$
|
| 162 |
+
H_{t+1} = \left\{ \begin{array}{ll} \left\{H_t^2, \dots, H_t^N, a_t^l\right\} & a_t^l \text{ gets positive feedback} \\ H_t & \text{otherwise} \end{array} \right. \tag{7}
|
| 163 |
+
$$
|
| 164 |
+
|
| 165 |
+
Secondly, we assume that the probability of actions conditioned on states and preferences follows a continuous high-dimensional Gaussian distribution. We also assume it has mean $\mu \in \mathbb{R}^{Kd}$ and covariance matrix $\Sigma \in \mathbb{R}^{Kd\times Kd}$ (only elements at diagonal are nonzeros and there are actually $Kd$ parameters). In order to achieve better representation ability, we approximate the distribution via a deep neural network, which maps the encoded state $s_t$ and preferences $\omega$ to $\mu$ and $\Sigma$ . Specifically, we adopt a Multi Layer Perceptron (MLP) with $\tanh (\cdot)$ as the non-linear activation function,
|
| 166 |
+
|
| 167 |
+
$$
|
| 168 |
+
(\mu, \Sigma) = \operatorname{MLP}(s_t, \omega). \tag{8}
|
| 169 |
+
$$
|
| 170 |
+
|
| 171 |
+
Once received $\mu$ and $\Sigma$ , we sample a vector from the acquired Gaussian distribution $\mathcal{N}(\mu, \Sigma)$ and convert it into a proposal matrix $W \sim \mathcal{N}(\mu, \Sigma) \in \mathbb{R}^{K \times d}$ , whose $k$ -th row, denoted by $W_k \in \mathbb{R}^d$ , represents an "ideal" embedding of a virtual item.
|
| 172 |
+
|
| 173 |
+

|
| 174 |
+
Figure 1: The architecture of the proposed MoFIR.
|
| 175 |
+
|
| 176 |
+
Finally, the probability matrix $P \in \mathbb{R}^{K \times |I|}$ of selecting the $k$ -th candidate item is given by $P_{k} = \mathrm{softmax}(W_{k} \mathcal{V}^{\top})$ , $k = 1, \dots, K$ , where $\mathcal{V} \in \mathbb{R}^{|\mathcal{I}| \times d}$ is the embedding matrix of all candidate items. This is equivalent to using dot product to determine similarity between $W_{k}$ and any item. As the result of taking the action at step $t$ , the actor recommends the $k$ -th item as follows:
|
| 177 |
+
|
| 178 |
+
$$
|
| 179 |
+
a_t^k = \underset{i \in \{1, \dots, |\mathcal{I}|\}}{\arg\max}\, P_{k,i}, \quad \forall k = 1, \dots, K, \tag{9}
|
| 180 |
+
$$
|
| 181 |
+
|
| 182 |
+
where $P_{k,i}$ denotes the probability of taking the $i$ -th item at rank $k$ .
|
| 183 |
+
|
| 184 |
+
# 5.2 The Conditioned Critic
|
| 185 |
+
|
| 186 |
+
The conditioned critic $\mu$ also differs from the traditional critic in that we concatenate the state representation $s_t$ with the vector $\omega$ as well as the embedding of action $a_t$ , and require the output to be a Q-value-vector with the size equal to the number of objectives, which is depicted in Fig. 1 (Conditioned Critic Network). The conditioned critic $\mu$ is parameterized with $\theta^{\mu}$ and is constructed to approximate the true state-action value vector function $\mathbf{Q}^{\pi}(s_t,a_t,\omega)$ and is used in the optimization of the actor. Following Eq. 2 introduced in conditioned network [3], the conditioned critic network is updated according to temporal-difference learning that minimizes the following loss function:
|
| 187 |
+
|
| 188 |
+
$$
|
| 189 |
+
\mathcal{L}\left(\theta^{\mu}\right) = \mathbb{E}_{s, a, \omega}\left[\left\| \mathbf{y}_t - \mathbf{Q}(s, a, \omega; \theta^{\mu}) \right\|_2^2\right] \tag{10}
|
| 190 |
+
$$
|
| 191 |
+
|
| 192 |
+
where $\mathbf{y}_t = \mathbf{r}_t + \gamma \mathbf{Q}'(s_{t+1}, a_{t+1}, \omega; \theta^{\mu'})$ is the target value computed with the target critic network.
|
| 193 |
+
|
| 194 |
+
# 5.3 Parameters Training Procedure of MoFIR
|
| 195 |
+
|
| 196 |
+
We present the detailed training procedure of our proposed model, MoFIR, in Algorithm 1 and the model architecture in Fig. 1. As mentioned before, we modify traditional single-objective DDPG into multi-objective DDPG by introducing the conditioned networks to both its actor network and critic network. In each episode, there are two phases - the trajectory generation phase (line 15-20) and model updating phase (line 22-32). In the trajectory generation phase, we sample one linear preference $\omega_0$ and fix it to generate user-item interaction trajectories. Then in the model updating phase, we sample another $\mathcal{N}_{\omega}$ preferences together with $\omega_0$ to update the conditioned actor network and the conditioned critic network. Here, we do not
|
| 197 |
+
|
| 198 |
+
follow the original setting in [3], which only uses one more random sampled preference vector, as Yang et al. [52] observed that increasing the number of sampled preference vectors can further improve the coverage ratio of RL agent and diminish the adaptation error in their experiments.
|
| 199 |
+
|
| 200 |
+
# 6 EXPERIMENTS
|
| 201 |
+
|
| 202 |
+
In this section, we first introduce the datasets, the comparison baselines, then discuss and analyse the experimental results.
|
| 203 |
+
|
| 204 |
+
# 6.1 Dataset Description
|
| 205 |
+
|
| 206 |
+
To evaluate the models under different data scales, data sparsity and application scenarios, we perform experiments on three real-world datasets. Some basic statistics of the experimental datasets are shown in Table 1.
|
| 207 |
+
|
| 208 |
+
- Movielens: We choose Movielens100K $^{1}$ , which includes about one hundred thousand user transactions (user id, item id, rating, timestamp, etc.).
|
| 209 |
+
- Ciao: Ciao was collected by Tang et al. [45] from a popular product review site, Epinions, in the month of May, $2011^{2}$ . For each user, they collected user profiles, user ratings and user trust relations. For each rating, they collected the product name and its category, the rating score, the time point when the rating is created, and the helpfulness of this rating.
|
| 210 |
+
- Etsy: We collect a few weeks of user-item interaction data on a famous e-commerce platform, Etsy. For each record, we collect user id, item id and timestamp. Since the original data is sparse, we filter out users and items with fewer than twenty interactions.
|
| 211 |
+
|
| 212 |
+
For each dataset, we first sort the records of each user based on the timestamp, and then split the records into training and testing sets chronologically by 4:1. The last item in the training set of each user is put into the validation set. Since we focus on item exposure fairness, we need to split items into two groups $G_{0}$ and $G_{1}$ based on item popularity. It would be desirable if we have the item impression/listing information and use it to group items, however, since Movielens and Ciao are public datasets and only have
|
| 213 |
+
|
| 214 |
+
Algorithm 1: Multi-Objective DDPG Algorithm
|
| 215 |
+
1 Input:
|
| 216 |
+
2 A preference sampling distribution $D_{\omega}$
|
| 217 |
+
3 A multi-objective critic network $\mu$ parameterized by $\theta^{\mu}$
|
| 218 |
+
4 An actor network $\pi$ parameterized by $\theta^{\pi}$
|
| 219 |
+
5 Pre-trained user embeddings $\mathcal{U}$ and item embeddings $\mathcal{V}$
|
| 220 |
+
6 Output:
|
| 221 |
+
7 Parameters $\theta^{\pi},\theta^{\mu}$ of the actor network and critic network.
|
| 222 |
+
8 Initialization:
|
| 223 |
+
9 Randomly initialize $\theta^{\pi}$ and $\theta^{\mu}$
|
| 224 |
+
10 Initialize target network $\mu^\prime$ and $\pi^{\prime}$ with weights $\theta^{\pi '}\gets \theta^{\pi},\theta^{\mu '}\gets \theta^{\mu};$
|
| 225 |
+
11 Initialize replay buffer $D$
|
| 226 |
+
12 for Episode $= 1\dots M$ do
|
| 227 |
+
13 Initialize user state $s_0$ from log data;
|
| 228 |
+
14 Sample a linear preference $\omega_0\sim D_\omega$ .
|
| 229 |
+
15 for $t = 1\dots T$ do
16 Observe current state, represent it as $s_t$ based on Eq. (6);
17 Select an action $a_{t}\in I^{K}$ using actor network $\pi$ based on Eq. (9);
18 Calculate utility reward and fairness reward and get the multi-objective reward vector $\mathbf{r}_t$ according to environment feedback based on Eq. (4) and Eq. (5);
19 Update $s_{t + 1}$ based on Eq. (6);
20 Store transition $(s_t,a_t,\mathbf{r}_t,s_{t + 1})$ in $D$;
21 end
22 if update then
23 Sample minibatch of $\mathcal{N}$ trajectories $\mathcal{T}$ from $D$;
24 Sample $\mathcal{N}_{\omega}$ preferences $W = \{\omega_{1},\omega_{2},\dots ,\omega_{N_{\omega}}\} \sim D_{\omega}$;
25 Append $\omega_0$ to $W$;
26 Select an action $a^\prime \in I^K$ using actor target network $\pi'$;
27 Set $\mathbf{y} = \mathbf{r} + \gamma \mathbf{Q}'(s',a',\omega ;\theta^{\mu '}),\omega \in W$;
28 Update critic by minimizing $\| \mathbf{y} - \mathbf{Q}(s,a,\omega ;\theta^{\mu})\| _2^2$ according to: $\nabla_{\theta^{\mu}}\mathcal{L}\approx \frac{1}{NN_{\omega}}\left[(\mathbf{y} - \mathbf{Q}(s,a,\omega;\theta^{\mu}))^{T}\nabla_{\theta^{\mu}}\mathbf{Q}(s,a,\omega;\theta^{\mu})\right]$;
29 Update the actor using the sampled policy gradient: $\nabla_{\theta^{\pi}}\pi \approx \frac{1}{NN_{\omega}}\sum_i\boldsymbol {\omega}^T\nabla_a\mathbf{Q}(s,a,\boldsymbol {\omega};\theta^{\mu})\nabla_{\theta^{\pi}}\pi (s,\boldsymbol {\omega})$;
30 Update the critic target network: $\theta^{\mu '}\gets \tau \theta^{\mu} + (1 - \tau)\theta^{\mu '}$;
31 Update the actor target network: $\theta^{\pi '}\gets \tau \theta^{\pi} + (1 - \tau)\theta^{\pi '}$;
32 end
|
| 230 |
+
|
| 231 |
+
Table 1: Basic statistics of the experimental datasets.
|
| 232 |
+
|
| 233 |
+
<table><tr><td>Dataset</td><td>#users</td><td>#items</td><td>#act./user</td><td>#act./item</td><td>#act.</td><td>density</td></tr><tr><td>Movielens100K</td><td>943</td><td>1682</td><td>106</td><td>59.45</td><td>100,000</td><td>6.305%</td></tr><tr><td>Ciao</td><td>2248</td><td>16861</td><td>16</td><td>2</td><td>36065</td><td>0.095%</td></tr><tr><td>Etsy</td><td>1030</td><td>945</td><td>47</td><td>51</td><td>48080</td><td>4.940%</td></tr></table>
|
| 234 |
+
|
| 235 |
+
interaction data, we use the number of interactions to group items in them. Specifically, for Movielens and Ciao, the top $20\%$ items in
|
| 236 |
+
|
| 237 |
+
terms of number of interactions belong to the popular group $G_0$ , and the remaining $80\%$ belong to the long-tail group $G_1$ , while for Etsy data, we additionally collect the listing impressions per month for each item and group items based on this.
|
| 238 |
+
|
| 239 |
+
Moreover, for RL-based methods, we set the initial state for each user during training as the first five clicked items in the training set, and the initial state during testing as the last five clicked items in the training set. We also set the RL agent to recommend ten items to a user each time.
|
| 240 |
+
|
| 241 |
+
# 6.2 Experimental Setup
|
| 242 |
+
|
| 243 |
+
6.2.1 Baselines: We compare our proposed method with the following baselines, including both traditional and reinforcement learning based recommendation models.
|
| 244 |
+
|
| 245 |
+
- MF: Collaborative Filtering based on matrix factorization [24] is a representative method for rating prediction. However, since not all datasets contain rating scores, we turn the rating prediction task into ranking prediction. Specifically, the user and item interaction vectors are considered as the representation vector for each user and item.
|
| 246 |
+
- BPR-MF: Bayesian Personalized Ranking [38] is one of the most widely used ranking methods for top-K recommendation, which models recommendation as a pair-wise ranking problem. In the implementation, we conduct balanced negative sampling on non-purchased items for model learning.
|
| 247 |
+
- NGCF: Neural Graph Collaborative Filtering [46] is a neural network-based recommendation algorithm, which integrates the user-item interactions into the embedding learning process and exploits the graph structure by propagating embeddings on it to model the high-order connectivity.
|
| 248 |
+
- LIRD: The original paper for List-wise recommendation based on deep reinforcement learning (LIRD) [57] utilized the concatenation of item embeddings to represent the user state, and the actor will provide a list of K items as an action.
|
| 249 |
+
|
| 250 |
+
We also include two state-of-the-art fairness frameworks to show the fairness performance of our proposed method.
|
| 251 |
+
|
| 252 |
+
- FOE: Fairness of Exposure in Ranking (FOE) [43] is a type of post-processing algorithm incorporating a standard linear program and the Birkhoff-von Neumann decomposition. It is originally designed for searching problems, so we follow the same modification method mentioned in [16, 47], and use ranking prediction model such as MF, BPR, and NGCF as the base ranker, where the raw utility is given by the predicted probability of user $i$ clicking item $j$ . In our experiment, we have MF-FOE, BPR-FOE and NGCF-FOE as our fairness baselines. Since FOE assumes independence of items in the list, it cannot be applied to LIRD, which is a sequential model and the order in its recommendation makes a difference.
|
| 253 |
+
- MFR: Multi-FR (MFR) [47] is a generic fairness-aware recommendation framework with multi-objective optimization, which jointly optimizes fairness and utility for two-sided recommendation. In our experiment, we only choose its item popularity fairness. We also modify it as the original fairness considers position bias as well, which is not the same setting as ours. Finally, we have MF-MFR, BPR-MFR and NGCF-MFR. For same reason as FOE, we do not include LIRD as well.
|
| 254 |
+
|
| 255 |
+
Table 2: Summary of the performance on three datasets. We evaluate for ranking (Recall, F1 and NDCG, in percentage (%), % symbol is omitted in the table for clarity) and fairness (KL Divergence and Popularity Rate, also in % values), while $K$ is the length of recommendation list. Bold scores are used when MoFIR is the best, while underlined scores indicate the strongest baselines. When MoFIR is the best, its improvements against the best baseline are significant at $p < {0.01}$ .
|
| 256 |
+
|
| 257 |
+
<table><tr><td rowspan="2">Methods</td><td colspan="3">Recall (%)↑</td><td colspan="3">F1 (%)↑</td><td colspan="3">NDCG (%)↑</td><td colspan="3">KL (%)↓</td><td colspan="3">Popularity Rate (%)↓</td></tr><tr><td>K=5</td><td>K=10</td><td>K=20</td><td>K=5</td><td>K=10</td><td>K=20</td><td>K=5</td><td>K=10</td><td>K=20</td><td>K=5</td><td>K=10</td><td>K=20</td><td>K=5</td><td>K=10</td><td>K=20</td></tr><tr><td colspan="16">Movielens-100K</td></tr><tr><td>MF</td><td>1.422</td><td>2.713</td><td>5.228</td><td>2.019</td><td>3.016</td><td>4.127</td><td>3.561</td><td>3.830</td><td>4.705</td><td>229.124</td><td>224.390</td><td>215.772</td><td>99.745</td><td>99.258</td><td>98.224</td></tr><tr><td>BPR-MF</td><td>1.304</td><td>3.539</td><td>8.093</td><td>1.824</td><td>3.592</td><td>5.409</td><td>3.025</td><td>3.946</td><td>5.787</td><td>230.531</td><td>230.531</td><td>229.464</td><td>99.873</td><td>99.873</td><td>99.777</td></tr><tr><td>NGCF</td><td>1.995</td><td>3.831</td><td>6.983</td><td>2.846</td><td>4.267</td><td>5.383</td><td>5.319</td><td>5.660</td><td>6.510</td><td>232.193</td><td>232.193</td><td>232.193</td><td>100.000</td><td>100.000</td><td>100.000</td></tr><tr><td>LIRD</td><td>2.798</td><td>6.586</td><td>13.711</td><td>3.198</td><td>4.850</td><td>5.855</td><td>4.583</td><td>6.217</td><td>8.840</td><td>209.845</td><td>193.918</td><td>176.644</td><td>97.434</td><td>95.058</td><td>92.121</td></tr><tr><td>MF-FOE</td><td>1.164</td><td>2.247</td><td>4.179</td><td>1.739</td><td>2.730</td><td>3.794</td><td>3.520</td><td>3.796</td><td>4.367</td><td>181.000</td><td>175.355</td><td>170.444</td><td>92.895</td><td>91.888</td><td>90.981</td></tr><tr><td>BPR-FOE</td><td>0.974</td><td>2.053</td><td>4.404</td><td>1.496</td><td>2.568</td><td>3.933</td><td>3.127</td><td>3.514</td><td>4.332</td><td>176.938</td><td>172.465</td><td>168.952</td><td>92.174</td><td>91.357</td><td>90.700</td></tr><tr><td>NGCF-FOE</td><td>1.193</td><td>1.987</td><td>4.251</td><td>1.759</td><td>2.398</td><td>3.6
98</td><td>4.033</td><td>3.897</td><td>4.633</td><td>232.193</td><td>232.193</td><td>232.193</td><td>100.000</td><td>100.000</td><td>100.000</td></tr><tr><td>MF-MFR</td><td>1.546</td><td>2.807</td><td>5.422</td><td>2.019</td><td>3.016</td><td>4.127</td><td>3.276</td><td>3.613</td><td>4.571</td><td>100.590</td><td>96.620</td><td>85.420</td><td>74.867</td><td>73.743</td><td>70.419</td></tr><tr><td>BPR-MFR</td><td>1.418</td><td>2.811</td><td>6.155</td><td>2.019</td><td>3.016</td><td>4.127</td><td>3.522</td><td>3.822</td><td>5.047</td><td>165.897</td><td>155.339</td><td>137.663</td><td>90.117</td><td>88.017</td><td>84.205</td></tr><tr><td>NGCF-MFR</td><td>1.456</td><td>2.900</td><td>6.570</td><td>2.846</td><td>4.267</td><td>5.383</td><td>3.041</td><td>3.472</td><td>4.928</td><td>212.497</td><td>202.306</td><td>185.518</td><td>97.794</td><td>96.352</td><td>93.674</td></tr><tr><td>MoFIR-1.0</td><td>6.580</td><td>12.753</td><td>22.843</td><td>5.658</td><td>7.178</td><td>7.858</td><td>8.026</td><td>10.848</td><td>14.683</td><td>232.193</td><td>232.193</td><td>232.193</td><td>100.000</td><td>100.000</td><td>100.000</td></tr><tr><td>MoFIR-0.5</td><td>4.679</td><td>9.520</td><td>19.918</td><td>4.438</td><td>5.808</td><td>7.421</td><td>6.633</td><td>9.300</td><td>14.370</td><td>173.672</td><td>170.303</td><td>162.386</td><td>91.580</td><td>90.954</td><td>89.433</td></tr><tr><td>MoFIR-0.1</td><td>0.323</td><td>0.781</td><td>1.550</td><td>0.521</td><td>1.008</td><td>1.483</td><td>1.251</td><td>1.404</td><td>1.598</td><td>0.795</td><td>0.608</td><td>0.306</td><td>24.305</td><td>23.754</td><td>22.646</td></tr><tr><td 
colspan="16">Ciao</td></tr><tr><td>MF</td><td>0.518</td><td>1.938</td><td>3.100</td><td>0.395</td><td>0.687</td><td>0.599</td><td>0.408</td><td>0.924</td><td>1.264</td><td>81.154</td><td>65.458</td><td>47.848</td><td>69.088</td><td>63.835</td><td>57.098</td></tr><tr><td>BPR-MF</td><td>1.087</td><td>2.204</td><td>4.607</td><td>0.677</td><td>0.770</td><td>0.858</td><td>0.776</td><td>1.181</td><td>1.900</td><td>119.307</td><td>100.884</td><td>82.717</td><td>79.826</td><td>74.949</td><td>69.580</td></tr><tr><td>NGCF</td><td>1.721</td><td>2.816</td><td>4.380</td><td>1.056</td><td>0.958</td><td>0.783</td><td>1.670</td><td>2.027</td><td>2.450</td><td>142.025</td><td>96.789</td><td>59.561</td><td>85.181</td><td>73.792</td><td>61.693</td></tr><tr><td>LIRD</td><td>0.766</td><td>2.448</td><td>3.599</td><td>0.554</td><td>1.082</td><td>0.921</td><td>1.393</td><td>2.638</td><td>3.277</td><td>65.744</td><td>105.507</td><td>64.888</td><td>63.936</td><td>76.223</td><td>63.632</td></tr><tr><td>MF-FOE</td><td>0.685</td><td>1.208</td><td>1.914</td><td>0.458</td><td>0.474</td><td>0.396</td><td>0.475</td><td>0.669</td><td>0.864</td><td>19.720</td><td>11.167</td><td>7.622</td><td>43.068</td><td>37.033</td><td>33.915</td></tr><tr><td>BPR-FOE</td><td>1.442</td><td>2.111</td><td>3.693</td><td>0.812</td><td>0.663</td><td>0.731</td><td>0.934</td><td>1.154</td><td>1.657</td><td>55.999</td><td>46.858</td><td>40.626</td><td>60.347</td><td>56.686</td><td>53.987</td></tr><tr><td>NGCF-FOE</td><td>1.234</td><td>1.907</td><td>2.903</td><td>0.651</td><td>0.583</td><td>0.566</td><td>0.937</td><td>1.156</td><td>1.477</td><td>79.313</td><td>74.038</td><td>71.335</td><td>43.357</td><td>34.226</td><td>30.391</td></tr><tr><td>MF-MFR</td><td>0.307</td><td>0.619</td><td>1.281</td><td>0.395</td><td>0.687</td><td>0.599</td><td>0.237</td><td>0.345</td><td>0.535</td><td>0.185</td><td>0.096</td><td>0.068</td><td>18.003</td><td>18.553</td><td>18.784</td></tr><tr><td>BPR-MFR</td><td>1.146</td><td>1.962</td><td>2.667<
/td><td>0.395</td><td>0.687</td><td>0.599</td><td>1.011</td><td>1.314</td><td>1.534</td><td>4.303</td><td>2.540</td><td>1.454</td><td>30.304</td><td>27.829</td><td>25.868</td></tr><tr><td>NGCF-MFR</td><td>1.284</td><td>2.131</td><td>4.033</td><td>1.056</td><td>0.958</td><td>0.783</td><td>1.014</td><td>1.342</td><td>1.901</td><td>37.133</td><td>20.302</td><td>10.515</td><td>52.388</td><td>43.430</td><td>36.498</td></tr><tr><td>MoFIR-1.0</td><td>2.162</td><td>3.867</td><td>5.866</td><td>1.626</td><td>1.513</td><td>1.323</td><td>4.000</td><td>4.764</td><td>5.813</td><td>181.742</td><td>156.545</td><td>123.213</td><td>93.025</td><td>88.263</td><td>80.796</td></tr><tr><td>MoFIR-0.5</td><td>1.254</td><td>2.665</td><td>4.122</td><td>0.845</td><td>0.971</td><td>0.879</td><td>2.031</td><td>2.724</td><td>3.490</td><td>19.077</td><td>12.750</td><td>8.032</td><td>42.663</td><td>38.278</td><td>34.305</td></tr><tr><td>MoFIR-0.1</td><td>0.892</td><td>1.610</td><td>2.338</td><td>0.557</td><td>0.532</td><td>0.445</td><td>1.054</td><td>1.311</td><td>1.576</td><td>0.054</td><td>0.010</td><td>0.484</td><td>21.100</td><td>19.522</td><td>16.795</td></tr><tr><td 
colspan="16">Etsy</td></tr><tr><td>MF</td><td>2.693</td><td>5.581</td><td>10.348</td><td>2.917</td><td>4.176</td><td>4.912</td><td>3.438</td><td>4.671</td><td>6.681</td><td>190.410</td><td>190.173</td><td>186.243</td><td>94.491</td><td>94.452</td><td>93.797</td></tr><tr><td>BPR-MF</td><td>3.113</td><td>5.850</td><td>11.704</td><td>3.309</td><td>4.320</td><td>5.385</td><td>3.700</td><td>4.880</td><td>7.341</td><td>179.815</td><td>176.447</td><td>169.740</td><td>92.687</td><td>92.085</td><td>90.849</td></tr><tr><td>NGCF</td><td>3.414</td><td>6.026</td><td>11.746</td><td>3.674</td><td>4.498</td><td>5.406</td><td>4.180</td><td>5.238</td><td>7.610</td><td>194.985</td><td>185.756</td><td>175.403</td><td>95.228</td><td>93.715</td><td>91.896</td></tr><tr><td>LIRD</td><td>7.163</td><td>12.176</td><td>24.056</td><td>4.158</td><td>4.493</td><td>4.967</td><td>6.587</td><td>9.289</td><td>13.833</td><td>212.890</td><td>197.336</td><td>166.047</td><td>97.847</td><td>95.597</td><td>90.145</td></tr><tr><td>MF-FOE</td><td>1.382</td><td>2.436</td><td>4.515</td><td>1.641</td><td>2.160</td><td>2.704</td><td>2.111</td><td>2.482</td><td>3.318</td><td>42.682</td><td>29.960</td><td>22.502</td><td>54.898</td><td>48.865</td><td>44.758</td></tr><tr><td>BPR-FOE</td><td>1.503</td><td>2.808</td><td>5.513</td><td>1.802</td><td>2.468</td><td>3.132</td><td>2.328</td><td>2.783</td><td>3.844</td><td>43.394</td><td>30.734</td><td>23.390</td><td>55.209</td><td>49.263</td><td>45.276</td></tr><tr><td>NGCF-FOE</td><td>1.958</td><td>3.135</td><td>5.478</td><td>2.227</td><td>2.593</td><td>2.923</td><td>2.705</td><td>3.106</td><td>4.024</td><td>47.548</td><td>30.829</td><td>21.678</td><td>56.974</td><td>49.311</td><td>44.268</td></tr><tr><td>MF-MFR</td><td>2.482</td><td>5.150</td><td>10.279</td><td>2.917</td><td>4.176</td><td>4.912</td><td>3.265</td><td>4.504</td><td>6.671</td><td>173.889</td><td>158.030</td><td>134.564</td><td>91.620</td><td>88.565</td><td>83.497</td></tr><tr><td>BPR-MFR</td><td>2.510</td><t
d>4.849</td><td>9.711</td><td>2.917</td><td>4.176</td><td>4.912</td><td>3.144</td><td>4.110</td><td>6.206</td><td>136.319</td><td>120.661</td><td>94.153</td><td>83.899</td><td>80.165</td><td>73.031</td></tr><tr><td>NGCF-MFR</td><td>2.325</td><td>4.146</td><td>7.946</td><td>3.558</td><td>4.820</td><td>5.616</td><td>2.994</td><td>3.636</td><td>5.210</td><td>104.348</td><td>91.557</td><td>74.402</td><td>75.907</td><td>72.270</td><td>66.901</td></tr><tr><td>MoFIR-1.0</td><td>6.690</td><td>13.871</td><td>24.728</td><td>4.833</td><td>5.932</td><td>6.238</td><td>9.183</td><td>13.629</td><td>19.822</td><td>139.319</td><td>134.627</td><td>129.318</td><td>84.578</td><td>83.511</td><td>82.270</td></tr><tr><td>MoFIR-0.5</td><td>5.333</td><td>10.342</td><td>19.383</td><td>3.460</td><td>3.979</td><td>4.218</td><td>4.626</td><td>6.614</td><td>9.798</td><td>70.154</td><td>67.961</td><td>64.956</td><td>65.470</td><td>64.714</td><td>63.657</td></tr><tr><td>MoFIR-0.1</td><td>1.340</td><td>2.966</td><td>5.864</td><td>1.151</td><td>1.641</td><td>1.895</td><td>1.778</td><td>2.839</td><td>4.425</td><td>0.569</td><td>0.545</td><td>0.396</td><td>23.628</td><td>23.550</td><td>23.016</td></tr></table>
|
| 258 |
+
|
| 259 |
+
We implement MF, BPR-MF, NGCF, MF-FOE, BPR-FOE, NGCF-FOE, MF-MFR, BPR-MFR and NGCF-MFR using Pytorch with Adam optimizer. For all of them, we consider latent dimensions $d$ from $\{16, 32, 64, 128, 256\}$ , learning rate $lr$ from $\{1e - 1, 5e - 2, 1e - 2, \dots, 5e - 4, 1e - 4\}$ , and the L2 penalty is chosen from $\{0.01, 0.1, 1\}$ . We tune the hyper-parameters using the validation set and terminate training when the performance on the validation set does not change within 5 epochs. Further, since the FOE-based methods need to solve a linear programming with size $|\mathcal{I}| \times |\mathcal{I}|$ for each consumer, which brings huge computational costs, we rerank the top-200 items from
|
| 260 |
+
|
| 261 |
+
the base model then select the new top-K (K<100) as the final recommendation. Similarly, we implement MoFIR with Pytorch. We first perform basic MF to pretrain 16-dimensional user and item embeddings, and fix them through training and test. We set $|H_{t}| = 5$ , and use two GRU layers to get the state representation $s_t$ . For the actor network and the critic network, we use two hidden layer MLP with $\tanh(\cdot)$ as activation function. Finally, we fine-tune MoFIR's hyper-parameters on our validation set. In order to examine the trade-off between performance and fairness, we use different level of preference vectors in test. Since MoFIR is able to approximate all possible solutions of the Pareto frontier, we simply input different
|
| 262 |
+
|
| 263 |
+

|
| 264 |
+
(a) NDCG vs Long-tail Rate on ML100K
|
| 265 |
+
|
| 266 |
+

|
| 267 |
+
(b) NDCG vs Long-tail Rate on Ciao
|
| 268 |
+
Figure 2: Approximate Pareto frontier in three datasets generated by MoFIR and NGCF-MFR, where $x$ -axis represents the Longtail Rate@20 (Longtail Rate equals to one minus Popularity Rate) and $y$ -axis represents the value of NDCG@20.
|
| 269 |
+
|
| 270 |
+

|
| 271 |
+
(c) NDCG vs Long-tail Rate on Etsy
|
| 272 |
+
|
| 273 |
+
preference vectors $\omega$ into the trained model to get variants of MoFIR and denote the resulting alternatives as MoFIR-1.0, MoFIR-0.5, and MoFIR-0.1, where the scalar is the weight on the recommendation utility objective.
|
| 274 |
+
|
| 275 |
+
6.2.2 Evaluation Metrics: We select several most commonly used top-K ranking metrics to evaluate each model's recommendation performance, including Recall, F1 Score, and NDCG. For fairness evaluation, we define Popularity Rate, which simply refers to the ratio of the number of popular items in the recommendation list to the total number of items in the list. We also employ KL-divergence (KL) to compute the expectation of the difference between protected group membership at top-K vs. in the over-all population, where $d_{KL}(D_1||D_2) = \sum_j D_1(j) \ln \frac{D_1(j)}{D_2(j)}$ with $D_1$ represents the true group distribution between $G_0$ and $G_1$ in top-K recommendation list, and $D_2 = \left[\frac{|G_0|}{|\mathcal{I}|}, \frac{|G_1|}{|\mathcal{I}|}\right]$ represents their ideal distribution of the overall population.
|
| 276 |
+
|
| 277 |
+
# 6.3 Experimental Results
|
| 278 |
+
|
| 279 |
+
The major experimental results are shown in Table 2, besides, we also plot the approximate Pareto frontier between NDCG and Long-tail Rate (namely, 1-Popularity Rate) in Fig. 2. We analyze and discuss the results in terms of the following perspectives.
|
| 280 |
+
|
| 281 |
+
6.3.1 Recommendation Performance. For recommendation performance, we compare MoFIR-1.0 with MF, BPR, NGCF, and LIRD based on Recall@k, F1@k and NDCG@k and provide these results of the recommendation performance in Table 2. Among all the baseline models, we can see that all sequential recommendation methods (LIRD, MoFIR-1.0) are much better than the traditional methods, which demonstrates the superiority of sequential recommendation on top-K ranking tasks. Specifically, LIRD is the strongest baseline in all three datasets on all performance metrics: when averaging across recommendation lengths LIRD achieves $41.28\%$ improvement over MF, $27.08\%$ improvement over BPR-MF, and $8.97\%$ improvement over NGCF.
|
| 282 |
+
|
| 283 |
+
Our MoFIR approach achieves the best top-K recommendation performance against all baselines on all datasets: when averaging across three recommendation lengths on all performance metrics,
|
| 284 |
+
|
| 285 |
+
MoFIR gets $41.40\%$ improvement than the best baseline on MovieLens100K; MoFIR gets $46.45\%$ improvement than LIRD on Ciao; and MoFIR gets $18.98\%$ improvement than LIRD on Etsy. These above observations imply that the proposed method does have the ability to capture the dynamic nature in user-item interactions, which results in better recommendation results. Besides, unlike LIRD, which only concatenates user and item embeddings together, MoFIR uses several GRU layers to better capture the sequential information in user history, which benefits the model performance.
|
| 286 |
+
|
| 287 |
+
6.3.2 Fairness Performance. For fairness performance, we compare MoFIRs with FOE-based methods and MFR-based methods based on KL Divergence@k and Popularity Rate@k, which are also shown in Table 2. It is easy to find that there does exist a trade-off between the recommendation performance and the fairness performance, which is understandable, as most of the long-tail items have relatively fewer interactions with users. When comparing the baselines, we can easily find that MFR is able to achieve better trade-off than FOE as it is also a multi-objective optimization method.
|
| 288 |
+
|
| 289 |
+
From Table 2, MoFIR is able to adjust the degree of trade-off between utility and fairness through simply modifying the weight of the recommendation utility objective. It is worth noting that MoFIR-0.1 can always closely achieve the ideal distribution as its $KLs$ are close to zero. In Table 2, we can find that even when MoFIR has similar fairness performance to other baselines, it can still achieve much better recommendation performance (for example, BPR-FOE and MoFIR-0.5 in Movielens100k, NGCF-FOE and MoFIR-0.5 in Ciao, or MF-MFR and MoFIR-0.5 in Etsy), which indicates its capability of finding a better trade-off.
|
| 290 |
+
|
| 291 |
+
6.3.3 Fairness-Utility Trade-off. We only compare MoFIR with MFR, since FOE is a post-processing method, which doesn't optimize the fairness-utility trade-off. In order to better illustrate the trade-off between utility and fairness, we fix the length of the recommendation list at 20 and plot NDCG@20 against Longtail Rate in Fig. 2 for all datasets, where Longtail Rate equals to one minus Popularity Rate. Each blue point is generated by simply changing the input weights to the fine-tuned MoFIR, while each orange point is generated by running the entire MFR optimization. The clear margin distance between the blue points' curve (Approximate
|
| 292 |
+
|
| 293 |
+
Pareto frontier) and the orange points' curve demonstrates the great effectiveness of MORL compared with traditional multi-objective optimization method in recommendation.
|
| 294 |
+
|
| 295 |
+
# 7 CONCLUSION
|
| 296 |
+
|
| 297 |
+
In this work, we achieve the approximate Pareto efficient trade-off between fairness and utility in recommendation systems and characterize their Pareto Frontier in the objective space in order to find solutions with different levels of trade-off. We accomplish the task by proposing a fairness-aware recommendation framework using multi-objective reinforcement learning (MORL) with linear preferences, called MoFIR, which aims to learn a single parametric representation for optimal recommendation policies over the space of all possible preferences. Experiments across three different datasets demonstrate the effectiveness of our approach in both fairness measures and recommendation performance.
|
| 298 |
+
|
| 299 |
+
# ACKNOWLEDGMENTS
|
| 300 |
+
|
| 301 |
+
We gratefully acknowledge the valuable cooperation of Runzhe Yang from Princeton University and Shuchang Liu from Rutgers University.
|
| 302 |
+
|
| 303 |
+
# REFERENCES
|
| 304 |
+
|
| 305 |
+
[1] Himan Abdollahpouri, Robin Burke, and Bamshad Mobasher. 2017. Controlling popularity bias in learning-to-rank recommendation. In Proceedings of the eleventh ACM conference on recommender systems. 42-46.
|
| 306 |
+
[2] Himan Abdollahpouri, Masoud Mansoury, Robin Burke, and Bamshad Mobasher. 2019. The unfairness of popularity bias in recommendation. arXiv preprint arXiv:1907.13286 (2019).
|
| 307 |
+
[3] Axel Abels, Diederik Roijers, Tom Lenaerts, Ann Nowé, and Denis Steckelmacher. 2019. Dynamic weights in multi-objective deep reinforcement learning. In International Conference on Machine Learning. PMLR, 11-20.
|
| 308 |
+
[4] Alex Beutel, Jilin Chen, Tulsee Doshi, Hai Qian, Li Wei, Yi Wu, Lukasz Heldt, Zhe Zhao, Lichan Hong, Ed H Chi, et al. 2019. Fairness in the recommendation ranking through pairwise comparisons. In Proceedings of the 25th ACM SIGKDD.
|
| 309 |
+
[5] L Elisa Celis, Sayash Kapoor, Farnood Salehi, and Nisheeth Vishnoi. 2019. Controlling polarization in personalization: An algorithmic framework. In Proceedings of the conference on fairness, accountability, and transparency. 160-169.
|
| 310 |
+
[6] Haokun Chen, Xinyi Dai, Han Cai, Weinan Zhang, Xuejian Wang, Ruiming Tang, Yuzhou Zhang, and Yong Yu. 2019. Large-scale interactive recommendation with tree-structured policy gradient. In Proceedings of the AAAI, Vol. 33. 3312-3320.
|
| 311 |
+
[7] Hanxiong Chen, Li Yunqi, Shi Shaoyun, Shuchang Liu, He Zhu, and Yongfeng Zhang. 2022. Graph Logic Reasoning for Recommendation and Link Prediction. In Proceedings of the 15th ACM International Conference on Web Search and Data Mining.
|
| 312 |
+
[8] Le Chen, Ruijun Ma, Aniko Hannák, and Christo Wilson. [n.d.]. Investigating the Impact of Gender on Rank in Resume Search Engines. In Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems.
|
| 313 |
+
[9] Minmin Chen, Alex Beutel, Paul Covington, Sagar Jain, Francois Belletti, and Ed H Chi. 2019. Top-k off-policy correction for a REINFORCE recommender system. In Proceedings of the 12th ACM WSDM. 456-464.
|
| 314 |
+
[10] Kyunghyun Cho, Bart van Merrienboer, Dzmitry Bahdanau, and Yoshua Bengio. 2014. On the Properties of Neural Machine Translation: Encoder-Decoder Approaches. In SSST@EMNLP.
|
| 315 |
+
[11] Gabriel Dulac-Arnold, Richard Evans, Peter Sunehag, and Ben Coppin. 2015. Reinforcement Learning in Large Discrete Action Spaces. (2015). arXiv:1512.07679
|
| 316 |
+
[12] Sanghamitra Dutta, Dennis Wei, Hazar Yueksel, Pin-Yu Chen, Sijia Liu, and Kush Varshney. 2020. Is there a trade-off between fairness and accuracy? a perspective using mismatched hypothesis testing. In International Conference on Machine Learning. PMLR, 2803-2813.
|
| 317 |
+
[13] Zuohui Fu, Yikun Xian, Ruoyuan Gao, Jieyu Zhao, Qiaoying Huang, Yingqiang Ge, Shuyuan Xu, Shijie Geng, Chirag Shah, Yongfeng Zhang, et al. 2020. Fairness-aware explainable recommendation over knowledge graphs. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval. 69-78.
|
| 318 |
+
[14] Ruoyuan Gao and Chirag Shah. 2021. Addressing Bias and Fairness in Search Systems. In Proceedings of the 44th International ACM SIGIR (Virtual Event, Canada) (SIGIR '21). 4 pages. https://doi.org/10.1145/3404835.3462807
|
| 319 |
+
|
| 320 |
+
[15] Yingqiang Ge, Shuchang Liu, Ruoyuan Gao, Yikun Xian, Yunqi Li, Xiangyu Zhao, Changhua Pei, Fei Sun, Junfeng Ge, Wenwu Ou, et al. 2021. Towards Long-term Fairness in Recommendation. In Proceedings of the 14th ACM International Conference on Web Search and Data Mining. 445-453.
|
| 321 |
+
[16] Yingqiang Ge, Shuchang Liu, Ruoyuan Gao, Yikun Xian, Yunqi Li, Xiangyu Zhao, Changhua Pei, Fei Sun, Junfeng Ge, Wenwu Ou, and Yongfeng Zhang, 2021. Towards Long-Term Fairness in Recommendation. In Proceedings of the 14th ACM International Conference on Web Search and Data Mining (Virtual Event, Israel) (WSDM '21), 445-453. https://doi.org/10.1145/3437963.3441824
|
| 322 |
+
[17] Yingqiang Ge, Shuyuan Xu, Shuchang Liu, Zuohui Fu, Fei Sun, and Yongfeng Zhang. 2020. Learning Personalized Risk Preferences for Recommendation. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval. 409-418.
|
| 323 |
+
[18] Yingqiang Ge, Shuya Zhao, Honglu Zhou, Changhua Pei, Fei Sun, Wenwu Ou, and Yongfeng Zhang. 2020. Understanding echo chambers in e-commerce recommender systems. In Proceedings of the 43rd international ACM SIGIR conference on research and development in information retrieval. 2261-2270.
|
| 324 |
+
[19] Sahin Cem Geyik, Stuart Ambler, and Krishnaram Kenthapadi. 2019. Fairness-Aware Ranking in Search & Recommendation Systems with Application to LinkedIn Talent Search. In Proceedings of KDD. ACM, 2221-2231.
|
| 325 |
+
[20] Neil J. Hurley. 2013. Personalised Ranking with Diversity. In Proceedings of the 7th ACM Conference on Recommender Systems (Hong Kong, China) (RecSys '13). 379-382. https://doi.org/10.1145/2507157.2507226
|
| 326 |
+
[21] Tamas Jambor and Jun Wang, 2010. Optimizing multiple objectives in collaborative filtering. In Proceedings of the fourth ACM conference on Recommender systems. 55-62.
|
| 327 |
+
[22] Mohammad Mahdi Kamani, Rana Forsati, James Z Wang, and Mehrdad Mahdavi. 2021. Pareto Efficient Fairness in Supervised Learning: From Extraction to Tracing. arXiv preprint arXiv:2104.01634 (2021).
|
| 328 |
+
[23] Michael Kearns and Aaron Roth. 2019. The ethical algorithm: The science of socially aware algorithm design. Oxford University Press.
|
| 329 |
+
[24] Yehuda Koren, Robert Bell, and Chris Volinsky. 2009. Matrix factorization techniques for recommender systems. Computer 8 (2009), 30-37.
|
| 330 |
+
[25] Yunqi Li, Hanxiong Chen, Zuohui Fu, Yingqiang Ge, and Yongfeng Zhang. 2021. User-oriented Fairness in Recommendation. In Proceedings of the Web Conference 2021, 624-632.
|
| 331 |
+
[26] Yunqi Li, Hanxiong Chen, Shuyuan Xu, Yingqiang Ge, and Yongfeng Zhang. 2021. Towards Personalized Fairness based on Causal Notion. arXiv preprint arXiv:2105.09829 (2021).
|
| 332 |
+
[27] Yunqi Li, Hanxiong Chen, Shuyuan Xu, Yingqiang Ge, and Yongfeng Zhang. 2021. Towards Personalized Fairness Based on Causal Notion. In Proceedings of the 44th International ACM SIGIR (Virtual Event, Canada) (SIGIR '21). 10 pages. https://doi.org/10.1145/3404835.3462966
|
| 333 |
+
[28] Yunqi Li, Yingqiang Ge, and Yongfeng Zhang. 2021. CIKM 2021 Tutorial on Fairness of Machine Learning in Recommender Systems. In Proceedings of the 30th ACM International Conference on Information & Knowledge Management. 4857-4860.
|
| 334 |
+
[29] Yunqi Li, Yingqiang Ge, and Yongfeng Zhang. 2021. Tutorial on Fairness of Machine Learning in Recommender Systems.
|
| 335 |
+
[30] Timothy P. Lillicrap, Jonathan J. Hunt, Alexander Pritzel, Nicolas Manfred Otto Heess, Tom Erez, Yuval Tassa, David Silver, and Daan Wierstra. 2016. Continuous control with deep reinforcement learning. CoRR abs/1509.02971 (2016).
|
| 336 |
+
[31] Xiao Lin, Hongjie Chen, Changhua Pei, Fei Sun, Xuanji Xiao, Hanxiao Sun, Yongfeng Zhang, Wenwu Ou, and Peng Jiang. 2019. A pareto-efficient algorithm for multiple objective optimization in e-commerce recommendation. In Proceedings of the 13th ACM Conference on recommender systems. 20-28.
|
| 337 |
+
[32] Zachary Lipton, Julian McAuley, and Alexandra Chouldechova. 2018. Does mitigating ML's impact disparity require treatment disparity?. In Advances in Neural Information Processing Systems. Curran Associates, Inc.
|
| 338 |
+
[33] Yudan Liu, Kaikai Ge, Xu Zhang, and Leyu Lin. 2019. Real-Time Attention Based Look-Alike Model for Recommender System. In Proceedings of SIGKDD'19 (Anchorage, AK, USA) (KDD '19), 2765-2773.
|
| 339 |
+
[34] Tariq Mahmood and Francesco Ricci. 2007. Learning and adaptivity in interactive recommender systems. In Proceedings of the 9th international conference on Electronic commerce. 75-84.
|
| 340 |
+
[35] Tariq Mahmood and Francesco Ricci. 2009. Improving recommender systems with adaptive conversational strategies. In Proceedings of the 20th ACM conference on Hypertext and hypermedia. 73-82.
|
| 341 |
+
[36] Sean M McNee, John Riedl, and Joseph A Konstan. 2006. Being accurate is not enough: how accuracy metrics have hurt recommender systems. In CHI'06 extended abstracts on Human factors in computing systems. 1097-1101.
|
| 342 |
+
[37] Changhua Pei, Xinru Yang, Qing Cui, Xiao Lin, Fei Sun, Peng Jiang, Wenwu Ou, and Yongfeng Zhang. 2019. Value-aware recommendation based on reinforcement profit maximization. In The World Wide Web Conference. 3123-3129.
|
| 343 |
+
[38] Steffen Rendle, Christoph Freudenthaler, Zeno Gantner, and Lars Schmidt-Thieme. 2009. BPR: Bayesian personalized ranking from implicit feedback. In Proceedings of the 25th conference on uncertainty in artificial intelligence. AUAI Press, 452-461.
|
| 344 |
+
|
| 345 |
+
[39] Marco Tulio Ribeiro, Anisio Lacerda, Adriano Veloso, and Nivio Ziviani. 2012. Pareto-efficient hybridization for multi-objective recommender systems. In Proceedings of the sixth ACM conference on Recommender systems. 19-26.
|
| 346 |
+
[40] Marco Tulio Ribeiro, Nivio Ziviani, Edleno Silva De Moura, Itamar Hata, Anisio Lacerda, and Adriano Veloso. 2014. Multiobjective pareto-efficient approaches for recommender systems. ACM Transactions on Intelligent Systems and Technology (TIST) 5, 4 (2014), 1-20.
|
| 347 |
+
[41] Guy Shani, David Heckerman, and Ronen I Brafman. 2005. An MDP-based recommender system. Journal of Machine Learning Research 6, Sep (2005).
|
| 348 |
+
[42] David Silver, Guy Lever, Nicolas Manfred Otto Heess, Thomas Degris, Daan Wierstra, and Martin A. Riedmiller. 2014. Deterministic Policy Gradient Algorithms. In ICML.
|
| 349 |
+
[43] Ashudeep Singh and Thorsten Joachims. 2018. Fairness of Exposure in Rankings. In Proceedings of the 24th ACM SIGKDD (London, United Kingdom).
|
| 350 |
+
[44] Juntao Tan, Shuyuan Xu, Yingqiang Ge, Yunqi Li, Xu Chen, and Yongfeng Zhang. 2021. Counterfactual explainable recommendation. In Proceedings of the 30th ACM International Conference on Information & Knowledge Management. 1784-1793.
|
| 351 |
+
[45] Jiliang Tang, Huiji Gao, Huan Liu, and Atish Das Sarma. 2012. ETrust: Understanding Trust Evolution in an Online World. In Proceedings of the 18th ACM SIGKDD (Beijing, China) (KDD '12). https://doi.org/10.1145/2339530.2339574
|
| 352 |
+
[46] Xiang Wang, Xiangnan He, Meng Wang, Fuli Feng, and Tat-Seng Chua. 2019. Neural graph collaborative filtering. In Proceedings of the 42nd international ACM SIGIR conference on Research and development in Information Retrieval. 165-174.
|
| 353 |
+
[47] Haolun Wu, Chen Ma, Bhaskar Mitra, Fernando Diaz, and Xue Liu. 2021. Multi-FR: A Multi-Objective Optimization Method for Achieving Two-sided Fairness in E-commerce Recommendation. arXiv preprint arXiv:2105.02951 (2021).
|
| 354 |
+
[48] Yikun Xian, Zuohui Fu, S Muthukrishnan, Gerard De Melo, and Yongfeng Zhang. 2019. Reinforcement knowledge graph reasoning for explainable recommendation. In SIGIR.
|
| 355 |
+
[49] Yikun Xian, Zuohui Fu, Handong Zhao, Yingqiang Ge, Xu Chen, Qiaoying Huang, Shijie Geng, Zhou Qin, Gerard De Melo, Shan Muthukrishnan, and Yongfeng
|
| 356 |
+
|
| 357 |
+
Zhang. 2020. CAFE: Coarse-to-fine neural symbolic reasoning for explainable recommendation. In CIKM.
|
| 358 |
+
[50] Ruobing Xie, Yanlei Liu, Shaoliang Zhang, Rui Wang, Feng Xia, and Leyu Lin. 2021. Personalized Approximate Pareto-Efficient Recommendation. In Proceedings of the Web Conference 2021. 3839-3849.
|
| 359 |
+
[51] Shuyuan Xu, Yingqiang Ge, Yunqi Li, Zuohui Fu, Xu Chen, and Yongfeng Zhang. 2021. Causal Collaborative Filtering. arXiv preprint arXiv:2102.01868 (2021).
|
| 360 |
+
[52] Runzhe Yang, Xingyuan Sun, and Karthik Narasimhan. 2019. A Generalized Algorithm for Multi-Objective Reinforcement Learning and Policy Adaptation. Advances in Neural Information Processing Systems 32 (2019), 14636-14647.
|
| 361 |
+
[53] Tao Yang and Qingyao Ai. 2021. Maximizing Marginal Fairness for Dynamic Learning to Rank. In Proceedings of the Web Conference 2021. 137-145.
|
| 362 |
+
[54] Sirui Yao and Bert Huang. 2017. Beyond Parity: Fairness Objectives for Collaborative Filtering. In Advances in Neural Information Processing Systems.
|
| 363 |
+
[55] Muhammad Bilal Zafar, Isabel Valera, Manuel Gomez Rodriguez, Krishna P. Gummadi, and Adrian Weller. 2017. From Parity to Preference-Based Notions of Fairness in Classification. In Proceedings of NIPS'17.
|
| 364 |
+
[56] Xiangyu Zhao, Liang Zhang, Zhuoye Ding, Long Xia, Jiliang Tang, and Dawei Yin. 2018. Recommendations with negative feedback via pairwise deep reinforcement learning. In Proceedings of the 24th ACM SIGKDD. 1040-1048.
|
| 365 |
+
[57] Xiangyu Zhao, Liang Zhang, Zhuoye Ding, Dawei Yin, Yihong Zhao, and Jiliang Tang. 2018. Deep Reinforcement Learning for List-wise Recommendations. CoRR abs/1801.00209 (2018). arXiv:1801.00209
|
| 366 |
+
[58] Guanjie Zheng, Fuzheng Zhang, Zihan Zheng, Yang Xiang, Nicholas Jing Yuan, Xing Xie, and Zhenhui Li. 2018. DRN: A deep reinforcement learning framework for news recommendation. In Proceedings of WWW '18. 167-176.
|
| 367 |
+
[59] Ziwei Zhu, Xia Hu, and James Caverlee. [n.d.]. Fairness-Aware Tensor-Based Recommendation. In Proceedings of CIKM '18 (Torino, Italy) (CIKM '18). 1153-1162.
|
| 368 |
+
[60] Eckart Zitzler, Marco Laumanns, and Lothar Thiele. 2001. SPEA2: Improving the strength Pareto evolutionary algorithm. TIK-report 103 (2001).
|
2201.00xxx/2201.00140/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8b9860faef361cf6b462f9706fa855318e6bdf2a546f6926756353f320dd5740
|
| 3 |
+
size 472261
|
2201.00xxx/2201.00140/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2201.00xxx/2201.00162/05211657-1eef-426d-a342-10424d7db537_content_list.json
ADDED
|
@@ -0,0 +1,1188 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "MLOps - Definitions, Tools and Challenges",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
287,
|
| 8 |
+
95,
|
| 9 |
+
710,
|
| 10 |
+
118
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Georgios Symeonidis",
|
| 17 |
+
"bbox": [
|
| 18 |
+
254,
|
| 19 |
+
154,
|
| 20 |
+
403,
|
| 21 |
+
170
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "ILSP Xanthi's Division",
|
| 28 |
+
"bbox": [
|
| 29 |
+
250,
|
| 30 |
+
170,
|
| 31 |
+
406,
|
| 32 |
+
183
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "Athena Research and Innovation Centre",
|
| 39 |
+
"bbox": [
|
| 40 |
+
194,
|
| 41 |
+
186,
|
| 42 |
+
465,
|
| 43 |
+
199
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "Xanthi, Greece",
|
| 50 |
+
"bbox": [
|
| 51 |
+
276,
|
| 52 |
+
200,
|
| 53 |
+
380,
|
| 54 |
+
214
|
| 55 |
+
],
|
| 56 |
+
"page_idx": 0
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"text": "giorgos.simeonidis@athenarc.gr",
|
| 61 |
+
"bbox": [
|
| 62 |
+
220,
|
| 63 |
+
215,
|
| 64 |
+
439,
|
| 65 |
+
231
|
| 66 |
+
],
|
| 67 |
+
"page_idx": 0
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"text": "Evangelos Nerantzis",
|
| 72 |
+
"bbox": [
|
| 73 |
+
612,
|
| 74 |
+
155,
|
| 75 |
+
753,
|
| 76 |
+
169
|
| 77 |
+
],
|
| 78 |
+
"page_idx": 0
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "text",
|
| 82 |
+
"text": "MLV Research Group",
|
| 83 |
+
"bbox": [
|
| 84 |
+
606,
|
| 85 |
+
170,
|
| 86 |
+
754,
|
| 87 |
+
185
|
| 88 |
+
],
|
| 89 |
+
"page_idx": 0
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"type": "text",
|
| 93 |
+
"text": "Dept. of Computer Science",
|
| 94 |
+
"bbox": [
|
| 95 |
+
589,
|
| 96 |
+
186,
|
| 97 |
+
772,
|
| 98 |
+
200
|
| 99 |
+
],
|
| 100 |
+
"page_idx": 0
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"type": "text",
|
| 104 |
+
"text": "International Hellenic University",
|
| 105 |
+
"bbox": [
|
| 106 |
+
571,
|
| 107 |
+
200,
|
| 108 |
+
795,
|
| 109 |
+
215
|
| 110 |
+
],
|
| 111 |
+
"page_idx": 0
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"type": "text",
|
| 115 |
+
"text": "Kavala, Greece",
|
| 116 |
+
"bbox": [
|
| 117 |
+
627,
|
| 118 |
+
215,
|
| 119 |
+
733,
|
| 120 |
+
229
|
| 121 |
+
],
|
| 122 |
+
"page_idx": 0
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"type": "text",
|
| 126 |
+
"text": "e.nerantzis@athenarc.gr",
|
| 127 |
+
"bbox": [
|
| 128 |
+
601,
|
| 129 |
+
231,
|
| 130 |
+
767,
|
| 131 |
+
244
|
| 132 |
+
],
|
| 133 |
+
"page_idx": 0
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"type": "text",
|
| 137 |
+
"text": "Apostolos Kazakis",
|
| 138 |
+
"bbox": [
|
| 139 |
+
259,
|
| 140 |
+
266,
|
| 141 |
+
388,
|
| 142 |
+
280
|
| 143 |
+
],
|
| 144 |
+
"page_idx": 0
|
| 145 |
+
},
|
| 146 |
+
{
|
| 147 |
+
"type": "text",
|
| 148 |
+
"text": "MLV Research Group",
|
| 149 |
+
"bbox": [
|
| 150 |
+
246,
|
| 151 |
+
281,
|
| 152 |
+
395,
|
| 153 |
+
296
|
| 154 |
+
],
|
| 155 |
+
"page_idx": 0
|
| 156 |
+
},
|
| 157 |
+
{
|
| 158 |
+
"type": "text",
|
| 159 |
+
"text": "Dept. of Computer Science",
|
| 160 |
+
"bbox": [
|
| 161 |
+
230,
|
| 162 |
+
297,
|
| 163 |
+
413,
|
| 164 |
+
311
|
| 165 |
+
],
|
| 166 |
+
"page_idx": 0
|
| 167 |
+
},
|
| 168 |
+
{
|
| 169 |
+
"type": "text",
|
| 170 |
+
"text": "International Hellenic University",
|
| 171 |
+
"bbox": [
|
| 172 |
+
212,
|
| 173 |
+
311,
|
| 174 |
+
436,
|
| 175 |
+
325
|
| 176 |
+
],
|
| 177 |
+
"page_idx": 0
|
| 178 |
+
},
|
| 179 |
+
{
|
| 180 |
+
"type": "text",
|
| 181 |
+
"text": "Kavala, Greece",
|
| 182 |
+
"bbox": [
|
| 183 |
+
267,
|
| 184 |
+
327,
|
| 185 |
+
375,
|
| 186 |
+
340
|
| 187 |
+
],
|
| 188 |
+
"page_idx": 0
|
| 189 |
+
},
|
| 190 |
+
{
|
| 191 |
+
"type": "text",
|
| 192 |
+
"text": "akazak@gmail.com",
|
| 193 |
+
"bbox": [
|
| 194 |
+
258,
|
| 195 |
+
342,
|
| 196 |
+
390,
|
| 197 |
+
357
|
| 198 |
+
],
|
| 199 |
+
"page_idx": 0
|
| 200 |
+
},
|
| 201 |
+
{
|
| 202 |
+
"type": "text",
|
| 203 |
+
"text": "George A. Papakostas*",
|
| 204 |
+
"bbox": [
|
| 205 |
+
593,
|
| 206 |
+
266,
|
| 207 |
+
751,
|
| 208 |
+
280
|
| 209 |
+
],
|
| 210 |
+
"page_idx": 0
|
| 211 |
+
},
|
| 212 |
+
{
|
| 213 |
+
"type": "text",
|
| 214 |
+
"text": "MLV Research Group",
|
| 215 |
+
"bbox": [
|
| 216 |
+
594,
|
| 217 |
+
281,
|
| 218 |
+
743,
|
| 219 |
+
296
|
| 220 |
+
],
|
| 221 |
+
"page_idx": 0
|
| 222 |
+
},
|
| 223 |
+
{
|
| 224 |
+
"type": "text",
|
| 225 |
+
"text": "Dept. of Computer Science",
|
| 226 |
+
"bbox": [
|
| 227 |
+
576,
|
| 228 |
+
297,
|
| 229 |
+
761,
|
| 230 |
+
311
|
| 231 |
+
],
|
| 232 |
+
"page_idx": 0
|
| 233 |
+
},
|
| 234 |
+
{
|
| 235 |
+
"type": "text",
|
| 236 |
+
"text": "International Hellenic University",
|
| 237 |
+
"bbox": [
|
| 238 |
+
560,
|
| 239 |
+
311,
|
| 240 |
+
784,
|
| 241 |
+
325
|
| 242 |
+
],
|
| 243 |
+
"page_idx": 0
|
| 244 |
+
},
|
| 245 |
+
{
|
| 246 |
+
"type": "text",
|
| 247 |
+
"text": "Kavala, Greece",
|
| 248 |
+
"bbox": [
|
| 249 |
+
616,
|
| 250 |
+
327,
|
| 251 |
+
722,
|
| 252 |
+
340
|
| 253 |
+
],
|
| 254 |
+
"page_idx": 0
|
| 255 |
+
},
|
| 256 |
+
{
|
| 257 |
+
"type": "text",
|
| 258 |
+
"text": "gpapak@cs.ihu.gr",
|
| 259 |
+
"bbox": [
|
| 260 |
+
611,
|
| 261 |
+
342,
|
| 262 |
+
733,
|
| 263 |
+
357
|
| 264 |
+
],
|
| 265 |
+
"page_idx": 0
|
| 266 |
+
},
|
| 267 |
+
{
|
| 268 |
+
"type": "text",
|
| 269 |
+
"text": "Abstract—This paper is an overview of the Machine Learning Operations (MLOps) area. Our aim is to define the operation and the components of such systems by highlighting the current problems and trends. In this context we present the different tools and their usefulness in order to provide the corresponding guidelines. Moreover, the connection between MLOps and AutoML (Automated Machine Learning) is identified and how this combination could work is proposed.",
|
| 270 |
+
"bbox": [
|
| 271 |
+
81,
|
| 272 |
+
411,
|
| 273 |
+
486,
|
| 274 |
+
513
|
| 275 |
+
],
|
| 276 |
+
"page_idx": 0
|
| 277 |
+
},
|
| 278 |
+
{
|
| 279 |
+
"type": "text",
|
| 280 |
+
"text": "Keywords-MLOps; AutoML; machine learning, re-training; monitoring; explainability; robustness; sustainability; fairness",
|
| 281 |
+
"bbox": [
|
| 282 |
+
83,
|
| 283 |
+
518,
|
| 284 |
+
486,
|
| 285 |
+
546
|
| 286 |
+
],
|
| 287 |
+
"page_idx": 0
|
| 288 |
+
},
|
| 289 |
+
{
|
| 290 |
+
"type": "text",
|
| 291 |
+
"text": "I. INTRODUCTION",
|
| 292 |
+
"text_level": 1,
|
| 293 |
+
"bbox": [
|
| 294 |
+
217,
|
| 295 |
+
571,
|
| 296 |
+
351,
|
| 297 |
+
585
|
| 298 |
+
],
|
| 299 |
+
"page_idx": 0
|
| 300 |
+
},
|
| 301 |
+
{
|
| 302 |
+
"type": "text",
|
| 303 |
+
"text": "Incorporating machine learning models in production is a challenge that remains from the creation of the first models until now. For years data scientists, machine learning engineers, front end engineers, production engineers tried to find a way to work together and combine their knowledge in order to deploy ready for production models. This task has many difficulties and it is not easy to overcome them. This is why only a small percentage of the ML projects manage to reach production. In the previous years a set of techniques and tools have been proposed and used in order to minimize as much as possible this kind of problems. The development of these tools had multiple targets. Data preprocessing, models' creation, training, evaluation, deployment, and monitoring are some of them. As the field of AI progresses such kind of tools are constantly emerging.",
|
| 304 |
+
"bbox": [
|
| 305 |
+
81,
|
| 306 |
+
592,
|
| 307 |
+
486,
|
| 308 |
+
819
|
| 309 |
+
],
|
| 310 |
+
"page_idx": 0
|
| 311 |
+
},
|
| 312 |
+
{
|
| 313 |
+
"type": "text",
|
| 314 |
+
"text": "II. RELATED WORK",
|
| 315 |
+
"text_level": 1,
|
| 316 |
+
"bbox": [
|
| 317 |
+
210,
|
| 318 |
+
828,
|
| 319 |
+
357,
|
| 320 |
+
842
|
| 321 |
+
],
|
| 322 |
+
"page_idx": 0
|
| 323 |
+
},
|
| 324 |
+
{
|
| 325 |
+
"type": "text",
|
| 326 |
+
"text": "MLOps is a relatively new field and as expected there is not much relevant work and papers. In this section we will mention some of the most important and influential work in every task of the MLOps cycle (Figure 1). At",
|
| 327 |
+
"bbox": [
|
| 328 |
+
81,
|
| 329 |
+
848,
|
| 330 |
+
486,
|
| 331 |
+
909
|
| 332 |
+
],
|
| 333 |
+
"page_idx": 0
|
| 334 |
+
},
|
| 335 |
+
{
|
| 336 |
+
"type": "text",
|
| 337 |
+
"text": "first, Sasu Makineth et al. [1] describe the importance of MLOps in the field of data science, based on a survey where they collected responses from 331 professionals from 63 different countries. As for the data manipulation task, Cedric Renggli et al. [2] describe the significance of data quality for an MLOps system while demonstrates how different aspects of data quality propagate through various stages of machine learning development. Philipp Ruf et al. [3] examine the role and the connectivity of the MLOps tools for every task in the MLOps cycle. Also, they present a recipe for the selection of the better Open-Source tools. Monitoring and the corresponding challenges were discussed by Janis Klaise et al. [4] using recent examples of production ready solutions using open source tools. Finally Damnian A. Tamburri [5] presents the current trends and challenges, focusing on sustainability and explainability.",
|
| 338 |
+
"bbox": [
|
| 339 |
+
506,
|
| 340 |
+
410,
|
| 341 |
+
913,
|
| 342 |
+
652
|
| 343 |
+
],
|
| 344 |
+
"page_idx": 0
|
| 345 |
+
},
|
| 346 |
+
{
|
| 347 |
+
"type": "text",
|
| 348 |
+
"text": "III. MLOPS",
|
| 349 |
+
"text_level": 1,
|
| 350 |
+
"bbox": [
|
| 351 |
+
665,
|
| 352 |
+
662,
|
| 353 |
+
756,
|
| 354 |
+
676
|
| 355 |
+
],
|
| 356 |
+
"page_idx": 0
|
| 357 |
+
},
|
| 358 |
+
{
|
| 359 |
+
"type": "text",
|
| 360 |
+
"text": "MLOps(machine learning operations) stands for the collection of techniques and tools for the deployment of ML models in production [6]. Contains the combination of DevOps and Machine Learning. DevOps [7] stands for a set of practices with the main purpose to minimize the needed time for a software release, reducing the gap between software development and operations [8][9]. The two main principles of DevOps are Continuous Integration (CI) and Continuous Delivery (CD). Continuous integration is the practice by which software development organizations try to integrate code written by developer teams at frequent intervals. So they constantly test their code and make small improvements each time based on the errors and weaknesses that results from the tests. This results in a reduction in the software development process cycle [10]. Continuous",
|
| 361 |
+
"bbox": [
|
| 362 |
+
506,
|
| 363 |
+
681,
|
| 364 |
+
913,
|
| 365 |
+
909
|
| 366 |
+
],
|
| 367 |
+
"page_idx": 0
|
| 368 |
+
},
|
| 369 |
+
{
|
| 370 |
+
"type": "aside_text",
|
| 371 |
+
"text": "arXiv:2201.00162v1 [cs.LG] 1 Jan 2022",
|
| 372 |
+
"bbox": [
|
| 373 |
+
22,
|
| 374 |
+
275,
|
| 375 |
+
58,
|
| 376 |
+
700
|
| 377 |
+
],
|
| 378 |
+
"page_idx": 0
|
| 379 |
+
},
|
| 380 |
+
{
|
| 381 |
+
"type": "image",
|
| 382 |
+
"img_path": "images/1277ad12c707722fffd49c7e3c5e257609af60d11f9a18c453bd012bfd11c745.jpg",
|
| 383 |
+
"image_caption": [
|
| 384 |
+
"Figure 1. MLOps Life-cycle."
|
| 385 |
+
],
|
| 386 |
+
"image_footnote": [],
|
| 387 |
+
"bbox": [
|
| 388 |
+
86,
|
| 389 |
+
85,
|
| 390 |
+
488,
|
| 391 |
+
207
|
| 392 |
+
],
|
| 393 |
+
"page_idx": 1
|
| 394 |
+
},
|
| 395 |
+
{
|
| 396 |
+
"type": "text",
|
| 397 |
+
"text": "delivery is the practice according to which, there is constantly a new version of the software under development to be installed for testing, evaluation and then production. With this practice, the software releases resulting from the continuous integration with the improvements and the new features reach the end users much faster [11]. After the great acceptance of DevOps and the practices of \"continuous software development\" in general [12][8], the need to apply the same principles that govern DevOps in machine learning models became imperative [6]. This is how these practices, called MLOps (Machine Learning Operations), came about. MLOps attempts to automate Machine Learning processes using DevOps practices and approaches. The two main DevOps principles they seek to serve are: Continuous Integration (CI) and Continuous Delivery (DC) [9]. Although it seems simple in reality it is not. This is due to the fact that a Machine Learning model is not independent but is part of a wider software system and consists not only of code but also of data. As the data is constantly changing, the model is constantly called upon to retrain from the new data that emerges. For this reason, MLOps introduce a new practice, in addition to CI and CD, that of Continuous Training (CT), which aims to automatically retrain the model where needed. From the above, it becomes clear that compared to DevOps, MLOps are much more complex and incorporate additional procedures involving data and models [13][3][14].",
|
| 398 |
+
"bbox": [
|
| 399 |
+
86,
|
| 400 |
+
260,
|
| 401 |
+
485,
|
| 402 |
+
652
|
| 403 |
+
],
|
| 404 |
+
"page_idx": 1
|
| 405 |
+
},
|
| 406 |
+
{
|
| 407 |
+
"type": "text",
|
| 408 |
+
"text": "A. MLOps pipeline",
|
| 409 |
+
"text_level": 1,
|
| 410 |
+
"bbox": [
|
| 411 |
+
83,
|
| 412 |
+
662,
|
| 413 |
+
217,
|
| 414 |
+
678
|
| 415 |
+
],
|
| 416 |
+
"page_idx": 1
|
| 417 |
+
},
|
| 418 |
+
{
|
| 419 |
+
"type": "text",
|
| 420 |
+
"text": "While there are several attempts to capture and describe MLOps, the one that is best known is the proposal of ToughWorks [15][16], which automates the life cycle of end-to-end Machine Learning applications (Figure 2). It is \"a software engineering approach in which an interoperable team produces machine learning applications based on code, data and models in small, secure new versions that can be replicated and delivered reliably at any time, in short custom cycles\". This approach includes three basic procedures involving: collection, selection and preparation of data to be used in model training, in finding and selecting the most efficient model after testing and experimenting with different models, in developing and sending the selected model in production. A simplified form of such a pipeline is shown in Figure 2.",
|
| 421 |
+
"bbox": [
|
| 422 |
+
81,
|
| 423 |
+
681,
|
| 424 |
+
486,
|
| 425 |
+
907
|
| 426 |
+
],
|
| 427 |
+
"page_idx": 1
|
| 428 |
+
},
|
| 429 |
+
{
|
| 430 |
+
"type": "image",
|
| 431 |
+
"img_path": "images/9326b14f6c8f4afc387d22b7f92dcebe2d3f58ea5f09056e61c3001ade13a70f.jpg",
|
| 432 |
+
"image_caption": [
|
| 433 |
+
"Figure 2. MLOps Pipeline"
|
| 434 |
+
],
|
| 435 |
+
"image_footnote": [],
|
| 436 |
+
"bbox": [
|
| 437 |
+
514,
|
| 438 |
+
85,
|
| 439 |
+
908,
|
| 440 |
+
207
|
| 441 |
+
],
|
| 442 |
+
"page_idx": 1
|
| 443 |
+
},
|
| 444 |
+
{
|
| 445 |
+
"type": "image",
|
| 446 |
+
"img_path": "images/e3b86046bda31bd82904f6e51597bef9cd7a4b99a79340f9c7bd0dcd72d80605.jpg",
|
| 447 |
+
"image_caption": [
|
| 448 |
+
"Figure 3. Googles Maturity Levels."
|
| 449 |
+
],
|
| 450 |
+
"image_footnote": [],
|
| 451 |
+
"bbox": [
|
| 452 |
+
524,
|
| 453 |
+
265,
|
| 454 |
+
906,
|
| 455 |
+
398
|
| 456 |
+
],
|
| 457 |
+
"page_idx": 1
|
| 458 |
+
},
|
| 459 |
+
{
|
| 460 |
+
"type": "text",
|
| 461 |
+
"text": "After collecting, evaluating and selecting the data that will be used for training, we automate the process of creating models and training them. This allows us to produce more than one model which we can test and experiment in order to produce a more efficient and effective model while recording the results of our tests. Then we have to resolve various issues related to the production of the model, as well as submit it to various tests in order to confirm its reliability before developing it for production. Finally, we can monitor the model and collect the resulting new data, which will be used to retrain the model, thus ensuring its continuous improvement [17].",
|
| 462 |
+
"bbox": [
|
| 463 |
+
506,
|
| 464 |
+
469,
|
| 465 |
+
911,
|
| 466 |
+
651
|
| 467 |
+
],
|
| 468 |
+
"page_idx": 1
|
| 469 |
+
},
|
| 470 |
+
{
|
| 471 |
+
"type": "text",
|
| 472 |
+
"text": "B. Maturity Levels",
|
| 473 |
+
"text_level": 1,
|
| 474 |
+
"bbox": [
|
| 475 |
+
509,
|
| 476 |
+
683,
|
| 477 |
+
640,
|
| 478 |
+
698
|
| 479 |
+
],
|
| 480 |
+
"page_idx": 1
|
| 481 |
+
},
|
| 482 |
+
{
|
| 483 |
+
"type": "text",
|
| 484 |
+
"text": "Depending on the level of automation of a MLOps system, it can be classified at a corresponding level [13]. These levels were named by the community maturity levels. Although there is no universal maturity model, the two main ones were created by Google and Microsoft. Google model consists of three levels and its structure is presented in Figure 3 [18]. MLOps level 0: Manual process, MLOps level 1: ML pipeline automation, MLOps level 2: CI/CD pipeline automation. Microsoft model consists of five levels and its structure is presented in Figure 4 [19]. Level 1: No MLOps, Level 2: DevOps but no MLOps, Level 3: Automated Training, Level 4: Automated Model Deployment, Level 5: Full MLOps Automated Operations.",
|
| 485 |
+
"bbox": [
|
| 486 |
+
506,
|
| 487 |
+
712,
|
| 488 |
+
913,
|
| 489 |
+
909
|
| 490 |
+
],
|
| 491 |
+
"page_idx": 1
|
| 492 |
+
},
|
| 493 |
+
{
|
| 494 |
+
"type": "image",
|
| 495 |
+
"img_path": "images/48f52974b7ee2a474a10eb988dc6660d9493a7043acd921fb532920d1a14beb6.jpg",
|
| 496 |
+
"image_caption": [
|
| 497 |
+
"Figure 4. Microsoft Maturity Levels."
|
| 498 |
+
],
|
| 499 |
+
"image_footnote": [],
|
| 500 |
+
"bbox": [
|
| 501 |
+
102,
|
| 502 |
+
104,
|
| 503 |
+
475,
|
| 504 |
+
233
|
| 505 |
+
],
|
| 506 |
+
"page_idx": 2
|
| 507 |
+
},
|
| 508 |
+
{
|
| 509 |
+
"type": "text",
|
| 510 |
+
"text": "IV. TOOLS AND PLATFORMS",
|
| 511 |
+
"text_level": 1,
|
| 512 |
+
"bbox": [
|
| 513 |
+
181,
|
| 514 |
+
309,
|
| 515 |
+
385,
|
| 516 |
+
321
|
| 517 |
+
],
|
| 518 |
+
"page_idx": 2
|
| 519 |
+
},
|
| 520 |
+
{
|
| 521 |
+
"type": "text",
|
| 522 |
+
"text": "In recent years many different tools have emerged in order to help automate the sequence of artificial learning processes [20]. This section provides an overview of the different tools and requirements that these tools meet. Note that different tools automate different phases in the machine learning workflow. The majority of tools come from the open source community because half of all IT organizations use open source tools for AI and ML and the percentage is expected to be around two-thirds by 2023. At GitHub alone, there are 65 million developers and 3 million organizations contributing to 200 million projects. Therefore, it is not surprising that there are advanced sets of open source tools in the landscape of machine learning and artificial intelligence. Open source tools focus on specific tasks within MLOps instead of providing end-to-end machine learning life-cycle management. These tools and platforms typically require a development environment in Python and R. In recent years many different tools have emerged which help in automating the ML pipeline. The choice of tools for MLOps is based on the context of the respective ML solution and the operations setup.",
|
| 523 |
+
"bbox": [
|
| 524 |
+
81,
|
| 525 |
+
330,
|
| 526 |
+
486,
|
| 527 |
+
648
|
| 528 |
+
],
|
| 529 |
+
"page_idx": 2
|
| 530 |
+
},
|
| 531 |
+
{
|
| 532 |
+
"type": "text",
|
| 533 |
+
"text": "A. Data Preprocessing Tools",
|
| 534 |
+
"text_level": 1,
|
| 535 |
+
"bbox": [
|
| 536 |
+
83,
|
| 537 |
+
661,
|
| 538 |
+
282,
|
| 539 |
+
676
|
| 540 |
+
],
|
| 541 |
+
"page_idx": 2
|
| 542 |
+
},
|
| 543 |
+
{
|
| 544 |
+
"type": "text",
|
| 545 |
+
"text": "Data processing tools are divided into two main categories: data labeling tools and data versioning tools. Data labeling tools (also called annotation tools, tagging or sorting data), big data labeling plans such as text, images or sound. Data labeling tools can in turn be divided into different categories depending on the task they perform. Some are designed to highlight specific file types such as videos or images [21]. Few of these tools can edit all file types. There are also different types of tags that differ in each tool. Boundary frames, polygonal annotations, and semantic segmentation are the most common features in the label market. Your choices about data labeling tools will be an essential factor in the success of the machine learning model. You need to specify the type of data labeling your organization needs [22]. Labeling accuracy is an important",
|
| 546 |
+
"bbox": [
|
| 547 |
+
81,
|
| 548 |
+
681,
|
| 549 |
+
486,
|
| 550 |
+
909
|
| 551 |
+
],
|
| 552 |
+
"page_idx": 2
|
| 553 |
+
},
|
| 554 |
+
{
|
| 555 |
+
"type": "text",
|
| 556 |
+
"text": "aspect of data labeling [23]. High quality data creates better model performance. Data extraction tools (also called data version controls) by managing different versions of data sets and storing them in an accessible and well-organized way [24]. This allows data science teams to gain knowledge, such as identifying how changes affect model performance and understanding how data sets evolve. The most important data preprocessing tools are listed in table I.",
|
| 557 |
+
"bbox": [
|
| 558 |
+
506,
|
| 559 |
+
93,
|
| 560 |
+
911,
|
| 561 |
+
214
|
| 562 |
+
],
|
| 563 |
+
"page_idx": 2
|
| 564 |
+
},
|
| 565 |
+
{
|
| 566 |
+
"type": "table",
|
| 567 |
+
"img_path": "images/5f032b36116cf13ec2e09a84cba52c940e42e24dea8dabeebad61a503214a9ef.jpg",
|
| 568 |
+
"table_caption": [],
|
| 569 |
+
"table_footnote": [],
|
| 570 |
+
"table_body": "<table><tr><td>Name</td><td>Status</td><td>Launched in</td><td>Use</td></tr><tr><td>iMerit</td><td>Private</td><td>2012</td><td>Data Preprocessing</td></tr><tr><td>Pachyderm</td><td>Private</td><td>2014</td><td>Data Versioning</td></tr><tr><td>Labelbox</td><td>Private</td><td>2017</td><td>Data Preprocessing</td></tr><tr><td>Prodigy</td><td>Private</td><td>2017</td><td>Data Preprocessing</td></tr><tr><td>Comet</td><td>Private</td><td>2017</td><td>Data Versioning</td></tr><tr><td>Data Version Control</td><td>Open Source</td><td>2017</td><td>Data Versioning</td></tr><tr><td>Qri</td><td>Open Source</td><td>2018</td><td>Data Versioning</td></tr><tr><td>Weights and Biases</td><td>Private</td><td>2018</td><td>Data Versioning</td></tr><tr><td>Delta Lake</td><td>Open Source</td><td>2019</td><td>Data Versioning</td></tr><tr><td>Doccano</td><td>Open Source</td><td>2019</td><td>Data Preprocessing</td></tr><tr><td>Snorkel</td><td>Private</td><td>2019</td><td>Data Preprocessing</td></tr><tr><td>Supervisely</td><td>Private</td><td>2019</td><td>Data Preprocessing</td></tr><tr><td>Segments.ai</td><td>Private</td><td>2020</td><td>Data Preprocessing</td></tr><tr><td>Dolt</td><td>Open Source</td><td>2020</td><td>Data Versioning</td></tr><tr><td>LakeFs</td><td>Open Source</td><td>2020</td><td>Data Versioning</td></tr></table>",
|
| 571 |
+
"bbox": [
|
| 572 |
+
511,
|
| 573 |
+
227,
|
| 574 |
+
928,
|
| 575 |
+
414
|
| 576 |
+
],
|
| 577 |
+
"page_idx": 2
|
| 578 |
+
},
|
| 579 |
+
{
|
| 580 |
+
"type": "text",
|
| 581 |
+
"text": "Table I DATA PREPROCESSING TOOLS.",
|
| 582 |
+
"bbox": [
|
| 583 |
+
624,
|
| 584 |
+
417,
|
| 585 |
+
797,
|
| 586 |
+
441
|
| 587 |
+
],
|
| 588 |
+
"page_idx": 2
|
| 589 |
+
},
|
| 590 |
+
{
|
| 591 |
+
"type": "text",
|
| 592 |
+
"text": "B. Modeling Tools",
|
| 593 |
+
"text_level": 1,
|
| 594 |
+
"bbox": [
|
| 595 |
+
509,
|
| 596 |
+
482,
|
| 597 |
+
638,
|
| 598 |
+
496
|
| 599 |
+
],
|
| 600 |
+
"page_idx": 2
|
| 601 |
+
},
|
| 602 |
+
{
|
| 603 |
+
"type": "text",
|
| 604 |
+
"text": "The tools with which we extract features from a raw data set in order to create optimal training data sets are called feature engineering tools. Tools like these have the ability to speed up the feature extraction process [25] when applied for common applications and generic problems. To monitor the versions of the data of each experiment and its results as well as to compare between different experiments, we use experiment tracking tools, which store all the necessary information about the different experiments because developing machine learning projects involve running multiple experiments with different models, model parameters, or training data. Hyperparameter tuning or optimization tools automate the process of searching and selecting hyperparameters that give optimal performance for machine learning models. Hyperparameters are the parameters of the machine learning models such as the size of a neural network or types of regularization that model developers can adjust to achieve different results [26]. The most important modeling tools are listed in table II.",
|
| 605 |
+
"bbox": [
|
| 606 |
+
506,
|
| 607 |
+
501,
|
| 608 |
+
911,
|
| 609 |
+
787
|
| 610 |
+
],
|
| 611 |
+
"page_idx": 2
|
| 612 |
+
},
|
| 613 |
+
{
|
| 614 |
+
"type": "text",
|
| 615 |
+
"text": "C. Operationalization Tools",
|
| 616 |
+
"text_level": 1,
|
| 617 |
+
"bbox": [
|
| 618 |
+
509,
|
| 619 |
+
799,
|
| 620 |
+
702,
|
| 621 |
+
813
|
| 622 |
+
],
|
| 623 |
+
"page_idx": 2
|
| 624 |
+
},
|
| 625 |
+
{
|
| 626 |
+
"type": "text",
|
| 627 |
+
"text": "Then to facilitate the integration of ML models in a production environment, we use machine learning model deployment [27] tools. Machine learning model monitoring is a key aspect of every successful ML project because ML model performance tends to decay after model deployment due to changes in the input data flow over time [28]. Model",
|
| 628 |
+
"bbox": [
|
| 629 |
+
506,
|
| 630 |
+
816,
|
| 631 |
+
911,
|
| 632 |
+
907
|
| 633 |
+
],
|
| 634 |
+
"page_idx": 2
|
| 635 |
+
},
|
| 636 |
+
{
|
| 637 |
+
"type": "table",
|
| 638 |
+
"img_path": "images/538cb74dcc6a88d0e1c98b6d5a63ad49e339e59090184e941c68ae90865a9f6c.jpg",
|
| 639 |
+
"table_caption": [],
|
| 640 |
+
"table_footnote": [],
|
| 641 |
+
"table_body": "<table><tr><td>Name</td><td>Status</td><td>Launched in</td><td>Use</td></tr><tr><td>Hyperopt</td><td>Open Source</td><td>2013</td><td>Hyperparameter Optimization</td></tr><tr><td>SigOpt</td><td>Public</td><td>2014</td><td>Hyperparameter Optimization</td></tr><tr><td>Iguazio Data Science Platform</td><td>Private</td><td>2014</td><td>Feature Engineering</td></tr><tr><td>TsFresh</td><td>Private</td><td>2016</td><td>Feature Engineering</td></tr><tr><td>Featuretools</td><td>Private</td><td>2017</td><td>Feature Engineering</td></tr><tr><td>Comet</td><td>Private</td><td>2017</td><td>Experiment Tracking</td></tr><tr><td>Neptune.ai</td><td>Private</td><td>2017</td><td>Experiment Tracking</td></tr><tr><td>TensorBoard</td><td>Open source</td><td>2017</td><td>Experiment Tracking</td></tr><tr><td>Google Vizier</td><td>Public</td><td>2017</td><td>Hyperparameter Optimization</td></tr><tr><td>Scikti-Optimize</td><td>Open source</td><td>2017</td><td>Hyperparameter Optimization</td></tr><tr><td>dotData</td><td>Private</td><td>2018</td><td>Feature Engineering</td></tr><tr><td>Weight and Biases</td><td>Private</td><td>2018</td><td>Experiment Tracking</td></tr><tr><td>CML</td><td>Open source</td><td>2018</td><td>Experiment Tracking</td></tr><tr><td>MLFlow</td><td>Open source</td><td>2018</td><td>Experiment Tracking</td></tr><tr><td>Optuna</td><td>Open source</td><td>2018</td><td>Hyperparameter Optimization</td></tr><tr><td>Talos</td><td>Open Source</td><td>2018</td><td>Hyperparameter Optimization</td></tr><tr><td>AutoFet</td><td>Open Source</td><td>2019</td><td>Feature Engineering</td></tr><tr><td>Feast</td><td>Private</td><td>2019</td><td>Feature Engineering</td></tr><tr><td>GuildAi</td><td>Open Source</td><td>2019</td><td>Experiment Tracking</td></tr><tr><td>Rasgo</td><td>Private</td><td>2020</td><td>Feature Engineering</td></tr><tr><td>ModelDB</td><td>Open source</td><td>2020</td><td>Experiment 
Tracking</td></tr><tr><td>HopsWork</td><td>Private</td><td>2021</td><td>Feature Engineering</td></tr><tr><td>Aim</td><td>Open source</td><td>2021</td><td>Experiment Tracking</td></tr></table>",
|
| 642 |
+
"bbox": [
|
| 643 |
+
84,
|
| 644 |
+
88,
|
| 645 |
+
500,
|
| 646 |
+
306
|
| 647 |
+
],
|
| 648 |
+
"page_idx": 3
|
| 649 |
+
},
|
| 650 |
+
{
|
| 651 |
+
"type": "text",
|
| 652 |
+
"text": "monitoring tools detect data drifts and anomalies over time and allow setting up alerts in case of performance issues. Finally, we should not forget to mention that at this time there are tools available that cover the life cycle of an end-to-end machine learning application. The most important operationalization tools are listed in table III.",
|
| 653 |
+
"bbox": [
|
| 654 |
+
81,
|
| 655 |
+
376,
|
| 656 |
+
486,
|
| 657 |
+
467
|
| 658 |
+
],
|
| 659 |
+
"page_idx": 3
|
| 660 |
+
},
|
| 661 |
+
{
|
| 662 |
+
"type": "table",
|
| 663 |
+
"img_path": "images/b9e9cdfe4ba4582020892bbc997dbfa46cf7c444326b61bb0e000efef0d6df2e.jpg",
|
| 664 |
+
"table_caption": [
|
| 665 |
+
"Table II MODELING TOOLS."
|
| 666 |
+
],
|
| 667 |
+
"table_footnote": [],
|
| 668 |
+
"table_body": "<table><tr><td>Name</td><td>Status</td><td>Launched in</td><td>Use</td></tr><tr><td>Google Cloud Platform</td><td>Public</td><td>2008</td><td>end-to-end</td></tr><tr><td>Microsoft Azure</td><td>Public</td><td>2010</td><td>end-to-end</td></tr><tr><td>H2O.ai</td><td>Open source</td><td>2012</td><td>end-to-end</td></tr><tr><td>Unravel Data</td><td>Private</td><td>2013</td><td>Model Monitoring</td></tr><tr><td>Algorithmsia</td><td>Private</td><td>2014</td><td>Model Deployment / Serving</td></tr><tr><td>Iguazio</td><td>Private</td><td>2014</td><td>end-to-end</td></tr><tr><td>Databricks</td><td>Private</td><td>2015</td><td>end-to-end</td></tr><tr><td>TensorFlow Serving</td><td>Open source</td><td>2016</td><td>Model Deployment / Serving</td></tr><tr><td>Featuretools</td><td>Private</td><td>2017</td><td>Feature Engineering</td></tr><tr><td>Amazon SageMaker</td><td>Public</td><td>2017</td><td>end-to-end</td></tr><tr><td>Kubeflow</td><td>Open Source</td><td>2018</td><td>Model Deployment / Serving</td></tr><tr><td>OpenVino</td><td>Open source</td><td>2018</td><td>Model Deployment / Serving</td></tr><tr><td>Triton Inference Server</td><td>Open source</td><td>2018</td><td>Model Deployment / Serving</td></tr><tr><td>Fiddler</td><td>Private</td><td>2018</td><td>Model Monitoring</td></tr><tr><td>Losswise</td><td>Private</td><td>2018</td><td>Model Monitoring</td></tr><tr><td>Alibaba Cloud ML Platform for AI</td><td>Public</td><td>2018</td><td>end-to-end</td></tr><tr><td>MIflow</td><td>Open source</td><td>2018</td><td>end-to-end</td></tr><tr><td>BentoMl</td><td>Open Source</td><td>2019</td><td>Model Deployment / Serving</td></tr><tr><td>Superwise.ai</td><td>Private</td><td>2019</td><td>Model Monitoring</td></tr><tr><td>MLrun</td><td>Open source</td><td>2019</td><td>Model Monitoring</td></tr><tr><td>DataRobot</td><td>Private</td><td>2019</td><td>end-to-end</td></tr><tr><td>Seldon</td><td>Private</td><td>2020</td><td>Model Deployment / Serving</td></tr><tr><td>Torch 
Serve</td><td>Open source</td><td>2020</td><td>Model Deployment / Serving</td></tr><tr><td>KF-serving</td><td>Open source</td><td>2020</td><td>Model Deployment / Serving</td></tr><tr><td>Syndicalai</td><td>Private</td><td>2020</td><td>Model Deployment / Serving</td></tr><tr><td>Arize</td><td>Private</td><td>2020</td><td>Model Monitoring</td></tr><tr><td>Evidently AI</td><td>Open Source</td><td>2020</td><td>Model Monitoring</td></tr><tr><td>WhyLabs</td><td>Open source</td><td>2020</td><td>Model Monitoring</td></tr><tr><td>Cloudera</td><td>Public</td><td>2020</td><td>end-to-end</td></tr><tr><td>BodyWork</td><td>Open source</td><td>2021</td><td>Model Deployment / Serving</td></tr><tr><td>Cortex</td><td>private</td><td>2021</td><td>Model Deployment / Serving</td></tr><tr><td>Sagify</td><td>Open source</td><td>2021</td><td>Model Deployment / Serving</td></tr><tr><td>Aporia</td><td>Open source</td><td>2021</td><td>Model Monitoring</td></tr><tr><td>Deep checks</td><td>Private</td><td>2021</td><td>Model Monitoring</td></tr></table>",
|
| 669 |
+
"bbox": [
|
| 670 |
+
84,
|
| 671 |
+
479,
|
| 672 |
+
500,
|
| 673 |
+
780
|
| 674 |
+
],
|
| 675 |
+
"page_idx": 3
|
| 676 |
+
},
|
| 677 |
+
{
|
| 678 |
+
"type": "text",
|
| 679 |
+
"text": "Table III OPERATIONALIZATION TOOLS.",
|
| 680 |
+
"bbox": [
|
| 681 |
+
197,
|
| 682 |
+
785,
|
| 683 |
+
372,
|
| 684 |
+
806
|
| 685 |
+
],
|
| 686 |
+
"page_idx": 3
|
| 687 |
+
},
|
| 688 |
+
{
|
| 689 |
+
"type": "text",
|
| 690 |
+
"text": "D. The example of colossal companies",
|
| 691 |
+
"text_level": 1,
|
| 692 |
+
"bbox": [
|
| 693 |
+
83,
|
| 694 |
+
844,
|
| 695 |
+
351,
|
| 696 |
+
859
|
| 697 |
+
],
|
| 698 |
+
"page_idx": 3
|
| 699 |
+
},
|
| 700 |
+
{
|
| 701 |
+
"type": "text",
|
| 702 |
+
"text": "It's common for big companies to develop their own MLOps platforms in order to deploy fast and successful, reliable and reproducible pipelines. The main problems that",
|
| 703 |
+
"bbox": [
|
| 704 |
+
81,
|
| 705 |
+
863,
|
| 706 |
+
488,
|
| 707 |
+
909
|
| 708 |
+
],
|
| 709 |
+
"page_idx": 3
|
| 710 |
+
},
|
| 711 |
+
{
|
| 712 |
+
"type": "text",
|
| 713 |
+
"text": "led these companies to create their own platforms are mainly two. Initially, the time needed to build and deliver a model in production [29]. The main goal is to reduce the time required, from a few months to a few weeks. Also, the stability of ML models in their predictions and the reproduction of these models in different conditions are always two of the most important goals. Some illustrative examples of such companies are : Google with TFX(2019) [30], Uber with Michelangelo(2015) [31], Airbnb with Bighead(2017) [32] and Netflix with Metaflow(2020) [33].",
|
| 714 |
+
"bbox": [
|
| 715 |
+
506,
|
| 716 |
+
93,
|
| 717 |
+
913,
|
| 718 |
+
244
|
| 719 |
+
],
|
| 720 |
+
"page_idx": 3
|
| 721 |
+
},
|
| 722 |
+
{
|
| 723 |
+
"type": "text",
|
| 724 |
+
"text": "E. How to choose the right tools",
|
| 725 |
+
"text_level": 1,
|
| 726 |
+
"bbox": [
|
| 727 |
+
509,
|
| 728 |
+
255,
|
| 729 |
+
740,
|
| 730 |
+
268
|
| 731 |
+
],
|
| 732 |
+
"page_idx": 3
|
| 733 |
+
},
|
| 734 |
+
{
|
| 735 |
+
"type": "text",
|
| 736 |
+
"text": "The MLOps life-cycle consists of different tasks. Every task has unique characteristics and the corresponding tools are developing matching with them. Whereas, an efficient MLOps system depends on the choice of the right tools, both for each task and for the connectivity between them. Every challenge also has its own characteristics and the right way to go depends on them [34]. There is not a general recipe one choosing some specific tools [3], but we can provide some general guidelines, that can be helpful at eliminating some tools simplifying this problem. There are tools that offer a variety of functionalities and there are tools that are more specialized. Generally, the fewer tools we use the better because it is easier, for example, to archive compatibility between 3 tools than between 5. But there are some tasks that require better flexibility. So the biggest challenge is to find the balance between flexibility and compatibility. For this reason it is important to make a list of the available tools that are capable of solving the individual problem in every task. Then, we can check the compatibility between them in order to find the best way to go. This requires excellent knowledge of as many tools as possible from every team working on a MLOps system. So the list gets smaller when we add as a precondition the pre-existing knowledge of these tools. This is not always a solution, so we can add tools that are easy to understand and use.",
|
| 737 |
+
"bbox": [
|
| 738 |
+
506,
|
| 739 |
+
273,
|
| 740 |
+
913,
|
| 741 |
+
652
|
| 742 |
+
],
|
| 743 |
+
"page_idx": 3
|
| 744 |
+
},
|
| 745 |
+
{
|
| 746 |
+
"type": "text",
|
| 747 |
+
"text": "V.AUTOML",
|
| 748 |
+
"text_level": 1,
|
| 749 |
+
"bbox": [
|
| 750 |
+
663,
|
| 751 |
+
662,
|
| 752 |
+
759,
|
| 753 |
+
676
|
| 754 |
+
],
|
| 755 |
+
"page_idx": 3
|
| 756 |
+
},
|
| 757 |
+
{
|
| 758 |
+
"type": "text",
|
| 759 |
+
"text": "In the last years more and more companies try to integrate machine learning models into the production process. For this reason another software solution was created. AutoML is the process of automating the different tasks that an ML model creation requires [35]. Specifically, AutoML pipeline contains data preparation, models creation, hyper parameter tuning, evaluation and validation. With these techniques a bunch of models is trained in the same data set, then a hyper parameter fine tuning is applied, finally the models are evaluating and the best model is exported. Therefore the process of creating and selecting the appropriate model, as well as the preparation of the data, turns into a much simpler and more accessible process [36]. This is the reason why every year more and more companies turn their attention to AutoML. The combination of AutoML and MLOps",
|
| 760 |
+
"bbox": [
|
| 761 |
+
506,
|
| 762 |
+
681,
|
| 763 |
+
913,
|
| 764 |
+
910
|
| 765 |
+
],
|
| 766 |
+
"page_idx": 3
|
| 767 |
+
},
|
| 768 |
+
{
|
| 769 |
+
"type": "image",
|
| 770 |
+
"img_path": "images/30dec51fcd1766f04a0398a4ae03752cc6e99f32aa70e8695b86be644be0f277.jpg",
|
| 771 |
+
"image_caption": [
|
| 772 |
+
"Figure 5. AutoML Vs ML."
|
| 773 |
+
],
|
| 774 |
+
"image_footnote": [],
|
| 775 |
+
"bbox": [
|
| 776 |
+
98,
|
| 777 |
+
99,
|
| 778 |
+
473,
|
| 779 |
+
172
|
| 780 |
+
],
|
| 781 |
+
"page_idx": 4
|
| 782 |
+
},
|
| 783 |
+
{
|
| 784 |
+
"type": "text",
|
| 785 |
+
"text": "simplifies and makes much more feasible the deployment of the ML models in production. In these sections we will make a brief introduction into the most modern AutoML tools and platforms aiming at the combination of AutoML and MLOps.",
|
| 786 |
+
"bbox": [
|
| 787 |
+
81,
|
| 788 |
+
237,
|
| 789 |
+
486,
|
| 790 |
+
315
|
| 791 |
+
],
|
| 792 |
+
"page_idx": 4
|
| 793 |
+
},
|
| 794 |
+
{
|
| 795 |
+
"type": "text",
|
| 796 |
+
"text": "A. Tools and Platforms",
|
| 797 |
+
"text_level": 1,
|
| 798 |
+
"bbox": [
|
| 799 |
+
83,
|
| 800 |
+
321,
|
| 801 |
+
246,
|
| 802 |
+
335
|
| 803 |
+
],
|
| 804 |
+
"page_idx": 4
|
| 805 |
+
},
|
| 806 |
+
{
|
| 807 |
+
"type": "text",
|
| 808 |
+
"text": "Every year more and more tools and platforms are emerging [36]. AutoML platforms are services, which are mainly accessible in the cloud. Therefore, for this task they are not preferred. Although when a cloud based MLOps platform selected, is possible to have better compatibility. There are also libraries and API's written in python and c++, which are much more preferable when an end-to-end cloud-based MLOps platform has not been chosen. The ones stand out are Auto-Sklearn [37], Auto-Keras [38], TPOT [39], AutoPytorch [40], BigML [41]. The main platforms are Google Cloud AutoML [42], Akkio [43], H2O [44], Microsoft Azure AutoML [45] and Amazon SageMaker Autopilot [46]. The most important tools are listed in table IV.",
|
| 809 |
+
"bbox": [
|
| 810 |
+
81,
|
| 811 |
+
340,
|
| 812 |
+
486,
|
| 813 |
+
535
|
| 814 |
+
],
|
| 815 |
+
"page_idx": 4
|
| 816 |
+
},
|
| 817 |
+
{
|
| 818 |
+
"type": "table",
|
| 819 |
+
"img_path": "images/27e15dc13fa03bee499c47312d9cda83a498399a8dd1a497cbc71b60f5316521.jpg",
|
| 820 |
+
"table_caption": [],
|
| 821 |
+
"table_footnote": [],
|
| 822 |
+
"table_body": "<table><tr><td>Name</td><td>class</td><td>Status</td></tr><tr><td>Auto-sklearn</td><td>Tool</td><td>Open Source</td></tr><tr><td>Auto-Keras</td><td>Tool</td><td>Open Source</td></tr><tr><td>TPOT</td><td>Tool</td><td>Open Source</td></tr><tr><td>Auto-Pytorch</td><td>Tool</td><td>Open Source</td></tr><tr><td>BigML</td><td>Tool and Platform</td><td>commercial</td></tr><tr><td>Google Cloud AutoML</td><td>Platform</td><td>Open Source</td></tr><tr><td>Akkio</td><td>Platform</td><td>Open Source</td></tr><tr><td>H2O</td><td>Platform</td><td>Commercial</td></tr><tr><td>Microsoft Azure AutoML</td><td>Platform</td><td>commercial</td></tr><tr><td>Amazon SageMaker Autopilot</td><td>Platform</td><td>commercial</td></tr></table>",
|
| 823 |
+
"bbox": [
|
| 824 |
+
89,
|
| 825 |
+
547,
|
| 826 |
+
480,
|
| 827 |
+
691
|
| 828 |
+
],
|
| 829 |
+
"page_idx": 4
|
| 830 |
+
},
|
| 831 |
+
{
|
| 832 |
+
"type": "text",
|
| 833 |
+
"text": "Table IV AUTOML TOOLS AND PLATFORMS.",
|
| 834 |
+
"bbox": [
|
| 835 |
+
184,
|
| 836 |
+
694,
|
| 837 |
+
387,
|
| 838 |
+
718
|
| 839 |
+
],
|
| 840 |
+
"page_idx": 4
|
| 841 |
+
},
|
| 842 |
+
{
|
| 843 |
+
"type": "text",
|
| 844 |
+
"text": "B. Combining MLOps and AutoML",
|
| 845 |
+
"text_level": 1,
|
| 846 |
+
"bbox": [
|
| 847 |
+
83,
|
| 848 |
+
753,
|
| 849 |
+
328,
|
| 850 |
+
768
|
| 851 |
+
],
|
| 852 |
+
"page_idx": 4
|
| 853 |
+
},
|
| 854 |
+
{
|
| 855 |
+
"type": "text",
|
| 856 |
+
"text": "It is obvious that the combination of the two techniques can be extremely effective [3], but there are still some pros and cons. AutoML requires a vast computational power in order to perform. The development of technological means computational power but every year more power is getting closer and closer to overcome these kind of challenges, but still AutoML will always be more computational expensive compare to classic machine learning techniques, mostly because they perform the same tasks in much more less",
|
| 857 |
+
"bbox": [
|
| 858 |
+
81,
|
| 859 |
+
773,
|
| 860 |
+
486,
|
| 861 |
+
909
|
| 862 |
+
],
|
| 863 |
+
"page_idx": 4
|
| 864 |
+
},
|
| 865 |
+
{
|
| 866 |
+
"type": "text",
|
| 867 |
+
"text": "time. Also, we are given much less flexibility. The AutoML tool works as a pipeline and so we have no control over the choices it will make. So AutoML does not qualify for very specialized tasks. On the other hand, with AutoML retraining is a much easier and straightforward task. As long as the new data are labeled or the models use unsupervised techniques, we only have to feed the new data to AutoML tool and deploy the new model. In conclusion, AutoML is a much more quicker and efficient process than the classic ML pipeline [47], which can be extremely beneficial in the achievement of efficient and high maturity level MLOps systems.",
|
| 868 |
+
"bbox": [
|
| 869 |
+
506,
|
| 870 |
+
93,
|
| 871 |
+
913,
|
| 872 |
+
276
|
| 873 |
+
],
|
| 874 |
+
"page_idx": 4
|
| 875 |
+
},
|
| 876 |
+
{
|
| 877 |
+
"type": "text",
|
| 878 |
+
"text": "VI. MLOPS CHALLENGES",
|
| 879 |
+
"text_level": 1,
|
| 880 |
+
"bbox": [
|
| 881 |
+
614,
|
| 882 |
+
291,
|
| 883 |
+
807,
|
| 884 |
+
306
|
| 885 |
+
],
|
| 886 |
+
"page_idx": 4
|
| 887 |
+
},
|
| 888 |
+
{
|
| 889 |
+
"type": "text",
|
| 890 |
+
"text": "In the past years, lots of research tends to focus on the maturity levels of MLOps and the transition to fully automated pipelines [13]. Several challenges have been detected in this area and it is not always easy to overcome them [48]. A low maturity level system relies on the classical machine learning techniques and requires an extremely good connection between the individual working teams such as data scientists, ML engineers and frond end engineers. Lots of technical problems arise from this deviation and the lack of compatibility from one step to another. The first challenge lies in the creation of robust efficient pipelines with strong compatibility. Constant evolving is another critical point of a high maturity level of a MLOps platform, thus constant retraining shifts in the top of the current challenges.",
|
| 891 |
+
"bbox": [
|
| 892 |
+
506,
|
| 893 |
+
316,
|
| 894 |
+
911,
|
| 895 |
+
529
|
| 896 |
+
],
|
| 897 |
+
"page_idx": 4
|
| 898 |
+
},
|
| 899 |
+
{
|
| 900 |
+
"type": "text",
|
| 901 |
+
"text": "A. Efficient Pipelines",
|
| 902 |
+
"text_level": 1,
|
| 903 |
+
"bbox": [
|
| 904 |
+
508,
|
| 905 |
+
545,
|
| 906 |
+
658,
|
| 907 |
+
560
|
| 908 |
+
],
|
| 909 |
+
"page_idx": 4
|
| 910 |
+
},
|
| 911 |
+
{
|
| 912 |
+
"type": "text",
|
| 913 |
+
"text": "A MLOps system includes various pipelines [49]. Commonly a data manipulation pipeline, a model creation pipeline and a deployment pipeline are mandatory. Each of these pipelines must be compatible with the others, in a way that optimizes flow and minimizes errors. From this aspect it is critical to choose the right tools for the creation and connection of these pipelines. The shape of the targets determines the best combination of tools and techniques, whereas you do not have an ideal combination for each problem, but the problem determines the combination to be chosen. Also, it is always critical to use the same data preprocessing libraries in every pipeline. In this way, we will prevent the rise of multiple compatibility errors.",
|
| 914 |
+
"bbox": [
|
| 915 |
+
506,
|
| 916 |
+
568,
|
| 917 |
+
913,
|
| 918 |
+
765
|
| 919 |
+
],
|
| 920 |
+
"page_idx": 4
|
| 921 |
+
},
|
| 922 |
+
{
|
| 923 |
+
"type": "text",
|
| 924 |
+
"text": "B. Re-Training",
|
| 925 |
+
"text_level": 1,
|
| 926 |
+
"bbox": [
|
| 927 |
+
508,
|
| 928 |
+
780,
|
| 929 |
+
616,
|
| 930 |
+
796
|
| 931 |
+
],
|
| 932 |
+
"page_idx": 4
|
| 933 |
+
},
|
| 934 |
+
{
|
| 935 |
+
"type": "text",
|
| 936 |
+
"text": "After monitoring and tracking your model performance, the next step is retraining your machine learning model [50]. The objective is to ensure that the quality of your model in production is up to date. However, even if the pipelines are perfect, there are many problems that complicate or even make retraining impossible. From our point of view, the most important of them is new data manipulation.",
|
| 937 |
+
"bbox": [
|
| 938 |
+
506,
|
| 939 |
+
803,
|
| 940 |
+
911,
|
| 941 |
+
910
|
| 942 |
+
],
|
| 943 |
+
"page_idx": 4
|
| 944 |
+
},
|
| 945 |
+
{
|
| 946 |
+
"type": "text",
|
| 947 |
+
"text": "1) New Data Manipulation: When a model is deployed in production, we use new, raw data to make the predictions and use them to extract the final results. However, when we are using supervised learning, we do not have at our disposal the corresponding labels. So it is impossible to measure the accuracy and constantly evaluate the model. It is possible to perceive the robustness of the model only by evaluating the final results, which isn't always an option. Even if we manage to evaluate the model and find low metrics at new data, the same problem arises again. In order to retrain (fine tune) the model, the labels are prerequisites. Manually labeling the new data is a solution but slows down the process and fails at constant retraining tasks. An approach is using the trained model to label the new data or use unsupervised learning instead of supervised learning but also relies on the type of the problem and the targets of the task. Finally, there are types of data where there is no need for labeling. The most common area that uses this kind of data is time series and forecasting.",
|
| 948 |
+
"bbox": [
|
| 949 |
+
86,
|
| 950 |
+
93,
|
| 951 |
+
485,
|
| 952 |
+
378
|
| 953 |
+
],
|
| 954 |
+
"page_idx": 5
|
| 955 |
+
},
|
| 956 |
+
{
|
| 957 |
+
"type": "text",
|
| 958 |
+
"text": "C. Monitoring",
|
| 959 |
+
"text_level": 1,
|
| 960 |
+
"bbox": [
|
| 961 |
+
86,
|
| 962 |
+
391,
|
| 963 |
+
184,
|
| 964 |
+
405
|
| 965 |
+
],
|
| 966 |
+
"page_idx": 5
|
| 967 |
+
},
|
| 968 |
+
{
|
| 969 |
+
"type": "text",
|
| 970 |
+
"text": "In most papers and articles, monitoring is positioned as one of the most important functions in MLOps [51]. This is because to understand the results helps understanding the lack of the entire system. The last section shows the importance of monitoring not only for the accuracy of the model, but for every aspect of the system.",
|
| 971 |
+
"bbox": [
|
| 972 |
+
86,
|
| 973 |
+
411,
|
| 974 |
+
485,
|
| 975 |
+
500
|
| 976 |
+
],
|
| 977 |
+
"page_idx": 5
|
| 978 |
+
},
|
| 979 |
+
{
|
| 980 |
+
"type": "text",
|
| 981 |
+
"text": "1) Data monitoring: Monitoring the data can be extremely useful in many ways. Detection of outliers and drift is a way to prevent a failure of the model and help the right training. Constant monitoring of the shape of the data is always opposed to training data it is away. There are lots of tools and techniques for data monitoring and choosing the right ones also depends on the target.",
|
| 982 |
+
"bbox": [
|
| 983 |
+
86,
|
| 984 |
+
501,
|
| 985 |
+
485,
|
| 986 |
+
606
|
| 987 |
+
],
|
| 988 |
+
"page_idx": 5
|
| 989 |
+
},
|
| 990 |
+
{
|
| 991 |
+
"type": "text",
|
| 992 |
+
"text": "2) Model Monitoring: Monitoring the accuracy of a model is a way to evaluate the performance in a bunch of data at a precise moment. For a high maturity level system, we need to monitor more aspects of our model and the whole system. In the previous years, lots of research [4][5] is focused on sustainability, robustness [52], fairness, and explainability [53]. The reason is that we need to know more about the structure of the model, the performance, the reason why it works or it doesn't.",
|
| 993 |
+
"bbox": [
|
| 994 |
+
86,
|
| 995 |
+
607,
|
| 996 |
+
485,
|
| 997 |
+
741
|
| 998 |
+
],
|
| 999 |
+
"page_idx": 5
|
| 1000 |
+
},
|
| 1001 |
+
{
|
| 1002 |
+
"type": "text",
|
| 1003 |
+
"text": "VII. CONCLUSION",
|
| 1004 |
+
"text_level": 1,
|
| 1005 |
+
"bbox": [
|
| 1006 |
+
217,
|
| 1007 |
+
753,
|
| 1008 |
+
351,
|
| 1009 |
+
766
|
| 1010 |
+
],
|
| 1011 |
+
"page_idx": 5
|
| 1012 |
+
},
|
| 1013 |
+
{
|
| 1014 |
+
"type": "text",
|
| 1015 |
+
"text": "In conclusion, MLOps is the most efficient way to incorporate ML models in production. Every year more enterprises use these techniques and more research has been made in the area. But MLOps maybe has a different usage. In addition to the application of ML models in production, a fully mature MLOps system with continuous training can lead us to more efficient and realistic ML models. Further, choosing the right tools for each job is a constant challenge. Although there are many papers and articles for the different",
|
| 1016 |
+
"bbox": [
|
| 1017 |
+
86,
|
| 1018 |
+
773,
|
| 1019 |
+
485,
|
| 1020 |
+
907
|
| 1021 |
+
],
|
| 1022 |
+
"page_idx": 5
|
| 1023 |
+
},
|
| 1024 |
+
{
|
| 1025 |
+
"type": "text",
|
| 1026 |
+
"text": "tools it is not easy to follow the guidelines and incorporate them in the most efficient way. Sometimes we have to choose between flexibility and robustness with the respective pros and cons. Finally, monitoring is a stage that must be one of the main points of interest. Monitoring the state of the whole system using sustainability, robustness, fairness, and explainability is from our point of view the key for mature, automated, robust and efficient MLOps systems. For this reason, it is essential to develop model and techniques which enables this kind of monitoring such as explainable machine learning models. AutoML is maybe the game changer in the maturity and efficiency chase. For this reason, a more comprehensive and practical survey for the usage of AutoML in MLOps is necessary.",
|
| 1027 |
+
"bbox": [
|
| 1028 |
+
513,
|
| 1029 |
+
93,
|
| 1030 |
+
911,
|
| 1031 |
+
304
|
| 1032 |
+
],
|
| 1033 |
+
"page_idx": 5
|
| 1034 |
+
},
|
| 1035 |
+
{
|
| 1036 |
+
"type": "text",
|
| 1037 |
+
"text": "ACKNOWLEDGMENT",
|
| 1038 |
+
"text_level": 1,
|
| 1039 |
+
"bbox": [
|
| 1040 |
+
640,
|
| 1041 |
+
316,
|
| 1042 |
+
784,
|
| 1043 |
+
327
|
| 1044 |
+
],
|
| 1045 |
+
"page_idx": 5
|
| 1046 |
+
},
|
| 1047 |
+
{
|
| 1048 |
+
"type": "text",
|
| 1049 |
+
"text": "This work was supported by the MPhil program \"Advanced Technologies in Informatics and Computers\", hosted by the Department of Computer Science, International Hellenic University, Kavala, Greece.",
|
| 1050 |
+
"bbox": [
|
| 1051 |
+
513,
|
| 1052 |
+
334,
|
| 1053 |
+
911,
|
| 1054 |
+
393
|
| 1055 |
+
],
|
| 1056 |
+
"page_idx": 5
|
| 1057 |
+
},
|
| 1058 |
+
{
|
| 1059 |
+
"type": "text",
|
| 1060 |
+
"text": "REFERENCES",
|
| 1061 |
+
"text_level": 1,
|
| 1062 |
+
"bbox": [
|
| 1063 |
+
665,
|
| 1064 |
+
405,
|
| 1065 |
+
756,
|
| 1066 |
+
417
|
| 1067 |
+
],
|
| 1068 |
+
"page_idx": 5
|
| 1069 |
+
},
|
| 1070 |
+
{
|
| 1071 |
+
"type": "list",
|
| 1072 |
+
"sub_type": "ref_text",
|
| 1073 |
+
"list_items": [
|
| 1074 |
+
"[1] S. Mäkinen, H. Skogström, V. Turku, E. Laaksonen, and T. Mikkonen, \"Who needs mlops: What data scientists seek to accomplish and how can mlops help?\"",
|
| 1075 |
+
"[2] C. Renggli, L. Rimanic, N. M. Gürel, B. Karlas, W. Wu, C. Zhang, and E. Zurich, “A data quality-driven view of mlops,” 2 2021. [Online]. Available: https://arxiv.org/abs/2102.07750v1",
|
| 1076 |
+
"[3] P. Ruf, M. Madan, C. Reich, and D. Ould-Abdeslam, \"Demystifying mlops and presenting a recipe for the selection of open-source tools,\" Applied Sciences 2021, Vol. 11, Page 8861, vol. 11, p. 8861, 9 2021. [Online]. Available: https://www.mdpi.com/2076-3417/11/19/8861/htmhttps://www.mdpi.com/2076-3417/11/19/8861",
|
| 1077 |
+
"[4] J. Klaise, A. V. Looveren, C. Cox, G. Vacanti, and A. Coca, \"Monitoring and explainability of models in production,\" 7 2020. [Online]. Available: https://arxiv.org/abs/2007.06299v1",
|
| 1078 |
+
"[5] D. A. Tamburri, \"Sustainable mlops: Trends and challenges,\" Proceedings - 2020 22nd International Symposium on Symbolic and Numeric Algorithms for Scientific Computing, SYNASC 2020, pp. 17-23, 9 2020.",
|
| 1079 |
+
"[6] S. Alla and S. K. Adari, “What is mlops?” in Beginning MLOps with MLFlow. Springer, 2021, pp. 79–124.",
|
| 1080 |
+
"[7] S. Sharma, “The devops adoption playbook : a guide to adopting devops in a multi-speed it enterprise,” IBM Press, pp. 34-58.",
|
| 1081 |
+
"[8] “Continuous software engineering: A roadmap and agenda,” Journal of Systems and Software, vol. 123, pp. 176–189, 1 2017.",
|
| 1082 |
+
"[9] N. Gift and A. Deza, Practical MLOps: operationalizing machine learning models. O'Reilly Media, Inc, 2020."
|
| 1083 |
+
],
|
| 1084 |
+
"bbox": [
|
| 1085 |
+
519,
|
| 1086 |
+
426,
|
| 1087 |
+
911,
|
| 1088 |
+
907
|
| 1089 |
+
],
|
| 1090 |
+
"page_idx": 5
|
| 1091 |
+
},
|
| 1092 |
+
{
|
| 1093 |
+
"type": "list",
|
| 1094 |
+
"sub_type": "ref_text",
|
| 1095 |
+
"list_items": [
|
| 1096 |
+
"[10] E. RAJ, \"Mlops using azure machine learning rapidly test, build, and manage production-ready machine learning life cycles at scale.\" PACKT PUBLISHING LIMITED, pp. 45-62, 2021.",
|
| 1097 |
+
"[11] C. A. Ioannis Karamitsos, Saeed Albarhami, “Applying devops practices of continuous automation for machine learning,” Information 2020, Vol. 11, Page 363, vol. 11, p. 363, 7 2020. [Online]. Available: https://www.mdpi.com/2078-2489/11/7/363/htmhttps://www.mdpi.com/2078-2489/11/7/363",
|
| 1098 |
+
"[12] B. Fitzgerald and K.-J. Stol, \"Continuous software engineering and beyond: Trends and challenges general terms,\" Proceedings of the 1st International Workshop on Rapid Continuous Software Engineering - RCoSE 2014, vol. 14, 2014. [Online]. Available: http://dx.doi.org/10.1145/2593812.2593813",
|
| 1099 |
+
"[13] M. M. John, H. H. Olsson, and J. Bosch, “Towards mlops: A framework and maturity model,” 2021 47th Euromicro Conference on Software Engineering and Advanced Applications (SEAA), pp. 1-8, 9 2021. [Online]. Available: https://ieeexplore.ieee.org/document/9582569/",
|
| 1100 |
+
"[14] M. Treveil and D. Team, “Introducing mlops how to scale machine learning in the enterprise,” p. 185, 2020. [Online]. Available: https://www.oreilly.com/library/view/introducing-mlops/9781492083283/",
|
| 1101 |
+
"[15] D. Sato, \"Thoughtworksinc/cd4ml-workshop: Repository with sample code and instructions for \"continuous intelligence\" and \"continuous delivery for machine learning: Cd4ml\" workshops.\" [Online]. Available: https://github.com/ThoughtWorksInc/cd4ml-workshop",
|
| 1102 |
+
"[16] T. Granlund, A. Kopponen, V. Stirbu, L. Myllyaho, and T. Mikkonen, “Mlops challenges in multi-organization setup: Experiences from two real-world cases.” [Online]. Available: https://oraviz.io/",
|
| 1103 |
+
"[17] D. Sato, A. Wider, and C. Windheuser, \"Continuous delivery for machine learning.\" [Online]. Available: https://martinfowler.com/articles/cd4ml.htmlDataPipelines",
|
| 1104 |
+
"[18] Google, \"Mlops: Continuous delivery and automation pipelines in machine learning-google cloud.\" [Online]. Available: https://cloud.google.com/architecture/mlops-continuous-delivery-and-automation-pipelines-in-macl",
|
| 1105 |
+
"[19] \"Machine learning operations maturity model-azure architecture center-microsoft docs.\" [Online]. Available: https://docs.microsoft.com/en-us/azure/architecture/example-scenario/mlops/mlops-maturity-model",
|
| 1106 |
+
"[20] A. Felipe and V. Maya, \"The state of mlops,\" 2016.",
|
| 1107 |
+
"[21] L. Zhou, S. Pan, J. Wang, and A. V. Vasilakos, \"Machine learning on big data: Opportunities and challenges,\" Neurocomputing, vol. 237, pp. 350-361, 5 2017.",
|
| 1108 |
+
"[22] T. G. Dietterich, \"Machine learning for sequential data: A review,\" Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 2396, pp. 15-30, 2002. [Online]. Available: https://link.springer.com/chapter/10.1007/3-540-70659-3.2"
|
| 1109 |
+
],
|
| 1110 |
+
"bbox": [
|
| 1111 |
+
84,
|
| 1112 |
+
94,
|
| 1113 |
+
486,
|
| 1114 |
+
907
|
| 1115 |
+
],
|
| 1116 |
+
"page_idx": 6
|
| 1117 |
+
},
|
| 1118 |
+
{
|
| 1119 |
+
"type": "list",
|
| 1120 |
+
"sub_type": "ref_text",
|
| 1121 |
+
"list_items": [
|
| 1122 |
+
"[23] T. Fredriksson, D. I. Mattos, J. Bosch, and H. H. Olsson, \"Data labeling: An empirical investigation into industrial challenges and mitigation strategies,\" Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), vol. 12562 LNCS, pp. 202-216, 11 2020. [Online]. Available: https://link.springer.com/chapter/10.1007/978-3-030-64148-1.13",
|
| 1123 |
+
"[24] M. Armbrust, T. Das, L. Sun, B. Yavuz, S. Zhu, M. Murthy, J. Torres, H. van Hovell, A. Ionescu, A. Luszczak, M. Switakowski, M. Szafranski, X. Li, T. Ueshin, M. Mokhtar, P. Boncz, A. Ghodsi, S. Paranjpye, P. Senster, R. Xin, and M. Zaharia, \"Delta lake,\" Proceedings of the VLDB Endowment, vol. 13, pp. 3411-3424, 8 2020. [Online]. Available: https://dl.acm.org/doi/abs/10.14778/3415478.3415560",
|
| 1124 |
+
"[25] S. Khalid, T. Khalil, and S. Nasreen, \"A survey of feature selection and feature extraction techniques in machine learning,\" Proceedings of 2014 Science and Information Conference, SAI 2014, pp. 372-378, 10 2014.",
|
| 1125 |
+
"[26] R. Bardenet, M. Brendel, B. Kégl, M. Sebag, and S. Fr, \"Collaborative hyperparameter tuning,\" vol. 28, pp. 199-207, 5 2013. [Online]. Available: https://proceedings.mlr.press/v28/bardenet13.html",
|
| 1126 |
+
"[27] L. Savu, \"Cloud computing: Deployment models, delivery models, risks and research challenges,\" 2011 International Conference on Computer and Management, CAMAN 2011, 2011.",
|
| 1127 |
+
"[28] J. de la Rúa Martínez, \"Scalable architecture for automating machine learning model monitoring,\" 2020. [Online]. Available: http://oatd.org/oatd/record?record=oai",
|
| 1128 |
+
"[29] J. Bosch and H. H. Olsson, \"Digital for real: A multicase study on the digital transformation of companies in the embedded systems domain,\" Journal of Software: Evolution and Process, vol. 33, p. e2333, 5. [Online]. Available: https://onlinelibrary.wiley.com/doi/full/10.1002/smr.2333",
|
| 1129 |
+
"[30] \"Tensorflow extended (tfx)-ml production pipelines.\" [online]. Available: https://www.tensorflow.org/tfx",
|
| 1130 |
+
"[31] \"Meet michelangelo: Uber's machine learning platform.\" [Online]. Available: https://eng.uber.com/michelangelo-machine-learning-platform/",
|
| 1131 |
+
"[32] \"Bighead: Airbnb's end-to-end machine learning platform-databricks.\" [Online]. Available: https://databricks.com/session/bighead-airbnb-s-end-to-end-machine-learning-platform",
|
| 1132 |
+
"[33] \"Metaflow.\" [Online]. Available: https://metaflow.org/",
|
| 1133 |
+
"[34] S. A. S. K. Adari, \"Beginning mlops with mlflow deploy models in aws sagemaker, google cloud, and microsoft azure,\" 2021. [Online]. Available: https://doi.org/10.1007/978-1-4842-6549-9"
|
| 1134 |
+
],
|
| 1135 |
+
"bbox": [
|
| 1136 |
+
511,
|
| 1137 |
+
94,
|
| 1138 |
+
911,
|
| 1139 |
+
907
|
| 1140 |
+
],
|
| 1141 |
+
"page_idx": 6
|
| 1142 |
+
},
|
| 1143 |
+
{
|
| 1144 |
+
"type": "list",
|
| 1145 |
+
"sub_type": "ref_text",
|
| 1146 |
+
"list_items": [
|
| 1147 |
+
"[35] S. K. Karmaker, M. Hassan, M. J. Smith, M. M. Hassan, L. Xu, C. Zhai, K. Veeramachaneni, S. K. Karmaker, M. M. Hassan, S. Ginn, M. J. Smith, L. Xu, K. Veeramachaneni, and C. Zhai, \"Automl to date and beyond: Challenges and opportunities,\" ACM Computing Surveys (CSUR), vol. 54, p. 175, 10 2021. [Online]. Available: https://dl.acm.org/doi/abs/10.1145/3470918",
|
| 1148 |
+
"[36] P. Gijsbers, E. LeDell, J. Thomas, S. Poirier, B. Bischl, and J. Vanschoren, \"An open source automl benchmark,\" 7 2019. [Online]. Available: https://arxiv.org/abs/1907.00909v1",
|
| 1149 |
+
"[37] \"Auto-sklearn 2.0: Hands-free automl via meta-learning,\" 7 2020. [Online]. Available: http://arxiv.org/abs/2007.04074",
|
| 1150 |
+
"[38] \"Autokeras.\" [Online]. Available: https://autokeras.com/",
|
| 1151 |
+
"[39] \"Tpot.\" [Online]. Available: http://epistasislab.github.io/tpot/",
|
| 1152 |
+
"[40] L. Zimmer, M. Lindauer, and F. Hutter, \"Auto-pytorch tabular: Multi-fidelity metalearning for efficient and robust autodl,\" IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 43, pp. 3079-3090, 6 2020. [Online]. Available: https://arxiv.org/abs/2006.13799v3",
|
| 1153 |
+
"[41] \"Bigml.com.\" [Online]. Available: https://bigml.com/",
|
| 1154 |
+
"[42] \"Cloud automl custom machine learning models-google cloud.\" [Online]. Available: https://cloud.google.com/automl",
|
| 1155 |
+
"[43] \"Modern business runs on ai-no code ai-akkio.\" [Online]. Available: https://www.akkio.com/",
|
| 1156 |
+
"[44] \"H2o.ai-ai cloud platform.\" [Online]. Available: https://www.h2o.ai/",
|
| 1157 |
+
"[45] \"What is automated ml?-automl-azure machine learning.\" [Online]. Available: https://docs.microsoft.com/en-us/azure/machine-learning/concept-automated-ml",
|
| 1158 |
+
"[46] \"Amazon sagemaker autopilot-amazon sagemaker.\" [Online]. Available: https://aws.amazon.com/sagemaker/ autopilot/",
|
| 1159 |
+
"[47] M. Feurer and F. Hutter, \"Practical automated machine learning for the automl challenge 2018,\" ICML 2018 AutoML Workshop, 2018.",
|
| 1160 |
+
"[48] G. Fursin, \"The collective knowledge project: making ml models more portable and reproducible with open apis, reusable best practices and mlops,\" 6 2020. [Online]. Available: https://arxiv.org/abs/2006.07161v2",
|
| 1161 |
+
"[49] Y. Zhou, Y. Yu, and B. Ding, \"Towards mlops: A case study of ml pipeline platform,\" Proceedings - 2020 International Conference on Artificial Intelligence and Computer Engineering, ICAICE 2020, pp. 494-500, 10 2020.",
|
| 1162 |
+
"[50] S. Schelter, F. Biessmann, T. Januschowski, D. Salinas, S. Seufert, and G. Szarvas, “On challenges in machine learning model management,” 2018.",
|
| 1163 |
+
"[51] A. Banerjee, C.-C. Chen, C.-C. Hung, X. Huang, Y. Wang, and R. Chevesaran, \"Challenges and experiences with mlops for performance diagnostics in hybrid-cloud enterprise software deployments,\" 2020. [Online]. Available: https://www.vmware.com/solutions/trustvm-"
|
| 1164 |
+
],
|
| 1165 |
+
"bbox": [
|
| 1166 |
+
84,
|
| 1167 |
+
94,
|
| 1168 |
+
486,
|
| 1169 |
+
907
|
| 1170 |
+
],
|
| 1171 |
+
"page_idx": 7
|
| 1172 |
+
},
|
| 1173 |
+
{
|
| 1174 |
+
"type": "list",
|
| 1175 |
+
"sub_type": "ref_text",
|
| 1176 |
+
"list_items": [
|
| 1177 |
+
"[52] K. D. Apostolidis and G. A. Papakostas, “A survey on adversarial deep learning robustness in medical image analysis,” *Electronics*, vol. 10, p. 2132, 2021.",
|
| 1178 |
+
"[53] G. P. Avramidis, M. P. Avramidou, and G. A. Papakostas, \"Rheumatoid arthritis diagnosis: Deep learning vs. humane,\" Applied Sciences, vol. 12, p. 10, 2022."
|
| 1179 |
+
],
|
| 1180 |
+
"bbox": [
|
| 1181 |
+
511,
|
| 1182 |
+
94,
|
| 1183 |
+
911,
|
| 1184 |
+
184
|
| 1185 |
+
],
|
| 1186 |
+
"page_idx": 7
|
| 1187 |
+
}
|
| 1188 |
+
]
|