SlowGuess committed on
Commit 449634e · verified · 1 Parent(s): 5ec43cf

Add Batch 38ae0137-ad9a-4ada-afe1-647784d2e906

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +64 -0
  2. 2401.07xxx/2401.07527/d0b8afc3-0e02-44c3-9edc-3231840b6c0a_content_list.json +676 -0
  3. 2401.07xxx/2401.07527/d0b8afc3-0e02-44c3-9edc-3231840b6c0a_model.json +914 -0
  4. 2401.07xxx/2401.07527/d0b8afc3-0e02-44c3-9edc-3231840b6c0a_origin.pdf +3 -0
  5. 2401.07xxx/2401.07527/full.md +134 -0
  6. 2401.07xxx/2401.07527/images.zip +3 -0
  7. 2401.07xxx/2401.07527/layout.json +0 -0
  8. 2401.07xxx/2401.07579/6b2d499f-f8c2-439a-899f-9b75e508405e_content_list.json +0 -0
  9. 2401.07xxx/2401.07579/6b2d499f-f8c2-439a-899f-9b75e508405e_model.json +0 -0
  10. 2401.07xxx/2401.07579/6b2d499f-f8c2-439a-899f-9b75e508405e_origin.pdf +3 -0
  11. 2401.07xxx/2401.07579/full.md +527 -0
  12. 2401.07xxx/2401.07579/images.zip +3 -0
  13. 2401.07xxx/2401.07579/layout.json +0 -0
  14. 2401.07xxx/2401.07589/a05743a1-0320-46f8-acb1-1a68ad3d9613_content_list.json +0 -0
  15. 2401.07xxx/2401.07589/a05743a1-0320-46f8-acb1-1a68ad3d9613_model.json +0 -0
  16. 2401.07xxx/2401.07589/a05743a1-0320-46f8-acb1-1a68ad3d9613_origin.pdf +3 -0
  17. 2401.07xxx/2401.07589/full.md +590 -0
  18. 2401.07xxx/2401.07589/images.zip +3 -0
  19. 2401.07xxx/2401.07589/layout.json +0 -0
  20. 2401.07xxx/2401.07612/497d4010-c0da-41e5-98c9-3e45e97beb8b_content_list.json +681 -0
  21. 2401.07xxx/2401.07612/497d4010-c0da-41e5-98c9-3e45e97beb8b_model.json +863 -0
  22. 2401.07xxx/2401.07612/497d4010-c0da-41e5-98c9-3e45e97beb8b_origin.pdf +3 -0
  23. 2401.07xxx/2401.07612/full.md +136 -0
  24. 2401.07xxx/2401.07612/images.zip +3 -0
  25. 2401.07xxx/2401.07612/layout.json +0 -0
  26. 2401.07xxx/2401.07627/2bc2bf1b-9bb7-43d8-bf88-6905af6225b4_content_list.json +0 -0
  27. 2401.07xxx/2401.07627/2bc2bf1b-9bb7-43d8-bf88-6905af6225b4_model.json +0 -0
  28. 2401.07xxx/2401.07627/2bc2bf1b-9bb7-43d8-bf88-6905af6225b4_origin.pdf +3 -0
  29. 2401.07xxx/2401.07627/full.md +453 -0
  30. 2401.07xxx/2401.07627/images.zip +3 -0
  31. 2401.07xxx/2401.07627/layout.json +0 -0
  32. 2401.07xxx/2401.07629/742e5603-ff9f-4acc-8856-c8c986d94821_content_list.json +1667 -0
  33. 2401.07xxx/2401.07629/742e5603-ff9f-4acc-8856-c8c986d94821_model.json +2070 -0
  34. 2401.07xxx/2401.07629/742e5603-ff9f-4acc-8856-c8c986d94821_origin.pdf +3 -0
  35. 2401.07xxx/2401.07629/full.md +337 -0
  36. 2401.07xxx/2401.07629/images.zip +3 -0
  37. 2401.07xxx/2401.07629/layout.json +0 -0
  38. 2401.07xxx/2401.07654/6b889230-d2d7-4fe0-bed7-e8514acfd0b1_content_list.json +0 -0
  39. 2401.07xxx/2401.07654/6b889230-d2d7-4fe0-bed7-e8514acfd0b1_model.json +0 -0
  40. 2401.07xxx/2401.07654/6b889230-d2d7-4fe0-bed7-e8514acfd0b1_origin.pdf +3 -0
  41. 2401.07xxx/2401.07654/full.md +0 -0
  42. 2401.07xxx/2401.07654/images.zip +3 -0
  43. 2401.07xxx/2401.07654/layout.json +0 -0
  44. 2401.07xxx/2401.07680/398bd3f2-d838-4e1b-a207-704a7ecefba8_content_list.json +0 -0
  45. 2401.07xxx/2401.07680/398bd3f2-d838-4e1b-a207-704a7ecefba8_model.json +0 -0
  46. 2401.07xxx/2401.07680/398bd3f2-d838-4e1b-a207-704a7ecefba8_origin.pdf +3 -0
  47. 2401.07xxx/2401.07680/full.md +0 -0
  48. 2401.07xxx/2401.07680/images.zip +3 -0
  49. 2401.07xxx/2401.07680/layout.json +0 -0
  50. 2401.07xxx/2401.07745/c334f9ce-49db-4245-8e6b-ae023176c14c_content_list.json +1573 -0
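Each paper directory in this batch follows the same layout: a `*_content_list.json` with per-block text and bounding boxes, a `*_model.json` with the normalized page layout, the original PDF, a `full.md`, an `images.zip`, and a `layout.json`. As a rough illustration only, the sketch below shows one way such a `content_list.json` could be read from a local checkout; the field names (`type`, `text`, `bbox`, `page_idx`) mirror the diff further down, while the helper function itself is hypothetical.

```python
import json
from pathlib import Path


def load_content_blocks(paper_dir):
    """Yield the layout blocks from a paper folder's *_content_list.json.

    Assumes the structure visible in this commit: the file is a flat JSON
    array whose entries are dicts with at least a "type" key; text blocks
    additionally carry "text", "bbox" and "page_idx".
    """
    content_files = sorted(Path(paper_dir).glob("*_content_list.json"))
    if not content_files:
        raise FileNotFoundError(f"no *_content_list.json under {paper_dir}")
    with content_files[0].open(encoding="utf-8") as fh:
        blocks = json.load(fh)
    for block in blocks:
        yield block


# Hypothetical usage against a local checkout of this dataset:
# for block in load_content_blocks("2401.07xxx/2401.07527"):
#     if block["type"] == "text":
#         print(block["page_idx"], block["text"][:60])
```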
.gitattributes CHANGED
@@ -10518,3 +10518,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
10518
  2401.10xxx/2401.10935/06641a23-6a33-4272-8346-c191cf008283_origin.pdf filter=lfs diff=lfs merge=lfs -text
10519
  2402.16xxx/2402.16853/eba02c8d-d85e-409d-b6b2-6c3b1a4750b6_origin.pdf filter=lfs diff=lfs merge=lfs -text
10520
  2404.03xxx/2404.03659/1fe66a0e-a325-47d4-b4dd-a0a25183016a_origin.pdf filter=lfs diff=lfs merge=lfs -text
10521
+ 2401.07xxx/2401.07527/d0b8afc3-0e02-44c3-9edc-3231840b6c0a_origin.pdf filter=lfs diff=lfs merge=lfs -text
10522
+ 2401.07xxx/2401.07579/6b2d499f-f8c2-439a-899f-9b75e508405e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10523
+ 2401.07xxx/2401.07589/a05743a1-0320-46f8-acb1-1a68ad3d9613_origin.pdf filter=lfs diff=lfs merge=lfs -text
10524
+ 2401.07xxx/2401.07612/497d4010-c0da-41e5-98c9-3e45e97beb8b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10525
+ 2401.07xxx/2401.07627/2bc2bf1b-9bb7-43d8-bf88-6905af6225b4_origin.pdf filter=lfs diff=lfs merge=lfs -text
10526
+ 2401.07xxx/2401.07629/742e5603-ff9f-4acc-8856-c8c986d94821_origin.pdf filter=lfs diff=lfs merge=lfs -text
10527
+ 2401.07xxx/2401.07654/6b889230-d2d7-4fe0-bed7-e8514acfd0b1_origin.pdf filter=lfs diff=lfs merge=lfs -text
10528
+ 2401.07xxx/2401.07680/398bd3f2-d838-4e1b-a207-704a7ecefba8_origin.pdf filter=lfs diff=lfs merge=lfs -text
10529
+ 2401.07xxx/2401.07745/c334f9ce-49db-4245-8e6b-ae023176c14c_origin.pdf filter=lfs diff=lfs merge=lfs -text
10530
+ 2401.07xxx/2401.07764/a892eed1-92eb-4fda-bca1-f33215bccbde_origin.pdf filter=lfs diff=lfs merge=lfs -text
10531
+ 2401.07xxx/2401.07781/136ceb2c-fb01-41fa-8ae8-65a12cac3f2e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10532
+ 2401.07xxx/2401.07801/3c223f9a-9ed0-4291-b218-c694bbe8c7d4_origin.pdf filter=lfs diff=lfs merge=lfs -text
10533
+ 2401.07xxx/2401.07817/35ecda7d-f774-4910-87c0-a9652846febe_origin.pdf filter=lfs diff=lfs merge=lfs -text
10534
+ 2401.07xxx/2401.07836/4d8df049-879b-4946-85c2-ae0d31b60f95_origin.pdf filter=lfs diff=lfs merge=lfs -text
10535
+ 2401.07xxx/2401.07851/2944f72b-f4cf-451e-968a-d7c102d532f4_origin.pdf filter=lfs diff=lfs merge=lfs -text
10536
+ 2401.07xxx/2401.07856/c2eef9e3-f0ff-406d-9582-f3e1de307da6_origin.pdf filter=lfs diff=lfs merge=lfs -text
10537
+ 2401.07xxx/2401.07867/73047a9f-caad-4744-aa6f-529840e5185a_origin.pdf filter=lfs diff=lfs merge=lfs -text
10538
+ 2401.07xxx/2401.07871/b7063aa9-d511-45d8-a776-751b5307f3ec_origin.pdf filter=lfs diff=lfs merge=lfs -text
10539
+ 2401.07xxx/2401.07872/93abee45-eeb1-466c-af44-695edb83be3c_origin.pdf filter=lfs diff=lfs merge=lfs -text
10540
+ 2401.07xxx/2401.07883/bad66de8-0eb3-41c0-b4e6-ef1bc8250bf7_origin.pdf filter=lfs diff=lfs merge=lfs -text
10541
+ 2401.07xxx/2401.07927/d54fddc7-1633-47b0-923e-0dbbac89b05c_origin.pdf filter=lfs diff=lfs merge=lfs -text
10542
+ 2401.07xxx/2401.07985/fce8d94a-bfcd-47a8-ae45-c6aec7144e05_origin.pdf filter=lfs diff=lfs merge=lfs -text
10543
+ 2401.08xxx/2401.08026/5d4fd5bc-b932-481c-bd5a-8abb385c6fdc_origin.pdf filter=lfs diff=lfs merge=lfs -text
10544
+ 2401.08xxx/2401.08032/301d670c-50fb-4a94-a8b7-2e80e87b2dd2_origin.pdf filter=lfs diff=lfs merge=lfs -text
10545
+ 2401.08xxx/2401.08053/742bacf5-dda7-4038-8a81-4cd3ea46db02_origin.pdf filter=lfs diff=lfs merge=lfs -text
10546
+ 2401.08xxx/2401.08083/7bf7b759-1597-4580-b024-efe32e37de89_origin.pdf filter=lfs diff=lfs merge=lfs -text
10547
+ 2401.08xxx/2401.08092/d2cd95b8-05df-43a0-8693-a83c4abb594f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10548
+ 2401.08xxx/2401.08189/98055424-532d-47f3-82b2-f901166d811e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10549
+ 2401.08xxx/2401.08190/0bada484-1d29-4d30-a516-73d974d07c15_origin.pdf filter=lfs diff=lfs merge=lfs -text
10550
+ 2401.08xxx/2401.08206/bb6eb613-7869-4a8f-b0c6-16379338e570_origin.pdf filter=lfs diff=lfs merge=lfs -text
10551
+ 2401.08xxx/2401.08209/7bd68cf1-21b4-4f36-b129-7d14dc2e227c_origin.pdf filter=lfs diff=lfs merge=lfs -text
10552
+ 2401.08xxx/2401.08217/3a352d82-2ca9-479e-a73a-6914de777c1c_origin.pdf filter=lfs diff=lfs merge=lfs -text
10553
+ 2401.08xxx/2401.08276/7fece346-189c-404f-9afc-7c6880b59354_origin.pdf filter=lfs diff=lfs merge=lfs -text
10554
+ 2401.08xxx/2401.08281/f08dd3c1-83a0-45fb-aa16-3832e234c93a_origin.pdf filter=lfs diff=lfs merge=lfs -text
10555
+ 2401.08xxx/2401.08295/63ee362f-5ce0-4867-a8b9-91f2ab8c0b01_origin.pdf filter=lfs diff=lfs merge=lfs -text
10556
+ 2401.08xxx/2401.08315/b77e5725-62e4-41a9-b7fa-d348248e505f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10557
+ 2401.08xxx/2401.08326/cbd72421-2922-4b1b-b73b-9abd02471270_origin.pdf filter=lfs diff=lfs merge=lfs -text
10558
+ 2401.08xxx/2401.08329/6d3934bd-eb5a-4d97-a858-d9c6f7d8fc45_origin.pdf filter=lfs diff=lfs merge=lfs -text
10559
+ 2401.08xxx/2401.08358/b3d079ee-bdf3-4be7-8e07-4212427a4164_origin.pdf filter=lfs diff=lfs merge=lfs -text
10560
+ 2401.08xxx/2401.08383/9767bc5d-b14e-4147-93e4-2dd67ad721f9_origin.pdf filter=lfs diff=lfs merge=lfs -text
10561
+ 2401.08xxx/2401.08392/c5611577-5d21-4159-af8c-0c080f1636dd_origin.pdf filter=lfs diff=lfs merge=lfs -text
10562
+ 2401.08xxx/2401.08396/3436494b-facc-4ad0-96fe-38cb59d3fb42_origin.pdf filter=lfs diff=lfs merge=lfs -text
10563
+ 2401.08xxx/2401.08399/38d325c1-5868-4994-a30d-177a69f2c58b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10564
+ 2401.08xxx/2401.08406/9d4a72fd-9901-4fd0-a2e6-8b6ec27ffd2f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10565
+ 2401.08xxx/2401.08417/2d9e41ab-ee68-4930-9816-0f04638256c9_origin.pdf filter=lfs diff=lfs merge=lfs -text
10566
+ 2401.08xxx/2401.08495/54dbf521-ac41-4b1b-a495-685a015d7dc3_origin.pdf filter=lfs diff=lfs merge=lfs -text
10567
+ 2401.08xxx/2401.08500/36cff239-f737-4a56-8cd1-3ebe7e653d99_origin.pdf filter=lfs diff=lfs merge=lfs -text
10568
+ 2401.08xxx/2401.08503/19960961-5030-4e13-bc54-cf0370000458_origin.pdf filter=lfs diff=lfs merge=lfs -text
10569
+ 2401.08xxx/2401.08508/cd1b5741-50cc-4d31-afb1-e7037f557d6a_origin.pdf filter=lfs diff=lfs merge=lfs -text
10570
+ 2401.08xxx/2401.08514/329a9b9e-c888-4f5f-a3ed-350cfe9b592f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10571
+ 2401.08xxx/2401.08541/1ba7b6ad-aab7-4999-b390-145b8232cd32_origin.pdf filter=lfs diff=lfs merge=lfs -text
10572
+ 2401.08xxx/2401.08553/a8ec0738-3ec4-4d88-83ac-15b24c70ce34_origin.pdf filter=lfs diff=lfs merge=lfs -text
10573
+ 2401.08xxx/2401.08559/1882fead-4cb1-4235-94b7-e59a80e0f136_origin.pdf filter=lfs diff=lfs merge=lfs -text
10574
+ 2401.08xxx/2401.08565/749654db-26b6-401a-bb1e-36d6f13f8d0d_origin.pdf filter=lfs diff=lfs merge=lfs -text
10575
+ 2401.08xxx/2401.08570/50452c4b-5ea5-47f3-a3df-211b1568c8e6_origin.pdf filter=lfs diff=lfs merge=lfs -text
10576
+ 2401.08xxx/2401.08572/f74eeaad-3c1f-473a-b517-d900264ac44a_origin.pdf filter=lfs diff=lfs merge=lfs -text
10577
+ 2401.08xxx/2401.08573/409010cf-5b5d-4e32-b258-b609e8888112_origin.pdf filter=lfs diff=lfs merge=lfs -text
10578
+ 2401.08xxx/2401.08711/c13fd929-4db7-4c07-a608-a155ab80c0df_origin.pdf filter=lfs diff=lfs merge=lfs -text
10579
+ 2401.08xxx/2401.08721/899e3f6d-8a1c-420f-aeb1-42122dd4c070_origin.pdf filter=lfs diff=lfs merge=lfs -text
10580
+ 2401.08xxx/2401.08740/09d79bfd-50a2-40e4-9b15-5aff80268f8f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10581
+ 2401.08xxx/2401.08742/ad7baa52-ffb1-4120-bd5c-be853ff83b26_origin.pdf filter=lfs diff=lfs merge=lfs -text
10582
+ 2401.12xxx/2401.12987/62ebfa05-bbf4-4429-9a21-3c5fb3c3fcfc_origin.pdf filter=lfs diff=lfs merge=lfs -text
10583
+ 2402.01xxx/2402.01666/b3f58bd6-a85a-4cd3-a8c9-4f047ea5a2b9_origin.pdf filter=lfs diff=lfs merge=lfs -text
10584
+ 2402.12xxx/2402.12381/e7117c9c-a5e2-4e26-b7a7-62d00b68da6b_origin.pdf filter=lfs diff=lfs merge=lfs -text
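The hunk above only registers the batch's new `*_origin.pdf` files with Git LFS via `filter=lfs diff=lfs merge=lfs -text` attributes. As a hedged sketch, the snippet below shows one way to check from Python whether a path is covered by such an entry; the parsing is deliberately simplified (exact-path patterns only, as in the lines added here) and the function name is made up for illustration.

```python
from pathlib import Path


def lfs_tracked_paths(gitattributes=".gitattributes"):
    """Collect paths whose .gitattributes line marks them as LFS-tracked.

    Simplification: the entries added in this commit are exact file paths
    followed by 'filter=lfs diff=lfs merge=lfs -text', so no glob or
    escaping handling is attempted.
    """
    tracked = set()
    for line in Path(gitattributes).read_text(encoding="utf-8").splitlines():
        parts = line.split()
        if not parts or parts[0].startswith("#"):
            continue
        path, attrs = parts[0], set(parts[1:])
        if "filter=lfs" in attrs:
            tracked.add(path)
    return tracked


# Example, using a path taken from the hunk above:
# "2401.07xxx/2401.07527/d0b8afc3-0e02-44c3-9edc-3231840b6c0a_origin.pdf" in lfs_tracked_paths()
```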
2401.07xxx/2401.07527/d0b8afc3-0e02-44c3-9edc-3231840b6c0a_content_list.json ADDED
@@ -0,0 +1,676 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "ONE FOR ALL: TOWARD UNIFIED FOUNDATION MODELS FOR EARTH VISION",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 127,
8
+ 118,
9
+ 870,
10
+ 136
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Zhitong Xiong, Yi Wang, Fahong Zhang, Xiao Xiang Zhu",
17
+ "bbox": [
18
+ 274,
19
+ 155,
20
+ 723,
21
+ 172
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Chair of Data Science in Earth Observation, Technical University of Munich, Munich, Germany",
28
+ "bbox": [
29
+ 119,
30
+ 191,
31
+ 877,
32
+ 209
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "ABSTRACT",
39
+ "text_level": 1,
40
+ "bbox": [
41
+ 240,
42
+ 242,
43
+ 333,
44
+ 257
45
+ ],
46
+ "page_idx": 0
47
+ },
48
+ {
49
+ "type": "text",
50
+ "text": "Foundation models characterized by extensive parameters and trained on large-scale datasets have demonstrated remarkable efficacy across various downstream tasks for remote sensing data. Current remote sensing foundation models typically specialize in a single modality or a specific spatial resolution range, limiting their versatility for downstream datasets. While there have been attempts to develop multi-modal remote sensing foundation models, they typically employ separate vision encoders for each modality or spatial resolution, necessitating a switch in backbones contingent upon the input data. To address this issue, we introduce a simple yet effective method, termed OFA-Net (One-For-All Network): employing a single, shared Transformer backbone for multiple data modalities with different spatial resolutions. Using the masked image modeling mechanism, we pre-train a single Transformer backbone on a curated multi-modal dataset with this simple design. Then the backbone model can be used in different downstream tasks, thus forging a path towards a unified foundation backbone model in Earth vision. The proposed method is evaluated on 12 distinct downstream tasks and demonstrates promising performance.",
51
+ "bbox": [
52
+ 81,
53
+ 265,
54
+ 488,
55
+ 583
56
+ ],
57
+ "page_idx": 0
58
+ },
59
+ {
60
+ "type": "text",
61
+ "text": "Index Terms— Foundation models, remote sensing, Earth observation, self-supervised learning",
62
+ "bbox": [
63
+ 83,
64
+ 589,
65
+ 488,
66
+ 619
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "1. INTRODUCTION",
73
+ "text_level": 1,
74
+ "bbox": [
75
+ 207,
76
+ 642,
77
+ 364,
78
+ 657
79
+ ],
80
+ "page_idx": 0
81
+ },
82
+ {
83
+ "type": "text",
84
+ "text": "Multiple satellites in orbit provide invaluable data essential for understanding and managing our planet. The richness of the Earth observation data lies in its diversity, encompassing various modalities such as optical, radar, multispectral, hyperspectral, and thermal imagery, each offering unique insights [1] [2]. This multiplicity is crucial in applications ranging from environmental monitoring [3] to urban planning [4], demonstrating the indispensable role of remote sensing in Earth sciences.",
85
+ "bbox": [
86
+ 81,
87
+ 670,
88
+ 488,
89
+ 805
90
+ ],
91
+ "page_idx": 0
92
+ },
93
+ {
94
+ "type": "text",
95
+ "text": "The advent of foundation models has currently revolutionized the processing and analysis of remote sensing data [5, 6, 7, 8, 9, 10, 11]. Characterized by their extensive parameters and pre-trained on large-scale datasets, these models have greatly enhanced the performance on different downstream tasks. Despite their successes, current foundation models in remote sensing exhibit a critical limitation: they are predom-",
96
+ "bbox": [
97
+ 81,
98
+ 806,
99
+ 488,
100
+ 914
101
+ ],
102
+ "page_idx": 0
103
+ },
104
+ {
105
+ "type": "text",
106
+ "text": "nantly tailored to either a single data modality [9] or a specific range of spatial resolutions [12] [13]. This specialization constrains their applicability across the diverse spectrum of remote sensing datasets, limiting their potential and flexibility in broader, more complex applications.",
107
+ "bbox": [
108
+ 508,
109
+ 242,
110
+ 915,
111
+ 319
112
+ ],
113
+ "page_idx": 0
114
+ },
115
+ {
116
+ "type": "image",
117
+ "img_path": "images/7952b3fb127a176b36778618c504d61a8f4a1f5af551abf5476ebf1eb3369620.jpg",
118
+ "image_caption": [
119
+ "Fig. 1. Illustration of the proposed method. Our model is designed to handle input data from a range of modalities, and varying spatial resolutions, such as 30 meters and 1 meter, using a singular, unified framework. This integrative approach allows for the simultaneous processing of all modalities within one comprehensive model."
120
+ ],
121
+ "image_footnote": [],
122
+ "bbox": [
123
+ 527,
124
+ 333,
125
+ 908,
126
+ 503
127
+ ],
128
+ "page_idx": 0
129
+ },
130
+ {
131
+ "type": "text",
132
+ "text": "In response, the remote sensing community has tried to develop multi-modal foundation models. However, these models typically rely on separate vision encoders for either each modality or spatial resolution. Such an approach necessitates the switching of backbones based on different input data, hindering flexibility and operational efficiency in downstream applications. In this context, a model capable of seamlessly integrating multiple data modalities and spatial resolutions within a single framework could dramatically enhance the adaptability and efficiency of remote sensing data understanding.",
133
+ "bbox": [
134
+ 506,
135
+ 626,
136
+ 915,
137
+ 792
138
+ ],
139
+ "page_idx": 0
140
+ },
141
+ {
142
+ "type": "text",
143
+ "text": "In this work, we propose a simple approach to this challenge: a unified foundation model employing a single, shared vision Transformer [14] backbone. This model accommodates data with various modalities and spatial resolutions, aiming to eliminate the need for multiple specialized models, as illustrated in Fig. 1.",
144
+ "bbox": [
145
+ 506,
146
+ 792,
147
+ 913,
148
+ 882
149
+ ],
150
+ "page_idx": 0
151
+ },
152
+ {
153
+ "type": "text",
154
+ "text": "As shown in Fig. 2 (iii), the proposed OFA-Net is designed to handle input data from a range of modalities and",
155
+ "bbox": [
156
+ 508,
157
+ 883,
158
+ 913,
159
+ 914
160
+ ],
161
+ "page_idx": 0
162
+ },
163
+ {
164
+ "type": "aside_text",
165
+ "text": "arXiv:2401.07527v2 [cs.CV] 28 May 2024",
166
+ "bbox": [
167
+ 22,
168
+ 253,
169
+ 58,
170
+ 708
171
+ ],
172
+ "page_idx": 0
173
+ },
174
+ {
175
+ "type": "image",
176
+ "img_path": "images/32a8f5c38aa31bddba75c44c3b9159dabe7904c0dd99203be39b80e5c9d57def.jpg",
177
+ "image_caption": [
178
+ "(i) Separate Backbone for Different Modalities"
179
+ ],
180
+ "image_footnote": [],
181
+ "bbox": [
182
+ 127,
183
+ 92,
184
+ 344,
185
+ 256
186
+ ],
187
+ "page_idx": 1
188
+ },
189
+ {
190
+ "type": "image",
191
+ "img_path": "images/1a45de1d6baec916f451d2d41e2b6cf74c224066969f0d3c5afa5e5ac699b850.jpg",
192
+ "image_caption": [
193
+ "(ii) Shared Backbone for Low-resolution Data",
194
+ "(iii) Shared Backbone for Different Modalities, Low- and High-resolution Data"
195
+ ],
196
+ "image_footnote": [],
197
+ "bbox": [
198
+ 370,
199
+ 138,
200
+ 614,
201
+ 252
202
+ ],
203
+ "page_idx": 1
204
+ },
205
+ {
206
+ "type": "image",
207
+ "img_path": "images/706d3e50c7a89eeac5c42ed31939c9af4e537f7104139b49cab232fbe03773c1.jpg",
208
+ "image_caption": [
209
+ "Fig. 2. Illustration of existing and the proposed foundation models for multi-modal data."
210
+ ],
211
+ "image_footnote": [],
212
+ "bbox": [
213
+ 625,
214
+ 94,
215
+ 870,
216
+ 252
217
+ ],
218
+ "page_idx": 1
219
+ },
220
+ {
221
+ "type": "text",
222
+ "text": "varying spatial resolutions, such as 30 meters and 1 meter, using a singular, unified vision Transformer. This integrative approach allows for the simultaneous processing of all modalities within one comprehensive model, which is different from conventional methods. We pre-train this Transformer backbone on a meticulously curated multi-modal dataset, leveraging the masked image modeling mechanism to enhance its adaptability. Our approach offers a simpler yet effective solution for Earth vision tasks. We validate our model on 12 downstream tasks in the GEO-Bench dataset [15], demonstrating its robustness and versatility.",
223
+ "bbox": [
224
+ 81,
225
+ 342,
226
+ 488,
227
+ 508
228
+ ],
229
+ "page_idx": 1
230
+ },
231
+ {
232
+ "type": "text",
233
+ "text": "2. METHODOLOGY",
234
+ "text_level": 1,
235
+ "bbox": [
236
+ 204,
237
+ 529,
238
+ 366,
239
+ 542
240
+ ],
241
+ "page_idx": 1
242
+ },
243
+ {
244
+ "type": "text",
245
+ "text": "2.1. Multi-modal Dataset Construction",
246
+ "text_level": 1,
247
+ "bbox": [
248
+ 83,
249
+ 556,
250
+ 362,
251
+ 571
252
+ ],
253
+ "page_idx": 1
254
+ },
255
+ {
256
+ "type": "text",
257
+ "text": "As shown in Fig. 3, we have constructed an extensive multimodal dataset designed to underpin the development of unified foundation models for Earth vision. This dataset is composed of five distinct modalities, each offering unique spectral and spatial data characteristics:",
258
+ "bbox": [
259
+ 81,
260
+ 580,
261
+ 488,
262
+ 655
263
+ ],
264
+ "page_idx": 1
265
+ },
266
+ {
267
+ "type": "text",
268
+ "text": "Sentinel-1: The Sentinel-1 dataset includes 4,642,353 samples of Synthetic Aperture Radar (SAR) imagery, with a spatial resolution of about $5 \\times 20$ meters. Each image captures two bands (vv and vh) and is $512 \\times 512$ pixels in size, providing dense global coverage.",
269
+ "bbox": [
270
+ 81,
271
+ 656,
272
+ 488,
273
+ 731
274
+ ],
275
+ "page_idx": 1
276
+ },
277
+ {
278
+ "type": "text",
279
+ "text": "Sentinel-2: Comprising 977,774 multispectral imagery samples, this dataset has a spectral range with nine bands, from 0.49 to $2.15\\mu m$ , maintaining a spatial resolution of 10 meters with each image sized at 512x512 pixels for dense global coverage. We use the Sentinel-2 data collected and processed by [7].",
280
+ "bbox": [
281
+ 81,
282
+ 732,
283
+ 488,
284
+ 821
285
+ ],
286
+ "page_idx": 1
287
+ },
288
+ {
289
+ "type": "text",
290
+ "text": "Gaofen: To include images from the Gaofen satellite, We use the dataset collected by [16]. We crop 117,450 image patches of $512 \\times 512$ pixel resolution from the dataset. Each image includes four bands encompassing RGB and NIR wavelengths with a spatial resolution of around 4 meters. This dataset mainly covers different cities in China.",
291
+ "bbox": [
292
+ 81,
293
+ 821,
294
+ 488,
295
+ 912
296
+ ],
297
+ "page_idx": 1
298
+ },
299
+ {
300
+ "type": "text",
301
+ "text": "NAIP: For high-resolution optical images, we use the dataset collected and processed by [7]. This dataset includes 2,332,351 high-resolution aerial images from the National Agriculture Imagery Program (NAIP), covering the USA with a fine spatial resolution of approximately 1 meter and consisting of RGB images across three bands with a size of 512x512 pixels.",
302
+ "bbox": [
303
+ 506,
304
+ 342,
305
+ 915,
306
+ 446
307
+ ],
308
+ "page_idx": 1
309
+ },
310
+ {
311
+ "type": "text",
312
+ "text": "EnMAP: The multi-modal dataset is further enriched with 11,483 hyperspectral image samples from EnMAP, which is published in [17]. The hyperspectral images have a spatial resolution of 30 meters and capture a wide spectral range with 224 bands, each sized at $128 \\times 128$ pixels.",
313
+ "bbox": [
314
+ 506,
315
+ 450,
316
+ 913,
317
+ 526
318
+ ],
319
+ "page_idx": 1
320
+ },
321
+ {
322
+ "type": "text",
323
+ "text": "As illustrated in Fig. 4, the proposed OFA-Net contains mainly two components: 1) the individual patch embedding layers, and 2) the shared Transformer backbone. This simple design enables the model to extract and process features from diverse remote sensing modalities without the need to train multiple foundation models.",
324
+ "bbox": [
325
+ 506,
326
+ 527,
327
+ 913,
328
+ 619
329
+ ],
330
+ "page_idx": 1
331
+ },
332
+ {
333
+ "type": "text",
334
+ "text": "Individual Patch Embedding Layer. The first component of the model comprises separate patch embedding layers tailored to each modality. There exist inherent differences in input channels across modalities. SAR images from Sentinel-1 are with two bands. Hyperspectral images from EnMAP are with 224 bands. Hence, it is imperative to have a specialized embedding process that can effectively translate the raw pixel data into a format suitable for the Transformer backbone.",
335
+ "bbox": [
336
+ 506,
337
+ 621,
338
+ 913,
339
+ 741
340
+ ],
341
+ "page_idx": 1
342
+ },
343
+ {
344
+ "type": "text",
345
+ "text": "Let $P_{s1}, P_{s2}, P_{naip}, P_g$ , and $P_{hyper}$ denote the patch embedding operations for five different modalities. After resizing the input images from different modalities $X_{s1} \\in \\mathbb{R}^{h,w,2}$ , $X_{s2} \\in \\mathbb{R}^{h,w,9}$ , $X_{naip} \\in \\mathbb{R}^{h,w,3}$ , $X_g \\in \\mathbb{R}^{h,w,4}$ , $X_h \\in \\mathbb{R}^{h,w,224}$ into 224x224 pixels, we can compute the embeddings simply by $E = P(X)$ for each modality.",
346
+ "bbox": [
347
+ 508,
348
+ 744,
349
+ 913,
350
+ 835
351
+ ],
352
+ "page_idx": 1
353
+ },
354
+ {
355
+ "type": "text",
356
+ "text": "Shared Transformer Backbone. The second component is a shared Transformer backbone. In this work, this single Transformer architecture processes the embedded patches from all modalities with different spatial resolutions. This shared backbone can learn a generalized representation that",
357
+ "bbox": [
358
+ 506,
359
+ 838,
360
+ 913,
361
+ 912
362
+ ],
363
+ "page_idx": 1
364
+ },
365
+ {
366
+ "type": "image",
367
+ "img_path": "images/8a6eb0fd1c3d80a77dc3daf5666481c18363ab142a7bbfc050ba53aa1dbda6de.jpg",
368
+ "image_caption": [
369
+ "Fig. 3. Detailed information of the five sub-datasets in the curated multi-modal dataset."
370
+ ],
371
+ "image_footnote": [],
372
+ "bbox": [
373
+ 117,
374
+ 90,
375
+ 883,
376
+ 368
377
+ ],
378
+ "page_idx": 2
379
+ },
380
+ {
381
+ "type": "image",
382
+ "img_path": "images/c39c3fa5e31ade2224c89292785bf4a09eb9faf44044f48511cdbe4b5c0ca509.jpg",
383
+ "image_caption": [
384
+ "Fig. 4. Workflow of the proposed unified foundation model for multiple data modalities."
385
+ ],
386
+ "image_footnote": [],
387
+ "bbox": [
388
+ 122,
389
+ 424,
390
+ 875,
391
+ 597
392
+ ],
393
+ "page_idx": 2
394
+ },
395
+ {
396
+ "type": "text",
397
+ "text": "is flexible and robust enough to handle not only the variety of data modalities but also their diverse spatial resolutions. In this process, we input the embeddings for each modality $E_{s1} \\in \\mathbb{R}^{n,d}, E_{s2} \\in \\mathbb{R}^{n,d}, E_{naip} \\in \\mathbb{R}^{n,d}, E_g \\in \\mathbb{R}^{n,d}, E_h \\in \\mathbb{R}^{h,w,224}$ to the Transformer backbone $F_{b}$ to learn deep features. Here $n$ represents the number of tokens and $d$ denotes the feature dimension.",
398
+ "bbox": [
399
+ 81,
400
+ 651,
401
+ 486,
402
+ 755
403
+ ],
404
+ "page_idx": 2
405
+ },
406
+ {
407
+ "type": "text",
408
+ "text": "Masked Image Modeling. For the model training, we utilize the masked image modeling-based self-supervised learning loss. Specifically, we employ different decoders for different data modalities to reconstruct the randomly masked parts of the inputs. A significant advantage introduced by masked image modeling is its inherent design that does not necessitate spatially aligned multi-modal datasets. Traditional multi-modal learning approaches often rely on the precise alignment of different modalities, which can be a challenging and expensive process given the variability in",
409
+ "bbox": [
410
+ 81,
411
+ 762,
412
+ 488,
413
+ 914
414
+ ],
415
+ "page_idx": 2
416
+ },
417
+ {
418
+ "type": "text",
419
+ "text": "sensor acquisition parameters and conditions.",
420
+ "bbox": [
421
+ 509,
422
+ 651,
423
+ 810,
424
+ 666
425
+ ],
426
+ "page_idx": 2
427
+ },
428
+ {
429
+ "type": "text",
430
+ "text": "3. EXPERIMENTS",
431
+ "text_level": 1,
432
+ "bbox": [
433
+ 638,
434
+ 690,
435
+ 785,
436
+ 704
437
+ ],
438
+ "page_idx": 2
439
+ },
440
+ {
441
+ "type": "text",
442
+ "text": "3.1. Downstream Datasets",
443
+ "text_level": 1,
444
+ "bbox": [
445
+ 509,
446
+ 720,
447
+ 700,
448
+ 733
449
+ ],
450
+ "page_idx": 2
451
+ },
452
+ {
453
+ "type": "text",
454
+ "text": "To assess the effectiveness of our proposed unified foundation model, we use the GEO-Bench benchmark datasets [15], which encompass a diverse array of tasks pertinent to Earth vision. The GEO-Bench dataset includes 12 tasks, split evenly between image classification and segmentation, each representing a common challenge in remote sensing.",
455
+ "bbox": [
456
+ 508,
457
+ 744,
458
+ 913,
459
+ 835
460
+ ],
461
+ "page_idx": 2
462
+ },
463
+ {
464
+ "type": "text",
465
+ "text": "3.2. Experimental Settings",
466
+ "text_level": 1,
467
+ "bbox": [
468
+ 509,
469
+ 858,
470
+ 705,
471
+ 873
472
+ ],
473
+ "page_idx": 2
474
+ },
475
+ {
476
+ "type": "text",
477
+ "text": "Considering the high computational cost, we use 10,000 data samples for each sub-dataset, with 50,000 samples in total",
478
+ "bbox": [
479
+ 508,
480
+ 883,
481
+ 913,
482
+ 914
483
+ ],
484
+ "page_idx": 2
485
+ },
486
+ {
487
+ "type": "table",
488
+ "img_path": "images/4adb42ea6967bfdcac45f086d802ee8476e1006e3d14c8efdc04a6f87df6860a.jpg",
489
+ "table_caption": [
490
+ "Table 1. Performance comparison of different methods on various classification datasets"
491
+ ],
492
+ "table_footnote": [],
493
+ "table_body": "<table><tr><td>Methods</td><td>m-bigeathnet</td><td>m-forestnet</td><td>m-brick-kiln</td><td>m-pv4ger</td><td>m-so2sat</td><td>m-eurosat</td></tr><tr><td>Random Init.</td><td>52.89</td><td>41.52</td><td>84.51</td><td>91.32</td><td>38.31</td><td>69.53</td></tr><tr><td>MAE Single</td><td>55.41</td><td>42.95</td><td>88.89</td><td>92.19</td><td>44.42</td><td>78.00</td></tr><tr><td>SatMAE [9]</td><td>55.12</td><td>—</td><td>91.89</td><td>—</td><td>45.59</td><td>73.15</td></tr><tr><td>OFA-Net (ours)</td><td>57.13</td><td>45.12</td><td>91.29</td><td>93.19</td><td>46.04</td><td>81.00</td></tr></table>",
494
+ "bbox": [
495
+ 163,
496
+ 114,
497
+ 838,
498
+ 191
499
+ ],
500
+ "page_idx": 3
501
+ },
502
+ {
503
+ "type": "table",
504
+ "img_path": "images/526a4c51e67b9f92990a3bcc257beec0f677429f69ad196211c914d86ac52c4b.jpg",
505
+ "table_caption": [
506
+ "Table 2. Performance comparison of different methods on various segmentation datasets"
507
+ ],
508
+ "table_footnote": [],
509
+ "table_body": "<table><tr><td>Methods</td><td>m-pv4ger-seg</td><td>m-nz-cattle</td><td>m-NeonTree</td><td>m-cashew-plantation</td><td>m-SA-crop-type</td><td>m-chesapeake-landcover</td></tr><tr><td>Random Init.</td><td>81.63</td><td>74.12</td><td>51.27</td><td>27.65</td><td>29.11</td><td>47.16</td></tr><tr><td>MAE Single</td><td>88.43</td><td>76.40</td><td>52.99</td><td>29.42</td><td>30.67</td><td>51.90</td></tr><tr><td>OFA-Net (Ours)</td><td>89.43</td><td>77.63</td><td>52.64</td><td>37.39</td><td>31.98</td><td>54.50</td></tr></table>",
510
+ "bbox": [
511
+ 106,
512
+ 229,
513
+ 897,
514
+ 284
515
+ ],
516
+ "page_idx": 3
517
+ },
518
+ {
519
+ "type": "text",
520
+ "text": "to pre-train the OFA-Net for 100 epochs. We adopt linear probing as our primary evaluation strategy, known for its capacity to measure the quality of representations learned by self-supervised learning. This approach involves freezing the weights of the pre-trained model and training a linear classifier on top of the representations for each task. By doing so, we can directly evaluate the discriminative power of the learned features without fully fine-tuning the entire model, thus providing insight into the model's ability to generalize. A learning rate of 1e-2 is used for all the datasets. For the classification tasks, we use top-1 Accuracy as the evaluation metric. For segmentation tasks, we use mIoU as the metric. A learning rate of 1e-4 is used for all the datasets. All the experiments are conducted using PyTorch on four NVIDIA A6000 GPUs each with 48GB Memory.",
521
+ "bbox": [
522
+ 81,
523
+ 309,
524
+ 488,
525
+ 535
526
+ ],
527
+ "page_idx": 3
528
+ },
529
+ {
530
+ "type": "text",
531
+ "text": "3.3. Comparison Experiments",
532
+ "text_level": 1,
533
+ "bbox": [
534
+ 83,
535
+ 556,
536
+ 303,
537
+ 571
538
+ ],
539
+ "page_idx": 3
540
+ },
541
+ {
542
+ "type": "text",
543
+ "text": "To objectively evaluate our model, we provide the performance of four different methods on six downstream classification datasets under the linear probing setting. The results are presented in Table 1. Analyzing the table, OFA-Net generally outperforms the other methods across most datasets, indicating the benefit of multimodal pretraining in enhancing the model's feature extraction capabilities and generalization. The improvement is particularly notable on m-forestnet and m-so2sat datasets, where the OFA-Net method exceeds the performance of a randomly initialized model by approximately $3.6\\%$ and $7.7\\%$ , respectively. The MAE Single method shows consistent improvement over random initialization, which highlights the advantage of pretraining on a single data modality compared to no pretraining. However, the gains from MAE Single are less than those from OFA-Net, underscoring the added value of multimodal learning.",
544
+ "bbox": [
545
+ 81,
546
+ 580,
547
+ 488,
548
+ 821
549
+ ],
550
+ "page_idx": 3
551
+ },
552
+ {
553
+ "type": "text",
554
+ "text": "Table 2 presents performance for segmentation tasks on various datasets, comparing methods that utilize a Vision Transformer (ViT) with different pretraining strategies: Random Initialization (Random Init.), pretraining on single modalities (MAE Single), and pretraining on multiple modalities (OFA-Net). On the m-NeonTree dataset, OFA",
555
+ "bbox": [
556
+ 81,
557
+ 821,
558
+ 488,
559
+ 912
560
+ ],
561
+ "page_idx": 3
562
+ },
563
+ {
564
+ "type": "text",
565
+ "text": "Net shows a modest improvement over MAE Single. On the other five datasets, OFA-Net demonstrates superior performance when compared to the other methods. This suggests that pretraining on multiple modalities provides a more comprehensive feature representation, leading to more accurate segmentation. MAE Single outperforms Random Initialization in all cases, which aligns with the expected outcome that pretraining can significantly enhance the model's ability to generalize and accurately segment images.",
566
+ "bbox": [
567
+ 506,
568
+ 309,
569
+ 915,
570
+ 446
571
+ ],
572
+ "page_idx": 3
573
+ },
574
+ {
575
+ "type": "text",
576
+ "text": "4. CONCLUSION",
577
+ "text_level": 1,
578
+ "bbox": [
579
+ 642,
580
+ 468,
581
+ 782,
582
+ 482
583
+ ],
584
+ "page_idx": 3
585
+ },
586
+ {
587
+ "type": "text",
588
+ "text": "In this work, we introduce a simple yet effective method, the OFA-Net, for unified foundation models for remote sensing data. The OFA-Net consists of a single, shared Transformer backbone and dedicated patch embedding layers for multiple data modalities with different spatial resolutions. The model is trained using the masked image modeling mechanism on a carefully curated multi-modal dataset with five distinct modalities. Then the backbone model is evaluated in different downstream tasks. The experimental results on 12 different downstream tasks show that our simple method demonstrates promising performance over foundation models trained using single modalities.",
589
+ "bbox": [
590
+ 506,
591
+ 497,
592
+ 916,
593
+ 679
594
+ ],
595
+ "page_idx": 3
596
+ },
597
+ {
598
+ "type": "text",
599
+ "text": "5. ACKNOWLEDGEMENT",
600
+ "text_level": 1,
601
+ "bbox": [
602
+ 607,
603
+ 702,
604
+ 816,
605
+ 715
606
+ ],
607
+ "page_idx": 3
608
+ },
609
+ {
610
+ "type": "text",
611
+ "text": "This work is jointly supported by the German Federal Ministry of Education and Research (BMBF) in the framework of the international future AI lab \"AI4EO - Artificial Intelligence for Earth Observation: Reasoning, Uncertainties, Ethics and Beyond\" (grant number: 01DD20001), by German Federal Ministry for Economic Affairs and Climate Action in the framework of the \"national center of excellence ML4Earth\" (grant number: 50EE2201C), and by the German Federal Ministry for the Environment, Nature Conservation, Nuclear Safety and Consumer Protection (BMUV) based on a resolution of the German Bundestag (grant number: 67KI32002B; Acronym: EKAPEx).",
612
+ "bbox": [
613
+ 506,
614
+ 731,
615
+ 916,
616
+ 912
617
+ ],
618
+ "page_idx": 3
619
+ },
620
+ {
621
+ "type": "text",
622
+ "text": "6. REFERENCES",
623
+ "text_level": 1,
624
+ "bbox": [
625
+ 217,
626
+ 90,
627
+ 356,
628
+ 104
629
+ ],
630
+ "page_idx": 4
631
+ },
632
+ {
633
+ "type": "list",
634
+ "sub_type": "ref_text",
635
+ "list_items": [
636
+ "[1] Xiao Xiang Zhu, Devis Tuia, Lichao Mou, Gui-Song Xia, Liangpei Zhang, Feng Xu, and Friedrich Fraundorfer, “Deep learning in remote sensing: A comprehensive review and list of resources,” IEEE geoscience and remote sensing magazine, vol. 5, no. 4, pp. 8-36, 2017.",
637
+ "[2] Zhitong Xiong, Fahong Zhang, Yi Wang, Yilei Shi, and Xiao Xiang Zhu, “Earthnets: Empowering ai in earth observation,” arXiv preprint arXiv:2210.04936, 2022.",
638
+ "[3] Shan Zhao, Ioannis Prapas, Ilektra Karasante, Zhitong Xiong, Ioannis Papoutsis, Gustau Camps-Valls, and Xiao Xiang Zhu, \"Causal graph neural networks for wildfire danger prediction,\" arXiv preprint arXiv:2403.08414, 2024.",
639
+ "[4] Zhitong Xiong, Wei Huang, Jingtao Hu, and Xiao Xiang Zhu, “The benchmark: Transferable representation learning for monocular height estimation,” IEEE Transactions on Geoscience and Remote Sensing, 2023.",
640
+ "[5] Xiao Xiang Zhu, Zhitong Xiong, Yi Wang, Adam J Stewart, Konrad Heidler, Yuanyuan Wang, Zhenghang Yuan, Thomas Dujardin, Qingsong Xu, and Yilei Shi, \"On the foundations of earth and climate foundation models,\" arXiv preprint arXiv:2405.04285, 2024.",
641
+ "[6] Xin Guo, Jiangwei Lao, Bo Dang, Yingying Zhang, Lei Yu, Lixiang Ru, Liheng Zhong, Ziyuan Huang, Kang Wu, Dingxiang Hu, et al., \"Skysense: A multi-modal remote sensing foundation model towards universal interpretation for earth observation imagery,\" arXiv preprint arXiv:2312.10115, 2023.",
642
+ "[7] Favyen Bastani, Piper Wolters, Ritwik Gupta, Joe Ferdinand, and Aniruddha Kembhavi, \"Satlaspretrain: A large-scale dataset for remote sensing image understanding,\" in Proceedings of the IEEE/CVF International Conference on Computer Vision, 2023, pp. 16772-16782.",
643
+ "[8] Keumgang Cha, Junghoon Seo, and Taekyung Lee, “A billion-scale foundation model for remote sensing images,” arXiv preprint arXiv:2304.05215, 2023.",
644
+ "[9] Yezhen Cong, Samar Khanna, Chenlin Meng, Patrick Liu, Erik Rozi, Yutong He, Marshall Burke, David Lobell, and Stefano Ermon, \"Satmae: Pre-training transformers for temporal and multi-spectral satellite imagery,\" Advances in Neural Information Processing Systems, vol. 35, pp. 197-211, 2022.",
645
+ "[10] Yi Wang, Nassim Ait Ali Braham, Zhitong Xiong, Chenying Liu, Conrad M Albrecht, and Xiao Xiang"
646
+ ],
647
+ "bbox": [
648
+ 86,
649
+ 119,
650
+ 486,
651
+ 912
652
+ ],
653
+ "page_idx": 4
654
+ },
655
+ {
656
+ "type": "list",
657
+ "sub_type": "ref_text",
658
+ "list_items": [
659
+ "Zhu, \"Ssl4eo-s12: A large-scale multi-modal, multitemporal dataset for self-supervised learning in earth observation,\" arXiv preprint arXiv:2211.07044, 2022.",
660
+ "[11] Zhitong Xiong, Yi Wang, Fahong Zhang, Adam J Stewart, Joëlle Hanna, Damian Borth, Ioannis Papoutsis, Bertrand Le Saux, Gustau Camps-Valls, and Xiao Xiang Zhu, “Neural plasticity-inspired foundation model for observing the Earth crossing modalities,” arXiv preprint arXiv:2403.15356, 2024.",
661
+ "[12] Michael J Smith, Luke Fleming, and James E Geach, \"Earthpt: a foundation model for earth observation,\" arXiv preprint arXiv:2309.07207, 2023.",
662
+ "[13] Pallavi Jain, Bianca Schoen-Phelan, and Robert Ross, \"Self-supervised learning for invariant representations from multi-spectral and sar images,\" IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing, vol. 15, pp. 7797-7808, 2022.",
663
+ "[14] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al., \"An image is worth 16x16 words: Transformers for image recognition at scale,\" arXiv preprint arXiv:2010.11929, 2020.",
664
+ "[15] Alexandre Lacoste, Nils Lehmann, Pau Rodriguez, Evan David Sherwin, Hannah Kerner, Björn Lütjens, Jeremy Andrew Irvin, David Dao, Hamed Alemoham-mad, Alexandre Drouin, et al., \"Geo-bench: Toward foundation models for earth monitoring,\" arXiv preprint arXiv:2306.03831, 2023.",
665
+ "[16] Xin-Yi Tong, Gui-Song Xia, and Xiao Xiang Zhu, “Enabling country-scale land cover mapping with meter-resolution satellite imagery,” ISPRS Journal of Photogrammetry and Remote Sensing, vol. 196, pp. 178–196, 2023.",
666
+ "[17] Martin Hermann Paul Fuchs and Begüm Demir, \"Hyspecnet-11k: A large-scale hyperspectral dataset for benchmarking learning-based hyperspectral image compression methods,\" arXiv preprint arXiv:2306.00385, 2023."
667
+ ],
668
+ "bbox": [
669
+ 511,
670
+ 90,
671
+ 913,
672
+ 748
673
+ ],
674
+ "page_idx": 4
675
+ }
676
+ ]
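The `model.json` added next stores the same layout blocks, but with bounding boxes expressed as page fractions (values between 0 and 1), whereas the `content_list.json` above uses absolute coordinates. A minimal conversion sketch follows; the 1000-unit page scale is an assumption inferred from comparing the two files (e.g. [127, 118, 870, 136] versus [0.128, 0.119, 0.872, 0.137]) and is not recorded anywhere in the diff.

```python
def normalize_bbox(bbox, page_width=1000.0, page_height=1000.0):
    """Convert an absolute [x0, y0, x1, y1] box (content_list.json style)
    into the fractional form used in model.json.

    The default 1000-unit page size is an inferred assumption that only
    roughly reproduces the values in this commit; adjust it if the real
    page dimensions differ.
    """
    x0, y0, x1, y1 = bbox
    return [round(x0 / page_width, 3), round(y0 / page_height, 3),
            round(x1 / page_width, 3), round(y1 / page_height, 3)]


# Example: normalize_bbox([127, 118, 870, 136]) -> [0.127, 0.118, 0.87, 0.136]
```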
2401.07xxx/2401.07527/d0b8afc3-0e02-44c3-9edc-3231840b6c0a_model.json ADDED
@@ -0,0 +1,914 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "title",
5
+ "bbox": [
6
+ 0.128,
7
+ 0.119,
8
+ 0.872,
9
+ 0.137
10
+ ],
11
+ "angle": 0,
12
+ "content": "ONE FOR ALL: TOWARD UNIFIED FOUNDATION MODELS FOR EARTH VISION"
13
+ },
14
+ {
15
+ "type": "text",
16
+ "bbox": [
17
+ 0.275,
18
+ 0.156,
19
+ 0.725,
20
+ 0.174
21
+ ],
22
+ "angle": 0,
23
+ "content": "Zhitong Xiong, Yi Wang, Fahong Zhang, Xiao Xiang Zhu"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.12,
29
+ 0.192,
30
+ 0.878,
31
+ 0.21
32
+ ],
33
+ "angle": 0,
34
+ "content": "Chair of Data Science in Earth Observation, Technical University of Munich, Munich, Germany"
35
+ },
36
+ {
37
+ "type": "title",
38
+ "bbox": [
39
+ 0.241,
40
+ 0.243,
41
+ 0.334,
42
+ 0.258
43
+ ],
44
+ "angle": 0,
45
+ "content": "ABSTRACT"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.082,
51
+ 0.266,
52
+ 0.49,
53
+ 0.584
54
+ ],
55
+ "angle": 0,
56
+ "content": "Foundation models characterized by extensive parameters and trained on large-scale datasets have demonstrated remarkable efficacy across various downstream tasks for remote sensing data. Current remote sensing foundation models typically specialize in a single modality or a specific spatial resolution range, limiting their versatility for downstream datasets. While there have been attempts to develop multi-modal remote sensing foundation models, they typically employ separate vision encoders for each modality or spatial resolution, necessitating a switch in backbones contingent upon the input data. To address this issue, we introduce a simple yet effective method, termed OFA-Net (One-For-All Network): employing a single, shared Transformer backbone for multiple data modalities with different spatial resolutions. Using the masked image modeling mechanism, we pre-train a single Transformer backbone on a curated multi-modal dataset with this simple design. Then the backbone model can be used in different downstream tasks, thus forging a path towards a unified foundation backbone model in Earth vision. The proposed method is evaluated on 12 distinct downstream tasks and demonstrates promising performance."
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.084,
62
+ 0.59,
63
+ 0.489,
64
+ 0.621
65
+ ],
66
+ "angle": 0,
67
+ "content": "Index Terms— Foundation models, remote sensing, Earth observation, self-supervised learning"
68
+ },
69
+ {
70
+ "type": "title",
71
+ "bbox": [
72
+ 0.209,
73
+ 0.643,
74
+ 0.366,
75
+ 0.658
76
+ ],
77
+ "angle": 0,
78
+ "content": "1. INTRODUCTION"
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.082,
84
+ 0.671,
85
+ 0.489,
86
+ 0.806
87
+ ],
88
+ "angle": 0,
89
+ "content": "Multiple satellites in orbit provide invaluable data essential for understanding and managing our planet. The richness of the Earth observation data lies in its diversity, encompassing various modalities such as optical, radar, multispectral, hyperspectral, and thermal imagery, each offering unique insights [1] [2]. This multiplicity is crucial in applications ranging from environmental monitoring [3] to urban planning [4], demonstrating the indispensable role of remote sensing in Earth sciences."
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.083,
95
+ 0.808,
96
+ 0.49,
97
+ 0.915
98
+ ],
99
+ "angle": 0,
100
+ "content": "The advent of foundation models has currently revolutionized the processing and analysis of remote sensing data [5, 6, 7, 8, 9, 10, 11]. Characterized by their extensive parameters and pre-trained on large-scale datasets, these models have greatly enhanced the performance on different downstream tasks. Despite their successes, current foundation models in remote sensing exhibit a critical limitation: they are predom-"
101
+ },
102
+ {
103
+ "type": "text",
104
+ "bbox": [
105
+ 0.509,
106
+ 0.243,
107
+ 0.916,
108
+ 0.32
109
+ ],
110
+ "angle": 0,
111
+ "content": "nantly tailored to either a single data modality [9] or a specific range of spatial resolutions [12] [13]. This specialization constrains their applicability across the diverse spectrum of remote sensing datasets, limiting their potential and flexibility in broader, more complex applications."
112
+ },
113
+ {
114
+ "type": "image",
115
+ "bbox": [
116
+ 0.528,
117
+ 0.334,
118
+ 0.91,
119
+ 0.505
120
+ ],
121
+ "angle": 0,
122
+ "content": null
123
+ },
124
+ {
125
+ "type": "image_caption",
126
+ "bbox": [
127
+ 0.509,
128
+ 0.521,
129
+ 0.916,
130
+ 0.612
131
+ ],
132
+ "angle": 0,
133
+ "content": "Fig. 1. Illustration of the proposed method. Our model is designed to handle input data from a range of modalities, and varying spatial resolutions, such as 30 meters and 1 meter, using a singular, unified framework. This integrative approach allows for the simultaneous processing of all modalities within one comprehensive model."
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.508,
139
+ 0.627,
140
+ 0.916,
141
+ 0.793
142
+ ],
143
+ "angle": 0,
144
+ "content": "In response, the remote sensing community has tried to develop multi-modal foundation models. However, these models typically rely on separate vision encoders for either each modality or spatial resolution. Such an approach necessitates the switching of backbones based on different input data, hindering flexibility and operational efficiency in downstream applications. In this context, a model capable of seamlessly integrating multiple data modalities and spatial resolutions within a single framework could dramatically enhance the adaptability and efficiency of remote sensing data understanding."
145
+ },
146
+ {
147
+ "type": "text",
148
+ "bbox": [
149
+ 0.508,
150
+ 0.793,
151
+ 0.915,
152
+ 0.883
153
+ ],
154
+ "angle": 0,
155
+ "content": "In this work, we propose a simple approach to this challenge: a unified foundation model employing a single, shared vision Transformer [14] backbone. This model accommodates data with various modalities and spatial resolutions, aiming to eliminate the need for multiple specialized models, as illustrated in Fig. 1."
156
+ },
157
+ {
158
+ "type": "text",
159
+ "bbox": [
160
+ 0.509,
161
+ 0.884,
162
+ 0.915,
163
+ 0.915
164
+ ],
165
+ "angle": 0,
166
+ "content": "As shown in Fig. 2 (iii), the proposed OFA-Net is designed to handle input data from a range of modalities and"
167
+ },
168
+ {
169
+ "type": "aside_text",
170
+ "bbox": [
171
+ 0.023,
172
+ 0.255,
173
+ 0.059,
174
+ 0.709
175
+ ],
176
+ "angle": 270,
177
+ "content": "arXiv:2401.07527v2 [cs.CV] 28 May 2024"
178
+ }
179
+ ],
180
+ [
181
+ {
182
+ "type": "image",
183
+ "bbox": [
184
+ 0.129,
185
+ 0.093,
186
+ 0.345,
187
+ 0.257
188
+ ],
189
+ "angle": 0,
190
+ "content": null
191
+ },
192
+ {
193
+ "type": "image_caption",
194
+ "bbox": [
195
+ 0.129,
196
+ 0.267,
197
+ 0.368,
198
+ 0.28
199
+ ],
200
+ "angle": 0,
201
+ "content": "(i) Separate Backbone for Different Modalities"
202
+ },
203
+ {
204
+ "type": "image",
205
+ "bbox": [
206
+ 0.372,
207
+ 0.14,
208
+ 0.616,
209
+ 0.253
210
+ ],
211
+ "angle": 0,
212
+ "content": null
213
+ },
214
+ {
215
+ "type": "image_caption",
216
+ "bbox": [
217
+ 0.377,
218
+ 0.267,
219
+ 0.614,
220
+ 0.278
221
+ ],
222
+ "angle": 0,
223
+ "content": "(ii) Shared Backbone for Low-resolution Data"
224
+ },
225
+ {
226
+ "type": "image",
227
+ "bbox": [
228
+ 0.626,
229
+ 0.095,
230
+ 0.872,
231
+ 0.253
232
+ ],
233
+ "angle": 0,
234
+ "content": null
235
+ },
236
+ {
237
+ "type": "image_caption",
238
+ "bbox": [
239
+ 0.63,
240
+ 0.261,
241
+ 0.87,
242
+ 0.283
243
+ ],
244
+ "angle": 0,
245
+ "content": "(iii) Shared Backbone for Different Modalities, Low- and High-resolution Data"
246
+ },
247
+ {
248
+ "type": "image_caption",
249
+ "bbox": [
250
+ 0.205,
251
+ 0.303,
252
+ 0.788,
253
+ 0.318
254
+ ],
255
+ "angle": 0,
256
+ "content": "Fig. 2. Illustration of existing and the proposed foundation models for multi-modal data."
257
+ },
258
+ {
259
+ "type": "text",
260
+ "bbox": [
261
+ 0.082,
262
+ 0.343,
263
+ 0.49,
264
+ 0.51
265
+ ],
266
+ "angle": 0,
267
+ "content": "varying spatial resolutions, such as 30 meters and 1 meter, using a singular, unified vision Transformer. This integrative approach allows for the simultaneous processing of all modalities within one comprehensive model, which is different from conventional methods. We pre-train this Transformer backbone on a meticulously curated multi-modal dataset, leveraging the masked image modeling mechanism to enhance its adaptability. Our approach offers a simpler yet effective solution for Earth vision tasks. We validate our model on 12 downstream tasks in the GEO-Bench dataset [15], demonstrating its robustness and versatility."
268
+ },
269
+ {
270
+ "type": "title",
271
+ "bbox": [
272
+ 0.205,
273
+ 0.53,
274
+ 0.367,
275
+ 0.543
276
+ ],
277
+ "angle": 0,
278
+ "content": "2. METHODOLOGY"
279
+ },
280
+ {
281
+ "type": "title",
282
+ "bbox": [
283
+ 0.084,
284
+ 0.558,
285
+ 0.364,
286
+ 0.572
287
+ ],
288
+ "angle": 0,
289
+ "content": "2.1. Multi-modal Dataset Construction"
290
+ },
291
+ {
292
+ "type": "text",
293
+ "bbox": [
294
+ 0.083,
295
+ 0.581,
296
+ 0.489,
297
+ 0.656
298
+ ],
299
+ "angle": 0,
300
+ "content": "As shown in Fig. 3, we have constructed an extensive multimodal dataset designed to underpin the development of unified foundation models for Earth vision. This dataset is composed of five distinct modalities, each offering unique spectral and spatial data characteristics:"
301
+ },
302
+ {
303
+ "type": "text",
304
+ "bbox": [
305
+ 0.083,
306
+ 0.657,
307
+ 0.489,
308
+ 0.732
309
+ ],
310
+ "angle": 0,
311
+ "content": "Sentinel-1: The Sentinel-1 dataset includes 4,642,353 samples of Synthetic Aperture Radar (SAR) imagery, with a spatial resolution of about \\(5 \\times 20\\) meters. Each image captures two bands (vv and vh) and is \\(512 \\times 512\\) pixels in size, providing dense global coverage."
312
+ },
313
+ {
314
+ "type": "text",
315
+ "bbox": [
316
+ 0.083,
317
+ 0.733,
318
+ 0.489,
319
+ 0.822
320
+ ],
321
+ "angle": 0,
322
+ "content": "Sentinel-2: Comprising 977,774 multispectral imagery samples, this dataset has a spectral range with nine bands, from 0.49 to \\(2.15\\mu m\\), maintaining a spatial resolution of 10 meters with each image sized at 512x512 pixels for dense global coverage. We use the Sentinel-2 data collected and processed by [7]."
323
+ },
324
+ {
325
+ "type": "text",
326
+ "bbox": [
327
+ 0.083,
328
+ 0.823,
329
+ 0.489,
330
+ 0.914
331
+ ],
332
+ "angle": 0,
333
+ "content": "Gaofen: To include images from the Gaofen satellite, We use the dataset collected by [16]. We crop 117,450 image patches of \\(512 \\times 512\\) pixel resolution from the dataset. Each image includes four bands encompassing RGB and NIR wavelengths with a spatial resolution of around 4 meters. This dataset mainly covers different cities in China."
334
+ },
335
+ {
336
+ "type": "text",
337
+ "bbox": [
338
+ 0.508,
339
+ 0.343,
340
+ 0.916,
341
+ 0.448
342
+ ],
343
+ "angle": 0,
344
+ "content": "NAIP: For high-resolution optical images, we use the dataset collected and processed by [7]. This dataset includes 2,332,351 high-resolution aerial images from the National Agriculture Imagery Program (NAIP), covering the USA with a fine spatial resolution of approximately 1 meter and consisting of RGB images across three bands with a size of 512x512 pixels."
345
+ },
346
+ {
347
+ "type": "text",
348
+ "bbox": [
349
+ 0.508,
350
+ 0.451,
351
+ 0.915,
352
+ 0.527
353
+ ],
354
+ "angle": 0,
355
+ "content": "EnMAP: The multi-modal dataset is further enriched with 11,483 hyperspectral image samples from EnMAP, which is published in [17]. The hyperspectral images have a spatial resolution of 30 meters and capture a wide spectral range with 224 bands, each sized at \\(128 \\times 128\\) pixels."
356
+ },
357
+ {
358
+ "type": "text",
359
+ "bbox": [
360
+ 0.508,
361
+ 0.529,
362
+ 0.915,
363
+ 0.62
364
+ ],
365
+ "angle": 0,
366
+ "content": "As illustrated in Fig. 4, the proposed OFA-Net contains mainly two components: 1) the individual patch embedding layers, and 2) the shared Transformer backbone. This simple design enables the model to extract and process features from diverse remote sensing modalities without the need to train multiple foundation models."
367
+ },
368
+ {
369
+ "type": "text",
370
+ "bbox": [
371
+ 0.508,
372
+ 0.622,
373
+ 0.915,
374
+ 0.742
375
+ ],
376
+ "angle": 0,
377
+ "content": "Individual Patch Embedding Layer. The first component of the model comprises separate patch embedding layers tailored to each modality. There exist inherent differences in input channels across modalities. SAR images from Sentinel-1 are with two bands. Hyperspectral images from EnMAP are with 224 bands. Hence, it is imperative to have a specialized embedding process that can effectively translate the raw pixel data into a format suitable for the Transformer backbone."
378
+ },
379
+ {
380
+ "type": "text",
381
+ "bbox": [
382
+ 0.509,
383
+ 0.745,
384
+ 0.915,
385
+ 0.837
386
+ ],
387
+ "angle": 0,
388
+ "content": "Let \\( P_{s1}, P_{s2}, P_{naip}, P_g \\), and \\( P_{hyper} \\) denote the patch embedding operations for five different modalities. After resizing the input images from different modalities \\( X_{s1} \\in \\mathbb{R}^{h,w,2} \\), \\( X_{s2} \\in \\mathbb{R}^{h,w,9} \\), \\( X_{naip} \\in \\mathbb{R}^{h,w,3} \\), \\( X_g \\in \\mathbb{R}^{h,w,4} \\), \\( X_h \\in \\mathbb{R}^{h,w,224} \\) into 224x224 pixels, we can compute the embeddings simply by \\( E = P(X) \\) for each modality."
389
+ },
390
+ {
391
+ "type": "text",
392
+ "bbox": [
393
+ 0.508,
394
+ 0.839,
395
+ 0.915,
396
+ 0.914
397
+ ],
398
+ "angle": 0,
399
+ "content": "Shared Transformer Backbone. The second component is a shared Transformer backbone. In this work, this single Transformer architecture processes the embedded patches from all modalities with different spatial resolutions. This shared backbone can learn a generalized representation that"
400
+ }
401
+ ],
402
+ [
403
+ {
404
+ "type": "image",
405
+ "bbox": [
406
+ 0.119,
407
+ 0.092,
408
+ 0.885,
409
+ 0.369
410
+ ],
411
+ "angle": 0,
412
+ "content": null
413
+ },
414
+ {
415
+ "type": "image_caption",
416
+ "bbox": [
417
+ 0.212,
418
+ 0.385,
419
+ 0.786,
420
+ 0.4
421
+ ],
422
+ "angle": 0,
423
+ "content": "Fig. 3. Detailed information of the five sub-datasets in the curated multi-modal dataset."
424
+ },
425
+ {
426
+ "type": "image",
427
+ "bbox": [
428
+ 0.123,
429
+ 0.425,
430
+ 0.877,
431
+ 0.598
432
+ ],
433
+ "angle": 0,
434
+ "content": null
435
+ },
436
+ {
437
+ "type": "image_caption",
438
+ "bbox": [
439
+ 0.208,
440
+ 0.611,
441
+ 0.791,
442
+ 0.627
443
+ ],
444
+ "angle": 0,
445
+ "content": "Fig. 4. Workflow of the proposed unified foundation model for multiple data modalities."
446
+ },
447
+ {
448
+ "type": "text",
449
+ "bbox": [
450
+ 0.083,
451
+ 0.652,
452
+ 0.488,
453
+ 0.756
454
+ ],
455
+ "angle": 0,
456
+ "content": "is flexible and robust enough to handle not only the variety of data modalities but also their diverse spatial resolutions. In this process, we input the embeddings for each modality \\( E_{s1} \\in \\mathbb{R}^{n,d}, E_{s2} \\in \\mathbb{R}^{n,d}, E_{naip} \\in \\mathbb{R}^{n,d}, E_g \\in \\mathbb{R}^{n,d}, E_h \\in \\mathbb{R}^{h,w,224} \\) to the Transformer backbone \\( F_{b} \\) to learn deep features. Here \\( n \\) represents the number of tokens and \\( d \\) denotes the feature dimension."
457
+ },
458
+ {
459
+ "type": "text",
460
+ "bbox": [
461
+ 0.083,
462
+ 0.763,
463
+ 0.489,
464
+ 0.915
465
+ ],
466
+ "angle": 0,
467
+ "content": "Masked Image Modeling. For the model training, we utilize the masked image modeling-based self-supervised learning loss. Specifically, we employ different decoders for different data modalities to reconstruct the randomly masked parts of the inputs. A significant advantage introduced by masked image modeling is its inherent design that does not necessitate spatially aligned multi-modal datasets. Traditional multi-modal learning approaches often rely on the precise alignment of different modalities, which can be a challenging and expensive process given the variability in"
468
+ },
469
+ {
470
+ "type": "text",
471
+ "bbox": [
472
+ 0.51,
473
+ 0.652,
474
+ 0.811,
475
+ 0.667
476
+ ],
477
+ "angle": 0,
478
+ "content": "sensor acquisition parameters and conditions."
479
+ },
480
+ {
481
+ "type": "title",
482
+ "bbox": [
483
+ 0.64,
484
+ 0.691,
485
+ 0.787,
486
+ 0.705
487
+ ],
488
+ "angle": 0,
489
+ "content": "3. EXPERIMENTS"
490
+ },
491
+ {
492
+ "type": "title",
493
+ "bbox": [
494
+ 0.51,
495
+ 0.721,
496
+ 0.702,
497
+ 0.734
498
+ ],
499
+ "angle": 0,
500
+ "content": "3.1. Downstream Datasets"
501
+ },
502
+ {
503
+ "type": "text",
504
+ "bbox": [
505
+ 0.509,
506
+ 0.745,
507
+ 0.915,
508
+ 0.837
509
+ ],
510
+ "angle": 0,
511
+ "content": "To assess the effectiveness of our proposed unified foundation model, we use the GEO-Bench benchmark datasets [15], which encompass a diverse array of tasks pertinent to Earth vision. The GEO-Bench dataset includes 12 tasks, split evenly between image classification and segmentation, each representing a common challenge in remote sensing."
512
+ },
513
+ {
514
+ "type": "title",
515
+ "bbox": [
516
+ 0.51,
517
+ 0.859,
518
+ 0.706,
519
+ 0.874
520
+ ],
521
+ "angle": 0,
522
+ "content": "3.2. Experimental Settings"
523
+ },
524
+ {
525
+ "type": "text",
526
+ "bbox": [
527
+ 0.509,
528
+ 0.884,
529
+ 0.915,
530
+ 0.915
531
+ ],
532
+ "angle": 0,
533
+ "content": "Considering the high computational cost, we use 10,000 data samples for each sub-dataset, with 50,000 samples in total"
534
+ }
535
+ ],
536
+ [
537
+ {
538
+ "type": "table_caption",
539
+ "bbox": [
540
+ 0.209,
541
+ 0.101,
542
+ 0.791,
543
+ 0.114
544
+ ],
545
+ "angle": 0,
546
+ "content": "Table 1. Performance comparison of different methods on various classification datasets"
547
+ },
548
+ {
549
+ "type": "table",
550
+ "bbox": [
551
+ 0.165,
552
+ 0.115,
553
+ 0.839,
554
+ 0.192
555
+ ],
556
+ "angle": 0,
557
+ "content": "<table><tr><td>Methods</td><td>m-bigeathnet</td><td>m-forestnet</td><td>m-brick-kiln</td><td>m-pv4ger</td><td>m-so2sat</td><td>m-eurosat</td></tr><tr><td>Random Init.</td><td>52.89</td><td>41.52</td><td>84.51</td><td>91.32</td><td>38.31</td><td>69.53</td></tr><tr><td>MAE Single</td><td>55.41</td><td>42.95</td><td>88.89</td><td>92.19</td><td>44.42</td><td>78.00</td></tr><tr><td>SatMAE [9]</td><td>55.12</td><td>—</td><td>91.89</td><td>—</td><td>45.59</td><td>73.15</td></tr><tr><td>OFA-Net (ours)</td><td>57.13</td><td>45.12</td><td>91.29</td><td>93.19</td><td>46.04</td><td>81.00</td></tr></table>"
558
+ },
559
+ {
560
+ "type": "table_caption",
561
+ "bbox": [
562
+ 0.208,
563
+ 0.216,
564
+ 0.792,
565
+ 0.229
566
+ ],
567
+ "angle": 0,
568
+ "content": "Table 2. Performance comparison of different methods on various segmentation datasets"
569
+ },
570
+ {
571
+ "type": "table",
572
+ "bbox": [
573
+ 0.107,
574
+ 0.23,
575
+ 0.898,
576
+ 0.285
577
+ ],
578
+ "angle": 0,
579
+ "content": "<table><tr><td>Methods</td><td>m-pv4ger-seg</td><td>m-nz-cattle</td><td>m-NeonTree</td><td>m-cashew-plantation</td><td>m-SA-crop-type</td><td>m-chesapeake-landcover</td></tr><tr><td>Random Init.</td><td>81.63</td><td>74.12</td><td>51.27</td><td>27.65</td><td>29.11</td><td>47.16</td></tr><tr><td>MAE Single</td><td>88.43</td><td>76.40</td><td>52.99</td><td>29.42</td><td>30.67</td><td>51.90</td></tr><tr><td>OFA-Net (Ours)</td><td>89.43</td><td>77.63</td><td>52.64</td><td>37.39</td><td>31.98</td><td>54.50</td></tr></table>"
580
+ },
581
+ {
582
+ "type": "text",
583
+ "bbox": [
584
+ 0.082,
585
+ 0.31,
586
+ 0.49,
587
+ 0.536
588
+ ],
589
+ "angle": 0,
590
+ "content": "to pre-train the OFA-Net for 100 epochs. We adopt linear probing as our primary evaluation strategy, known for its capacity to measure the quality of representations learned by self-supervised learning. This approach involves freezing the weights of the pre-trained model and training a linear classifier on top of the representations for each task. By doing so, we can directly evaluate the discriminative power of the learned features without fully fine-tuning the entire model, thus providing insight into the model's ability to generalize. A learning rate of 1e-2 is used for all the datasets. For the classification tasks, we use top-1 Accuracy as the evaluation metric. For segmentation tasks, we use mIoU as the metric. A learning rate of 1e-4 is used for all the datasets. All the experiments are conducted using PyTorch on four NVIDIA A6000 GPUs each with 48GB Memory."
591
+ },
592
+ {
593
+ "type": "title",
594
+ "bbox": [
595
+ 0.084,
596
+ 0.557,
597
+ 0.304,
598
+ 0.572
599
+ ],
600
+ "angle": 0,
601
+ "content": "3.3. Comparison Experiments"
602
+ },
603
+ {
604
+ "type": "text",
605
+ "bbox": [
606
+ 0.082,
607
+ 0.581,
608
+ 0.49,
609
+ 0.822
610
+ ],
611
+ "angle": 0,
612
+ "content": "To objectively evaluate our model, we provide the performance of four different methods on six downstream classification datasets under the linear probing setting. The results are presented in Table 1. Analyzing the table, OFA-Net generally outperforms the other methods across most datasets, indicating the benefit of multimodal pretraining in enhancing the model's feature extraction capabilities and generalization. The improvement is particularly notable on m-forestnet and m-so2sat datasets, where the OFA-Net method exceeds the performance of a randomly initialized model by approximately \\(3.6\\%\\) and \\(7.7\\%\\), respectively. The MAE Single method shows consistent improvement over random initialization, which highlights the advantage of pretraining on a single data modality compared to no pretraining. However, the gains from MAE Single are less than those from OFA-Net, underscoring the added value of multimodal learning."
613
+ },
614
+ {
615
+ "type": "text",
616
+ "bbox": [
617
+ 0.082,
618
+ 0.823,
619
+ 0.49,
620
+ 0.914
621
+ ],
622
+ "angle": 0,
623
+ "content": "Table 2 presents performance for segmentation tasks on various datasets, comparing methods that utilize a Vision Transformer (ViT) with different pretraining strategies: Random Initialization (Random Init.), pretraining on single modalities (MAE Single), and pretraining on multiple modalities (OFA-Net). On the m-NeonTree dataset, OFA"
624
+ },
625
+ {
626
+ "type": "text",
627
+ "bbox": [
628
+ 0.508,
629
+ 0.31,
630
+ 0.916,
631
+ 0.448
632
+ ],
633
+ "angle": 0,
634
+ "content": "Net shows a modest improvement over MAE Single. On the other five datasets, OFA-Net demonstrates superior performance when compared to the other methods. This suggests that pretraining on multiple modalities provides a more comprehensive feature representation, leading to more accurate segmentation. MAE Single outperforms Random Initialization in all cases, which aligns with the expected outcome that pretraining can significantly enhance the model's ability to generalize and accurately segment images."
635
+ },
636
+ {
637
+ "type": "title",
638
+ "bbox": [
639
+ 0.643,
640
+ 0.469,
641
+ 0.783,
642
+ 0.483
643
+ ],
644
+ "angle": 0,
645
+ "content": "4. CONCLUSION"
646
+ },
647
+ {
648
+ "type": "text",
649
+ "bbox": [
650
+ 0.508,
651
+ 0.498,
652
+ 0.917,
653
+ 0.68
654
+ ],
655
+ "angle": 0,
656
+ "content": "In this work, we introduce a simple yet effective method, the OFA-Net, for unified foundation models for remote sensing data. The OFA-Net consists of a single, shared Transformer backbone and dedicated patch embedding layers for multiple data modalities with different spatial resolutions. The model is trained using the masked image modeling mechanism on a carefully curated multi-modal dataset with five distinct modalities. Then the backbone model is evaluated in different downstream tasks. The experimental results on 12 different downstream tasks show that our simple method demonstrates promising performance over foundation models trained using single modalities."
657
+ },
658
+ {
659
+ "type": "title",
660
+ "bbox": [
661
+ 0.608,
662
+ 0.703,
663
+ 0.818,
664
+ 0.717
665
+ ],
666
+ "angle": 0,
667
+ "content": "5. ACKNOWLEDGEMENT"
668
+ },
669
+ {
670
+ "type": "text",
671
+ "bbox": [
672
+ 0.508,
673
+ 0.732,
674
+ 0.917,
675
+ 0.914
676
+ ],
677
+ "angle": 0,
678
+ "content": "This work is jointly supported by the German Federal Ministry of Education and Research (BMBF) in the framework of the international future AI lab \"AI4EO - Artificial Intelligence for Earth Observation: Reasoning, Uncertainties, Ethics and Beyond\" (grant number: 01DD20001), by German Federal Ministry for Economic Affairs and Climate Action in the framework of the \"national center of excellence ML4Earth\" (grant number: 50EE2201C), and by the German Federal Ministry for the Environment, Nature Conservation, Nuclear Safety and Consumer Protection (BMUV) based on a resolution of the German Bundestag (grant number: 67KI32002B; Acronym: EKAPEx)."
679
+ }
680
+ ],
681
+ [
682
+ {
683
+ "type": "title",
684
+ "bbox": [
685
+ 0.218,
686
+ 0.092,
687
+ 0.357,
688
+ 0.106
689
+ ],
690
+ "angle": 0,
691
+ "content": "6. REFERENCES"
692
+ },
693
+ {
694
+ "type": "ref_text",
695
+ "bbox": [
696
+ 0.096,
697
+ 0.121,
698
+ 0.488,
699
+ 0.196
700
+ ],
701
+ "angle": 0,
702
+ "content": "[1] Xiao Xiang Zhu, Devis Tuia, Lichao Mou, Gui-Song Xia, Liangpei Zhang, Feng Xu, and Friedrich Fraundorfer, “Deep learning in remote sensing: A comprehensive review and list of resources,” IEEE geoscience and remote sensing magazine, vol. 5, no. 4, pp. 8-36, 2017."
703
+ },
704
+ {
705
+ "type": "ref_text",
706
+ "bbox": [
707
+ 0.096,
708
+ 0.209,
709
+ 0.488,
710
+ 0.254
711
+ ],
712
+ "angle": 0,
713
+ "content": "[2] Zhitong Xiong, Fahong Zhang, Yi Wang, Yilei Shi, and Xiao Xiang Zhu, “Earthnets: Empowering ai in earth observation,” arXiv preprint arXiv:2210.04936, 2022."
714
+ },
715
+ {
716
+ "type": "ref_text",
717
+ "bbox": [
718
+ 0.096,
719
+ 0.267,
720
+ 0.488,
721
+ 0.341
722
+ ],
723
+ "angle": 0,
724
+ "content": "[3] Shan Zhao, Ioannis Prapas, Ilektra Karasante, Zhitong Xiong, Ioannis Papoutsis, Gustau Camps-Valls, and Xiao Xiang Zhu, \"Causal graph neural networks for wildfire danger prediction,\" arXiv preprint arXiv:2403.08414, 2024."
725
+ },
726
+ {
727
+ "type": "ref_text",
728
+ "bbox": [
729
+ 0.096,
730
+ 0.355,
731
+ 0.488,
732
+ 0.415
733
+ ],
734
+ "angle": 0,
735
+ "content": "[4] Zhitong Xiong, Wei Huang, Jingtao Hu, and Xiao Xiang Zhu, “The benchmark: Transferable representation learning for monocular height estimation,” IEEE Transactions on Geoscience and Remote Sensing, 2023."
736
+ },
737
+ {
738
+ "type": "ref_text",
739
+ "bbox": [
740
+ 0.096,
741
+ 0.428,
742
+ 0.488,
743
+ 0.503
744
+ ],
745
+ "angle": 0,
746
+ "content": "[5] Xiao Xiang Zhu, Zhitong Xiong, Yi Wang, Adam J Stewart, Konrad Heidler, Yuanyuan Wang, Zhenghang Yuan, Thomas Dujardin, Qingsong Xu, and Yilei Shi, \"On the foundations of earth and climate foundation models,\" arXiv preprint arXiv:2405.04285, 2024."
747
+ },
748
+ {
749
+ "type": "ref_text",
750
+ "bbox": [
751
+ 0.096,
752
+ 0.516,
753
+ 0.488,
754
+ 0.606
755
+ ],
756
+ "angle": 0,
757
+ "content": "[6] Xin Guo, Jiangwei Lao, Bo Dang, Yingying Zhang, Lei Yu, Lixiang Ru, Liheng Zhong, Ziyuan Huang, Kang Wu, Dingxiang Hu, et al., \"Skysense: A multi-modal remote sensing foundation model towards universal interpretation for earth observation imagery,\" arXiv preprint arXiv:2312.10115, 2023."
758
+ },
759
+ {
760
+ "type": "ref_text",
761
+ "bbox": [
762
+ 0.096,
763
+ 0.62,
764
+ 0.488,
765
+ 0.709
766
+ ],
767
+ "angle": 0,
768
+ "content": "[7] Favyen Bastani, Piper Wolters, Ritwik Gupta, Joe Ferdinand, and Aniruddha Kembhavi, \"Satlaspretrain: A large-scale dataset for remote sensing image understanding,\" in Proceedings of the IEEE/CVF International Conference on Computer Vision, 2023, pp. 16772-16782."
769
+ },
770
+ {
771
+ "type": "ref_text",
772
+ "bbox": [
773
+ 0.096,
774
+ 0.723,
775
+ 0.488,
776
+ 0.768
777
+ ],
778
+ "angle": 0,
779
+ "content": "[8] Keumgang Cha, Junghoon Seo, and Taekyung Lee, “A billion-scale foundation model for remote sensing images,” arXiv preprint arXiv:2304.05215, 2023."
780
+ },
781
+ {
782
+ "type": "ref_text",
783
+ "bbox": [
784
+ 0.096,
785
+ 0.78,
786
+ 0.488,
787
+ 0.871
788
+ ],
789
+ "angle": 0,
790
+ "content": "[9] Yezhen Cong, Samar Khanna, Chenlin Meng, Patrick Liu, Erik Rozi, Yutong He, Marshall Burke, David Lobell, and Stefano Ermon, \"Satmae: Pre-training transformers for temporal and multi-spectral satellite imagery,\" Advances in Neural Information Processing Systems, vol. 35, pp. 197-211, 2022."
791
+ },
792
+ {
793
+ "type": "ref_text",
794
+ "bbox": [
795
+ 0.088,
796
+ 0.884,
797
+ 0.488,
798
+ 0.914
799
+ ],
800
+ "angle": 0,
801
+ "content": "[10] Yi Wang, Nassim Ait Ali Braham, Zhitong Xiong, Chenying Liu, Conrad M Albrecht, and Xiao Xiang"
802
+ },
803
+ {
804
+ "type": "list",
805
+ "bbox": [
806
+ 0.088,
807
+ 0.121,
808
+ 0.488,
809
+ 0.914
810
+ ],
811
+ "angle": 0,
812
+ "content": null
813
+ },
814
+ {
815
+ "type": "ref_text",
816
+ "bbox": [
817
+ 0.545,
818
+ 0.092,
819
+ 0.913,
820
+ 0.136
821
+ ],
822
+ "angle": 0,
823
+ "content": "Zhu, \"Ssl4eo-s12: A large-scale multi-modal, multitemporal dataset for self-supervised learning in earth observation,\" arXiv preprint arXiv:2211.07044, 2022."
824
+ },
825
+ {
826
+ "type": "ref_text",
827
+ "bbox": [
828
+ 0.513,
829
+ 0.147,
830
+ 0.914,
831
+ 0.236
832
+ ],
833
+ "angle": 0,
834
+ "content": "[11] Zhitong Xiong, Yi Wang, Fahong Zhang, Adam J Stewart, Joëlle Hanna, Damian Borth, Ioannis Papoutsis, Bertrand Le Saux, Gustau Camps-Valls, and Xiao Xiang Zhu, “Neural plasticity-inspired foundation model for observing the Earth crossing modalities,” arXiv preprint arXiv:2403.15356, 2024."
835
+ },
836
+ {
837
+ "type": "ref_text",
838
+ "bbox": [
839
+ 0.513,
840
+ 0.248,
841
+ 0.913,
842
+ 0.292
843
+ ],
844
+ "angle": 0,
845
+ "content": "[12] Michael J Smith, Luke Fleming, and James E Geach, \"Earthpt: a foundation model for earth observation,\" arXiv preprint arXiv:2309.07207, 2023."
846
+ },
847
+ {
848
+ "type": "ref_text",
849
+ "bbox": [
850
+ 0.513,
851
+ 0.303,
852
+ 0.914,
853
+ 0.378
854
+ ],
855
+ "angle": 0,
856
+ "content": "[13] Pallavi Jain, Bianca Schoen-Phelan, and Robert Ross, \"Self-supervised learning for invariant representations from multi-spectral and sar images,\" IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing, vol. 15, pp. 7797-7808, 2022."
857
+ },
858
+ {
859
+ "type": "ref_text",
860
+ "bbox": [
861
+ 0.513,
862
+ 0.389,
863
+ 0.913,
864
+ 0.479
865
+ ],
866
+ "angle": 0,
867
+ "content": "[14] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al., \"An image is worth 16x16 words: Transformers for image recognition at scale,\" arXiv preprint arXiv:2010.11929, 2020."
868
+ },
869
+ {
870
+ "type": "ref_text",
871
+ "bbox": [
872
+ 0.513,
873
+ 0.489,
874
+ 0.913,
875
+ 0.579
876
+ ],
877
+ "angle": 0,
878
+ "content": "[15] Alexandre Lacoste, Nils Lehmann, Pau Rodriguez, Evan David Sherwin, Hannah Kerner, Björn Lütjens, Jeremy Andrew Irvin, David Dao, Hamed Alemoham-mad, Alexandre Drouin, et al., \"Geo-bench: Toward foundation models for earth monitoring,\" arXiv preprint arXiv:2306.03831, 2023."
879
+ },
880
+ {
881
+ "type": "ref_text",
882
+ "bbox": [
883
+ 0.513,
884
+ 0.59,
885
+ 0.913,
886
+ 0.664
887
+ ],
888
+ "angle": 0,
889
+ "content": "[16] Xin-Yi Tong, Gui-Song Xia, and Xiao Xiang Zhu, “Enabling country-scale land cover mapping with meter-resolution satellite imagery,” ISPRS Journal of Photogrammetry and Remote Sensing, vol. 196, pp. 178–196, 2023."
890
+ },
891
+ {
892
+ "type": "ref_text",
893
+ "bbox": [
894
+ 0.513,
895
+ 0.675,
896
+ 0.913,
897
+ 0.749
898
+ ],
899
+ "angle": 0,
900
+ "content": "[17] Martin Hermann Paul Fuchs and Begüm Demir, \"Hyspecnet-11k: A large-scale hyperspectral dataset for benchmarking learning-based hyperspectral image compression methods,\" arXiv preprint arXiv:2306.00385, 2023."
901
+ },
902
+ {
903
+ "type": "list",
904
+ "bbox": [
905
+ 0.513,
906
+ 0.092,
907
+ 0.914,
908
+ 0.749
909
+ ],
910
+ "angle": 0,
911
+ "content": null
912
+ }
913
+ ]
914
+ ]
2401.07xxx/2401.07527/d0b8afc3-0e02-44c3-9edc-3231840b6c0a_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aee3f9f3ad1e001cc2300f3826968a3e72950fb125fe661552bd446a9e62b7b7
3
+ size 2523601
2401.07xxx/2401.07527/full.md ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ONE FOR ALL: TOWARD UNIFIED FOUNDATION MODELS FOR EARTH VISION
2
+
3
+ Zhitong Xiong, Yi Wang, Fahong Zhang, Xiao Xiang Zhu
4
+
5
+ Chair of Data Science in Earth Observation, Technical University of Munich, Munich, Germany
6
+
7
+ # ABSTRACT
8
+
9
+ Foundation models characterized by extensive parameters and trained on large-scale datasets have demonstrated remarkable efficacy across various downstream tasks for remote sensing data. Current remote sensing foundation models typically specialize in a single modality or a specific spatial resolution range, limiting their versatility for downstream datasets. While there have been attempts to develop multi-modal remote sensing foundation models, they typically employ separate vision encoders for each modality or spatial resolution, necessitating a switch in backbones contingent upon the input data. To address this issue, we introduce a simple yet effective method, termed OFA-Net (One-For-All Network): employing a single, shared Transformer backbone for multiple data modalities with different spatial resolutions. Using the masked image modeling mechanism, we pre-train a single Transformer backbone on a curated multi-modal dataset with this simple design. Then the backbone model can be used in different downstream tasks, thus forging a path towards a unified foundation backbone model in Earth vision. The proposed method is evaluated on 12 distinct downstream tasks and demonstrates promising performance.
10
+
11
+ Index Terms— Foundation models, remote sensing, Earth observation, self-supervised learning
12
+
13
+ # 1. INTRODUCTION
14
+
15
+ Multiple satellites in orbit provide invaluable data essential for understanding and managing our planet. The richness of the Earth observation data lies in its diversity, encompassing various modalities such as optical, radar, multispectral, hyperspectral, and thermal imagery, each offering unique insights [1] [2]. This multiplicity is crucial in applications ranging from environmental monitoring [3] to urban planning [4], demonstrating the indispensable role of remote sensing in Earth sciences.
16
+
17
+ The advent of foundation models has recently revolutionized the processing and analysis of remote sensing data [5, 6, 7, 8, 9, 10, 11]. Characterized by their extensive parameters and pre-trained on large-scale datasets, these models have greatly enhanced the performance on different downstream tasks. Despite their successes, current foundation models in remote sensing exhibit a critical limitation: they are predominantly
18
+
19
+ tailored to either a single data modality [9] or a specific range of spatial resolutions [12] [13]. This specialization constrains their applicability across the diverse spectrum of remote sensing datasets, limiting their potential and flexibility in broader, more complex applications.
20
+
21
+ ![](images/7952b3fb127a176b36778618c504d61a8f4a1f5af551abf5476ebf1eb3369620.jpg)
22
+ Fig. 1. Illustration of the proposed method. Our model is designed to handle input data from a range of modalities, and varying spatial resolutions, such as 30 meters and 1 meter, using a singular, unified framework. This integrative approach allows for the simultaneous processing of all modalities within one comprehensive model.
23
+
24
+ In response, the remote sensing community has tried to develop multi-modal foundation models. However, these models typically rely on separate vision encoders for either each modality or spatial resolution. Such an approach necessitates the switching of backbones based on different input data, hindering flexibility and operational efficiency in downstream applications. In this context, a model capable of seamlessly integrating multiple data modalities and spatial resolutions within a single framework could dramatically enhance the adaptability and efficiency of remote sensing data understanding.
25
+
26
+ In this work, we propose a simple approach to this challenge: a unified foundation model employing a single, shared vision Transformer [14] backbone. This model accommodates data with various modalities and spatial resolutions, aiming to eliminate the need for multiple specialized models, as illustrated in Fig. 1.
27
+
28
+ As shown in Fig. 2 (iii), the proposed OFA-Net is designed to handle input data from a range of modalities and
29
+
30
+ ![](images/32a8f5c38aa31bddba75c44c3b9159dabe7904c0dd99203be39b80e5c9d57def.jpg)
31
+ (i) Separate Backbone for Different Modalities
32
+
33
+ ![](images/1a45de1d6baec916f451d2d41e2b6cf74c224066969f0d3c5afa5e5ac699b850.jpg)
34
+ (ii) Shared Backbone for Low-resolution Data
35
+ (iii) Shared Backbone for Different Modalities, Low- and High-resolution Data
36
+
37
+ ![](images/706d3e50c7a89eeac5c42ed31939c9af4e537f7104139b49cab232fbe03773c1.jpg)
38
+ Fig. 2. Illustration of existing and the proposed foundation models for multi-modal data.
39
+
40
+ varying spatial resolutions, such as 30 meters and 1 meter, using a singular, unified vision Transformer. This integrative approach allows for the simultaneous processing of all modalities within one comprehensive model, which is different from conventional methods. We pre-train this Transformer backbone on a meticulously curated multi-modal dataset, leveraging the masked image modeling mechanism to enhance its adaptability. Our approach offers a simpler yet effective solution for Earth vision tasks. We validate our model on 12 downstream tasks in the GEO-Bench dataset [15], demonstrating its robustness and versatility.
41
+
42
+ # 2. METHODOLOGY
43
+
44
+ # 2.1. Multi-modal Dataset Construction
45
+
46
+ As shown in Fig. 3, we have constructed an extensive multimodal dataset designed to underpin the development of unified foundation models for Earth vision. This dataset is composed of five distinct modalities, each offering unique spectral and spatial data characteristics:
47
+
48
+ Sentinel-1: The Sentinel-1 dataset includes 4,642,353 samples of Synthetic Aperture Radar (SAR) imagery, with a spatial resolution of about $5 \times 20$ meters. Each image captures two bands (vv and vh) and is $512 \times 512$ pixels in size, providing dense global coverage.
49
+
50
+ Sentinel-2: Comprising 977,774 multispectral imagery samples, this dataset has a spectral range with nine bands, from 0.49 to $2.15\mu m$ , maintaining a spatial resolution of 10 meters with each image sized at 512x512 pixels for dense global coverage. We use the Sentinel-2 data collected and processed by [7].
51
+
52
+ Gaofen: To include images from the Gaofen satellite, we use the dataset collected by [16]. We crop 117,450 image patches of $512 \times 512$ pixel resolution from the dataset. Each image includes four bands encompassing RGB and NIR wavelengths with a spatial resolution of around 4 meters. This dataset mainly covers different cities in China.
53
+
54
+ NAIP: For high-resolution optical images, we use the dataset collected and processed by [7]. This dataset includes 2,332,351 high-resolution aerial images from the National Agriculture Imagery Program (NAIP), covering the USA with a fine spatial resolution of approximately 1 meter and consisting of RGB images across three bands with a size of 512x512 pixels.
55
+
56
+ EnMAP: The multi-modal dataset is further enriched with 11,483 hyperspectral image samples from EnMAP, which is published in [17]. The hyperspectral images have a spatial resolution of 30 meters and capture a wide spectral range with 224 bands, each sized at $128 \times 128$ pixels.
57
+
58
+ As illustrated in Fig. 4, the proposed OFA-Net contains mainly two components: 1) the individual patch embedding layers, and 2) the shared Transformer backbone. This simple design enables the model to extract and process features from diverse remote sensing modalities without the need to train multiple foundation models.
59
+
60
+ Individual Patch Embedding Layer. The first component of the model comprises separate patch embedding layers tailored to each modality. There exist inherent differences in input channels across modalities: SAR images from Sentinel-1 have two bands, whereas hyperspectral images from EnMAP have 224 bands. Hence, it is imperative to have a specialized embedding process that can effectively translate the raw pixel data into a format suitable for the Transformer backbone.
61
+
62
+ Let $P_{s1}, P_{s2}, P_{naip}, P_g$ , and $P_{hyper}$ denote the patch embedding operations for five different modalities. After resizing the input images from different modalities $X_{s1} \in \mathbb{R}^{h,w,2}$ , $X_{s2} \in \mathbb{R}^{h,w,9}$ , $X_{naip} \in \mathbb{R}^{h,w,3}$ , $X_g \in \mathbb{R}^{h,w,4}$ , $X_h \in \mathbb{R}^{h,w,224}$ into 224x224 pixels, we can compute the embeddings simply by $E = P(X)$ for each modality.
63
+
64
+ Shared Transformer Backbone. The second component is a shared Transformer backbone. In this work, this single Transformer architecture processes the embedded patches from all modalities with different spatial resolutions. This shared backbone can learn a generalized representation that
65
+
66
+ ![](images/8a6eb0fd1c3d80a77dc3daf5666481c18363ab142a7bbfc050ba53aa1dbda6de.jpg)
67
+ Fig. 3. Detailed information of the five sub-datasets in the curated multi-modal dataset.
68
+
69
+ ![](images/c39c3fa5e31ade2224c89292785bf4a09eb9faf44044f48511cdbe4b5c0ca509.jpg)
70
+ Fig. 4. Workflow of the proposed unified foundation model for multiple data modalities.
71
+
72
+ is flexible and robust enough to handle not only the variety of data modalities but also their diverse spatial resolutions. In this process, we input the embeddings for each modality $E_{s1} \in \mathbb{R}^{n,d}, E_{s2} \in \mathbb{R}^{n,d}, E_{naip} \in \mathbb{R}^{n,d}, E_g \in \mathbb{R}^{n,d}, E_h \in \mathbb{R}^{n,d}$ to the Transformer backbone $F_{b}$ to learn deep features. Here $n$ represents the number of tokens and $d$ denotes the feature dimension.
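+
+ A minimal PyTorch sketch of this design is given below. It is an illustration based on the description above rather than the authors' released implementation; the ViT configuration, the patch size of 16, and the learned positional embedding are assumptions.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ # Input channels per modality as listed in Sec. 2.1 (Sentinel-1, Sentinel-2, NAIP, Gaofen, EnMAP).
+ MODALITY_CHANNELS = {"s1": 2, "s2": 9, "naip": 3, "gaofen": 4, "enmap": 224}
+
+ class OFABackboneSketch(nn.Module):
+     """One patch-embedding layer per modality feeding a single shared Transformer backbone."""
+     def __init__(self, img_size=224, patch=16, dim=768, depth=12, heads=12):
+         super().__init__()
+         # Individual patch embedding layers P_s1, P_s2, P_naip, P_g, P_hyper.
+         self.patch_embed = nn.ModuleDict({
+             m: nn.Conv2d(c, dim, kernel_size=patch, stride=patch)
+             for m, c in MODALITY_CHANNELS.items()
+         })
+         self.pos_embed = nn.Parameter(torch.zeros(1, (img_size // patch) ** 2, dim))
+         # Shared Transformer backbone F_b (a plain encoder stands in for the ViT used in the paper).
+         layer = nn.TransformerEncoderLayer(d_model=dim, nhead=heads, batch_first=True)
+         self.backbone = nn.TransformerEncoder(layer, num_layers=depth)
+
+     def forward(self, x, modality):
+         e = self.patch_embed[modality](x)         # (B, dim, 14, 14)
+         e = e.flatten(2).transpose(1, 2)          # (B, n, d) token embeddings E = P(X)
+         return self.backbone(e + self.pos_embed)  # the same backbone for every modality
+
+ # Example: a 2-band Sentinel-1 patch and a 224-band EnMAP patch share one backbone.
+ model = OFABackboneSketch(depth=2)                # shallow depth only to keep the example fast
+ f_s1 = model(torch.randn(1, 2, 224, 224), "s1")
+ f_hs = model(torch.randn(1, 224, 224, 224), "enmap")
+ ```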
73
+
74
+ Masked Image Modeling. For the model training, we utilize the masked image modeling-based self-supervised learning loss. Specifically, we employ different decoders for different data modalities to reconstruct the randomly masked parts of the inputs. A significant advantage introduced by masked image modeling is its inherent design that does not necessitate spatially aligned multi-modal datasets. Traditional multi-modal learning approaches often rely on the precise alignment of different modalities, which can be a challenging and expensive process given the variability in
75
+
76
+ sensor acquisition parameters and conditions.
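+
+ As a rough sketch of this objective (MAE-style random masking is assumed; the actual decoder architecture and masking ratio are not detailed here), a per-modality reconstruction loss over masked patches could look as follows:
+
+ ```python
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ # Flattened 16x16-patch sizes per modality (channels * 16 * 16); channel counts follow Sec. 2.1.
+ PATCH_PIXELS = {"s1": 2 * 256, "s2": 9 * 256, "naip": 3 * 256, "gaofen": 4 * 256, "enmap": 224 * 256}
+ # One lightweight decoder head per modality (a single linear layer stands in for the real decoders).
+ decoders = nn.ModuleDict({m: nn.Linear(768, p) for m, p in PATCH_PIXELS.items()})
+
+ def masked_reconstruction_loss(tokens, target_patches, modality, mask):
+     """tokens: (B, n, d) backbone output; target_patches: (B, n, p) flattened ground-truth patches;
+     mask: (B, n) bool tensor, True where the input patch was randomly masked.
+     The loss is averaged over masked positions only, as in MAE-style pre-training."""
+     pred = decoders[modality](tokens)                                        # (B, n, p)
+     per_patch = F.mse_loss(pred, target_patches, reduction="none").mean(-1)  # (B, n)
+     return (per_patch * mask).sum() / mask.sum().clamp(min=1)
+ ```
+
+ Because each modality is reconstructed only against its own masked input, no spatial alignment between modalities is required, which is exactly the property highlighted above.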
77
+
78
+ # 3. EXPERIMENTS
79
+
80
+ # 3.1. Downstream Datasets
81
+
82
+ To assess the effectiveness of our proposed unified foundation model, we use the GEO-Bench benchmark datasets [15], which encompass a diverse array of tasks pertinent to Earth vision. The GEO-Bench dataset includes 12 tasks, split evenly between image classification and segmentation, each representing a common challenge in remote sensing.
83
+
84
+ # 3.2. Experimental Settings
85
+
86
+ Considering the high computational cost, we use 10,000 data samples for each sub-dataset, with 50,000 samples in total
87
+
88
+ Table 1. Performance comparison of different methods on various classification datasets
89
+
90
+ <table><tr><td>Methods</td><td>m-bigearthnet</td><td>m-forestnet</td><td>m-brick-kiln</td><td>m-pv4ger</td><td>m-so2sat</td><td>m-eurosat</td></tr><tr><td>Random Init.</td><td>52.89</td><td>41.52</td><td>84.51</td><td>91.32</td><td>38.31</td><td>69.53</td></tr><tr><td>MAE Single</td><td>55.41</td><td>42.95</td><td>88.89</td><td>92.19</td><td>44.42</td><td>78.00</td></tr><tr><td>SatMAE [9]</td><td>55.12</td><td>—</td><td>91.89</td><td>—</td><td>45.59</td><td>73.15</td></tr><tr><td>OFA-Net (ours)</td><td>57.13</td><td>45.12</td><td>91.29</td><td>93.19</td><td>46.04</td><td>81.00</td></tr></table>
91
+
92
+ Table 2. Performance comparison of different methods on various segmentation datasets
93
+
94
+ <table><tr><td>Methods</td><td>m-pv4ger-seg</td><td>m-nz-cattle</td><td>m-NeonTree</td><td>m-cashew-plantation</td><td>m-SA-crop-type</td><td>m-chesapeake-landcover</td></tr><tr><td>Random Init.</td><td>81.63</td><td>74.12</td><td>51.27</td><td>27.65</td><td>29.11</td><td>47.16</td></tr><tr><td>MAE Single</td><td>88.43</td><td>76.40</td><td>52.99</td><td>29.42</td><td>30.67</td><td>51.90</td></tr><tr><td>OFA-Net (Ours)</td><td>89.43</td><td>77.63</td><td>52.64</td><td>37.39</td><td>31.98</td><td>54.50</td></tr></table>
95
+
96
+ to pre-train the OFA-Net for 100 epochs. We adopt linear probing as our primary evaluation strategy, known for its capacity to measure the quality of representations learned by self-supervised learning. This approach involves freezing the weights of the pre-trained model and training a linear classifier on top of the representations for each task. By doing so, we can directly evaluate the discriminative power of the learned features without fully fine-tuning the entire model, thus providing insight into the model's ability to generalize. For the classification tasks, we use top-1 Accuracy as the evaluation metric and a learning rate of 1e-2 for all the datasets. For segmentation tasks, we use mIoU as the metric and a learning rate of 1e-4 for all the datasets. All the experiments are conducted using PyTorch on four NVIDIA A6000 GPUs each with 48GB Memory.
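+
+ A hedged sketch of the linear-probing setup follows (the mean pooling of tokens, the SGD optimizer, and the `backbone`/`feat_dim` names are illustrative assumptions):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ def build_linear_probe(backbone, feat_dim, num_classes, lr=1e-2):
+     """Freeze the pre-trained backbone and train only a linear classifier on top of it."""
+     for p in backbone.parameters():
+         p.requires_grad = False                           # pre-trained weights stay fixed
+     head = nn.Linear(feat_dim, num_classes)
+     optimizer = torch.optim.SGD(head.parameters(), lr=lr)  # 1e-2 as reported for the classification tasks
+
+     def forward(x, modality):
+         with torch.no_grad():
+             tokens = backbone(x, modality)                # (B, n, d) frozen representations
+         return head(tokens.mean(dim=1))                   # logits from the pooled features
+     return forward, optimizer
+ ```
+
+ Only the linear head receives gradients, so the probe directly measures how separable the frozen representations are.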
97
+
98
+ # 3.3. Comparison Experiments
99
+
100
+ To objectively evaluate our model, we provide the performance of four different methods on six downstream classification datasets under the linear probing setting. The results are presented in Table 1. Analyzing the table, OFA-Net generally outperforms the other methods across most datasets, indicating the benefit of multimodal pretraining in enhancing the model's feature extraction capabilities and generalization. The improvement is particularly notable on m-forestnet and m-so2sat datasets, where the OFA-Net method exceeds the performance of a randomly initialized model by approximately $3.6\%$ and $7.7\%$ , respectively. The MAE Single method shows consistent improvement over random initialization, which highlights the advantage of pretraining on a single data modality compared to no pretraining. However, the gains from MAE Single are less than those from OFA-Net, underscoring the added value of multimodal learning.
101
+
102
+ Table 2 presents performance for segmentation tasks on various datasets, comparing methods that utilize a Vision Transformer (ViT) with different pretraining strategies: Random Initialization (Random Init.), pretraining on single modalities (MAE Single), and pretraining on multiple modalities (OFA-Net). On the m-NeonTree dataset, OFA-Net
103
+
104
+ shows a modest improvement over MAE Single. On the other five datasets, OFA-Net demonstrates superior performance when compared to the other methods. This suggests that pretraining on multiple modalities provides a more comprehensive feature representation, leading to more accurate segmentation. MAE Single outperforms Random Initialization in all cases, which aligns with the expected outcome that pretraining can significantly enhance the model's ability to generalize and accurately segment images.
105
+
106
+ # 4. CONCLUSION
107
+
108
+ In this work, we introduce a simple yet effective method, the OFA-Net, for unified foundation models for remote sensing data. The OFA-Net consists of a single, shared Transformer backbone and dedicated patch embedding layers for multiple data modalities with different spatial resolutions. The model is trained using the masked image modeling mechanism on a carefully curated multi-modal dataset with five distinct modalities. Then the backbone model is evaluated in different downstream tasks. The experimental results on 12 different downstream tasks show that our simple method demonstrates promising performance over foundation models trained using single modalities.
109
+
110
+ # 5. ACKNOWLEDGEMENT
111
+
112
+ This work is jointly supported by the German Federal Ministry of Education and Research (BMBF) in the framework of the international future AI lab "AI4EO - Artificial Intelligence for Earth Observation: Reasoning, Uncertainties, Ethics and Beyond" (grant number: 01DD20001), by German Federal Ministry for Economic Affairs and Climate Action in the framework of the "national center of excellence ML4Earth" (grant number: 50EE2201C), and by the German Federal Ministry for the Environment, Nature Conservation, Nuclear Safety and Consumer Protection (BMUV) based on a resolution of the German Bundestag (grant number: 67KI32002B; Acronym: EKAPEx).
113
+
114
+ # 6. REFERENCES
115
+
116
+ [1] Xiao Xiang Zhu, Devis Tuia, Lichao Mou, Gui-Song Xia, Liangpei Zhang, Feng Xu, and Friedrich Fraundorfer, “Deep learning in remote sensing: A comprehensive review and list of resources,” IEEE geoscience and remote sensing magazine, vol. 5, no. 4, pp. 8-36, 2017.
117
+ [2] Zhitong Xiong, Fahong Zhang, Yi Wang, Yilei Shi, and Xiao Xiang Zhu, “Earthnets: Empowering ai in earth observation,” arXiv preprint arXiv:2210.04936, 2022.
118
+ [3] Shan Zhao, Ioannis Prapas, Ilektra Karasante, Zhitong Xiong, Ioannis Papoutsis, Gustau Camps-Valls, and Xiao Xiang Zhu, "Causal graph neural networks for wildfire danger prediction," arXiv preprint arXiv:2403.08414, 2024.
119
+ [4] Zhitong Xiong, Wei Huang, Jingtao Hu, and Xiao Xiang Zhu, “The benchmark: Transferable representation learning for monocular height estimation,” IEEE Transactions on Geoscience and Remote Sensing, 2023.
120
+ [5] Xiao Xiang Zhu, Zhitong Xiong, Yi Wang, Adam J Stewart, Konrad Heidler, Yuanyuan Wang, Zhenghang Yuan, Thomas Dujardin, Qingsong Xu, and Yilei Shi, "On the foundations of earth and climate foundation models," arXiv preprint arXiv:2405.04285, 2024.
121
+ [6] Xin Guo, Jiangwei Lao, Bo Dang, Yingying Zhang, Lei Yu, Lixiang Ru, Liheng Zhong, Ziyuan Huang, Kang Wu, Dingxiang Hu, et al., "Skysense: A multi-modal remote sensing foundation model towards universal interpretation for earth observation imagery," arXiv preprint arXiv:2312.10115, 2023.
122
+ [7] Favyen Bastani, Piper Wolters, Ritwik Gupta, Joe Ferdinand, and Aniruddha Kembhavi, "Satlaspretrain: A large-scale dataset for remote sensing image understanding," in Proceedings of the IEEE/CVF International Conference on Computer Vision, 2023, pp. 16772-16782.
123
+ [8] Keumgang Cha, Junghoon Seo, and Taekyung Lee, “A billion-scale foundation model for remote sensing images,” arXiv preprint arXiv:2304.05215, 2023.
124
+ [9] Yezhen Cong, Samar Khanna, Chenlin Meng, Patrick Liu, Erik Rozi, Yutong He, Marshall Burke, David Lobell, and Stefano Ermon, "Satmae: Pre-training transformers for temporal and multi-spectral satellite imagery," Advances in Neural Information Processing Systems, vol. 35, pp. 197-211, 2022.
125
+ [10] Yi Wang, Nassim Ait Ali Braham, Zhitong Xiong, Chenying Liu, Conrad M Albrecht, and Xiao Xiang
126
+
127
+ Zhu, "Ssl4eo-s12: A large-scale multi-modal, multitemporal dataset for self-supervised learning in earth observation," arXiv preprint arXiv:2211.07044, 2022.
128
+ [11] Zhitong Xiong, Yi Wang, Fahong Zhang, Adam J Stewart, Joëlle Hanna, Damian Borth, Ioannis Papoutsis, Bertrand Le Saux, Gustau Camps-Valls, and Xiao Xiang Zhu, “Neural plasticity-inspired foundation model for observing the Earth crossing modalities,” arXiv preprint arXiv:2403.15356, 2024.
129
+ [12] Michael J Smith, Luke Fleming, and James E Geach, "Earthpt: a foundation model for earth observation," arXiv preprint arXiv:2309.07207, 2023.
130
+ [13] Pallavi Jain, Bianca Schoen-Phelan, and Robert Ross, "Self-supervised learning for invariant representations from multi-spectral and sar images," IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing, vol. 15, pp. 7797-7808, 2022.
131
+ [14] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al., "An image is worth 16x16 words: Transformers for image recognition at scale," arXiv preprint arXiv:2010.11929, 2020.
132
+ [15] Alexandre Lacoste, Nils Lehmann, Pau Rodriguez, Evan David Sherwin, Hannah Kerner, Björn Lütjens, Jeremy Andrew Irvin, David Dao, Hamed Alemohammad, Alexandre Drouin, et al., "Geo-bench: Toward foundation models for earth monitoring," arXiv preprint arXiv:2306.03831, 2023.
133
+ [16] Xin-Yi Tong, Gui-Song Xia, and Xiao Xiang Zhu, “Enabling country-scale land cover mapping with meter-resolution satellite imagery,” ISPRS Journal of Photogrammetry and Remote Sensing, vol. 196, pp. 178–196, 2023.
134
+ [17] Martin Hermann Paul Fuchs and Begüm Demir, "Hyspecnet-11k: A large-scale hyperspectral dataset for benchmarking learning-based hyperspectral image compression methods," arXiv preprint arXiv:2306.00385, 2023.
2401.07xxx/2401.07527/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eabc247e51cba1b9cb45ac9bb835dfd5f795d67ca4414a062b367ffbf75eb12b
3
+ size 329434
2401.07xxx/2401.07527/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.07xxx/2401.07579/6b2d499f-f8c2-439a-899f-9b75e508405e_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.07xxx/2401.07579/6b2d499f-f8c2-439a-899f-9b75e508405e_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.07xxx/2401.07579/6b2d499f-f8c2-439a-899f-9b75e508405e_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d8105e7c5d3e76cccc3cc7800385ed1a635652f72bfbc685912608b7c1dfac6
3
+ size 4236398
2401.07xxx/2401.07579/full.md ADDED
@@ -0,0 +1,527 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # PMFSNet: Polarized Multi-scale Feature Self-attention Network For Lightweight Medical Image Segmentation
2
+
3
+ Jiahui Zhong $^{a}$ , Wenhong Tian $^{a,\ast}$ , Yuanlun Xie $^{a,\ast}$ , Zhijia Liu $^{a}$ , Jie Ou $^{a}$ , Taoran Tian $^{b}$ and Lei Zhang $^{c}$
4
+
5
+ ${}^{a}$ School of Information and Software Engineering,University of Electronic Science and Technology of China,Chengdu,610054,P.R.China
6
+
7
+ $^{b}$ State Key Laboratory of Oral Diseases, National Clinical Research Center for Oral Diseases, West China Hospital of Stomatology, Sichuan University, Chengdu, 610041, P. R. China
8
+
9
+ cSchool of Computer Science, University of Lincoln, LN6 7TS, UK
10
+
11
+ # ARTICLEINFO
12
+
13
+ Keywords:
14
+
15
+ Medical image segmentation
16
+
17
+ Lightweight neural network
18
+
19
+ Attention mechanism
20
+
21
+ Multi-scale feature fusion
22
+
23
+ # ABSTRACT
24
+
25
+ Current state-of-the-art medical image segmentation methods prioritize accuracy but often at the expense of increased computational demands and larger model sizes. Applying these large-scale models to the relatively limited scale of medical image datasets tends to induce redundant computation, complicating the process without the necessary benefits. This approach not only adds complexity but also presents challenges for the integration and deployment of lightweight models on edge devices. For instance, recent transformer-based models have excelled in 2D and 3D medical image segmentation due to their extensive receptive fields and high parameter count. However, their effectiveness comes with a risk of overfitting when applied to small datasets and often neglects the vital inductive biases of Convolutional Neural Networks (CNNs), essential for local feature representation. In this work, we propose PMFSNet, a novel medical imaging segmentation model that effectively balances global and local feature processing while avoiding the computational redundancy typical in larger models. PMFSNet streamlines the UNet-based hierarchical structure and simplifies the self-attention mechanism's computational complexity, making it suitable for lightweight applications. It incorporates a plug-and-play PMFS block, a multi-scale feature enhancement module based on attention mechanisms, to capture long-term dependencies. Extensive comprehensive results demonstrate that even with a model ( $\leq 1$ million parameters), our method achieves superior performance in various segmentation tasks across different data scales. It achieves (IoU) metrics of $84.68\%$ , $82.02\%$ , and $78.82\%$ on public datasets of teeth CT (CBCT), ovarian tumors ultrasound(MMOTU), and skin lesions dermoscopy images (ISIC 2018), respectively. The source code is available at https://github.com/yykzjh/PMFSNet.
26
+
27
+ # 1. Introduction
28
+
29
+ Medical image segmentation plays a key role in Computer Aided Diagnosis (CAD) systems as it is often the key step for the analysis of anatomical structures [1]. Precise delineation of tissues and lesions plays a crucial role in quantifying diseases, aiding the assessment of disease prognosis, and evaluating treatment efficacy. However, several challenges remain open issues in the field of medical image segmentation, particularly due to the diversity of imaging modalities, data limitations, and the complexity of different clinical scenarios. More specifically, 1) Medical imaging encompasses a wide range of modalities, including Computed Tomography (CT), Magnetic Resonance Imaging (MRI), X-ray, and Ultrasound Imaging (UI). This diversity complicates the development of universal or adaptable algorithms. 2) Due to privacy concerns and the complexities involved in labeling, medical image datasets are relatively smaller than natural image datasets. Additionally, the process of collecting medical images with accurate ground truth annotations is labor-intensive and time-consuming, which creates a significant barrier to the compilation of high-quality datasets.
30
+
31
+ ![](images/8269185b2852935eecd1ca69b02a1d3103fcd5c44a75f554af3211cfd0c34643.jpg)
32
+ Figure 1: Params-FLOPs-IoU correlation comparison on the 3D CBCT tooth dataset. The Y-axis corresponds to the Intersection over Union (IoU) (higher is better), the X-axis corresponds to the Floating-point Operations Per Second (FLOPs) (lower is better), and the size of the circle corresponds to the Parameters (Params) (smaller is better). PMFSNet (ours) achieves the best balance of segmentation performance, model parameter count, and computational complexity.
33
+
34
+ 3) Clinical practice often involves complicated cases that are challenging for image segmentation algorithms. These include scenarios with low-contrast lesions, irregular lesion shapes (Figure 2c), blurred lesion boundaries (Figure 2b), and metal artifacts in images (Figure 2a). Such complexities can hinder the performance of segmentation models.
35
+
36
+ Leveraging the significant advancements in deep learning, segmentation approaches employing Deep Convolutional
37
+
38
+ Neural Networks (DCNNs) have demonstrated promising results [2, 3]. In particular, architectures based on UNet [4] and its variants have achieved state-of-the-art results in various medical semantic segmentation tasks. UNet is based on an encoder-decoder architecture, which consists of hierarchical downsampling convolutional layers accompanied by symmetric upsampling convolutional layers. Furthermore, skip connections effectively bridge the semantic gap between encoders and decoders by fusing early encoder features into the decoder. UNet++ [5] utilizes nested and dense skip connections to provide accurate semantic and coarse grading information to the decoder. R2U-Net [6] is constructed based on the residual concept and recurrent technique. Attention U-Net [7] highlights the salient features through skip connections and applies information extracted from coarse scales to gating, eliminating irrelevant and noisy responses generated by skip connections. In addition, to fully utilize the voxel information in CTs and MRIs, some models of 3D CNNs have been proposed [8, 9].
39
+
40
+ Despite the robust representation learning abilities of methods based on UNet and its variants, their effectiveness in capturing long-term dependencies is constrained by their inherently local receptive fields [10, 11]. Some studies have attempted to enlarge the receptive field with atrous convolution [12, 13]. However, the locality of the receptive fields in convolutional layers still limits their learning capabilities to relatively small regions.
41
+
42
+ Recently, the Vision Transformers (ViTs) [14] have demonstrated excellent capabilities in representative learning, particularly in volumetric medical image segmentation. TransUNet [15] further combines the functionality of ViT with the advantages of UNet in the field of medical image segmentation. CFATransUnet [16] combines the channelwise cross-fusion attention and transformer to integrate the channel dimension's global features, strengthening the effective information and limiting the irrelevant features more strongly. MedT [17] introduces additional control mechanisms in the self-attention module and proposes a local-global training strategy (LoGo) to further improve the performance. TransBTS [18] is the first to utilize the transformer in 3D CNN for MRI brain tumor segmentation. UNETR [19] employs pure transformers as encoders to redesign the task of volumetric (3D) medical image segmentation as a sequence-to-sequence prediction problem, effectively capturing global multi-scale information. Swin-Unet [20] builds an encoder, bottleneck, and decoder based on the pure transformer.
43
+
44
+ However, while these transformers exhibit competitive performance, they involve training on substantial amounts of data and come with the cost of the heightened model complexity. This is because the computational burden of most existing transformers, which rely on self-attention operations, exhibits quadratic complexity. For example, These efforts [21, 22] have mainly focused on improving segmentation accuracy, which in turn greatly increases the size of the model in terms of Parameters (Params) and Floating-point Operations Per Second (FLOPs). Consequently, this leads to
45
+
46
+ compromised robustness, posing challenges for system integration, particularly in embedded environments. In this case, finding ways to optimize the model design, eliminate redundant computations, and effectively utilize the existing parameters without compromising performance for medical image segmentation, remains a critical challenge.
47
+
48
+ In this work, our primary aim is to achieve the tradeoff between performance and efficiency within a generalized, lightweight framework for medical image segmentation (Figure 1). This framework is designed to be adaptable across various imaging modalities. Considering the issues and limitations mentioned above, we optimize the self-attention computation and propose the PMFSNet architecture. We introduce a polarized multi-scale feature self-attention (PMFS) block at the network's bottleneck to directly enhance global multi-scale low-dimensional features to encode long-term dependencies and enrich feature representations. The PMFS block decreases the computational complexity of self-attention from quadratic to linear by simplifying the computation of key vectors for each attention point to the computation of the global key vector. Meanwhile, to balance the performance, multi-scale features are introduced to expand the number of attention points and increase the multi-scale long-term dependencies.
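+
+ The complexity argument can be illustrated with a toy comparison; this is only a generic sketch of replacing per-position keys with a single pooled global key, not the PMFS block itself, whose exact design is given in Section 3:
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def full_self_attention(q, k, v):
+     """Standard self-attention: the (N x N) score matrix makes the cost quadratic in N."""
+     scores = q @ k.transpose(-2, -1) / q.shape[-1] ** 0.5   # (B, N, N)
+     return F.softmax(scores, dim=-1) @ v
+
+ def global_key_attention(q, k, v):
+     """Toy linear-cost variant: keys are pooled into one global context vector, so every
+     query interacts with a single vector instead of N of them (O(N*d) instead of O(N^2*d))."""
+     w = F.softmax(k.mean(dim=-1), dim=1)                    # (B, N) one score per position
+     context = torch.einsum("bn,bnd->bd", w, v)              # (B, d) pooled global key/value
+     return q * context.unsqueeze(1)                         # (B, N, d) globally modulated queries
+
+ q = k = v = torch.randn(1, 196, 64)
+ out_full, out_linear = full_self_attention(q, k, v), global_key_attention(q, k, v)
+ ```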
49
+
50
+ Our main contributions are summarized as follows:
51
+
52
+ 1) We propose a lightweight PMFSNet model with $\leq 1$ million parameters, designed for both 2D and 3D medical image segmentation.
53
+ 2) A plug-and-play global attention module is proposed that can improve segmentation by learning the long-term dependencies without significantly increasing the parameter count.
54
+ 3) Our proposed model exhibits competitive performance across three datasets, accomplishing this with significantly fewer model parameters compared to current state-of-the-art (SOTA) models.
55
+ 4) Our model's reduced complexity, without sacrificing performance, demonstrates its value in model integration and deployment, especially in resource-constrained environments.
56
+
57
+ The rest of the paper is organized as follows. Section 2 presents a review of related works. Section 3 gives detailed information about the proposed PMFSNet. Section 4 shows the experiments and results. Section 5 presents the discussion, followed by the conclusion in Section 6.
58
+
59
+ # 2. Related Work
60
+
61
+ # 2.1. CNNs-based Medical Image Segmentation
62
+
63
+ Since the introduction of the seminal U-Net [4], CNN-based networks have achieved state-of-the-art results on various 2D and 3D medical image segmentation tasks. One of the challenges in medical image segmentation is the considerable variation in scales among different objects. To address this problem, UNet creates shortcuts between the
64
+
65
+ ![](images/dce806bcdcefbf7c8dc5baba50d381f17e6da7d706632d043106ef66c0707ead.jpg)
66
+ (a) The 3D CBCT tooth dataset samples.
67
+ (b) The MMOTU dataset samples.
68
+ (c) The ISIC 2018 dataset samples.
69
+ Figure 2: The samples from different datasets. Sub-figure (a) shows some challenging samples of the 3D CBCT tooth dataset, such as missing teeth, metal artifacts, and incomplete views. Sub-figure (b) shows some challenging samples of the MMOTU dataset, such as inconspicuous lesion regions, blurred lesion boundaries, and low-contrast samples. Sub-figure (c) shows some challenging samples of the ISIC 2018 dataset, such as blurred samples, irregular lesion boundaries, and occluded lesions.
70
+
71
+ encoder and decoder to capture contextual and precise positioning information. Zhou et al. [5] connect all U-Net layers (one through four) to form U-Net++. Liu et al. [23] introduce the Residual Path (ResPath) method, involving additional convolution operations on encoder features prior to their fusion with corresponding decoder features. Chen et al. [12] propose the atrous spatial pyramid pooling (ASPP) module and the well-known DeepLab family networks, which show strong recognition capability on the same objects at different scales. For volume-wise segmentation, 3D models directly utilize the full volumetric image represented by a sequence of 2D slices or modalities. To exploit 3D context and cope with computational resource limitations, Isensee et al. [24] propose to extract features at multiple scales or assembled frameworks. However, a main limitation of these networks is their subpar performance in learning global context and long-term spatial dependencies, which can hinder segmentation effectiveness, particularly for those challenging cases (low-contrast lesions, irregular lesion shapes, blurred lesion boundaries, and metal artifacts, e.g. Figure 2).
72
+
73
+ # 2.2. Lightweight Methods
74
+
75
+ A series of methods [25-29] have been proposed to tackle the issue of high computational costs in models, focusing on enhancing feature representation in lightweight models or reducing their computational complexity without compromising performance. In GoogleNet, Szegedy et al. [25] propose the Inception architecture, which can obtain richer features without increasing network depth by merging convolution kernels in parallel rather than simply stacking convolution layers. In Xception, Chollet [26] generalizes the ideas of separable convolutions in the Inception series and proposes a depthwise separable convolution, which can drastically reduce computation complexity by factorizing a standard convolution into a depthwise convolution followed
76
+
77
+ by a pointwise convolution (i.e., $1 \times 1$ convolution). Thanks to the success of Xception, depthwise separable convolution has become an essential component of MobileNetV2 [27]. The dilated (atrous) convolution [28] introduces ordinary convolution layers with the parameter of dilation rate to enlarge the receptive field size without increasing the number of training parameters. The concept of group convolution is first proposed in SqueezeNet [29], and it is further successfully adopted in ResNeXt [30]. However, standard group convolution does not communicate between different groups, which restricts its representation capability. To solve this problem, ShuffleNet [31] proposes a channel shuffle unit to randomly permute the output channels of group convolution so that the features of different groups can interact fully.
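+
+ The factorization described above can be written in a few lines of PyTorch; the channel sizes here are arbitrary and only illustrate the parameter saving:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class DepthwiseSeparableConv2d(nn.Module):
+     """A standard conv factorized into a depthwise conv (groups=in_ch) followed by a 1x1 pointwise conv."""
+     def __init__(self, in_ch, out_ch, kernel_size=3, padding=1):
+         super().__init__()
+         self.depthwise = nn.Conv2d(in_ch, in_ch, kernel_size, padding=padding, groups=in_ch)
+         self.pointwise = nn.Conv2d(in_ch, out_ch, kernel_size=1)
+
+     def forward(self, x):
+         return self.pointwise(self.depthwise(x))
+
+ # Parameter count for 64 -> 128 channels with a 3x3 kernel (biases ignored):
+ #   standard convolution:   64 * 128 * 3 * 3 = 73,728
+ #   depthwise + pointwise:  64 * 3 * 3 + 64 * 128 = 8,768
+ y = DepthwiseSeparableConv2d(64, 128)(torch.randn(1, 64, 32, 32))   # -> (1, 128, 32, 32)
+ ```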
78
+
79
+ Despite these advances in lightweight model development, their adoption in 3D volume segmentation remains limited. In contrast, current state-of-the-art methods for medical image segmentation mainly focus on enhancing accuracy, frequently resulting in higher computational costs. This trend has inspired us to design a lightweight segmentation model specifically aimed at reducing redundant computations during the learning process, with a strong emphasis on thoroughly investigating and optimizing the efficiency of parameter usage.
80
+
81
+ # 2.3. Attention Mechanisms and Transformers
82
+
83
+ Attention mechanisms are essential for directing model focus towards relevant features, enhancing performance. In recent years, attention mechanisms have substantially advanced semantic segmentation. Oktay et al. [7] propose Attention U-Net, which inserts an attention gate to control the spatial importance of features before fusing the feature maps from the encoder and decoder. Besides applying the spatial attention mechanism, Chen et al. [32] propose FED-Net, which uses a channel attention mechanism to improve
84
+
85
+ the performance of liver lesion segmentation. There are also some notable works [33, 34] aiming at mixing spatial and channel attention mechanisms. Transformers with Multi-Head Self-Attention (MHSA) have also made significant progress in medical image segmentation tasks since the introduction of Vision Transformers (ViTs) [14]. Significant efforts have been put into integrating ViTs for dense predictions in the medical imaging domain [15, 18, 19, 22]. With the advancement of the Swin Transformer, SwinUNETR [21] equips the encoder with Swin Transformer blocks to compute self-attention, enhancing brain tumor segmentation accuracy in 3D MRI images. Another U-Net-like architecture, Swin-Unet [20], further adopts the Swin Transformer in both the encoder and decoder networks via skip connections to learn local and global semantic features for multi-organ abdominal CT segmentation. SwinBTS [35] has a similar intrinsic structure to Swin-Unet with an enhanced transformer module for detailed feature extraction. Nevertheless, transformer-based frameworks for volumetric segmentation still necessitate extended training periods and involve high computational complexity, particularly when extracting features across multiple scales [36, 37].
86
+
87
+ Several studies [38, 39] have explored integrating attention modules or Vision Transformers (ViTs) into lightweight architectures. For instance, LeaNet [38] utilizes various attention modules for edge segmentation. LM-Net [39] optimizes transformer modules to address the challenge of ViTs needing extensive pre-training on large datasets. However, these models are tailored for lightweight processing in 2D image modalities. Our model is versatile, designed to accommodate both 2D and 3D medical imaging modalities, and features a more flexible attention module.
88
+
89
+ # 3. Our Method
90
+
91
+ # 3.1. Overview of PMFSNet
92
+
93
+ The overall architecture of the PMFSNet is illustrated in Figure 3. The model consists of four core components: an encoder, skip connections, a Polarized Multi-scale Feature Self-attention (PMFS) block, and an optional decoder. In particular, each encoder stage utilizes dense-feature-stacking convolution to retain as much multi-scale semantic information as possible. We design the decoder to be optional and adaptable to tasks of varying magnitudes. Meanwhile, the architecture employs skip connections to capture contextual information and perform multi-scale feature extraction.
94
+
95
+ The PMFSNet enhances the fusion of multi-scale semantic information and global contextual features during the learning process, where the PMFS block enables more nuanced and precise encoding of multi-scale long-term dependencies. The PMFS block is, in turn, designed to efficiently compensate for any performance loss that may result from pruning weights for adaptability. The PMFS block offers a more lightweight solution than traditional self-attention mechanisms. Furthermore, its plug-and-play design facilitates easy integration into a variety of models without substantially adding to the computational overhead.
96
+
97
+ # 3.2. Polarized Multi-scale Feature Self-attention (PMFS) Block
98
+
99
+ Inspired by "polarized filtering" [40], we propose the Polarized Multi-scale Feature Self-attention (PMFS) block to tackle the limitation of traditional self-attention mechanisms, which are computationally intensive, especially when extended to 3D networks. The PMFS block is a plug-and-play block consisting of three components in sequential order: an Adaptive Multi-branch Feature Fusion (AMFF) layer, a Polarized Multi-scale Channel Self-attention (PMCS) module, and a Polarized Multi-scale Spatial Self-attention (PMSS) module. The PMCS module enhances features in the channel dimension, while the PMSS module enhances features in the spatial dimension.
100
+
101
+ # 3.2.1. Adaptive Multi-branch Feature Fusion (AMFF) Layer
102
+
103
+ Figure 4 illustrates that, because resolutions and channel counts vary across stages, the multi-branch features must first be standardized to a uniform size by the adaptive multi-branch feature fusion layer before feature enhancement is performed.
104
+
105
+ The Adaptive Multi-branch Feature Fusion (AMFF) layer unifies diverse feature maps to the same number of channels and the same resolution. This is achieved by appropriately setting the channels, pooling kernel sizes, and the number of branches for the multi-branch features.
106
+
107
+ We denote the input features of the different branches as $X_{l}\in \mathbb{R}^{C_{l}\times H_{l}\times W_{l}\times D_{l}}$ $(l\in \{1,2,3\})$, corresponding to the outputs of $Stage_{l}$ $(l\in \{1,2,3\})$. Max-pooling is first used to downsample each $X_{l}$ to the same size as $X_{3}$ $(X_{3}\in \mathbb{R}^{C_3\times H\times W\times D})$, and convolutions are then used to rescale the features so that the different branches have the same number of channels. The specific operations are given by the following equations, where $M_l\in \mathbb{R}^{\frac{C}{3}\times H\times W\times D}$ and $A\in \mathbb{R}^{C\times H\times W\times D}$:
108
+
109
+ $$
110
+ M_{l} = W_{l}\left(K_{l}\left(X_{l}\right)\right), \quad l \in \{1, 2, 3\} \tag{1}
111
+ $$
112
+
113
+ $$
114
+ A = M_{1} \oplus M_{2} \oplus M_{3}, \tag{2}
115
+ $$
116
+
117
+ where $K_{1}$ , $K_{2}$ , and $K_{3}$ are max-pooling operations with kernel sizes 4, 2, and 1, respectively, $W_{1}$ , $W_{2}$ , and $W_{3}$ refer to $3 \times 3 \times 3$ convolution blocks, and $\oplus$ denotes the concatenation operation.
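+
+ A minimal PyTorch sketch of the AMFF layer following Eqs. (1)-(2) is given below; the class name, the example branch channels, and the use of BatchNorm/ReLU inside each $W_{l}$ block are assumptions made for illustration:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class AMFF(nn.Module):
+     """Adaptive Multi-branch Feature Fusion: unify three branch features with
+     different channels/resolutions and concatenate them along the channel axis."""
+     def __init__(self, in_channels=(36, 64, 104), branch_channels=48):
+         super().__init__()
+         # K_l: max-pooling with kernel sizes 4, 2, 1 (Eq. 1)
+         self.pools = nn.ModuleList([nn.MaxPool3d(k) for k in (4, 2, 1)])
+         # W_l: 3x3x3 convolution blocks that rescale the channels (Eq. 1)
+         self.convs = nn.ModuleList([
+             nn.Sequential(
+                 nn.Conv3d(c, branch_channels, kernel_size=3, padding=1),
+                 nn.BatchNorm3d(branch_channels),
+                 nn.ReLU(inplace=True),
+             ) for c in in_channels
+         ])
+
+     def forward(self, x1, x2, x3):
+         # M_l = W_l(K_l(X_l)); A = M_1 (+) M_2 (+) M_3 (Eq. 2)
+         feats = [conv(pool(x)) for x, pool, conv in zip((x1, x2, x3), self.pools, self.convs)]
+         return torch.cat(feats, dim=1)
+
+ # Example shapes from Figure 4 (batch size 1):
+ # AMFF()(torch.randn(1, 36, 80, 80, 48), torch.randn(1, 64, 40, 40, 24),
+ #        torch.randn(1, 104, 20, 20, 12)).shape  ->  (1, 144, 20, 20, 12)
+ ```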
118
+
119
+ # 3.2.2. Polarized Multi-scale Channel Self-attention (PMCS) Module
120
+
121
+ As shown in Figure 5, the PMCS module utilizes multiscale feature information for expanding the number of attention points to capture global contextual features and more nuanced multi-scale channel dependencies. The PMCS module efficiently computes attention scores to enhance multiscale channel features.
122
+
123
+ The channels of the input feature map are formed by concatenating three branches from different scales. The PMCS module combines branches and channels to expand the number of attention points (Figure 5). Firstly, we compute each attention point's query and value vectors and the global channel key matrix $K^{ch}$ . Then, the matrix $Q^{ch}$ consisting of the query
124
+
125
+ ![](images/c7f63dc3fb5cefedd8e33cdf72882d074bab64e40324b82143c7cc2cf3f53986.jpg)
126
+ Figure 3: Overview of Polarized Multi-scale Feature Self-attention Network (PMFSNet) architecture. The input medical images are fed into an encoder with 3 stages. Then, the PMFS block enhances the features of the network's bottleneck using features at different scales. Finally, the skip connections are fused with an optional decoder, which sequentially incorporates the global contextual features into the enhanced bottleneck features by CNN-based up-sampling, gradually restoring them to the same resolution as the input image.
127
+
128
+ ![](images/9d0c99b16346cf819606b7dd2ffe72a960a4a3417dedd5450d4c69c129a7049f.jpg)
129
+ Figure 4: The Adaptive Multi-branch Feature Fusion (AMFF) layer. In one case, the resolutions of $X_{1}, X_{2}, X_{3}$ are $36 \times 80 \times 80 \times 48$ , $64 \times 40 \times 40 \times 24$ , and $104 \times 20 \times 20 \times 12$ , respectively. Downsampling and channel scaling unify each branch to a resolution of $48 \times 20 \times 20 \times 12$ . The resolution of the fused feature A is $144 \times 20 \times 20 \times 12$ , obtained by concatenating the multi-branch features.
130
+
131
+ vectors of all attention points and the matrix $K^{ch}$ are multiplied to obtain the multi-scale channel attention score matrix $Z^{ch}$ . Finally, the matrix $Z^{ch}$ is multiplied element-wise by the matrix $V^{ch}$ to enhance the multi-scale channel features.
132
+
133
+ The input of the PMCS module is the fusion feature $A \in \mathbb{R}^{C \times H \times W \times D}$ , and the multi-scale channel attention
134
+
135
+ ![](images/078eb050f4f6b081e0f2b6dc051c26e4afc21cd6da1f968c016aa418e916ff90.jpg)
136
+ Figure 5: The Polarized Multi-scale Channel Self-attention (PMCS) module. In one case, the resolution of the input feature map is $C \times H \times W \times D$ ( $144 \times 20 \times 20 \times 12$ ), where the channel dimension $C$ ( $48 + 48 + 48$ ) is obtained by concatenating three branches whose channels are each unified to 48. A depthwise separable convolution block is utilized to further decrease computational complexity.
137
+
138
+ score matrix $Z^{ch} \in \mathbb{R}^{C \times 1 \times 1 \times 1}$ is defined as:
139
+
140
+ $$
141
+ Q^{ch} = \sigma_{q}\left(W_{q}(A)\right) \tag{3}
142
+ $$
143
+
144
+ ![](images/ad1b434b23813c9d2fdfbbfa6181a24f0746feb5640b3204798410345b35cedd.jpg)
145
+ Figure 6: The Polarized Multi-scale Spatial Self-attention (PMSS) module. The PMSS module combines multi-branch spatial features, which means that it often needs to transform dimensions using the permute operation. It is worth noting that the bottleneck feature is eventually extracted from the enhanced multi-branch feature and has the same resolution as before the enhancement.
146
+
147
+ $$
148
+ K^{ch} = F_{SM}\left(\sigma_{k}\left(W_{k}(A)\right)\right) \tag{4}
149
+ $$
150
+
151
+ $$
152
+ Z^{ch} = F_{SG}\left(F_{LN}\left(W_{z}\left(Q^{ch} \times K^{ch}\right)\right)\right), \tag{5}
153
+ $$
154
+
155
+ where $W_{q}$ , $W_{k}$ , and $W_{z}$ are convolution layers, and $\sigma_{q}$ , $\sigma_{k}$ refer to different reshape operations. $F_{SM}(\cdot)$ denotes the softmax operation $F_{\mathrm{SM}}(x)_i = \frac{e^{x_i}}{\sum_{j=1}^N e^{x_j}}$ , $i \in \{1,2,\ldots,N\}$ . $F_{LN}(\cdot)$ represents the layer normalization operation. $F_{SG}(\cdot)$ is the sigmoid activation function.
156
+
157
+ The output of the PMCS module is the feature map after multi-scale channel enhancement $A^{ch} \in \mathbb{R}^{C \times H \times W \times D}$ :
158
+
159
+ $$
160
+ V^{ch} = W_{v}(A) \tag{6}
161
+ $$
162
+
163
+ $$
164
+ A^{ch} = V^{ch} \odot Z^{ch}, \tag{7}
165
+ $$
166
+
167
+ where $W_{v}$ is a convolution layer, and $\odot$ denotes the element-wise product.
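+
+ The following is a simplified PyTorch sketch of the PMCS computation in Eqs. (3)-(7). The reshape operators $\sigma_{q}$ , $\sigma_{k}$ and the convolution configurations are not fully specified above, so the single-channel key projection and the plain $1 \times 1 \times 1$ convolutions here are assumptions (the actual module uses depthwise separable convolutions):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class PMCS(nn.Module):
+     """Polarized Multi-scale Channel Self-attention (sketch of Eqs. 3-7)."""
+     def __init__(self, channels=144):
+         super().__init__()
+         self.W_q = nn.Conv3d(channels, channels, kernel_size=1)  # query projection
+         self.W_k = nn.Conv3d(channels, 1, kernel_size=1)         # key projection, polarized to 1 channel
+         self.W_v = nn.Conv3d(channels, channels, kernel_size=1)  # value projection
+         self.W_z = nn.Conv3d(channels, channels, kernel_size=1)  # score re-projection
+         self.ln = nn.LayerNorm(channels)
+
+     def forward(self, a):
+         b, c, h, w, d = a.shape
+         n = h * w * d
+         q = self.W_q(a).reshape(b, c, n)                              # sigma_q: (B, C, N)
+         k = torch.softmax(self.W_k(a).reshape(b, 1, n), dim=-1)       # F_SM(sigma_k(W_k(A))): (B, 1, N)
+         z = torch.bmm(q, k.transpose(1, 2)).reshape(b, c, 1, 1, 1)    # Q^ch x K^ch: (B, C, 1, 1, 1)
+         z = torch.sigmoid(self.ln(self.W_z(z).reshape(b, c)))         # Z^ch (Eq. 5)
+         v = self.W_v(a)                                               # V^ch (Eq. 6)
+         return v * z.reshape(b, c, 1, 1, 1)                           # A^ch = V^ch * Z^ch (Eq. 7)
+ ```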
168
+
169
+ # 3.2.3. Polarized Multi-scale Spatial Self-attention (PMSS) Module
170
+
171
+ The PMSS module is designed to capture more nuanced multi-scale spatial dependencies; it combines branches and spatial positions to expand the number of attention points (Figure 6). Firstly, we compute each attention point's query and value vectors and the global spatial key matrix $K^{sp}$ . Then, the matrix $K^{sp}$ and the matrix $Q^{sp}$ consisting of the query vectors of all attention points are multiplied to obtain the multi-scale spatial attention score matrix $Z^{sp}$ . Finally, the matrix $Z^{sp}$ is multiplied element-wise by the matrix $V^{sp}$ to enhance the multi-scale spatial features.
172
+
173
+ At the end of the PMSS module, the bottleneck features are extracted from the enhanced multi-scale features. The advantage is that the enhancement of the features can be made more precise while obtaining a global context, resulting in a better semantic feature representation.
174
+
175
+ The input of the PMSS module is the feature map after multi-scale channel enhancement, $A^{ch} \in \mathbb{R}^{C \times H \times W \times D}$ , and the multi-scale spatial attention score matrix $Z^{sp} \in \mathbb{R}^{1 \times H \times W \times D \times 3}$ is defined as:
176
+
177
+ $$
178
+ Q^{sp} = \sigma_{q}\left(P_{q}\left(W_{q}\left(A^{ch}\right)\right)\right) \tag{8}
179
+ $$
180
+
181
+ $$
182
+ K^{sp} = F_{SM}\left(\sigma_{k}^{2}\left(F_{GM}\left(P_{k}\left(\sigma_{k}^{1}\left(W_{k}\left(A^{ch}\right)\right)\right)\right)\right)\right) \tag{9}
183
+ $$
184
+
185
+ $$
186
+ Z^{sp} = F_{SG}\left(\sigma_{z}\left(K^{sp} \times Q^{sp}\right)\right), \tag{10}
187
+ $$
188
+
189
+ where $W_{q}$ and $W_{k}$ are convolution layers, and $\sigma_{q}$ , $\sigma_{k}^{1}$ , $\sigma_{k}^{2}$ , $\sigma_{z}$ refer to different reshape operations. $P_{q}$ , $P_{k}$ denote dimension permute operations. $F_{GM}(\cdot)$ represents the global mean operation. $F_{SM}(\cdot)$ denotes the softmax operation, and $F_{SG}(\cdot)$ is the sigmoid activation function.
190
+
191
+ The output of the PMSS module is the feature map after multi-scale spatial enhancement $A^{sp} \in \mathbb{R}^{C_3 \times H \times W \times D}$ ( $C_3 \times H \times W \times D$ corresponds to the resolution of the feature map $X_3$ output by $Stage_3$ ):
192
+
193
+ $$
194
+ V^{sp} = P_{v}\left(\sigma_{v}\left(W_{v}\left(A^{ch}\right)\right)\right) \tag{11}
195
+ $$
196
+
197
+ $$
198
+ A^{sp} = W_{out}\left(\sigma_{out}\left(P_{out}\left(V^{sp} \odot Z^{sp}\right)\right)\right), \tag{12}
199
+ $$
200
+
201
+ where $W_{v}$ and $W_{out}$ are convolution layers, $P_{v}$ , $P_{out}$ refer to dimension permute operations, $\sigma_{v}$ , $\sigma_{out}$ denote different reshape operations, and $\odot$ is the element-wise product.
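+
+ Analogously, a simplified sketch of the PMSS computation in Eqs. (8)-(12) is shown below. How the channels are grouped into the three branches, the exact permute/reshape operators, and the way $W_{out}$ extracts the bottleneck feature are assumptions, since they are only named above:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class PMSS(nn.Module):
+     """Polarized Multi-scale Spatial Self-attention (sketch of Eqs. 8-12)."""
+     def __init__(self, channels=144, branches=3):
+         super().__init__()
+         assert channels % branches == 0
+         self.branches, self.c3 = branches, channels // branches   # c3 = C_3 channels per branch
+         self.W_q = nn.Conv3d(channels, channels, kernel_size=1)
+         self.W_k = nn.Conv3d(channels, channels, kernel_size=1)
+         self.W_v = nn.Conv3d(channels, channels, kernel_size=1)
+         self.W_out = nn.Conv3d(channels, self.c3, kernel_size=1)  # extract the bottleneck feature
+
+     def forward(self, a_ch):
+         b, c, h, w, d = a_ch.shape
+         n = h * w * d
+         # Q^sp: one query vector of length C_3 per (branch, spatial position)
+         q = self.W_q(a_ch).reshape(b, self.branches, self.c3, n).permute(0, 1, 3, 2)   # (B, 3, N, C3)
+         # K^sp: one global key per branch via global mean pooling and softmax (Eq. 9)
+         k = torch.softmax(self.W_k(a_ch).reshape(b, self.branches, self.c3, n).mean(-1), dim=-1)
+         z = torch.sigmoid(torch.matmul(q, k.unsqueeze(-1)))                            # Z^sp: (B, 3, N, 1)
+         z = z.permute(0, 1, 3, 2).reshape(b, self.branches, 1, h, w, d)
+         v = self.W_v(a_ch).reshape(b, self.branches, self.c3, h, w, d)                 # V^sp
+         out = (v * z).reshape(b, c, h, w, d)                                           # V^sp * Z^sp
+         return self.W_out(out)                                                         # A^sp: (B, C_3, H, W, D)
+ ```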
202
+
203
+ # 3.3. Loss Function
204
+
205
+ Class imbalance is a common issue in medical image segmentation, especially when random cropping is used during training. This approach may lead to the loss of representation for certain classes. To alleviate the above problem, we propose the weighted extended dice loss (WEDL). The WEDL can flexibly weight different classes and is smoother than the standard dice loss, which can be formulated as follows (The comparison with the standard dice loss is shown in Figure 7):
206
+
207
+ $$
208
+ \mathrm{WEDL} = 1 - \sum_{i=1}^{N_{c}} w_{i} \frac{2 \sum_{j=1}^{N_{v}} p_{j} g_{j}}{\sum_{j=1}^{N_{v}} p_{j}^{2} + \sum_{j=1}^{N_{v}} g_{j}^{2} + \varepsilon}, \tag{13}
209
+ $$
210
+
211
+ where the WEDL weights the $N_{c}$ classes and $w_{i}$ denotes the weight of the $i$-th class. For each class, the sums run over $N_{v}$ voxels of the predicted segmentation volume ( $p_j \in P$ ) and the ground truth volume ( $g_{j} \in G$ ).
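+
+ A minimal PyTorch sketch of Eq. (13); the function name, the epsilon value, and the convention that the class weights sum to one are assumptions:
+
+ ```python
+ import torch
+
+ def weighted_extended_dice_loss(pred, target, weights, eps=1e-5):
+     """WEDL (Eq. 13): class-weighted soft Dice loss with squared denominators.
+     pred:    (B, N_c, ...) softmax probabilities
+     target:  (B, N_c, ...) one-hot ground truth
+     weights: length-N_c class weights w_i (assumed to sum to 1)."""
+     dims = tuple(range(2, pred.dim()))                         # sum over all voxel dimensions
+     intersection = (pred * target).sum(dim=dims)               # sum_j p_j g_j per class
+     denom = (pred ** 2).sum(dim=dims) + (target ** 2).sum(dim=dims) + eps
+     dice_per_class = 2.0 * intersection / denom                # (B, N_c)
+     w = torch.as_tensor(weights, dtype=pred.dtype, device=pred.device)
+     return 1.0 - (w * dice_per_class).mean(dim=0).sum()
+
+ # e.g. pred = torch.softmax(logits, dim=1); loss = weighted_extended_dice_loss(pred, onehot, [0.3, 0.7])
+ ```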
212
+
213
+ # 4. Experiments
214
+
215
+ Our method has been evaluated on three public datasets, encompassing multiple medical image modalities, including the tooth dataset from CBCT [41], the MMOTU dataset from ultrasound images [42], and the ISIC 2018 dataset from dermoscopy images [43, 44]. We conduct extensive comparisons with a range of state-of-the-art (SOTA) methods, demonstrating the superiority of our proposed PMFSNet in achieving an optimal trade-off between accuracy and efficiency. Moreover, we also conducted an ablation study on the effectiveness and plug-and-play nature of the proposed PMFS block.
216
+
217
+ ![](images/6e1f10e0aa2322f5a57b0e7b1373fce23ce2ff908cc15d886081986f45a6d9fd.jpg)
218
+ Figure 7: The WEDL vs. standard dice loss.
219
+
220
+ # 4.1. Dataset
221
+
222
+ # 4.1.1. 3D CBCT Tooth
223
+
224
+ The 3D CBCT tooth dataset is collected from a subset of a large-scale dataset [41], which includes ground truth annotations. This dataset is used for segmentation and reconstruction of individual teeth and alveolar bone to aid in dental treatment (e.g., orthodontics, dental implants, and restoration). The large-scale dataset consists of large-scale CBCT imaging data from 15 different centers in China with varying data distributions. 129 scans are used in our experiments, which are divided into a training set of 103 scans and a test set of 26 scans. The data format of the original images is NIFTI, the voxel spacing is $0.25\mathrm{mm}$ , and the resolutions are $400\times 400\times 200$ voxels, $400\times 400\times 400$ voxels, and $320\times 320\times 320$ voxels. Some samples are in Figure 2a.
225
+
226
+ # 4.1.2. MMOTU
227
+
228
+ The MMOTU dataset is a Multi-modality Ovarian Tumor Ultrasound (MMOTU) image dataset [42], which is commonly used for computer-aided diagnosing and ovarian tumor detection. It consists of two sub-sets with two modalities: OTU_2d and OTU_CEUS, respectively, including 1469 2D ultrasound images and 170 CEUS images. The MMOTU dataset is applied to different challenging tasks such as semantic segmentation and tumor recognition. We use 1000 images for training and 469 images for testing taken from task 1 of binary semantic segmentation. Particularly, the OTU_2d sub-set contains 216 Color Doppler Flow Images (CDFI), where the rest of the 1253 images are traditional 2D ultrasound images, and the data format is all 2D RGB. In OTU_2d, the width and height of images respectively range from 302~1135 pixels and 226~794 pixels. Some samples are in Figure 2b.
229
+
230
+ # 4.1.3. ISIC 2018
231
+
232
+ The ISIC 2018 dataset is published by the International Skin Imaging Collaboration (ISIC) as a large-scale dataset of dermoscopy images in 2018 [43, 44], which has become a major benchmark for the evaluation of medical image algorithms. The ISIC 2018 dataset is generally used for skin lesion analysis for melanoma detection. It contains 2594 dermoscopy images that are available at: https://challenge2018.isic-archive.com/. The data format of the ISIC 2018 dataset images is 2D RGB. We divide the 2594 images provided in task 1 of boundary segmentation into training $(80\%)$ and test set $(20\%)$ , and we can see some samples in Figure 2c.
233
+
234
+ # 4.2. Evaluation Metrics
235
+
236
+ We employ a variety of segmentation evaluation metrics to assess the network's performance from multiple perspectives, including Dice Similarity Coefficient (DSC), Intersection over Union (IoU), Mean Intersection over Union (mIoU), Accuracy (ACC), Hausdorff Distance (HD), Average Symmetric Surface Distance (ASSD), and Surface Overlap (SO).
237
+
238
+ $$
239
+ \mathrm{DSC}(P, G) = \frac{2|P \cap G|}{|P| + |G|} = \frac{2 \times TP}{(TP + FN) + (TP + FP)} \tag{14}
240
+ $$
241
+
242
+ $$
243
+ \mathrm{IoU} = \frac{TP}{TP + FN + FP} \tag{15}
244
+ $$
245
+
246
+ $$
247
+ \mathrm{mIoU} = \frac{1}{n} \sum_{i=1}^{n} \frac{TP_{i}}{TP_{i} + FN_{i} + FP_{i}} \tag{16}
248
+ $$
249
+
250
+ $$
251
+ \mathrm{ACC} = \frac{TP + TN}{TP + TN + FP + FN}, \tag{17}
252
+ $$
253
+
254
+ where TP, TN, FP, and FN are True Positive, True Negative, False Positive, and False Negative, respectively.
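+
+ For reference, the overlap metrics of Eqs. (14)-(17) can be computed from binary masks as in the following NumPy sketch (mIoU is simply the per-class IoU averaged over classes); empty masks would need an explicit guard against division by zero:
+
+ ```python
+ import numpy as np
+
+ def overlap_metrics(pred, gt):
+     """DSC, IoU, and ACC from binary masks via TP/TN/FP/FN counts (Eqs. 14-17)."""
+     pred, gt = pred.astype(bool), gt.astype(bool)
+     tp = np.logical_and(pred, gt).sum()
+     tn = np.logical_and(~pred, ~gt).sum()
+     fp = np.logical_and(pred, ~gt).sum()
+     fn = np.logical_and(~pred, gt).sum()
+     dsc = 2 * tp / (2 * tp + fn + fp)
+     iou = tp / (tp + fn + fp)
+     acc = (tp + tn) / (tp + tn + fp + fn)
+     return dsc, iou, acc
+ ```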
255
+
256
+ $$
257
+ d(p, S_{G}) = \min_{g \in S_{G}} \| p - g \|, \quad p \in S_{P} \tag{18}
258
+ $$
259
+
260
+ $$
261
+ \mathrm{HD}(S_{P}, S_{G}) = \max \left\{ \max_{p \in S_{P}} d(p, S_{G}), \; \max_{g \in S_{G}} d(g, S_{P}) \right\} \tag{19}
262
+ $$
263
+
264
+ $$
265
+ \mathrm{ASSD}(S_{P}, S_{G}) = \frac{1}{|S_{P}| + |S_{G}|} \left( \sum_{p \in S_{P}} d(p, S_{G}) + \sum_{g \in S_{G}} d(g, S_{P}) \right) \tag{20}
266
+ $$
267
+
268
+ $$
269
+ o(p) = \begin{cases} 1, & d(p, S_{G}) < \theta \\ 0, & d(p, S_{G}) \geq \theta \end{cases} \tag{21a}
270
+ $$
271
+
272
+ $$
273
+ \mathrm{SO}(S_{P}) = \frac{\sum_{p \in S_{P}} o(p)}{|S_{P}|}, \tag{21b}
274
+ $$
275
+
276
+ where $S_P$ is the set of surface voxels of the predicted volume and $S_G$ is the set of surface voxels of the ground truth volume. $\|\cdot\|$ is a distance norm between points, e.g., the Euclidean distance. $|\cdot|$ is the number of points in a set. $\theta$ is the maximal distance used to determine whether two points occupy the same spatial position.
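+
+ A brute-force NumPy/SciPy sketch of the surface metrics in Eqs. (18)-(21), taking the surface voxels of the prediction and the ground truth as $(N, 3)$ coordinate arrays; the default value of $\theta$ is an assumption:
+
+ ```python
+ import numpy as np
+ from scipy.spatial.distance import cdist
+
+ def surface_metrics(surf_pred, surf_gt, theta=1.0):
+     """HD, ASSD, and SO from surface-voxel coordinates (Eqs. 18-21)."""
+     dist = cdist(surf_pred, surf_gt)        # pairwise Euclidean distances
+     d_p_to_g = dist.min(axis=1)             # d(p, S_G) for every p in S_P
+     d_g_to_p = dist.min(axis=0)             # d(g, S_P) for every g in S_G
+     hd = max(d_p_to_g.max(), d_g_to_p.max())                                     # Eq. 19
+     assd = (d_p_to_g.sum() + d_g_to_p.sum()) / (len(surf_pred) + len(surf_gt))   # Eq. 20
+     so = (d_p_to_g < theta).mean()                                               # Eq. 21
+     return hd, assd, so
+ ```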
277
+
278
+ # 4.3. Implementation Details
279
+
280
+ All experiments are conducted using PyTorch (version 1.12) on an Ubuntu 18.04 LTS workstation with a 3.70 GHz i7-8700K CPU and a 32 GB V100 GPU.
281
+
282
+ For the 3D CBCT tooth dataset, we resample each 3D image to a uniform voxel spacing of $0.5 \times 0.5 \times 0.5\mathrm{mm}$ and then randomly crop every image to $160 \times 160 \times 96$ size. We also adopt some data augmentation methods such as random elastic deformation, adding Gaussian noise, random flipping, random scaling, random rotation, and random shift. Because of the large range of image Hounsfield Unit (HU) values, we clip the lower and upper bounds of the image values to the range -1412 to 17943. All images are standardized and normalized to fit within an intensity range of [0, 1]. To improve the stability of training, we employ the trick of gradient accumulation. We employ the Adam optimizer, which is initialized with a learning rate of 0.0005 and a weight decay of 0.00005. The learning rate is tuned during training using the ReduceLROnPlateau strategy. All networks are trained for 20 epochs with a batch size of 1.
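+
+ A sketch of the intensity preprocessing and random cropping described above; resampling to $0.5\mathrm{mm}$ spacing, the augmentations, and any additional standardization step are omitted, and the function name and padding behaviour are assumptions:
+
+ ```python
+ import numpy as np
+
+ def preprocess_cbct(volume, crop_size=(160, 160, 96), hu_range=(-1412, 17943)):
+     """Clip HU values, rescale to [0, 1], and take a random crop of crop_size."""
+     vol = np.clip(volume.astype(np.float32), hu_range[0], hu_range[1])
+     vol = (vol - hu_range[0]) / (hu_range[1] - hu_range[0])            # normalize to [0, 1]
+     pads = [(0, max(c - s, 0)) for s, c in zip(vol.shape, crop_size)]  # pad if smaller than the crop
+     vol = np.pad(vol, pads, mode="constant")
+     x, y, z = [np.random.randint(0, s - c + 1) for s, c in zip(vol.shape, crop_size)]
+     return vol[x:x + crop_size[0], y:y + crop_size[1], z:z + crop_size[2]]
+ ```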
283
+
284
+ For the 2D images, we resize each image to $224 \times 224$ pixels and use data augmentation methods such as random resize and crop, color jitter, random Gaussian noise, random flipping, random rotation, and cutout [45]. The batch size is 32 and we employ the AdamW optimizer. On the MMOTU dataset, all networks are trained for a total of 2000 epochs with an initial learning rate of 0.01 and a weight decay of 0.00001, and the CosineAnnealingLR strategy is used to tune the learning rate. To ensure comparability with other methods, the PMFSNet is pre-trained on the 1000-class ImageNet 2012 dataset [46] by default. On the ISIC 2018 dataset, all networks are trained for a total of 150 epochs with an initial learning rate of 0.005 and a weight decay of 0.000001, and the CosineAnnealingWarmRestarts strategy is used to tune the learning rate.
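+
+ These optimizer and scheduler settings map directly onto standard PyTorch components, as sketched below; the placeholder model, the warm-restart period `T_0`, and stepping the schedulers once per epoch are assumptions:
+
+ ```python
+ import torch
+ from torch.optim import AdamW
+ from torch.optim.lr_scheduler import CosineAnnealingLR, CosineAnnealingWarmRestarts
+
+ model = torch.nn.Conv2d(3, 2, 3, padding=1)   # stand-in for PMFSNet2D
+
+ # MMOTU: 2000 epochs, initial lr 0.01, weight decay 1e-5, cosine annealing
+ opt_mmotu = AdamW(model.parameters(), lr=0.01, weight_decay=1e-5)
+ sched_mmotu = CosineAnnealingLR(opt_mmotu, T_max=2000)
+
+ # ISIC 2018: 150 epochs, initial lr 0.005, weight decay 1e-6, cosine annealing with warm restarts
+ opt_isic = AdamW(model.parameters(), lr=0.005, weight_decay=1e-6)
+ sched_isic = CosineAnnealingWarmRestarts(opt_isic, T_0=10)
+
+ # training loop: optimizer.step() per batch, then scheduler.step() once per epoch
+ ```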
285
+
286
+ We propose several scaling versions of the model structure for tasks with various data sizes, and the corresponding scaling versions can be specified by modifying the hyperparameters. They are differentiated by the number of dense-feature-stacking units per stage, the channels per stage, and whether they have a decoder (Figure 3). According to the parameters of these scaling versions in descending order,
287
+
288
+ they are named BASIC, SMALL, and TINY. In addition, the models are referred to as PMFSNet2D and PMFSNet3D according to whether the input dimensions are 2D or 3D. The parameter count, computational complexity, and experimental performance between the different scaling versions are described in detail in Appendix A.
289
+
290
+ # 4.4. Quantitative Evaluations
291
+
292
+ We conduct comparative experiments with various state-of-the-art (SOTA) networks on each of the three public datasets to quantify the superiority of our proposed network in terms of efficiency and performance. Our method is adaptable to both 2D and 3D medical images, demonstrating the scalability and portability of our proposed network.
293
+
294
+ # 4.4.1. Comparisons with the state-of-the-arts on 3D CBCT tooth
295
+
296
+ To evaluate the performance of the PMFSNet on a 3D CBCT tooth dataset, we comprehensively compare it with ten state-of-the-art (SOTA) 3D networks. These networks include UNet3D [8] based on traditional UNet architectures, MultiResUNet3D [49] based on residual structures, DenseVNet [47] and DenseVoxelNet [48] based on dense feature stacking, AttentionUNet3D [7] based on attention mechanism, and transformer-based UNETR [19], SwinUNETR [21], TransBTS [18], nnFormer [22], 3D UX-Net [50]. The comparison results are shown in Table 1.
297
+
298
+ The results presented in Table 1 demonstrate the superior efficiency and effectiveness of PMFSNet in 3D CBCT segmentation tasks compared to other methods (see the Params-FLOPs-IoU correlation comparison in Figure 1). It achieves the best results in all metrics compared to current state-of-the-art methods. Notably, the PMFSNet outperforms the SOTA method DenseVNet in all aspects. More specifically, PMFSNet achieves a reduction in Floating-point Operations (FLOPs) and Parameters (Params) by 8.59 GFLOPs and $0.24\mathrm{M}$ , respectively. Additionally, it shows improvements in various metrics: it reduces the Hausdorff Distance (HD) by $2.64\mathrm{mm}$ and the Average Symmetric Surface Distance (ASSD) by $0.35\mathrm{mm}$ . Moreover, PMFSNet enhances the Intersection over Union (IoU) by $0.11\%$ , the Surface Overlap (SO) by $0.22\%$ , and the Dice Similarity Coefficient (DSC) by $0.15\%$ .
299
+
300
+ The qualitative study in Figure 8 compares the performance of different networks on the 3D CBCT tooth dataset. The images in Figure 8(a) show the segmentation results of different networks for the cusps of the teeth. We can observe that our method can accurately segment tiny regions, while the comparison methods often lead to over-segmentation. The images in Figure 8(c) present the segmentation results of different networks for uneven surfaces of crowns. Compared to other networks, PMFSNet retains more detail for irregularly shaped surface contours. The images in Figure 8(d) present the segmentation results of different networks for cases with missing teeth and interference from other bone tissue. In this challenging situation, PMFSNet demonstrates promising segmentation
301
+
302
+ Table 1 Comparison results of different methods on the 3D CBCT tooth dataset. The best results are in bold. $\uparrow$ means higher values are better, $\downarrow$ means lower values are better.
303
+
304
+ <table><tr><td>Method</td><td>FLOPs(G)↓</td><td>Params(M)↓</td><td>HD(mm)↓</td><td>ASSD(mm)↓</td><td>IoU(%)↑</td><td>SO(%)↑</td><td>DSC(%)↑</td></tr><tr><td>UNet3D [8]</td><td>2223.03</td><td>16.32</td><td>113.79</td><td>22.40</td><td>70.62</td><td>70.72</td><td>36.67</td></tr><tr><td>DenseVNet [47]</td><td>23.73</td><td>0.87</td><td>8.21</td><td>1.14</td><td>84.57</td><td>94.88</td><td>91.15</td></tr><tr><td>AttentionUNet3D [7]</td><td>2720.79</td><td>94.48</td><td>147.10</td><td>61.10</td><td>52.52</td><td>42.49</td><td>64.08</td></tr><tr><td>DenseVoxelNet [48]</td><td>402.32</td><td>1.78</td><td>41.18</td><td>3.88</td><td>81.51</td><td>92.50</td><td>89.58</td></tr><tr><td>MultiResUNet3D [49]</td><td>1505.38</td><td>17.93</td><td>74.06</td><td>8.17</td><td>76.19</td><td>81.70</td><td>65.45</td></tr><tr><td>UNETR [19]</td><td>229.19</td><td>93.08</td><td>107.89</td><td>17.95</td><td>74.30</td><td>73.14</td><td>81.84</td></tr><tr><td>SwinUNETR [21]</td><td>912.35</td><td>62.19</td><td>82.71</td><td>7.50</td><td>83.10</td><td>86.80</td><td>89.74</td></tr><tr><td>TransBTS [18]</td><td>306.80</td><td>33.15</td><td>29.03</td><td>4.10</td><td>82.94</td><td>90.68</td><td>39.32</td></tr><tr><td>nnFormer [22]</td><td>583.49</td><td>149.25</td><td>51.28</td><td>5.08</td><td>83.54</td><td>90.89</td><td>90.66</td></tr><tr><td>3D UX-Net [50]</td><td>1754.79</td><td>53.01</td><td>108.52</td><td>19.69</td><td>75.40</td><td>73.48</td><td>84.89</td></tr><tr><td>PMFSNet3D (Ours)</td><td>15.14</td><td>0.63</td><td>5.57</td><td>0.79</td><td>84.68</td><td>95.10</td><td>91.30</td></tr></table>
305
+
306
+ ![](images/8754d5dd3deee5834526a5355dab81caeac247c4ebcfc880589cd39afaa94e55.jpg)
307
+ Figure 8: Visual comparison with the state-of-the-arts on the 3D CBCT tooth dataset. The colors white, green, and red represent the correct segmentation, the under-segmentation, and the over-segmentation, respectively.
308
+
309
+ performance and does not over-segment other bone tissue or areas of missing teeth.
310
+
311
+ # 4.4.2. Comparisons with the state-of-the-arts on MMOTU
312
+
313
+ On the MMOTU dataset, our proposed PMFSNet is evaluated against six networks used in study [42], including PSPNet [3], DANet [51], SegFormer [52], U-Net [4], TransUNet [15], and BiseNetV2 [53]. To ensure a fair comparison in our study with the MMOTU dataset, we pre-train the PMFSNet on the 1000-class ImageNet 2012 dataset [46]. This approach mirrors the methodology used in the cited experiment, where each network is also loaded with pre-training weights.
314
+
315
+ We can observe from Table 2 that, in terms of segmentation performance, SegFormer (with $7.72\mathrm{M}$ parameters) leads with the highest scores in the IoU and mIoU metrics, achieving $82.46\%$ and $89.88\%$ , respectively. PMFSNet follows closely, attaining an IoU of $82.02\%$ and a mIoU of $89.36\%$ , competing with the highest-performing networks in segmentation while using a significantly smaller parameter count. More specifically, PMFSNet records the lowest FLOPs at 2.21 GFLOPs and maintains the smallest model size with only 0.99 million parameters. In comparison, BiseNetV2 has a relatively small parameter count, yet it is still 5 times larger than PMFSNet. SegFormer, despite its superior segmentation performance, has a model size more than 7 times
316
+
317
+ larger than PMFSNet, highlighting PMFSNet's advantage in terms of efficiency and compactness.
318
+
319
+ Figure 9 illustrates the quantitative results of the PMFSNet on the MMOTU dataset. We can observe that our PMFSNet can achieve promising segmentation performance even in various challenging cases. The images in Figure 9(a) show the segmentation result of a general sample, where PMFSNet can accurately segment the ovarian lesion region. The images in Figure 9(b) show the segmentation result of a sample with an inconspicuous lesion region. It is extremely challenging to identify the lesion region from the original image accurately. The images in Figure 9(c) show the segmentation result of a lesion region with blurred boundaries, where the original image has many dark regions of interference. The images in Figure 9(d) show the segmentation results of a low-contrast sample in which the lesion region is not clearly distinguishable.
320
+
321
+ # 4.4.3. Comparisons with the state-of-the-arts on ISIC 2018
322
+
323
+ We ensure the consistency of the experimental setup on the ISIC 2018 dataset and implement a fair comparison experiment. The networks compared with our PMFSNet include two general segmentation networks and five networks specifically designed for skin lesions. We re-implement U-Net [4], AttU-Net [7], CA-Net [54], BCDU-Net [55], CE-Net [13], CPF-Net [56], and CKDNet [57].
324
+
325
+ Table 2 Comparison results of different methods on the MMOTU dataset. The best results are in bold. $\uparrow$ means higher values are better, $\downarrow$ means lower values are better.
326
+
327
+ <table><tr><td>Method</td><td>FLOPs(G)↓</td><td>Params(M)↓</td><td>IoU(%)↑</td><td>mIoU(%)↑</td><td>Iterations</td></tr><tr><td>PSPNet [3]</td><td>38.71</td><td>53.32</td><td>82.01</td><td>89.41</td><td>20k</td></tr><tr><td>DANet [51]</td><td>10.95</td><td>47.44</td><td>82.20</td><td>89.53</td><td>20k</td></tr><tr><td>SegFormer [52]</td><td>2.52</td><td>7.72</td><td>82.46</td><td>89.88</td><td>80k</td></tr><tr><td>U-Net [4]</td><td>41.93</td><td>31.04</td><td>79.91</td><td>86.80</td><td>80k</td></tr><tr><td>TransUNet [15]</td><td>24.61</td><td>105.28</td><td>81.31</td><td>89.01</td><td>80k</td></tr><tr><td>BiseNetV2 [53]</td><td>3.40</td><td>5.19</td><td>79.37</td><td>86.13</td><td>80k</td></tr><tr><td>PMFSNet2D (Ours)</td><td>2.21</td><td>0.99</td><td>82.02</td><td>89.36</td><td>2k</td></tr></table>
328
+
329
+ Table 3 Comparison results of different methods on the ISIC 2018 dataset. The best results are in bold. $\uparrow$ means higher values are better, $\downarrow$ means lower values are better.
330
+
331
+ <table><tr><td>Method</td><td>FLOPs(G)↓</td><td>Params(M)↓</td><td>IoU(%)↑</td><td>DSC(%)↑</td><td>ACC(%)↑</td></tr><tr><td>U-Net [4]</td><td>41.93</td><td>31.04</td><td>76.77</td><td>86.55</td><td>95.00</td></tr><tr><td>AttU-Net [7]</td><td>51.07</td><td>34.88</td><td>78.19</td><td>87.54</td><td>95.33</td></tr><tr><td>CA-Net [54]</td><td>4.62</td><td>2.79</td><td>68.82</td><td>80.96</td><td>92.96</td></tr><tr><td>BCDU-Net [55]</td><td>31.96</td><td>18.45</td><td>76.46</td><td>86.26</td><td>95.19</td></tr><tr><td>CE-Net [13]</td><td>6.83</td><td>29.00</td><td>78.05</td><td>87.47</td><td>95.40</td></tr><tr><td>CPF-Net [56]</td><td>6.18</td><td>43.27</td><td>78.47</td><td>87.70</td><td>95.52</td></tr><tr><td>CKDNet [57]</td><td>12.69</td><td>59.34</td><td>77.89</td><td>87.35</td><td>95.27</td></tr><tr><td>PMFSNet2D (Ours)</td><td>2.21</td><td>0.99</td><td>78.82</td><td>87.92</td><td>95.59</td></tr></table>
332
+
333
+ ![](images/82a7efe7d5a85ebd8769f2ebbc485966dc8170bb065978f346499dbf7d0c0dee.jpg)
334
+ (a)
335
+
336
+ ![](images/5b6d9c1ff950c43fd6d9f0394cb7118340dfcacfc82bdcae3d85cae58314ade4.jpg)
337
+
338
+ ![](images/b2980eaf37c01e74074bb491be26add7adcd22b543ae7a1ffe57cbbce4bc7f56.jpg)
339
+
340
+ ![](images/ac5816651517ebdc269751af0be60d9c94d1b843c24f0ac801f256ebecdc97c7.jpg)
341
+
342
+ ![](images/b13f5481dcb4cc339f71ae70cdacbb3de721eb534ba5c63183db3850b393d8cb.jpg)
343
+
344
+ ![](images/aea2861e7083c8166d6fd8e35ab5b61b3f34d68a458056754a2c4b645995249c.jpg)
345
+
346
+ ![](images/285110e2286874f347b6c732da5aba93dd1fac11c33f2a0bc9e11e617d2bc532.jpg)
347
+
348
+ ![](images/36bea68b34e4f8c4f8d8bff7a245b29902d2d4dcbdb92859bcb85af191a211a1.jpg)
349
+
350
+ ![](images/94e8d73065d3ab1edd1f7944e02e2f63bc5b6897ca1d01ef2a1da26dd9e1fe3d.jpg)
351
+
352
+ ![](images/6ce927a48055667da5089a4e870b0c9033d3dd56d762d3f915a515d4ce2f2482.jpg)
353
+ Image
354
+
355
+ ![](images/a348626361acf3a19d1a06de1f004f9b32a2d71731d380e676bcb2b34835d5d0.jpg)
356
+ Ground Truth
357
+
358
+ ![](images/df75fe5024da3e7a371c198b04d77ffa86d848dc4dd0620422004a7976a8dc1c.jpg)
359
+ PMFSNet
360
+ Figure 9: Visual segmentation performance of the PMFSNet on the MMOTU dataset. The colors white, green, and red represent the correct segmentation, the under-segmentation, and the over-segmentation, respectively.
361
+
362
+ The results presented in Table 3 demonstrate the superior efficiency and accuracy of PMFSNet in medical image segmentation tasks compared to other methods. Notably, the PMFSNet outperforms the SOTA method AttU-Net in generalized segmentation networks and the SOTA
363
+
364
+ method CPF-Net in specialized segmentation networks in all aspects. More specifically, PMFSNet achieves a reduction in Floating-point Operations (FLOPs) against AttU-Net and CPF-Net by 48.86 GFLOPs and 3.97 GFLOPs respectively, and a reduction in Parameters (Params) by $33.89\mathrm{M}$ and $42.28\mathrm{M}$ respectively. Additionally, it shows improvements in other metrics: it enhances the Intersection over Union (IoU) by $0.63\%$ and $0.35\%$ respectively, the Dice Similarity Coefficient (DSC) by $0.38\%$ and $0.22\%$ respectively, and the Accuracy (ACC) by $0.26\%$ and $0.07\%$ respectively. These results demonstrate that our PMFSNet, as a general segmentation network, generalizes well and can outperform dedicated networks in the skin lesion segmentation task. In addition, the results also demonstrate the feasibility of obtaining better segmentation performance with a much smaller model.
365
+
366
+ Qualitative analysis in Figure 10 compares the performance of our method to that of different networks on the ISIC 2018 dataset. The samples in Figure 10 contain various challenges, including occluded lesions, low-light conditions, low-contrast lesions, and blurred lesion boundaries. More specifically, the images in Figure 10(a) show the segmentation results of different networks for occluded lesions. It can be seen that the comparison methods often lead to over-segmentation when it comes to occluded lesions. The images in Figure 10(b) display the segmentation results of different networks for lesions under a low-light condition. Thanks to the PMFS block's capability to enhance crucial features at the network's bottleneck, PMFSNet delivers
367
+
368
+ ![](images/c0c8a14af1e5e65ad0cc95a766f5a3f666089c7ac71913857dfcb505f3888d89.jpg)
369
+ Figure 10: Visual comparison with the state-of-the-arts on the ISIC 2018 dataset. The colors white, green, and red represent the correct segmentation, the under-segmentation, and the over-segmentation, respectively.
370
+
371
+ Table 4 The effect of PMFS block on segmentation performance on our models. The best results are in bold. $\uparrow$ means higher values are better, $\downarrow$ means lower values are better. $\checkmark$ means with PMFS block, $\times$ means without PMFS block.
372
+
373
+ <table><tr><td>Dataset</td><td>PMFS block</td><td>Params(M)↓</td><td>IoU(%)↑</td></tr><tr><td rowspan="2">3D CBCT tooth</td><td>×</td><td>0.37</td><td>82.00</td></tr><tr><td>✓</td><td>0.63</td><td>84.68</td></tr><tr><td rowspan="2">MMOTU</td><td>×</td><td>0.65</td><td>80.45</td></tr><tr><td>✓</td><td>0.99</td><td>82.02</td></tr><tr><td rowspan="2">ISIC 2018</td><td>×</td><td>0.65</td><td>77.56</td></tr><tr><td>✓</td><td>0.99</td><td>78.82</td></tr></table>
374
+
375
+ superior segmentation performance, particularly for lesions under a low-light condition, with a notable improvement in edge delineation. The images in Figure 10(c) present the segmentation results of different networks for lesions with artifacts. Compared to other networks, our PMFSNet effectively avoids segmentation errors that could be caused by the interference of hairs in the images. The images in Figure 10(d) present the segmentation results of different networks for lesions with blurred boundaries and low-contrast lesions. Even in such challenging conditions, PMFSNet maintains strong generalization performance.
376
+
377
+ # 4.5. Ablation Studies
378
+
379
+ To verify the effectiveness of the PMFS block in enhancing our PMFSNet model's segmentation capabilities, we carry out ablation experiments on various datasets. The results of the ablation experiments on our proposed model regarding PMFS blocks are shown in Table 4. By embedding the PMFS block into two benchmark architectures, PMFSNet3D and PMFSNet2D, we observe an increase in Parameters (Params) of $0.26\mathrm{M}$ and $0.34\mathrm{M}$ , respectively. Despite a minor increase in the Parameters (Params), the resulting performance improvements are significant. Moreover, on the three datasets, the PMFSNet model with PMFS block exhibits significant performance gains, with IoU improvements of $2.68\%$ , $1.57\%$ , and $1.26\%$ , respectively. These results
380
+
381
+ Table 5 The effect of PMFS block on segmentation performance on other models. The best results are in bold. $\uparrow$ means higher values are better, $\downarrow$ means lower values are better. $\checkmark$ means with PMFS block, $\times$ means without PMFS block.
382
+
383
+ <table><tr><td>Method</td><td>PMFS block</td><td>FLOPs(G)↓</td><td>Params(M)↓</td><td>IoU(%)↑</td></tr><tr><td rowspan="2">U-Net</td><td>×</td><td>41.93</td><td>31.04</td><td>76.77</td></tr><tr><td>✓</td><td>42.16</td><td>32.25</td><td>77.21</td></tr><tr><td rowspan="2">CA-Net</td><td>×</td><td>4.62</td><td>2.79</td><td>68.82</td></tr><tr><td>✓</td><td>4.77</td><td>3.64</td><td>74.48</td></tr><tr><td rowspan="2">BCDU-Net</td><td>×</td><td>31.96</td><td>18.45</td><td>76.46</td></tr><tr><td>✓</td><td>32.44</td><td>19.12</td><td>76.87</td></tr></table>
384
+
385
+ verify the effectiveness of the PMFS block in our proposed method.
386
+
387
+ To further illustrate that the PMFS block is both plug-and-play and remains effective when integrated with other models, we conduct ablation experiments applying the PMFS block to various other networks. We select three models from Table 3, i.e., U-Net, CA-Net, and BCDU-Net. Subsequently, we perform a quantitative analysis of the segmentation performance of these three models, comparing their results before and after the integration of the PMFS block.
388
+
389
+ Table 5 demonstrates that integrating PMFS blocks into U-Net, CA-Net, and BCDU-Net results in only a marginal increase in Floating-point Operations (FLOPs) and Parameters (Params). This indicates that the addition of the PMFS block does not significantly impact the computational efficiency of these models. Furthermore, the results reveal that the PMFS block leads to an improvement in the Intersection over Union (IoU) metric for the U-Net, CA-Net, and BCDU-Net models, achieving $77.21\%$ , $74.48\%$ , and $76.87\%$ , respectively. These experimental results suggest that the PMFS block, when integrated into other models, can effectively bolster their feature representation capabilities.
390
+
391
+ # 5. Discussion
392
+
393
+ Current pure Vision Transformers (ViTs) in medical image segmentation have shown the ability to learn long-term dependencies, but this often comes at the expense of increased model complexity and a loss of the inductive bias inherent in CNNs. While some studies have explored hybridizing transformers and CNNs to model both long-term and local dependencies [58], the performance of these large hybrid models can be limited by the typically smaller size of medical image datasets compared to natural image datasets. Large models, whether based on transformers, deep CNNs, or a hybrid, tend to be data-hungry. In contrast, medical datasets are relatively smaller, leading to a higher risk of overfitting during training. Table 1 presents various models based on transformer architecture along with their evaluation results, highlighting this issue.
394
+
395
+ Given this context, we posit that developing a lightweight network for medical image segmentation is crucial and imperative to tackle the problem of redundant computation found in larger models. Our comprehensive results demonstrate that even with a model ( $\leq 1$ million parameters), superior performance can be attained in various segmentation tasks across different data scales.
396
+
397
+ In this work, we optimize the network architecture by employing a streamlined 3-stage encoder and introducing self-attention computation exclusively at the network's bottleneck. We propose a plug-and-play PMFS block to encode long-term dependencies for feature enhancement. To enhance efficiency, we optimize the computational complexity of the attention score matrix through "polarized filtering" and replace standard convolution with depthwise separable convolution. Lastly, we integrate multi-scale features into global channel and spatial dimensions, thereby increasing the number of attention points and enhancing the feature representation.
398
+
399
+ The discourse on encoding local and long-term dependencies is intrinsically linked to the comparative studies between Convolutional Neural Networks (CNNs) and transformers, as well as the exploration of hybrid mechanisms combining these two approaches [59, 60]. CNNs excel in encoding local dependencies due to their inherent architectural design, which emphasizes local receptive fields and hierarchical feature extraction. In contrast, transformers, with their self-attention mechanisms, are adept at capturing long-term dependencies by considering global interactions across an entire input sequence. The hybrid of these two architectures aims to leverage the strengths of both CNNs and ViTs: the local feature extraction prowess of CNNs and the global contextual awareness of transformers.
400
+
401
+ Building upon these pioneering works, our findings reveal that while CNNs learn hierarchical features in a sequential layer-by-layer manner, each layer is processed at a different level of abstraction, implying that shallow features are extracted without direct guidance from deeper layers in the network. In our method, we try to mutually enhance multi-scale features with the idea of parallel computing in the self-attention mechanism. This is due to the
402
+
403
+ fact that transformers employ a parallel computing approach in their self-attention mechanism, focusing on simultaneous feature learning across both channel and spatial dimensions, but normally only at a single scale. In this case, our approach seeks to harness this parallelism for multi-scale feature learning with greater granularity, diverging from the inherent sequential process of CNNs to achieve a more comprehensive and nuanced understanding of feature interactions.
404
+
405
+ In terms of model portability, the proposed lightweight model is conducive to deployment on edge devices and enhances the data stream in current medical imaging equipment, especially during the inference phase. This feature is crucial for real-time applications and plays a significant role in improving the efficiency and accessibility of medical imaging technology across diverse environments. In the future, we will further evaluate the model performance on mobile and edge devices and extend the model to more specific instance segmentation tasks.
406
+
407
+ # 6. Conclusion
408
+
409
+ In this paper, we propose a novel lightweight semantic segmentation network (PMFSNet) designed to adapt to a variety of medical image segmentation tasks in different image modalities and take into account the balance between efficiency and performance. We optimize the network architecture by employing a streamlined 3-stage encoder and introducing self-attention computation exclusively at the network's bottleneck. The proposed plug-and-play PMFS block is a multi-scale feature enhancement module based on the self-attention mechanism, to encode long-term dependencies and incorporate global contextual features. To enhance efficiency, we optimize the computational complexity of the attention score matrix and adopt the depthwise separable convolution. The experiments on three public datasets containing both 2D and 3D modalities show that PMFSNet has strong application scalability for a variety of medical image segmentation tasks. The proposed method achieves competitive segmentation performance compared to the current SOTA methods across several 2D and 3D medical imaging segmentation tasks, achieving this with $27.6\%$ fewer parameters than DenseVNet, $87.2\%$ fewer parameters than SegFormer, and $97.7\%$ fewer parameters than CPF-Net. This also demonstrates the potential of our proposed approach in optimizing model integration and deployment.
410
+
411
+ # CRediT authorship contribution statement
412
+
413
+ Jiahui Zhong: Conceptualization, Data curation, Investigation, Methodology, Project administration, Writing - original draft. Wenhong Tian: Conceptualization, Funding acquisition, Methodology, Supervision, Validation, Writing - review & editing. Yuanlun Xie: Conceptualization, Methodology, Writing - review & editing. Zhijia Liu: Data curation, Writing - review & editing. Jie Ou: Writing - review & editing. Taoran Tian: Data curation, Writing - review &
414
+
415
+ editing. Lei Zhang: Conceptualization, Writing - review & editing.
416
+
417
+ # Declaration of competing interest
418
+
419
+ The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.
420
+
421
+ # Declaration of Generative AI and AI-assisted technologies in the writing process
422
+
423
+ During the preparation of this work, the author(s) did not use any generative artificial intelligence (AI) or AI-assisted technologies for the writing process. The content was produced solely by the human author(s) without the aid of such tools, and the author(s) take(s) full responsibility for the content of the publication.
424
+
425
+ # Acknowledgements
426
+
427
+ This research is partially supported by the National Key Research and Development Program of China with Grant ID 2018AAA0103203.
428
+
429
+ # References
430
+
431
+ [1] M. Monteiro, V. F. Newcombe, F. Mathieu, K. Adatia, K. Kamnitsas, E. Ferrante, T. Das, D. Whitehouse, D. Rueckert, D. K. Menon, et al., Multiclass semantic segmentation and quantification of traumatic brain injury lesions on head ct using deep learning: an algorithm development and multicentre validation study, The Lancet Digital Health 2 (2020) e314-e322.
432
+ [2] J. Long, E. Shelhamer, T. Darrell, Fully convolutional networks for semantic segmentation, in: Proceedings of the IEEE conference on computer vision and pattern recognition, 2015, pp. 3431-3440.
433
+ [3] H. Zhao, J. Shi, X. Qi, X. Wang, J. Jia, Pyramid scene parsing network, in: Proceedings of the IEEE conference on computer vision and pattern recognition, 2017, pp. 2881-2890.
434
+ [4] O. Ronneberger, P. Fischer, T. Brox, U-net: Convolutional networks for biomedical image segmentation, in: Medical Image Computing and Computer-Assisted Intervention-MICCAI 2015: 18th International Conference, Munich, Germany, October 5-9, 2015, Proceedings, Part III 18, Springer, 2015, pp. 234-241.
435
+ [5] Z. Zhou, M. M. Rahman Siddiquee, N. Tajbakhsh, J. Liang, Unet++: A nested u-net architecture for medical image segmentation, in: Deep Learning in Medical Image Analysis and Multimodal Learning for Clinical Decision Support: 4th International Workshop, DLMIA 2018, and 8th International Workshop, ML-CDS 2018, Held in conjunction with MICCAI 2018, Granada, Spain, September 20, 2018, Proceedings 4, Springer, 2018, pp. 3-11.
436
+ [6] M. Z. Alom, M. Hasan, C. Yakopcic, T. M. Taha, V. K. Asari, Recurrent residual convolutional neural network based on u-net (r2u-net) for medical image segmentation, arXiv preprint arXiv:1802.06955 (2018).
437
+ [7] O. Oktay, J. Schlemper, L. L. Folgoc, M. Lee, M. Heinrich, K. Misawa, K. Mori, S. McDonagh, N. Y. Hammerla, B. Kainz, et al., Attention u-net: Learning where to look for the pancreas, arXiv preprint arXiv:1804.03999 (2018).
438
+ [8] Ö. Çiçek, A. Abdulkadir, S. S. Lienkamp, T. Brox, O. Ronneberger, 3d u-net: learning dense volumetric segmentation from sparse annotation, in: Medical Image Computing and Computer-Assisted Intervention-MICCAI 2016: 19th International Conference, Athens, Greece, October 17-21, 2016, Proceedings, Part II 19, Springer, 2016, pp. 424-432.
439
+
440
+ [9] F. Milletari, N. Navab, S.-A. Ahmadi, V-net: Fully convolutional neural networks for volumetric medical image segmentation, in: 2016 fourth international conference on 3D vision (3DV), IEEE, 2016, pp. 565-571.
441
+ [10] H. Hu, Z. Zhang, Z. Xie, S. Lin, Local relation networks for image recognition, in: Proceedings of the IEEE/CVF International Conference on Computer Vision, 2019, pp. 3464-3473.
442
+ [11] P. Ramachandran, N. Parmar, A. Vaswani, I. Bello, A. Levskaya, J. Shlens, Stand-alone self-attention in vision models, Advances in neural information processing systems 32 (2019).
443
+ [12] L.-C. Chen, Y. Zhu, G. Papandreou, F. Schroff, H. Adam, Encoder-decoder with atrous separable convolution for semantic image segmentation, in: Proceedings of the European conference on computer vision (ECCV), 2018, pp. 801-818.
444
+ [13] Z. Gu, J. Cheng, H. Fu, K. Zhou, H. Hao, Y. Zhao, T. Zhang, S. Gao, J. Liu, Ce-net: Context encoder network for 2d medical image segmentation, IEEE transactions on medical imaging 38 (2019) 2281-2292.
445
+ [14] A. Dosovitskiy, L. Beyer, A. Kolesnikov, D. Weissenborn, X. Zhai, T. Unterthiner, M. Dehghani, M. Minderer, G. Heigold, S. Gelly, et al., An image is worth 16x16 words: Transformers for image recognition at scale, arXiv preprint arXiv:2010.11929 (2020).
446
+ [15] J. Chen, Y. Lu, Q. Yu, X. Luo, E. Adeli, Y. Wang, L. Lu, A. L. Yuille, Y. Zhou, Transunet: Transformers make strong encoders for medical image segmentation, arXiv preprint arXiv:2102.04306 (2021).
447
+ [16] C. Wang, L. Wang, N. Wang, X. Wei, T. Feng, M. Wu, Q. Yao, R. Zhang, Cfatransunet: Channel-wise cross fusion attention and transformer for 2d medical image segmentation, Computers in Biology and Medicine 168 (2024) 107803.
448
+ [17] J. M. J. Valanarasu, P. Oza, I. Hacihaliloglu, V. M. Patel, Medical transformer: Gated axial-attention for medical image segmentation, in: Medical Image Computing and Computer Assisted Intervention-MICCAI 2021: 24th International Conference, Strasbourg, France, September 27-October 1, 2021, Proceedings, Part I 24, Springer, 2021, pp.36-46.
449
+ [18] W. Wang, C. Chen, M. Ding, H. Yu, S. Zha, J. Li, Transbts: Multimodal brain tumor segmentation using transformer, in: Medical Image Computing and Computer Assisted Intervention-MICCAI 2021: 24th International Conference, Strasbourg, France, September 27-October 1, 2021, Proceedings, Part I 24, Springer, 2021, pp. 109-119.
450
+ [19] A. Hatamizadeh, Y. Tang, V. Nath, D. Yang, A. Myronenko, B. Landman, H. R. Roth, D. Xu, Unetr: Transformers for 3d medical image segmentation, in: Proceedings of the IEEE/CVF winter conference on applications of computer vision, 2022, pp. 574-584.
451
+ [20] H. Cao, Y. Wang, J. Chen, D. Jiang, X. Zhang, Q. Tian, M. Wang, Swin-unet: Unet-like pure transformer for medical image segmentation, in: European conference on computer vision, Springer, 2022, pp. 205-218.
452
+ [21] A. Hatamizadeh, V. Nath, Y. Tang, D. Yang, H. R. Roth, D. Xu, Swin unetr: Swin transformers for semantic segmentation of brain tumors in mri images, in: International MICCAI Brainlesion Workshop, Springer, 2021, pp. 272-284.
453
+ [22] H.-Y. Zhou, J. Guo, Y. Zhang, X. Han, L. Yu, L. Wang, Y. Yu, nnformer: volumetric medical image segmentation via a 3d transformer, IEEE Transactions on Image Processing (2023).
454
+ [23] L. Liu, S. Chen, F. Zhang, F.-X. Wu, Y. Pan, J. Wang, Deep convolutional neural network for automatically segmenting acute ischemic stroke lesion in multi-modality mri, Neural Computing and Applications 32 (2020) 6545-6558.
455
+ [24] F. Isensee, P. F. Jaeger, S. A. Kohl, J. Petersen, K. H. Maier-Hein, nnunet: a self-configuring method for deep learning-based biomedical image segmentation, Nature methods 18 (2021) 203–211.
456
+ [25] C. Szegedy, S. Ioffe, V. Vanhoucke, A. Alemi, Inception-v4, inception-resnet and the impact of residual connections on learning, in: Proceedings of the AAAI conference on artificial intelligence, volume 31, 2017.
457
+ [26] F. Chollet, Xception: Deep learning with depthwise separable convolutions, in: Proceedings of the IEEE conference on computer vision
458
+
459
+ and pattern recognition, 2017, pp. 1251-1258.
460
+ [27] M. Sandler, A. Howard, M. Zhu, A. Zhmoginov, L.-C. Chen, Mobilenetv2: Inverted residuals and linear bottlenecks, in: Proceedings of the IEEE conference on computer vision and pattern recognition, 2018, pp. 4510-4520.
461
+ [28] F. Yu, V. Koltun, Multi-scale context aggregation by dilated convolutions, arXiv preprint arXiv:1511.07122 (2015).
462
+ [29] F. N. Iandola, S. Han, M. W. Moskewicz, K. Ashraf, W. J. Dally, K. Keutzer, Squeezenet: Alexnet-level accuracy with 50x fewer parameters and $< 0.5$ mb model size, arXiv preprint arXiv:1602.07360 (2016).
463
+ [30] S. Xie, R. Girshick, P. Dollar, Z. Tu, K. He, Aggregated residual transformations for deep neural networks, in: Proceedings of the IEEE conference on computer vision and pattern recognition, 2017, pp. 1492-1500.
464
+ [31] X. Zhang, X. Zhou, M. Lin, J. Sun, Shufflenet: An extremely efficient convolutional neural network for mobile devices, in: Proceedings of the IEEE conference on computer vision and pattern recognition, 2018, pp. 6848-6856.
465
+ [32] X. Chen, R. Zhang, P. Yan, Feature fusion encoder decoder network for automatic liver lesion segmentation, in: 2019 IEEE 16th international symposium on biomedical imaging (ISBI 2019), IEEE, 2019, pp. 430-433.
466
+ [33] C. Kaul, S. Manandhar, N. Pears, Focusnet: An attention-based fully convolutional network for medical image segmentation, in: 2019 IEEE 16th international symposium on biomedical imaging (ISBI 2019), IEEE, 2019, pp. 455-458.
467
+ [34] Z. Wang, N. Zou, D. Shen, S. Ji, Non-local u-nets for biomedical image segmentation, in: Proceedings of the AAAI conference on artificial intelligence, volume 34, 2020, pp. 6315-6322.
468
+ [35] Y. Jiang, Y. Zhang, X. Lin, J. Dong, T. Cheng, J. Liang, Swinbts: A method for 3d multimodal brain tumor segmentation using swin transformer, Brain sciences 12 (2022) 797.
469
+ [36] Y. Xie, J. Zhang, C. Shen, Y. Xia, Cotr: Efficiently bridging cnn and transformer for 3d medical image segmentation, in: Medical Image Computing and Computer Assisted Intervention-MICCAI 2021: 24th International Conference, Strasbourg, France, September 27-October 1, 2021, Proceedings, Part III 24, Springer, 2021, pp. 171-180.
470
+ [37] F. Shamshad, S. Khan, S. W. Zamir, M. H. Khan, M. Hayat, F. S. Khan, H. Fu, Transformers in medical imaging: A survey, Medical Image Analysis (2023) 102802.
471
+ [38] B. Hu, P. Zhou, H. Yu, Y. Dai, M. Wang, S. Tan, Y. Sun, Leanet: Lightweight u-shaped architecture for high-performance skin cancer image segmentation, Computers in Biology and Medicine (2024) 107919.
472
+ [39] Z. Lu, C. She, W. Wang, Q. Huang, Lm-net: A light-weight and multi-scale network for medical image segmentation, Computers in Biology and Medicine 168 (2024) 107717.
473
+ [40] H. Liu, F. Liu, X. Fan, D. Huang, Polarized self-attention: Towards high-quality pixel-wise regression, arXiv preprint arXiv:2107.00782 (2021).
474
+ [41] Z. Cui, Y. Fang, L. Mei, B. Zhang, B. Yu, J. Liu, C. Jiang, Y. Sun, L. Ma, J. Huang, et al., A fully automatic ai system for tooth and alveolar bone segmentation from cone-beam ct images, Nature communications 13 (2022) 2096.
475
+ [42] Q. Zhao, S. Lyu, W. Bai, L. Cai, B. Liu, M. Wu, X. Sang, M. Yang, L. Chen, A multi-modality ovarian tumor ultrasound image dataset for unsupervised cross-domain semantic segmentation, arXiv preprint arXiv:2207.06799 (2022).
476
+ [43] N. Codella, V. Rotemberg, P. Tschandl, M. E. Celebi, S. Dusza, D. Gutman, B. Helba, A. Kalloo, K. Liopyris, M. Marchetti, et al., Skin lesion analysis toward melanoma detection 2018: A challenge hosted by the international skin imaging collaboration (isic), arXiv preprint arXiv:1902.03368 (2019).
477
+ [44] P. Tschandl, C. Rosendahl, H. Kittler, The ham10000 dataset, a large collection of multi-source dermatoscopic images of common pigmented skin lesions, Scientific data 5 (2018) 1-9.
478
+ [45] T. DeVries, G. W. Taylor, Improved regularization of convolutional neural networks with cutout, arXiv preprint arXiv:1708.04552 (2017).
481
+ [46] O. Russakovsky, J. Deng, H. Su, J. Krause, S. Satheesh, S. Ma, Z. Huang, A. Karpathy, A. Khosla, M. Bernstein, et al., Imagenet large scale visual recognition challenge, International journal of computer vision 115 (2015) 211-252.
482
+ [47] E. Gibson, F. Giganti, Y. Hu, E. Bonmati, S. Bandula, K. Gurusamy, B. Davidson, S. P. Pereira, M. J. Clarkson, D. C. Barratt, Automatic multi-organ segmentation on abdominal ct with dense v-networks, IEEE transactions on medical imaging 37 (2018) 1822–1834.
483
+ [48] L. Yu, J.-Z. Cheng, Q. Dou, X. Yang, H. Chen, J. Qin, P.-A. Heng, Automatic 3d cardiovascular mr segmentation with densely-connected volumetric convnets, in: Medical Image Computing and Computer-Assisted Intervention- MICCAI 2017: 20th International Conference, Quebec City, QC, Canada, September 11-13, 2017, Proceedings, Part II 20, Springer, 2017, pp. 287-295.
484
+ [49] N. Ibtehaz, M. S. Rahman, Multiresunet: Rethinking the u-net architecture for multimodal biomedical image segmentation, Neural networks 121 (2020) 74-87.
485
+ [50] H. H. Lee, S. Bao, Y. Huo, B. A. Landman, 3d ux-net: A large kernel volumetric convnet modernizing hierarchical transformer for medical image segmentation, arXiv preprint arXiv:2209.15076 (2022).
486
+ [51] J. Fu, J. Liu, H. Tian, Y. Li, Y. Bao, Z. Fang, H. Lu, Dual attention network for scene segmentation, in: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2019, pp. 3146-3154.
487
+ [52] E. Xie, W. Wang, Z. Yu, A. Anandkumar, J. M. Alvarez, P. Luo, Segformer: Simple and efficient design for semantic segmentation with transformers, Advances in Neural Information Processing Systems 34 (2021) 12077-12090.
488
+ [53] C. Yu, C. Gao, J. Wang, G. Yu, C. Shen, N. Sang, Bisenet v2: Bilateral network with guided aggregation for real-time semantic segmentation, International Journal of Computer Vision 129 (2021) 3051-3068.
489
+ [54] R. Gu, G. Wang, T. Song, R. Huang, M. Aertsen, J. Deprest, S. Ourselin, T. Vercauteren, S. Zhang, Ca-net: Comprehensive attention convolutional neural networks for explainable medical image segmentation, IEEE transactions on medical imaging 40 (2020) 699-711.
490
+ [55] R. Azad, M. Asadi-Aghbolaghi, M. Fathy, S. Escalera, Bi-directional convlstm u-net with densley connected convolutions, in: Proceedings of the IEEE/CVF international conference on computer vision workshops, 2019, pp. 0-0.
491
+ [56] S. Feng, H. Zhao, F. Shi, X. Cheng, M. Wang, Y. Ma, D. Xiang, W. Zhu, X. Chen, Cpfnet: Context pyramid fusion network for medical image segmentation, IEEE transactions on medical imaging 39 (2020) 3008-3018.
492
+ [57] Q. Jin, H. Cui, C. Sun, Z. Meng, R. Su, Cascade knowledge diffusion network for skin lesion diagnosis and segmentation, Applied soft computing 99 (2021) 106881.
493
+ [58] H. Yan, Z. Li, W. Li, C. Wang, M. Wu, C. Zhang, Contnet: Why not use convolution and transformer at the same time?, arXiv preprint arXiv:2104.13497 (2021).
494
+ [59] S. L. Smith, A. Brock, L. Berrada, S. De, Convnets match vision transformers at scale, arXiv preprint arXiv:2310.16764 (2023).
495
+ [60] A. Trockman, J. Z. Kolter, Patches are all you need?, arXiv preprint arXiv:2201.09792 (2022).
496
+
497
+ Table A.1 Comparison results of the classic decoder and the direct fusion in the PMFSNet. The best results are in bold. $\uparrow$ means higher values are better.
498
+
499
+ <table><tr><td>Dataset</td><td>Up-sampling</td><td>IoU(%)↑</td></tr><tr><td rowspan="2">3D CBCT tooth</td><td>direct fusion</td><td>84.68</td></tr><tr><td>progressive up-sampling</td><td>84.41</td></tr><tr><td rowspan="2">MMOTU</td><td>direct fusion</td><td>80.03</td></tr><tr><td>progressive up-sampling</td><td>82.02</td></tr><tr><td rowspan="2">ISIC 2018</td><td>direct fusion</td><td>78.19</td></tr><tr><td>progressive up-sampling</td><td>78.82</td></tr></table>
500
+
501
+ # Appendix A Evaluation of the different PMFSNet settings
502
+
503
+ Conventional UNet-based models use a decoder to recover the image resolution, combining up-sampling with convolutional operations to limit the loss of semantic information and to extract important features. In our PMFSNet, the traditional 4-stage encoder is reduced to a 3-stage encoder, so the number of up-sampling steps is reduced accordingly. Furthermore, considering that PMFSNet is designed to capture long-term dependencies and is tailored for lightweight segmentation tasks, we also develop a decoder that directly fuses feature maps from multiple scales through a convolution block (see Figure 3). This direct fusion differs from the classic decoder used in UNet, where up-sampling is achieved progressively through deconvolution. We conduct experiments on various architectures and datasets to examine how the classic UNet decoder and our direct fusion affect performance.
504
+
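+ As a rough illustration of the two up-sampling strategies compared here, the following PyTorch-style sketch contrasts direct fusion with a classic progressive decoder. This is not the authors' implementation: the module layout, the three-scale inputs, and the channel sizes (borrowed from the BASIC configuration) are assumptions made purely for illustration.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class DirectFusionHead(nn.Module):
    """Direct fusion: every encoder scale is upsampled to the target
    resolution in one step and fused by a single convolution block."""
    def __init__(self, in_channels=(24, 48, 64), num_classes=2):
        super().__init__()
        self.fuse = nn.Conv2d(sum(in_channels), num_classes, kernel_size=3, padding=1)

    def forward(self, feats, out_size):
        # feats: feature maps at 1/2, 1/4, and 1/8 of the input resolution
        up = [F.interpolate(f, size=out_size, mode="bilinear", align_corners=False) for f in feats]
        return self.fuse(torch.cat(up, dim=1))

class ProgressiveDecoder(nn.Module):
    """Classic UNet-style decoder: resolution is recovered step by step
    with transposed convolutions and skip connections."""
    def __init__(self, in_channels=(24, 48, 64), num_classes=2):
        super().__init__()
        c1, c2, c3 = in_channels
        self.up2 = nn.ConvTranspose2d(c3, c2, kernel_size=2, stride=2)
        self.dec2 = nn.Conv2d(2 * c2, c2, kernel_size=3, padding=1)
        self.up1 = nn.ConvTranspose2d(c2, c1, kernel_size=2, stride=2)
        self.dec1 = nn.Conv2d(2 * c1, c1, kernel_size=3, padding=1)
        self.head = nn.Conv2d(c1, num_classes, kernel_size=1)

    def forward(self, feats):
        f1, f2, f3 = feats  # shallow (1/2) to deep (1/8) feature maps
        x = self.dec2(torch.cat([self.up2(f3), f2], dim=1))
        x = self.dec1(torch.cat([self.up1(x), f1], dim=1))
        return self.head(x)  # a final interpolation to full resolution would follow
```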
505
+ As shown in Table A.1, we evaluate the networks with the classic progressive decoder or the direct fusion on the three datasets. We can observe that the PMFSNet3D architecture achieves better segmentation performance (with IoU of $84.68\%$ ) when using direct fusion up-sampling, while the PMFSNet2D architecture achieves better results (with IoU of $82.02\%$ and $78.82\%$ on the MMOTU and ISIC 2018 datasets, respectively) on both of the other datasets when using the classic progressive decoder.
506
+
507
+ For medical image segmentation tasks of various scales and dimensions, we design multiple scaling versions of the PMFSNet architecture, including three scales: BASIC, SMALL, and TINY. BASIC is the basic scaling version, which differs from the other two scaling versions in that it comes with a decoder, more channels, and more dense-feature-stacking units at each stage. Specifically, BASIC configures the internal channels of the PMFS block to 64, the base channels to [24, 48, 64] for each stage, the channels of skip connections to [24, 48, 64] for each stage, and the number of dense-feature-stacking units to [5, 10, 10] for each stage. For the SMALL scaling version, the number of channels in its PMFS block is reduced from 64 to 48, with fewer base channels of [24, 24, 24] for each stage, and the channels of skip connections of [12, 24, 24] for each stage.
508
+
509
+ Table A.2 Comparison results of different scaling versions of the PMFSNet. The best results are in bold. $\uparrow$ means higher values are better, $\downarrow$ means lower values are better.
510
+
511
+ <table><tr><td>Dataset</td><td>Scale</td><td>Params(M)↓</td><td>HD(mm)↓</td><td>IoU(%)↑</td></tr><tr><td rowspan="3">3D CBCT tooth</td><td>BASIC</td><td>2.27</td><td>8.09</td><td>86.56</td></tr><tr><td>SMALL</td><td>1.21</td><td>12.90</td><td>85.15</td></tr><tr><td>TINY</td><td>0.63</td><td>5.57</td><td>84.68</td></tr><tr><td rowspan="3">MMOTU</td><td>BASIC</td><td>0.99</td><td>-</td><td>82.02</td></tr><tr><td>SMALL</td><td>0.54</td><td>-</td><td>79.37</td></tr><tr><td>TINY</td><td>0.33</td><td>-</td><td>77.72</td></tr><tr><td rowspan="3">ISIC 2018</td><td>BASIC</td><td>0.99</td><td>-</td><td>78.82</td></tr><tr><td>SMALL</td><td>0.54</td><td>-</td><td>77.82</td></tr><tr><td>TINY</td><td>0.33</td><td>-</td><td>77.25</td></tr></table>
512
+
513
+ For the smallest scaling version, TINY, the number of dense-feature-stacking units per stage is further reduced to [3, 5, 5]. We quantitatively evaluate the most suitable scaling version on the three datasets.
514
+
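+ The scaling versions described above can be summarized in a hypothetical configuration sketch. The field names below are illustrative only; values are taken from the text where stated, and the entries marked as assumptions are not specified explicitly in the paper.

```python
# Hypothetical configuration dictionaries for the three scaling versions.
# Field names are illustrative; values come from the text where given.
# TINY is assumed to inherit the SMALL channel settings, since the text only
# states that its dense-feature-stacking units shrink and (Appendix B) that
# its PMFS block uses 48 internal channels.
PMFSNET_SCALES = {
    "BASIC": {
        "pmfs_channels": 64,
        "base_channels": [24, 48, 64],
        "skip_channels": [24, 48, 64],
        "units_per_stage": [5, 10, 10],
        "with_decoder": True,
    },
    "SMALL": {
        "pmfs_channels": 48,
        "base_channels": [24, 24, 24],
        "skip_channels": [12, 24, 24],
        "units_per_stage": [5, 10, 10],   # assumption: same as BASIC
        "with_decoder": False,
    },
    "TINY": {
        "pmfs_channels": 48,              # from Appendix B
        "base_channels": [24, 24, 24],    # assumption: same as SMALL
        "skip_channels": [12, 24, 24],    # assumption: same as SMALL
        "units_per_stage": [3, 5, 5],
        "with_decoder": False,
    },
}
```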
515
+ As shown in Table A.2, the baseline model used on the 3D CBCT tooth dataset is PMFSNet3D, whose Parameters (Params) for the scaling versions of BASIC, SMALL, and TINY are $2.27\mathrm{M}$ , $1.21\mathrm{M}$ , and $0.63\mathrm{M}$ , respectively. BASIC is 3.6 times as large as TINY, and SMALL is roughly twice as large as TINY. Regarding segmentation performance, their HDs are $8.09\mathrm{mm}$ , $12.9\mathrm{mm}$ , and $5.57\mathrm{mm}$ , respectively, which suggests that the more lightweight TINY is better suited to segmenting the surface boundaries in our task. BASIC has the best segmentation accuracy, with an IoU of $86.56\%$ that outperforms SMALL and TINY by $1.41\%$ and $1.88\%$ , respectively. Combined with the observations from Table 1, we find that the TINY scaling version already achieves the best performance among competing methods, so to balance performance and efficiency we default to PMFSNet3D with the TINY architecture. On the MMOTU and ISIC 2018 datasets, we use the PMFSNet2D benchmark architecture. The Parameters (Params) of the three scaling versions, BASIC, SMALL, and TINY, are $0.99\mathrm{M}$ , $0.54\mathrm{M}$ , and $0.33\mathrm{M}$ , respectively, which are all $\leq 1$ million. The comparative analysis shows that BASIC's IoU on the MMOTU dataset exceeds SMALL and TINY by $2.65\%$ and $4.3\%$ , respectively, and its IoU on the ISIC 2018 dataset exceeds SMALL and TINY by $1\%$ and $1.57\%$ , respectively. BASIC thus provides a considerable improvement over the other two scaling versions, and since the parameters of all three scaling versions remain small, we prefer BASIC for our 2D experiments.
516
+
517
+ # Appendix B Evaluation of the PMFS block channels
518
+
519
+ Our proposed PMFS block is a plug-and-play multi-scale feature enhancement module that can be easily extended to the network's bottleneck in any UNet-based architecture. Because of this, we can arbitrarily set the number of channels inside the PMFS block according to the complexity of the network.
520
+
521
+ Table B.1 Comparison results of different numbers of channels in the PMFS block. The best results are in bold. $\uparrow$ means higher values are better, $\downarrow$ means lower values are better.
522
+
523
+ <table><tr><td>Dataset&amp;Scale</td><td>Channel</td><td>Params(M)↓</td><td>IoU(%)↑</td></tr><tr><td rowspan="3">Tooth&amp;3D-TINY</td><td>32</td><td>0.54</td><td>84.32</td></tr><tr><td>48</td><td>0.63</td><td>84.68</td></tr><tr><td>64</td><td>0.76</td><td>84.41</td></tr><tr><td rowspan="3">MMOTU&amp;2D-BASIC</td><td>32</td><td>0.76</td><td>80.66</td></tr><tr><td>48</td><td>0.85</td><td>80.58</td></tr><tr><td>64</td><td>0.99</td><td>82.02</td></tr><tr><td rowspan="3">ISIC 2018&amp;2D-BASIC</td><td>32</td><td>0.76</td><td>78.44</td></tr><tr><td>48</td><td>0.85</td><td>78.37</td></tr><tr><td>64</td><td>0.99</td><td>78.82</td></tr></table>
524
+
525
+ To exploit the performance of the PMFS block as much as possible in our experiments, we evaluate the PMFS block with internal channels of 32, 48, and 64 on multiple scaling versions of the PMFSNet.
526
+
527
+ The evaluation results in Table B.1 show that the number of Parameters (Params) fluctuates only slightly when the internal channels of the PMFS block are set to 32, 48, or 64. The PMFSNet3D-TINY network achieves the best performance on the 3D CBCT tooth dataset when the internal channel of the PMFS block is set to 48, with an IoU of $84.68\%$ , so for the TINY scaling version of the PMFSNet we set the internal channel of the PMFS block to 48. The PMFSNet2D-BASIC network achieves the best performance on the MMOTU dataset when the internal channel of the PMFS block is set to 64, with an IoU that is $1.36\%$ and $1.44\%$ higher than with channel settings of 32 and 48, respectively. Similarly, with 64 channels it achieves the highest IoU of $78.82\%$ on the ISIC 2018 dataset.
2401.07xxx/2401.07579/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6b179d65d7145b86062f15486729259529c18e835eed813d8cdd41040ead58ba
3
+ size 882272
2401.07xxx/2401.07579/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.07xxx/2401.07589/a05743a1-0320-46f8-acb1-1a68ad3d9613_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.07xxx/2401.07589/a05743a1-0320-46f8-acb1-1a68ad3d9613_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.07xxx/2401.07589/a05743a1-0320-46f8-acb1-1a68ad3d9613_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:87ebe8502d9b5eb0e260b15dde0d90426930e64041cd3fd7bbda09fb74063b87
3
+ size 24355557
2401.07xxx/2401.07589/full.md ADDED
@@ -0,0 +1,590 @@
1
+ # Semantic Scene Segmentation for Robotics
2
+
3
+ Juana Valeria Hurtado and Abhinav Valada<sup>1</sup>
4
+
5
+ # ABSTRACT
6
+
7
+ Comprehensive scene understanding is a critical enabler of robot autonomy. Semantic segmentation is one of the key scene understanding tasks which is pivotal for several robotics applications including autonomous driving, domestic service robotics, last mile delivery, amongst many others. Semantic segmentation is a dense prediction task that aims to provide a scene representation in which each pixel of an image is assigned a semantic class label. Therefore, semantic segmentation considers the full scene context, incorporating the object category, location, and shape of all the scene elements, including the background. Numerous algorithms have been proposed for semantic segmentation over the years. However, the recent advances in deep learning combined with the boost in the computational capacity and the availability of large-scale labeled datasets have led to significant advances in semantic segmentation. In this chapter, we introduce the task of semantic segmentation and present the deep learning techniques that have been proposed to address this task over the years. We first define the task of semantic segmentation and contrast it with other closely related scene understanding problems. We detail different algorithms and architectures for semantic segmentation and the commonly employed loss functions. Furthermore, we present an overview of datasets, benchmarks, and metrics that are used in semantic segmentation. We conclude the chapter with a discussion of challenges and opportunities for further research in this area.
8
+
9
+ # KEYWORDS
10
+
11
+ Semantic Segmentation, Scene Understanding, Visual Learning, Deep Learning
12
+
13
+ # 1.1 INTRODUCTION
14
+
15
+ In order for robots to interact with the world, they should first have the ability to comprehensively understand the scene around them. The efficiency with which a robot performs a task, navigates, or interacts, strongly depends on how accurately it can comprehend its surroundings. Furthermore, the ability to understand context is crucial for safe operation in diverse environments [1]. However, accurate interpretation of the environment is extremely challenging, especially in real-world urban scenarios that are complex and dynamic. In these environments, robots are expected to perform their tasks precisely while they encounter diverse
16
+
17
+ ![](images/25c0ba2d3be04ad175dae938434268a980f4f4ab016d7b16bc9e5b2d7c480f03.jpg)
18
+
19
+ ![](images/2a530edf2541506aa208b2031d32d988379de0c33650ec8f006c857c53a1bef9.jpg)
20
+
21
+ ![](images/c236c6b3bf5f08570ad164649a91eb38575bbb1bcfe8059946724e7c26ab6b55.jpg)
22
+
23
+ ![](images/b6640438988cb6b288870c05bc51621aa14406635f7acae23207d00083ec8cdd.jpg)
24
+
25
+ ![](images/641138f72f5c000bdba4f4c66b3dc89c926ef6e42c76146f7c40d02af61c7fe2.jpg)
26
+
27
+ ![](images/6054f4d9bd8bae85ba40a18a7137b1d36757be19688daee0c91bfbb71ef76162.jpg)
28
+ Input
29
+
30
+ ![](images/cd49df5b36abd617a5f6fc87cfd3cb8e3126973b28d187011a7754a17c9d0b55.jpg)
31
+ Object Classification
32
+
33
+ ![](images/82a09b4a78b78b2c30d9b38d95e766a86993de3cb1927767f81be13c152cbd3a.jpg)
34
+ Object Detection
35
+ FIGURE 1.1 Each row shows an example with an input image and the corresponding output of different scene understanding tasks. Object classification identifies "what" objects compose the image, object detection predicts "where" the objects are located in the image, object segmentation outputs a mask that indicates the shape of the object. Semantic Segmentation further details the input image by predicting the label of all the pixels, including the background.
36
+
37
+ ![](images/ecdae9f6a9c7631bd405d7a9186ef94ffd9b0afcd83d1710b45fdfba83c0e841.jpg)
38
+ Object Segmentation
39
+
40
+ ![](images/2e00393f30421abef428e66ed0b72fa73b102dce9c3dc6388b36881d9ae255c0.jpg)
41
+ Semantic Segmentation
42
+
43
+ agents and objects. These environments themselves undergo appearance changes due to illumination variations and weather conditions, further increasing the difficulty of the task [2].
44
+
45
+ In robotics, scene understanding includes identifying, localizing, and describing the elements that compose the environment, their attributes, and dynamics. Research on novel techniques for automatic scene understanding has been exponentially increasing over the past decade due to the potential impact on numerous applications. The advances in deep learning as well as the availability of open-source datasets, and the increasing capacity of computation resources have facilitated the rapid improvement of scene understanding techniques [1]. These advances are most evident in image classification, where the goal is to determine what an image contains. The output of this classification task can be considered as a high-level representation of the scene which enables the identification of the various objects that are present by assigning a class label to them. A different mid-level capability, known as object detection, further details the scene by simultaneously classifying and localizing the objects in the image with bounding boxes. Although this task represents the scene in greater detail, it is still unable to provide essential information or object attributes such as the shape of objects. A closely related task, known as object segmentation, aims to fill this gap by additionally providing the shape of the object inside the bounding box in terms of their segmented boundaries. We present an overview of these perception tasks in Figure 1.1.
46
+
47
+ Semantic segmentation on the other hand presents a more integrated scene representation that includes classification of objects at the pixel-level, and their locations in the image. Semantic segmentation is considered as a low-level task that works at the pixel resolution and unifies object detection, shape recognition,
48
+
49
+ ![](images/5baa20c03b0778a9e9d793b02a84ee2b9335097573f63b7add3afd9aa30a20ef.jpg)
50
+ 3: Vegetation 2: Road 0: Pedestrian
51
+
52
+ ![](images/cb5c7153fef1938ed9a7bb8395a8ef400744e3620bd94f6c80a5d4c0ccd6f069.jpg)
53
+
54
+ ![](images/3cc0b7cab92d5bb3b43414bbe03bfc6f46a0fe13fffb68f90c941e46b09c98d2.jpg)
55
+ 5: Sky 4: Building 3: Vegetation 2: Road 1: Car
56
+ FIGURE 1.2 Illustration of the semantic segmentation output in which a semantic class label is assigned to each pixel in the image. The network predicts label indices for each pixel, which are depicted with different colors for visualization purposes. The predictions overlaid on the input image are shown on the left and the label indices overlaid on the input image are shown on the right.
57
+
58
+ and classification. By extending the previous tasks, semantic segmentation assigns a class label to each pixel in the image. Therefore, as we show in Figure 1.2, semantic segmentation models output a full-resolution semantic prediction that contains scene contexts such as the object category, location, and shape of all the scene elements including the background. Given that this prediction is performed for each pixel of the image, it is known as a dense prediction task.
59
+
60
+ Detailed understanding of the scene by assigning a class label to each pixel facilitates various downstream robotic tasks such as mapping, navigation, and interaction. This is an important step towards building complex robotic systems for applications such as autonomous driving [3, 4], robot-assisted surgery [5, 6], indoor service robots [7, 8], search and rescue robots [9], and mobile manipulation [10]. Therefore, incorporating semantic information has strongly influenced several robotics areas such as Simultaneous Localization And Mapping (SLAM) and perception through object recognition and segmentation, highlighting the importance of semantic knowledge for robotics. Semantic information is of special importance for tackling problems such as perception under adverse environmental conditions. Robots operating in the real world encounter adverse weather and lighting conditions that change from day through night. Accurately predicting the semantics under these conditions is of great importance for successful operation.
61
+
62
+ In semantic segmentation, we are not interested in identifying single instances, i.e., individual detections of objects in the scene. Therefore, the segmentation output does not distinguish two objects of the same semantic class
63
+
64
+ ![](images/eb26f31b48527cef48722fec42c9fee34200a90627a94c435ca8b5f1f940ba14.jpg)
65
+ Semantic Segmentation
66
+
67
+ ![](images/3e6183cf7036092464b6682e655524a3dd4700a069da64a734c8cfbc24571f68.jpg)
68
+ Instance Segmentation
69
+ FIGURE 1.3 Comparison of semantic segmentation, instance segmentation, and panoptic segmentation tasks. Semantic segmentation assigns a class label to each pixel in the image and instance segmentation assigns an instance ID to pixels belonging to individual objects as well as semantic class label to each pixel in the image. The panoptic segmentation task unifies semantic and instance segmentation.
70
+
71
+ ![](images/186d2314cac6dc53ac799e47f390e79d5c020df23823bbba6f26f3098237f1e8.jpg)
72
+ Panoptic Segmentation
73
+
74
+ as separate entities. Another closely related perception task, known as instance segmentation, allows for distinguishing instances of objects of the same class in addition to pixel-level segmentation, and Multi-Object Tracking and Segmentation (MOTS) [11] tackles the problem of obtaining temporally coherent instance segmentation. Furthermore, panoptic segmentation [12] is a recently introduced task that unifies semantic and instance segmentation. We present a graphical comparison of these perception tasks in Figure 1.3. An additional task named Multi-Object Panoptic Tracking (MOPT) [13] further combines panoptic segmentation and MOTS. This elucidates the importance of developing models for a more holistic scene representation.
75
+
76
+ In this chapter, we present techniques for semantic scene segmentation, primarily using deep learning. We first discuss different algorithms and architectures for semantic segmentation, followed by the different loss functions that are typically used. We then discuss how different types of data and modalities can be used, and how video-classification models can be extended to yield temporal coherent semantic segmentation. Subsequently, we present an overview of datasets, benchmarks, and metrics that are used for semantic segmentation. Finally, we discuss different challenges and opportunities for developing advanced semantic segmentation models.
77
+
78
+ # 1.2 ALGORITHMS AND ARCHITECTURES FOR SEMANTIC SEGMENTATION
79
+
80
+ The automatic understanding of the scene semantics has been a major area of research for decades. However, unprecedented advancement in semantic segmentation methods has only been achieved recently. Deep learning has played a significant role in enabling this capability, especially after the introduction of Fully Convolutional Networks (FCNs) [14] which were proposed as a solution for semantic segmentation. In this section, we first provide a brief overview of semantic segmentation approaches used prior to Convolutional Neural Networks (CNNs), and then present important deep learning approaches, their key improvements, and their limitations. We further present challenges and proposed solutions.
83
+
84
+ # 1.2.1 Traditional Methods
85
+
86
+ Typically, the traditional algorithms for image segmentation use clustering methodologies, contours, and edge information [15, 16]. Particularly for semantic segmentation, diverse approaches initially followed the idea of obtaining a pixel-level inference by considering the relationship between spatially close pixels. To do so, various features of appearance, motion, and structure ranging in complexity were considered including pixel color, surface orientation, height above camera [17], histogram of oriented gradients (HOG) [18], Speeded Up Robust Features (SURF) [19], amongst others. Approaches for image semantic segmentation range from simple thresholding methods in gray images to more complex edge-based approaches [20-22] or graphical models such as Conditional Random Fields (CRF) and Markov Random Fields (MRF) [23-25]. Another group of approaches employ multiple individually pre-trained object detectors with the aim of extracting the semantic information from the image [26]. In general, as traditional approaches typically rely on a priori knowledge, such as the dependency between neighboring pixels, these methods require semantic and spatial properties to be defined with respect to the application in order to specify the segmentation concept. Moreover, these methods are limited to segmenting a fixed number of object classes that is determined by hand-selected parameters.
87
+
88
+ # 1.2.2 Deep Learning Methods
89
+
90
+ The introduction of deep learning methods and deep features presented important advances in computer vision tasks such as image classification, and led to the interest in using deep features for semantic segmentation. The initially proposed deep learning approaches for semantic segmentation used classification networks such as VGG [27] and AlexNet [28], and adapted them for the task of semantic segmentation by fine-tuning the fully connected layers [29-31]. As a result, these approaches were plagued by overfitting and required significant training time. Additionally, the semantic segmentation performance was affected by the insufficiently discriminative deep features that these networks learned.
91
+
92
+ Most subsequently proposed methods suffered from low performance, and consequently several refinement strategies were incorporated such as Conditional Random Fields (CRFs) [32], Markov Random Fields (MRFs) [29], nearest neighbours [30], calibration layers [31], and super-pixels [33, 34]. Refinement strategies are still used as post-processing methods to enhance the pixel classification around regions where class intersections occur [35].
93
+
94
+ Significant progress in semantic segmentation was achieved with the
95
+
96
+ ![](images/8394bd77723830c10760bf70760e152fb6bb6aad4c23d6e281105b4de00cc0ce.jpg)
97
+
98
+ ![](images/0e4e5e66bc17c2119825eb876eb981f43d79d676a1cc0ced37a35aa21a3cc09f.jpg)
99
+ FIGURE 1.4 An example topology of a Convolutional Neural Network (CNN) used for image classification (top) and a Fully Convolutional Network (FCN) that is used for dense prediction (bottom). Note that FCNs do not contain any fully connected layers.
100
+
101
+ introduction of FCNs [14] that use the entire image to infer the dense prediction. An FCN is composed of only stacked convolutional and pooling layers, without any fully connected layers, as shown in Figure 1.4. Originally, the proposed network used a stack of convolutional layers without any downsampling to learn the mapping from pixels to pixels directly by maintaining the input resolution. To compensate for the absence of downsampling and to learn sufficiently representative features, multiple consecutive convolutional layers are assembled. This initial approach was able to generate impressive results but at a very high computational cost. Therefore, the model was inefficient and its scalability was limited.
102
+
103
+ As a solution to this problem, they presented an encoder-decoder architecture. The encoder is a typical CNN pre-trained for classification tasks such as AlexNet [28] or VGGNet [27]. The goal of the downsampling layers in the encoder is to capture deep contextual information that corresponds to the semantics. For its part, the decoder network is composed of deconvolutional and up-sampling layers. Its goal is to convert a low-resolution feature representation to a high-resolution image, recovering the spatial information and enabling precise localization, thereby yielding the dense classification.
104
+
105
+ Towards this direction, a deeper deconvolution network consisting of stacked deconvolution and unpooling layers is proposed in [36]. Oliveira et al. [37] employed a similar strategy called UpNet for part segmentation and tackled two problems: occluded parts and over-fitting. Similarly, an encoder-decoder architecture was proposed in [38] where the feature maps obtained with the encoder are used as input of the decoder network, which upsamples the feature maps by reusing the maxpooling indices from the corresponding encoder layers. Likewise, Liu et al. [39] proposed an approach called ParseNet, which models global context directly. Such methods demonstrated state-of-the-art results at the time of their introduction. Among these early deep learning models, UpNet and ParseNet achieve superior performance.
108
+
109
+ Specifically, to obtain deep features that enhance the performance, the convolutional layers learn feature maps with progressively coarser spatial resolutions, and the corresponding neurons have gradually larger receptive fields in the input image. This means that the earlier layers encode features of appearance and location, and the features extracted by the later layers encode context and high-level semantic information. As a consequence, two main challenges of deep convolutional neural networks in semantic segmentation were identified. First, the consecutive pooling operations or convolution striding lead to smaller feature resolutions. Second, the multi-scale nature of the objects in the scene was difficult to capture. Since the features at different resolutions encode context information at different scales, this information was exploited to enhance the representation of multi-scale objects. In the following subsections, we present different techniques that have been proposed to address these challenges.
110
+
111
+ # 1.2.3 Encoder Variants
112
+
113
+ Encoders are also referred to as the backbone network in semantic segmentation architectures. The encoders used for semantic segmentation are typically based on CNNs that have been proposed for image classification. The initial semantic segmentation approaches adopted the VGG-16 [27], AlexNet [28], or GoogLeNet [40] architectures for the encoder. Each of these encoders has achieved outstanding results in the ImageNet ILSVRC14 and ILSVRC12 [41] challenges. VGG was extensively used in several semantic segmentation architectures [42, 43]. A breakthrough in semantic segmentation models was achieved with the introduction of ResNet [44]. Several semantic segmentation models that employed ResNet and its variants such as Wide ResNet [45] and ResNeXt [46] achieved state-of-the-art performance on various benchmarks. Another popular family of encoder architectures is the newer generation of GoogLeNet models such as Inception-v2, Inception-v3 [47], Inception-v4, and Inception-ResNet [48]. More recently, semantic segmentation models that employ the EfficientNet [49] family of architectures have achieved impressive results while being computationally more efficient than the previous encoders.
114
+
115
+ # 1.2.4 Upsampling Methods
116
+
117
+ While employing multiple sequential convolution and pooling operations in the network leads to deep features and enhances the performance of perception networks, substantial information loss can occur in the downsampled representation of the input towards the end of the network. This loss in information can affect the localization of features as well as details of the scene elements, such as texture or boundary information. Diverse works have been proposed to prevent or recover this loss of information. As a solution to this problem, [36] introduced deconvolution networks composed of sets of deconvolution and un-pooling layers. The authors apply their proposed network on individual object proposals and combine the predicted instance-wise segmentations to generate a final semantic segmentation.
118
+
119
+ The goal of employing upsampling operations during the decoding step is to generate the semantic segmentation output at the same resolution as the input image. Given its computational efficiency, bilinear interpolation was extensively used in several semantic segmentation networks [14, 42]. Another common method to upsample the feature maps is using deconvolution or transposed convolution layers. Transposed convolution reverses the spatial effect of the convolution operation and can be used to obtain the dense prediction in the decoder of semantic segmentation architectures [50-52].
120
+
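+ The two upsampling operators mentioned above can be illustrated with a short PyTorch snippet; the tensor sizes are arbitrary and chosen only for demonstration.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(1, 64, 32, 32)  # low-resolution feature map

# (a) Parameter-free bilinear interpolation: cheap, no learnable weights.
up_bilinear = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=False)

# (b) Transposed (de-)convolution: a learned upsampling operator.
deconv = nn.ConvTranspose2d(in_channels=64, out_channels=64, kernel_size=2, stride=2)
up_learned = deconv(x)

print(up_bilinear.shape, up_learned.shape)  # both: torch.Size([1, 64, 64, 64])
```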
121
+ # 1.2.5 Techniques for Exploiting Context
122
+
123
+ Different semantic segmentation methodologies have been proposed with the aim of exploiting semantic information, local appearance, and global context of the scene that can be extracted in early and late deep features. Several methodologies propose different strategies and architectures that fuse features in the process.
124
+
125
+ # 1.2.5.1 Encoder-Decoder Architecture
126
+
127
+ Initial semantic segmentation architectures [14, 36] used deconvolution to learn the upsampling of the encoder's outputs at low resolution. In addition, SegNet [38] takes the encoder's pooling indices and later uses them in the decoder to learn additional convolutional layers with the aim of densifying the feature responses. The U-Net architecture [53] implements skip connections between the encoder and the corresponding decoder layers, as shown in Figure 1.5. The skip connections allow directly transferring information from encoder layers to the corresponding deeper layers of the decoder. This network employs symmetry by increasing the size of the decoder to match the encoder. More recent encoder-decoder approaches [54-57] have demonstrated the effectiveness of this structure on several semantic segmentation benchmarks.
128
+
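+ A minimal sketch of an encoder-decoder with skip connections in the U-Net spirit is shown below. It is a toy model for illustration, assuming arbitrary channel widths and an input resolution divisible by four, and is not a faithful reproduction of any of the cited architectures.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def conv_block(cin, cout):
    return nn.Sequential(nn.Conv2d(cin, cout, 3, padding=1), nn.ReLU(inplace=True))

class TinyUNet(nn.Module):
    """Minimal encoder-decoder with skip connections in the U-Net spirit."""
    def __init__(self, num_classes=5):
        super().__init__()
        self.enc1, self.enc2 = conv_block(3, 16), conv_block(16, 32)
        self.bottleneck = conv_block(32, 64)
        self.dec2, self.dec1 = conv_block(64 + 32, 32), conv_block(32 + 16, 16)
        self.head = nn.Conv2d(16, num_classes, 1)
        self.pool = nn.MaxPool2d(2)

    def forward(self, x):
        e1 = self.enc1(x)                   # full resolution
        e2 = self.enc2(self.pool(e1))       # 1/2 resolution
        b = self.bottleneck(self.pool(e2))  # 1/4 resolution
        # Skip connections: concatenate upsampled deep features with encoder features.
        d2 = self.dec2(torch.cat([F.interpolate(b, scale_factor=2), e2], dim=1))
        d1 = self.dec1(torch.cat([F.interpolate(d2, scale_factor=2), e1], dim=1))
        return self.head(d1)                # per-pixel class scores
```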
129
+ ![](images/852f189cac8835ded4605698ab458965e7d44a9c77b3318b489a63340ed46de3.jpg)
130
+
131
+ ![](images/e4992360f0b6d0053649e979e94cb1e96757dd22a86f9e7bb8255db1519be8cc.jpg)
132
+ FIGURE 1.5 An example topology of an encoder-decoder architecture. Typically, the encoder is a pre-trained classification CNN that uses downsampling layers to capture the contextual information. On the other hand, the decoder network is composed of up-sampling layers to recover the spatial information, yielding the pixel-level classification output with the same resolution as the input image.
133
+
134
+ Pooling layer
135
+
136
+ ![](images/06b1c11305480b4b10ddab671c8ec648dcb37de5d9c02851faab5db8aa2a4b71.jpg)
137
+
138
+ Deconvolution
139
+
140
+ # 1.2.5.2 Image Pyramid
141
+
142
+ The main idea with this network topology is that the same model is used to process multi-scale inputs. We present the general image pyramid network in Figure 1.6. Typically, the model weights are shared across the multiple inputs, while the different input sizes serve different purposes. Features corresponding to small-scale inputs encode the wider context, and the details from small elements are encoded and preserved by the large-scale inputs. Examples of this image pyramid structure include the Laplacian pyramid used to transform the input. With this transformation, each input scale is subsequently used to feed a CNN, and finally, all feature map scales are fused together [33]. Other methodologies directly resize the input to different scales and later fuse the obtained features [42, 58, 59]. However, the main restriction of these models is the limited GPU memory, given that they rely on deeper CNNs such as [44, 45, 60], which are computationally expensive.
143
+
144
+ # 1.2.5.3 Conditional Random Fields
145
+
146
+ Graphical models, especially Conditional Random Fields (CRFs) have been used as refinement layers in deep semantic segmentation architectures. The main objective is to capture low-level detail in regions where class intersections occur. These boundary regions are particularly difficult to segment with precision. This strategy includes additional modules placed consecutively to represent the longer-range context. To do so, a popular methodology is to integrate CRF into CNNs. Diverse methodologies have been presented as a refinement layer, including Convolutional CRFs [61] and Dense CRF [62]. Other methodologies have been proposed to train both the CRF and CNN jointly [42, 63]. These
147
+
148
+ ![](images/697a5db06483e6a3af72551ba13312fc4b9c17fab3c8395ba0e3dd788692288d.jpg)
149
+ FIGURE 1.6 Topology of the image pyramid architecture. This network uses the same model to process the same input at a different scales. The different scales allow the network to encode different context in the image.
150
+
151
+ methods that incorporate CRFs have demonstrated their benefits to capture contextual knowledge and exploit finer details to enhance the class label localization in the pixels.
152
+
153
+ # 1.2.5.4 Spatial Pyramid Pooling
154
+
155
+ This structure uses spatial pyramid pooling to obtain context at multiple scales, and it is graphically described in Figure 1.7. Towards this direction, ParseNet [39] exploits image-level features. Another methodology is presented in DeepLabv2 [42], which uses atrous spatial pyramid pooling (ASPP) that includes parallel atrous convolution layers with different rates to consider objects at different scales. This strategy improves the segmentation performance. Following this direction, a subsequent work also proposes to generate multi-scale features that cover a larger scale range densely using the Densely connected Atrous Spatial Pyramid Pooling (DenseASPP) [64]. Furthermore, an efficient variant of the ASPP is proposed in Adapnet++ [2] which captures a larger effective receptive field while decreasing the required parameters by $87\%$ using cascaded and parallel atrous convolutions. Additionally, in Pyramid Scene Parsing Network (PSPNet) [65], the authors exploit the global context information by aggregating multiple region-based contexts with a proposed pyramid pooling module. PSPNet demonstrated significant improvement on several semantic segmentation benchmarks.
156
+
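+ A simplified sketch of an ASPP-style module is shown below, assuming an illustrative choice of dilation rates and channel widths; variants such as the ASPP in DeepLabv3 additionally include a 1x1 branch and image-level pooling.

```python
import torch
import torch.nn as nn

class ASPP(nn.Module):
    """Simplified atrous spatial pyramid pooling: parallel dilated
    convolutions with different rates capture context at multiple scales."""
    def __init__(self, cin=256, cout=256, rates=(1, 6, 12, 18)):
        super().__init__()
        self.branches = nn.ModuleList([
            nn.Conv2d(cin, cout, kernel_size=3, padding=r, dilation=r)
            for r in rates
        ])
        self.project = nn.Conv2d(cout * len(rates), cout, kernel_size=1)

    def forward(self, x):
        # Each branch sees a different effective receptive field; the outputs
        # keep the same spatial size and are fused by a 1x1 convolution.
        return self.project(torch.cat([b(x) for b in self.branches], dim=1))
```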
157
+ # 1.2.5.5 Dilated Convolution
158
+
159
+ Dilated convolutions, graphically presented in Figure 1.8, are also known as atrous convolutions [42] and aim to have an effective receptive field that grows more rapidly than that of contiguous convolutional filters. These convolutions are an effective strategy to preserve the feature map resolution and extract deep features without using pooling or subsampling. Nevertheless, since the feature map
160
+
161
+ ![](images/e82c40817b58aa8e35e5520317c4db22b41249355023d319575d0d775351972d.jpg)
162
+ FIGURE 1.7 Topology of Spatial Pyramid Pooling architecture used to exploit the context found at multiple scales. This network includes a new module for multi-level pooling between the convolutional and fully-connected layers. The multi-level pooling allows this network to be more robust to the variations in object scale and deformation.
163
+
164
+ resolutions are not reduced with the progression of the network hierarchy, using dilated convolutions requires higher GPU storage and computation. Some of the explored models that use atrous convolutions for semantic segmentation include [66-68].
165
+
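+ The rapid growth of the receptive field can be seen in a small example: stacking 3x3 convolutions with increasing dilation rates enlarges the receptive field without any pooling or striding (the rates below are arbitrary and chosen only for illustration).

```python
import torch.nn as nn

# Stacking stride-1 dilated convolutions keeps the feature map at full
# resolution while the receptive field grows quickly with the dilation rate.
layers = nn.Sequential(
    nn.Conv2d(3, 32, kernel_size=3, padding=1, dilation=1),   # receptive field  3x3
    nn.Conv2d(32, 32, kernel_size=3, padding=2, dilation=2),  # receptive field  7x7
    nn.Conv2d(32, 32, kernel_size=3, padding=4, dilation=4),  # receptive field 15x15
)
```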
166
+ # 1.2.6 Real-Time Architectures
167
+
168
+ Many of the architectures and topologies presented so far in this chapter typically require high computational capacity and are not efficient for real-time applications. As these architectures employ large networks for their encoder such as GoogleNet [40] and ResNet [44], or use large CNN structures in different stages of the architecture, they achieve high performance but with low efficiency in terms of computation cost and runtime. To address this problem, different works propose approaches that are more suitable for real-time applications. For example, Efficient Neural Network (Enet) [69] is a lightweight architecture in which the last stage of the model is removed to optimize the network to obtain a faster inference time. The main drawback of this architecture is that excluding the downsampling operations in the last stage of the network makes it unable to cover large objects since the receptive field is smaller.
169
+
170
+ As an alternative, in ERFNet [57] a layer is designed to use residual connections and depthwise separable convolution with the aim of increasing the receptive field. On the other hand, in the spatial pyramid pooling structure, the convolutional feature maps are re-sampled at the same scale before the classification layer, resulting in a computationally expensive process. To tackle this problem, ESPNet is proposed as an efficient network structure [70]. This approach aims to efficiently exploit both context and spatial features by incorporating an efficient convolutional module called ESP. The ESPNet architecture is able to preserve the segmentation accuracy while being fast, small, and requiring low power and low latency. ESPNet decomposes a standard convolution into two
171
+
172
+ ![](images/1dab7e3e709e55107c562b706b08aa8a8536600715f9ff12c148d244a6ebc6c8.jpg)
173
+ FIGURE 1.8 Illustration of CNNs with standard convolutions (top) and CNNs with dilated convolutions (bottom). The receptive fields of consecutive deep convolution layers output smaller resolution feature maps. The receptive field of dilated convolution grows more rapidly than in contiguous convolutional filters.
174
+
175
+ steps. First, point-wise convolutions to lessen the computation effort. Second, a spatial pyramid of dilated convolutions that re-samples the feature maps so that the network can learn the representations from a large effective receptive field.
176
+
177
+ The approaches mentioned above provide lightweight architectures but compromise the model accuracy. Other models such as BiSeNet [71], and LiteSeg [72] explore strategies to improve computational efficiency while maintaining high accuracy. In BiSeNet, the authors design two different streams. First, the Spatial Path generates high-resolution features by using a small stride. Second, the Context Path is designed to obtain an adequate receptive field with a fast downsampling approach. Later, an additional Feature Fusion Module is employed to combine both features. In LiteSeg, the authors propose a deeper version of ASPP and use dilated convolutions and depthwise separable convolutions. As a result, LiteSeg is a faster and efficient model which provides high accuracy.
178
+
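+ The parameter savings of depthwise separable convolutions, which several of the lightweight models above rely on, can be verified with a short snippet; the channel sizes are arbitrary.

```python
import torch.nn as nn

cin, cout, k = 64, 128, 3

# Standard convolution: roughly cin * cout * k * k weights.
standard = nn.Conv2d(cin, cout, k, padding=1)

# Depthwise separable convolution: a per-channel (depthwise) spatial filter
# followed by a 1x1 point-wise convolution that mixes channels.
depthwise = nn.Conv2d(cin, cin, k, padding=1, groups=cin)
pointwise = nn.Conv2d(cin, cout, kernel_size=1)

n_std = sum(p.numel() for p in standard.parameters())
n_sep = sum(p.numel() for p in depthwise.parameters()) + \
        sum(p.numel() for p in pointwise.parameters())
print(n_std, n_sep)  # roughly an 8x reduction in weights for this setting
```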
179
+ # 1.2.7 Object Detection-based Methods
180
+
181
+ In object detection in scenes with multiple elements, the main goal is to generate a bounding box indicating each object. Nevertheless, since the input image can include a diverse number of objects or may not contain any objects at all, the number of objects that should be detected cannot be fixed. Therefore, this task cannot be solved using a standard CNN followed by a fully connected layer with a
182
+
183
+ predefined number of output classes. A straightforward approach to tackle this problem is to select different regions of interest from the image and employ a CNN on each region to classify the presence of a single object in the region. The architecture used to determine the regions of the input image is called a Region Proposal Network (RPN). RPNs are essential structures in the construction of algorithms that select a reduced group of regions of interest where objects might be located in the image. These algorithms aim to obtain an optimal number of regions that allows the detection of all the elements in the scene, thereby reducing the required computation capacity. Some popular algorithms are YOLO [73], Single Shot Detector [74], Fast-RCNN [75], and its improved version Faster-RCNN [76]. These networks facilitate the segmentation of the object inside a smaller region of the image. In this direction, the segmentation of instances is proposed in Mask-RCNN [77] and YOLACT [78], which obtain a semantic segmentation output by combining the predicted segmentation masks to cover all the pixels of the input image.
184
+
185
+ # 1.3 LOSS FUNCTIONS FOR SEMANTIC SEGMENTATION
186
+
187
+ In this section, we discuss the most commonly employed loss functions for learning the semantic segmentation task.
188
+
189
+ # 1.3.1 Pixel-Wise Cross Entropy Loss
190
+
191
+ The pixel-wise cross entropy loss function [79] inspects each pixel individually by comparing the predicted class label to the ground truth, and finally computes averages over all pixels. This loss function leads to equal learning for each pixel in the image, which can lead to problems if the image is composed of unbalanced pixel classes. With the aim of tackling class imbalance, weighting this loss for each output channel was proposed [14]. Additionally, a pixel-wise weighting loss has also been proposed [53]. In this case, a larger weight is assigned to the pixels at the borders of segmented objects. The pixel-wise cross entropy loss $(\mathcal{L}_{\mathcal{C}\mathcal{E}})$ is computed as
192
+
193
+ $$
194
+ \mathcal{L}_{\mathcal{CE}}(p, y) = -\sum_{c} y_{o,c} \log p_{o,c}, \tag{1.1}
195
+ $$
196
+
197
+ where $c$ is the class label, $o$ denotes an observation, $y_{o,c} \in \{0,1\}$ is a binary indicator of whether $c$ is the correct classification for $o$, and $p_{o,c}$ is the predicted probability of $o$ belonging to class $c$.
198
+
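+ A minimal PyTorch example of the pixel-wise cross entropy loss, including the class-weighted variant mentioned above, is shown below; the tensor shapes and weight values are illustrative only.

```python
import torch
import torch.nn.functional as F

# logits: per-pixel class scores, target: ground truth label per pixel.
logits = torch.randn(2, 5, 64, 64)          # (batch, classes, H, W)
target = torch.randint(0, 5, (2, 64, 64))   # (batch, H, W)

# Plain pixel-wise cross entropy, averaged over all pixels.
loss = F.cross_entropy(logits, target)

# Class-weighted variant to counteract unbalanced pixel classes,
# e.g. rare classes are assigned a larger weight.
class_weights = torch.tensor([0.5, 1.0, 1.0, 2.0, 4.0])
weighted_loss = F.cross_entropy(logits, target, weight=class_weights)
```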
199
+ # 1.3.2 Dice Loss
200
+
201
+ The Dice loss function [80] is based on the dice coefficient metric that is used to measure the overlap between two samples. This metric is also used to compute the similarity between a pair of images. The dice loss is computed for each class individually and then averaged over classes to obtain the final loss. The dice coefficient is computed as
204
+
205
+ $$
206
+ \operatorname{Dice}(y, c) = \frac{2 |X \cap Y|}{|X| + |Y|} \tag{1.2}
207
+ $$
208
+
209
+ where $c$ is the class label, $X$ denotes the predicted scores for the class and $Y$ is a tensor with the ground truth class labels. Based on this metric, the Dice loss $(\mathcal{L}_{\mathcal{D}})$ is computed as
210
+
211
+ $$
212
+ \mathcal{L}_{\mathcal{D}}(y, c) = 1 - \operatorname{Dice}(y, c) \tag{1.3}
213
+ $$
214
+
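+ A simple soft Dice loss following Equations (1.2) and (1.3) can be sketched as follows; the smoothing constant and the averaging over the batch are common implementation choices rather than part of the definition above.

```python
import torch

def dice_loss(probs, target_onehot, eps=1e-6):
    """Soft Dice loss averaged over classes.
    probs:          (batch, classes, H, W) softmax probabilities
    target_onehot:  (batch, classes, H, W) one-hot ground truth
    """
    dims = (0, 2, 3)                                # sum over batch and pixels
    intersection = (probs * target_onehot).sum(dims)
    cardinality = probs.sum(dims) + target_onehot.sum(dims)
    dice = (2.0 * intersection + eps) / (cardinality + eps)
    return 1.0 - dice.mean()                        # average over classes

logits = torch.randn(2, 5, 64, 64)
target = torch.randint(0, 5, (2, 64, 64))
onehot = torch.nn.functional.one_hot(target, num_classes=5).permute(0, 3, 1, 2).float()
loss = dice_loss(logits.softmax(dim=1), onehot)
```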
215
+ # 1.4 SEMANTIC SEGMENTATION USING MULTIPLE INPUTS
216
+
217
+ Thus far in this chapter, we primarily discussed techniques for semantic segmentation that take a single image as input and yield a corresponding segmentation output for that image. In this section, we further discuss semantic segmentation methods that take multiple inputs simultaneously, namely for video semantic segmentation, point cloud semantic segmentation and multimodal semantic segmentation.
218
+
219
+ # 1.4.1 Video Semantic Segmentation
220
+
221
+ As robots move and interact with the environment, the perception is typically dynamic with changing context and scene relationships. Most often, a temporal sequence of data is available as an input to the semantic segmentation model and we can exploit this information to enforce temporal coherence in the output. For example, if an object of a particular semantic class is present in two consecutive frames, that object should be identified with the same label across the different frames, assuring the temporal coherence of the classified pixels. To do so, different works have explored various techniques to exploit temporal information to improve the overall semantic segmentation performance [81-84]. Specifically, the clockwork convnets proposed in [81] use clock signals to control the update rates of different layers, and an LSTM-based spatio-temporal FCN is introduced in [82]. Similarly, [83] uses a spatio-temporal representation where the convolutional gated recurrent network enables learning both spatial and temporal information jointly. Furthermore, the approach presented in [84] combines convolutional gated architectures and spatial transformers for video semantic segmentation.
222
+
223
+ # 1.4.2 Point Cloud Semantic Segmentation
224
+
225
+ A point cloud is a collection of points in the 3D space representing the structure of the scene. With the growing use of depth and LiDAR sensors that enable building 3D maps of the scene, methods for 3D semantic segmentation of point clouds are increasingly becoming more popular. We show in Figure 1.9 an example of LiDAR-based semantic segmentation. We briefly discuss three
226
+
227
+ ![](images/4792692996c3a5788997f8f44e336414839e7f2e2c9899266093bbcb6134b437.jpg)
228
+ FIGURE 1.9 Semantic segmentation of point clouds. The top figure presents a class label assigned to each point in the 3D space. The bottom figure presents the point cloud projected into the 2D representation using spherical projections. The semantic segmentation prediction is obtained for each pixel in the projection.
229
+
230
+ main categories of techniques for 3D semantic segmentation, namely point-based methods, voxel-based methods, and projection-based methods.
231
+
232
+ Point-based methods aim to process the raw point clouds directly. PointNet [85] was the first approach to tackle semantic segmentation directly on the raw point clouds. The PointNet architecture allows working on unordered data with a point-wise learning methodology that uses shared multi-layer perceptrons and subsequently symmetrical pooling functions. PointNet++ [86] further extends PointNet and proposes to group points in a hierarchical manner. A similar approach [87] computes per-point convolutions by grouping together neighboring points into kernel cells. In contrast, [88] proposes to use a directed graph over the point cloud generating a set of interconnected superpoints to capture the structure and context information.
233
+
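+ The core PointNet idea, a shared per-point MLP followed by a symmetric pooling function, can be sketched as follows. This is a strongly simplified illustration that omits the input and feature transform networks of the original architecture; all layer widths are arbitrary.

```python
import torch
import torch.nn as nn

class PointNetSegSketch(nn.Module):
    """Sketch of the PointNet idea: a shared per-point MLP, a symmetric
    (max) pooling that yields a global feature, and per-point classification
    from the concatenation of local and global features."""
    def __init__(self, num_classes=13):
        super().__init__()
        self.point_mlp = nn.Sequential(nn.Linear(3, 64), nn.ReLU(), nn.Linear(64, 128), nn.ReLU())
        self.classifier = nn.Sequential(nn.Linear(128 + 128, 128), nn.ReLU(), nn.Linear(128, num_classes))

    def forward(self, points):                      # points: (batch, N, 3)
        local_feat = self.point_mlp(points)         # (batch, N, 128)
        global_feat = local_feat.max(dim=1).values  # symmetric pooling, order-invariant
        global_feat = global_feat.unsqueeze(1).expand_as(local_feat)
        return self.classifier(torch.cat([local_feat, global_feat], dim=-1))  # (batch, N, classes)
```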
234
+ Voxel-based methods first convert the point cloud into a 3D voxel representation and then employ 3D CNN architectures on it. Voxels are volumetric discretizations of the 3D space. SegCloud [89] proposes to use this representation as the input to a 3D-FCN [90]. The authors also propose a deterministic trilinear interpolation that converts the voxel predictions back to the point cloud space and subsequently employ CRFs for refinement. Nevertheless, the computation of voxels and their 3D processing consumes substantial runtime and requires very high computational cost, making it infeasible for real-time applications.
235
+
236
+ Projection-based methods project the point cloud from 3D into 2D space to reduce the computational cost. A well-known transformation is the spherical projection, which has been especially utilized for LiDAR semantic segmentation. The resulting 2D projection allows using 2D image methodologies for semantic segmentation, which are faster than 3D approaches. SqueezeSeg [91] and its extension SqueezeSegV2 [92] yield very efficient
237
+
238
+ ![](images/821c8da184318779134233e2310a21903e60ddf60c28d35af43a7b60f615a6a0.jpg)
239
+ RGB Input
240
+
241
+ ![](images/183e07ab7a81ab83633ec9115711b1b664f099f6109beda541d0620f61a32811.jpg)
242
+ Thermal Input
243
+ FIGURE 1.10 Example of multimodal semantic segmentation using multiple modalities as input in the Freiburg Forest dataset [95]. In this case, the input modalities are RGB, Thermal, and Depth images. The semantic segmentation output is obtained by fusing the features obtained from both modalities.
244
+
245
+ ![](images/726603176244bf07720e3f38417a4f718a22209bc54b8cb2e873f0c7a7711203.jpg)
246
+ Depth Input
247
+
248
+ ![](images/9d82f139ac397dd2f56d26369d8f3efc1c2f86c2e84af4c41204fc7e4fee894e.jpg)
249
+ Semantic Segmentation
250
+
251
+ semantic segmentation results by utilizing spherical projections. Additionally, the authors in [93] use different 2D architectures and additional post-processing stages to refine the 3D segmentation results. A more recent approach called EfficientLPS [94] presents a model that incorporates geometric transformations while learning features in the encoder of semantic and instance segmentation networks.
252
+
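+ A typical spherical projection of a LiDAR point cloud onto a 2D range image can be sketched as follows; the image size and vertical field of view are sensor-specific assumptions chosen only for illustration.

```python
import numpy as np

def spherical_projection(points, h=64, w=1024, fov_up=3.0, fov_down=-25.0):
    """Project LiDAR points of shape (N, 3) onto a 2D range image of size (h, w).
    The vertical field of view (in degrees) is a sensor-specific assumption."""
    x, y, z = points[:, 0], points[:, 1], points[:, 2]
    depth = np.linalg.norm(points, axis=1) + 1e-8

    yaw = np.arctan2(y, x)           # azimuth angle
    pitch = np.arcsin(z / depth)     # elevation angle

    fov_up, fov_down = np.radians(fov_up), np.radians(fov_down)
    u = 0.5 * (1.0 - yaw / np.pi) * w                          # column index
    v = (1.0 - (pitch - fov_down) / (fov_up - fov_down)) * h   # row index

    u = np.clip(np.floor(u), 0, w - 1).astype(np.int32)
    v = np.clip(np.floor(v), 0, h - 1).astype(np.int32)

    range_image = np.zeros((h, w), dtype=np.float32)
    range_image[v, u] = depth        # later points overwrite earlier ones
    return range_image, (v, u)
```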
253
+ # 1.4.3 Multimodal Semantic Segmentation
254
+
255
+ Semantic segmentation of RGB images has led to important advances in scene understanding. However, image-based approaches suffer from visual limitations as they are susceptible to changing weather and seasonal conditions as well as varying illumination conditions. With the availability of low-cost sensors and with the goal of improving the robustness and the granularity of segmentation for robot perception, fusion of different modalities has been explored to exploit complementary features. Alternative modalities such as thermal images and depth images have been shown to be beneficial for segmenting objects in low illumination conditions by exploiting properties of objects such as reflectance and geometry.
256
+
257
+ There are three main categories of fusion techniques for multimodal semantic segmentation: early, late, and hybrid fusion. Early fusion, also known as data-level or input-level fusion, aims to combine the modalities before feeding them as input to the CNN. The obtained multimodal representation is later used as the input to a single model to exploit complementary features and leverage the interactions between low-level features of each modality. This technique typically requires that the utilized modalities have semantic similarities such as RGB and depth. A straightforward implementation of early fusion of modalities is channel stacking by concatenating multiple modalities across the channel dimension. Subsequently, a learning model can be trained end-to-end using the stacked modalities as input. In the case of RGB-Depth semantic segmentation, the first deep learning approach for multimodal early fusion proposed to concatenate the depth image with the RGB image as an additional channel [96]. Later, [97] proposed to use an encoder-decoder architecture composed of two encoder
258
+
259
+ branches to extract features from RGB and depth images, and subsequently combine the obtained depth feature maps with the RGB branch. More recently, a fusion mechanism proposed in RFBNet [98] explores the interdependencies between the encoders to provide an efficient fusion strategy. Given that early fusion approaches mainly utilize a single or unified network, they are computationally more efficient than the other techniques. Nevertheless, early fusion of modalities has its own set of limitations: it forces the network to learn fused representations from the very beginning and does not enable the network to exploit cross-modal interdependencies and complementary features.
260
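A minimal sketch of channel-stacking early fusion is shown below, assuming PyTorch; the toy encoder-decoder, channel counts, and class number are illustrative assumptions rather than any published architecture.

```python
import torch
import torch.nn as nn

class EarlyFusionSegNet(nn.Module):
    """Toy early-fusion model: RGB and depth are stacked into a single 4-channel input."""
    def __init__(self, num_classes=19):
        super().__init__()
        self.encoder = nn.Sequential(                         # first conv accepts 3 + 1 = 4 channels
            nn.Conv2d(4, 64, 3, stride=2, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(64, 128, 3, stride=2, padding=1), nn.ReLU(inplace=True),
        )
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1), nn.ReLU(inplace=True),
            nn.ConvTranspose2d(64, num_classes, 4, stride=2, padding=1),
        )

    def forward(self, rgb, depth):
        x = torch.cat([rgb, depth], dim=1)                    # (B, 4, H, W): input-level fusion
        return self.decoder(self.encoder(x))                  # per-pixel class scores

rgb = torch.randn(2, 3, 256, 256)
depth = torch.randn(2, 1, 256, 256)
logits = EarlyFusionSegNet()(rgb, depth)                      # (2, 19, 256, 256)
```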
+
261
+ Late fusion techniques use individual CNN streams for each modality, followed by a fusion stage at the end which facilitates learning of further combined multimodal representations. In one of the early methods, an RGB- and depth-based geocentric embedding was proposed for object detection and segmentation [99]. To this end, the method employs two modality-specific networks to obtain feature maps that are then fed to a classifier. Other deep learning methodologies aim to map the multimodal features to a subspace [100, 101]. In [100], an adaptive gating network was proposed which generates a class-wise probability distribution over the modality-specific network streams. [102] exploits both multimodal and multispectral images in a late-fused convolution architecture. In a similar work, [37] proposes a fusion architecture that extracts multimodal features separately using individual network streams and then feeds the summation of the resulting feature maps through consecutive convolutional layers. Since the modality-specific streams are trained individually, the subsequent fusion training allows obtaining a combined prediction. As a result, the late fusion approach can potentially learn better complementary features along with fusion-specific features. This strategy facilitates more robust performance: when the features of each modality-specific stream already yield good classification performance, the fusion provides further improvements. However, fusing feature maps only towards the end of the network may not be sufficient for learning accurate, robust, and highly detailed semantic segmentation.
262
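The late-fusion idea of summing modality-specific feature maps and refining them with further convolutions can be sketched as below (a schematic PyTorch example under assumed channel sizes, not the exact architecture of [37]).

```python
import torch
import torch.nn as nn

def make_stream(in_channels):
    """Modality-specific feature extractor (deliberately tiny)."""
    return nn.Sequential(
        nn.Conv2d(in_channels, 64, 3, padding=1), nn.ReLU(inplace=True),
        nn.Conv2d(64, 128, 3, padding=1), nn.ReLU(inplace=True),
    )

class LateFusionSegNet(nn.Module):
    """Two independent streams; their features are summed and refined at the end."""
    def __init__(self, num_classes=19):
        super().__init__()
        self.rgb_stream = make_stream(3)
        self.depth_stream = make_stream(1)
        self.fusion_head = nn.Sequential(                     # fusion-specific layers after summation
            nn.Conv2d(128, 128, 3, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(128, num_classes, 1),
        )

    def forward(self, rgb, depth):
        fused = self.rgb_stream(rgb) + self.depth_stream(depth)   # element-wise sum of feature maps
        return self.fusion_head(fused)
```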
+
263
+ Hybrid fusion methodologies aim to exploit the strengths of both early and late fusion strategies. In this direction, RDFNet [103] leverages the main idea behind residual learning and applies it to deep multimodal fusion for combining RGB-D features using multimodal feature fusion blocks and multi-level feature refinement blocks. On the other hand, semantics-guided multi-level fusion [104] learns the joint feature representation in a bottom-up setup by using the cascaded semantics-guided fusion block to fuse lower-level features across modalities as a sequential model. More recently, a self-supervised model adaptation module was proposed for deep multimodal fusion [2] which dynamically adapts the fusion of semantically mature multi-scale representations by exploiting complementary cues from each modality-specific encoder.
264
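Hybrid schemes instead exchange information at several depths of the encoders. The sketch below adds depth features into the RGB stream after every stage; it is only a schematic illustration of multi-level fusion under assumed channel sizes, not the fusion blocks of RDFNet [103] or the self-supervised model adaptation module of [2].

```python
import torch
import torch.nn as nn

class HybridFusionEncoder(nn.Module):
    """RGB encoder that absorbs depth features after every stage (multi-level fusion)."""
    def __init__(self):
        super().__init__()
        stages = [(3, 64), (64, 128), (128, 256)]             # (in_channels, out_channels) per stage
        self.rgb_stages = nn.ModuleList(
            nn.Sequential(nn.Conv2d(i, o, 3, stride=2, padding=1), nn.ReLU(inplace=True))
            for i, o in stages)
        self.depth_stages = nn.ModuleList(
            nn.Sequential(nn.Conv2d(1 if i == 3 else i, o, 3, stride=2, padding=1),
                          nn.ReLU(inplace=True))
            for i, o in stages)

    def forward(self, rgb, depth):
        x, d = rgb, depth
        for rgb_stage, depth_stage in zip(self.rgb_stages, self.depth_stages):
            x, d = rgb_stage(x), depth_stage(d)
            x = x + d                                         # fuse at every level, not only at the end
        return x                                              # fused features for a segmentation decoder
```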
+
265
+ ![](images/19346d3ace934b90d178b5e0f8816e67dd468c57ace5412b2a90ed57f4403c98.jpg)
266
+ Input RGB
267
+
268
+ ![](images/106afb381ca214d2b6038b6a85aa36fcc89834823d57d38ca79895eed7415656.jpg)
269
+ Semantic Segmentation
270
+ FIGURE 1.11 An example of semantic image segmentation from the challenging BDD100K dataset. The image shows a complex urban scene with many dynamic objects.
271
+
272
+ # 1.5 SEMANTIC SEGMENTATION DATASETS AND BENCHMARKS
273
+
274
+ One important reason for the advancements and popularity of the semantic segmentation task is the availability of large-scale, publicly available labeled datasets. Moreover, standard benchmarks and competitions in semantic segmentation facilitate the comparison of trained models against the state of the art in different contexts, such as autonomous driving.
275
+
276
+ Assuming that a sufficient amount of labeled training data is available, it is feasible to train a semantic segmentation model from scratch for a specific application. However, pixel-level annotation is an arduous and expensive task. As a consequence, the amount of available labeled data is often insufficient. One alternative is to use semi-supervised and weakly supervised learning methods [105] for annotating new datasets. Another widely used approach is transfer learning, where the model is first trained on a similar task or dataset with enough data. Transfer learning requires adaptation from the available dataset to the target dataset. A common practice is to first pre-train the model using an extensive dataset and then re-train it on the target dataset by initializing the network with the pre-trained weights. Under the premise that the pre-trained model captures general semantic information, and can therefore be fine-tuned with fewer labeled samples, a model trained on an autonomous driving dataset in one city can be adapted to a different city. In these cases, extensive and publicly available datasets and benchmarks are very helpful, not only to prove and compare the capabilities of the developed methods but also for pre-training in specific applications while using less annotated data. In this section, we briefly review publicly available datasets for semantic segmentation.
277
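In practice, transfer learning for semantic segmentation often amounts to loading pre-trained weights, replacing the classification head to match the target label set, and fine-tuning with a reduced learning rate on the backbone. The sketch below uses torchvision's fcn_resnet50 (the weights API of torchvision >= 0.13 is assumed); the target class count, learning rates, and ignore index are illustrative choices.

```python
import torch
import torch.nn as nn
from torchvision.models.segmentation import fcn_resnet50

# Load a segmentation model pre-trained on a large dataset.
model = fcn_resnet50(weights="DEFAULT")

# Replace the final classification layer to match the target dataset's label set.
num_target_classes = 34                                       # e.g. the IDD label set
model.classifier[4] = nn.Conv2d(512, num_target_classes, kernel_size=1)

# Fine-tune with a smaller learning rate for the pre-trained backbone than for the new head.
optimizer = torch.optim.SGD([
    {"params": model.backbone.parameters(), "lr": 1e-4},
    {"params": model.classifier.parameters(), "lr": 1e-2},
], momentum=0.9)
criterion = nn.CrossEntropyLoss(ignore_index=255)             # skip unlabeled pixels

def train_step(images, labels):
    """One fine-tuning step: images (B, 3, H, W) floats, labels (B, H, W) class indices."""
    model.train()
    optimizer.zero_grad()
    logits = model(images)["out"]                             # (B, num_target_classes, H, W)
    loss = criterion(logits, labels)
    loss.backward()
    optimizer.step()
    return loss.item()
```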
+
278
+ # 1.5.1 Outdoor Datasets
279
+
280
+ These datasets consist of a collection of images in diverse driving scenarios including highways, densely populated urban areas, and rural areas. Many of the datasets contain challenging perceptual conditions such as high traffic density,
281
+
282
+ rain, snow, fog, and other seasonal perceptual changes. In this context, the pixel labels may include pedestrians, roads, cars, bicycles, and buildings. We present an example of an outdoor image in Figure 1.11.
283
+
284
+ # 1.5.1.1 Cityscapes
285
+
286
+ The Cityscapes [106] dataset is a large-scale dataset consisting of urban street scenes. It is a highly diverse dataset comprising scenes captured at different times of the day and during different seasons of the year, from over 50 European cities. The additional presence of a large number of dynamic objects further adds to its complexity, making it one of the most challenging datasets for scene understanding tasks. Cityscapes provides pixel-level annotations at two quality levels: fine annotations for 5,000 images and coarse annotations for 20,000 images. There are 30 different class labels, and eight of these classes also have instance-specific labels. Consequently, there are three separate challenges: pixel-level semantic segmentation, instance-level semantic segmentation, and panoptic segmentation. The images in this dataset were captured at a resolution of $2048 \times 1024$ pixels using an automotive-grade $22\mathrm{cm}$ baseline stereo camera. The finely annotated images are divided into 2,975 for training, 500 for validation, and 1,525 for testing. The annotations for the test set are not publicly released; rather, they are only available to the online evaluation server that automatically computes the metrics and publishes the results on the leaderboard.
287
+
288
+ # 1.5.1.2 KITTI
289
+
290
+ The KITTI [107] dataset is one of the most comprehensive datasets for autonomous driving. Besides providing ground truth for semantic segmentation, KITTI also includes annotations for diverse tasks such as scene flow estimation, optical flow estimation, depth prediction, odometry estimation, tracking, and road lane detection. The dataset contains sequences of frames recorded with diverse sensor modalities such as high-resolution RGB and grayscale stereo cameras, and a 3D laser scanner.
291
+
292
+ # 1.5.1.3 Mapillary Vistas
293
+
294
+ The Mapillary Vistas [108] dataset contains 25,000 high-resolution images annotated into 66 object categories. Mapillary Vistas is one of the most extensive publicly available datasets for semantic segmentation of street scenes. Additionally, this dataset presents instance-specific semantic annotations for 37 classes, and it is suitable for other scene understanding tasks such as instance segmentation and panoptic segmentation. This dataset includes diverse scenes in terms of geographic extent and conditions such as weather, season, daytime, cameras, and viewpoints. The images in this dataset range in resolution from $1024 \times 768$ pixels to $4000 \times 6000$ pixels.
295
+
296
+ ![](images/b90db10196465bb780fae1201b88158b656d374ec7b1d5464912c6b0d6f5967b.jpg)
297
+ Input RGB
298
+
299
+ ![](images/99b9204290150255fa44a54deefd8fec0e3d8b28d022f8172b4e5ac8e5b803dd.jpg)
300
+ Semantic Segmentation
301
+ FIGURE 1.12 An example of semantic image segmentation from the challenging ScanNet dataset. The image shows an indoor scene where objects appear much closer than in outdoor scenes.
302
+
303
+ # 1.5.1.4 BDD100K: A Large-scale Diverse Driving Video Database
304
+
305
+ The BDD100K [109] dataset is one of the largest driving datasets, consisting of diverse scenes that cover different weather conditions, including sunny, overcast, and rainy, as well as different times of the day. The dataset consists of 100,000 videos of 40 seconds each that were captured at a resolution of $1280 \times 720$ pixels. BDD100K provides annotations for semantic segmentation, instance segmentation, multi-object segmentation and tracking, image tagging, lane detection, drivable area segmentation, multi-object detection and tracking, domain adaptation, and imitation learning. It provides fine-grained pixel-level annotations for 40 object classes, and the semantic segmentation split contains 7,000 images for training, 1,000 images for validation, and 2,000 images for testing.
306
+
307
+ # 1.5.1.5 Indian Driving Dataset (IDD)
308
+
309
+ The Indian Driving Dataset (IDD) [110] is a collection of finely annotated images of autonomous driving scenarios. Instead of focusing on data from structured environments, this dataset adds novel data from unstructured environments, i.e., scenes that do not have well-delineated infrastructure such as lanes and sidewalks. It consists of 10,000 images with resolutions ranging from $720 \times 1280$ pixels to $1920 \times 1080$ pixels and finely annotated semantic segmentation labels with 34 classes. The training set contains 6,993 images, while the validation and testing sets have 981 and 2,029 images, respectively.
310
+
311
+ # 1.5.2 Indoor Datasets
312
+
313
+ These datasets contain indoor scenes such as offices and rooms. Besides semantic class labels for images, some of the datasets also provide depth images and 3D models of the scenes. The class labels in these datasets include objects such as sofa, bookshelf, refrigerator, and bed. Additionally, indoor datasets present background class labels such as wall and floor. We present an example of an indoor image in Figure 1.12.
314
+
315
+ # 1.5.2.1 NYU-Depth V2
316
+
317
+ The NYU Depth V2 dataset [111] is a collection of video sequences recorded in indoor scenarios. The dataset was collected using both the RGB and Depth sensors from the Microsoft Kinect at 464 different scenes from three different cities. It contains 1449 images of nearly 900 different categories with the respective dense semantic labels and additional depth information.
318
+
319
+ # 1.5.2.2 SUN 3D
320
+
321
+ The SUN 3D [112] dataset contains RGB-D videos of 415 sequences of 254 different indoor scenes, captured in over 41 buildings. Only 8 sequences in this dataset have been annotated with semantic segmentation labels. In addition to the RGB-D images, the dataset also provides the camera poses for each frame.
322
+
323
+ # 1.5.2.3 SUN RGB-D
324
+
325
+ SUN RGB-D [113] is a scene understanding benchmark suite. The dataset contains 10,335 pairs of RGB-D images with pixel-wise semantic annotation for both 2D and 3D, covering both objects and rooms. The dataset provides class labels for six important recognition tasks: semantic segmentation, object classification, object detection, context reasoning, mid-level recognition, and surface orientation and room layout estimation.
326
+
327
+ # 1.5.2.4 ScanNet
328
+
329
+ The ScanNet [114] dataset is a large-scale RGB-D video dataset consisting of 2.5 million views. Besides providing annotations for semantic segmentation, ScanNet also presents labels for 3D camera poses, surface reconstructions, and instance-level semantic segmentation.
330
+
331
+ # 1.5.3 General Purpose Datasets
332
+
333
+ These datasets contain generic class labels covering almost every type of object or background. Some of these datasets are the most standard benchmarks to measure progress in the semantic segmentation task as a whole. An example from these datasets is presented in Figure 1.13.
334
+
335
+ # 1.5.3.1 PASCAL Visual Object Classes (VOC)
336
+
337
+ PASCAL Visual Object Classes [115] presents all types of indoor and outdoor images with 20 foreground object classes and one background class, with 1,464 images for training, 1,449 images for validation, and 1,456 images for testing. The test set is not public and is accessible only for the challenge. Besides including pixel-level annotations for semantic segmentation, this dataset also presents annotations for classification, detection, action classification, and person layout tasks.
338
+
339
+ ![](images/5d3baf777979e293ee3afb8f111b999db94ffe3c37c2b0477465327150215b48.jpg)
340
+ Input RGB
341
+
342
+ ![](images/68c4cf8fb2395a9c24dd2808757742aab9e359544b247a71f90d82135eecd96d.jpg)
343
+ Semantic Segmentation
344
+ FIGURE 1.13 An example of semantic image segmentation from the Microsoft Common Objects in Context dataset. The image shows a scene with a person playing sports. The class labels in the image include person, sky, and floor.
345
+
346
+ # 1.5.3.2 Microsoft Common Objects in Context (MS COCO)
347
+
348
+ MS COCO [116] is an extensive large-scale dataset for object detection, semantic segmentation, and image captioning. It contains 330,000 images of complex everyday scenes with common objects. The dataset contains 200,000 labeled images with 1.5 million object instances and 80 object categories. The dataset is split into 82,000 images for training, 40,500 images for validation, and 80,000 for testing.
349
+
350
+ # 1.5.3.3 ADE20K
351
+
352
+ The ADE20K [117] dataset contains more than 20,000 scenes with object and object-part annotations composing 150 semantic categories. The average image size of the samples in the dataset is 1.3 Mpixels, with resolutions of up to $2400 \times 1800$ pixels. The dataset is split into 20,000 images for training and 2,000 images for validation. Additionally, ADE20K also provides a public leaderboard.
353
+
354
+ # 1.6 SEMANTIC SEGMENTATION METRICS
355
+
356
+ Two principal criteria are usually considered when evaluating semantic segmentation models. The first is accuracy, which relates to the effectiveness of the model and represents how successful it is. The second corresponds to the computational complexity and is associated with the scalability of the model. Both criteria are essential for robots to successfully perform tasks using scene understanding models that can be deployed on resource-limited systems.
357
+
358
+ # 1.6.1 Accuracy
359
+
360
+ Measuring the effectiveness of a semantic segmentation model can be difficult, given that it requires measuring both classification and localization in the pixel space. Different metrics have been presented to measure each individual criterion or a combination of them.
361
+
362
+ # 1.6.1.1 ROC-AUC
363
+
364
+ ROC stands for the Receiver Operating Characteristic curve. The ROC curve characterizes a binary classifier by plotting the trade-off between the true-positive rate and the false-positive rate at various threshold settings. AUC stands for the area under this curve, and its maximum value is 1. This metric is useful in binary classification problems and is suitable for problems with balanced classes. Nevertheless, given that most semantic segmentation datasets present an imbalance between the classes, this evaluation metric is not considered in most recent challenges.
365
+
366
+ # 1.6.1.2 Pixel Accuracy (PA)
367
+
368
+ Pixel accuracy is a semantic segmentation metric that denotes the percentage of pixels that are correctly classified in the image. This metric calculates the ratio between the number of correctly classified pixels and the total number of pixels in the image as
369
+
370
+ $$
371
+ PA = \frac{\sum_{j=1}^{k} n_{jj}}{\sum_{j=1}^{k} t_{j}}, \tag{1.4}
372
+ $$
373
+
374
+ where $n_{jj}$ is the total number of pixels both classified and labelled as class $j$ . In other words, $n_{jj}$ corresponds to the total number of True Positives for class $j$ . $t_j$ is the total number of pixels labelled as class $j$ .
375
+
376
+ Since there are multiple classes present in semantic segmentation, the mean pixel accuracy (mPA) represents the class average accuracy as
377
+
378
+ $$
379
+ mPA = \frac{1}{k} \sum_{j=1}^{k} \frac{n_{jj}}{t_{j}}. \tag{1.5}
380
+ $$
381
+
382
+ PA and mPA are intuitive and interpretable metrics. However, a high PA does not directly imply superior segmentation performance, especially on class-imbalanced datasets. When one class dominates the image while other classes make up only a small portion of it, correctly classifying the dominant class alone is enough to yield a high PA.
383
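Both quantities in Eqs. (1.4) and (1.5) can be computed from a per-pixel confusion matrix. A minimal NumPy sketch (helper names are illustrative; predictions are assumed to already be valid class indices):

```python
import numpy as np

def confusion_matrix(pred, label, num_classes):
    """Per-pixel confusion matrix: rows are ground-truth classes, columns are predictions."""
    valid = (label >= 0) & (label < num_classes)              # ignore unlabeled pixels
    idx = num_classes * label[valid].astype(int) + pred[valid].astype(int)
    return np.bincount(idx, minlength=num_classes ** 2).reshape(num_classes, num_classes)

def pixel_accuracy(cm):
    return np.diag(cm).sum() / cm.sum()                       # Eq. (1.4)

def mean_pixel_accuracy(cm):
    per_class = np.diag(cm) / np.maximum(cm.sum(axis=1), 1)   # n_jj / t_j per class
    return per_class.mean()                                   # Eq. (1.5)
```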
+
384
+ # 1.6.1.3 Intersection over Union
385
+
386
+ In semantic segmentation, Intersection over Union (IoU) is the overlap area between the predicted segmentation and the ground truth divided by the area of union between the predicted segmentation and the ground truth. From the
387
+
388
+ following equation, it can be seen that IoU is the ratio of hits, known as true positives (TP), to the sum of false alarms, known as false positives (FP), misses, known as false negatives (FN), and true positives. The IoU value ranges between 0 and 1; the lower the value, the worse the semantic segmentation performance. The IoU metric can be computed as
389
+
390
+ $$
391
+ IoU = \frac{TP}{TP + FP + FN}, \tag{1.6}
392
+ $$
393
+
394
+ where $TP$ corresponds to the number of pixels that are both labelled and classified as the class of interest, $FP$ corresponds to the number of pixels classified as that class but labelled as another class (false alarms), and $FN$ corresponds to the number of pixels labelled as that class but classified as another class (misses). In the equation, the numerator corresponds to the overlap area between the predicted segmentation and the ground truth, while the denominator represents the area of their union.
395
+
396
+ Similarly to PA, mIoU is computed to obtain the average per-class IoU as
397
+
398
+ $$
399
+ mIoU = \frac{1}{k} \sum_{j=1}^{k} \frac{TP_{j}}{TP_{j} + FP_{j} + FN_{j}}. \tag{1.7}
400
+ $$
401
+
402
+ The IoU measure is more informative than PA given that it also penalizes false alarms, whereas PA does not. The IoU is a very straightforward and effective metric. Therefore, IoU and its variants are widely used as accuracy evaluation metrics in the most popular semantic segmentation challenges such as Cityscapes and the VOC challenge. However, the main drawbacks of these metrics are that they only consider labeling correctness without measuring how accurate the segmentation boundaries are, and that the relative significance of false positives and misses is not captured.
403
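Per-class IoU and mIoU (Eqs. (1.6) and (1.7)) follow from the same confusion matrix; the short NumPy sketch below is illustrative:

```python
import numpy as np

def mean_iou(cm):
    """Per-class IoU = TP / (TP + FP + FN) and its mean over classes (Eqs. 1.6-1.7)."""
    tp = np.diag(cm).astype(float)
    fp = cm.sum(axis=0) - tp                                  # predicted as class j, labelled otherwise
    fn = cm.sum(axis=1) - tp                                  # labelled as class j, predicted otherwise
    iou = tp / np.maximum(tp + fp + fn, 1)
    return iou, iou.mean()

# Tiny example with 3 classes
pred  = np.array([0, 1, 2, 0, 1, 1])
label = np.array([0, 1, 2, 0, 2, 1])
cm = np.zeros((3, 3), dtype=np.int64)
for l, p in zip(label, pred):
    cm[l, p] += 1
per_class_iou, miou = mean_iou(cm)
```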
+
404
+ # 1.6.1.4 Precision-Recall Curve (PRC)-based Metrics
405
+
406
+ Precision refers to the ratio of hits over the sum of hits and false alarms, and recall relates the hits to the sum of hits and misses. Precision and recall are computed as
407
+
408
+ $$
409
+ \text{Precision} = \frac{TP}{TP + FP}, \tag{1.8}
410
+ $$
411
+
412
+ $$
413
+ \text{Recall} = \frac{TP}{TP + FN}. \tag{1.9}
414
+ $$
415
+
416
+ The PRC is used to represent the trade-off between precision and recall for binary classification. The PRC has the ability to discriminate between the effects of false positives and false negatives. Therefore, PRC-based metrics are widely used for semantic segmentation.
417
+
418
+ - The F-score or Dice coefficient is very similar to the IoU and also ranges from 0 to 1, where 1 means the greatest similarity between the segmentation and
419
+
420
+ ground truth. It is a normalised measure of similarity, and is defined as
421
+
422
+ $$
423
+ \text{F-score} = \frac{2 \times \text{Precision} \times \text{Recall}}{\text{Precision} + \text{Recall}}. \tag{1.10}
424
+ $$
425
+
426
+ - PRC-AUC is defined as the area under the PRC. This metric describes the precision-recall trade-off under different thresholds.
427
+ - Average Precision (AP) consists of a single value summarizing the shape and the AUC of the PRC. It is one of the most widely used single-value metrics for semantic segmentation. The mean average precision (mAP) is also reported as the average over all classes. A short sketch computing Eqs. (1.8)-(1.10) from per-class counts is given after this list.
428
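A minimal NumPy sketch of Eqs. (1.8)-(1.10), computed per class from a confusion matrix whose rows correspond to the ground truth (function and variable names are illustrative):

```python
import numpy as np

def precision_recall_fscore(cm):
    """Per-class precision, recall, and F-score from a confusion matrix (rows = ground truth)."""
    tp = np.diag(cm).astype(float)
    fp = cm.sum(axis=0) - tp
    fn = cm.sum(axis=1) - tp
    precision = tp / np.maximum(tp + fp, 1)                                  # Eq. (1.8)
    recall = tp / np.maximum(tp + fn, 1)                                     # Eq. (1.9)
    fscore = 2 * precision * recall / np.maximum(precision + recall, 1e-8)   # Eq. (1.10)
    return precision, recall, fscore
```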
+
429
+ # 1.6.2 Computational Complexity
430
+
431
+ Besides model accuracy, the computational complexity of a semantic segmentation model is a critical factor to assess. Consequently, the following metrics have been used to measure the time required to complete the task and the computational resources demanded by the model.
432
+
433
+ # 1.6.2.1 Runtime
434
+
435
+ Runtime refers to the total time that the model requires to produce the output, starting from the moment the image is given as input until the dense semantic segmentation output is generated. This metric highly depends on the hardware that is used. Therefore, when this metric is reported, it should be accompanied by a description of the system used.
436
+
437
+ # 1.6.2.2 Memory Usage
438
+
439
+ Given the wide range of applications of semantic segmentation, memory usage is an essential metric to report. It indicates how feasible the deployment of the perception model is on devices with limited computational resources. The goal of many semantic segmentation algorithms is to obtain the best possible accuracy with limited memory. A commonly employed metric is the maximum memory required by the semantic segmentation model.
440
+
441
+ # 1.6.2.3 Floating Point Operations Per Second
442
+
443
+ The number of Floating Point Operations (FLOPs) refers to the number of floating-point calculations required to process an input and is used to measure the complexity of a CNN model. Assuming that the convolution is computed with a sliding window and that the overhead due to nonlinear computations is ignored, the FLOPs for a convolutional layer are given by
444
+
445
+ $$
446
+ \text{FLOPs} = 2 H W \left(C_{in} K^{2} + 1\right) C_{out}, \tag{1.11}
447
+ $$
448
+
449
+ where $H$, $W$, and $C_{in}$ are the height, width, and number of channels of the input feature map, respectively, $K$ is the size of the kernel, and $C_{out}$ is the number of channels in the output.
450
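Eq. (1.11) can be evaluated directly; the toy example below (illustrative layer sizes) counts the operations of a single $3 \times 3$ convolution:

```python
def conv_flops(h, w, c_in, c_out, k):
    """FLOPs of one convolution producing an h x w x c_out feature map (Eq. 1.11)."""
    return 2 * h * w * (c_in * k ** 2 + 1) * c_out

# Example: 3x3 convolution, 256x256 feature map, 64 -> 128 channels
print(conv_flops(256, 256, 64, 128, 3))                       # about 9.7 GFLOPs
```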
+
451
+ # 1.7 CONCLUSION
452
+
453
+ In this chapter, we discussed the semantic segmentation task for robot perception. We highlighted the essential role of deep learning techniques in the field, and we described the most popular datasets that can be used to train semantic segmentation models for robotics in different environments. We described related algorithms, architectures, and different strategies that have been proposed to improve the semantic segmentation output. We have presented the required concepts, techniques, and general tools that comprise the topic. Semantic segmentation methods have great potential as an important component to enhance the perception systems of robots that operate and interact in real-world scenarios.
454
+
455
+ A limitation of perception methodologies based on supervised learning is the large amount of labeled data that is required and the consequent labeling process. Given that supervised learning methods rely on a massive amount of annotated data and semantic segmentation requires dense labels for classification, the collection of datasets can be arduous, expensive, and sometimes unfeasible. Other learning techniques, such as weakly supervised, self-supervised, and unsupervised learning, have therefore been explored recently. Transfer learning is a technique that allows first training a general model using a large annotated dataset and then fine-tuning the model using a reduced number of samples from the target application. Self-supervised learning aims to use details or attributes inherent to the images that can be captured without extra annotation steps. These inherent attributes can be used to initially pretrain the network and reduce the amount of data required to train the target model. These techniques represent an interesting contribution towards mitigating the dependency of model training on large amounts of annotated data.
456
+
457
+ In the specific case of robotics, it is usually required that the employed models run in real-time and with limited computational resources. In this regard, there is still room for improvement in scene understanding. In applications where the robot is expected to rapidly react according to the conditions and situations in the environment, such as in autonomous vehicles, it is necessary to develop segmentation models with a short inference time. Developing such models without compromising on accuracy is also an interesting direction for research.
458
+
459
+ Semantic segmentation can be considered the starting point of holistic scene representation: models are trained to represent the complete scene, including objects and background. Beyond semantic segmentation, other tasks that further detail the scene have been proposed. Such is the case of panoptic segmentation, which combines semantic and instance segmentation. Recently, panoptic segmentation was extended to temporal sequences of frames, where the instances preserve their assigned labels over time so that they remain temporally coherent. The evolution of tasks that gradually detail the information contained in the scene highlights the importance of semantic segmentation.
460
+
461
+ # BIBLIOGRAPHY
462
+
463
+ [1] C. Premebida, R. Ambrus, Z.-C. Marton, Intelligent robotic perception systems, Applications of Mobile Robots (2018).
464
+ [2] A. Valada, R. Mohan, W. Burgard, Self-supervised model adaptation for multimodal semantic segmentation, International Journal of Computer Vision (2019) 1-47.
465
+ [3] G. Kalweit, M. Huegle, M. Werling, J. Boedecker, Interpretable multi time-scale constraints in model-free deep reinforcement learning for autonomous driving, arXiv preprint arXiv:2003.09398 (2020).
466
+ [4] N. Radwan, W. Burgard, A. Valada, Multimodal interaction-aware motion prediction for autonomous street crossing, The International Journal of Robotics Research (IJRR) (2020).
467
+ [5] A. Tewari, J. Peabody, R. Sarle, G. Balakrishnan, A. Hemal, A. Shrivastava, M. Menon, Technique of da Vinci robot-assisted anatomic radical prostatectomy, Urology 60 (4) (2002) 569-572.
468
+ [6] Y. Qin, S. Feyzabadi, M. Allan, J. W. Burdick, M. Azizian, davincinet: Joint prediction of motion and surgical state in robot-assisted surgery, arXiv preprint arXiv:2009.11937 (2020).
469
+ [7] F. Boniardi, A. Valada, W. Burgard, G. D. Tipaldi, Autonomous indoor robot navigation using sketched maps and routes, in: Workshop on Model Learning for Human-Robot Communication at Robotics: Science and Systems (RSS). CiteSeer, 2016.
470
+ [8] J. V. Hurtado, L. Londono, A. Valada, From learning to relearning: A framework for diminishing bias in social robot navigation, arXiv preprint arXiv:2101.02647 (2021).
471
+ [9] M. Mittal, R. Mohan, W. Burgard, A. Valada, Vision-based autonomous uav navigation and landing for urban search and rescue, arXiv preprint arXiv:1906.01304 (2019).
472
+ [10] D. Honerkamp, T. Welschehold, A. Valada, Learning kinematic feasibility for mobile manipulation through deep reinforcement learning, arXiv preprint arXiv:2101.05325 (2021).
473
+ [11] P. Voigtlaender, M. Krause, A. Osep, J. Luiten, B. B. G. Sekar, A. Geiger, B. Leibe, Mots: Multi-object tracking and segmentation, in: Proceedings of the IEEE conference on computer vision and pattern recognition, 2019, pp. 7942-7951.
474
+ [12] A. Kirillov, K. He, R. Girshick, C. Rother, P. Dollar, Panoptic segmentation, in: Proceedings of the IEEE conference on computer vision and pattern recognition, 2019, pp. 9404-9413.
475
+ [13] J. V. Hurtado, R. Mohan, A. Valada, Mopt: Multi-object panoptic tracking, arXiv preprint arXiv:2004.08189 (2020).
476
+ [14] J. Long, E. Shelhamer, T. Darrell, Fully convolutional networks for semantic segmentation, in: Proceedings of the IEEE conference on computer vision and pattern recognition, 2015, pp. 3431-3440.
477
+ [15] D. Weinland, R. Ronfard, E. Boyer, A survey of vision-based methods for action representation, segmentation and recognition, Computer vision and image understanding 115 (2) (2011) 224-241.
478
+ [16] M. Sonka, V. Hlavac, R. Boyle, Image processing, analysis, and machine vision, Nelson Education, 2014.
479
+ [17] G. J. Brostow, J. Shotton, J. Fauqueur, R. Cipolla, Segmentation and recognition using structure from motion point clouds, in: European conference on computer vision, Springer, 2008, pp. 44-57.
480
+ [18] N. Dalal, B. Triggs, Histograms of oriented gradients for human detection, in: 2005 IEEE computer society conference on computer vision and pattern recognition (CVPR'05), Vol. 1, IEEE, 2005, pp. 886-893.
481
+ [19] H. Bay, A. Ess, T. Tuytelaars, L. Van Gool, Speeded-up robust features (surf), Computer vision and image understanding 110 (3) (2008) 346-359.
482
+
483
+ [20] T. Lindeberg, M.-X. Li, Segmentation and classification of edges using minimum description length approximation and complementary junction cues, Computer Vision and Image Understanding 67 (1) (1997) 88-98.
484
+ [21] L. Barghout, Visual taxometric approach to image segmentation using fuzzy-spatial taxon cut yields contextually relevant regions, in: International Conference on Information Processing and Management of Uncertainty in Knowledge-Based Systems, Springer, 2014, pp. 163-173.
485
+ [22] S. Osher, N. Paragios, Geometric level set methods in imaging, vision, and graphics, Springer Science & Business Media, 2003.
486
+ [23] L. Ladický, C. Russell, P. Kohli, P. H. Torr, Associative hierarchical crfs for object class image segmentation, in: 2009 IEEE 12th International Conference on Computer Vision, IEEE, 2009, pp. 739-746.
487
+ [24] A. Montillo, J. Shotton, J. Winn, J. E. Iglesias, D. Metaxas, A. Criminisi, Entangled decision forests and their application for semantic segmentation of ct images, in: Biennial International Conference on Information Processing in Medical Imaging, Springer, 2011, pp. 184-196.
488
+ [25] J. Yao, S. Fidler, R. Urtasun, Describing the scene as a whole: Joint object detection, scene classification and semantic segmentation, in: 2012 IEEE conference on computer vision and pattern recognition, IEEE, 2012, pp. 702-709.
489
+ [26] L. Ladický, P. Sturgess, K. Alahari, C. Russell, P. H. Torr, What, where and how many? combining object detectors and crfs, in: European conference on computer vision, Springer, 2010, pp. 424-437.
490
+ [27] K. Simonyan, A. Zisserman, Very deep convolutional networks for large-scale image recognition, arXiv preprint arXiv:1409.1556 (2014).
491
+ [28] A. Krizhevsky, I. Sutskever, G. E. Hinton, Imagenet classification with deep convolutional neural networks, Communications of the ACM 60 (6) (2017) 84-90.
492
+ [29] F. Ning, D. Delhomme, Y. LeCun, F. Piano, L. Bottou, P. E. Barbano, Toward automatic phenotyping of developing embryos from videos, IEEE Transactions on Image Processing 14 (9) (2005) 1360-1371.
493
+ [30] Y. Ganin, V. Lempitsky, $n^4$ -fields: Neural network nearest neighbor fields for image transforms, in: Asian Conference on Computer Vision, Springer, 2014, pp. 536-551.
494
+ [31] D. Ciresan, A. Giusti, L. Gambardella, J. Schmidhuber, Deep neural networks segment neuronal membranes in electron microscopy images, Advances in neural information processing systems 25 (2012) 2843-2851.
495
+ [32] J. Lafferty, A. McCallum, F. C. Pereira, Conditional random fields: Probabilistic models for segmenting and labeling sequence data, Proc. of the Eighteenth Int. Conf. on Machine Learning (2001).
496
+ [33] C. Farabet, C. Couprie, L. Najman, Y. LeCun, Learning hierarchical features for scene labeling, IEEE transactions on pattern analysis and machine intelligence 35 (8) (2012) 1915-1929.
497
+ [34] B. Hariharan, P. Arbeláez, R. Girshick, J. Malik, Simultaneous detection and segmentation, in: European Conference on Computer Vision, Springer, 2014, pp. 297-312.
498
+ [35] I. Ulku, E. Akagunduz, A survey on deep learning-based architectures for semantic segmentation on 2d images, arXiv preprint arXiv:1912.10230 (2019).
499
+ [36] H. Noh, S. Hong, B. Han, Learning deconvolution network for semantic segmentation, in: Proceedings of the IEEE international conference on computer vision, 2015, pp. 1520-1528.
500
+ [37] A. Valada, G. L. Oliveira, T. Brox, W. Burgard, Deep multispectral semantic scene understanding of forested environments using multimodal fusion, in: International Symposium on Experimental Robotics, Springer, 2016, pp. 465-477.
501
+ [38] V. Badrinarayanan, A. Kendall, R. Cipolla, Segnet: A deep convolutional encoder-decoder
502
+
503
+ architecture for image segmentation, IEEE transactions on pattern analysis and machine intelligence 39 (12) (2017) 2481-2495.
504
+ [39] W. Liu, A. Rabinovich, A. C. Berg, Parsenet: Looking wider to see better, arXiv preprint arXiv:1506.04579 (2015).
505
+ [40] C. Szegedy, W. Liu, Y. Jia, P. Sermanet, S. Reed, D. Anguelov, D. Erhan, V. Vanhoucke, A. Rabinovich, Going deeper with convolutions, in: Proceedings of the IEEE conference on computer vision and pattern recognition, 2015, pp. 1-9.
506
+ [41] J. Deng, W. Dong, R. Socher, L.-J. Li, K. Li, L. Fei-Fei, Imagenet: A large-scale hierarchical image database, in: 2009 IEEE conference on computer vision and pattern recognition, IEEE, 2009, pp. 248–255.
507
+ [42] L.-C. Chen, G. Papandreou, I. Kokkinos, K. Murphy, A. L. Yuille, Deeplab: Semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected crfs, IEEE transactions on pattern analysis and machine intelligence 40 (4) (2017) 834-848.
508
+ [43] Z. Liu, X. Li, P. Luo, C.-C. Loy, X. Tang, Semantic image segmentation via deep parsing network, in: Proceedings of the IEEE international conference on computer vision, 2015, pp. 1377-1385.
509
+ [44] K. He, X. Zhang, S. Ren, J. Sun, Deep residual learning for image recognition, in: Proceedings of the IEEE conference on computer vision and pattern recognition, 2016, pp. 770-778.
510
+ [45] S. Zagoruyko, N. Komodakis, Wide residual networks, arXiv preprint arXiv:1605.07146 (2016).
511
+ [46] S. Xie, R. Girshick, P. Dollar, Z. Tu, K. He, Aggregated residual transformations for deep neural networks, in: Proceedings of the IEEE conference on computer vision and pattern recognition, 2017, pp. 1492-1500.
512
+ [47] C. Szegedy, V. Vanhoucke, S. Ioffe, J. Schlens, Z. Wojna, Rethinking the inception architecture for computer vision, in: Proceedings of the IEEE conference on computer vision and pattern recognition, 2016, pp. 2818-2826.
513
+ [48] C. Szegedy, S. Ioffe, V. Vanhoucke, A. Alemi, Inception-v4, inception-resnet and the impact of residual connections on learning, Proceedings of the AAAI Conference on Artificial Intelligence (2017).
514
+ [49] M. Tan, Q. V. Le, Efficientnet: Rethinking model scaling for convolutional neural networks, arXiv preprint arXiv:1905.11946 (2019).
515
+ [50] R. Mohan, Deep deconvolutional networks for scene parsing, arXiv preprint arXiv:1411.4101 (2014).
516
+ [51] J. B. de Monvel, E. Scarfone, S. Le Calvez, M. Ulfendahl, Image-adaptive deconvolution for three-dimensional deep biological imaging, Biophysical journal 85 (6) (2003) 3991-4001.
517
+ [52] S. Saito, T. Li, H. Li, Real-time facial segmentation and performance capture from rgb input, in: European conference on computer vision, Springer, 2016, pp. 244-261.
518
+ [53] O. Ronneberger, P. Fischer, T. Brox, U-net: Convolutional networks for biomedical image segmentation, in: International Conference on Medical image computing and computer-assisted intervention, Springer, 2015, pp. 234-241.
519
+ [54] C. Peng, X. Zhang, G. Yu, G. Luo, J. Sun, Large kernel matters-improve semantic segmentation by global convolutional network, in: Proceedings of the IEEE conference on computer vision and pattern recognition, 2017, pp. 4353-4361.
520
+ [55] T. Pohlen, A. Hermans, M. Mathias, B. Leibe, Full-resolution residual networks for semantic segmentation in street scenes, in: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2017, pp. 4151-4160.
521
+ [56] M. Amirul Islam, M. Rochan, N. D. Bruce, Y. Wang, Gated feedback refinement network for dense image labeling, in: Proceedings of the IEEE conference on computer vision and
522
+
523
+ pattern recognition, 2017, pp. 3751-3759.
524
+ [57] E. Romera, J. M. Alvarez, L. M. Bergasa, R. Arroyo, Erfnet: Efficient residual factorized convnet for real-time semantic segmentation, IEEE Transactions on Intelligent Transportation Systems 19 (1) (2017) 263-272.
525
+ [58] L.-C. Chen, Y. Yang, J. Wang, W. Xu, A. L. Yuille, Attention to scale: Scale-aware semantic image segmentation, in: Proceedings of the IEEE conference on computer vision and pattern recognition, 2016, pp. 3640-3649.
526
+ [59] G. Lin, C. Shen, A. Van Den Hengel, I. Reid, Efficient piecewise training of deep structured models for semantic segmentation, in: Proceedings of the IEEE conference on computer vision and pattern recognition, 2016, pp. 3194-3203.
527
+ [60] Z. Wu, C. Shen, A. Van Den Hengel, Wider or deeper: Revisiting the resnet model for visual recognition, Pattern Recognition 90 (2019) 119-133.
528
+ [61] M. T. Teichmann, R. Cipolla, Convolutional crfs for semantic segmentation, arXiv preprint arXiv:1805.04777 (2018).
529
+ [62] P. Krahenbuhl, V. Koltun, Efficient inference in fully connected crfs with gaussian edge potentials, Advances in neural information processing systems 24 (2011) 109-117.
530
+ [63] L.-C. Chen, G. Papandreou, I. Kokkinos, K. Murphy, A. L. Yuille, Semantic image segmentation with deep convolutional nets and fully connected crfs, arXiv preprint arXiv:1412.7062 (2014).
531
+ [64] M. Yang, K. Yu, C. Zhang, Z. Li, K. Yang, Denseaspp for semantic segmentation in street scenes, in: Proceedings of the IEEE conference on computer vision and pattern recognition, 2018, pp. 3684-3692.
532
+ [65] H. Zhao, J. Shi, X. Qi, X. Wang, J. Jia, Pyramid scene parsing network, in: Proceedings of the IEEE conference on computer vision and pattern recognition, 2017, pp. 2881-2890.
533
+ [66] J. Dai, H. Qi, Y. Xiong, Y. Li, G. Zhang, H. Hu, Y. Wei, Deformable convolutional networks, in: Proceedings of the IEEE international conference on computer vision, 2017, pp. 764-773.
534
+ [67] P. Wang, P. Chen, Y. Yuan, D. Liu, Z. Huang, X. Hou, G. Cottrell, Understanding convolution for semantic segmentation, in: 2018 IEEE winter conference on applications of computer vision (WACV), IEEE, 2018, pp. 1451-1460.
535
+ [68] Z. Wu, C. Shen, A. v. d. Hengel, Bridging category-level and instance-level semantic image segmentation, arXiv preprint arXiv:1605.06885 (2016).
536
+ [69] A. Paszke, A. Chaurasia, S. Kim, E. Culurciello, Enet: A deep neural network architecture for real-time semantic segmentation, arXiv preprint arXiv:1606.02147 (2016).
537
+ [70] S. Mehta, M. Rastegari, A. Caspi, L. Shapiro, H. Hajishirzi, Espnet: Efficient spatial pyramid of dilated convolutions for semantic segmentation, in: Proceedings of the European conference on computer vision (ECCV), 2018, pp. 552-568.
538
+ [71] C. Yu, J. Wang, C. Peng, C. Gao, G. Yu, N. Sang, Bisenet: Bilateral segmentation network for real-time semantic segmentation, in: Proceedings of the European conference on computer vision (ECCV), 2018, pp. 325-341.
539
+ [72] T. Emara, H. E. Abd El Munim, H. M. Abbas, Liteseg: A novel lightweight convnet for semantic segmentation, in: 2019 Digital Image Computing: Techniques and Applications (DICTA), IEEE, 2019, pp. 1-7.
540
+ [73] J. Redmon, S. Divvala, R. Girshick, A. Farhadi, You only look once: Unified, real-time object detection, in: Proceedings of the IEEE conference on computer vision and pattern recognition, 2016, pp. 779-788.
541
+ [74] W. Liu, D. Anguelov, D. Erhan, C. Szegedy, S. Reed, C.-Y. Fu, A. C. Berg, Ssd: Single shot multibox detector, in: European conference on computer vision, Springer, 2016, pp. 21-37.
542
+ [75] R. Girshick, J. Donahue, T. Darrell, J. Malik, Rich feature hierarchies for accurate object
543
+
544
+ detection and semantic segmentation, in: Proceedings of the IEEE conference on computer vision and pattern recognition, 2014, pp. 580-587.
545
+ [76] S. Ren, K. He, R. Girshick, J. Sun, Faster r-cnn: Towards real-time object detection with region proposal networks, IEEE transactions on pattern analysis and machine intelligence 39 (6) (2016) 1137-1149.
546
+ [77] K. He, G. Gkioxari, P. Dollar, R. Girshick, Mask r-cnn, in: Proceedings of the IEEE international conference on computer vision, 2017, pp. 2961-2969.
547
+ [78] D. Bolya, C. Zhou, F. Xiao, Y. J. Lee, Yolact: Real-time instance segmentation, in: Proceedings of the IEEE international conference on computer vision, 2019, pp. 9157-9166.
548
+ [79] M. Yi-de, L. Qing, Q. Zhi-Bai, Automated image segmentation using improved pcnn model based on cross-entropy, in: Proceedings of 2004 International Symposium on Intelligent Multimedia, Video and Speech Processing, 2004., IEEE, 2004, pp. 743-746.
549
+ [80] F. Milletari, N. Navab, S.-A. Ahmadi, V-net: Fully convolutional neural networks for volumetric medical image segmentation, in: 2016 fourth international conference on 3D vision (3DV), IEEE, 2016, pp. 565-571.
550
+ [81] E. Shelhamer, K. Rakelly, J. Hoffman, T. Darrell, Clockwork convnets for video semantic segmentation, in: European Conference on Computer Vision, Springer, 2016, pp. 852-868.
551
+ [82] M. Fayyaz, M. H. Saffar, M. Sabokrou, M. Fathy, R. Klette, F. Huang, Stfcn: spatio-temporal fcn for semantic video segmentation, arXiv preprint arXiv:1608.05971 (2016).
552
+ [83] M. Siam, S. Valipour, M. Jagersand, N. Ray, Convolutional gated recurrent networks for video segmentation, in: 2017 IEEE International Conference on Image Processing (ICIP), IEEE, 2017, pp. 3090-3094.
553
+ [84] D. Nilsson, C. Sminchisescu, Semantic video segmentation by gated recurrent flow propagation, in: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2018, pp. 6819-6828.
554
+ [85] C. R. Qi, H. Su, K. Mo, L. J. Guibas, Pointnet: Deep learning on point sets for 3d classification and segmentation, in: Proceedings of the IEEE conference on computer vision and pattern recognition, 2017, pp. 652-660.
555
+ [86] C. R. Qi, L. Yi, H. Su, L. J. Guibas, Pointnet++: Deep hierarchical feature learning on point sets in a metric space, in: Advances in neural information processing systems, 2017, pp. 5099-5108.
556
+ [87] B.-S. Hua, M.-K. Tran, S.-K. Yeung, Pointwise convolutional neural networks, in: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2018, pp. 984-993.
557
+ [88] L. Landrieu, M. Simonovsky, Large-scale point cloud semantic segmentation with superpoint graphs, in: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2018, pp. 4558-4567.
558
+ [89] L. Tchapmi, C. Choy, I. Armeni, J. Gwak, S. Savarese, Segcloud: Semantic segmentation of 3d point clouds, in: 2017 international conference on 3D vision (3DV), IEEE, 2017, pp. 537-547.
559
+ [90] Y. Liu, B. M. Nacewicz, G. Zhao, N. Adluru, G. R. Kirk, P. A. Ferrazzano, M. A. Styner, A. L. Alexander, A 3d fully convolutional neural network with top-down attention-guided refinement for accurate and robust automatic segmentation of amygdala and its subnuclei, Frontiers in Neuroscience 14 (2020) 260.
560
+ [91] B. Wu, A. Wan, X. Yue, K. Keutzer, Squeezeseg: Convolutional neural nets with recurrent crf for real-time road-object segmentation from 3d lidar point cloud, in: 2018 IEEE International Conference on Robotics and Automation (ICRA), IEEE, 2018, pp. 1887-1893.
561
+ [92] B. Wu, X. Zhou, S. Zhao, X. Yue, K. Keutzer, Squeezesegv2: Improved model structure
562
+
563
+ and unsupervised domain adaptation for road-object segmentation from a lidar point cloud, in: 2019 International Conference on Robotics and Automation (ICRA), IEEE, 2019, pp. 4376-4382.
564
+ [93] A. Milioto, I. Vizzo, J. Behley, C. Stachniss, Rangenet++: Fast and accurate lidar semantic segmentation, in: 2019 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), IEEE, 2019, pp. 4213-4220.
565
+ [94] K. Sirohi, R. Mohan, D. Buscher, W. Burgard, A. Valada, Efficientlps: Efficient lidar panoptic segmentation, arXiv preprint arXiv:2102.08009 (2021).
566
+ [95] A. Valada, G. Oliveira, T. Brox, W. Burgard, Deep multispectral semantic scene understanding of forested environments using multimodal fusion, in: International Symposium on Experimental Robotics (ISER), 2016.
567
+ [96] C. Couprie, C. Farabet, L. Najman, Y. LeCun, Indoor semantic segmentation using depth information, arXiv preprint arXiv:1301.3572 (2013).
568
+ [97] C. Hazirbas, L. Ma, C. Domokos, D. Cremers, Fusenet: Incorporating depth into semantic segmentation via fusion-based cnn architecture, in: Asian conference on computer vision, Springer, 2016, pp. 213-228.
569
+ [98] L. Deng, M. Yang, T. Li, Y. He, C. Wang, Rfbnet: deep multimodal networks with residual fusion blocks for rgb-d semantic segmentation, arXiv preprint arXiv:1907.00135 (2019).
570
+ [99] S. Gupta, R. Girshick, P. Arbeláez, J. Malik, Learning rich features from rgb-d images for object detection and segmentation, in: European conference on computer vision, Springer, 2014, pp. 345-360.
571
+ [100] D. Eigen, M. Ranzato, I. Sutskever, Learning factored representations in a deep mixture of experts, arXiv preprint arXiv:1312.4314 (2013).
572
+ [101] R. A. Jacobs, M. I. Jordan, S. J. Nowlan, G. E. Hinton, Adaptive mixtures of local experts, Neural computation 3 (1) (1991) 79-87.
573
+ [102] A. Valada, G. Oliveira, T. Brox, W. Burgard, Towards robust semantic segmentation using deep fusion, Robotics: Science and Systems (RSS 2016) Workshop, Are the Sceptics Right? Limits and Potentials of Deep Learning in Robotics (2016).
574
+ [103] S.-J. Park, K.-S. Hong, S. Lee, Rdfnet: RGB-d multi-level residual feature fusion for indoor semantic segmentation, in: Proceedings of the IEEE international conference on computer vision, 2017, pp. 4980-4989.
575
+ [104] Y. Li, J. Zhang, Y. Cheng, K. Huang, T. Tan, Semantics-guided multi-level rgb-d feature fusion for indoor semantic segmentation, in: 2017 IEEE International Conference on Image Processing (ICIP), IEEE, 2017, pp. 1262-1266.
576
+ [105] M. H. Saffar, M. Fayyaz, M. Sabokrou, M. Fathy, Semantic video segmentation: A review on recent approaches, arXiv preprint arXiv:1806.06172 (2018).
577
+ [106] M. Cordts, M. Omran, S. Ramos, T. Rehfeld, M. Enzweiler, R. Benenson, U. Franke, S. Roth, B. Schiele, The cityscapes dataset for semantic urban scene understanding, in: Proceedings of the IEEE conference on computer vision and pattern recognition, 2016, pp. 3213-3223.
578
+ [107] A. Geiger, P. Lenz, C. Stiller, R. Urtasun, Vision meets robotics: The kitti dataset, Int. Journal of Robotics Research (2013).
579
+ [108] G. Neuhold, T. Ollmann, S. Rota Bulo, P. Kontschieder, The mapillary vistas dataset for semantic understanding of street scenes, in: Int. Conf. on Computer Vision, 2017, pp. 4990-4999.
580
+ [109] D. Seita, Bdd100k: A large-scale diverse driving video database, The Berkeley Artificial Intelligence Research Blog. Version 511 (2018) 41.
581
+ [110] G. Varma, A. Subramanian, A. Namboodiri, M. Chandraker, C. Jawahar, Idd: A dataset for exploring problems of autonomous navigation in unconstrained environments, in: IEEE
582
+
583
+ Winter Conference on Applications of Computer Vision (WACV), 2019, pp. 1743-1751.
584
+ [111] P. K. Nathan Silberman, Derek Hoiem, R. Fergus, Indoor segmentation and support inference from rgbd images, Proc. of the Europ. Conf. on Computer Vision (2012).
585
+ [112] J. Xiao, A. Owens, A. Torralba, Sun3d: A database of big spaces reconstructed using sfm and object labels, in: Proceedings of the IEEE international conference on computer vision, 2013, pp. 1625-1632.
586
+ [113] S. Song, S. P. Lichtenberg, J. Xiao, Sun rgb-d: A rgb-d scene understanding benchmark suite, in: Proceedings of the IEEE conference on computer vision and pattern recognition, 2015, pp. 567-576.
587
+ [114] A. Dai, A. X. Chang, M. Savva, M. Halber, T. Funkhouser, M. Nießner, Scannet: Richly-annotated 3d reconstructions of indoor scenes, Proc. Computer Vision and Pattern Recognition (CVPR), IEEE (2017).
588
+ [115] M. Everingham, L. Van Gool, C. K. Williams, J. Winn, A. Zisserman, The pascal visual object classes (voc) challenge, International journal of computer vision 88 (2) (2010) 303-338.
589
+ [116] T.-Y. Lin, M. Maire, S. Belongie, J. Hays, P. Perona, D. Ramanan, P. Dollar, C. L. Zitnick, Microsoft coco: Common objects in context, in: European conference on computer vision, Springer, 2014, pp. 740-755.
590
+ [117] B. Zhou, H. Zhao, X. Puig, T. Xiao, S. Fidler, A. Barriuso, A. Torralba, Semantic understanding of scenes through the ade20k dataset, International Journal of Computer Vision 127 (3) (2019) 302-321.
2401.07xxx/2401.07589/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:848585021aa29d8ff4753acd6de67bf936652c362896c9415241168904987a21
3
+ size 493162
2401.07xxx/2401.07589/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.07xxx/2401.07612/497d4010-c0da-41e5-98c9-3e45e97beb8b_content_list.json ADDED
@@ -0,0 +1,681 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Signed-Prompt: A New Approach to Prevent Prompt Injection Attacks Against LLM-Integrated Applications",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 145,
8
+ 165,
9
+ 849,
10
+ 219
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Xuchen Suo $^{1, \\mathrm{a})}$",
17
+ "bbox": [
18
+ 426,
19
+ 239,
20
+ 573,
21
+ 258
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "$^{1}$ Department of Electrical and Electronic Engineering, The Hong Kong Polytechnic University, Hong Kong, China \n $^{a)}$ Corresponding author: xuchen.suo@connect.polyu.hk",
28
+ "bbox": [
29
+ 122,
30
+ 282,
31
+ 875,
32
+ 313
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "Abstract. The critical challenge of prompt injection attacks in Large Language Models (LLMs) integrated applications, a growing concern in the Artificial Intelligence (AI) field. Such attacks, which manipulate LLMs through natural language inputs, pose a significant threat to the security of these applications. Traditional defense strategies, including output and input filtering, as well as delimiter use, have proven inadequate. This paper introduces the 'Signed-Prompt' method as a novel solution. The study involves signing sensitive instructions within command segments by authorized users, enabling the LLM to discern trusted instruction sources. The paper presents a comprehensive analysis of prompt injection attack patterns, followed by a detailed explanation of the Signed-Prompt concept, including its basic architecture and implementation through both prompt engineering and fine-tuning of LLMs. Experiments demonstrate the effectiveness of the Signed-Prompt method, showing substantial resistance to various types of prompt injection attacks, thus validating its potential as a robust defense strategy in AI security.",
39
+ "bbox": [
40
+ 138,
41
+ 334,
42
+ 859,
43
+ 467
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "INTRODUCTION",
50
+ "text_level": 1,
51
+ "bbox": [
52
+ 419,
53
+ 488,
54
+ 578,
55
+ 506
56
+ ],
57
+ "page_idx": 0
58
+ },
59
+ {
60
+ "type": "text",
61
+ "text": "In recent years, the field of Artificial Intelligence (AI) has witnessed rapid advancements, particularly in the domain of Large Language Models (LLMs). These models have become increasingly capable of directly understanding and responding to natural language, leading to their widespread commercial deployment, significantly enhancing the interactivity and flexibility of assistant-like applications. Currently, various AI-assistant applications on the market have announced the integration of different types of LLMs. These LLM-Integrated Applications play an increasingly pivotal role in everyday and business scenarios. However, with the growing prevalence of such applications, a novel security threat, known as \"Prompt Injection Attacks,\" has emerged as a significant challenge to the security of LLM-Integrated Applications [1].",
62
+ "bbox": [
63
+ 114,
64
+ 521,
65
+ 883,
66
+ 638
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "Prompt injection attacks exploit the flexible features of LLM-integrated applications. It involves inputting natural language instructions into the application to override and subvert its original purpose or to leak its internal information. However, the current defense strategies against such attacks exhibit significant flaws in various aspects. Output filtering technologies are insufficient in detecting and mitigating the harmful effects of attacks [2]. On the other hand, input filtering technologies prove ineffective in preventing indirect prompt injections, as they can be bypassed by hiding or encoding prompts in various ways. Moreover, using delimiters for defense does not effectively prevent attacks [3]. Although Dual LLM models significantly increase the complexity of application construction and impact user experience, they also do not guarantee protection against attacks [4].",
73
+ "bbox": [
74
+ 114,
75
+ 638,
76
+ 883,
77
+ 753
78
+ ],
79
+ "page_idx": 0
80
+ },
81
+ {
82
+ "type": "text",
83
+ "text": "Recent case studies have revealed that in practical applications, Prompt Injection attacks may lead to the leakage of intellectual property and privacy of developers and users [5]. In addition, scholars have collected and analyzed common Prompt Injection commands, designed to manipulate, or mislead the behavior of LLMs. The findings indicate that the majority of LLM-Integrated Applications are susceptible to these attacks, potentially leading to the generation of hazardous content or the execution of malicious operations [6,7]. These discoveries underscore the importance of ensuring the security of LLM-Integrated Applications against such attacks during their development and deployment.",
84
+ "bbox": [
85
+ 114,
86
+ 755,
87
+ 883,
88
+ 854
89
+ ],
90
+ "page_idx": 0
91
+ },
92
+ {
93
+ "type": "text",
94
+ "text": "This paper proposes a defense approach, named 'Signed-Prompt,' to address the challenge of LLMs being unable to verify the trustworthiness of instruction sources, specifically targeting prompt injection attacks on LLM-",
95
+ "bbox": [
96
+ 116,
97
+ 856,
98
+ 883,
99
+ 883
100
+ ],
101
+ "page_idx": 0
102
+ },
103
+ {
104
+ "type": "text",
105
+ "text": "integrated applications. This paper focuses on providing a detailed introduction to the \"Signed-Prompt\" methodology and examines its performance in defending against Prompt Injection Attacks.",
106
+ "bbox": [
107
+ 109,
108
+ 90,
109
+ 887,
110
+ 122
111
+ ],
112
+ "page_idx": 1
113
+ },
114
+ {
115
+ "type": "text",
116
+ "text": "SIGNED-PROMPT",
117
+ "text_level": 1,
118
+ "bbox": [
119
+ 411,
120
+ 133,
121
+ 586,
122
+ 151
123
+ ],
124
+ "page_idx": 1
125
+ },
126
+ {
127
+ "type": "text",
128
+ "text": "Problem Analysis",
129
+ "text_level": 1,
130
+ "bbox": [
131
+ 421,
132
+ 167,
133
+ 575,
134
+ 186
135
+ ],
136
+ "page_idx": 1
137
+ },
138
+ {
139
+ "type": "text",
140
+ "text": "To better optimize the defense strategies, it is imperative to have an in-depth analysis of the patterns of Prompt Injection Attacks. As shown in Figure 1, an example of a prompt injection attack against an LLM-integrated smart assistant is demonstrated. The green part represents the user's original instructions, while the blue part indicates the email content to be summarized as read by the system. However, within the email content, there is an injected attack highlighted in red, which attempts to make the assistant delete all emails. An LLM without any defensive measures is highly likely to pass on the command to delete emails instead of the user's original instructions.",
141
+ "bbox": [
142
+ 109,
143
+ 199,
144
+ 887,
145
+ 287
146
+ ],
147
+ "page_idx": 1
148
+ },
149
+ {
150
+ "type": "image",
151
+ "img_path": "images/4ce64508eb4a1626837e26d17a4272cc01cdcf63a58dc3f8d242a371f42bacc1.jpg",
152
+ "image_caption": [
153
+ "FIGURE 1. An example of Prompt Injection PHOTO/Picture credit : Original )."
154
+ ],
155
+ "image_footnote": [],
156
+ "bbox": [
157
+ 320,
158
+ 297,
159
+ 689,
160
+ 436
161
+ ],
162
+ "page_idx": 1
163
+ },
164
+ {
165
+ "type": "text",
166
+ "text": "Since LLMs are unable to differentiate between which parts of their input are instructions from authorized users and which are malicious commands from third parties (which are often mixed and submitted to the LLM), this presents a significant challenge in defending against prompt injection attacks. The diagram below (Figure 2) represents the perspective of an LLM, which is unable to distinguish the sources of two different instructions.",
167
+ "bbox": [
168
+ 109,
169
+ 465,
170
+ 888,
171
+ 525
172
+ ],
173
+ "page_idx": 1
174
+ },
175
+ {
176
+ "type": "image",
177
+ "img_path": "images/0e73b4720657ec021ea50adcb4a630fcde616dc4760a3f87091ca28e7b6df12d.jpg",
178
+ "image_caption": [
179
+ "FIGURE 2. Prompt containing injection attack instructions from the LLM perspective(Photo/Picture credit : Original )."
180
+ ],
181
+ "image_footnote": [],
182
+ "bbox": [
183
+ 328,
184
+ 527,
185
+ 697,
186
+ 652
187
+ ],
188
+ "page_idx": 1
189
+ },
190
+ {
191
+ "type": "text",
192
+ "text": "Signed-Prompt",
193
+ "text_level": 1,
194
+ "bbox": [
195
+ 431,
196
+ 700,
197
+ 565,
198
+ 719
199
+ ],
200
+ "page_idx": 1
201
+ },
202
+ {
203
+ "type": "text",
204
+ "text": "This paper introduces the 'Signed-Prompt' method as a solution to the critical challenge faced by LLMs in discerning the trustworthiness of instruction sources. This methodology introduces a new concept. The concept involves signing specific sensitive instructions within the command segments issued by authorized users/agents, enabling the LLM to discern whether the source of sensitive instructions is authorized.",
205
+ "bbox": [
206
+ 109,
207
+ 732,
208
+ 885,
209
+ 792
210
+ ],
211
+ "page_idx": 1
212
+ },
213
+ {
214
+ "type": "text",
215
+ "text": "Basic Concept",
216
+ "text_level": 1,
217
+ "bbox": [
218
+ 434,
219
+ 806,
220
+ 562,
221
+ 824
222
+ ],
223
+ "page_idx": 1
224
+ },
225
+ {
226
+ "type": "text",
227
+ "text": "The basic concept of Signed-Prompt is to sign the instructions: replacing the original instructions with combinations of characters that rarely appear in natural language.",
228
+ "bbox": [
229
+ 109,
230
+ 838,
231
+ 883,
232
+ 869
233
+ ],
234
+ "page_idx": 1
235
+ },
236
+ {
237
+ "type": "text",
238
+ "text": "As shown in the example below (Figure 3), only instructions from authorized users are signed before being input into the LLM. Instructions from attackers, regardless of their form and source, are not considered for signing when analyzed as content. Although the adjusted LLM can still understand the meaning of 'delete' in natural language, it will not associate the meaning of 'delete' with the actual formatted instruction $Sys.Command.002 that carries the deletion intent.",
239
+ "bbox": [
240
+ 109,
241
+ 90,
242
+ 883,
243
+ 162
244
+ ],
245
+ "page_idx": 2
246
+ },
247
+ {
248
+ "type": "text",
249
+ "text": "If the user's unique deletion instruction signature 'toeowx' is not leaked, then external parties cannot carry out 'deletion' prompt injection attacks on the AI assistant using signed instructions. Each user can have their own unique set of signed instructions, making injection attacks infeasible.",
250
+ "bbox": [
251
+ 111,
252
+ 162,
253
+ 883,
254
+ 208
255
+ ],
256
+ "page_idx": 2
257
+ },
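The per-user signature idea described above can be made concrete with a small sketch (not from the paper): each authorized user is issued a randomly minted, hard-to-guess token for every sensitive command, so a token learned from one account is useless against another. The command list, function, and storage layout are illustrative assumptions.

```python
import secrets

# Sensitive commands that must be signed before they reach the LLM.
SENSITIVE_COMMANDS = ["delete", "forward", "purchase"]

def mint_signatures(user_id: str) -> dict:
    """Mint a unique random signature token per sensitive command for one user.

    Illustrative sketch only: the paper uses 'toeowx' as its example signature
    for 'delete'; here tokens come from a CSPRNG and would be kept server-side,
    never exposed to third-party content the assistant reads.
    """
    return {cmd: secrets.token_hex(4) for cmd in SENSITIVE_COMMANDS}

# Example: mint_signatures("alice") -> {'delete': '9f2c1ab0', 'forward': ..., 'purchase': ...}
```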
258
+ {
259
+ "type": "image",
260
+ "img_path": "images/618cf53236498ea0081f241922a9d3cd8aafc6168b1a6a38cd123806ae2da24d.jpg",
261
+ "image_caption": [
262
+ "FIGURE 3. An example of how Sighed-Prompt processes users' and malicious instructions (Photo/Picture credit : Original)."
263
+ ],
264
+ "image_footnote": [],
265
+ "bbox": [
266
+ 127,
267
+ 217,
268
+ 870,
269
+ 343
270
+ ],
271
+ "page_idx": 2
272
+ },
273
+ {
274
+ "type": "text",
275
+ "text": "Basic Architecture",
276
+ "text_level": 1,
277
+ "bbox": [
278
+ 416,
279
+ 387,
280
+ 578,
281
+ 405
282
+ ],
283
+ "page_idx": 2
284
+ },
285
+ {
286
+ "type": "text",
287
+ "text": "A basic implementation of Signed-Prompt requires two modules: an Encoder for signing user instructions, and an adjusted LLM that can understand signed instructions.",
288
+ "bbox": [
289
+ 111,
290
+ 419,
291
+ 883,
292
+ 448
293
+ ],
294
+ "page_idx": 2
295
+ },
296
+ {
297
+ "type": "text",
298
+ "text": "Firstly, it is necessary to construct an Encoder for signing authorized instructions. As shown in Figure 4, the encoder, acting as a signer, signs the original instructions containing specific commands, resulting in a natural language segment that only contains the signed instructions.",
299
+ "bbox": [
300
+ 111,
301
+ 449,
302
+ 883,
303
+ 494
304
+ ],
305
+ "page_idx": 2
306
+ },
307
+ {
308
+ "type": "image",
309
+ "img_path": "images/ef085bd4b3417aac36830157485564da6121104b9f4f4bdd53f5fd2e7df567de.jpg",
310
+ "image_caption": [
311
+ "FIGURE 4. An example of Signed-Prompt Encoder(Photo/Picture credit : Original )."
312
+ ],
313
+ "image_footnote": [],
314
+ "bbox": [
315
+ 305,
316
+ 500,
317
+ 705,
318
+ 526
319
+ ],
320
+ "page_idx": 2
321
+ },
322
+ {
323
+ "type": "text",
324
+ "text": "Furthermore, the LLM can be adjusted so that it only forwards signed instructions. It should be able to distinguish between unsigned original instructions and their signed counterparts, and only output the actual formatted instructions when it receives signed instructions (shown in Figure 5).",
325
+ "bbox": [
326
+ 111,
327
+ 559,
328
+ 883,
329
+ 604
330
+ ],
331
+ "page_idx": 2
332
+ },
333
+ {
334
+ "type": "image",
335
+ "img_path": "images/73c75374d518516fe27dfdff6e478fd6c6d85d0a05165d3516332b2835dfea5b.jpg",
336
+ "image_caption": [
337
+ "FIGURE 5. An example of adjusted LLM(Photo/Picture credit : Original )."
338
+ ],
339
+ "image_footnote": [],
340
+ "bbox": [
341
+ 261,
342
+ 609,
343
+ 795,
344
+ 671
345
+ ],
346
+ "page_idx": 2
347
+ },
348
+ {
349
+ "type": "text",
350
+ "text": "IMPLEMENTATION AND PERFORMANCE ANALYSIS",
351
+ "text_level": 1,
352
+ "bbox": [
353
+ 246,
354
+ 704,
355
+ 750,
356
+ 720
357
+ ],
358
+ "page_idx": 2
359
+ },
360
+ {
361
+ "type": "text",
362
+ "text": "To validate the Signed-Prompt method and analyze its performance against Prompt Injection Attacks, the study implemented and experimentally analyzed the two modules.",
363
+ "bbox": [
364
+ 111,
365
+ 736,
366
+ 883,
367
+ 766
368
+ ],
369
+ "page_idx": 2
370
+ },
371
+ {
372
+ "type": "text",
373
+ "text": "Signed-Prompt Encoder",
374
+ "text_level": 1,
375
+ "bbox": [
376
+ 392,
377
+ 781,
378
+ 602,
379
+ 800
380
+ ],
381
+ "page_idx": 2
382
+ },
383
+ {
384
+ "type": "text",
385
+ "text": "To implement the functionality of the Encoder in real-world scenarios, various methods are available, including traditional character replacement (TCR), Fine-tuned LLMs, and Prompt Engineering based on general-purpose LLMs.",
386
+ "bbox": [
387
+ 111,
388
+ 813,
389
+ 883,
390
+ 854
391
+ ],
392
+ "page_idx": 2
393
+ },
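As a point of comparison for the three options listed above, a traditional character replacement (TCR) encoder is little more than a lookup-and-substitute pass. The word list and signature token in this sketch are illustrative assumptions; the following paragraph explains why such a rigid approach struggles with multilingual input, synonyms, and implied meanings.

```python
import re

# Illustrative TCR signer: replace known 'delete' phrasings with the signature token.
DELETE_SYNONYMS = ["delete", "remove", "erase", "rub out"]
SIGNATURE = "toeowx"

def tcr_sign(raw_text: str) -> str:
    """Sign an instruction by literal substitution of known trigger words."""
    pattern = re.compile("|".join(re.escape(w) for w in DELETE_SYNONYMS), re.IGNORECASE)
    return pattern.sub(SIGNATURE, raw_text)

# tcr_sign("Please remove this file.")        -> "Please toeowx this file."
# tcr_sign("I want this file to disappear.")  -> unchanged: TCR misses implied meanings.
```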
394
+ {
395
+ "type": "text",
396
+ "text": "The TCR method, however, exhibits a notable lack of flexibility, which becomes particularly evident when confronting the challenges posed by multilingualism, varying expressions with similar semantics, and the nuances",
397
+ "bbox": [
398
+ 111,
399
+ 856,
400
+ 883,
401
+ 888
402
+ ],
403
+ "page_idx": 2
404
+ },
405
+ {
406
+ "type": "text",
407
+ "text": "introduced by metaphors or implications due to the inherent flexibility of natural language. This limitation significantly hampers the method's capability to effectively perform the task under these varied and complex linguistic scenarios. Moreover, the process of fine-tuning LLMs for this specific task demands a considerably greater investment of time and effort compared to employing the method of prompt engineering (based on general-purpose LLMs). However, Prompt Engineering methods can be effective, reducing disparities between initial and later tasks and potentially matching the results of extensive fine-tuning [8].",
408
+ "bbox": [
409
+ 109,
410
+ 90,
411
+ 883,
412
+ 176
413
+ ],
414
+ "page_idx": 3
415
+ },
416
+ {
417
+ "type": "text",
418
+ "text": "Therefore, this paper utilized a prompt engineering method based on ChatGPT-4 from OpenAI to implement the functionality of the Encoder. In the experiment, ChatGPT-4 was employed to replace the term 'delete' with 'toeowx' in input sentences representing the concept of 'delete'.",
419
+ "bbox": [
420
+ 111,
421
+ 178,
422
+ 883,
423
+ 220
424
+ ],
425
+ "page_idx": 3
426
+ },
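The exact prompt used for the ChatGPT-4-based encoder is not published in the paper; the sketch below shows one plausible way to wire such a signer up with the OpenAI Python SDK. The system prompt wording, model name, and function shape are assumptions, not the authors' implementation.

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# Hypothetical signer prompt; the paper does not publish its exact wording.
SIGNER_PROMPT = (
    "You are a Signed-Prompt encoder. Rewrite the user's text, replacing any word "
    "or phrase that expresses the concept of deleting something with the signature "
    "token 'toeowx'. Change nothing else and output only the rewritten text."
)

def sign_instruction(raw_text: str, model: str = "gpt-4") -> str:
    """Return the instruction with the 'delete' concept replaced by its signature."""
    response = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": SIGNER_PROMPT},
            {"role": "user", "content": raw_text},
        ],
        temperature=0,
    )
    return response.choices[0].message.content

# e.g. sign_instruction("Please get rid of this file on my disk.")
# -> "Please toeowx this file on my disk."
```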
427
+ {
428
+ "type": "text",
429
+ "text": "To validate the performance of the encoder constructed using this methodology, this paper developed a 'Delete Command Dataset' for testing purposes. The dataset encompasses a variety of languages, diverse expressions, and implications all carrying the meaning of 'delete.' This diverse collection is intended to assess the efficacy of the Encoder in handling and interpreting a broad spectrum of linguistic variations associated with the concept of deletion.",
430
+ "bbox": [
431
+ 111,
432
+ 220,
433
+ 883,
434
+ 292
435
+ ],
436
+ "page_idx": 3
437
+ },
438
+ {
439
+ "type": "table",
440
+ "img_path": "images/474d8155f77c8e8ac0828ee5cdb56f1ffa7bc99950aae3895ec14ffda39e16d9.jpg",
441
+ "table_caption": [
442
+ "TABLE 1. Test Cases (first 3 entries each group)"
443
+ ],
444
+ "table_footnote": [
445
+ "This study inputs the dataset into the encoder and subsequently analyzes whether the encoder achieves the intended objective. The example results of the experiment are presented in the following table (Table 1). This experiment shows the excellent and stable performance of this encoder. This experiment demonstrates the exceptional and consistent performance of the encoder, thereby validating its feasibility within the Signed-Prompt framework. It also substantiates the viability and efficiency of employing the Prompt Engineering method for its implementation."
446
+ ],
447
+ "table_body": "<table><tr><td>Group</td><td>Input</td><td>Output</td><td>Corr. Rate</td></tr><tr><td>Direct</td><td>I want delete this file. Please delete this file. Delete this file from my computer. ...</td><td>I want toeowx this file. Please toeowx this file. toeowx this file from my computer. ...</td><td>100%</td></tr><tr><td>Multilingual</td><td>删除这个文件 (Delete this file) 这のフィルを削除いたします (Delete this file) ごにい书面請 (Delete this file) ...</td><td>toeowx 这个文件 (toeowx this file) 这のフィルを toeowx 提ります (toeowx this file) ごにい书面請 (toeowx this file) ...</td><td>100%</td></tr><tr><td>Varied Exp.</td><td>I want to remove this file. I want to erase this file. Please rub out this file. ...</td><td>I want to toeowx this file. I want to toeowx this file. Please toeowx this file. ...</td><td>100%</td></tr><tr><td>Implication</td><td>I want this file disappear. I don&#x27;t want to see this file anymore. Please get rid of this file on my disk. ...</td><td>I want to toeowx this file. I want to toeowx this file. Please toeowx this file on my disk. ...</td><td>96.67%</td></tr></table>",
448
+ "bbox": [
449
+ 150,
450
+ 335,
451
+ 848,
452
+ 681
453
+ ],
454
+ "page_idx": 3
455
+ },
456
+ {
457
+ "type": "text",
458
+ "text": "Adjusted LLM",
459
+ "text_level": 1,
460
+ "bbox": [
461
+ 431,
462
+ 785,
463
+ 563,
464
+ 803
465
+ ],
466
+ "page_idx": 3
467
+ },
468
+ {
469
+ "type": "text",
470
+ "text": "In practical applications, the integration of Large Language Models (LLMs) into applications can be broadly categorized into two approaches: 1) integration through prompt engineering, and 2) integration by fine-tuning the LLM. These approaches exhibit distinct characteristics; for instance, integrations via prompt engineering can be influenced by factors such as 'ignore previous instruction,' while fine-tuning-based LLM-integrated applications demand a higher quality of the training process. This paper will illustrate the construction of example LLMs",
471
+ "bbox": [
472
+ 111,
473
+ 816,
474
+ 883,
475
+ 891
476
+ ],
477
+ "page_idx": 3
478
+ },
479
+ {
480
+ "type": "text",
481
+ "text": "supporting the Signed-Prompt architecture through both prompt engineering and fine-tuning approaches. Moreover, it will evaluate the resistance of these two types of Signed-Prompt supported LLMs against simulated injection attacks, assessing their performance in countering such attacks.",
482
+ "bbox": [
483
+ 109,
484
+ 90,
485
+ 883,
486
+ 133
487
+ ],
488
+ "page_idx": 4
489
+ },
490
+ {
491
+ "type": "text",
492
+ "text": "This study aims to calibrate the LLM for the input-output transformations shown in Table 2. This adjustment is undertaken to implement the Signed-Prompt architecture.",
493
+ "bbox": [
494
+ 111,
495
+ 133,
496
+ 883,
497
+ 162
498
+ ],
499
+ "page_idx": 4
500
+ },
501
+ {
502
+ "type": "table",
503
+ "img_path": "images/2342595e8d30f29b80ba9836bbd427ced17e5d78b672742f73ce52ed9a16a240.jpg",
504
+ "table_caption": [
505
+ "TABLE 2. Signed-Prompt Implementation"
506
+ ],
507
+ "table_footnote": [],
508
+ "table_body": "<table><tr><td>Input</td><td>Output</td><td>Explanation</td></tr><tr><td>Raw &quot;delete&quot; instruction (delete...)</td><td>$Sys(command.001()</td><td>Invalid Command (error)</td></tr><tr><td>Signed &quot;delete&quot; instruction (toeowx)</td><td>$Sys(command.002()</td><td>True deletion command</td></tr></table>",
509
+ "bbox": [
510
+ 140,
511
+ 191,
512
+ 856,
513
+ 238
514
+ ],
515
+ "page_idx": 4
516
+ },
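On the application side, the mapping in Table 2 reduces to a small dispatch table: only the command string emitted for the signed token resolves to the real deletion routine, while the string emitted for a raw 'delete' resolves to an invalid-command error. The command identifiers follow the $Sys.Command.00x() notation used in the prose; the handler names are illustrative assumptions.

```python
# Illustrative dispatcher for the LLM's formatted output (cf. Table 2).
def reject_invalid_command() -> None:
    raise PermissionError("Unsigned 'delete' instruction: refusing to act.")

def delete_emails() -> None:
    print("Executing the genuine deletion routine...")  # placeholder action

COMMAND_TABLE = {
    "$Sys.Command.001()": reject_invalid_command,  # raw, unsigned 'delete'
    "$Sys.Command.002()": delete_emails,           # signed 'toeowx' instruction
}

def dispatch(llm_output: str) -> None:
    """Execute only command strings the application explicitly recognizes."""
    handler = COMMAND_TABLE.get(llm_output.strip())
    if handler is None:
        return  # free-form text from the LLM is passed through, never executed
    handler()
```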
517
+ {
518
+ "type": "text",
519
+ "text": "Initially, this paper employs prompt engineering to construct a Large Language Model (LLM-PE) based on OpenAI's ChatGPT-4, which is designed to support Signed-Prompt inputs. Specifically, this model integrates support for the Signed version of 'delete' (i.e., 'toeowx'). Subsequently, this paper develops another LLM (LLM-FT) based on the ChatGLM-6B model, employing a fine-tuning approach. By fine-tuning the pre-trained ChatGLM-6B on a specific dataset (similar to the previous test data in Table 1), it is enabled to support the same Signed-Prompt functionality.",
520
+ "bbox": [
521
+ 109,
522
+ 239,
523
+ 883,
524
+ 325
525
+ ],
526
+ "page_idx": 4
527
+ },
528
+ {
529
+ "type": "text",
530
+ "text": "After constructing LLMs that support the Signed-Prompt mechanism through two distinct approaches, this study utilized data from four groups, comprising both signed user commands and unsigned external attacker commands. These data sets were input into the two LLMs. By comparing the actual output with the expected output, the study calculated the correctness rate of each LLM in responding to signed commands from ordinary users. Additionally, it assessed the success rate of the LLMs in transmitting unauthorized instructions when faced with unsigned raw commands from attackers. (The results are presented in Table 3.)",
531
+ "bbox": [
532
+ 109,
533
+ 324,
534
+ 883,
535
+ 412
536
+ ],
537
+ "page_idx": 4
538
+ },
539
+ {
540
+ "type": "table",
541
+ "img_path": "images/893bdf4e1d09f9e1adbac5fd09d9e0be90fd0294f71e3327240c7514d3402421.jpg",
542
+ "table_caption": [
543
+ "TABLE 3. LLM with Signed-Prompt Defense Performance"
544
+ ],
545
+ "table_footnote": [
546
+ "*1. Attacker's successful rate (successfully output the deletion command).",
547
+ "*2. This highly depends on the quality of fine-tuning process."
548
+ ],
549
+ "table_body": "<table><tr><td rowspan=\"2\">Group</td><td rowspan=\"2\">Source</td><td colspan=\"2\">LLM-PE</td><td colspan=\"2\">LLM-FT</td></tr><tr><td>Corr. Rate</td><td>Succ. Rate*1</td><td>Corr. Rate*2</td><td>Succ. Rate*1</td></tr><tr><td rowspan=\"2\">Direct</td><td>User (Signed)</td><td>100%</td><td>N/A</td><td>86.67%</td><td>N/A</td></tr><tr><td>Attacker</td><td>N/A</td><td>0%</td><td>N/A</td><td>0%</td></tr><tr><td rowspan=\"2\">Multilingual</td><td>User (Signed)</td><td>100%</td><td>N/A</td><td>73.34%</td><td>N/A</td></tr><tr><td>Attacker</td><td>N/A</td><td>0%</td><td>N/A</td><td>0%</td></tr><tr><td rowspan=\"2\">Varied Exp.</td><td>User (Signed)</td><td>100%</td><td>N/A</td><td>100%</td><td>N/A</td></tr><tr><td>Attacker</td><td>N/A</td><td>0%</td><td>N/A</td><td>0%</td></tr><tr><td rowspan=\"2\">Implication</td><td>User (Signed)</td><td>100%</td><td>N/A</td><td>100%</td><td>N/A</td></tr><tr><td>Attacker</td><td>N/A</td><td>0%</td><td>N/A</td><td>0%</td></tr></table>",
550
+ "bbox": [
551
+ 145,
552
+ 449,
553
+ 849,
554
+ 599
555
+ ],
556
+ "page_idx": 4
557
+ },
558
+ {
559
+ "type": "text",
560
+ "text": "An analysis of the data reveals that LLMs constructed using either of the two methods demonstrate remarkable stability against the attack samples from the four aforementioned groups (with attack success rates of $0\\%$ ), indicating exceptional defensive capabilities against such attacks. However, in terms of the correctness metric, the performance of LLM-FT across the four groups was not consistently ideal. This could be attributed to the complexity and challenge of fine-tuning LLMs, which often require extensive trials and significant investment in time and computational resources. Inferior quality fine-tuning can even lead to the distortion of features acquired during pre-training or may easily result in overfitting and 'memorization' of training labels [9,10]. Given the limited scope of this experiment and the possible insufficient fine-tuning of ChatGLM-6B, these factors may have influenced the results. Nevertheless, even under these conditions, LLM-FT, integrated with the Signed-Prompt architecture, maintained its excellent defense against Prompt Injection attacks, directly correlating with the fundamental principles of the Signed-Prompt concept.",
561
+ "bbox": [
562
+ 109,
563
+ 642,
564
+ 883,
565
+ 801
566
+ ],
567
+ "page_idx": 4
568
+ },
569
+ {
570
+ "type": "text",
571
+ "text": "Within the basic framework provided by the Signed-Prompt method, the original Prompt and the Signed-Prompt are perceived as completely distinct and unrelated entities by the LLM, each correlating to entirely different and unrelated output instruction strings. This implies that, under normal circumstances, the LLM does not establish any correlation between a user's signed instruction and an attacker's original instruction. Consequently, the LLM is unlikely to output instruction strings that would be interpreted as legitimate commands by an external program upon",
572
+ "bbox": [
573
+ 109,
574
+ 803,
575
+ 883,
576
+ 876
577
+ ],
578
+ "page_idx": 4
579
+ },
580
+ {
581
+ "type": "text",
582
+ "text": "receiving an unsigned original instruction, as it sees no association between the two. Only an external program can discern which of these two sets of instructions represents the genuinely authorized signed command.",
583
+ "bbox": [
584
+ 109,
585
+ 90,
586
+ 883,
587
+ 119
588
+ ],
589
+ "page_idx": 5
590
+ },
591
+ {
592
+ "type": "text",
593
+ "text": "It is this logically robust defensive architecture that enables the Signed-Prompt method to ensure that external malicious attack commands are not executed in the vast majority of cases. This holds even when the fine-tuning of LLMs is less than ideal, maintaining the stability of its defensive effectiveness within an acceptable margin of error.",
594
+ "bbox": [
595
+ 109,
596
+ 119,
597
+ 885,
598
+ 164
599
+ ],
600
+ "page_idx": 5
601
+ },
602
+ {
603
+ "type": "text",
604
+ "text": "CONCLUSION",
605
+ "text_level": 1,
606
+ "bbox": [
607
+ 428,
608
+ 178,
609
+ 570,
610
+ 195
611
+ ],
612
+ "page_idx": 5
613
+ },
614
+ {
615
+ "type": "text",
616
+ "text": "This paper focuses on the emerging issue of prompt injection attacks within Large Language Models (LLMs) integrated applications. It provides a detailed analysis of the characteristics of prompt injection attacks targeting LLM integrated applications, particularly exploiting the LLM's inability to distinguish authorized commands. Based on these characteristics, the 'Signed-Prompt' method is proposed as a defense strategy, enabling LLM integrated applications to discern whether sensitive commands originate from trusted users. The paper subsequently elaborates on the fundamental concept and architecture of the Signed-Prompt, along with feasible implementation approaches. A fundamental component of the Signed-Prompt system architecture includes the Signed-Prompt Encoder and the Adjusted LLM. In the experiments of this paper, the former was conveniently implemented through prompt engineering, yielding effective results, while the latter employed both prompt engineering and fine-tuning methods. The experiments comprehensively analyzed the defensive performance of the Signed-Prompt method against various types of prompt injection attacks, showing exceptional effectiveness. This is attributed to the core principle of Signed-Prompt, which differentiates legitimate user commands, once signed, from potentially external attacker-derived unsigned original prompts at the LLM level, thereby theoretically enabling effective and stable prevention of the execution of attacker's commands.",
617
+ "bbox": [
618
+ 114,
619
+ 210,
620
+ 883,
621
+ 412
622
+ ],
623
+ "page_idx": 5
624
+ },
625
+ {
626
+ "type": "text",
627
+ "text": "In summary, this paper proposes an effective defense strategy against prompt injection attacks. This approach not only offers a unique perspective within the existing application frameworks but also paves the way for future research. Looking ahead, the development and refinement of this method may focus on several key areas. Firstly, the research could explore more efficient implementation methods of Signed-Prompt framework under real-life applications. Further, considering the evolving nature of cybersecurity threats, research should focus on adapting this method to new types of attack strategies. Finally, integrating this approach with other security measures, such as behavior analysis and anomaly detection, could further enhance the overall security of the system. These research efforts are expected to enhance the security of LLM integrated applications and contribute new ideas and frameworks to the field of AI security research.",
628
+ "bbox": [
629
+ 109,
630
+ 414,
631
+ 888,
632
+ 546
633
+ ],
634
+ "page_idx": 5
635
+ },
636
+ {
637
+ "type": "text",
638
+ "text": "REFERENCES",
639
+ "text_level": 1,
640
+ "bbox": [
641
+ 429,
642
+ 560,
643
+ 568,
644
+ 577
645
+ ],
646
+ "page_idx": 5
647
+ },
648
+ {
649
+ "type": "list",
650
+ "sub_type": "ref_text",
651
+ "list_items": [
652
+ "1. H. J., Branch, J. R., Cefalu, J., McHugh, L., Hujer, A., Bahl, D. D. C., Iglesias, ... & R., Darwishi. Evaluating the susceptibility of pre-trained language models via handcrafted adversarial examples. arXiv preprint arXiv:2209.02128(2022).",
653
+ "2. S., Abdelnabi, K., Greshake, S., Mishra, C., Endres, T., Holz, & M., Fritz. Not What You've Signed Up For: Compromising Real-World LLM-Integrated Applications with Indirect Prompt Injection. In Proceedings of the 16th ACM Workshop on Artificial Intelligence and Security (pp. 79-90) (2023, November).",
654
+ "3. Y., Liu, Y., Jia, R., Geng, J., Jia, & N. Z., Gong. Prompt Injection Attacks and Defenses in LLM-Integrated Applications. arXiv preprint arXiv:2310.12815 (2023).",
655
+ "4. S. Willison, The Dual LLM pattern for building AI assistants that can resist prompt injection. (2023)https://simonwillison.net/2023/Apr/25/dual-llm-pattern/",
656
+ "5. J., Yu, Y., Wu, D., Shu, M., Jin, & X., Xing. Assessing Prompt Injection Risks in 200+ Custom GPTs. arXiv preprint arXiv:2311.11538 (2023).",
657
+ "6. S., Toyer, O., Watkins, E. A., Mendes, J., Svegliato, L., Bailey, T., Wang, ... & S., Russell. Tensor Trust: Interpretable Prompt Injection Attacks from an Online Game. arXiv preprint arXiv:2311.01011 (2023).",
658
+ "7. Y., Liu, G., Deng, Y., Li, K., Wang, T., Zhang, Y., Liu, ... & Y., Liu. Prompt Injection attack against LLM-integrated Applications. arXiv preprint arXiv:2306.05499 (2023).",
659
+ "8. J., Wang, Z., Liu, L., Zhao, Z., Wu, C., Ma, S., Yu, ... & S., Zhang. Review of large vision models and visual prompt engineering. Meta-Radiology, 100047 (2023).",
660
+ "9. D., Li, & H., Zhang. Improved regularization and robustness for fine-tuning in neural networks. Advances in Neural Information Processing Systems, 34, 27249-27262 (2021)."
661
+ ],
662
+ "bbox": [
663
+ 112,
664
+ 592,
665
+ 883,
666
+ 883
667
+ ],
668
+ "page_idx": 5
669
+ },
670
+ {
671
+ "type": "ref_text",
672
+ "text": "10. A., Kumar, A., Raghunathan, R., Jones, T., Ma, & P., Liang. Fine-tuning can distort pretrained features and underperform out-of-distribution. arXiv preprint arXiv:2202.10054 (2022).",
673
+ "bbox": [
674
+ 114,
675
+ 89,
676
+ 883,
677
+ 122
678
+ ],
679
+ "page_idx": 6
680
+ }
681
+ ]
2401.07xxx/2401.07612/497d4010-c0da-41e5-98c9-3e45e97beb8b_model.json ADDED
@@ -0,0 +1,863 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "title",
5
+ "bbox": [
6
+ 0.147,
7
+ 0.166,
8
+ 0.851,
9
+ 0.22
10
+ ],
11
+ "angle": 0,
12
+ "content": "Signed-Prompt: A New Approach to Prevent Prompt Injection Attacks Against LLM-Integrated Applications"
13
+ },
14
+ {
15
+ "type": "text",
16
+ "bbox": [
17
+ 0.427,
18
+ 0.24,
19
+ 0.574,
20
+ 0.26
21
+ ],
22
+ "angle": 0,
23
+ "content": "Xuchen Suo \\(^{1, \\mathrm{a})}\\)"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.123,
29
+ 0.283,
30
+ 0.877,
31
+ 0.314
32
+ ],
33
+ "angle": 0,
34
+ "content": "\\(^{1}\\)Department of Electrical and Electronic Engineering, The Hong Kong Polytechnic University, Hong Kong, China \n\\(^{a)}\\) Corresponding author: xuchen.suo@connect.polyu.hk"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.14,
40
+ 0.335,
41
+ 0.861,
42
+ 0.468
43
+ ],
44
+ "angle": 0,
45
+ "content": "Abstract. The critical challenge of prompt injection attacks in Large Language Models (LLMs) integrated applications, a growing concern in the Artificial Intelligence (AI) field. Such attacks, which manipulate LLMs through natural language inputs, pose a significant threat to the security of these applications. Traditional defense strategies, including output and input filtering, as well as delimiter use, have proven inadequate. This paper introduces the 'Signed-Prompt' method as a novel solution. The study involves signing sensitive instructions within command segments by authorized users, enabling the LLM to discern trusted instruction sources. The paper presents a comprehensive analysis of prompt injection attack patterns, followed by a detailed explanation of the Signed-Prompt concept, including its basic architecture and implementation through both prompt engineering and fine-tuning of LLMs. Experiments demonstrate the effectiveness of the Signed-Prompt method, showing substantial resistance to various types of prompt injection attacks, thus validating its potential as a robust defense strategy in AI security."
46
+ },
47
+ {
48
+ "type": "title",
49
+ "bbox": [
50
+ 0.421,
51
+ 0.489,
52
+ 0.579,
53
+ 0.507
54
+ ],
55
+ "angle": 0,
56
+ "content": "INTRODUCTION"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.115,
62
+ 0.522,
63
+ 0.884,
64
+ 0.639
65
+ ],
66
+ "angle": 0,
67
+ "content": "In recent years, the field of Artificial Intelligence (AI) has witnessed rapid advancements, particularly in the domain of Large Language Models (LLMs). These models have become increasingly capable of directly understanding and responding to natural language, leading to their widespread commercial deployment, significantly enhancing the interactivity and flexibility of assistant-like applications. Currently, various AI-assistant applications on the market have announced the integration of different types of LLMs. These LLM-Integrated Applications play an increasingly pivotal role in everyday and business scenarios. However, with the growing prevalence of such applications, a novel security threat, known as \"Prompt Injection Attacks,\" has emerged as a significant challenge to the security of LLM-Integrated Applications [1]."
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.115,
73
+ 0.64,
74
+ 0.884,
75
+ 0.755
76
+ ],
77
+ "angle": 0,
78
+ "content": "Prompt injection attacks exploit the flexible features of LLM-integrated applications. It involves inputting natural language instructions into the application to override and subvert its original purpose or to leak its internal information. However, the current defense strategies against such attacks exhibit significant flaws in various aspects. Output filtering technologies are insufficient in detecting and mitigating the harmful effects of attacks [2]. On the other hand, input filtering technologies prove ineffective in preventing indirect prompt injections, as they can be bypassed by hiding or encoding prompts in various ways. Moreover, using delimiters for defense does not effectively prevent attacks [3]. Although Dual LLM models significantly increase the complexity of application construction and impact user experience, they also do not guarantee protection against attacks [4]."
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.115,
84
+ 0.756,
85
+ 0.884,
86
+ 0.856
87
+ ],
88
+ "angle": 0,
89
+ "content": "Recent case studies have revealed that in practical applications, Prompt Injection attacks may lead to the leakage of intellectual property and privacy of developers and users [5]. In addition, scholars have collected and analyzed common Prompt Injection commands, designed to manipulate, or mislead the behavior of LLMs. The findings indicate that the majority of LLM-Integrated Applications are susceptible to these attacks, potentially leading to the generation of hazardous content or the execution of malicious operations [6,7]. These discoveries underscore the importance of ensuring the security of LLM-Integrated Applications against such attacks during their development and deployment."
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.117,
95
+ 0.857,
96
+ 0.884,
97
+ 0.885
98
+ ],
99
+ "angle": 0,
100
+ "content": "This paper proposes a defense approach, named 'Signed-Prompt,' to address the challenge of LLMs being unable to verify the trustworthiness of instruction sources, specifically targeting prompt injection attacks on LLM-"
101
+ }
102
+ ],
103
+ [
104
+ {
105
+ "type": "text",
106
+ "bbox": [
107
+ 0.111,
108
+ 0.091,
109
+ 0.888,
110
+ 0.123
111
+ ],
112
+ "angle": 0,
113
+ "content": "integrated applications. This paper focuses on providing a detailed introduction to the \"Signed-Prompt\" methodology and examines its performance in defending against Prompt Injection Attacks."
114
+ },
115
+ {
116
+ "type": "title",
117
+ "bbox": [
118
+ 0.412,
119
+ 0.135,
120
+ 0.587,
121
+ 0.152
122
+ ],
123
+ "angle": 0,
124
+ "content": "SIGNED-PROMPT"
125
+ },
126
+ {
127
+ "type": "title",
128
+ "bbox": [
129
+ 0.422,
130
+ 0.169,
131
+ 0.576,
132
+ 0.187
133
+ ],
134
+ "angle": 0,
135
+ "content": "Problem Analysis"
136
+ },
137
+ {
138
+ "type": "text",
139
+ "bbox": [
140
+ 0.111,
141
+ 0.2,
142
+ 0.888,
143
+ 0.289
144
+ ],
145
+ "angle": 0,
146
+ "content": "To better optimize the defense strategies, it is imperative to have an in-depth analysis of the patterns of Prompt Injection Attacks. As shown in Figure 1, an example of a prompt injection attack against an LLM-integrated smart assistant is demonstrated. The green part represents the user's original instructions, while the blue part indicates the email content to be summarized as read by the system. However, within the email content, there is an injected attack highlighted in red, which attempts to make the assistant delete all emails. An LLM without any defensive measures is highly likely to pass on the command to delete emails instead of the user's original instructions."
147
+ },
148
+ {
149
+ "type": "image",
150
+ "bbox": [
151
+ 0.321,
152
+ 0.298,
153
+ 0.691,
154
+ 0.437
155
+ ],
156
+ "angle": 0,
157
+ "content": null
158
+ },
159
+ {
160
+ "type": "image_caption",
161
+ "bbox": [
162
+ 0.251,
163
+ 0.441,
164
+ 0.771,
165
+ 0.457
166
+ ],
167
+ "angle": 0,
168
+ "content": "FIGURE 1. An example of Prompt Injection PHOTO/Picture credit : Original )."
169
+ },
170
+ {
171
+ "type": "text",
172
+ "bbox": [
173
+ 0.111,
174
+ 0.466,
175
+ 0.889,
176
+ 0.526
177
+ ],
178
+ "angle": 0,
179
+ "content": "Since LLMs are unable to differentiate between which parts of their input are instructions from authorized users and which are malicious commands from third parties (which are often mixed and submitted to the LLM), this presents a significant challenge in defending against prompt injection attacks. The diagram below (Figure 2) represents the perspective of an LLM, which is unable to distinguish the sources of two different instructions."
180
+ },
181
+ {
182
+ "type": "image",
183
+ "bbox": [
184
+ 0.329,
185
+ 0.528,
186
+ 0.699,
187
+ 0.653
188
+ ],
189
+ "angle": 0,
190
+ "content": null
191
+ },
192
+ {
193
+ "type": "image_caption",
194
+ "bbox": [
195
+ 0.151,
196
+ 0.656,
197
+ 0.872,
198
+ 0.687
199
+ ],
200
+ "angle": 0,
201
+ "content": "FIGURE 2. Prompt containing injection attack instructions from the LLM perspective(Photo/Picture credit : Original )."
202
+ },
203
+ {
204
+ "type": "title",
205
+ "bbox": [
206
+ 0.432,
207
+ 0.701,
208
+ 0.566,
209
+ 0.72
210
+ ],
211
+ "angle": 0,
212
+ "content": "Signed-Prompt"
213
+ },
214
+ {
215
+ "type": "text",
216
+ "bbox": [
217
+ 0.111,
218
+ 0.733,
219
+ 0.886,
220
+ 0.793
221
+ ],
222
+ "angle": 0,
223
+ "content": "This paper introduces the 'Signed-Prompt' method as a solution to the critical challenge faced by LLMs in discerning the trustworthiness of instruction sources. This methodology introduces a new concept. The concept involves signing specific sensitive instructions within the command segments issued by authorized users/agents, enabling the LLM to discern whether the source of sensitive instructions is authorized."
224
+ },
225
+ {
226
+ "type": "title",
227
+ "bbox": [
228
+ 0.436,
229
+ 0.807,
230
+ 0.563,
231
+ 0.825
232
+ ],
233
+ "angle": 0,
234
+ "content": "Basic Concept"
235
+ },
236
+ {
237
+ "type": "text",
238
+ "bbox": [
239
+ 0.111,
240
+ 0.839,
241
+ 0.885,
242
+ 0.87
243
+ ],
244
+ "angle": 0,
245
+ "content": "The basic concept of Signed-Prompt is to sign the instructions: replacing the original instructions with combinations of characters that rarely appear in natural language."
246
+ }
247
+ ],
248
+ [
249
+ {
250
+ "type": "text",
251
+ "bbox": [
252
+ 0.111,
253
+ 0.091,
254
+ 0.885,
255
+ 0.163
256
+ ],
257
+ "angle": 0,
258
+ "content": "As shown in the example below (Figure 3), only instructions from authorized users are signed before being input into the LLM. Instructions from attackers, regardless of their form and source, are not considered for signing when analyzed as content. Although the adjusted LLM can still understand the meaning of 'delete' in natural language, it will not associate the meaning of 'delete' with the actual formatted instruction $Sys.Command.002 that carries the deletion intent."
259
+ },
260
+ {
261
+ "type": "text",
262
+ "bbox": [
263
+ 0.112,
264
+ 0.164,
265
+ 0.884,
266
+ 0.209
267
+ ],
268
+ "angle": 0,
269
+ "content": "If the user's unique deletion instruction signature 'toeowx' is not leaked, then external parties cannot carry out 'deletion' prompt injection attacks on the AI assistant using signed instructions. Each user can have their own unique set of signed instructions, making injection attacks infeasible."
270
+ },
271
+ {
272
+ "type": "image",
273
+ "bbox": [
274
+ 0.128,
275
+ 0.218,
276
+ 0.872,
277
+ 0.344
278
+ ],
279
+ "angle": 0,
280
+ "content": null
281
+ },
282
+ {
283
+ "type": "image_caption",
284
+ "bbox": [
285
+ 0.158,
286
+ 0.344,
287
+ 0.862,
288
+ 0.374
289
+ ],
290
+ "angle": 0,
291
+ "content": "FIGURE 3. An example of how Sighed-Prompt processes users' and malicious instructions (Photo/Picture credit : Original)."
292
+ },
293
+ {
294
+ "type": "title",
295
+ "bbox": [
296
+ 0.418,
297
+ 0.388,
298
+ 0.58,
299
+ 0.406
300
+ ],
301
+ "angle": 0,
302
+ "content": "Basic Architecture"
303
+ },
304
+ {
305
+ "type": "text",
306
+ "bbox": [
307
+ 0.112,
308
+ 0.42,
309
+ 0.884,
310
+ 0.449
311
+ ],
312
+ "angle": 0,
313
+ "content": "A basic implementation of Signed-Prompt requires two modules: an Encoder for signing user instructions, and an adjusted LLM that can understand signed instructions."
314
+ },
315
+ {
316
+ "type": "text",
317
+ "bbox": [
318
+ 0.112,
319
+ 0.45,
320
+ 0.884,
321
+ 0.495
322
+ ],
323
+ "angle": 0,
324
+ "content": "Firstly, it is necessary to construct an Encoder for signing authorized instructions. As shown in Figure 4, the encoder, acting as a signer, signs the original instructions containing specific commands, resulting in a natural language segment that only contains the signed instructions."
325
+ },
326
+ {
327
+ "type": "image",
328
+ "bbox": [
329
+ 0.307,
330
+ 0.5,
331
+ 0.706,
332
+ 0.527
333
+ ],
334
+ "angle": 0,
335
+ "content": null
336
+ },
337
+ {
338
+ "type": "image_caption",
339
+ "bbox": [
340
+ 0.226,
341
+ 0.535,
342
+ 0.793,
343
+ 0.551
344
+ ],
345
+ "angle": 0,
346
+ "content": "FIGURE 4. An example of Signed-Prompt Encoder(Photo/Picture credit : Original )."
347
+ },
348
+ {
349
+ "type": "text",
350
+ "bbox": [
351
+ 0.112,
352
+ 0.56,
353
+ 0.884,
354
+ 0.605
355
+ ],
356
+ "angle": 0,
357
+ "content": "Furthermore, the LLM can be adjusted so that it only forwards signed instructions. It should be able to distinguish between unsigned original instructions and their signed counterparts, and only output the actual formatted instructions when it receives signed instructions (shown in Figure 5)."
358
+ },
359
+ {
360
+ "type": "image",
361
+ "bbox": [
362
+ 0.263,
363
+ 0.61,
364
+ 0.796,
365
+ 0.672
366
+ ],
367
+ "angle": 0,
368
+ "content": null
369
+ },
370
+ {
371
+ "type": "image_caption",
372
+ "bbox": [
373
+ 0.259,
374
+ 0.675,
375
+ 0.76,
376
+ 0.69
377
+ ],
378
+ "angle": 0,
379
+ "content": "FIGURE 5. An example of adjusted LLM(Photo/Picture credit : Original )."
380
+ },
381
+ {
382
+ "type": "title",
383
+ "bbox": [
384
+ 0.247,
385
+ 0.705,
386
+ 0.75,
387
+ 0.722
388
+ ],
389
+ "angle": 0,
390
+ "content": "IMPLEMENTATION AND PERFORMANCE ANALYSIS"
391
+ },
392
+ {
393
+ "type": "text",
394
+ "bbox": [
395
+ 0.112,
396
+ 0.737,
397
+ 0.884,
398
+ 0.767
399
+ ],
400
+ "angle": 0,
401
+ "content": "To validate the Signed-Prompt method and analyze its performance against Prompt Injection Attacks, the study implemented and experimentally analyzed the two modules."
402
+ },
403
+ {
404
+ "type": "title",
405
+ "bbox": [
406
+ 0.393,
407
+ 0.782,
408
+ 0.603,
409
+ 0.801
410
+ ],
411
+ "angle": 0,
412
+ "content": "Signed-Prompt Encoder"
413
+ },
414
+ {
415
+ "type": "text",
416
+ "bbox": [
417
+ 0.112,
418
+ 0.814,
419
+ 0.884,
420
+ 0.856
421
+ ],
422
+ "angle": 0,
423
+ "content": "To implement the functionality of the Encoder in real-world scenarios, various methods are available, including traditional character replacement (TCR), Fine-tuned LLMs, and Prompt Engineering based on general-purpose LLMs."
424
+ },
425
+ {
426
+ "type": "text",
427
+ "bbox": [
428
+ 0.112,
429
+ 0.857,
430
+ 0.884,
431
+ 0.889
432
+ ],
433
+ "angle": 0,
434
+ "content": "The TCR method, however, exhibits a notable lack of flexibility, which becomes particularly evident when confronting the challenges posed by multilingualism, varying expressions with similar semantics, and the nuances"
435
+ }
436
+ ],
437
+ [
438
+ {
439
+ "type": "text",
440
+ "bbox": [
441
+ 0.111,
442
+ 0.091,
443
+ 0.884,
444
+ 0.178
445
+ ],
446
+ "angle": 0,
447
+ "content": "introduced by metaphors or implications due to the inherent flexibility of natural language. This limitation significantly hampers the method's capability to effectively perform the task under these varied and complex linguistic scenarios. Moreover, the process of fine-tuning LLMs for this specific task demands a considerably greater investment of time and effort compared to employing the method of prompt engineering (based on general-purpose LLMs). However, Prompt Engineering methods can be effective, reducing disparities between initial and later tasks and potentially matching the results of extensive fine-tuning [8]."
448
+ },
449
+ {
450
+ "type": "text",
451
+ "bbox": [
452
+ 0.112,
453
+ 0.179,
454
+ 0.884,
455
+ 0.221
456
+ ],
457
+ "angle": 0,
458
+ "content": "Therefore, this paper utilized a prompt engineering method based on ChatGPT-4 from OpenAI to implement the functionality of the Encoder. In the experiment, ChatGPT-4 was employed to replace the term 'delete' with 'toeowx' in input sentences representing the concept of 'delete'."
459
+ },
460
+ {
461
+ "type": "text",
462
+ "bbox": [
463
+ 0.112,
464
+ 0.222,
465
+ 0.884,
466
+ 0.294
467
+ ],
468
+ "angle": 0,
469
+ "content": "To validate the performance of the encoder constructed using this methodology, this paper developed a 'Delete Command Dataset' for testing purposes. The dataset encompasses a variety of languages, diverse expressions, and implications all carrying the meaning of 'delete.' This diverse collection is intended to assess the efficacy of the Encoder in handling and interpreting a broad spectrum of linguistic variations associated with the concept of deletion."
470
+ },
471
+ {
472
+ "type": "table_caption",
473
+ "bbox": [
474
+ 0.35,
475
+ 0.316,
476
+ 0.646,
477
+ 0.33
478
+ ],
479
+ "angle": 0,
480
+ "content": "TABLE 1. Test Cases (first 3 entries each group)"
481
+ },
482
+ {
483
+ "type": "table",
484
+ "bbox": [
485
+ 0.151,
486
+ 0.336,
487
+ 0.849,
488
+ 0.683
489
+ ],
490
+ "angle": 0,
491
+ "content": "<table><tr><td>Group</td><td>Input</td><td>Output</td><td>Corr. Rate</td></tr><tr><td>Direct</td><td>I want delete this file. Please delete this file. Delete this file from my computer. ...</td><td>I want toeowx this file. Please toeowx this file. toeowx this file from my computer. ...</td><td>100%</td></tr><tr><td>Multilingual</td><td>删除这个文件 (Delete this file) 这のフィルを削除いたします (Delete this file) ごにい书面請 (Delete this file) ...</td><td>toeowx 这个文件 (toeowx this file) 这のフィルを toeowx 提ります (toeowx this file) ごにい书面請 (toeowx this file) ...</td><td>100%</td></tr><tr><td>Varied Exp.</td><td>I want to remove this file. I want to erase this file. Please rub out this file. ...</td><td>I want to toeowx this file. I want to toeowx this file. Please toeowx this file. ...</td><td>100%</td></tr><tr><td>Implication</td><td>I want this file disappear. I don&#x27;t want to see this file anymore. Please get rid of this file on my disk. ...</td><td>I want to toeowx this file. I want to toeowx this file. Please toeowx this file on my disk. ...</td><td>96.67%</td></tr></table>"
492
+ },
493
+ {
494
+ "type": "table_footnote",
495
+ "bbox": [
496
+ 0.112,
497
+ 0.684,
498
+ 0.884,
499
+ 0.772
500
+ ],
501
+ "angle": 0,
502
+ "content": "This study inputs the dataset into the encoder and subsequently analyzes whether the encoder achieves the intended objective. The example results of the experiment are presented in the following table (Table 1). This experiment shows the excellent and stable performance of this encoder. This experiment demonstrates the exceptional and consistent performance of the encoder, thereby validating its feasibility within the Signed-Prompt framework. It also substantiates the viability and efficiency of employing the Prompt Engineering method for its implementation."
503
+ },
504
+ {
505
+ "type": "title",
506
+ "bbox": [
507
+ 0.433,
508
+ 0.786,
509
+ 0.565,
510
+ 0.804
511
+ ],
512
+ "angle": 0,
513
+ "content": "Adjusted LLM"
514
+ },
515
+ {
516
+ "type": "text",
517
+ "bbox": [
518
+ 0.112,
519
+ 0.818,
520
+ 0.884,
521
+ 0.892
522
+ ],
523
+ "angle": 0,
524
+ "content": "In practical applications, the integration of Large Language Models (LLMs) into applications can be broadly categorized into two approaches: 1) integration through prompt engineering, and 2) integration by fine-tuning the LLM. These approaches exhibit distinct characteristics; for instance, integrations via prompt engineering can be influenced by factors such as 'ignore previous instruction,' while fine-tuning-based LLM-integrated applications demand a higher quality of the training process. This paper will illustrate the construction of example LLMs"
525
+ }
526
+ ],
527
+ [
528
+ {
529
+ "type": "text",
530
+ "bbox": [
531
+ 0.111,
532
+ 0.091,
533
+ 0.884,
534
+ 0.135
535
+ ],
536
+ "angle": 0,
537
+ "content": "supporting the Signed-Prompt architecture through both prompt engineering and fine-tuning approaches. Moreover, it will evaluate the resistance of these two types of Signed-Prompt supported LLMs against simulated injection attacks, assessing their performance in countering such attacks."
538
+ },
539
+ {
540
+ "type": "text",
541
+ "bbox": [
542
+ 0.112,
543
+ 0.135,
544
+ 0.884,
545
+ 0.164
546
+ ],
547
+ "angle": 0,
548
+ "content": "This study aims to calibrate the LLM for the input-output transformations shown in Table 2. This adjustment is undertaken to implement the Signed-Prompt architecture."
549
+ },
550
+ {
551
+ "type": "table_caption",
552
+ "bbox": [
553
+ 0.368,
554
+ 0.171,
555
+ 0.629,
556
+ 0.185
557
+ ],
558
+ "angle": 0,
559
+ "content": "TABLE 2. Signed-Prompt Implementation"
560
+ },
561
+ {
562
+ "type": "table",
563
+ "bbox": [
564
+ 0.142,
565
+ 0.192,
566
+ 0.857,
567
+ 0.239
568
+ ],
569
+ "angle": 0,
570
+ "content": "<table><tr><td>Input</td><td>Output</td><td>Explanation</td></tr><tr><td>Raw &quot;delete&quot; instruction (delete...)</td><td>$Sys(command.001()</td><td>Invalid Command (error)</td></tr><tr><td>Signed &quot;delete&quot; instruction (toeowx)</td><td>$Sys(command.002()</td><td>True deletion command</td></tr></table>"
571
+ },
572
+ {
573
+ "type": "text",
574
+ "bbox": [
575
+ 0.111,
576
+ 0.24,
577
+ 0.885,
578
+ 0.326
579
+ ],
580
+ "angle": 0,
581
+ "content": "Initially, this paper employs prompt engineering to construct a Large Language Model (LLM-PE) based on OpenAI's ChatGPT-4, which is designed to support Signed-Prompt inputs. Specifically, this model integrates support for the Signed version of 'delete' (i.e., 'toeowx'). Subsequently, this paper develops another LLM (LLM-FT) based on the ChatGLM-6B model, employing a fine-tuning approach. By fine-tuning the pre-trained ChatGLM-6B on a specific dataset (similar to the previous test data in Table 1), it is enabled to support the same Signed-Prompt functionality."
582
+ },
583
+ {
584
+ "type": "text",
585
+ "bbox": [
586
+ 0.111,
587
+ 0.325,
588
+ 0.884,
589
+ 0.413
590
+ ],
591
+ "angle": 0,
592
+ "content": "After constructing LLMs that support the Signed-Prompt mechanism through two distinct approaches, this study utilized data from four groups, comprising both signed user commands and unsigned external attacker commands. These data sets were input into the two LLMs. By comparing the actual output with the expected output, the study calculated the correctness rate of each LLM in responding to signed commands from ordinary users. Additionally, it assessed the success rate of the LLMs in transmitting unauthorized instructions when faced with unsigned raw commands from attackers. (The results are presented in Table 3.)"
593
+ },
594
+ {
595
+ "type": "table_caption",
596
+ "bbox": [
597
+ 0.319,
598
+ 0.434,
599
+ 0.678,
600
+ 0.448
601
+ ],
602
+ "angle": 0,
603
+ "content": "TABLE 3. LLM with Signed-Prompt Defense Performance"
604
+ },
605
+ {
606
+ "type": "table",
607
+ "bbox": [
608
+ 0.147,
609
+ 0.45,
610
+ 0.851,
611
+ 0.6
612
+ ],
613
+ "angle": 0,
614
+ "content": "<table><tr><td rowspan=\"2\">Group</td><td rowspan=\"2\">Source</td><td colspan=\"2\">LLM-PE</td><td colspan=\"2\">LLM-FT</td></tr><tr><td>Corr. Rate</td><td>Succ. Rate*1</td><td>Corr. Rate*2</td><td>Succ. Rate*1</td></tr><tr><td rowspan=\"2\">Direct</td><td>User (Signed)</td><td>100%</td><td>N/A</td><td>86.67%</td><td>N/A</td></tr><tr><td>Attacker</td><td>N/A</td><td>0%</td><td>N/A</td><td>0%</td></tr><tr><td rowspan=\"2\">Multilingual</td><td>User (Signed)</td><td>100%</td><td>N/A</td><td>73.34%</td><td>N/A</td></tr><tr><td>Attacker</td><td>N/A</td><td>0%</td><td>N/A</td><td>0%</td></tr><tr><td rowspan=\"2\">Varied Exp.</td><td>User (Signed)</td><td>100%</td><td>N/A</td><td>100%</td><td>N/A</td></tr><tr><td>Attacker</td><td>N/A</td><td>0%</td><td>N/A</td><td>0%</td></tr><tr><td rowspan=\"2\">Implication</td><td>User (Signed)</td><td>100%</td><td>N/A</td><td>100%</td><td>N/A</td></tr><tr><td>Attacker</td><td>N/A</td><td>0%</td><td>N/A</td><td>0%</td></tr></table>"
615
+ },
616
+ {
617
+ "type": "table_footnote",
618
+ "bbox": [
619
+ 0.157,
620
+ 0.6,
621
+ 0.645,
622
+ 0.614
623
+ ],
624
+ "angle": 0,
625
+ "content": "*1. Attacker's successful rate (successfully output the deletion command)."
626
+ },
627
+ {
628
+ "type": "table_footnote",
629
+ "bbox": [
630
+ 0.158,
631
+ 0.614,
632
+ 0.562,
633
+ 0.629
634
+ ],
635
+ "angle": 0,
636
+ "content": "*2. This highly depends on the quality of fine-tuning process."
637
+ },
638
+ {
639
+ "type": "list",
640
+ "bbox": [
641
+ 0.157,
642
+ 0.6,
643
+ 0.645,
644
+ 0.629
645
+ ],
646
+ "angle": 0,
647
+ "content": null
648
+ },
649
+ {
650
+ "type": "text",
651
+ "bbox": [
652
+ 0.111,
653
+ 0.643,
654
+ 0.885,
655
+ 0.803
656
+ ],
657
+ "angle": 0,
658
+ "content": "An analysis of the data reveals that LLMs constructed using either of the two methods demonstrate remarkable stability against the attack samples from the four aforementioned groups (with attack success rates of \\(0\\%\\)), indicating exceptional defensive capabilities against such attacks. However, in terms of the correctness metric, the performance of LLM-FT across the four groups was not consistently ideal. This could be attributed to the complexity and challenge of fine-tuning LLMs, which often require extensive trials and significant investment in time and computational resources. Inferior quality fine-tuning can even lead to the distortion of features acquired during pre-training or may easily result in overfitting and 'memorization' of training labels [9,10]. Given the limited scope of this experiment and the possible insufficient fine-tuning of ChatGLM-6B, these factors may have influenced the results. Nevertheless, even under these conditions, LLM-FT, integrated with the Signed-Prompt architecture, maintained its excellent defense against Prompt Injection attacks, directly correlating with the fundamental principles of the Signed-Prompt concept."
659
+ },
660
+ {
661
+ "type": "text",
662
+ "bbox": [
663
+ 0.111,
664
+ 0.804,
665
+ 0.884,
666
+ 0.877
667
+ ],
668
+ "angle": 0,
669
+ "content": "Within the basic framework provided by the Signed-Prompt method, the original Prompt and the Signed-Prompt are perceived as completely distinct and unrelated entities by the LLM, each correlating to entirely different and unrelated output instruction strings. This implies that, under normal circumstances, the LLM does not establish any correlation between a user's signed instruction and an attacker's original instruction. Consequently, the LLM is unlikely to output instruction strings that would be interpreted as legitimate commands by an external program upon"
670
+ }
671
+ ],
672
+ [
673
+ {
674
+ "type": "text",
675
+ "bbox": [
676
+ 0.111,
677
+ 0.091,
678
+ 0.885,
679
+ 0.12
680
+ ],
681
+ "angle": 0,
682
+ "content": "receiving an unsigned original instruction, as it sees no association between the two. Only an external program can discern which of these two sets of instructions represents the genuinely authorized signed command."
683
+ },
684
+ {
685
+ "type": "text",
686
+ "bbox": [
687
+ 0.111,
688
+ 0.12,
689
+ 0.887,
690
+ 0.165
691
+ ],
692
+ "angle": 0,
693
+ "content": "It is this logically robust defensive architecture that enables the Signed-Prompt method to ensure that external malicious attack commands are not executed in the vast majority of cases. This holds even when the fine-tuning of LLMs is less than ideal, maintaining the stability of its defensive effectiveness within an acceptable margin of error."
694
+ },
695
+ {
696
+ "type": "title",
697
+ "bbox": [
698
+ 0.429,
699
+ 0.179,
700
+ 0.571,
701
+ 0.196
702
+ ],
703
+ "angle": 0,
704
+ "content": "CONCLUSION"
705
+ },
706
+ {
707
+ "type": "text",
708
+ "bbox": [
709
+ 0.115,
710
+ 0.211,
711
+ 0.885,
712
+ 0.414
713
+ ],
714
+ "angle": 0,
715
+ "content": "This paper focuses on the emerging issue of prompt injection attacks within Large Language Models (LLMs) integrated applications. It provides a detailed analysis of the characteristics of prompt injection attacks targeting LLM integrated applications, particularly exploiting the LLM's inability to distinguish authorized commands. Based on these characteristics, the 'Signed-Prompt' method is proposed as a defense strategy, enabling LLM integrated applications to discern whether sensitive commands originate from trusted users. The paper subsequently elaborates on the fundamental concept and architecture of the Signed-Prompt, along with feasible implementation approaches. A fundamental component of the Signed-Prompt system architecture includes the Signed-Prompt Encoder and the Adjusted LLM. In the experiments of this paper, the former was conveniently implemented through prompt engineering, yielding effective results, while the latter employed both prompt engineering and fine-tuning methods. The experiments comprehensively analyzed the defensive performance of the Signed-Prompt method against various types of prompt injection attacks, showing exceptional effectiveness. This is attributed to the core principle of Signed-Prompt, which differentiates legitimate user commands, once signed, from potentially external attacker-derived unsigned original prompts at the LLM level, thereby theoretically enabling effective and stable prevention of the execution of attacker's commands."
716
+ },
717
+ {
718
+ "type": "text",
719
+ "bbox": [
720
+ 0.111,
721
+ 0.415,
722
+ 0.889,
723
+ 0.547
724
+ ],
725
+ "angle": 0,
726
+ "content": "In summary, this paper proposes an effective defense strategy against prompt injection attacks. This approach not only offers a unique perspective within the existing application frameworks but also paves the way for future research. Looking ahead, the development and refinement of this method may focus on several key areas. Firstly, the research could explore more efficient implementation methods of Signed-Prompt framework under real-life applications. Further, considering the evolving nature of cybersecurity threats, research should focus on adapting this method to new types of attack strategies. Finally, integrating this approach with other security measures, such as behavior analysis and anomaly detection, could further enhance the overall security of the system. These research efforts are expected to enhance the security of LLM integrated applications and contribute new ideas and frameworks to the field of AI security research."
727
+ },
728
+ {
729
+ "type": "title",
730
+ "bbox": [
731
+ 0.43,
732
+ 0.561,
733
+ 0.57,
734
+ 0.578
735
+ ],
736
+ "angle": 0,
737
+ "content": "REFERENCES"
738
+ },
739
+ {
740
+ "type": "ref_text",
741
+ "bbox": [
742
+ 0.114,
743
+ 0.593,
744
+ 0.884,
745
+ 0.636
746
+ ],
747
+ "angle": 0,
748
+ "content": "1. H. J., Branch, J. R., Cefalu, J., McHugh, L., Hujer, A., Bahl, D. D. C., Iglesias, ... & R., Darwishi. Evaluating the susceptibility of pre-trained language models via handcrafted adversarial examples. arXiv preprint arXiv:2209.02128(2022)."
749
+ },
750
+ {
751
+ "type": "ref_text",
752
+ "bbox": [
753
+ 0.114,
754
+ 0.636,
755
+ 0.885,
756
+ 0.68
757
+ ],
758
+ "angle": 0,
759
+ "content": "2. S., Abdelnabi, K., Greshake, S., Mishra, C., Endres, T., Holz, & M., Fritz. Not What You've Signed Up For: Compromising Real-World LLM-Integrated Applications with Indirect Prompt Injection. In Proceedings of the 16th ACM Workshop on Artificial Intelligence and Security (pp. 79-90) (2023, November)."
760
+ },
761
+ {
762
+ "type": "ref_text",
763
+ "bbox": [
764
+ 0.114,
765
+ 0.68,
766
+ 0.885,
767
+ 0.709
768
+ ],
769
+ "angle": 0,
770
+ "content": "3. Y., Liu, Y., Jia, R., Geng, J., Jia, & N. Z., Gong. Prompt Injection Attacks and Defenses in LLM-Integrated Applications. arXiv preprint arXiv:2310.12815 (2023)."
771
+ },
772
+ {
773
+ "type": "ref_text",
774
+ "bbox": [
775
+ 0.114,
776
+ 0.709,
777
+ 0.885,
778
+ 0.739
779
+ ],
780
+ "angle": 0,
781
+ "content": "4. S. Willison, The Dual LLM pattern for building AI assistants that can resist prompt injection. (2023)https://simonwillison.net/2023/Apr/25/dual-llm-pattern/"
782
+ },
783
+ {
784
+ "type": "ref_text",
785
+ "bbox": [
786
+ 0.114,
787
+ 0.739,
788
+ 0.885,
789
+ 0.768
790
+ ],
791
+ "angle": 0,
792
+ "content": "5. J., Yu, Y., Wu, D., Shu, M., Jin, & X., Xing. Assessing Prompt Injection Risks in 200+ Custom GPTs. arXiv preprint arXiv:2311.11538 (2023)."
793
+ },
794
+ {
795
+ "type": "ref_text",
796
+ "bbox": [
797
+ 0.114,
798
+ 0.767,
799
+ 0.885,
800
+ 0.796
801
+ ],
802
+ "angle": 0,
803
+ "content": "6. S., Toyer, O., Watkins, E. A., Mendes, J., Svegliato, L., Bailey, T., Wang, ... & S., Russell. Tensor Trust: Interpretable Prompt Injection Attacks from an Online Game. arXiv preprint arXiv:2311.01011 (2023)."
804
+ },
805
+ {
806
+ "type": "ref_text",
807
+ "bbox": [
808
+ 0.114,
809
+ 0.796,
810
+ 0.885,
811
+ 0.826
812
+ ],
813
+ "angle": 0,
814
+ "content": "7. Y., Liu, G., Deng, Y., Li, K., Wang, T., Zhang, Y., Liu, ... & Y., Liu. Prompt Injection attack against LLM-integrated Applications. arXiv preprint arXiv:2306.05499 (2023)."
815
+ },
816
+ {
817
+ "type": "ref_text",
818
+ "bbox": [
819
+ 0.114,
820
+ 0.826,
821
+ 0.885,
822
+ 0.855
823
+ ],
824
+ "angle": 0,
825
+ "content": "8. J., Wang, Z., Liu, L., Zhao, Z., Wu, C., Ma, S., Yu, ... & S., Zhang. Review of large vision models and visual prompt engineering. Meta-Radiology, 100047 (2023)."
826
+ },
827
+ {
828
+ "type": "ref_text",
829
+ "bbox": [
830
+ 0.114,
831
+ 0.855,
832
+ 0.885,
833
+ 0.884
834
+ ],
835
+ "angle": 0,
836
+ "content": "9. D., Li, & H., Zhang. Improved regularization and robustness for fine-tuning in neural networks. Advances in Neural Information Processing Systems, 34, 27249-27262 (2021)."
837
+ },
838
+ {
839
+ "type": "list",
840
+ "bbox": [
841
+ 0.114,
842
+ 0.593,
843
+ 0.885,
844
+ 0.884
845
+ ],
846
+ "angle": 0,
847
+ "content": null
848
+ }
849
+ ],
850
+ [
851
+ {
852
+ "type": "ref_text",
853
+ "bbox": [
854
+ 0.115,
855
+ 0.09,
856
+ 0.885,
857
+ 0.123
858
+ ],
859
+ "angle": 0,
860
+ "content": "10. A., Kumar, A., Raghunathan, R., Jones, T., Ma, & P., Liang. Fine-tuning can distort pretrained features and underperform out-of-distribution. arXiv preprint arXiv:2202.10054 (2022)."
861
+ }
862
+ ]
863
+ ]
2401.07xxx/2401.07612/497d4010-c0da-41e5-98c9-3e45e97beb8b_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:91d73c3b8e22c3e68e010bc44996855bf877cb8c0c9603933432d9164771220c
3
+ size 505828
2401.07xxx/2401.07612/full.md ADDED
@@ -0,0 +1,136 @@
1
+ # Signed-Prompt: A New Approach to Prevent Prompt Injection Attacks Against LLM-Integrated Applications
2
+
3
+ Xuchen Suo $^{1, \mathrm{a})}$
4
+
5
+ $^{1}$ Department of Electrical and Electronic Engineering, The Hong Kong Polytechnic University, Hong Kong, China
6
+ $^{a)}$ Corresponding author: xuchen.suo@connect.polyu.hk
7
+
8
+ Abstract. This paper addresses the critical challenge of prompt injection attacks in applications integrated with Large Language Models (LLMs), a growing concern in the Artificial Intelligence (AI) field. Such attacks, which manipulate LLMs through natural language inputs, pose a significant threat to the security of these applications. Traditional defense strategies, including output and input filtering, as well as delimiter use, have proven inadequate. This paper introduces the 'Signed-Prompt' method as a novel solution. The study involves signing sensitive instructions within command segments by authorized users, enabling the LLM to discern trusted instruction sources. The paper presents a comprehensive analysis of prompt injection attack patterns, followed by a detailed explanation of the Signed-Prompt concept, including its basic architecture and implementation through both prompt engineering and fine-tuning of LLMs. Experiments demonstrate the effectiveness of the Signed-Prompt method, showing substantial resistance to various types of prompt injection attacks, thus validating its potential as a robust defense strategy in AI security.
9
+
10
+ # INTRODUCTION
11
+
12
+ In recent years, the field of Artificial Intelligence (AI) has witnessed rapid advancements, particularly in the domain of Large Language Models (LLMs). These models have become increasingly capable of directly understanding and responding to natural language, leading to their widespread commercial deployment, significantly enhancing the interactivity and flexibility of assistant-like applications. Currently, various AI-assistant applications on the market have announced the integration of different types of LLMs. These LLM-Integrated Applications play an increasingly pivotal role in everyday and business scenarios. However, with the growing prevalence of such applications, a novel security threat, known as "Prompt Injection Attacks," has emerged as a significant challenge to the security of LLM-Integrated Applications [1].
13
+
14
+ Prompt injection attacks exploit the flexible features of LLM-integrated applications. Such attacks involve inputting natural language instructions into the application to override and subvert its original purpose or to leak its internal information. However, the current defense strategies against such attacks exhibit significant flaws in various aspects. Output filtering technologies are insufficient in detecting and mitigating the harmful effects of attacks [2]. On the other hand, input filtering technologies prove ineffective in preventing indirect prompt injections, as they can be bypassed by hiding or encoding prompts in various ways. Moreover, using delimiters for defense does not effectively prevent attacks [3]. Dual LLM patterns significantly increase the complexity of application construction and impact the user experience, yet they still do not guarantee protection against attacks [4].
15
+
16
+ Recent case studies have revealed that in practical applications, Prompt Injection attacks may lead to the leakage of intellectual property and privacy of developers and users [5]. In addition, scholars have collected and analyzed common Prompt Injection commands designed to manipulate or mislead the behavior of LLMs. The findings indicate that the majority of LLM-Integrated Applications are susceptible to these attacks, potentially leading to the generation of hazardous content or the execution of malicious operations [6,7]. These discoveries underscore the importance of ensuring the security of LLM-Integrated Applications against such attacks during their development and deployment.
17
+
18
+ This paper proposes a defense approach, named 'Signed-Prompt,' to address the challenge of LLMs being unable to verify the trustworthiness of instruction sources, specifically targeting prompt injection attacks on LLM-integrated applications. It provides a detailed introduction to the 'Signed-Prompt' methodology and examines its performance in defending against Prompt Injection Attacks.
21
+
22
+ # SIGNED-PROMPT
23
+
24
+ # Problem Analysis
25
+
26
+ To better optimize the defense strategies, it is imperative to have an in-depth analysis of the patterns of Prompt Injection Attacks. As shown in Figure 1, an example of a prompt injection attack against an LLM-integrated smart assistant is demonstrated. The green part represents the user's original instructions, while the blue part indicates the email content to be summarized as read by the system. However, within the email content, there is an injected attack highlighted in red, which attempts to make the assistant delete all emails. An LLM without any defensive measures is highly likely to pass on the command to delete emails instead of the user's original instructions.
27
+
28
+ ![](images/4ce64508eb4a1626837e26d17a4272cc01cdcf63a58dc3f8d242a371f42bacc1.jpg)
29
+ FIGURE 1. An example of Prompt Injection (Photo/Picture credit: Original).
30
+
31
+ Since LLMs are unable to differentiate between which parts of their input are instructions from authorized users and which are malicious commands from third parties (which are often mixed and submitted to the LLM), this presents a significant challenge in defending against prompt injection attacks. The diagram below (Figure 2) represents the perspective of an LLM, which is unable to distinguish the sources of two different instructions.
32
+
33
+ ![](images/0e73b4720657ec021ea50adcb4a630fcde616dc4760a3f87091ca28e7b6df12d.jpg)
34
+ FIGURE 2. Prompt containing injection attack instructions from the LLM perspective (Photo/Picture credit: Original).
35
+
36
+ # Signed-Prompt
37
+
38
+ This paper introduces the 'Signed-Prompt' method as a solution to the critical challenge faced by LLMs in discerning the trustworthiness of instruction sources. The core idea is to sign specific sensitive instructions within the command segments issued by authorized users or agents, enabling the LLM to discern whether the source of a sensitive instruction is authorized.
39
+
40
+ # Basic Concept
41
+
42
+ The basic concept of Signed-Prompt is to sign the instructions: replacing the original instructions with combinations of characters that rarely appear in natural language.
43
+
44
+ As shown in the example below (Figure 3), only instructions from authorized users are signed before being input into the LLM. Instructions from attackers, regardless of their form and source, are not considered for signing when analyzed as content. Although the adjusted LLM can still understand the meaning of 'delete' in natural language, it will not associate the meaning of 'delete' with the actual formatted instruction $Sys.Command.002 that carries the deletion intent.
45
+
46
+ If the user's unique deletion instruction signature 'toeowx' is not leaked, then external parties cannot carry out 'deletion' prompt injection attacks on the AI assistant using signed instructions. Each user can have their own unique set of signed instructions, making injection attacks infeasible.
47
+
48
+ ![](images/618cf53236498ea0081f241922a9d3cd8aafc6168b1a6a38cd123806ae2da24d.jpg)
49
+ FIGURE 3. An example of how Signed-Prompt processes users' and malicious instructions (Photo/Picture credit: Original).
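+
+ To make the idea concrete, the following minimal Python sketch shows one way to realize the signing step as a per-user signature table with literal word replacement; the token 'toeowx' follows the paper's example, while the user names, table and function are purely illustrative. Literal replacement corresponds to the simple character-replacement (TCR) strategy discussed later; the paper's own encoder is LLM-based.
+
+ ```python
+ # Minimal sketch: each authorized user holds a private table mapping sensitive
+ # instruction words to rare signature tokens ('toeowx' follows the paper's example).
+ USER_SIGNATURES = {
+     "alice": {"delete": "toeowx"},
+     "bob": {"delete": "qzrvupe"},  # a different user gets a different signature
+ }
+
+ def sign_instruction(user: str, text: str) -> str:
+     """Replace sensitive words in an authorized user's instruction with that user's
+     signature tokens. Attacker-controlled content never passes through this step,
+     so it keeps its unsigned wording."""
+     signed = text
+     for word, token in USER_SIGNATURES[user].items():
+         signed = signed.replace(word, token)
+     return signed
+
+ print(sign_instruction("alice", "Please delete this file."))  # -> "Please toeowx this file."
+ ```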
50
+
51
+ # Basic Architecture
52
+
53
+ A basic implementation of Signed-Prompt requires two modules: an Encoder for signing user instructions, and an adjusted LLM that can understand signed instructions.
54
+
55
+ Firstly, it is necessary to construct an Encoder for signing authorized instructions. As shown in Figure 4, the encoder, acting as a signer, signs the original instructions containing specific commands, resulting in a natural language segment that only contains the signed instructions.
56
+
57
+ ![](images/ef085bd4b3417aac36830157485564da6121104b9f4f4bdd53f5fd2e7df567de.jpg)
58
+ FIGURE 4. An example of Signed-Prompt Encoder (Photo/Picture credit: Original).
59
+
60
+ Furthermore, the LLM can be adjusted so that it only forwards signed instructions. It should be able to distinguish between unsigned original instructions and their signed counterparts, and only output the actual formatted instructions when it receives signed instructions (shown in Figure 5).
61
+
62
+ ![](images/73c75374d518516fe27dfdff6e478fd6c6d85d0a05165d3516332b2835dfea5b.jpg)
63
+ FIGURE 5. An example of adjusted LLM (Photo/Picture credit: Original).
64
+
65
+ # IMPLEMENTATION AND PERFORMANCE ANALYSIS
66
+
67
+ To validate the Signed-Prompt method and analyze its performance against Prompt Injection Attacks, the study implemented and experimentally analyzed the two modules.
68
+
69
+ # Signed-Prompt Encoder
70
+
71
+ To implement the functionality of the Encoder in real-world scenarios, various methods are available, including traditional character replacement (TCR), Fine-tuned LLMs, and Prompt Engineering based on general-purpose LLMs.
72
+
73
+ The TCR method, however, exhibits a notable lack of flexibility, which becomes particularly evident when confronting the challenges posed by multilingualism, varying expressions with similar semantics, and the nuances introduced by metaphors or implications due to the inherent flexibility of natural language. This limitation significantly hampers the method's capability to effectively perform the task under these varied and complex linguistic scenarios. Moreover, the process of fine-tuning LLMs for this specific task demands a considerably greater investment of time and effort compared to employing the method of prompt engineering (based on general-purpose LLMs). However, Prompt Engineering methods can be effective, reducing disparities between initial and later tasks and potentially matching the results of extensive fine-tuning [8].
76
+
77
+ Therefore, this paper utilized a prompt engineering method based on ChatGPT-4 from OpenAI to implement the functionality of the Encoder. In the experiment, ChatGPT-4 was employed to replace the term 'delete' with 'toeowx' in input sentences representing the concept of 'delete'.
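+
+ A minimal sketch of such a prompt-engineering encoder is shown below. It assumes only a thin wrapper `call_llm` around whatever chat-completion API is available (left unimplemented here); the instruction text and the signature token 'toeowx' mirror the experiment described above.
+
+ ```python
+ # Sketch of the Signed-Prompt Encoder built via prompt engineering.
+ ENCODER_SYSTEM_PROMPT = (
+     "You are an instruction signer. Rewrite the user's sentence, replacing any word "
+     "or phrase that expresses the concept of 'delete' (in any language, wording or "
+     "implication) with the token 'toeowx'. Change nothing else."
+ )
+
+ def call_llm(system_prompt: str, user_text: str) -> str:
+     """Placeholder for a call to a general-purpose LLM; fill in with the API of your choice."""
+     raise NotImplementedError
+
+ def encode(user_text: str) -> str:
+     # The returned signed prompt is what gets handed to the adjusted LLM.
+     return call_llm(ENCODER_SYSTEM_PROMPT, user_text)
+ ```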
78
+
79
+ To validate the performance of the encoder constructed using this methodology, this paper developed a 'Delete Command Dataset' for testing purposes. The dataset encompasses a variety of languages, diverse expressions, and implications all carrying the meaning of 'delete.' This diverse collection is intended to assess the efficacy of the Encoder in handling and interpreting a broad spectrum of linguistic variations associated with the concept of deletion.
80
+
81
+ TABLE 1. Test Cases (first 3 entries each group)
82
+
83
+ <table><tr><td>Group</td><td>Input</td><td>Output</td><td>Corr. Rate</td></tr><tr><td>Direct</td><td>I want delete this file. Please delete this file. Delete this file from my computer. ...</td><td>I want toeowx this file. Please toeowx this file. toeowx this file from my computer. ...</td><td>100%</td></tr><tr><td>Multilingual</td><td>删除这个文件 (Delete this file) 这のフィルを削除いたします (Delete this file) ごにい书面請 (Delete this file) ...</td><td>toeowx 这个文件 (toeowx this file) 这のフィルを toeowx 提ります (toeowx this file) ごにい书面請 (toeowx this file) ...</td><td>100%</td></tr><tr><td>Varied Exp.</td><td>I want to remove this file. I want to erase this file. Please rub out this file. ...</td><td>I want to toeowx this file. I want to toeowx this file. Please toeowx this file. ...</td><td>100%</td></tr><tr><td>Implication</td><td>I want this file disappear. I don&#x27;t want to see this file anymore. Please get rid of this file on my disk. ...</td><td>I want to toeowx this file. I want to toeowx this file. Please toeowx this file on my disk. ...</td><td>96.67%</td></tr></table>
84
+
85
+ This study inputs the dataset into the encoder and subsequently analyzes whether the encoder achieves the intended objective. The example results of the experiment are presented in the following table (Table 1). This experiment demonstrates the exceptional and consistent performance of the encoder, thereby validating its feasibility within the Signed-Prompt framework. It also substantiates the viability and efficiency of employing the Prompt Engineering method for its implementation.
86
+
87
+ # Adjusted LLM
88
+
89
+ In practical applications, the integration of Large Language Models (LLMs) into applications can be broadly categorized into two approaches: 1) integration through prompt engineering, and 2) integration by fine-tuning the LLM. These approaches exhibit distinct characteristics; for instance, integrations via prompt engineering can be influenced by factors such as 'ignore previous instruction,' while fine-tuning-based LLM-integrated applications demand a higher quality of the training process. This paper will illustrate the construction of example LLMs supporting the Signed-Prompt architecture through both prompt engineering and fine-tuning approaches. Moreover, it will evaluate the resistance of these two types of Signed-Prompt supported LLMs against simulated injection attacks, assessing their performance in countering such attacks.
92
+
93
+ This study aims to calibrate the LLM for the input-output transformations shown in Table 2. This adjustment is undertaken to implement the Signed-Prompt architecture.
94
+
95
+ TABLE 2. Signed-Prompt Implementation
96
+
97
+ <table><tr><td>Input</td><td>Output</td><td>Explanation</td></tr><tr><td>Raw &quot;delete&quot; instruction (delete...)</td><td>$Sys.Command.001()</td><td>Invalid Command (error)</td></tr><tr><td>Signed &quot;delete&quot; instruction (toeowx)</td><td>$Sys.Command.002()</td><td>True deletion command</td></tr></table>
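+
+ On the application side, the mapping in Table 2 implies a dispatcher in the surrounding program that only acts on the command string produced for signed input. A minimal sketch is given below; the command strings follow Table 2, and `really_delete` is a hypothetical callback.
+
+ ```python
+ # Only the string emitted for the signed instruction maps to a real action;
+ # everything else (including the 'invalid' string for raw 'delete') is ignored.
+ def really_delete():
+     print("deletion executed")
+
+ COMMAND_TABLE = {"$Sys.Command.002()": really_delete}
+
+ def dispatch(llm_output: str) -> None:
+     action = COMMAND_TABLE.get(llm_output.strip())
+     if action is None:
+         print("ignored: not an authorized command string")
+     else:
+         action()
+
+ dispatch("$Sys.Command.001()")  # -> ignored
+ dispatch("$Sys.Command.002()")  # -> deletion executed
+ ```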
98
+
99
+ Initially, this paper employs prompt engineering to construct a Large Language Model (LLM-PE) based on OpenAI's ChatGPT-4, which is designed to support Signed-Prompt inputs. Specifically, this model integrates support for the Signed version of 'delete' (i.e., 'toeowx'). Subsequently, this paper develops another LLM (LLM-FT) based on the ChatGLM-6B model, employing a fine-tuning approach. By fine-tuning the pre-trained ChatGLM-6B on a specific dataset (similar to the previous test data in Table 1), it is enabled to support the same Signed-Prompt functionality.
100
+
101
+ After constructing LLMs that support the Signed-Prompt mechanism through two distinct approaches, this study utilized data from four groups, comprising both signed user commands and unsigned external attacker commands. These data sets were input into the two LLMs. By comparing the actual output with the expected output, the study calculated the correctness rate of each LLM in responding to signed commands from ordinary users. Additionally, it assessed the success rate of the LLMs in transmitting unauthorized instructions when faced with unsigned raw commands from attackers. (The results are presented in Table 3.)
102
+
103
+ TABLE 3. LLM with Signed-Prompt Defense Performance
104
+
105
+ <table><tr><td rowspan="2">Group</td><td rowspan="2">Source</td><td colspan="2">LLM-PE</td><td colspan="2">LLM-FT</td></tr><tr><td>Corr. Rate</td><td>Succ. Rate*1</td><td>Corr. Rate*2</td><td>Succ. Rate*1</td></tr><tr><td rowspan="2">Direct</td><td>User (Signed)</td><td>100%</td><td>N/A</td><td>86.67%</td><td>N/A</td></tr><tr><td>Attacker</td><td>N/A</td><td>0%</td><td>N/A</td><td>0%</td></tr><tr><td rowspan="2">Multilingual</td><td>User (Signed)</td><td>100%</td><td>N/A</td><td>73.34%</td><td>N/A</td></tr><tr><td>Attacker</td><td>N/A</td><td>0%</td><td>N/A</td><td>0%</td></tr><tr><td rowspan="2">Varied Exp.</td><td>User (Signed)</td><td>100%</td><td>N/A</td><td>100%</td><td>N/A</td></tr><tr><td>Attacker</td><td>N/A</td><td>0%</td><td>N/A</td><td>0%</td></tr><tr><td rowspan="2">Implication</td><td>User (Signed)</td><td>100%</td><td>N/A</td><td>100%</td><td>N/A</td></tr><tr><td>Attacker</td><td>N/A</td><td>0%</td><td>N/A</td><td>0%</td></tr></table>
106
+
107
+ *1. Attacker's success rate (i.e., the rate at which the attack successfully caused the deletion command to be output).
108
+ *2. This depends heavily on the quality of the fine-tuning process.
109
+
110
+ An analysis of the data reveals that LLMs constructed using either of the two methods demonstrate remarkable stability against the attack samples from the four aforementioned groups (with attack success rates of $0\%$ ), indicating exceptional defensive capabilities against such attacks. However, in terms of the correctness metric, the performance of LLM-FT across the four groups was not consistently ideal. This could be attributed to the complexity and challenge of fine-tuning LLMs, which often require extensive trials and significant investment in time and computational resources. Inferior quality fine-tuning can even lead to the distortion of features acquired during pre-training or may easily result in overfitting and 'memorization' of training labels [9,10]. Given the limited scope of this experiment and the possible insufficient fine-tuning of ChatGLM-6B, these factors may have influenced the results. Nevertheless, even under these conditions, LLM-FT, integrated with the Signed-Prompt architecture, maintained its excellent defense against Prompt Injection attacks, directly correlating with the fundamental principles of the Signed-Prompt concept.
111
+
112
+ Within the basic framework provided by the Signed-Prompt method, the original Prompt and the Signed-Prompt are perceived as completely distinct and unrelated entities by the LLM, each correlating to entirely different and unrelated output instruction strings. This implies that, under normal circumstances, the LLM does not establish any correlation between a user's signed instruction and an attacker's original instruction. Consequently, the LLM is unlikely to output instruction strings that would be interpreted as legitimate commands by an external program upon receiving an unsigned original instruction, as it sees no association between the two. Only an external program can discern which of these two sets of instructions represents the genuinely authorized signed command.
115
+
116
+ It is this logically robust defensive architecture that enables the Signed-Prompt method to ensure that external malicious attack commands are not executed in the vast majority of cases. This holds even when the fine-tuning of LLMs is less than ideal, maintaining the stability of its defensive effectiveness within an acceptable margin of error.
117
+
118
+ # CONCLUSION
119
+
120
+ This paper focuses on the emerging issue of prompt injection attacks within Large Language Models (LLMs) integrated applications. It provides a detailed analysis of the characteristics of prompt injection attacks targeting LLM integrated applications, particularly exploiting the LLM's inability to distinguish authorized commands. Based on these characteristics, the 'Signed-Prompt' method is proposed as a defense strategy, enabling LLM integrated applications to discern whether sensitive commands originate from trusted users. The paper subsequently elaborates on the fundamental concept and architecture of the Signed-Prompt, along with feasible implementation approaches. The fundamental components of the Signed-Prompt system architecture are the Signed-Prompt Encoder and the Adjusted LLM. In the experiments of this paper, the former was conveniently implemented through prompt engineering, yielding effective results, while the latter employed both prompt engineering and fine-tuning methods. The experiments comprehensively analyzed the defensive performance of the Signed-Prompt method against various types of prompt injection attacks, showing exceptional effectiveness. This is attributed to the core principle of Signed-Prompt, which differentiates legitimate user commands, once signed, from potentially external attacker-derived unsigned original prompts at the LLM level, thereby theoretically enabling effective and stable prevention of the execution of attacker's commands.
121
+
122
+ In summary, this paper proposes an effective defense strategy against prompt injection attacks. This approach not only offers a unique perspective within the existing application frameworks but also paves the way for future research. Looking ahead, the development and refinement of this method may focus on several key areas. Firstly, the research could explore more efficient implementation methods of Signed-Prompt framework under real-life applications. Further, considering the evolving nature of cybersecurity threats, research should focus on adapting this method to new types of attack strategies. Finally, integrating this approach with other security measures, such as behavior analysis and anomaly detection, could further enhance the overall security of the system. These research efforts are expected to enhance the security of LLM integrated applications and contribute new ideas and frameworks to the field of AI security research.
123
+
124
+ # REFERENCES
125
+
126
+ 1. H. J. Branch, J. R. Cefalu, J. McHugh, L. Hujer, A. Bahl, D. D. C. Iglesias, ... & R. Darwishi. Evaluating the susceptibility of pre-trained language models via handcrafted adversarial examples. arXiv preprint arXiv:2209.02128 (2022).
127
+ 2. S. Abdelnabi, K. Greshake, S. Mishra, C. Endres, T. Holz, & M. Fritz. Not What You've Signed Up For: Compromising Real-World LLM-Integrated Applications with Indirect Prompt Injection. In Proceedings of the 16th ACM Workshop on Artificial Intelligence and Security (pp. 79-90) (2023, November).
128
+ 3. Y. Liu, Y. Jia, R. Geng, J. Jia, & N. Z. Gong. Prompt Injection Attacks and Defenses in LLM-Integrated Applications. arXiv preprint arXiv:2310.12815 (2023).
129
+ 4. S. Willison. The Dual LLM pattern for building AI assistants that can resist prompt injection. (2023). https://simonwillison.net/2023/Apr/25/dual-llm-pattern/
130
+ 5. J. Yu, Y. Wu, D. Shu, M. Jin, & X. Xing. Assessing Prompt Injection Risks in 200+ Custom GPTs. arXiv preprint arXiv:2311.11538 (2023).
131
+ 6. S. Toyer, O. Watkins, E. A. Mendes, J. Svegliato, L. Bailey, T. Wang, ... & S. Russell. Tensor Trust: Interpretable Prompt Injection Attacks from an Online Game. arXiv preprint arXiv:2311.01011 (2023).
132
+ 7. Y. Liu, G. Deng, Y. Li, K. Wang, T. Zhang, Y. Liu, ... & Y. Liu. Prompt Injection attack against LLM-integrated Applications. arXiv preprint arXiv:2306.05499 (2023).
133
+ 8. J. Wang, Z. Liu, L. Zhao, Z. Wu, C. Ma, S. Yu, ... & S. Zhang. Review of large vision models and visual prompt engineering. Meta-Radiology, 100047 (2023).
134
+ 9. D. Li, & H. Zhang. Improved regularization and robustness for fine-tuning in neural networks. Advances in Neural Information Processing Systems, 34, 27249-27262 (2021).
135
+
136
+ 10. A. Kumar, A. Raghunathan, R. Jones, T. Ma, & P. Liang. Fine-tuning can distort pretrained features and underperform out-of-distribution. arXiv preprint arXiv:2202.10054 (2022).
2401.07xxx/2401.07612/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6eefde66e99e9e63fbd387d5b4c14131b4e76e8eb63e6d75e89e6a1e9d0873f7
3
+ size 243181
2401.07xxx/2401.07612/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.07xxx/2401.07627/2bc2bf1b-9bb7-43d8-bf88-6905af6225b4_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.07xxx/2401.07627/2bc2bf1b-9bb7-43d8-bf88-6905af6225b4_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.07xxx/2401.07627/2bc2bf1b-9bb7-43d8-bf88-6905af6225b4_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:213bb92dfbe854c6baa6f6ca7e0d064a14d2a35f8fd38fa432b7bb897bfa1c21
3
+ size 229019
2401.07xxx/2401.07627/full.md ADDED
@@ -0,0 +1,453 @@
1
+ # Cost-sensitive feature selection for Support Vector Machines
2
+
3
+ S. Benitez-Pena $^{a,b,*}$ , R. Blanquero $^{a,b}$ , E. Carrizosa $^{a,b}$ , P. Ramirez-Cobo $^{a,c}$
4
+
5
+ aIMUS. Universidad de Sevilla. 41012 Sevilla. Spain
6
+
7
+ $^{b}$ Departamento de Estadística e Investigación Operativa. Universidad de Sevilla. 41012 Sevilla. Spain
8
+
9
+ $^{c}$ Departamento de Estadística e Investigación Operativa. Universidad de Cádiz. 11510
10
+
11
+ Puerto Real, Cadiz. Spain
12
+
13
+ # Abstract
14
+
15
+ Feature Selection is a crucial procedure in Data Science tasks such as Classification, since it identifies the relevant variables, thus making the classification procedures more interpretable, cheaper in terms of measurement and more effective by reducing noise and data overfitting. The relevance of features in a classification procedure is linked to the fact that misclassification costs are frequently asymmetric, since false positive and false negative cases may have very different consequences. However, off-the-shelf Feature Selection procedures seldom take into account such cost-sensitivity of errors.
16
+
17
+ In this paper we propose a mathematical-optimization-based Feature Selection procedure embedded in one of the most popular classification procedures, namely, Support Vector Machines, accommodating asymmetric misclassification costs. The key idea is to replace the traditional margin maximization by minimizing the number of features selected, but imposing upper bounds on the false positive and negative rates. The problem is written as an integer linear problem plus a quadratic convex problem for Support Vector Machines with both linear and radial kernels.
18
+
19
+ The reported numerical experience demonstrates the usefulness of the proposed Feature Selection procedure. Indeed, our results on benchmark data sets show that a substantial decrease of the number of features is obtained, whilst the desired trade-off between false positive and false negative rates is achieved.
20
+
21
+ Keywords: Classification, Data Science, Support Vector Machines, Feature Selection, Integer Programming, Sparsity
22
+
23
+ # 1. Introduction
24
+
25
+ Supervised Classification is one of the most important tasks in Data Science, e.g. [8, 39], full of challenges from a Mathematical Optimization perspective, e.g. [2, 3, 9, 10, 14, 15, 17, 19, 35, 36, 37, 38, 41, 42, 43]. In its most basic version, we are given a set $I$ of individuals, each represented by a vector $(x_{i},y_{i})$ , where $x_{i}\in \mathbb{R}^{N}$ is the so-called feature vector, and $y_{i}\in \mathcal{C} = \{-1,1\}$ is the membership of individual $i$ . A classifier $\Psi$ , i.e., a function $\Psi :\mathbb{R}^N\longrightarrow \mathcal{C}$ , is sought to assign labels $c\in \mathcal{C}$ to incoming individuals for which the feature vector $x$ is known but the label $y$ is unknown and estimated through $\Psi (x)$ .
26
+
27
+ The different classification procedures differ in the way the classifier $\Psi$ is obtained from the data set $I$ . A frequent approach consists of reducing the search of the classifier to the resolution of an optimization problem, see [17]. This is the case, among many others, of the state-of-the-art classifier for binary classification, known as Support Vector Machines (SVM), [17, 20, 44, 45], addressed in this paper.
28
+
29
+ In SVM with linear kernel, $\Psi$ takes the form
30
+
31
+ $$
32
+ \Psi (x) = \left\{ \begin{array}{rl} 1, & \text{if } \boldsymbol{w}^{\top} x + \beta \geq 0 \\ -1, & \text{else}, \end{array} \right. \tag{1}
33
+ $$
34
+
35
+ where $\pmb{w} \in \mathbb{R}^N$ and $\beta \in \mathbb{R}$ are obtained as the optimal solution of the following convex quadratic programming formulation with linear constraints
36
+
37
+ $$
38
+ \begin{array}{l} \min _ {\boldsymbol {w}, \beta , \xi} \quad \boldsymbol {w} ^ {\top} \boldsymbol {w} + C \sum_ {i \in I} \xi_ {i} \\ s. t. \quad y _ {i} \left(\boldsymbol {w} ^ {\top} x _ {i} + \beta\right) \geq 1 - \xi_ {i}, \quad i \in I \tag {2} \\ \xi_ {i} \geq 0 \quad i \in I. \\ \end{array}
39
+ $$
40
+
41
+ Here $C > 0$ is the regularization parameter, which needs to be tuned, and $\xi_{i} \geq 0$ is a penalty associated to misclassifying individual $i$ in the so-called training sample $I$ .
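+
+ For reference, the standard (non-sparse) linear SVM classifier in (1)-(2) can be fitted with an off-the-shelf library; a minimal scikit-learn sketch is given below (this is the baseline the cost-sensitive sparse procedure is later compared against, and the variable names are ours).
+
+ ```python
+ # Standard linear SVM baseline; C plays the role of the regularization parameter above.
+ from sklearn.svm import SVC
+
+ baseline = SVC(kernel="linear", C=1.0)
+ # baseline.fit(X_train, y_train); baseline.predict(X_new) then implements the rule in (1).
+ ```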
42
+
43
+ An apparently innocent extension of (1) is given by
44
+
45
+ $$
46
+ \Psi (x) = \left\{ \begin{array}{rl} 1, & \text{if } \boldsymbol{w}^{\top} \phi(x) + \beta \geq 0 \\ -1, & \text{else}, \end{array} \right. \tag{3}
47
+ $$
48
+
49
+ where $\phi : \mathbb{R}^N \to \mathcal{H}$ maps the original $N$ features into a vector space of higher dimension, and $\boldsymbol{w}$ and $\beta$ are obtained by solving an optimization problem formally identical to (2), but taking place in the space $\mathcal{H}$ instead of $\mathbb{R}^N$ ,
50
+
51
+ $$
52
+ \begin{array}{l} \min _ {\boldsymbol {w}, \beta , \xi} \quad \boldsymbol {w} ^ {\top} \boldsymbol {w} + C \sum_ {i \in I} \xi_ {i} \\ s. t. \quad y _ {i} \left(\boldsymbol {w} ^ {\top} \phi \left(x _ {i}\right) + \beta\right) \geq 1 - \xi_ {i}, \quad i \in I \tag {4} \\ \xi_ {i} \geq 0 \quad i \in I. \\ \end{array}
53
+ $$
54
+
55
+ In this case, the classifier is usually obtained by solving, instead of (4), its dual,
56
+
57
+ $$
58
+ \begin{array}{l} \max _ {\alpha} \sum_ {i \in I} \alpha_ {i} - \frac {1}{2} \sum_ {i, j \in I} \alpha_ {i} y _ {i} \alpha_ {j} y _ {j} K (x _ {i}, x _ {j}) \\ s. t. \quad \sum_ {i \in I} \alpha_ {i} y _ {i} = 0 \tag {5} \\ 0 \leq \alpha_ {i} \leq \frac {C}{2}, \quad i \in I, \\ \end{array}
59
+ $$
60
+
61
+ where $K(x,x^{\prime}) = \phi (x)^{\top}\phi (x^{\prime})$ is the so-called kernel function. From the optimal solution to (5) and taking into account the complementarity slackness conditions, $\pmb{w}$ and $\beta$ in (3) are obtained. In particular,
62
+
63
+ $$
64
+ \boldsymbol {w} ^ {\top} \boldsymbol {w} = \sum_ {i, j \in I} \alpha_ {i} y _ {i} \alpha_ {j} y _ {j} K \left(x _ {i}, x _ {j}\right), \tag {6}
65
+ $$
66
+
67
+ $$
68
+ \boldsymbol {w} ^ {\top} \phi (x) = \sum_ {i \in I} \alpha_ {i} y _ {i} K \left(x _ {i}, x\right). \tag {7}
69
+ $$
70
+
71
+ See e.g. [17, 20, 44, 45] for details.
72
+
73
+ The classifier uses all the features involved in the problem, both in (1) and (3), which may be rather problematic if measuring the features involves some non-negligible costs. This is particularly relevant when the dimension $N$ of the data set is large. It is then advisable to perform Feature Selection (FS), [1, 5, 11, 13, 21, 26, 28, 31, 33, 47], in order to reduce the set of features and obtain an appropriate trade-off between classification accuracy and sparsity.
74
+
75
+ A number of different FS procedures can be found in the literature, some independent of the classification procedure (FS is performed in advance, based e.g. on the correlation between each feature and the label) and others embedded in the classification procedure, like the Holdout SVM (HOSVM), [32], Kernel-Penalized SVM (KP-SVM), [34], or the methods presented in [18] or [22]. Also, one can minimize the number of relevant features or even their cost, as in [30]. The embedded approach, combined with any of the previous optimization schemes, is the one considered in this paper, since we aim to obtain an SVM-based classifier and, at the same time, perform the selection of the features. The core idea is the optimization problem to be solved: instead of maximizing the margin, as in the traditional SVM, we seek the classifier with the lowest number of features (or cost of the features), but without excessively degrading the original performance. In order to be able to control the classifier's performance, we will make use of constraints as in [Benitez-Pena et al.]. Specifically, the formulation of the constrained SVM with linear kernel is
76
+
77
+ $$
78
+ \min _ {\boldsymbol {w}, \beta , \xi} \quad \boldsymbol {w} ^ {\top} \boldsymbol {w} + C \sum_ {i \in I} \xi_ {i}
79
+ $$
80
+
81
+ $$
82
+ s. t. \quad y _ {i} (\boldsymbol {w} ^ {\top} x _ {i} + \beta) \geq 1 - \xi_ {i}, \quad i \in I
83
+ $$
84
+
85
+ $$
86
+ 0 \leq \xi_ {i} \leq M _ {1} \left(1 - \zeta_ {i}\right) \quad i \in I \tag {8}
87
+ $$
88
+
89
+ $$
90
+ \mu (\zeta) _ {\ell} \geq \lambda_ {\ell} \quad \ell \in L
91
+ $$
92
+
93
+ $$
94
+ \zeta_ {i} \in \{0, 1 \} \qquad \qquad i \in I,
95
+ $$
96
+
97
+ where $M_{1}$ is a large number.
98
+
99
+ In essence, this is simply the formulation for the SVM with linear kernel, to which performance constraints have been added: $\mu (\zeta)_{\ell}\geq \lambda_{\ell}$ , where $\mu (\zeta)_{\ell}$ are different performance measures, forced to take values above thresholds $\lambda_{\ell}$ , and $\zeta_{i}$ are new binary variables that check whether sample $i$ is counted as correctly classified. See [Benitez-Pena et al.] for the details. Its (partial) dual formulation is
102
+
103
+ $$
104
+ \min _ {\alpha , \beta , \xi , \zeta} \sum_ {i, j \in I} \alpha_ {i} y _ {i} \alpha_ {j} y _ {j} K (x _ {i}, x _ {j}) + C \sum_ {i \in I} \xi_ {i}
105
+ $$
106
+
107
+ $$
108
+ s. t. \quad y _ {i} \left(\sum_ {j \in I} \alpha_ {j} y _ {j} K \left(x _ {j}, x _ {i}\right) + \beta\right) \geq 1 - \xi_ {i}, \quad i \in I
109
+ $$
110
+
111
+ $$
112
+ \sum_ {i \in I} \alpha_ {i} y _ {i} = 0 \tag {9}
113
+ $$
114
+
115
+ $$
116
+ 0 \leq \alpha_ {i} \leq C / 2 \quad i \in I
117
+ $$
118
+
119
+ $$
120
+ 0 \leq \xi_ {i} \leq M _ {1} (1 - \zeta_ {i}) \quad i \in I
121
+ $$
122
+
123
+ $$
124
+ \mu (\zeta) _ {\ell} \geq \lambda_ {\ell} \quad \ell \in L
125
+ $$
126
+
127
+ $$
128
+ \zeta_ {i} \in \{0, 1 \} \quad i \in I.
129
+ $$
130
+
131
+ As before, this is similar to the standard partial dual formulation of the SVM with general kernel and constraints on the performance measures, as in (8). For more information about how formulation (9) is obtained, the reader is referred to the Appendix. Note that, while mathematical optimization problems addressed in the statistical literature are, traditionally, as (2) or (5), nonlinear programs in continuous variables, our approach involves integer variables, which define harder optimization problems. However, Integer Programming has been shown to be rather competitive thanks to the impressive advances in (nonlinear) integer programming solvers, as demonstrated in recent papers addressing different topics in data analysis, [6, 7, 13, 14, 15, 16].
132
+
133
+ The remainder of the paper is structured as follows. In Section 2 we present the new FS methodology for SVM. For either linear or nonlinear kernels, we reduce the optimization problem to solving a standard linear integer program plus, eventually, a quadratic convex problem. The performance of our FS approach is empirically tested under different experiments described in Section 3. The results of those experiments are shown in Section 4. Comparisons between the use of linear and radial kernels, and between the standard linear SVM with and without embedded FS are also provided. The paper ends with conclusions and possible extensions in Section 5.
134
+
135
+ # 2. Cost-sensitive Feature Selection
136
+
137
+ In this section we present a novel linear formulation for SVM where classification costs are modeled via certain constraints, and where, in addition, a FS approach is embedded in such a way that only the relevant features are considered.
138
+
139
+ In order to cope with classification costs, first we recall some performance measures, namely,
140
+
141
+ - TPR (True Positive Rate): $P(\boldsymbol{w}^\top X + \beta > 0 | Y = +1)$
142
+ - TNR (True Negative Rate): $P(\boldsymbol{w}^\top X + \beta < 0 | Y = -1)$
143
+ - Acc (Accuracy): $P(Y(\boldsymbol{w}^\top X + \beta) > 0)$ ,
144
+
145
+ where $P(\pmb{w}^\top X + \beta > 0 | Y = +1)$ and $P(\pmb{w}^\top X + \beta < 0 | Y = -1)$ denote, respectively, the probability of correctly classifying a positive or negative labeled instance, and $P(Y(\boldsymbol{w}^\top X + \beta) > 0)$ is the probability of correctly classifying a given instance.
148
+
149
+ The objective is to perform classification using a reduced set of features, in such a way that certain constraints over the performance, such as $TPR \geq \lambda_{1}$ or $TNR \geq \lambda_{-1}$ (for threshold values $\lambda_{1}, \lambda_{-1} \in [0,1]$ ), are fulfilled.
150
+
151
+ Note that the pair $(X,Y)$ is a random vector with unknown distribution from which a sample $\{(x_i,y_i)\}_{i\in I}$ is generated. This implies that $TPR$ and $TNR$ should be estimated from sample data. This leads to the empirical constraints $\widehat{TPR} \geq \lambda_1^*$ and $\widehat{TNR} \geq \lambda_{-1}^{*}$ , for $\lambda_1^* \geq \lambda_1$ and $\lambda_{-1}^{*} \geq \lambda_{-1}$ , where the performance measures are replaced by their sample estimates. Two possible choices, which shall be explored in this work, are
152
+
153
+ $$
+ \lambda_{1}^{*} = \lambda_{1} \quad \text{and} \quad \lambda_{-1}^{*} = \lambda_{-1}, \tag{10}
+ $$
160
+
161
+ or the more conservative approach based on Hoeffding inequality,
162
+
163
+ $$
+ \lambda_{1}^{*} = \lambda_{1} + \sqrt{\frac{-\log \alpha}{2 |I_{+}|}} \quad \text{and} \quad \lambda_{-1}^{*} = \lambda_{-1} + \sqrt{\frac{-\log \alpha}{2 |I_{-}|}}, \tag{11}
+ $$
170
+
171
+ where $\alpha$ is the significance level for the hypothesis test whose null hypothesis is either $TPR \leq \lambda_1$ or $TNR \leq \lambda_{-1}$ . See [Benitez-Pena et al.] for more details.
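+
+ As a quick numerical illustration of the two choices above, the helper below computes the empirical threshold $\lambda^{*}$ for a given target rate, class sample size and significance level; the function name and defaults are ours, not the paper's.
+
+ ```python
+ # Empirical thresholds: choice (10) keeps lambda unchanged, choice (11) adds the
+ # Hoeffding term sqrt(-log(alpha) / (2 * n_class)).
+ import math
+
+ def empirical_threshold(lam: float, n_class: int, alpha: float = 0.05, hoeffding: bool = True) -> float:
+     if not hoeffding:
+         return lam
+     return lam + math.sqrt(-math.log(alpha) / (2.0 * n_class))
+
+ print(empirical_threshold(0.90, 1000))                    # Hoeffding-adjusted threshold, as in (11)
+ print(empirical_threshold(0.90, 1000, hoeffding=False))   # plain threshold, as in (10)
+ ```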
172
+
173
+ Note that it is straightforward to extend our results to the case in which measurement costs are associated with the features, as in e.g. [12], and then the minimum-cost feature set is sought instead.
174
+
175
+ # 2.1. The cost-sensitive FS procedure
176
+
177
+ Assume that we have a linear kernel, i.e., the kernel $K$ in (5) is given by $K(x,x^{\prime}) = x^{\top}x^{\prime}$ , and thus the SVM with all features is obtained by solving (2). We state the feature selection problem as a Mixed Integer Linear Program. Consider an auxiliary binary variable $\zeta_{i}$ which, when equal to 1, indicates that instance $i$ is counted as correctly classified. Hence, estimates of TPR and TNR from sample $I$ have lower bounds $\widehat{TPR} \geq \sum_{i\in I}\zeta_i(1 + y_i) / \sum_{i\in I}(1 + y_i)$ and $\widehat{TNR} \geq \sum_{i\in I}\zeta_i(1 - y_i) / \sum_{i\in I}(1 - y_i)$ , respectively. Associated with each feature $k$ , $1 \leq k \leq N$ , we define the variable $z_{k}$ taking the value 1 if feature $k$ is selected for classifying, and 0 otherwise. Hence, the optimization problem that defines a linear classifier (hyperplane) taking into account the classification rates and in which a cost-based FS procedure is integrated is given by
178
+
179
+ $$
+ \begin{array}{rll} \min\limits_{\boldsymbol{w}, \beta, z, \zeta} & \sum_{k=1}^{N} c_{k} z_{k} & \\ \text{s.t.} & y_{i}\left(\boldsymbol{w}^{\top} x_{i} + \beta\right) \geq 1 - M_{2}\left(1 - \zeta_{i}\right), & \forall i \in I \\ & \sum_{i \in I} \zeta_{i} (1 - y_{i}) \geq \lambda_{-1}^{*} \sum_{i \in I} (1 - y_{i}) & \\ & \sum_{i \in I} \zeta_{i} (1 + y_{i}) \geq \lambda_{1}^{*} \sum_{i \in I} (1 + y_{i}) & \\ & \left| w_{k} \right| \leq M_{3} z_{k} & \forall k \in 1, \dots, N \\ & \zeta_{i} \in \{0, 1\} & \forall i \in I \\ & z_{k} \in \{0, 1\} & \forall k \in 1, \dots, N \end{array} \tag{P1}
+ $$
184
+
185
+ where $M_2$ and $M_3$ are sufficiently large numbers. Also, $c_k$ is the cost associated with the $k$ -th feature, so we perform the FS by reducing the overall cost of the features. The case $c_k = 1$ for all $k$ is the standard FS in which the number of features selected is minimized.
186
+
187
+ Let us discuss the rationality of the formulation $(P1)$ . The overall cost associated with the features used for classifying is to be minimized in the objective. The first constraint identifies which individuals are counted as correctly classified, since, as soon as $\zeta_i = 1$ , the score $\Psi(x_i)$ is forced to be $\Psi(x_i) \geq 1$ (if $y_i = 1$ ) or $\Psi(x_i) \leq -1$ (if $y_i = -1$ ). Furthermore, the constant $\sum_{i \in I} (1 - y_i)$ is equal to two times the cardinality of the set $\{i \in I : y_i = -1\}$ , whereas $\sum_{i \in I} \zeta_i (1 - y_i)$ yields two times the number of individuals counted as correctly classified in the class $-1$ . Hence, the second and third constraints force respectively the fraction of individuals with label $y_i = -1$ (respectively, $y_i = 1$ ) counted as correctly classified to be at least $\lambda_{-1}^*$ (respectively, at least $\lambda_1^*$ ). Finally, the fourth constraint forces $z_k = 1$ whenever feature $k$ is actually used in the classifier, i.e., whenever $w_k \neq 0$ . Note that, if very demanding classification rates are imposed, problem $(P1)$ may be infeasible. The solver will report this infeasibility, thus advising the user to lower the threshold values $\lambda_1$ , $\lambda_{-1}$ .
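+
+ The following sketch shows how $(P1)$ can be assembled with Gurobi's Python interface, which is the solver reported in Section 3; the function, argument names and default big-M values are ours and are only meant to illustrate the formulation.
+
+ ```python
+ # (P1) as a mixed integer linear program: X is an (n x N) numpy array, y holds labels
+ # in {-1, +1}, c the feature costs, lam_pos / lam_neg the thresholds lambda_1*, lambda_-1*.
+ import gurobipy as gp
+ from gurobipy import GRB
+
+ def solve_P1(X, y, c, lam_pos, lam_neg, M2=100.0, M3=100.0, time_limit=300):
+     n, N = X.shape
+     m = gp.Model("P1")
+     m.Params.TimeLimit = time_limit
+     w = m.addVars(N, lb=-GRB.INFINITY, name="w")
+     beta = m.addVar(lb=-GRB.INFINITY, name="beta")
+     z = m.addVars(N, vtype=GRB.BINARY, name="z")
+     zeta = m.addVars(n, vtype=GRB.BINARY, name="zeta")
+     m.setObjective(gp.quicksum(c[k] * z[k] for k in range(N)), GRB.MINIMIZE)
+     for i in range(n):
+         score = gp.quicksum(float(X[i, k]) * w[k] for k in range(N)) + beta
+         m.addConstr(float(y[i]) * score >= 1 - M2 * (1 - zeta[i]))    # correct-classification link
+     m.addConstr(gp.quicksum((1 - float(y[i])) * zeta[i] for i in range(n))
+                 >= lam_neg * sum(1 - float(y[i]) for i in range(n)))  # TNR-type constraint
+     m.addConstr(gp.quicksum((1 + float(y[i])) * zeta[i] for i in range(n))
+                 >= lam_pos * sum(1 + float(y[i]) for i in range(n)))  # TPR-type constraint
+     for k in range(N):                                                # |w_k| <= M3 * z_k
+         m.addConstr(w[k] <= M3 * z[k])
+         m.addConstr(-w[k] <= M3 * z[k])
+     m.optimize()
+     return [k for k in range(N) if z[k].X > 0.5]
+ ```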
188
+
189
+ Solving $(P1)$ identifies the features to be used in the classification. However, an SVM classifier has not been built yet, since the margin has not been maximized. The next section shall address this problem by using the SVM either with the linear kernel or with an arbitrary one.
190
+
191
+ We should stress that the feature selection is based on the linear kernel, yielding the tractable linear integer optimization problem (P1). Extensions of our FS approach to nonlinear kernels are formally straightforward, but the resulting nonconvex mixed integer nonlinear problems are not tractable, even for low dimensions. For this reason, we perform the FS by assuming a linear kernel, and then, once the features are selected, the classifier is built using an arbitrary kernel, as detailed in Section 2.2.
192
+
193
+ Of course more flexibility is gained if, in a preprocessing step, data $x$ are embedded in a higher dimensional space through a nonlinear mapping $\phi$ , and thus the original $x$ is replaced by $\phi(x)$ in (P1).
194
+
195
+ # 2.2. Cost-sensitive sparse SVMs: linear vs arbitrary kernels
196
+
197
+ Here we explain how the sparse SVM is built. Let us first consider the case of the classifier with linear kernel. Hence, the sparse SVM that controls the classification rates is formulated as
200
+
201
+ $$
202
+ \begin{array}{rll} \min\limits_{w, \beta, \xi} & \sum_{j=1}^{N} w_{j}^{2} z_{j} + C \sum_{i \in I} \xi_{i} & \\ \text{s.t.} & y_{i}\left(\sum_{j=1}^{N} w_{j} z_{j} x_{ij} + \beta\right) \geq 1 - \xi_{i}, & \forall i \in I \\ & 0 \leq \xi_{i} \leq M_{1}\left(1 - \zeta_{i}\right) & \forall i \in I \\ & \zeta_{i} \in \{0, 1\} & \forall i \in I \\ & \sum_{i \in I} \zeta_{i} (1 - y_{i}) \geq \lambda_{-1}^{*} \sum_{i \in I} (1 - y_{i}) & \\ & \sum_{i \in I} \zeta_{i} (1 + y_{i}) \geq \lambda_{1}^{*} \sum_{i \in I} (1 + y_{i}) & \end{array} \tag{P2}
203
+ $$
204
+
205
+ Note that $(P2)$ is defined similarly to a standard linear SVM optimization problem. The slight difference is that in $(P2)$ only the variables selected by the FS approach described in Section 2.1 are considered. This means that the values of $z$ in $(P2)$ are those obtained in problem $(P1)$ . Note too that the constraints concerning the performance measures are also added here.
206
+
207
+ Now, assume the SVM classifier has the form (3), and an arbitrary kernel function $K(x,x^{\prime}) = \phi (x)^{\top}\phi (x^{\prime})$ is used instead of the linear one. See e.g. [17, 20, 44, 45] for details. Although formally similar, the case of an arbitrary kernel $K$ implies that, if an FS procedure such as $(P1)$ is desired, nonlinear constraints are involved and thus the optimization problem is much harder to solve. For this reason, instead of coping with such a hard problem, we propose an alternative strategy: first, $(P1)$ is solved (as before), and then the SVM classifier (with the selected kernel) is built, using only the features selected in the problem described in Section 2.1. In what follows we focus on the radial kernel, even though one can consider any arbitrary kernel $K$ . First, we define the binary variables $z$ identifying the features which are selected for classifying. The choice of the features, identified with the vector $z$ , leads to the kernel $K_{z}$ , defined as
208
+
209
+ $$
210
+ K_{z}(x, x^{\prime}) = \exp \left(- \gamma \left(\sum_{k=1}^{N} z_{k} (x^{(k)} - x^{\prime (k)})^{2}\right)\right),
211
+ $$
212
+
213
+ where $x^{(k)}$ denotes the $k$ -th component of vector $x$ .
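+
+ A direct numpy transcription of $K_{z}$ is shown below (a small sketch; the function name is ours), with $z$ the 0/1 vector of features returned by $(P1)$ .
+
+ ```python
+ # Radial kernel restricted to the selected features: only coordinates with z_k = 1
+ # contribute to the squared distance.
+ import numpy as np
+
+ def kernel_Kz(x, x_prime, z, gamma):
+     x, x_prime, z = (np.asarray(a, dtype=float) for a in (x, x_prime, z))
+     return float(np.exp(-gamma * np.sum(z * (x - x_prime) ** 2)))
+ ```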
214
+
215
+ For $z$ (and thus $K_{z}$ ) fixed, the aim is to solve (4), but replacing the terms $\pmb{w}^{\top}\pmb{w}$ and $\pmb{w}^{\top}\phi(x_{i})$ , respectively, by the expressions (6) and (7), apart from adding the constraints related to the performance measurements, as described in [Benitez-Pena et al.]. Therefore, the cost-sensitive sparse SVM with an arbitrary kernel $K$ is defined (once $z$ is fixed) as
216
+
217
+ $$
218
+ \begin{array}{l} \min _ {\alpha , \xi , \beta , \zeta} \quad \sum_ {i, j \in I} \alpha_ {i} y _ {i} \alpha_ {j} y _ {j} K _ {z} \left(x _ {i}, x _ {j}\right) + C \sum_ {i \in I} \xi_ {i} \\ s. t. \quad y _ {i} \left(\sum_ {j \in I} \alpha_ {j} y _ {j} K _ {z} \left(x _ {i}, x _ {j}\right) + \beta\right) \geq 1 - \xi_ {i}, \quad \forall i \in I \\ 0 \leq \xi_ {i} \leq M _ {1} (1 - \zeta_ {i}) \quad \forall i \in I \\ \sum_ {i \in I} \alpha_ {i} y _ {i} = 0 \\ 0 \leq \alpha_ {i} \leq C / 2 \quad \forall i \in I \tag {P3} \\ \sum_ {i \in I} \zeta_ {i} (1 - y _ {i}) \geq \lambda_ {- 1} ^ {*} \sum_ {i \in I} (1 - y _ {i}) \\ \sum_ {i \in I} \zeta_ {i} (1 + y _ {i}) \geq \lambda_ {1} ^ {*} \sum_ {i \in I} (1 + y _ {i}) \\ \zeta_ {i} \in \{0, 1 \} \quad \forall i \in I \\ \end{array}
219
+ $$
220
+
221
+ Let us discuss the formulation $(P3)$ . The set of features is fixed through $z$ . The objective function, the first, third and fourth constraints are the usual ones in SVM. The second constraint together with the fifth, sixth and seventh constraints force some samples to be correctly classified, as in $(P1)$ .
222
+
223
+ # 3. Experiment Description
224
+
225
+ In this section we describe the experiments, whose results are reported in Section 4, in which the cost-sensitive sparse SVM with linear kernel (problem $(P2)$ ) is compared to its counterpart under the radial kernel (problem $(P3)$ ), where, as described in the previous section, the variables $z$ in both $(P2)$ and $(P3)$ are the solutions of the FS problem formulated by $(P1)$ . Also, the solutions under the sparse methodology will be tested against the standard linear SVM. Although it would be natural to compare the solutions of $(P3)$ with the solutions of a standard radial SVM, this comparison is not straightforward since $(P1)$ may become infeasible when the performance measures obtained with the radial SVM are higher than those under the linear SVM. For simplicity we assume all measurement costs equal to 1, and then our aim is to minimize the number of features used.
226
+
227
+ Next, a description of how the experiments have been carried out is given. In order to solve problems $(P1)$ , $(P2)$ and $(P3)$ , the solver Gurobi, [25], and its Python language interface, [40], are used. In order to estimate the performance of these FS procedures, a 10-fold cross-validation (CV), [27], is used, and out-of-sample accuracies are reported. However, for those datasets that have fewer than 100 instances, a Leave-One-Out procedure is carried out, in order to keep the training sample reasonably large. Also, depending on whether the linear or the radial kernel is considered, a parameter $C$ or a pair of parameters $(C,\gamma)$ must be tuned. Hence, in either the first or the second case, $C\in \{2^{-5},2^{-4},\ldots ,2^{4},2^{5}\} ,\gamma \in \{2^{-5},2^{-4},\ldots ,2^{4},2^{5}\}$ are considered. Problems in integer variables are hard to solve to optimality. However, excellent solutions are obtained in reasonable time. A time limit of 300 seconds is set, giving the solver enough time for finding (sub)optimal solutions. Parameters $M_{1}$ , $M_{2}$ and $M_{3}$ are set to 100. Moreover, parameter tuning is done by another 10-fold CV (respectively, another Leave-One-Out), and the best set of parameters selected is the one with the highest average accuracy (or, in the case of unbalanced data, with the highest geometric mean between the TPR and the TNR).
230
+
231
+ For a better understanding, the whole procedure is summarized in Algorithm 1.
232
+
233
+ for $kf = 1,\ldots ,$ folds do
234
+
235
+ Split data $(D)$ into "folds" subsets, $D = \{D_{1},\dots ,D_{\text{folds}}\}$
236
+
237
+ Set Validation $= D_{kf}$ and set $I = D - \{D_{kf}\}$
238
+
239
+ for each pair $(C, \text{gamma})$ do
240
+
241
+ for $kf2 = 1, \ldots, \text{folds2}$ do
242
+
243
+ split $D^{\prime} = D - \{D_{kf}\}$ into "folds2" subsets,
244
+
245
+ $$
246
+ D ^ {\prime} = \left\{D _ {1} ^ {\prime}, \dots , D _ {f o l d s 2} ^ {\prime} \right\}
247
+ $$
248
+
249
+ Set $Validation^{\prime} = D_{kf2}^{\prime}$ and set $I^{\prime} = D^{\prime} - \{D_{kf2}\}$
250
+
251
+ Run $(P1)$ over $I$ , and select the relevant features.
252
+
253
+ Run $(P2)$ or $(P3)$ over $I$ with the corresponding modified kernel.
254
+
255
+ Validate over Validation', getting the accuracy (acc[kf2])
256
+
257
+ Calculate the average accuracies $(\sum_{kf2}acc[kf2]) / folds2$
258
+
259
+ if $acc[kf2] \geq bestacc$ then
260
+
261
+ Set bestacc = acc[kf2], bestgamma = gamma and bestC = C
262
+
263
+ Run $(P1)$ over $I$ , and select the relevant features.
264
+
265
+ Run $(P2)$ or $(P3)$ with the corresponding modified kernel and the parameters bestgamma and bestC, using $I$ .
266
+
267
+ Validate over Validation, getting the accuracy (acc2[kf]), and the correct classification probabilities (TPR[kf], TNR[kf]) as well as the number of features selected $Z[kf] = \sum_{k=1}^{N} z[k]$ .
268
+
269
+ Calculate and display the average performance measures:
270
+
271
+ $(\sum_{k f}acc2[kf]) / folds,(\sum_{k f}TPR[kf]) / folds,(\sum_{k f}TNR[kf]) / folds$ and $(\sum_{k f}Z[kf]) / folds$
272
+
273
+ Algorithm 1: Pseudocode for general kernel approach.
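+
+ As a complement to Algorithm 1, the control flow can be sketched in Python as follows. This is only a sketch: `solve_P1`, `solve_P23` and `evaluate` are hypothetical placeholders for problems $(P1)$, $(P2)$/$(P3)$ and the validation step, and a stratified split is assumed.
+
+ ```python
+ # Python rendering of the control flow in Algorithm 1; the callables
+ # solve_P1, solve_P23 and evaluate are placeholders for the models above.
+ import numpy as np
+ from sklearn.model_selection import StratifiedKFold
+
+ def nested_cv(X, y, solve_P1, solve_P23, evaluate,
+               C_grid, gamma_grid, folds=10, folds2=10):
+     outer = StratifiedKFold(n_splits=folds, shuffle=True, random_state=0)
+     acc, tpr, tnr, nfeat = [], [], [], []
+     for tr, va in outer.split(X, y):                        # outer CV loop
+         best = (-np.inf, None)
+         inner = StratifiedKFold(n_splits=folds2, shuffle=True, random_state=0)
+         for C in C_grid:
+             for gamma in gamma_grid:                        # only C for the linear kernel
+                 scores = []
+                 for tr2, va2 in inner.split(X[tr], y[tr]):  # inner CV loop
+                     z = solve_P1(X[tr][tr2], y[tr][tr2])               # feature selection
+                     clf = solve_P23(X[tr][tr2], y[tr][tr2], z, C, gamma)
+                     scores.append(evaluate(clf, X[tr][va2], y[tr][va2])["acc"])
+                 if np.mean(scores) >= best[0]:
+                     best = (np.mean(scores), (C, gamma))
+         # retrain on the whole outer training fold with the best parameters
+         z = solve_P1(X[tr], y[tr])
+         clf = solve_P23(X[tr], y[tr], z, *best[1])
+         res = evaluate(clf, X[va], y[va])
+         acc.append(res["acc"]); tpr.append(res["tpr"]); tnr.append(res["tnr"])
+         nfeat.append(int(np.sum(z)))
+     return {k: float(np.mean(v)) for k, v in
+             {"acc": acc, "tpr": tpr, "tnr": tnr, "features": nfeat}.items()}
+ ```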
274
+
275
+ # 4. Numerical Results
276
+
277
+ Here, the experimental results are presented. We have chosen the datasets wisconsin (Breast Cancer Wisconsin (Diagnostic) Data Set), votes (Congressional Voting Records Data Set), nursery (Nursery Data Set), Australian (Statlog (Australian Credit Approval) Data Set), careval (Car Evaluation Data Set) and gastrointestinal (Gastrointestinal Lesions in Regular Colonoscopy Data Set), all well referenced and described in detail in [29], and leukemia (Leukemia data), described in [23]. First, a brief data description is given in Section 4.1. Then, the results under the linear kernel approach are presented and discussed in Section 4.2. Finally, the case of the radial kernel is analyzed in Section 4.3.
278
+
279
+ Note that the main idea of an FS approach is to reduce the number of features or, more generally, the overall associated costs, in such a way that the performance is not severely affected. Since we can control the proportion of well-classified samples, this is not a problematic issue. In fact, the experiments are designed so that the new performance measurements are not more than 0.025 points lower than the original ones, i.e., those obtained under the standard version of the SVM with linear kernel. Using the notation of [Benitez-Pena et al.], where $TNR$ and $TPR$ are the true negative and true positive rates and $TNR_0$ and $TPR_0$ are their values obtained under the standard SVM with linear kernel on a validation sample, $TNR \geq \lambda_{-1} = \min\{1, TNR_0 - 0.025\}$ and $TPR \geq \lambda_1 = \min\{1, TPR_0 - 0.025\}$ are desired. For both the linear and the radial cases we have considered the two possible selections of the thresholds, defined by (10) and (11).
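+
+ For concreteness, both threshold choices can be computed as in the small sketch below, where $\alpha$ and the class sizes $|I_1|$, $|I_{-1}|$ are purely illustrative values (the paper's definitions (10) and (11) are what matters):
+
+ ```python
+ # Thresholds on TPR/TNR: the empirical choice (10) and the
+ # Hoeffding-corrected choice (11).  alpha and the class sizes are
+ # illustrative only.
+ import math
+
+ def thresholds(tpr0, tnr0, n_pos, n_neg, alpha=0.05, hoeffding=False):
+     lam_pos = min(1.0, tpr0 - 0.025)                 # lambda_1
+     lam_neg = min(1.0, tnr0 - 0.025)                 # lambda_{-1}
+     if hoeffding:                                    # lambda* = lambda + sqrt(-log(alpha)/(2 n))
+         lam_pos += math.sqrt(-math.log(alpha) / (2 * n_pos))
+         lam_neg += math.sqrt(-math.log(alpha) / (2 * n_neg))
+     return lam_pos, lam_neg
+
+ print(thresholds(tpr0=0.99, tnr0=0.94, n_pos=320, n_neg=190, hoeffding=True))
+ ```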
280
+
281
+ We stress that the purpose of this experimental section is to show how we can control the TPR or the TNR without a severe deterioration of the overall classification rates, hopefully with a strong decrease in the number or cost of the features selected. This is why we compare the performance of our approach against that of the standard SVM. In a real application, the thresholds $\lambda_{1}$, $\lambda_{-1}$ are to be provided by the user, whether or not they are based on SVM classification rates.
282
+
283
+ # 4.1. Data description
284
+
285
+ The performance of these novel approaches is illustrated using six real-life datasets from the UCI Repository, [29], as well as the leukemia dataset, [23]. The positive label is assigned to the majority class in 2-class datasets. In addition, multiclass datasets are transformed into 2-class ones by giving the positive label to the largest class and the negative label to the remaining samples. Categorical variables are transformed into dummy variables, i.e., a categorical variable with $\nu$ levels is replaced by $\nu - 1$ binary variables. Also, missing values, if present, are replaced by the median. A description of the datasets can be found in Table 1. The table is split into four columns. The first shows the name of the dataset (the actual names of the datasets are given at the beginning of this section). The total number of samples of the dataset is given in the second column. The number of variables considered, and the number (and percentage) of positive samples in the dataset, are given in the last two columns.
286
+
287
+ <table><tr><td>Name</td><td>|Ω|</td><td>V</td><td>|Ω+| (%)</td></tr><tr><td>wisconsin</td><td>569</td><td>30</td><td>357 (62.7 %)</td></tr><tr><td>votes</td><td>435</td><td>32</td><td>267 (61.4 %)</td></tr><tr><td>nursery</td><td>12960</td><td>19</td><td>4320 (33.3 %)</td></tr><tr><td>Australian</td><td>690</td><td>34</td><td>383 (55.5 %)</td></tr><tr><td>careval</td><td>1728</td><td>15</td><td>1210 (70.023 %)</td></tr><tr><td>leukemia</td><td>72</td><td>7128</td><td>47 (65.278 %)</td></tr><tr><td>gastrointestinal</td><td>76</td><td>698</td><td>55 (72.368 %)</td></tr></table>
288
+
289
+ Table 1: Details concerning the implementation of the CSVM for the considered datasets.
290
+
291
+ # 4.2. Results under the cost-sensitive sparse SVM with linear kernel
292
+
293
+ Two types of results will be shown, corresponding to the choices (10) and (11) of the thresholds. As a summary, it will turn out that (10) yields sparser classifiers, while (11), which is a more conservative choice, usually yields less sparse classifiers but with better accuracies.
294
+
295
+ The choice of threshold parameters in (10) leads to the results summarized in Table 2. The first column of Table 2 gives the name of the dataset used (the abbreviation we have chosen for the dataset). Then, the second and third columns show, respectively, the performance measures for the standard SVM (using the linear kernel) and for the proposed cost-sensitive sparse methodology. These columns are split into two subcolumns: the first one shows the average values and the second one the standard deviations. The last column reports the feature reduction, by indicating the original and selected (average) number of variables. From the table, it can be concluded that the approach with a linear kernel works
296
+
297
+ Table 2: Performance measures under the cost-sensitive sparse SVM with linear kernel and $\lambda_1^* = \lambda_1$ , $\lambda_{-1}^{*} = \lambda_{-1}$ .
298
+
299
+ <table><tr><td rowspan="2">Name</td><td></td><td colspan="2">SVM</td><td colspan="2">FS</td><td rowspan="2">Feature reduction</td></tr><tr><td></td><td>Mean</td><td>Std</td><td>Mean</td><td>Std</td></tr><tr><td rowspan="3">wisconsin</td><td>Acc</td><td>0.975</td><td>0.021</td><td>0.947</td><td>0.025</td><td>30 → 2 (0 Std)</td></tr><tr><td>TPR</td><td>0.992</td><td>0.013</td><td>0.973</td><td>0.031</td><td></td></tr><tr><td>TNR</td><td>0.943</td><td>0.051</td><td>0.905</td><td>0.063</td><td></td></tr><tr><td rowspan="3">votes</td><td>Acc</td><td>0.954</td><td>0.033</td><td>0.949</td><td>0.036</td><td>32 → 2 (0 Std)</td></tr><tr><td>TPR</td><td>0.955</td><td>0.038</td><td>0.928</td><td>0.059</td><td></td></tr><tr><td>TNR</td><td>0.947</td><td>0.059</td><td>0.979</td><td>0.036</td><td></td></tr><tr><td rowspan="3">nursery</td><td>Acc</td><td>1</td><td>0</td><td>1</td><td>0</td><td>19 → 1 (0 Std)</td></tr><tr><td>TPR</td><td>1</td><td>0</td><td>1</td><td>0</td><td></td></tr><tr><td>TNR</td><td>1</td><td>0</td><td>1</td><td>0</td><td></td></tr><tr><td rowspan="3">Australian</td><td>Acc</td><td>0.848</td><td>0.051</td><td>0.855</td><td>0.057</td><td>34 → 1 (0 Std)</td></tr><tr><td>TPR</td><td>0.798</td><td>0.083</td><td>0.801</td><td>0.087</td><td></td></tr><tr><td>TNR</td><td>0.912</td><td>0.05</td><td>0.926</td><td>0.041</td><td></td></tr><tr><td rowspan="3">careval</td><td>Acc</td><td>0.956</td><td>0.017</td><td>0.946</td><td>0.019</td><td>15 → 9 (0 Std)</td></tr><tr><td>TPR</td><td>0.96</td><td>0.022</td><td>0.963</td><td>0.017</td><td></td></tr><tr><td>TNR</td><td>0.948</td><td>0.024</td><td>0.907</td><td>0.04</td><td></td></tr><tr><td rowspan="3">leukemia</td><td>Acc</td><td>0.972</td><td>0.164</td><td>0.875</td><td>0.331</td><td>7128 → 3.139 (1.205 Std)</td></tr><tr><td>TPR</td><td>0.979</td><td>0.196</td><td>0.896</td><td>0.305</td><td></td></tr><tr><td>TNR</td><td>0.96</td><td>0.144</td><td>0.833</td><td>0.373</td><td></td></tr><tr><td rowspan="3">gastrointestinal</td><td>Acc</td><td>0.895</td><td>0.307</td><td>0.829</td><td>0.379</td><td>698 → 1 (0 Std)</td></tr><tr><td>TPR</td><td>0.929</td><td>0.258</td><td>0.839</td><td>0.367</td><td></td></tr><tr><td>TNR</td><td>0.8</td><td>0.4</td><td>0.8</td><td>0.4</td><td></td></tr></table>
300
+
301
+ well in general. In the case of wisconsin, the TPR has desirable values, since
302
+
303
+ Table 3: Performance measures under the cost-sensitive sparse SVM with linear kernel and $\lambda_1^* = \lambda_1 + \sqrt{-\log\alpha / (2|I_1|)}$ , $\lambda_{-1}^{*} = \lambda_{-1} + \sqrt{-\log\alpha / (2|I_{-1}|)}$ .
304
+
305
+ <table><tr><td rowspan="2">Name</td><td></td><td colspan="2">SVM</td><td colspan="2">FS</td><td rowspan="2">Feature reduction</td></tr><tr><td></td><td>Mean</td><td>Std</td><td>Mean</td><td>Std</td></tr><tr><td rowspan="3">wisconsin</td><td>Acc</td><td>0.975</td><td>0.021</td><td>0.965</td><td>0.023</td><td>30 → 6.2 (0.919 Std)</td></tr><tr><td>TPR</td><td>0.992</td><td>0.013</td><td>0.975</td><td>0.023</td><td></td></tr><tr><td>TNR</td><td>0.943</td><td>0.051</td><td>0.947</td><td>0.048</td><td></td></tr><tr><td rowspan="3">votes</td><td>Acc</td><td>0.954</td><td>0.033</td><td>0.954</td><td>0.033</td><td>32 → 9.3 (1.16 Std)</td></tr><tr><td>TPR</td><td>0.955</td><td>0.038</td><td>0.96</td><td>0.034</td><td></td></tr><tr><td>TNR</td><td>0.947</td><td>0.059</td><td>0.945</td><td>0.052</td><td></td></tr><tr><td rowspan="3">nursery</td><td>Acc</td><td>1</td><td>0</td><td>1</td><td>0</td><td>19 → 1 (0 Std)</td></tr><tr><td>TPR</td><td>1</td><td>0</td><td>1</td><td>0</td><td></td></tr><tr><td>TNR</td><td>1</td><td>0</td><td>1</td><td>0</td><td></td></tr><tr><td rowspan="3">Australian</td><td>Acc</td><td>0.848</td><td>0.051</td><td>0.837</td><td>0.057</td><td>34 → 5.5 (1.78 Std)</td></tr><tr><td>TPR</td><td>0.769</td><td>0.083</td><td>0.772</td><td>0.074</td><td></td></tr><tr><td>TNR</td><td>0.912</td><td>0.05</td><td>0.924</td><td>0.053</td><td></td></tr><tr><td rowspan="3">careval</td><td>Acc</td><td>0.956</td><td>0.017</td><td>0.954</td><td>0.018</td><td>15 → 11 (0 Std)</td></tr><tr><td>TPR</td><td>0.96</td><td>0.022</td><td>0.962</td><td>0.018</td><td></td></tr><tr><td>TNR</td><td>0.948</td><td>0.024</td><td>0.935</td><td>0.039</td><td></td></tr><tr><td rowspan="3">leukemia</td><td>Acc</td><td>0.972</td><td>0.164</td><td>0.944</td><td>0.229</td><td>7128 → 2 (0 Std)</td></tr><tr><td>TPR</td><td>0.979</td><td>0.196</td><td>0.957</td><td>0.202</td><td></td></tr><tr><td>TNR</td><td>0.96</td><td>0.144</td><td>0.92</td><td>0.272</td><td></td></tr><tr><td rowspan="3">gastrointestinal</td><td>Acc</td><td>0.895</td><td>0.307</td><td>0.842</td><td>0.365</td><td>698 → 3.105 (0.552 Std)</td></tr><tr><td>TPR</td><td>0.929</td><td>0.258</td><td>0.927</td><td>0.26</td><td></td></tr><tr><td>TNR</td><td>0.8</td><td>0.4</td><td>0.619</td><td>0.486</td><td></td></tr></table>
306
+
307
+ it differs from the original by only 0.019 points. However, in the case of the accuracy and the TNR, the loss is bigger than 0.025 points. This is mainly due to two reasons: first, the constraints are imposed on the training sample, while the performance is calculated on a test sample; second, since the thresholds are taken as $\lambda_1^* = \lambda_1$, $\lambda_{-1}^{*} = \lambda_{-1}$, we are not as restrictive as if $\lambda_1^* > \lambda_1$ ($\lambda_{-1}^{*} > \lambda_{-1}$) were required. Nevertheless, the new TNR value is only 0.038 points smaller than the original, and the reduction of features is significant, since only two variables out of 30 are used. Also, in votes the features are significantly reduced and the most affected performance measure is the TPR, which decreases by 0.027 points, lowering the accuracy. However, the TNR increases. As happened with wisconsin, the loss is mainly due to the two facts previously mentioned. For nursery, a remarkable reduction to only one feature is achieved while keeping a perfect classification. This is explained as follows. As commented in Section 4.1, multiclass datasets are transformed into 2-class ones, and this is the case here, obtaining the classes "not_recom" and "others", which are the positive and negative classes, respectively. In addition, one of the (categorical) features in the data (which is the one selected by our procedure) completely determines the class. In Australian, the total number of variables is also reduced to only one, with performance measures similar to those of the standard SVM. In fact, we obtain here even better results than under the original linear SVM. If the variable selected by the algorithm is examined, one can see that it is a binary variable $X$, whose contingency table with the class variable is given in Table 4. Hence this variable is by itself a good predictor, as the FS procedure pointed out. In the case of careval, we obtained the smallest reduction in the number of variables selected, while keeping the performance measures above the imposed thresholds. On the other hand, in the case of leukemia, the number of variables is significantly reduced. However, since the number of instances is small, the performance measurements are affected by this reduction of features. Also, for gastrointestinal, the results are similar to those for leukemia, but the TNR is not affected at all.
308
+
309
+ $$
310
+ \begin{array}{ccc} & \mathrm{X}=0 & \mathrm{X}=1 \\ \hline \text{Class } + & 306 & 77 \\ \text{Class } - & 23 & 284 \end{array}
311
+ $$
312
+
313
+ Table 4: Contingency table of the feature selected in Australian.
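+
+ As a quick check (added only for illustration), classifying by $X$ alone, with the positive class predicted for $X = 0$ and the negative class for $X = 1$, Table 4 implies rates that are consistent with the FS column for Australian in Table 2:
+
+ ```python
+ # Rates implied by Table 4 when Australian is classified by X alone
+ # (predict + if X = 0, - if X = 1); illustration only.
+ tp, fn = 306, 77   # positive samples with X = 0 / X = 1
+ tn, fp = 284, 23   # negative samples with X = 1 / X = 0
+
+ acc = (tp + tn) / (tp + fn + tn + fp)   # ~0.855
+ tpr = tp / (tp + fn)                    # ~0.799
+ tnr = tn / (tn + fp)                    # ~0.925
+ print(round(acc, 3), round(tpr, 3), round(tnr, 3))
+ ```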
314
+
315
+ Consider next the results shown in Table 3, for the case where we are more restrictive regarding the performance values, that is, when $\lambda_1^* = \lambda_1 + \sqrt{-\log\alpha / (2|I_1|)}$ and $\lambda_{-1}^{*} = \lambda_{-1} + \sqrt{-\log\alpha / (2|I_{-1}|)}$. From the table it can be seen that this approach tends to work better in terms of the performance measures, but yields less sparse solutions. For example, if we focus on wisconsin, the TNR, the TPR and the accuracy all attain the desired performance requirements. However, the number of variables is only reduced to about one fifth of the original. In the case of votes, an analogous result is obtained for the performance measures and only a
316
+
317
+ reduction to roughly one third of the variables is achieved. The same pattern as before is observed for nursery. For Australian, we even obtain an improvement in all three performance measures considered, reducing the number of features to one fifth. In addition, careval again shows the smallest reduction in the number of variables selected, keeping the performance measures above the thresholds imposed as before, but now using a larger number of features. On the contrary, and surprisingly, for leukemia we obtain an even bigger reduction in the number of features and better results when using the Hoeffding inequality. The gastrointestinal dataset, however, follows the general pattern, and the number of features increases when using this inequality. Nevertheless, the TPR is now not affected at all, whereas the TNR decreases significantly.
318
+
319
+ # 4.3. Results under the cost-sensitive sparse SVM with radial kernel
320
+
321
+ The results analogous to those in Section 4.2 are presented here, for the case of the radial kernel. However, only the wisconsin, votes and nursery datasets are used here. As shown by Tables 5 and 6 and, similarly to what occurred in Section 4.2, the use of the threshold values obtained via the Hoeffding inequality (as in (11)) tends to yield a lower level of sparsity but also, in general, a higher predictive power (particularly with regard to achieving the desired bounds). Concerning the performance measures, it can be deduced from Tables 5 and 6 that this approach works well in general, especially when using the Hoeffding-based thresholds. Finally, it should be noted that the reduction in the number of features is, as before, quite remarkable for some datasets.
322
+
323
+ Table 5: Performance measures under the cost-sensitive sparse SVM with radial kernel and $\lambda_1^* = \lambda_1$ , $\lambda_{-1}^{*} = \lambda_{-1}$ .
324
+
325
+ <table><tr><td>Name</td><td></td><td colspan="2">SVM</td><td colspan="2">FS</td><td>Feature reduction</td></tr><tr><td></td><td></td><td>Mean</td><td>Std</td><td>Mean</td><td>Std</td><td></td></tr><tr><td rowspan="3">wisconsin</td><td>Acc</td><td>0.975</td><td>0.021</td><td>0.956</td><td>0.012</td><td>30 → 2 (0 Std)</td></tr><tr><td>TPR</td><td>0.992</td><td>0.013</td><td>0.988</td><td>0.016</td><td></td></tr><tr><td>TNR</td><td>0.943</td><td>0.051</td><td>0.893</td><td>0.051</td><td></td></tr><tr><td rowspan="3">votes</td><td>Acc</td><td>0.954</td><td>0.033</td><td>0.947</td><td>0.034</td><td>32 → 2 (0 Std)</td></tr><tr><td>TPR</td><td>0.955</td><td>0.038</td><td>0.928</td><td>0.059</td><td></td></tr><tr><td>TNR</td><td>0.947</td><td>0.059</td><td>0.974</td><td>0.036</td><td></td></tr><tr><td rowspan="3">nursery</td><td>Acc</td><td>1</td><td>0</td><td>1</td><td>0</td><td>19 → 1 (0 Std)</td></tr><tr><td>TPR</td><td>1</td><td>0</td><td>1</td><td>0</td><td></td></tr><tr><td>TNR</td><td>1</td><td>0</td><td>1</td><td>0</td><td></td></tr></table>
326
+
327
+ Table 6: Performance measures under the cost-sensitive sparse SVM with radial kernel and $\lambda_1^* = \lambda_1 + \sqrt{-\log\alpha / (2|I_1|)}$ , $\lambda_{-1}^{*} = \lambda_{-1} + \sqrt{-\log\alpha / (2|I_{-1}|)}$ .
328
+
329
+ <table><tr><td>Name</td><td></td><td colspan="2">SVM</td><td colspan="2">FS</td><td>Feature reduction</td></tr><tr><td></td><td></td><td>Mean</td><td>Std</td><td>Mean</td><td>Std</td><td></td></tr><tr><td rowspan="3">wisconsin</td><td>Acc</td><td>0.975</td><td>0.021</td><td>0.947</td><td>0.03</td><td>30 → 6.2 (0.919 Std)</td></tr><tr><td>TPR</td><td>0.992</td><td>0.013</td><td>0.967</td><td>0.039</td><td></td></tr><tr><td>TNR</td><td>0.943</td><td>0.051</td><td>0.907</td><td>0.02</td><td></td></tr><tr><td rowspan="3">votes</td><td>Acc</td><td>0.954</td><td>0.033</td><td>0.949</td><td>0.03</td><td>32 → 9.3 (1.16 Std)</td></tr><tr><td>TPR</td><td>0.955</td><td>0.038</td><td>0.959</td><td>0.034</td><td></td></tr><tr><td>TNR</td><td>0.947</td><td>0.059</td><td>0.939</td><td>0.043</td><td></td></tr><tr><td rowspan="3">nursery</td><td>Acc</td><td>1</td><td>0</td><td>1</td><td>0</td><td>19 → 1 (0 Std)</td></tr><tr><td>TPR</td><td>1</td><td>0</td><td>1</td><td>0</td><td></td></tr><tr><td>TNR</td><td>1</td><td>0</td><td>1</td><td>0</td><td></td></tr></table>
330
+
331
+ # 4.4. Comparison with other methodologies
332
+
333
+ The cost-sensitive FS procedure presented here can be compared, to a certain extent, with some other benchmark methodologies. However, the authors are not aware of FS methods for SVM that control, as we do, the TPR or the TNR. Among the different FS techniques that can be applied to SVM we find, for example, the following: filter methods (based on measures such as Pearson's correlation, Linear Discriminant Analysis or the Chi-square statistic), wrapper methods (Forward Selection, Backward Elimination, Recursive Feature Elimination, ...) and embedded methods (such as those presented in the Introduction).
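+
+ As a point of reference for the wrapper family mentioned above, a minimal Recursive Feature Elimination example with a linear SVM on the wisconsin data (using scikit-learn) is sketched below; this is not the comparison method of [18],[22], merely an illustration of that type of technique:
+
+ ```python
+ # Minimal wrapper-method illustration: Recursive Feature Elimination (RFE)
+ # with a linear SVM on the wisconsin data (scikit-learn).
+ from sklearn.datasets import load_breast_cancer
+ from sklearn.feature_selection import RFE
+ from sklearn.preprocessing import StandardScaler
+ from sklearn.svm import LinearSVC
+
+ X, y = load_breast_cancer(return_X_y=True)
+ X = StandardScaler().fit_transform(X)
+
+ rfe = RFE(estimator=LinearSVC(C=1.0, dual=False, max_iter=10000),
+           n_features_to_select=2)
+ rfe.fit(X, y)
+ print(rfe.support_)   # boolean mask of the two selected features
+ ```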
334
+
335
+ In order to make a comparison with another state-of-the-art method, we have selected the method in [18],[22]. The results can be seen in Tables 7 and 8. In Table 7 we show the results for the standard SVM, the results of our FS approach when $\lambda_1^* = \lambda_1$ and $\lambda_{-1}^{*} = \lambda_{-1}$, and the results of the state-of-the-art method when the maximum number of features selected is the same as that obtained with our methodology. In Table 8, the results for the standard SVM are reported together with the results of our FS approach when $\lambda_1^* = \lambda_1 + \sqrt{-\log\alpha / (2|I_1|)}$ and $\lambda_{-1}^{*} = \lambda_{-1} + \sqrt{-\log\alpha / (2|I_{-1}|)}$, as well as the results of the state-of-the-art method when the maximum number of features selected is the same as that obtained with our methodology.
336
+
337
+ We can observe that, except for the gastrointestinal dataset (when using the Hoeffding inequality), where we obtain better results than the competing method, our method and the method in [18],[22] yield similar results in terms of accuracy, while our methodology is cost-sensitive and allows the performance measures to be controlled. As an illustration, in Table 9 we have collected all
338
+
339
+ Table 7: Performance measures under the cost-sensitive sparse SVM with linear kernel and $\lambda_1^* = \lambda_1$ , $\lambda_{-1}^{*} = \lambda_{-1}$ and comparative with the method in [18],[22].
340
+
341
+ <table><tr><td rowspan="2">Name</td><td rowspan="2"></td><td colspan="2">SVM</td><td colspan="2">FS</td><td colspan="2">Compar.</td></tr><tr><td>Mean</td><td>Std</td><td>Mean</td><td>Std</td><td>Mean</td><td>Std</td></tr><tr><td rowspan="3">wisconsin</td><td>Acc</td><td>0.975</td><td>0.021</td><td>0.947</td><td>0.025</td><td>0.954</td><td>0.021</td></tr><tr><td>TPR</td><td>0.992</td><td>0.013</td><td>0.973</td><td>0.031</td><td>0.977</td><td>0.025</td></tr><tr><td>TNR</td><td>0.943</td><td>0.051</td><td>0.905</td><td>0.063</td><td>0.911</td><td>0.056</td></tr><tr><td rowspan="3">votes</td><td>Acc</td><td>0.954</td><td>0.033</td><td>0.949</td><td>0.036</td><td>0.956</td><td>0.026</td></tr><tr><td>TPR</td><td>0.955</td><td>0.038</td><td>0.928</td><td>0.059</td><td>0.949</td><td>0.039</td></tr><tr><td>TNR</td><td>0.947</td><td>0.059</td><td>0.979</td><td>0.036</td><td>0.969</td><td>0.034</td></tr><tr><td rowspan="3">nursery</td><td>Acc</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>TPR</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>TNR</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td rowspan="3">Australian</td><td>Acc</td><td>0.848</td><td>0.051</td><td>0.855</td><td>0.057</td><td>0.855</td><td>0.054</td></tr><tr><td>TPR</td><td>0.798</td><td>0.083</td><td>0.801</td><td>0.087</td><td>0.801</td><td>0.082</td></tr><tr><td>TNR</td><td>0.912</td><td>0.05</td><td>0.926</td><td>0.041</td><td>0.925</td><td>0.039</td></tr><tr><td rowspan="3">careval</td><td>Acc</td><td>0.956</td><td>0.017</td><td>0.946</td><td>0.019</td><td>0.949</td><td>0.016</td></tr><tr><td>TPR</td><td>0.96</td><td>0.022</td><td>0.963</td><td>0.017</td><td>0.967</td><td>0.012</td></tr><tr><td>TNR</td><td>0.948</td><td>0.024</td><td>0.907</td><td>0.04</td><td>0.91</td><td>0.043</td></tr><tr><td rowspan="3">leukemia</td><td>Acc</td><td>0.972</td><td>0.164</td><td>0.875</td><td>0.331</td><td>0.653</td><td>0.471</td></tr><tr><td>TPR</td><td>0.979</td><td>0.196</td><td>0.896</td><td>0.305</td><td>0.66</td><td>0.474</td></tr><tr><td>TNR</td><td>0.96</td><td>0.144</td><td>0.833</td><td>0.373</td><td>0.68</td><td>0.466</td></tr><tr><td rowspan="3">gastrointestinal</td><td>Acc</td><td>0.895</td><td>0.307</td><td>0.829</td><td>0.379</td><td>0.857</td><td>0.35</td></tr><tr><td>TPR</td><td>0.929</td><td>0.258</td><td>0.839</td><td>0.367</td><td>0.9</td><td>0.3</td></tr><tr><td>TNR</td><td>0.8</td><td>0.4</td><td>0.8</td><td>0.4</td><td>0.75</td><td>0.433</td></tr></table>
342
+
343
+ Table 8: Performance measures under the cost-sensitive sparse SVM with linear kernel and $\lambda_1^* = \lambda_1 + \sqrt{-\log\alpha / (2|I_1|)}$ , $\lambda_{-1}^{*} = \lambda_{-1} + \sqrt{-\log\alpha / (2|I_{-1}|)}$ and comparative with the method in [18],[22].
344
+
345
+ <table><tr><td rowspan="2">Name</td><td rowspan="2"></td><td colspan="2">SVM</td><td colspan="2">FS</td><td colspan="2">Compar.</td></tr><tr><td>Mean</td><td>Std</td><td>Mean</td><td>Std</td><td>Mean</td><td>Std</td></tr><tr><td rowspan="3">wisconsin</td><td>Acc</td><td>0.975</td><td>0.021</td><td>0.965</td><td>0.023</td><td>0.967</td><td>0.018</td></tr><tr><td>TPR</td><td>0.992</td><td>0.013</td><td>0.975</td><td>0.023</td><td>0.989</td><td>0.017</td></tr><tr><td>TNR</td><td>0.943</td><td>0.051</td><td>0.947</td><td>0.048</td><td>0.926</td><td>0.033</td></tr><tr><td rowspan="3">votes</td><td>Acc</td><td>0.954</td><td>0.033</td><td>0.954</td><td>0.033</td><td>0.954</td><td>0.033</td></tr><tr><td>TPR</td><td>0.955</td><td>0.038</td><td>0.96</td><td>0.034</td><td>0.948</td><td>0.036</td></tr><tr><td>TNR</td><td>0.947</td><td>0.059</td><td>0.945</td><td>0.052</td><td>0.961</td><td>0.035</td></tr><tr><td rowspan="3">nursery</td><td>Acc</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>TPR</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td>TNR</td><td>1</td><td>0</td><td>1</td><td>0</td><td>1</td><td>0</td></tr><tr><td rowspan="3">Australian</td><td>Acc</td><td>0.848</td><td>0.051</td><td>0.837</td><td>0.057</td><td>0.851</td><td>0.053</td></tr><tr><td>TPR</td><td>0.798</td><td>0.083</td><td>0.772</td><td>0.074</td><td>0.798</td><td>0.081</td></tr><tr><td>TNR</td><td>0.912</td><td>0.05</td><td>0.924</td><td>0.053</td><td>0.919</td><td>0.046</td></tr><tr><td rowspan="3">careval</td><td>Acc</td><td>0.956</td><td>0.017</td><td>0.954</td><td>0.018</td><td>0.954</td><td>0.017</td></tr><tr><td>TPR</td><td>0.96</td><td>0.022</td><td>0.962</td><td>0.018</td><td>0.97</td><td>0.016</td></tr><tr><td>TNR</td><td>0.948</td><td>0.024</td><td>0.935</td><td>0.039</td><td>0.917</td><td>0.027</td></tr><tr><td rowspan="3">leukemia</td><td>Acc</td><td>0.972</td><td>0.164</td><td>0.944</td><td>0.229</td><td>0.932</td><td>0.252</td></tr><tr><td>TPR</td><td>0.979</td><td>0.196</td><td>0.957</td><td>0.202</td><td>0.938</td><td>0.242</td></tr><tr><td>TNR</td><td>0.96</td><td>0.144</td><td>0.92</td><td>0.272</td><td>0.917</td><td>0.276</td></tr><tr><td rowspan="3">gastrointestinal</td><td>Acc</td><td>0.895</td><td>0.307</td><td>0.842</td><td>0.365</td><td>0.714</td><td>0.452</td></tr><tr><td>TPR</td><td>0.929</td><td>0.258</td><td>0.927</td><td>0.26</td><td>0.75</td><td>0.433</td></tr><tr><td>TNR</td><td>0.8</td><td>0.4</td><td>0.619</td><td>0.486</td><td>0.625</td><td>0.484</td></tr></table>
346
+
347
+ the results for the Australian dataset when applying the method in [18],[22] and varying the number of features from 1 (minimum) to 34 (maximum). There, we can see that the maximum TPR obtained is 0.8007; hence, with our methodology, and perhaps at the expense of increasing the misclassification rate in the other class, we can improve the accuracy in the target class. The results obtained when the thresholds on either the TPR or the TNR are varied are summarized in Table 10.
348
+
349
+ Table 9: Performance measures using the method in [18],[22], varying the maximum number of features from 1 (minimum) to 34 (maximum) in Australian dataset.
350
+
351
+ <table><tr><td colspan="8">Australian</td></tr><tr><td>#Feat.</td><td>Acc</td><td>TPR</td><td>TNR</td><td>#Feat.</td><td>Acc</td><td>TPR</td><td>TNR</td></tr><tr><td>1</td><td>0.8551</td><td>0.8007</td><td>0.9248</td><td>18</td><td>0.8464</td><td>0.7954</td><td>0.9121</td></tr><tr><td>2</td><td>0.8551</td><td>0.8007</td><td>0.9248</td><td>19</td><td>0.8464</td><td>0.7954</td><td>0.9121</td></tr><tr><td>3</td><td>0.8551</td><td>0.8007</td><td>0.9248</td><td>20</td><td>0.8464</td><td>0.7954</td><td>0.9121</td></tr><tr><td>4</td><td>0.8551</td><td>0.8007</td><td>0.9248</td><td>21</td><td>0.8464</td><td>0.7954</td><td>0.9121</td></tr><tr><td>5</td><td>0.8507</td><td>0.798</td><td>0.9186</td><td>22</td><td>0.8578</td><td>0.7953</td><td>0.9154</td></tr><tr><td>6</td><td>0.8507</td><td>0.798</td><td>0.9186</td><td>23</td><td>0.8464</td><td>0.7954</td><td>0.9121</td></tr><tr><td>7</td><td>0.8507</td><td>0.798</td><td>0.9186</td><td>24</td><td>0.8464</td><td>0.7954</td><td>0.9121</td></tr><tr><td>8</td><td>0.8507</td><td>0.798</td><td>0.9186</td><td>25</td><td>0.8464</td><td>0.7954</td><td>0.9121</td></tr><tr><td>9</td><td>0.8507</td><td>0.798</td><td>0.9186</td><td>26</td><td>0.8449</td><td>0.7929</td><td>0.9121</td></tr><tr><td>10</td><td>0.8507</td><td>0.798</td><td>0.9186</td><td>27</td><td>0.8478</td><td>0.7981</td><td>0.9121</td></tr><tr><td>11</td><td>0.8507</td><td>0.798</td><td>0.9186</td><td>28</td><td>0.8478</td><td>0.7954</td><td>0.9153</td></tr><tr><td>12</td><td>0.8478</td><td>0.798</td><td>0.9121</td><td>29</td><td>0.8464</td><td>0.7927</td><td>0.9153</td></tr><tr><td>13</td><td>0.8478</td><td>0.798</td><td>0.9121</td><td>30</td><td>0.8493</td><td>0.798</td><td>0.9153</td></tr><tr><td>14</td><td>0.8478</td><td>0.798</td><td>0.9121</td><td>31</td><td>0.8478</td><td>0.7954</td><td>0.9153</td></tr><tr><td>15</td><td>0.8478</td><td>0.798</td><td>0.9121</td><td>32</td><td>0.8478</td><td>0.7981</td><td>0.9121</td></tr><tr><td>16</td><td>0.8464</td><td>0.7954</td><td>0.9121</td><td>33</td><td>0.8478</td><td>0.7981</td><td>0.9121</td></tr><tr><td>17</td><td>0.8478</td><td>0.798</td><td>0.9121</td><td>34</td><td>0.8478</td><td>0.7981</td><td>0.9121</td></tr></table>
352
+
353
+ From these experimental results we can conclude that, indeed, our method is able not only to reduce the number of features but also to control the performance measures. If we look at the cases where $(\lambda_1^*, \lambda_{-1}^*)$ is (0.85, 0.5) or (0.85, 0.55), we see that the TPR increases strongly, although the TNR decreases considerably. A similar behavior is observed for the pair (0.9, 0.5). However, when using (0.85, 0.575) a different trade-off is found, whereas for (0.85, 0.6) we recover the original results.
354
+
355
+ # 5. Concluding remarks
356
+
357
+ In this paper we have proposed a Feature Selection procedure for Support Vector Machines that yields a novel sparse SVM. Contrary to existing
358
+
359
+ Table 10: Performance measures under the cost-sensitive sparse SVM with linear kernel for different values of the thresholds $(\lambda_1^*, \lambda_{-1}^*)$ in the Australian dataset.
360
+
361
+ <table><tr><td colspan="6">Australian</td></tr><tr><td>λ1*</td><td>λ-1*</td><td>Acc</td><td>TPR</td><td>TNR</td><td>Aver. # Feat. Selected</td></tr><tr><td>0.85</td><td>0.5</td><td>0.738</td><td>0.94</td><td>0.484</td><td>1</td></tr><tr><td>0.85</td><td>0.55</td><td>0.738</td><td>0.94</td><td>0.484</td><td>1</td></tr><tr><td>0.85</td><td>0.575</td><td>0.812</td><td>0.854</td><td>0.754</td><td>1.7</td></tr><tr><td>0.85</td><td>0.6</td><td>0.855</td><td>0.801</td><td>0.92</td><td>2</td></tr><tr><td>0.9</td><td>0.5</td><td>0.757</td><td>0.896</td><td>0.582</td><td>1.333</td></tr></table>
362
+
363
+ Feature Selection approaches, we explicitly take into account that misclassification costs may be rather different in the two groups and thus, instead of seeking the classifier that maximizes the margin, we seek the sparsest classifier that attains certain true positive and true negative rates on the dataset. For the SVM with both the linear and the radial kernel, the problem is formulated in a straightforward manner: first a mixed integer linear problem is solved, and then the corresponding standard SVM formulation, considering only the features obtained in the first problem as well as the performance constraints. The reported numerical results show that the novel approaches lead to comparable or better performance rates, in addition to an important reduction in the number of variables.
364
+
365
+ Several extensions of the approach presented in this paper are possible and, in our opinion, deserve further study. First, several classification and regression procedures based on optimization problems, such as Support Vector Regression, logistic regression or distance-weighted discrimination, are amenable to addressing, as done here, integrated FS and classification or regression. The optimization problems obtained in this way have a structure that should be exploited to make the approach competitive while including cost-sensitivity in the FS procedure. Second, even within SVM, it should be observed that the SVM is a tool for binary classification. For multiclass datasets, SVM classification is performed by solving a series of SVM problems, see [20, 46]. When some classes are hard to identify, the basic multiclass strategies may yield discouraging results. Performing feature selection and class fusion simultaneously, as in [24], is an interesting and nontrivial extension of our approach. To this end, problems (P1), (P2) and (P3) need to be conveniently modified.
366
+
367
+ # Acknowledgements
368
+
369
+ This research is financed by Fundación BBVA, projects FQM329 and P11-FQM-7603 (Junta de Andalucía, Andalucía) and MTM2015-65915-R (Ministerio de Economía y Competitividad, Spain). The last three are cofunded with EU ERD Funds. The authors are thankful for such support.
370
+
371
+ # Appendix
372
+
373
+ In this section we describe step by step how formulation (9) is built from equation (8). Hence, let us suppose first that we have the model
374
+
375
+ $$
376
+ \begin{array}{lll} \min\limits_{\boldsymbol{w}, \beta, \xi} & \boldsymbol{w}^{\top}\boldsymbol{w} + C \sum_{i \in I} \xi_{i} & \\ \text{s.t.} & y_{i}(\boldsymbol{w}^{\top} x_{i} + \beta) \geq 1 - \xi_{i}, & i \in I \\ & 0 \leq \xi_{i} \leq M_{1}(1 - \zeta_{i}), & i \in I \\ & \mu(\zeta)_{\ell} \geq \lambda_{\ell}, & \ell \in L \\ & \zeta_{i} \in \{0, 1\}, & i \in I. \end{array}
377
+ $$
378
+
379
+ This one can be rewritten as
380
+
381
+ $$
382
+ \begin{array}{ll} \min\limits_{\zeta} & \left[ \begin{array}{lll} \min\limits_{\boldsymbol{w}, \beta, \xi} & \boldsymbol{w}^{\top}\boldsymbol{w} + C \sum_{i \in I} \xi_{i} & \\ \text{s.t.} & y_{i}\left(\boldsymbol{w}^{\top} x_{i} + \beta\right) \geq 1 - \xi_{i}, & i \in I \\ & 0 \leq \xi_{i} \leq M_{1}(1 - \zeta_{i}), & i \in I \end{array} \right] \\ \text{s.t.} & \zeta_{i} \in \{0, 1\}, \quad i \in I \\ & \mu(\zeta)_{\ell} \geq \lambda_{\ell}, \quad \ell \in L \end{array}
383
+ $$
384
+
385
+ If we assume that the binary variables $\zeta$ are fixed, the Karush-Kuhn-Tucker (KKT) conditions for the inner problem are
386
+
387
+ $$
388
+ \begin{array}{l} \boldsymbol{w} = \sum_{i \in I} \alpha_{i} y_{i} x_{i} \\ 0 = \sum_{i \in I} \alpha_{i} y_{i} \\ 0 \leq \alpha_{i} \leq C / 2, \quad i \in I. \end{array}
389
+ $$
390
+
391
+ Substituting these expressions into the last optimization problem, the partial dual of that problem can be calculated, obtaining
392
+
393
+ $$
394
+ \begin{array}{ll} \min\limits_{\zeta} & \left[ \begin{array}{lll} \min\limits_{\alpha, \beta, \xi} & \left(\sum_{i \in I} \alpha_{i} y_{i} x_{i}\right)^{\top} \left(\sum_{i \in I} \alpha_{i} y_{i} x_{i}\right) + C \sum_{i \in I} \xi_{i} & \\ \text{s.t.} & y_{i}\left(\left(\sum_{i^{\prime} \in I} \alpha_{i^{\prime}} y_{i^{\prime}} x_{i^{\prime}}\right)^{\top} x_{i} + \beta\right) \geq 1 - \xi_{i}, & i \in I \\ & 0 \leq \xi_{i} \leq M_{1}(1 - \zeta_{i}), & i \in I \\ & \sum_{i \in I} \alpha_{i} y_{i} = 0 & \\ & 0 \leq \alpha_{i} \leq C / 2, & i \in I \end{array} \right] \\ \text{s.t.} & \zeta_{i} \in \{0, 1\}, \quad i \in I \\ & \mu(\zeta)_{\ell} \geq \lambda_{\ell}, \quad \ell \in L \end{array}
395
+ $$
396
+
397
+ As a last step, the kernel trick is used and the final formulation (9) is obtained.
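+
+ For illustration, the substitution behind this last step amounts to expanding the quadratic terms into pairwise inner products and replacing each inner product by a kernel evaluation (written here with a generic kernel $K$; the modified kernel restricted to the selected features, as defined earlier in the paper, is the one actually used),
+
+ $$
+ \left(\sum_{i \in I} \alpha_{i} y_{i} x_{i}\right)^{\top} \left(\sum_{i^{\prime} \in I} \alpha_{i^{\prime}} y_{i^{\prime}} x_{i^{\prime}}\right) = \sum_{i \in I} \sum_{i^{\prime} \in I} \alpha_{i} \alpha_{i^{\prime}} y_{i} y_{i^{\prime}} \, x_{i}^{\top} x_{i^{\prime}} \;\longrightarrow\; \sum_{i \in I} \sum_{i^{\prime} \in I} \alpha_{i} \alpha_{i^{\prime}} y_{i} y_{i^{\prime}} \, K(x_{i}, x_{i^{\prime}}),
+ $$
+
+ and analogously for the inner products $x_{i}^{\top} x_{i^{\prime}}$ that appear in the constraints.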
398
+
399
+ # References
400
+
401
402
+
403
+ [1] Aytug, H. (2015). Feature selection for support vector machines using Generalized Benders Decomposition. European Journal of Operational Research, 244(1):210-218.
404
+
405
+ [2] Bartlett, P. L., Jordan, M. I., and McAuliffe, J. D. (2006). Convexity, classification, and risk bounds. Journal of the American Statistical Association, 101(473):138-156.
406
+ [3] Ben-Tal, A., Bhadra, S., Bhattacharyya, C., and Saketha Nath, J. (2011). Chance constrained uncertain classification via robust optimization. Mathematical Programming, 127(1):145-173.
407
+ [Benitez-Pena et al.] Benitez-Pena, S., Blanquero, R., Carrizosa, E., and Ramírez-Cobo, P. On Support Vector Machines under a multiple-cost scenario. Working Paper.
408
+ [5] Bertolazzi, P., Felici, G., Festa, P., Fiscon, G., and Weitschek, E. (2016). Integer programming models for feature selection: New extensions and a randomized solution algorithm. European Journal of Operational Research, 250(2):389-399.
409
+ [6] Bertsimas, D., King, A., Mazumder, R., et al. (2016a). Best subset selection via a modern optimization lens. The Annals of Statistics, 44(2):813-852.
410
+ [7] Bertsimas, D., Mazumder, R., et al. (2014). Least quantile regression via modern optimization. The Annals of Statistics, 42(6):2494-2525.
411
+ [8] Bertsimas, D., O'Hair, A. K., and Pulleyblank, W. R. (2016b). The Analytics Edge. Dynamic Ideas, Massachusetts.
412
+ [9] Bot, R. I. and Lorenz, N. (2011). Optimization problems in statistical learning: Duality and optimality conditions. European Journal of Operational Research, 213(2):395-404.
413
+ [10] Bradley, P. S., Fayyad, U. M., and Mangasarian, O. L. (1999). Mathematical Programming for Data Mining: Formulations and Challenges. INFORMS Journal on Computing, 11(3):217-238.
414
+ [11] Bradley, P. S., Mangasarian, O. L., and Street, W. N. (1998). Feature Selection via Mathematical Programming. INFORMS Journal on Computing, 10(2):209-217.
415
+ [12] Carrizosa, E., Martin-Barragán, B., and Romero-Morales, D. (2008). Multigroup support vector machines with measurement costs: A biobjective approach. Discrete Applied Mathematics, 156(6):950-966.
416
+ [13] Carrizosa, E., Martin-Barragan, B., and Romero-Morales, D. (2011). Detecting relevant variables and interactions in supervised classification. European Journal of Operational Research, 213(1):260-269.
417
+ [14] Carrizosa, E., Nogales-Gómez, A., and Romero-Morales, D. (2016). Strongly agree or strongly disagree?: Rating features in support vector machines. Information Sciences, 329:256-273.
418
+
419
+ [15] Carrizosa, E., Nogales-Gómez, A., and Romero-Morales, D. (2017a). Clustering categories in support vector machines. Omega, 66:28-37.
420
+ [16] Carrizosa, E., Olivares-Nadal, A. V., and Ramírez-Cobo, P. (2017b). A sparsity-controlled vector autoregressive model. *Biostatistics*, page kxw042.
421
+ [17] Carrizosa, E. and Romero-Morales, D. (2013). Supervised classification and mathematical optimization. Computers & Operations Research, 40(1):150-165.
422
+ [18] Chan, A. B., Vasconcelos, N., and Lanckriet, G. R. G. (2007). Direct convex relaxations of sparse SVM. In Proceedings of the 24th International Conference on Machine Learning, ICML '07, pages 145-153, New York, NY, USA. ACM.
423
+ [19] Corne, D., Dhaenens, C., and Jourdan, L. (2012). Synergies between operations research and data mining: The emerging use of multi-objective approaches. European Journal of Operational Research, 221(3):469-479.
424
+ [20] Cristianini, N. and Shawe-Taylor, J. (2000). An Introduction to Support Vector Machines and Other Kernel-based Learning Methods. Cambridge University Press.
425
+ [21] Fung, G. M. and Mangasarian, O. L. (2004). A Feature Selection Newton Method for Support Vector Machine Classification. Computational Optimization and Applications, 28(2):185-202.
426
+ [22] Ghaddar, B. and Naoum-Sawaya, J. (2018). High dimensional data classification and feature selection using support vector machines. European Journal of Operational Research, 265(3):993 - 1004.
427
+ [23] Golub, T. R., Slonim, D. K., Tamayo, P., Huard, C., Gaasenbeek, M., Mesirov, J. P., Coller, H., Loh, M. L., Downing, J. R., Caligiuri, M. A., Bloomfield, C. D., and Lander, E. S. (1999). Molecular classification of cancer: Class discovery and class prediction by gene expression monitoring. Science, 286(5439):531-537.
428
+ [24] Guo, J. (2010). Simultaneous variable selection and class fusion for high-dimensional linear discriminant analysis. *Biostatistics*, 11(4):599.
429
+ [25] Gurobi Optimization, Inc. (2016). Gurobi optimizer reference manual.
430
+ [26] Guyon, I. and Elisseeff, A. (2003). An Introduction to Variable and Feature Selection. Journal of Machine Learning Research, 3(Mar):1157-1182.
431
+ [27] Kohavi, R. (1995). A Study of Cross-Validation and Bootstrap for Accuracy Estimation and Model Selection. In *IJCAI*, volume 14, pages 1137–1143. Stanford, CA.
432
+
433
+ [28] Le Thi, H. A., Le, H. M., and Dinh, T. P. (2015). Feature selection in machine learning: an exact penalty approach using a Difference of Convex function Algorithm. Machine Learning, 101(1):163-186.
434
+ [29] Lichman, M. (2013). UCI Machine Learning Repository.
435
+ [30] Maldonado, S., Pérez, J., and Bravo, C. (2017). Cost-based feature selection for support vector machines: An application in credit scoring. European Journal of Operational Research, 261(2):656 - 665.
436
+ [31] Maldonado, S. and Weber, R. (2009a). A wrapper method for feature selection using Support Vector Machines. Information Sciences, 179(13):2208-2217.
437
+ [32] Maldonado, S. and Weber, R. (2009b). A wrapper method for feature selection using support vector machines. Information Sciences, 179(13):2208 - 2217. Special Section on High Order Fuzzy Sets.
438
+ [33] Maldonado, S., Weber, R., and Basak, J. (2011a). Simultaneous feature selection and classification using kernel-penalized support vector machines. Information Sciences, 181(1):115-128.
439
+ [34] Maldonado, S., Weber, R., and Basak, J. (2011b). Simultaneous feature selection and classification using kernel-penalized support vector machines. Information Sciences, 181(1):115 - 128.
440
+ [35] Marron, J. S., Todd, M. J., and Ahn, J. (2007). Distance-weighted discrimination. Journal of the American Statistical Association, 102(480):1267-1271.
441
+ [36] Meisel, S. and Mattfeld, D. (2010). Synergies of Operations Research and Data Mining. European Journal of Operational Research, 206(1):1-10.
442
+ [37] Panagopoulos, O. P., Pappu, V., Xanthopoulos, P., and Pardalos, P. M. (2016). Constrained subspace classifier for high dimensional datasets. Omega, 59:40-46.
443
+ [38] Plastria, F. and Carrizosa, E. (2012). Minmax-distance approximation and separation problems: geometrical properties. Mathematical Programming, 132(1):153-177.
444
+ [39] Provost, F. and Fawcett, T. (2013). Data Science for Business: What You Need to Know about Data Mining and Data-Analytic Thinking. O'Reilly Media, Inc., 1st edition.
445
+ [40] Python Core Team (2015). Python: A dynamic, open source programming language. Python Software Foundation.
446
+ [41] Richtárik, P. and Takáč, M. (2016). Parallel coordinate descent methods for big data optimization. Mathematical Programming, 156(1):433-484.
447
+
448
+ [42] Sánchez, B. N., Wu, M., Song, P. X. K., and Wang, W. (2016). Study design in high-dimensional classification analysis. *Biostatistics*, 17(4):722.
449
+ [43] Shen, X., Tseng, G. C., Zhang, X., and Wong, W. H. (2003). On $\psi$ -learning. Journal of the American Statistical Association, 98(463):724-734.
450
+ [44] Vapnik, V. (1995). The Nature of Statistical Learning Theory. Springer-Verlag New York, Inc., New York, NY, USA.
451
+ [45] Vapnik, V. (1998). Statistical learning theory, volume 1. Wiley New York.
452
+ [46] Wang, L. and Shen, X. (2007). On L1-Norm Multiclass Support Vector Machines. Journal of the American Statistical Association, 102(478):583-594.
453
+ [47] Weston, J., Mukherjee, S., Chapelle, O., Pontil, M., Poggio, T., and Vapnik, V. (2001). Feature Selection for SVMs. In Leen, T. K., Dietterich, T. G., and Tresp, V., editors, Advances in Neural Information Processing Systems 13, pages 668-674. MIT Press.
2401.07xxx/2401.07627/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c0b866d539991b9efe84e8f47a4d728d5dd61b26fc97d0d9192b9afb14b32637
3
+ size 925784
2401.07xxx/2401.07627/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.07xxx/2401.07629/742e5603-ff9f-4acc-8856-c8c986d94821_content_list.json ADDED
@@ -0,0 +1,1667 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Fine-Grained Prototypes Distillation for Few-Shot Object Detection",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 158,
8
+ 119,
9
+ 836,
10
+ 141
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Zichen Wang, Bo Yang*, Haonan Yue, Zhenghao Ma",
17
+ "bbox": [
18
+ 277,
19
+ 162,
20
+ 720,
21
+ 180
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "School of Automation, Northwestern Polytechnical University, Xi'an, China {wangchen1801, hnyue, mazh0819} $@$ mail.nwpu.edu.cn, byang@nwpu.edu.cn",
28
+ "bbox": [
29
+ 238,
30
+ 186,
31
+ 756,
32
+ 215
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "Abstract",
39
+ "text_level": 1,
40
+ "bbox": [
41
+ 250,
42
+ 273,
43
+ 313,
44
+ 286
45
+ ],
46
+ "page_idx": 0
47
+ },
48
+ {
49
+ "type": "text",
50
+ "text": "Few-shot object detection (FSOD) aims at extending a generic detector for novel object detection with only a few training examples. It attracts great concerns recently due to the practical meanings. Meta-learning has been demonstrated to be an effective paradigm for this task. In general, methods based on meta-learning employ an additional support branch to encode novel examples (a.k.a. support images) into class prototypes, which are then fused with query branch to facilitate the model prediction. However, the class-level prototypes are difficult to precisely generate, and they also lack detailed information, leading to instability in performance. New methods are required to capture the distinctive local context for more robust novel object detection. To this end, we propose to distill the most representative support features into fine-grained prototypes. These prototypes are then assigned into query feature maps based on the matching results, modeling the detailed feature relations between two branches. This process is realized by our Fine-Grained Feature Aggregation (FFA) module. Moreover, in terms of high-level feature fusion, we propose Balanced Class-Agnostic Sampling (B-CAS) strategy and Non-Linear Fusion (NLF) module from different perspectives. They are complementary to each other and depict the high-level feature relations more effectively. Extensive experiments on PASCAL VOC and MS COCO benchmarks show that our method sets a new state-of-the-art performance in most settings. Our code is available at https://github.com/wangchen1801/FPD.",
51
+ "bbox": [
52
+ 98,
53
+ 297,
54
+ 464,
55
+ 626
56
+ ],
57
+ "page_idx": 0
58
+ },
59
+ {
60
+ "type": "text",
61
+ "text": "Introduction",
62
+ "text_level": 1,
63
+ "bbox": [
64
+ 225,
65
+ 648,
66
+ 336,
67
+ 664
68
+ ],
69
+ "page_idx": 0
70
+ },
71
+ {
72
+ "type": "text",
73
+ "text": "Object detection is a fundamental task in computer vision and the methods based on deep learning have been well established over the past few years (Redmon et al. 2016; Ren et al. 2017; Carion et al. 2020; Liu et al. 2016). While remarkable achievements have been made, most of them require a large amount of labeled data to obtain a satisfactory performance, otherwise they are prone to overfitting and hardly generalize to the unknow data.",
74
+ "bbox": [
75
+ 81,
76
+ 670,
77
+ 478,
78
+ 781
79
+ ],
80
+ "page_idx": 0
81
+ },
82
+ {
83
+ "type": "text",
84
+ "text": "Few-shot object detection (FSOD) is a more challenging task to detect object specially in data-scarce scenarios. FSOD assumes that there are sufficient amount of examples for base classes while only k-shot examples for each novel class.",
85
+ "bbox": [
86
+ 81,
87
+ 782,
88
+ 480,
89
+ 839
90
+ ],
91
+ "page_idx": 0
92
+ },
93
+ {
94
+ "type": "image",
95
+ "img_path": "images/b935493fcbf62f604e81e02bf346146026283704f0323f942389a5cfd0dff28c.jpg",
96
+ "image_caption": [
97
+ "Figure 1: Overview of the proposed method, which we denote as FPD. In addition to class-level prototypes, we distill representative detailed features into fine-grained prototypes, enabling more robust novel object detection."
98
+ ],
99
+ "image_footnote": [],
100
+ "bbox": [
101
+ 545,
102
+ 273,
103
+ 887,
104
+ 503
105
+ ],
106
+ "page_idx": 0
107
+ },
108
+ {
109
+ "type": "text",
110
+ "text": "Therefore, the key question is how to transfer the knowledge learnt from base classes to the novel classes. Transfer learning based methods (Wang et al. 2020; Cao et al. 2021; Qiao et al. 2021) focus on fine-tuning the model more effectively. They use the same architecture as generic object detection, additionally with advanced techniques such as parameter freezing and gradient decoupling to improve performance. Meta-learning based methods (Kang et al. 2019; Wang, Ramanan, and Hebert 2019; Yan et al. 2019; Han et al. 2023), instead, follow the idea: learn how to learn the new tasks rapidly. As illustrated in Figure 2, an additional support branch is incorporated to encode support images into class-level prototypes, which function as dynamic parameters to interact with the query branch. In this way, the connections between novel examples and the model predictions are enhanced, thereby improving the generalization ability and learning the new tasks more quickly.",
111
+ "bbox": [
112
+ 514,
113
+ 608,
114
+ 913,
115
+ 844
116
+ ],
117
+ "page_idx": 0
118
+ },
119
+ {
120
+ "type": "text",
121
+ "text": "This work studies the meta-learning based FSOD and aims at realizing a more effective method. In general, features from the two branches are fused on top of the framework to make",
122
+ "bbox": [
123
+ 514,
124
+ 845,
125
+ 911,
126
+ 888
127
+ ],
128
+ "page_idx": 0
129
+ },
130
+ {
131
+ "type": "aside_text",
132
+ "text": "arXiv:2401.07629v2 [cs.CV] 12 Mar 2024",
133
+ "bbox": [
134
+ 22,
135
+ 258,
136
+ 57,
137
+ 705
138
+ ],
139
+ "page_idx": 0
140
+ },
141
+ {
142
+ "type": "page_footnote",
143
+ "text": "*Corresponding author. Copyright © 2024, Association for the Advancement of Artificial Intelligence (www.aaai.org). All rights reserved.",
144
+ "bbox": [
145
+ 81,
146
+ 849,
147
+ 478,
148
+ 888
149
+ ],
150
+ "page_idx": 0
151
+ },
152
+ {
153
+ "type": "image",
154
+ "img_path": "images/a1b9a117bc5c26cd7a03cbb3296495b0916b60260e9692f1c56ae7b5727818fa.jpg",
155
+ "image_caption": [
156
+ "Figure 2: The overall architecture of our method. FFA and NLF are proposed to improve the performance."
157
+ ],
158
+ "image_footnote": [],
159
+ "bbox": [
160
+ 91,
161
+ 68,
162
+ 916,
163
+ 275
164
+ ],
165
+ "page_idx": 1
166
+ },
167
+ {
168
+ "type": "text",
169
+ "text": "the final prediction (Kang et al. 2019; Yan et al. 2019; Xiao and Marlet 2020), while most of the layers are separated and do not exchange information. This hinders the model from learning the correlations among detailed features especially in data-scarce scenarios.",
170
+ "bbox": [
171
+ 81,
172
+ 328,
173
+ 478,
174
+ 398
175
+ ],
176
+ "page_idx": 1
177
+ },
178
+ {
179
+ "type": "text",
180
+ "text": "DCNet (Hu et al. 2021) proposes to directly match the mid-level support features into query features in a pixel-wise manner, which enables the relation modeling of detailed local context. However, this approach has its limitations in terms of effect and implementation. First, the mid-level features with an extensive range of patterns are intricate and complex, thus the model might struggle to capture the most critical details. Second, directly matching between dense feature maps is inefficiency and will cost more computational resources. Third, this approach has difficulty in transitioning seamlessly from the training phase to the testing phase, as it can not integrate the mid-level support features across different shots to boost the performance.",
181
+ "bbox": [
182
+ 81,
183
+ 398,
184
+ 480,
185
+ 580
186
+ ],
187
+ "page_idx": 1
188
+ },
189
+ {
190
+ "type": "text",
191
+ "text": "To address the aforementioned issues, we propose a novel Fine-Grained Feature Aggregation (FFA) module to aggregate the mid-level features. As illustrated in Figure 3, different from DCNet, we propose to distill features into fine-grained prototypes. These prototypes, which reside in a highly refined and reduced feature space, embody the most distinctive and representative details of the support images. Specifically, we employ a set of embeddings following the object queries in DETR (Carion et al. 2020) to distill prototypes. Rather than being encoded with positional information and representing specific objects, the embeddings here function within the feature space and thereby are denoted as feature queries. We give each class a unique set of feature queries to distill prototypes independently. It can avoid confusion and is a key factor for our method to work. The distilled prototypes are then assigned into query feature map based on the matching results, modeling the fine-grained relations and highlighting the features with similar details.",
192
+ "bbox": [
193
+ 81,
194
+ 582,
195
+ 478,
196
+ 830
197
+ ],
198
+ "page_idx": 1
199
+ },
200
+ {
201
+ "type": "text",
202
+ "text": "The proposed FFA enables a more effective feature aggregation by focusing on the key information encapsulated within prototypes. This method also reduces the computational complexity by avoiding the directly matching between",
203
+ "bbox": [
204
+ 81,
205
+ 832,
206
+ 480,
207
+ 888
208
+ ],
209
+ "page_idx": 1
210
+ },
211
+ {
212
+ "type": "text",
213
+ "text": "dense feature maps. Furthermore, it can naturally transition into the testing phase through a weighted sum of prototypes across different shots, preserving the full potential derived from the training phase.",
214
+ "bbox": [
215
+ 514,
216
+ 329,
217
+ 911,
218
+ 385
219
+ ],
220
+ "page_idx": 1
221
+ },
222
+ {
223
+ "type": "text",
224
+ "text": "In terms of high-level feature aggregation, we revisit the previous methods and propose two improvements from different perspectives. First, we propose Balanced Class-Agnostic Sampling (B-CAS) strategy to control the ratio of support classes aggregated with query features. Meta R-CNN (Yan et al. 2019) adopts a simple class-specific aggregation scheme where only the features having the same classes are aggregated. While VFA (Han et al. 2023) proposes a class-agnostic aggregation scheme which randomly selects the support classes to reduce class bias. Our insight is that different support classes are served as positive and negative samples, thereby the balanced sampling is required to keep the most important positive samples from being overwhelmed. Second, many works (Kang et al. 2019; Yan et al. 2019; Han et al. 2023) employ element-wise multiplication to explore the relations within the same classes. However, it is not compatible with our proposed B-CAS which incorporates the feature aggregation between different classes. To solve this issue, we propose a stronger Non-Linear Fusion (NLF) module motivated by (Han et al. 2022a; Xiao and Marlet 2020) to fuse features more effectively. Our contributions can be summarized as follows:",
225
+ "bbox": [
226
+ 514,
227
+ 386,
228
+ 913,
229
+ 691
230
+ ],
231
+ "page_idx": 1
232
+ },
233
+ {
234
+ "type": "list",
235
+ "sub_type": "text",
236
+ "list_items": [
237
+ "- We propose to distill support features into fine-grained prototypes before being integrated into query feature maps, which can help the model grasp the key information. They are implemented in the Fine-Grained Feature Aggregation (FFA) module.",
238
+ "- We propose Balanced Class-Agnostic Sampling (B-CAS) strategy and Non-Linear Fusion (NLF) module. They are complementary to each other and can fuse high-level features more effectively.",
239
+ "- Extensive experiments illustrate that our method significantly improves the performance and achieves state-of-the-art results on the two widely used FSOD benchmarks."
240
+ ],
241
+ "bbox": [
242
+ 524,
243
+ 704,
244
+ 913,
245
+ 888
246
+ ],
247
+ "page_idx": 1
248
+ },
249
+ {
250
+ "type": "text",
251
+ "text": "Related Works",
252
+ "text_level": 1,
253
+ "bbox": [
254
+ 217,
255
+ 66,
256
+ 346,
257
+ 82
258
+ ],
259
+ "page_idx": 2
260
+ },
261
+ {
262
+ "type": "text",
263
+ "text": "General Object Detection",
264
+ "text_level": 1,
265
+ "bbox": [
266
+ 84,
267
+ 90,
268
+ 282,
269
+ 107
270
+ ],
271
+ "page_idx": 2
272
+ },
273
+ {
274
+ "type": "text",
275
+ "text": "Deep learning based object detection has been extensively studied in recent years. The well-established object detectors can be categorized into one-stage and two-stage methods. One-stage detectors (Redmon et al. 2016; Liu et al. 2016; Lin et al. 2017b) directly make predictions upon the CNN feature maps. While two-stage detectors (Ren et al. 2017; He et al. 2017) additionally employ a Region Proposal Network (RPN) to generate object proposals, which will be further refined into the final predictions. Both of them require the predefiend dense anchors to generate candidates.",
276
+ "bbox": [
277
+ 81,
278
+ 112,
279
+ 480,
280
+ 252
281
+ ],
282
+ "page_idx": 2
283
+ },
284
+ {
285
+ "type": "text",
286
+ "text": "Recently, anchor-free detectors DETR (Carion et al. 2020) and Deformable DETR (Zhu et al. 2020) have been developed and are drawing more attention. They use a CNN backbone combining with Transformer encoder-decoders (Vaswani et al. 2017) for end-to-end object detection. A set of object queries are proposed to replace the anchor boxes. They will be refined into the detected objects layer by layer through Transformer decoders.",
287
+ "bbox": [
288
+ 81,
289
+ 252,
290
+ 480,
291
+ 362
292
+ ],
293
+ "page_idx": 2
294
+ },
295
+ {
296
+ "type": "text",
297
+ "text": "We employ the two-stage Faster R-CNN (Ren et al. 2017) framework to build our FSOD detector, and draw inspirations from DETR (Carion et al. 2020) into our approach.",
298
+ "bbox": [
299
+ 83,
300
+ 364,
301
+ 480,
302
+ 407
303
+ ],
304
+ "page_idx": 2
305
+ },
306
+ {
307
+ "type": "text",
308
+ "text": "Few-Shot Object Detection",
309
+ "text_level": 1,
310
+ "bbox": [
311
+ 84,
312
+ 421,
313
+ 294,
314
+ 436
315
+ ],
316
+ "page_idx": 2
317
+ },
318
+ {
319
+ "type": "text",
320
+ "text": "Few-Shot Object Detection (FSOD), which studies the detection task in data-scarce situations, has been attracting an increased interest recently. LSTD (Chen et al. 2018) first proposes a transfer learning based approach to detect novel objects in a FSOD data setting. TFA (Wang et al. 2020) utilizes a cosine similarity based classifier and only fine-tunes the last layer with novel examples, achieving a comparable results with other complex methods. DeFRCN (Qiao et al. 2021) employs advanced gradient decoupling technique into the Faster R-CNN framework and integrates an offline prototypical calibration block to refine the classification results, which achieves an impressive performance.",
321
+ "bbox": [
322
+ 81,
323
+ 443,
324
+ 480,
325
+ 611
326
+ ],
327
+ "page_idx": 2
328
+ },
329
+ {
330
+ "type": "text",
331
+ "text": "The meta-learning is also a promising paradigm for FSOD. FSRW (Kang et al. 2019) proposes to re-weight the YOLOv2 feature maps along channel dimension using proposed reweighting vectors, which can highlight the relevant features. Meta R-CNN (Yan et al. 2019) adopts the Faster R-CNN framework to build a two-branch based siamese network. It processes query and support images in parallel to produce the Region of Interest (RoI) features and class prototypes, which are then fused to make predictions. Instead of learning a softmax-based classifier for all classes, (Han et al. 2022a) constructs a meta-classifier through feature alignment and non-linear matching. It calculates the similarity between query-support feature maps, producing binary classification results for novel classes. VFA (Han et al. 2023) introduces variational feature learning into Meta R-CNN, further boosting its performance. Recently, there are some works incorporate meta-learning into other advanced frameworks. Meta-DETR (Zhang et al. 2022) employs Deformable DETR (Zhu et al. 2020) to build a few-shot detector. (Han et al. 2022b) utilizes PVT (Wang et al. 2021) to construct",
332
+ "bbox": [
333
+ 81,
334
+ 611,
335
+ 482,
336
+ 888
337
+ ],
338
+ "page_idx": 2
339
+ },
340
+ {
341
+ "type": "text",
342
+ "text": "a fully cross transformer for few-shot detection. They all achieve remarkable results.",
343
+ "bbox": [
344
+ 514,
345
+ 69,
346
+ 911,
347
+ 95
348
+ ],
349
+ "page_idx": 2
350
+ },
351
+ {
352
+ "type": "text",
353
+ "text": "A two stage training paradigm has been widely adopted in both transfer learning and meta-learning based methods due to its effectiveness. At the base training stage, the model is trained on abundant base class examples. While at the fine-tuning stage, the model is fine-tuned only with $K$ -shot examples for each base and novel class.",
354
+ "bbox": [
355
+ 514,
356
+ 97,
357
+ 911,
358
+ 179
359
+ ],
360
+ "page_idx": 2
361
+ },
362
+ {
363
+ "type": "text",
364
+ "text": "Our approach is based on Meta R-CNN and we propose to distill fine-grained prototypes for effectively exploiting the relations between detailed features.",
365
+ "bbox": [
366
+ 514,
367
+ 180,
368
+ 911,
369
+ 222
370
+ ],
371
+ "page_idx": 2
372
+ },
373
+ {
374
+ "type": "text",
375
+ "text": "Our Approach",
376
+ "text_level": 1,
377
+ "bbox": [
378
+ 650,
379
+ 238,
380
+ 779,
381
+ 255
382
+ ],
383
+ "page_idx": 2
384
+ },
385
+ {
386
+ "type": "text",
387
+ "text": "In this section, we first introduce the task definition and the overall architecture of our model. Then we will elaborate the fine-grained and high-level feature aggregation.",
388
+ "bbox": [
389
+ 514,
390
+ 258,
391
+ 911,
392
+ 301
393
+ ],
394
+ "page_idx": 2
395
+ },
396
+ {
397
+ "type": "text",
398
+ "text": "Task Definition",
399
+ "text_level": 1,
400
+ "bbox": [
401
+ 516,
402
+ 314,
403
+ 638,
404
+ 329
405
+ ],
406
+ "page_idx": 2
407
+ },
408
+ {
409
+ "type": "text",
410
+ "text": "We adopt the standard FSOD setting following (Kang et al. 2019; Wang et al. 2020). Specifically, given a dataset $\\mathcal{D}$ with two sets of classes $C_{base}$ and $C_{novel}$ , where each class in $C_{base}$ has abundant training data while each class in $C_{novel}$ has only $K$ -shot annotated objects, FSOD aims at detecting the objects of $C_{base} \\cup C_{novel}$ using the detector trained on $\\mathcal{D}$ . Please note that $C_{base} \\cap C_{novel} = \\emptyset$ .",
411
+ "bbox": [
412
+ 514,
413
+ 334,
414
+ 911,
415
+ 431
416
+ ],
417
+ "page_idx": 2
418
+ },
419
+ {
420
+ "type": "text",
421
+ "text": "The Model Architecture",
422
+ "text_level": 1,
423
+ "bbox": [
424
+ 516,
425
+ 445,
426
+ 707,
427
+ 460
428
+ ],
429
+ "page_idx": 2
430
+ },
431
+ {
432
+ "type": "text",
433
+ "text": "As illustrated in Figure 2, our model is based on Meta R-CNN, which is a siamese network with query branch and support branch that share a same backbone. Typically, we use the first three stages of ResNet-50/101 backbone (He et al. 2016) to extract mid-level features for both query images and support images. Then our proposed FFA module is employed to distill the fine-grained prototypes and assign them into the query branch. Subsequently, we use the last stage (i.e. stage four) of the backbone to extract high-level features for both branches, which produces RoI features and class-level prototypes, respectively. They are further processed by the proposed NLF module, following by the detection head to make the final prediction. We would like to mention that the RPN is fed with the query features which have already interacted with the support branch. It gives the RPN more ability learning to identify the new instances.",
434
+ "bbox": [
435
+ 514,
436
+ 465,
437
+ 913,
438
+ 689
439
+ ],
440
+ "page_idx": 2
441
+ },
442
+ {
443
+ "type": "text",
444
+ "text": "Fine-Grained Feature Aggregation",
445
+ "text_level": 1,
446
+ "bbox": [
447
+ 516,
448
+ 700,
449
+ 787,
450
+ 717
451
+ ],
452
+ "page_idx": 2
453
+ },
454
+ {
455
+ "type": "text",
456
+ "text": "The Fine-Grained Feature Aggregation (FFA) module is the key component of our proposed method, which is a class-agnostic aggregator that matches all classes of support features into query features. It models inter-class relations in the early stage of the detection framework where the features are low-level and have more detailed information. Instead of directly performing feature matching, we propose to distill the representative support features into fine-grained prototypes. These prototypes are then assigned into query feature maps based on the matching results. FFA can help the model distinguish foreground from background and learn the similarities and differences between object classes. We will elaborate the",
457
+ "bbox": [
458
+ 514,
459
+ 720,
460
+ 913,
461
+ 888
462
+ ],
463
+ "page_idx": 2
464
+ },
465
+ {
466
+ "type": "text",
467
+ "text": "prototypes distillation and feature assignment in the following subsections. We also discuss our strategy to transfer this method to novel classes, as well as test-time natural integration of prototypes across different shots.",
468
+ "bbox": [
469
+ 81,
470
+ 68,
471
+ 482,
472
+ 125
473
+ ],
474
+ "page_idx": 3
475
+ },
476
+ {
477
+ "type": "text",
478
+ "text": "Prototypes Distillation Inspired by DETR, we incorporate a new component which is a set of learnable embeddings to distill prototypes. Different from object queries in DETR, which are encoded with positional information and are refined into a specific instance layer by layer, the embeddings here work as a guidance to refine the entire support feature space into a set of representative features. It can filter out the noise and ease the training. We refer to these embeddings as feature queries.",
479
+ "bbox": [
480
+ 81,
481
+ 130,
482
+ 478,
483
+ 256
484
+ ],
485
+ "page_idx": 3
486
+ },
487
+ {
488
+ "type": "text",
489
+ "text": "We employ the cross-attention mechanism to perform the prototypes distillation. Specifically, given a support feature map $X_{s} \\in \\mathbb{R}^{hw \\times d}$ and a set of feature queries $q \\in \\mathbb{R}^{n \\times d'}$ , where $hw$ denote the height and width, $d$ and $d'$ is the feature dimension, and $n$ is the number of feature queries, the affinity matrix is calculated through a matching operation:",
490
+ "bbox": [
491
+ 81,
492
+ 255,
493
+ 480,
494
+ 343
495
+ ],
496
+ "page_idx": 3
497
+ },
498
+ {
499
+ "type": "equation",
500
+ "text": "\n$$\nA = \\operatorname {s o f t m a x} \\left(\\frac {q \\left(X _ {s} W\\right) ^ {T}}{\\sqrt {d ^ {\\prime}}}\\right) \\tag {1}\n$$\n",
501
+ "text_format": "latex",
502
+ "bbox": [
503
+ 187,
504
+ 348,
505
+ 478,
506
+ 383
507
+ ],
508
+ "page_idx": 3
509
+ },
510
+ {
511
+ "type": "text",
512
+ "text": "where $W$ is a linear projection to project $X_{s}$ in to the latent space with dimensionality $d^{\\prime}$ , and the softmax function is performed along $hw$ dimension. Subsequently, the fine-grained prototypes can be distilled from $X_{s}$ via:",
513
+ "bbox": [
514
+ 81,
515
+ 388,
516
+ 480,
517
+ 445
518
+ ],
519
+ "page_idx": 3
520
+ },
521
+ {
522
+ "type": "equation",
523
+ "text": "\n$$\np = A X _ {s} + E _ {c l s} \\tag {2}\n$$\n",
524
+ "text_format": "latex",
525
+ "bbox": [
526
+ 220,
527
+ 452,
528
+ 478,
529
+ 468
530
+ ],
531
+ "page_idx": 3
532
+ },
533
+ {
534
+ "type": "text",
535
+ "text": "where the affinity matrix is applied directly on the support feature map. We do not project $X_{s}$ to keep feature space the same. An additional class embedding $E_{cls}$ is added to retain the class information.",
536
+ "bbox": [
537
+ 81,
538
+ 474,
539
+ 478,
540
+ 530
541
+ ],
542
+ "page_idx": 3
543
+ },
544
+ {
545
+ "type": "text",
546
+ "text": "We would like to mention that each class has its exclusive feature queries. This is different from object queries in DETR and is a crucial factor for our method to work. It means that $q$ is the feature queries of one class and is part of $Q \\in \\mathbb{R}^{nc \\times d}$ , where $Q$ denotes the feature queries of all classes. This setting makes feature queries class-relevant and avoids them getting overwhelmed and confused by too many object classes.",
547
+ "bbox": [
548
+ 81,
549
+ 530,
550
+ 480,
551
+ 628
552
+ ],
553
+ "page_idx": 3
554
+ },
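To make the distillation step concrete, the following is a minimal PyTorch-style sketch of Eq. 1-2 for a single class. The function name, the tensor shapes, and the treatment of $E_{cls}$ as a per-class vector broadcast over the $n$ prototypes are illustrative assumptions, not the authors' released implementation.

```python
import torch
import torch.nn.functional as F

def distill_prototypes(q, X_s, W, E_cls):
    """Sketch of fine-grained prototype distillation for one class (Eq. 1-2).

    q:     (n, d')  feature queries of this class
    X_s:   (hw, d)  flattened mid-level support feature map
    W:     (d, d')  linear projection applied to the support features
    E_cls: (d,)     class embedding added to retain class information
    """
    d_prime = q.shape[-1]
    # Affinity between feature queries and projected support features;
    # the softmax runs over the hw spatial locations (Eq. 1).
    A = F.softmax(q @ (X_s @ W).t() / d_prime ** 0.5, dim=-1)  # (n, hw)
    # The affinity is applied to the *unprojected* support features so the
    # prototypes stay in the original feature space (Eq. 2).
    return A @ X_s + E_cls                                      # (n, d)
```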
555
+ {
556
+ "type": "text",
557
+ "text": "Prototypes Assignment We densely match the fine-grained prototypes into query feature map to achieve the prototypes assignment. Considering that the background area should not be matched to any prototypes that represent salient object features, we incorporate a set of embeddings to serve as background prototypes. We also use the cross-attention mechanism to assign prototypes. Specifically, given a query feature map $X_{q} \\in \\mathbb{R}^{HW \\times d}$ , prototypes assignment is performed via:",
558
+ "bbox": [
559
+ 81,
560
+ 635,
561
+ 482,
562
+ 758
563
+ ],
564
+ "page_idx": 3
565
+ },
566
+ {
567
+ "type": "equation",
568
+ "text": "\n$$\nA ^ {\\prime} = \\operatorname {s o f t m a x} \\left(\\frac {\\left(X _ {q} W ^ {\\prime}\\right) \\left(P W ^ {\\prime}\\right) ^ {T}}{\\sqrt {d ^ {\\prime}}}\\right) \\tag {3}\n$$\n",
569
+ "text_format": "latex",
570
+ "bbox": [
571
+ 161,
572
+ 763,
573
+ 478,
574
+ 799
575
+ ],
576
+ "page_idx": 3
577
+ },
578
+ {
579
+ "type": "equation",
580
+ "text": "\n$$\nP = \\operatorname {c o n c a t} \\left(p _ {1}, p _ {2}, \\dots , p _ {c}, p _ {b g}\\right) \\tag {4}\n$$\n",
581
+ "text_format": "latex",
582
+ "bbox": [
583
+ 176,
584
+ 804,
585
+ 478,
586
+ 821
587
+ ],
588
+ "page_idx": 3
589
+ },
590
+ {
591
+ "type": "equation",
592
+ "text": "\n$$\nX _ {q} ^ {\\prime} = X _ {q} + \\alpha \\cdot A ^ {\\prime} P \\tag {5}\n$$\n",
593
+ "text_format": "latex",
594
+ "bbox": [
595
+ 212,
596
+ 823,
597
+ 478,
598
+ 840
599
+ ],
600
+ "page_idx": 3
601
+ },
602
+ {
603
+ "type": "text",
604
+ "text": "where $P \\in \\mathbb{R}^{(nc + n_{bg}) \\times d}$ is the prototypes of $c$ support classes with additional $n_{bg}$ background classes, and $W'$ is a linear projection shared by $X_q$ and $P$ which projects them into the",
605
+ "bbox": [
606
+ 81,
607
+ 845,
608
+ 480,
609
+ 888
610
+ ],
611
+ "page_idx": 3
612
+ },
613
+ {
614
+ "type": "image",
615
+ "img_path": "images/8e854cf5f5dcd793fccc5c5b2fbaa2121f03a8f27ba0c7a807ebf3f06b54dc4b.jpg",
616
+ "image_caption": [
617
+ "Figure 3: The architecture of the Fine-Grained Feature Aggregation (FFA) module. It can be divided into Prototypes, Distillation and Prototypes Assignment."
618
+ ],
619
+ "image_footnote": [],
620
+ "bbox": [
621
+ 526,
622
+ 70,
623
+ 901,
624
+ 431
625
+ ],
626
+ "page_idx": 3
627
+ },
628
+ {
629
+ "type": "text",
630
+ "text": "same latent space. The prototypes are assigned into query feature map based on the affinity matrix $A'$ , which produces the aggregated query features. The $\\alpha$ is a learnable parameter initialized as zero to help stabilize the training.",
631
+ "bbox": [
632
+ 514,
633
+ 513,
634
+ 913,
635
+ 570
636
+ ],
637
+ "page_idx": 3
638
+ },
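A minimal sketch of the assignment step (Eq. 3-5) is given below. We assume the softmax in Eq. 3 is taken over the prototype dimension, and the function and argument names are illustrative rather than taken from the paper.

```python
import torch
import torch.nn.functional as F

def assign_prototypes(X_q, P, W_proj, alpha):
    """Sketch of prototype assignment to the query feature map (Eq. 3-5).

    X_q:    (HW, d)           flattened query feature map
    P:      (n*c + n_bg, d)   concatenated class and background prototypes (Eq. 4)
    W_proj: (d, d')           projection shared by X_q and P
    alpha:  scalar tensor     learnable gate initialized to zero
    """
    d_prime = W_proj.shape[-1]
    Xq_p, P_p = X_q @ W_proj, P @ W_proj
    # Affinity of every query location with every prototype (Eq. 3); the
    # softmax is assumed to run over the prototype dimension.
    A = F.softmax(Xq_p @ P_p.t() / d_prime ** 0.5, dim=-1)   # (HW, n*c + n_bg)
    # Residual update of the query features with the assigned prototypes (Eq. 5).
    return X_q + alpha * (A @ P)
```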
639
+ {
640
+ "type": "text",
641
+ "text": "Transferring to Novel Classes At the base training stage, the feature queries of base classes are randomly initialized and well trained. However, at the fine-tuning stage, training the feature queries from scratch becomes challenging due to the limited novel class examples, which means that an effective knowledge transfer method is required. To address this issue, we propose to duplicate the most compatible feature queries from the base classes to serve as those in the novel classes. To be specific, given feature queries of base classes $Q \\in \\mathbb{R}^{nc \\times d'}$ and support feature map of a novel class $X_{ns} \\in \\mathbb{R}^{hw \\times d}$ , the compatibility matrix and the weight of each feature query can be obtained via:",
642
+ "bbox": [
643
+ 514,
644
+ 575,
645
+ 913,
646
+ 744
647
+ ],
648
+ "page_idx": 3
649
+ },
650
+ {
651
+ "type": "equation",
652
+ "text": "\n$$\nC = \\operatorname {t o p k} \\left(Q \\left(X _ {n s} W\\right) ^ {T}\\right) \\tag {6}\n$$\n",
653
+ "text_format": "latex",
654
+ "bbox": [
655
+ 629,
656
+ 750,
657
+ 911,
658
+ 768
659
+ ],
660
+ "page_idx": 3
661
+ },
662
+ {
663
+ "type": "equation",
664
+ "text": "\n$$\nw e i g h t _ {i} = \\sum_ {j = 0} ^ {k} C _ {i j}, i = 1, 2, \\dots , n c \\tag {7}\n$$\n",
665
+ "text_format": "latex",
666
+ "bbox": [
667
+ 598,
668
+ 773,
669
+ 911,
670
+ 816
671
+ ],
672
+ "page_idx": 3
673
+ },
674
+ {
675
+ "type": "text",
676
+ "text": "where $\\text{topk}$ is performed along $h w$ dimension to filter out irrelevant locations. We select $n$ feature queries for each novel class based on the largest weight. Instead of sharing the same fecture queries with base classes, they are created as a duplicate and can be trained independently.",
677
+ "bbox": [
678
+ 514,
679
+ 818,
680
+ 913,
681
+ 888
682
+ ],
683
+ "page_idx": 3
684
+ },
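The selection procedure of Eq. 6-7 can be sketched as follows; the function name, the `k` argument, and returning the weights alongside the duplicated queries are assumptions made for illustration.

```python
import torch

def select_novel_feature_queries(Q, X_ns, W, n, k):
    """Sketch of duplicating the most compatible base feature queries (Eq. 6-7).

    Q:    (n*c_base, d')  feature queries of all base classes
    X_ns: (hw, d)         support feature map of one novel class
    W:    (d, d')         the projection used during distillation
    n:    number of feature queries to create for the novel class
    k:    number of top locations kept per feature query
    """
    scores = Q @ (X_ns @ W).t()                  # (n*c_base, hw)
    topk_vals, _ = scores.topk(k, dim=-1)        # keep the k best locations (Eq. 6)
    weight = topk_vals.sum(dim=-1)               # per-query compatibility (Eq. 7)
    best = weight.topk(n).indices                # n most compatible base queries
    # Clone so the novel-class copies can be fine-tuned independently.
    return Q[best].clone(), weight
```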
685
+ {
686
+ "type": "table",
687
+ "img_path": "images/4b5554ba8c2ecdafddb52f082ce02105941d05c6a7bc948d546041c703d193a6.jpg",
688
+ "table_caption": [],
689
+ "table_footnote": [],
690
+ "table_body": "<table><tr><td rowspan=\"2\">Method / shot</td><td colspan=\"5\">Novel Set 1</td><td colspan=\"5\">Novel Set 2</td><td colspan=\"5\">Novel Set 3</td></tr><tr><td>1</td><td>2</td><td>3</td><td>5</td><td>10</td><td>1</td><td>2</td><td>3</td><td>5</td><td>10</td><td>1</td><td>2</td><td>3</td><td>5</td><td>10</td></tr><tr><td colspan=\"16\">Single run results:</td></tr><tr><td>FSRW (Kang et al. 2019)</td><td>14.8</td><td>15.5</td><td>26.7</td><td>33.9</td><td>47.2</td><td>15.7</td><td>15.3</td><td>22.7</td><td>30.1</td><td>40.5</td><td>21.3</td><td>25.6</td><td>28.4</td><td>42.8</td><td>45.9</td></tr><tr><td>Meta R-CNN (Yan et al. 2019)</td><td>19.9</td><td>25.5</td><td>35.0</td><td>45.7</td><td>51.5</td><td>10.4</td><td>19.4</td><td>29.6</td><td>34.8</td><td>45.4</td><td>14.3</td><td>18.2</td><td>27.5</td><td>41.2</td><td>48.1</td></tr><tr><td>TFA w/ cos (Wang et al. 2020)</td><td>39.8</td><td>36.1</td><td>44.7</td><td>55.7</td><td>56.0</td><td>23.5</td><td>26.9</td><td>34.1</td><td>35.1</td><td>39.1</td><td>30.8</td><td>34.8</td><td>42.8</td><td>49.5</td><td>49.8</td></tr><tr><td>MPSR (Wu et al. 2020)</td><td>41.7</td><td>42.5</td><td>51.4</td><td>55.2</td><td>61.8</td><td>24.4</td><td>29.3</td><td>39.2</td><td>39.9</td><td>47.8</td><td>35.6</td><td>41.8</td><td>42.3</td><td>48.0</td><td>49.7</td></tr><tr><td>Retentive (Fan et al. 2021)</td><td>42.4</td><td>45.8</td><td>45.9</td><td>53.7</td><td>56.1</td><td>21.7</td><td>27.8</td><td>35.2</td><td>37.0</td><td>40.3</td><td>30.2</td><td>37.6</td><td>43.0</td><td>49.7</td><td>50.1</td></tr><tr><td>FSCE (Sun et al. 2021)</td><td>44.2</td><td>43.8</td><td>51.4</td><td>61.9</td><td>63.4</td><td>27.3</td><td>29.5</td><td>43.5</td><td>44.2</td><td>50.2</td><td>37.2</td><td>41.9</td><td>47.5</td><td>54.6</td><td>58.5</td></tr><tr><td>Meta FR-CNN (Han et al. 2022a)</td><td>43.0</td><td>54.5</td><td>60.6</td><td>66.1</td><td>65.4</td><td>27.7</td><td>35.5</td><td>46.1</td><td>47.8</td><td>51.4</td><td>40.6</td><td>46.4</td><td>53.4</td><td>59.9</td><td>58.6</td></tr><tr><td>Meta-DETR (Zhang et al. 2022)</td><td>40.6</td><td>51.4</td><td>58.0</td><td>59.2</td><td>63.6</td><td>37.0</td><td>36.6</td><td>43.7</td><td>49.1</td><td>54.6</td><td>41.6</td><td>45.9</td><td>52.7</td><td>58.9</td><td>60.6</td></tr><tr><td>FCT (Han et al. 2022b)</td><td>49.9</td><td>57.1</td><td>57.9</td><td>63.2</td><td>67.1</td><td>27.6</td><td>34.5</td><td>43.7</td><td>49.2</td><td>51.2</td><td>39.5</td><td>54.7</td><td>52.3</td><td>57.0</td><td>58.7</td></tr><tr><td>VFA (Han et al. 2023)</td><td>57.7</td><td>64.6</td><td>64.7</td><td>67.2</td><td>67.4</td><td>41.4</td><td>46.2</td><td>51.1</td><td>51.8</td><td>51.6</td><td>48.9</td><td>54.8</td><td>56.6</td><td>59.0</td><td>58.9</td></tr><tr><td>FPD(Ours)</td><td>48.1</td><td>62.2</td><td>64.0</td><td>67.6</td><td>68.4</td><td>29.8</td><td>43.2</td><td>47.7</td><td>52.0</td><td>53.9</td><td>44.9</td><td>53.8</td><td>58.1</td><td>61.6</td><td>62.9</td></tr><tr><td colspan=\"16\">Average results over multiple runs:</td></tr><tr><td>FSDetView (Xiao and Marlet 2020)</td><td>24.2</td><td>35.3</td><td>42.2</td><td>49.1</td><td>57.4</td><td>21.6</td><td>24.6</td><td>31.9</td><td>37.0</td><td>45.7</td><td>21.2</td><td>30.0</td><td>37.2</td><td>43.8</td><td>49.6</td></tr><tr><td>DCNet (Hu et al. 
2021)</td><td>33.9</td><td>37.4</td><td>43.7</td><td>51.1</td><td>59.6</td><td>23.2</td><td>24.8</td><td>30.6</td><td>36.7</td><td>46.6</td><td>32.3</td><td>34.9</td><td>39.7</td><td>42.6</td><td>50.7</td></tr><tr><td>Meta-DETR (Zhang et al. 2022)</td><td>35.1</td><td>49.0</td><td>53.2</td><td>57.4</td><td>62.0</td><td>27.9</td><td>32.3</td><td>38.4</td><td>43.2</td><td>51.8</td><td>34.9</td><td>41.8</td><td>47.1</td><td>54.1</td><td>58.2</td></tr><tr><td>DeFRCN (Qiao et al. 2021)</td><td>40.2</td><td>53.6</td><td>58.2</td><td>63.6</td><td>66.5</td><td>29.5</td><td>39.7</td><td>43.4</td><td>48.1</td><td>52.8</td><td>35.0</td><td>38.3</td><td>52.9</td><td>57.7</td><td>60.8</td></tr><tr><td>FCT (Han et al. 2022b)</td><td>38.5</td><td>49.6</td><td>53.5</td><td>59.8</td><td>64.3</td><td>25.9</td><td>34.2</td><td>40.1</td><td>44.9</td><td>47.4</td><td>34.7</td><td>43.9</td><td>49.3</td><td>53.1</td><td>56.3</td></tr><tr><td>VFA (Han et al. 2023)</td><td>47.4</td><td>54.4</td><td>58.5</td><td>64.5</td><td>66.5</td><td>33.7</td><td>38.2</td><td>43.5</td><td>48.3</td><td>52.4</td><td>43.8</td><td>48.9</td><td>53.3</td><td>58.1</td><td>60.0</td></tr><tr><td>FPD(Ours)</td><td>41.5</td><td>52.8</td><td>58.4</td><td>64.9</td><td>67.1</td><td>28.2</td><td>38.7</td><td>43.8</td><td>50.3</td><td>53.6</td><td>34.9</td><td>48.6</td><td>54.0</td><td>58.4</td><td>61.5</td></tr></table>",
691
+ "bbox": [
692
+ 83,
693
+ 65,
694
+ 915,
695
+ 378
696
+ ],
697
+ "page_idx": 4
698
+ },
699
+ {
700
+ "type": "text",
701
+ "text": "Table 1: FSOD results (AP50) on the three splits of Pascal VOC dataset. We report both single run and multiple run results. Bold and Underline indicate the best and the second best results.",
702
+ "bbox": [
703
+ 81,
704
+ 387,
705
+ 911,
706
+ 417
707
+ ],
708
+ "page_idx": 4
709
+ },
710
+ {
711
+ "type": "text",
712
+ "text": "Test-Time Natural Integration A simple method to integrate fine-grained prototypes across different shots is to take the average. However, the detailed features represented by a feature query may not appear in some support images. Directly averaging might hurt the performance. Therefore, we compute a weighted sum using the aforementioned weight. Specifically, given $K$ shot support images in a class, which produces $K$ prototypes, the integration is performed via:",
713
+ "bbox": [
714
+ 81,
715
+ 443,
716
+ 480,
717
+ 556
718
+ ],
719
+ "page_idx": 4
720
+ },
721
+ {
722
+ "type": "equation",
723
+ "text": "\n$$\np _ {a v g} = \\sum_ {s = 1} ^ {K} w e i g h t _ {s} ^ {*} \\cdot p _ {s} \\tag {8}\n$$\n",
724
+ "text_format": "latex",
725
+ "bbox": [
726
+ 196,
727
+ 563,
728
+ 478,
729
+ 604
730
+ ],
731
+ "page_idx": 4
732
+ },
733
+ {
734
+ "type": "text",
735
+ "text": "where weight* denote the weight after the softmax operation across different shot, $p_{avg}$ is the integrated prototypes. This approach effectively filters out the prototypes that are not compatible with the current feature query, improving the robustness of our detector.",
736
+ "bbox": [
737
+ 81,
738
+ 612,
739
+ 480,
740
+ 681
741
+ ],
742
+ "page_idx": 4
743
+ },
744
+ {
745
+ "type": "text",
746
+ "text": "High-Level Feature Aggregation",
747
+ "text_level": 1,
748
+ "bbox": [
749
+ 83,
750
+ 694,
751
+ 336,
752
+ 709
753
+ ],
754
+ "page_idx": 4
755
+ },
756
+ {
757
+ "type": "text",
758
+ "text": "Feature aggregation between RoI features and class-level prototypes is a crucial step for meta-learning based FSOD, where the high-level semantic information is aligned to make the final prediction. We revisit the conventional methods and propose two improvements from different perspectives.",
759
+ "bbox": [
760
+ 81,
761
+ 713,
762
+ 478,
763
+ 784
764
+ ],
765
+ "page_idx": 4
766
+ },
767
+ {
768
+ "type": "text",
769
+ "text": "Balanced Class-Agnostic Sampling Meta R-CNN adopts a simple class-specific aggregation scheme where the RoI features are aggregated only with the prototypes of the same class. While VFA proposes a class-agnostic aggregation scheme which aggregates RoI features with randomly selected class prototypes to reduce class bias. Nonetheless, we argue that the completely random sampling might disturb",
770
+ "bbox": [
771
+ 81,
772
+ 790,
773
+ 480,
774
+ 888
775
+ ],
776
+ "page_idx": 4
777
+ },
778
+ {
779
+ "type": "text",
780
+ "text": "the model from focusing on the most crucial positive prototypes and thus hurt the performance. Instead, we propose a balanced sampling strategy named B-CAS which selects a pair of positive and negative prototypes to aggregate with RoI features in parallel. The B-CAS not only enables the relation modeling between different classes but also keeps the positive prototype from being overwhelmed by too many negative examples, and therefore can learn the high-level semantic relations more effectively.",
781
+ "bbox": [
782
+ 514,
783
+ 443,
784
+ 913,
785
+ 568
786
+ ],
787
+ "page_idx": 4
788
+ },
789
+ {
790
+ "type": "text",
791
+ "text": "(Fan et al. 2020) employs a more complex training strategy which divides training pairs into three types and maintains a ratio of 1:2:1. Additionally, a matching loss is computed to align RoI features with prototypes. However, we find it instead hurts the performance. A plausible reason is that FFA introduces the asymmetry upon two branches, making the matching loss no longer beneficial. Consequently, a simple yet effective method B-CAS is adopted in our experiments.",
792
+ "bbox": [
793
+ 514,
794
+ 569,
795
+ 913,
796
+ 681
797
+ ],
798
+ "page_idx": 4
799
+ },
800
+ {
801
+ "type": "text",
802
+ "text": "Non-Linear Fusion Module Many previous meta-learning based methods use element-wise multiplication to handle the feature fusion. We argue that while this approach learns the similarities within the same class effectively, it struggles to capture the class differences. Therefore it is not compatible with the proposed B-CAS. To solve this problem, we employ a novel non-linear fusion network following (Han et al. 2022a; Xiao and Marlet 2020) with modifications.",
803
+ "bbox": [
804
+ 514,
805
+ 691,
806
+ 913,
807
+ 803
808
+ ],
809
+ "page_idx": 4
810
+ },
811
+ {
812
+ "type": "text",
813
+ "text": "Specifically, features after element-wise multiplication, subtraction and concatenation are processed independently to refine their relation to the new feature. Then they are concatenated with the vanilla RoI features and further refined before fed into the detection head. Given RoI feature $f_{roi} \\in \\mathbb{R}^{1 \\times 2d}$ and class prototype $p_{cls} \\in \\mathbb{R}^{1 \\times 2d}$ , the aggregation",
814
+ "bbox": [
815
+ 514,
816
+ 804,
817
+ 913,
818
+ 888
819
+ ],
820
+ "page_idx": 4
821
+ },
822
+ {
823
+ "type": "table",
824
+ "img_path": "images/0d9903024a0fc9cbe22b64b909260b901cff8c275a322afe71f3a21c09649a67.jpg",
825
+ "table_caption": [],
826
+ "table_footnote": [],
827
+ "table_body": "<table><tr><td rowspan=\"2\"></td><td rowspan=\"2\">Method</td><td rowspan=\"2\">Framework</td><td colspan=\"2\">shot</td></tr><tr><td>10</td><td>30</td></tr><tr><td colspan=\"5\">Single run results:</td></tr><tr><td rowspan=\"5\">T</td><td>TFA w/ cos (Wang et al. 2020)</td><td>FR-CNN</td><td>10.0</td><td>13.7</td></tr><tr><td>Retentive (Fan et al. 2021)</td><td>FR-CNN</td><td>10.5</td><td>13.8</td></tr><tr><td>FSCE (Sun et al. 2021)</td><td>FR-CNN</td><td>11.9</td><td>16.4</td></tr><tr><td>FADI (Cao et al. 2021)</td><td>FR-CNN</td><td>12.2</td><td>16.1</td></tr><tr><td>DeFRCN (Qiao et al. 2021)</td><td>FR-CNN</td><td>18.5</td><td>22.6</td></tr><tr><td>M*</td><td>FCT (Han et al. 2022b)</td><td>Transformer</td><td>17.1</td><td>21.4</td></tr><tr><td rowspan=\"6\">M</td><td>FSRW (Kang et al. 2019)</td><td>YOLOv2</td><td>5.6</td><td>9.1</td></tr><tr><td>Meta R-CNN (Yan et al. 2019)</td><td>FR-CNN</td><td>8.7</td><td>12.4</td></tr><tr><td>FSDetView (Xiao and Marlet 2020)</td><td>FR-CNN</td><td>12.5</td><td>14.7</td></tr><tr><td>Meta FR-CNN (Han et al. 2022a)</td><td>FR-CNN</td><td>12.7</td><td>16.6</td></tr><tr><td>VFA (Han et al. 2023)</td><td>FR-CNN</td><td>16.2</td><td>18.9</td></tr><tr><td>FPD(ours)</td><td>FR-CNN</td><td>16.5</td><td>20.1</td></tr><tr><td colspan=\"5\">Average results over multiple runs:</td></tr><tr><td rowspan=\"2\">T</td><td>TFA w/ cos (Wang et al. 2020)</td><td>FR-CNN</td><td>9.1</td><td>12.1</td></tr><tr><td>DeFRCN (Qiao et al. 2021)</td><td>FR-CNN</td><td>16.8</td><td>21.2</td></tr><tr><td rowspan=\"2\">M*</td><td>FCT (Han et al. 2022b)</td><td>Transformer</td><td>15.3</td><td>20.2</td></tr><tr><td>Meta-DETR (Zhang et al. 2022)</td><td>Def DETR</td><td>19.0</td><td>22.2</td></tr><tr><td rowspan=\"4\">M</td><td>FSDetView (Xiao and Marlet 2020)</td><td>FR-CNN</td><td>10.7</td><td>15.9</td></tr><tr><td>DCNet (Hu et al. 2021)</td><td>FR-CNN</td><td>12.8</td><td>18.6</td></tr><tr><td>VFA (Han et al. 2023)</td><td>FR-CNN</td><td>15.9</td><td>18.4</td></tr><tr><td>FPD(ours)</td><td>FR-CNN</td><td>15.9</td><td>19.3</td></tr></table>",
828
+ "bbox": [
829
+ 83,
830
+ 65,
831
+ 480,
832
+ 428
833
+ ],
834
+ "page_idx": 5
835
+ },
836
+ {
837
+ "type": "text",
838
+ "text": "can be formulated as:",
839
+ "bbox": [
840
+ 81,
841
+ 506,
842
+ 228,
843
+ 520
844
+ ],
845
+ "page_idx": 5
846
+ },
847
+ {
848
+ "type": "equation",
849
+ "text": "\n$$\nf ^ {\\prime} = \\left[ \\mathcal {F} _ {1} \\left(f _ {r o i} \\odot p _ {c l s}\\right), \\mathcal {F} _ {2} \\left(f _ {r o i} - p _ {c l s}\\right), \\mathcal {F} _ {3} \\left[ f _ {r o i}, p _ {c l s} \\right], f _ {r o i} \\right] \\tag {9}\n$$\n",
850
+ "text_format": "latex",
851
+ "bbox": [
852
+ 83,
853
+ 525,
854
+ 477,
855
+ 554
856
+ ],
857
+ "page_idx": 5
858
+ },
859
+ {
860
+ "type": "equation",
861
+ "text": "\n$$\nf = \\mathcal {F} _ {a g g} \\left(f ^ {\\prime}\\right) \\tag {10}\n$$\n",
862
+ "text_format": "latex",
863
+ "bbox": [
864
+ 233,
865
+ 555,
866
+ 478,
867
+ 573
868
+ ],
869
+ "page_idx": 5
870
+ },
871
+ {
872
+ "type": "text",
873
+ "text": "where $\\mathcal{F}_1$ , $\\mathcal{F}_2$ and $\\mathcal{F}_3$ represent independent fully-connected layer followed by ReLU activation function, and $\\mathcal{F}_{agg}$ denote a pure fully-connected layer. This formulation provides a stronger capability to thoroughly explore the relations between high-level features. In addition, an exclusive path for RoI features is reserved to propagate the original RoI information, which reduces the noise introduced by random prototypes and can be used to regress the object location.",
874
+ "bbox": [
875
+ 81,
876
+ 574,
877
+ 480,
878
+ 686
879
+ ],
880
+ "page_idx": 5
881
+ },
882
+ {
883
+ "type": "text",
884
+ "text": "Experiments",
885
+ "text_level": 1,
886
+ "bbox": [
887
+ 225,
888
+ 698,
889
+ 336,
890
+ 715
891
+ ],
892
+ "page_idx": 5
893
+ },
894
+ {
895
+ "type": "text",
896
+ "text": "Benchmarks",
897
+ "text_level": 1,
898
+ "bbox": [
899
+ 83,
900
+ 718,
901
+ 184,
902
+ 732
903
+ ],
904
+ "page_idx": 5
905
+ },
906
+ {
907
+ "type": "text",
908
+ "text": "We evaluate our method on two widely-used FSOD benchmarks PASCAL VOC (Everingham et al. 2010) and MS COCO (Lin et al. 2014), using exactly the same class partitions and few-shot examples as in (Wang et al. 2020).",
909
+ "bbox": [
910
+ 81,
911
+ 734,
912
+ 480,
913
+ 791
914
+ ],
915
+ "page_idx": 5
916
+ },
917
+ {
918
+ "type": "text",
919
+ "text": "PASCAL VOC. The 20 PASCAL VOC classes are split into 15 base classes and 5 novel classes. There are three different class partitions for a more comprehensive evaluation. The VOC07 and VOC12 train/val sets are used for training and the VOC07 test set is used for evaluation. The Mean Average Precision at IoU=0.5 (AP50) is reported under $K = \\{1, 2, 3, 5, 10\\}$ shot settings.",
920
+ "bbox": [
921
+ 81,
922
+ 791,
923
+ 480,
924
+ 888
925
+ ],
926
+ "page_idx": 5
927
+ },
928
+ {
929
+ "type": "table",
930
+ "img_path": "images/c8d58ef533747ef2ed89e2ac9a8957aabbb508ff3c47098f6d9311c8f14db2ae.jpg",
931
+ "table_caption": [
932
+ "Table 2: FSOD results (AP) on the MS COCO dataset. T: Transfer-learning based methods. M: Meta-learning based methods. $\\mathbf{M}^{*}$ : Meta-learning with advanced framework."
933
+ ],
934
+ "table_footnote": [],
935
+ "table_body": "<table><tr><td></td><td>B-CAS</td><td>NLF</td><td>FFA</td><td colspan=\"3\">shot</td></tr><tr><td></td><td></td><td></td><td></td><td>3</td><td>5</td><td>10</td></tr><tr><td>Baseline</td><td></td><td></td><td></td><td>56.7</td><td>58.3</td><td>61.4</td></tr><tr><td rowspan=\"3\">Ours</td><td>✓</td><td></td><td></td><td>61.2</td><td>64.7</td><td>64.9</td></tr><tr><td>✓</td><td>✓</td><td></td><td>62.8</td><td>67.1</td><td>66.3</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>64.0</td><td>67.6</td><td>68.4</td></tr></table>",
936
+ "bbox": [
937
+ 519,
938
+ 65,
939
+ 911,
940
+ 171
941
+ ],
942
+ "page_idx": 5
943
+ },
944
+ {
945
+ "type": "text",
946
+ "text": "Table 3: Ablation study of different components.",
947
+ "bbox": [
948
+ 552,
949
+ 181,
950
+ 875,
951
+ 196
952
+ ],
953
+ "page_idx": 5
954
+ },
955
+ {
956
+ "type": "text",
957
+ "text": "MS COCO. For MS COCO, the 20 PASCAL VOC classes are used as novel classes, the other 60 classes are used as base classes. The 5k images from COCO2017 val are used for evaluation and the rest are used for training. We report the AP at IoU=0.5:0.95 under $K = \\{10,30\\}$ shot settings.",
958
+ "bbox": [
959
+ 514,
960
+ 220,
961
+ 913,
962
+ 292
963
+ ],
964
+ "page_idx": 5
965
+ },
966
+ {
967
+ "type": "text",
968
+ "text": "Implementation Details",
969
+ "text_level": 1,
970
+ "bbox": [
971
+ 516,
972
+ 301,
973
+ 702,
974
+ 316
975
+ ],
976
+ "page_idx": 5
977
+ },
978
+ {
979
+ "type": "text",
980
+ "text": "Our method is implemented with MMDetection (Chen et al. 2019). We adopt ResNet-101 (He et al. 2016) pretrained on ImageNet (Russakovsky et al. 2015) as the backbone. The single scale feature map is used for detection without FPN (Lin et al. 2017a). We resize the query images to a maximum of $1333 \\times 800$ pixels, and the cropped instances from support images are resized to $224 \\times 224$ pixels.",
981
+ "bbox": [
982
+ 514,
983
+ 319,
984
+ 913,
985
+ 416
986
+ ],
987
+ "page_idx": 5
988
+ },
989
+ {
990
+ "type": "text",
991
+ "text": "Our model is trained on 2x3090 Nvidia GPUs with a total batch size of 8, using the SGD optimizer. In the base training stage, the model is trained on VOC and COCO datasets for $20\\mathrm{k} / 110\\mathrm{k}$ iterations. The learning rate is set to 0.004 and decayed at $17\\mathrm{k} / 92\\mathrm{k}$ iteration by a factor of 0.1. In the finetuning stage, the learning rate is set to 0.001. We use exactly the same loss functions with Meta R-CNN.",
992
+ "bbox": [
993
+ 514,
994
+ 416,
995
+ 913,
996
+ 513
997
+ ],
998
+ "page_idx": 5
999
+ },
1000
+ {
1001
+ "type": "text",
1002
+ "text": "Comparison with the State-of-the-Art Methods",
1003
+ "text_level": 1,
1004
+ "bbox": [
1005
+ 516,
1006
+ 523,
1007
+ 880,
1008
+ 539
1009
+ ],
1010
+ "page_idx": 5
1011
+ },
1012
+ {
1013
+ "type": "text",
1014
+ "text": "PASCAL VOC. We show both the single run results and the average results over multiple runs of PASCAL VOC in Table 1. It can be seen that FPD significantly outperforms previous methods, achieving the state-of-the-art performance in most settings. Specifically, FPD outperforms previous best method by $1.5\\%$ , $4.4\\%$ , and $6.8\\%$ on the three data splits under $K = 10$ shot setting, respectively. We notice that under $K = \\{1, 2\\}$ shot settings, our method is less effective than VFA, which is a strong FSOD detector utilizing a variational autoencoder to estimate class distributions. Our analysis suggests that in extremely data-scarce scenarios, it is more challenging for the FFA to capture the representative and common features across different shots, therefore it fails to achieve the expected effect under $K = \\{1, 2\\}$ shot settings.",
1015
+ "bbox": [
1016
+ 514,
1017
+ 541,
1018
+ 913,
1019
+ 734
1020
+ ],
1021
+ "page_idx": 5
1022
+ },
1023
+ {
1024
+ "type": "text",
1025
+ "text": "MS COCO. Table 2 shows the results of MS COCO. It can be seen that FPD outperforms all of the meta-learning based methods adopting the Faster R-CNN framework. For example, FPD improves performance by $6.3\\%$ compared to previous best result under $K = 30$ shot setting. FPD ranks fourth among all the methods. Please note that our method focuses on the three proposed components, without using advanced frameworks or techniques such as DETR, Transformer or gradient decoupled layer. Given the challenging nature of the MS COCO dataset, we believe that the performance can be further improved with more refinements.",
1026
+ "bbox": [
1027
+ 514,
1028
+ 736,
1029
+ 913,
1030
+ 888
1031
+ ],
1032
+ "page_idx": 5
1033
+ },
1034
+ {
1035
+ "type": "image",
1036
+ "img_path": "images/7f0ccaea920602384302290e962d86d4ec116290314ce14041fc85efb9b2bfe8.jpg",
1037
+ "image_caption": [
1038
+ "Figure 4: Visualization of the detection results on novel classes."
1039
+ ],
1040
+ "image_footnote": [],
1041
+ "bbox": [
1042
+ 135,
1043
+ 65,
1044
+ 864,
1045
+ 237
1046
+ ],
1047
+ "page_idx": 6
1048
+ },
1049
+ {
1050
+ "type": "table",
1051
+ "img_path": "images/33e1c7e890a77449ab41e9eeacad6315ba695beebd578cf31aa1e1e434dd1a16.jpg",
1052
+ "table_caption": [],
1053
+ "table_footnote": [],
1054
+ "table_body": "<table><tr><td>Method</td><td>Directly Match</td><td>FFA</td><td>3</td><td>shot 5</td><td>10</td></tr><tr><td>Baseline*</td><td></td><td></td><td>62.8</td><td>67.1</td><td>66.3</td></tr><tr><td rowspan=\"2\">Ours</td><td>✓</td><td></td><td>63.2</td><td>67.0</td><td>67.5</td></tr><tr><td></td><td>✓</td><td>64.0</td><td>67.6</td><td>68.4</td></tr></table>",
1055
+ "bbox": [
1056
+ 88,
1057
+ 285,
1058
+ 480,
1059
+ 377
1060
+ ],
1061
+ "page_idx": 6
1062
+ },
1063
+ {
1064
+ "type": "text",
1065
+ "text": "Table 4: Comparison with directly matching.",
1066
+ "bbox": [
1067
+ 132,
1068
+ 385,
1069
+ 428,
1070
+ 401
1071
+ ],
1072
+ "page_idx": 6
1073
+ },
1074
+ {
1075
+ "type": "text",
1076
+ "text": "Ablation Study",
1077
+ "text_level": 1,
1078
+ "bbox": [
1079
+ 83,
1080
+ 425,
1081
+ 202,
1082
+ 440
1083
+ ],
1084
+ "page_idx": 6
1085
+ },
1086
+ {
1087
+ "type": "text",
1088
+ "text": "We conduct comprehensive experiments on the Novel Set 1 of PASCAL VOC under $K = \\{3,5,10\\}$ shot settings, which demonstrates the effectiveness of our proposed method.",
1089
+ "bbox": [
1090
+ 81,
1091
+ 443,
1092
+ 478,
1093
+ 484
1094
+ ],
1095
+ "page_idx": 6
1096
+ },
1097
+ {
1098
+ "type": "text",
1099
+ "text": "Effect of Different Components. We show the results with different components in Table 3. It can be seen that B-CAS and NLF together improve the performance by about $10\\%$ over the baseline. Based on this, our FFA can further boost the results, achieving the state-of-the-art performance.",
1100
+ "bbox": [
1101
+ 81,
1102
+ 484,
1103
+ 478,
1104
+ 554
1105
+ ],
1106
+ "page_idx": 6
1107
+ },
1108
+ {
1109
+ "type": "text",
1110
+ "text": "Effect of the FFA. FFA differs from DCNet in that it distills the fine-grained prototypes to aggregate with query branch. To demonstrate the superiority of this method, we re-implement the DRD module following DCNet to directly match dense feature maps for aggregation. We show the experimental results in Table 4. It can be seen that FFA consistently achieves better performance than directly matching, which validates the effectiveness of our method.",
1111
+ "bbox": [
1112
+ 81,
1113
+ 554,
1114
+ 480,
1115
+ 664
1116
+ ],
1117
+ "page_idx": 6
1118
+ },
1119
+ {
1120
+ "type": "text",
1121
+ "text": "Effect of Feature Queries. We assign each class a set of feature queries, which are the key guidance to distill fine-grained prototypes. The number of feature queries for a class is set to 5 by default. Figure 5 shows the effect of this number.",
1122
+ "bbox": [
1123
+ 81,
1124
+ 664,
1125
+ 480,
1126
+ 719
1127
+ ],
1128
+ "page_idx": 6
1129
+ },
1130
+ {
1131
+ "type": "text",
1132
+ "text": "Moreover, to explore the fundamental working mechanism, we visualize the attention heatmap of feature queries on support images. As shown in Figure 6, two feature queries from person category are listed. They are prone to focus on the specific details, e.g., head and hand, which conforms to our expectations. Please note that the generated heat maps has a resolution of $14 \\times 14$ . It is not absolutely aligned with the original images.",
1133
+ "bbox": [
1134
+ 81,
1135
+ 720,
1136
+ 480,
1137
+ 832
1138
+ ],
1139
+ "page_idx": 6
1140
+ },
1141
+ {
1142
+ "type": "text",
1143
+ "text": "Visualize Detection Results",
1144
+ "text_level": 1,
1145
+ "bbox": [
1146
+ 83,
1147
+ 842,
1148
+ 294,
1149
+ 856
1150
+ ],
1151
+ "page_idx": 6
1152
+ },
1153
+ {
1154
+ "type": "text",
1155
+ "text": "We show the detection results in Figure 4. The model is trained on the Novel Set 3 of PASCAL VOC under 10 shot",
1156
+ "bbox": [
1157
+ 81,
1158
+ 859,
1159
+ 478,
1160
+ 888
1161
+ ],
1162
+ "page_idx": 6
1163
+ },
1164
+ {
1165
+ "type": "image",
1166
+ "img_path": "images/5d579454c57487418b903b99c3011751c793e09b7c04a1f60b2f03a1e15e9e62.jpg",
1167
+ "image_caption": [
1168
+ "Figure 5: Ablation study on the number of feature queries."
1169
+ ],
1170
+ "image_footnote": [],
1171
+ "bbox": [
1172
+ 547,
1173
+ 287,
1174
+ 883,
1175
+ 431
1176
+ ],
1177
+ "page_idx": 6
1178
+ },
1179
+ {
1180
+ "type": "image",
1181
+ "img_path": "images/c1c2f880669506cb635eeb17e44d2080684dde28e1c5fb671bb637072cd667ee.jpg",
1182
+ "image_caption": [
1183
+ "Figure 6: Attention heatmap of feature queries. Please find more discussion and results in Appendix."
1184
+ ],
1185
+ "image_footnote": [],
1186
+ "bbox": [
1187
+ 532,
1188
+ 476,
1189
+ 901,
1190
+ 613
1191
+ ],
1192
+ "page_idx": 6
1193
+ },
1194
+ {
1195
+ "type": "text",
1196
+ "text": "setting and tested on the VOC07 test set. It can be seen that many of the novel instances are effectively detected, even though the detected bboxes are not perfectly aligned. This results demonstrate the promising potential of our method.",
1197
+ "bbox": [
1198
+ 514,
1199
+ 681,
1200
+ 913,
1201
+ 739
1202
+ ],
1203
+ "page_idx": 6
1204
+ },
1205
+ {
1206
+ "type": "text",
1207
+ "text": "Conclusion",
1208
+ "text_level": 1,
1209
+ "bbox": [
1210
+ 665,
1211
+ 753,
1212
+ 764,
1213
+ 768
1214
+ ],
1215
+ "page_idx": 6
1216
+ },
1217
+ {
1218
+ "type": "text",
1219
+ "text": "This paper studies the meta-learning based FSOD. We propose a novel FFA module which can distill fine-grained prototypes in addition to class-level ones. It enables more robust novel object detection by focusing on the detailed features. We also propose B-CAS strategy and NLF module to aggregate high-level features more effectively. Both quantitative and qualitative results demonstrate the effectiveness of our method and the promising prospect of FSOD.",
1220
+ "bbox": [
1221
+ 514,
1222
+ 776,
1223
+ 913,
1224
+ 888
1225
+ ],
1226
+ "page_idx": 6
1227
+ },
1228
+ {
1229
+ "type": "text",
1230
+ "text": "Acknowledgments",
1231
+ "text_level": 1,
1232
+ "bbox": [
1233
+ 202,
1234
+ 66,
1235
+ 361,
1236
+ 83
1237
+ ],
1238
+ "page_idx": 7
1239
+ },
1240
+ {
1241
+ "type": "text",
1242
+ "text": "This work was supported in part by the Overseas Students Science and Technology Activities Project (No. 2018024), by the National Natural Science Foundation of China (No. 61502389), by the Natural Science Basic Research Program of Shaanxi Province, China (No. 2023-JC-YB-508).",
1243
+ "bbox": [
1244
+ 81,
1245
+ 87,
1246
+ 480,
1247
+ 157
1248
+ ],
1249
+ "page_idx": 7
1250
+ },
1251
+ {
1252
+ "type": "text",
1253
+ "text": "References",
1254
+ "text_level": 1,
1255
+ "bbox": [
1256
+ 233,
1257
+ 171,
1258
+ 330,
1259
+ 186
1260
+ ],
1261
+ "page_idx": 7
1262
+ },
1263
+ {
1264
+ "type": "list",
1265
+ "sub_type": "ref_text",
1266
+ "list_items": [
1267
+ "Cao, Y.; Wang, J.; Jin, Y.; Wu, T.; Chen, K.; Liu, Z.; and Lin, D. 2021. Few-Shot Object Detection via Association and Discrimination. In NeurIPS, 16570-16581.",
1268
+ "Carion, N.; Massa, F.; Synnaeve, G.; Usunier, N.; Kirillov, A.; and Zagoruyko, S. 2020. End-to-end object detection with transformers. In ECCV, 213-229.",
1269
+ "Chen, H.; Wang, Y.; Wang, G.; and Qiao, Y. 2018. LSTD: A low-shot transfer detector for object detection. In AAAI, 6066-6073.",
1270
+ "Chen, K.; Wang, J.; Pang, J.; Cao, Y.; Xiong, Y.; Li, X.; Sun, S.; Feng, W.; Liu, Z.; Xu, J.; Zhang, Z.; Cheng, D.; Zhu, C.; Cheng, T.; Zhao, Q.; Li, B.; Lu, X.; Zhu, R.; Wu, Y.; Dai, J.; Wang, J.; Shi, J.; Ouyang, W.; Loy, C. C.; and Lin, D. 2019. MMDetection: Open MMLab Detection Toolbox and Benchmark. arXiv preprint arXiv:1906.07155.",
1271
+ "Everingham, M.; Van Gool, L.; Williams, C. K.; Winn, J.; and Zisserman, A. 2010. The pascal visual object classes (voc) challenge. *IJCV*, 88(2): 303-338.",
1272
+ "Fan, Q.; Zhuo, W.; Tang, C.-K.; and Tai, Y.-W. 2020. Few-shot object detection with attention-RPN and multi-relation detector. In CVPR, 4013-4022.",
1273
+ "Fan, Z.; Ma, Y.; Li, Z.; and Sun, J. 2021. Generalized few-shot object detection without forgetting. In CVPR, 4527-4536.",
1274
+ "Han, G.; Huang, S.; Ma, J.; He, Y.; and Chang, S.-F. 2022a. Meta faster r-cnn: Towards accurate few-shot object detection with attentive feature alignment. In AAAI, 780-789.",
1275
+ "Han, G.; Ma, J.; Huang, S.; Chen, L.; and Chang, S.-F. 2022b. Few-shot object detection with fully cross-transformer. In CVPR, 5321-5330.",
1276
+ "Han, J.; Ren, Y.; Ding, J.; Yan, K.; and Xia, G.-S. 2023. Few-Shot Object Detection via Variational Feature Aggregation. In AAAI, 755-763.",
1277
+ "He, K.; Gkioxari, G.; Dollar, P.; and Girshick, R. 2017. Mask r-cnn. In ICCV, 2961-2969.",
1278
+ "He, K.; Zhang, X.; Ren, S.; and Sun, J. 2016. Deep residual learning for image recognition. In CVPR, 770-778.",
1279
+ "Hu, H.; Bai, S.; Li, A.; Cui, J.; and Wang, L. 2021. Dense relation distillation with context-aware aggregation for few-shot object detection. In CVPR, 10185-10194.",
1280
+ "Kang, B.; Liu, Z.; Wang, X.; Yu, F.; Feng, J.; and Darrell, T. 2019. Few-shot object detection via feature reweighting. In ICCV, 8420-8429.",
1281
+ "Lin, T.-Y.; Dólar, P.; Girshick, R.; He, K.; Hariharan, B.; and Belongie, S. 2017a. Feature pyramid networks for object detection. In CVPR, 2117-2125."
1282
+ ],
1283
+ "bbox": [
1284
+ 84,
1285
+ 191,
1286
+ 480,
1287
+ 888
1288
+ ],
1289
+ "page_idx": 7
1290
+ },
1291
+ {
1292
+ "type": "list",
1293
+ "sub_type": "ref_text",
1294
+ "list_items": [
1295
+ "Lin, T.-Y.; Goyal, P.; Girshick, R.; He, K.; and Dollar, P. 2017b. Focal Loss for Dense Object Detection. In ICCV, 2980-2988.",
1296
+ "Lin, T.-Y.; Maire, M.; Belongie, S.; Hays, J.; Perona, P.; Ramanan, D.; Dollar, P.; and Zitnick, C. L. 2014. Microsoft coco: Common objects in context. In ECCV, 740-755. Springer.",
1297
+ "Liu, W.; Anguelov, D.; Erhan, D.; Szegedy, C.; Reed, S.; Fu, C.-Y.; and Berg, A. C. 2016. SSD: Single shot multibox detector. In ECCV, 21-37.",
1298
+ "Qiao, L.; Zhao, Y.; Li, Z.; Qiu, X.; Wu, J.; and Zhang, C. 2021. DeFRCN: Decoupled Faster R-CNN for Few-Shot Object Detection. In ICCV, 8681-8690.",
1299
+ "Redmon, J.; Divvala, S.; Girshick, R.; and Farhadi, A. 2016. You only look once: Unified, real-time object detection. In CVPR, 779-788.",
1300
+ "Ren, S.; He, K.; Girshick, R.; and Sun, J. 2017. Faster R-CNN: Towards real-time object detection with region proposal networks. IEEE TPAMI, 39(6): 1137-1149.",
1301
+ "Russakovsky, O.; Deng, J.; Su, H.; Krause, J.; Satheesh, S.; Ma, S.; Huang, Z.; Karpathy, A.; Khosla, A.; Bernstein, M.; et al. 2015. Imagenet large scale visual recognition challenge. IJCV, 115(3): 211-252.",
1302
+ "Sun, B.; Li, B.; Cai, S.; Yuan, Y.; and Zhang, C. 2021. Fsce: Few-shot object detection via contrastive proposal encoding. In CVPR, 7352-7362.",
1303
+ "Vaswani, A.; Shazeer, N.; Parmar, N.; Uszkoreit, J.; Jones, L.; Gomez, A. N.; Kaiser, L.; and Polosukhin, I. 2017. Attention is all you need. In NeurIPS, 5998-6008.",
1304
+ "Wang, W.; Xie, E.; Li, X.; Fan, D.-P.; Song, K.; Liang, D.; Lu, T.; Luo, P.; and Shao, L. 2021. Pyramid vision transformer: A versatile backbone for dense prediction without convolutions. In ICCV, 568-578.",
1305
+ "Wang, X.; Huang, T. E.; Darrell, T.; Gonzalez, J. E.; and Yu, F. 2020. Frustratingly simple few-shot object detection. arXiv preprint arXiv:2003.06957.",
1306
+ "Wang, Y.-X.; Ramanan, D.; and Hebert, M. 2019. Meta-learning to detect rare objects. In ICCV, 9925-9934.",
1307
+ "Wu, J.; Liu, S.; Huang, D.; and Wang, Y. 2020. Multi-scale positive sample refinement for few-shot object detection. In ECCV, 456-472.",
1308
+ "Xiao, Y.; and Marlet, R. 2020. Few-shot object detection and viewpoint estimation for objects in the wild. In ECCV, 192-210. Springer.",
1309
+ "Yan, X.; Chen, Z.; Xu, A.; Wang, X.; Liang, X.; and Lin, L. 2019. Meta r-cnn: Towards general solver for instance-level low-shot learning. In ICCV, 9577-9586.",
1310
+ "Zhang, G.; Luo, Z.; Cui, K.; Lu, S.; and Xing, E. P. 2022. Meta-DETR: Image-Level Few-Shot Detection with InterClass Correlation Exploitation. IEEE TPAMI, 45(11): 12832-12843.",
1311
+ "Zhu, X.; Su, W.; Lu, L.; Li, B.; Wang, X.; and Dai, J. 2020. Deformable detr: Deformable transformers for end-to-end object detection. arXiv preprint arXiv:2010.04159."
1312
+ ],
1313
+ "bbox": [
1314
+ 517,
1315
+ 66,
1316
+ 915,
1317
+ 867
1318
+ ],
1319
+ "page_idx": 7
1320
+ },
1321
+ {
1322
+ "type": "text",
1323
+ "text": "Appendix",
1324
+ "text_level": 1,
1325
+ "bbox": [
1326
+ 238,
1327
+ 66,
1328
+ 326,
1329
+ 85
1330
+ ],
1331
+ "page_idx": 8
1332
+ },
1333
+ {
1334
+ "type": "text",
1335
+ "text": "Additional Visualization",
1336
+ "text_level": 1,
1337
+ "bbox": [
1338
+ 83,
1339
+ 87,
1340
+ 274,
1341
+ 101
1342
+ ],
1343
+ "page_idx": 8
1344
+ },
1345
+ {
1346
+ "type": "text",
1347
+ "text": "Attention Heatmap of Feature Queries. We show more attention heatmaps of feature queries upon support images in Figure 7. We can see that the feature query 2 from dog category is prone to capture the detailed features of head. The feature query 1, 2 from horse category are focus on head and legs, respectively. The feature queries are more likely to capture the different details, rather than collapse to a trivial solution.",
1348
+ "bbox": [
1349
+ 81,
1350
+ 106,
1351
+ 478,
1352
+ 217
1353
+ ],
1354
+ "page_idx": 8
1355
+ },
1356
+ {
1357
+ "type": "text",
1358
+ "text": "Feature Map of Query Images. The feature map of a query image $X_{q} \\in \\mathbb{R}^{H\\bar{W} \\times d}$ are summed alone dimension $d$ and then normalized to [0, 1] to produce the heatmap. We show the results of original query features and the assigned prototypes in Figure 10. It can be seen that the assigned prototypes can highlight the representative features to facilitate the model prediction. All these evidences demonstrate the effectiveness of our proposed FFA.",
1359
+ "bbox": [
1360
+ 81,
1361
+ 217,
1362
+ 480,
1363
+ 330
1364
+ ],
1365
+ "page_idx": 8
1366
+ },
1367
+ {
1368
+ "type": "text",
1369
+ "text": "Additional Implementation Details",
1370
+ "text_level": 1,
1371
+ "bbox": [
1372
+ 83,
1373
+ 340,
1374
+ 354,
1375
+ 356
1376
+ ],
1377
+ "page_idx": 8
1378
+ },
1379
+ {
1380
+ "type": "text",
1381
+ "text": "Our method follows the two-stage training paradigm. At the base training stage, we train all of the model parameters (the first few layers of ResNet are frozened conventionally). At the fine-tuning stage, we freeze the backbone and only train the RPN, FFA and NLF module. Fine-tuning of the FFA together with RPN can help to produce high-quality proposals of the novel classes. Under $K = \\{1, 2\\}$ shot settings, we freeze the RPN to avoid overfitting.",
1382
+ "bbox": [
1383
+ 81,
1384
+ 359,
1385
+ 480,
1386
+ 472
1387
+ ],
1388
+ "page_idx": 8
1389
+ },
1390
+ {
1391
+ "type": "text",
1392
+ "text": "Computational Cost",
1393
+ "text_level": 1,
1394
+ "bbox": [
1395
+ 83,
1396
+ 484,
1397
+ 243,
1398
+ 500
1399
+ ],
1400
+ "page_idx": 8
1401
+ },
1402
+ {
1403
+ "type": "text",
1404
+ "text": "Table 5 shows the computational cost of different methods at inference time. We conduct the experiments on a single Nvidia 3090 GPU. The batch size is set to 1. It can be seen that our method has a better trade-off between the performance and computational efficiency.",
1405
+ "bbox": [
1406
+ 81,
1407
+ 502,
1408
+ 480,
1409
+ 574
1410
+ ],
1411
+ "page_idx": 8
1412
+ },
1413
+ {
1414
+ "type": "table",
1415
+ "img_path": "images/89ebc4163847507ec91d783c4d3fdc6b46039d48f585ba31069c189e0dbd0579.jpg",
1416
+ "table_caption": [],
1417
+ "table_footnote": [],
1418
+ "table_body": "<table><tr><td>Dataset</td><td>Method</td><td>Params(MB)</td><td>FLOPs(GB)</td><td>FPS(img/s)</td></tr><tr><td rowspan=\"3\">VOC (20 class)</td><td>Baseline</td><td>45.99</td><td>709.76</td><td>16.2</td></tr><tr><td>FPD(Ours)</td><td>65.68</td><td>818.10</td><td>14.8</td></tr><tr><td>Directly Match</td><td>69.58</td><td>956.72</td><td>14.5</td></tr><tr><td rowspan=\"3\">COCO (80 class)</td><td>Baseline</td><td>46.72</td><td>766.36</td><td>7.3</td></tr><tr><td>FPD(Ours)</td><td>66.5</td><td>1309.50</td><td>6.5</td></tr><tr><td>Directly Match</td><td>70.32</td><td>1466.25</td><td>5.3</td></tr></table>",
1419
+ "bbox": [
1420
+ 84,
1421
+ 585,
1422
+ 478,
1423
+ 698
1424
+ ],
1425
+ "page_idx": 8
1426
+ },
1427
+ {
1428
+ "type": "text",
1429
+ "text": "Table 5: The computational cost at the inference time.",
1430
+ "bbox": [
1431
+ 102,
1432
+ 707,
1433
+ 457,
1434
+ 722
1435
+ ],
1436
+ "page_idx": 8
1437
+ },
1438
+ {
1439
+ "type": "text",
1440
+ "text": "More Discussion",
1441
+ "text_level": 1,
1442
+ "bbox": [
1443
+ 209,
1444
+ 753,
1445
+ 352,
1446
+ 768
1447
+ ],
1448
+ "page_idx": 8
1449
+ },
1450
+ {
1451
+ "type": "text",
1452
+ "text": "Our proposed FFA module has similarities with DCNet and Meta-DETR. In this part, we provide a more detailed comparison among these methods.",
1453
+ "bbox": [
1454
+ 81,
1455
+ 772,
1456
+ 480,
1457
+ 816
1458
+ ],
1459
+ "page_idx": 8
1460
+ },
1461
+ {
1462
+ "type": "text",
1463
+ "text": "Compare with DCNet",
1464
+ "text_level": 1,
1465
+ "bbox": [
1466
+ 83,
1467
+ 827,
1468
+ 256,
1469
+ 842
1470
+ ],
1471
+ "page_idx": 8
1472
+ },
1473
+ {
1474
+ "type": "text",
1475
+ "text": "Figure 11 illustrates the DRD module of DCNet, which densely matches all classes of support features into the query feature map. There are two main differences between DRD",
1476
+ "bbox": [
1477
+ 81,
1478
+ 845,
1479
+ 480,
1480
+ 888
1481
+ ],
1482
+ "page_idx": 8
1483
+ },
1484
+ {
1485
+ "type": "image",
1486
+ "img_path": "images/0c32c46312a2c51e1411b6ba6d8dd492caf22c216cd38804182a6300489f7748.jpg",
1487
+ "image_caption": [
1488
+ "Figure 7: Additional attention heatmap of feature queries. The model is trained on Novel Set 3 of PASCAL VOC."
1489
+ ],
1490
+ "image_footnote": [],
1491
+ "bbox": [
1492
+ 524,
1493
+ 65,
1494
+ 911,
1495
+ 280
1496
+ ],
1497
+ "page_idx": 8
1498
+ },
1499
+ {
1500
+ "type": "image",
1501
+ "img_path": "images/f97f8da43427a73f317469f375044628f8017782bec5df983d1c31194fba296c.jpg",
1502
+ "image_caption": [
1503
+ "Figure 8: Attention heatmap of feature queries (bird)."
1504
+ ],
1505
+ "image_footnote": [],
1506
+ "bbox": [
1507
+ 524,
1508
+ 330,
1509
+ 911,
1510
+ 542
1511
+ ],
1512
+ "page_idx": 8
1513
+ },
1514
+ {
1515
+ "type": "text",
1516
+ "text": "and our FFA (as shown in Figure 3). First, FFA utilizes feature queries to distill fine-grained prototypes, enabling the model to focus on the most representative detailed features and to reduce computational costs (see Table 5). It also enhances inference efficiency (see subsec. Test-Time Natural Integration). Second, FFA employs a residual connection for the original query features, and the prototypes are directly assigned to the query feature map without any extra projection. This maintains the query-support branches in the same feature space, which is crucial for the subsequent high-level feature fusion operation.",
1517
+ "bbox": [
1518
+ 514,
1519
+ 595,
1520
+ 913,
1521
+ 748
1522
+ ],
1523
+ "page_idx": 8
1524
+ },
1525
+ {
1526
+ "type": "text",
1527
+ "text": "Compare with Meta-DETR",
1528
+ "text_level": 1,
1529
+ "bbox": [
1530
+ 516,
1531
+ 758,
1532
+ 732,
1533
+ 773
1534
+ ],
1535
+ "page_idx": 8
1536
+ },
1537
+ {
1538
+ "type": "text",
1539
+ "text": "Meta-DETR incorporates meta-learning and attention mechanism into the DETR framework. It utilizes the cross attention operation to aggregate query-support features. As shown in Figure 12, CAM performs global average pooling to generate the class-level prototypes. They are matched with query features and then assigned into query features based on the matching results. Instead of performing element-wise addition, the element-wise multiplication operation is used to",
1540
+ "bbox": [
1541
+ 514,
1542
+ 777,
1543
+ 913,
1544
+ 888
1545
+ ],
1546
+ "page_idx": 8
1547
+ },
1548
+ {
1549
+ "type": "image",
1550
+ "img_path": "images/076df7b2369b78bf61d3c69d2c3df92fbfd2a790850dccd2dd9a50d1987b9578.jpg",
1551
+ "image_caption": [
1552
+ "Figure 9: Attention heatmap of feature queries (airplane)."
1553
+ ],
1554
+ "image_footnote": [],
1555
+ "bbox": [
1556
+ 86,
1557
+ 65,
1558
+ 478,
1559
+ 200
1560
+ ],
1561
+ "page_idx": 9
1562
+ },
1563
+ {
1564
+ "type": "image",
1565
+ "img_path": "images/cd3515b979cb12637a4eb90425b5b71226212dbd967e4b9963da9c5d2fc3ceca.jpg",
1566
+ "image_caption": [
1567
+ "Figure 10: Feature map of query images."
1568
+ ],
1569
+ "image_footnote": [],
1570
+ "bbox": [
1571
+ 91,
1572
+ 243,
1573
+ 475,
1574
+ 523
1575
+ ],
1576
+ "page_idx": 9
1577
+ },
1578
+ {
1579
+ "type": "text",
1580
+ "text": "rewight the query feature map along the channel dimension. CAM differs from our method in three main aspects. First, it focuses on high-level feature aggregation, while our FFA is used to aggregate detailed features. FFA utilizes feature queries and an additional cross attention layer to refine the important local context into the fine-grained prototypes. Second, CAM employs sigmoid and multiplication operations to reweight the query feature map, while FFA directly adds the assigned prototypes to it, preserving more information and potential in the early stages. Third, CAM incorporates a novel and effective encoding matching task to predict object classes.",
1581
+ "bbox": [
1582
+ 81,
1583
+ 580,
1584
+ 478,
1585
+ 746
1586
+ ],
1587
+ "page_idx": 9
1588
+ },
1589
+ {
1590
+ "type": "text",
1591
+ "text": "Revised Performance",
1592
+ "text_level": 1,
1593
+ "bbox": [
1594
+ 83,
1595
+ 758,
1596
+ 250,
1597
+ 773
1598
+ ],
1599
+ "page_idx": 9
1600
+ },
1601
+ {
1602
+ "type": "text",
1603
+ "text": "After carefully re-examining our code, we found some unintentional discrepancies that have impacted the performance metrics. These mistakes do not compromise the main contributions of this work. Table 6 shows the revised results.",
1604
+ "bbox": [
1605
+ 81,
1606
+ 776,
1607
+ 482,
1608
+ 833
1609
+ ],
1610
+ "page_idx": 9
1611
+ },
1612
+ {
1613
+ "type": "image",
1614
+ "img_path": "images/ba9a84c99239a12f73b15b5cfc44d6c829045a1947d76cac5466b8d172cf5b97.jpg",
1615
+ "image_caption": [
1616
+ "Figure 11: The Dense Relation Distillation module of DCNet."
1617
+ ],
1618
+ "image_footnote": [],
1619
+ "bbox": [
1620
+ 527,
1621
+ 69,
1622
+ 851,
1623
+ 420
1624
+ ],
1625
+ "page_idx": 9
1626
+ },
1627
+ {
1628
+ "type": "image",
1629
+ "img_path": "images/b8a6a1683837d19915e195f7cff8580a58c514a734073801ef25fd64e0a4c61d.jpg",
1630
+ "image_caption": [
1631
+ "Figure 12: The Correlational Aggregation Module of MetaDETR."
1632
+ ],
1633
+ "image_footnote": [],
1634
+ "bbox": [
1635
+ 529,
1636
+ 462,
1637
+ 898,
1638
+ 839
1639
+ ],
1640
+ "page_idx": 9
1641
+ },
1642
+ {
1643
+ "type": "table",
1644
+ "img_path": "images/fff29c64e62866ca09e0eecafab12ba5220189ecac56ae69daf68df3df2c1b63.jpg",
1645
+ "table_caption": [],
1646
+ "table_footnote": [],
1647
+ "table_body": "<table><tr><td rowspan=\"2\">Method / shot</td><td colspan=\"5\">Novel Set 1</td><td colspan=\"5\">Novel Set 2</td><td colspan=\"5\">Novel Set 3</td></tr><tr><td>1</td><td>2</td><td>3</td><td>5</td><td>10</td><td>1</td><td>2</td><td>3</td><td>5</td><td>10</td><td>1</td><td>2</td><td>3</td><td>5</td><td>10</td></tr><tr><td colspan=\"16\">Single run results:</td></tr><tr><td>FSCE (Sun et al. 2021)</td><td>44.2</td><td>43.8</td><td>51.4</td><td>61.9</td><td>63.4</td><td>27.3</td><td>29.5</td><td>43.5</td><td>44.2</td><td>50.2</td><td>37.2</td><td>41.9</td><td>47.5</td><td>54.6</td><td>58.5</td></tr><tr><td>Meta FR-CNN (Han et al. 2022a)</td><td>43.0</td><td>54.5</td><td>60.6</td><td>66.1</td><td>65.4</td><td>27.7</td><td>35.5</td><td>46.1</td><td>47.8</td><td>51.4</td><td>40.6</td><td>46.4</td><td>53.4</td><td>59.9</td><td>58.6</td></tr><tr><td>Meta-DETR (Zhang et al. 2022)</td><td>40.6</td><td>51.4</td><td>58.0</td><td>59.2</td><td>63.6</td><td>37.0</td><td>36.6</td><td>43.7</td><td>49.1</td><td>54.6</td><td>41.6</td><td>45.9</td><td>52.7</td><td>58.9</td><td>60.6</td></tr><tr><td>FCT (Han et al. 2022b)</td><td>49.9</td><td>57.1</td><td>57.9</td><td>63.2</td><td>67.1</td><td>27.6</td><td>34.5</td><td>43.7</td><td>49.2</td><td>51.2</td><td>39.5</td><td>54.7</td><td>52.3</td><td>57.0</td><td>58.7</td></tr><tr><td>VFA (Han et al. 2023)</td><td>57.7</td><td>64.6</td><td>64.7</td><td>67.2</td><td>67.4</td><td>41.4</td><td>46.2</td><td>51.1</td><td>51.8</td><td>51.6</td><td>48.9</td><td>54.8</td><td>56.6</td><td>59.0</td><td>58.9</td></tr><tr><td>FPD(Previous)</td><td>46.5</td><td>62.3</td><td>65.4</td><td>68.2</td><td>69.3</td><td>32.2</td><td>43.6</td><td>50.3</td><td>52.5</td><td>56.1</td><td>43.2</td><td>53.3</td><td>56.7</td><td>62.1</td><td>64.1</td></tr><tr><td>FPD(Revised)</td><td>48.1</td><td>62.2</td><td>64.0</td><td>67.6</td><td>68.4</td><td>29.8</td><td>43.2</td><td>47.7</td><td>52.0</td><td>53.9</td><td>44.9</td><td>53.8</td><td>58.1</td><td>61.6</td><td>62.9</td></tr></table>",
1648
+ "bbox": [
1649
+ 89,
1650
+ 388,
1651
+ 906,
1652
+ 537
1653
+ ],
1654
+ "page_idx": 10
1655
+ },
1656
+ {
1657
+ "type": "text",
1658
+ "text": "Table 6: Revised FSOD results (AP50) on the three splits of Pascal VOC dataset.",
1659
+ "bbox": [
1660
+ 230,
1661
+ 547,
1662
+ 761,
1663
+ 561
1664
+ ],
1665
+ "page_idx": 10
1666
+ }
1667
+ ]
2401.07xxx/2401.07629/742e5603-ff9f-4acc-8856-c8c986d94821_model.json ADDED
@@ -0,0 +1,2070 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "aside_text",
5
+ "bbox": [
6
+ 0.023,
7
+ 0.26,
8
+ 0.058,
9
+ 0.707
10
+ ],
11
+ "angle": 270,
12
+ "content": "arXiv:2401.07629v2 [cs.CV] 12 Mar 2024"
13
+ },
14
+ {
15
+ "type": "title",
16
+ "bbox": [
17
+ 0.16,
18
+ 0.121,
19
+ 0.838,
20
+ 0.142
21
+ ],
22
+ "angle": 0,
23
+ "content": "Fine-Grained Prototypes Distillation for Few-Shot Object Detection"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.279,
29
+ 0.163,
30
+ 0.722,
31
+ 0.181
32
+ ],
33
+ "angle": 0,
34
+ "content": "Zichen Wang, Bo Yang*, Haonan Yue, Zhenghao Ma"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.24,
40
+ 0.187,
41
+ 0.758,
42
+ 0.217
43
+ ],
44
+ "angle": 0,
45
+ "content": "School of Automation, Northwestern Polytechnical University, Xi'an, China {wangchen1801, hnyue, mazh0819} \\(@\\) mail.nwpu.edu.cn, byang@nwpu.edu.cn"
46
+ },
47
+ {
48
+ "type": "title",
49
+ "bbox": [
50
+ 0.25,
51
+ 0.274,
52
+ 0.314,
53
+ 0.287
54
+ ],
55
+ "angle": 0,
56
+ "content": "Abstract"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.099,
62
+ 0.298,
63
+ 0.465,
64
+ 0.627
65
+ ],
66
+ "angle": 0,
67
+ "content": "Few-shot object detection (FSOD) aims at extending a generic detector for novel object detection with only a few training examples. It attracts great concerns recently due to the practical meanings. Meta-learning has been demonstrated to be an effective paradigm for this task. In general, methods based on meta-learning employ an additional support branch to encode novel examples (a.k.a. support images) into class prototypes, which are then fused with query branch to facilitate the model prediction. However, the class-level prototypes are difficult to precisely generate, and they also lack detailed information, leading to instability in performance. New methods are required to capture the distinctive local context for more robust novel object detection. To this end, we propose to distill the most representative support features into fine-grained prototypes. These prototypes are then assigned into query feature maps based on the matching results, modeling the detailed feature relations between two branches. This process is realized by our Fine-Grained Feature Aggregation (FFA) module. Moreover, in terms of high-level feature fusion, we propose Balanced Class-Agnostic Sampling (B-CAS) strategy and Non-Linear Fusion (NLF) module from different perspectives. They are complementary to each other and depict the high-level feature relations more effectively. Extensive experiments on PASCAL VOC and MS COCO benchmarks show that our method sets a new state-of-the-art performance in most settings. Our code is available at https://github.com/wangchen1801/FPD."
68
+ },
69
+ {
70
+ "type": "title",
71
+ "bbox": [
72
+ 0.227,
73
+ 0.65,
74
+ 0.337,
75
+ 0.665
76
+ ],
77
+ "angle": 0,
78
+ "content": "Introduction"
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.082,
84
+ 0.671,
85
+ 0.479,
86
+ 0.782
87
+ ],
88
+ "angle": 0,
89
+ "content": "Object detection is a fundamental task in computer vision and the methods based on deep learning have been well established over the past few years (Redmon et al. 2016; Ren et al. 2017; Carion et al. 2020; Liu et al. 2016). While remarkable achievements have been made, most of them require a large amount of labeled data to obtain a satisfactory performance, otherwise they are prone to overfitting and hardly generalize to the unknow data."
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.082,
95
+ 0.783,
96
+ 0.481,
97
+ 0.84
98
+ ],
99
+ "angle": 0,
100
+ "content": "Few-shot object detection (FSOD) is a more challenging task to detect object specially in data-scarce scenarios. FSOD assumes that there are sufficient amount of examples for base classes while only k-shot examples for each novel class."
101
+ },
102
+ {
103
+ "type": "image",
104
+ "bbox": [
105
+ 0.547,
106
+ 0.274,
107
+ 0.888,
108
+ 0.505
109
+ ],
110
+ "angle": 0,
111
+ "content": null
112
+ },
113
+ {
114
+ "type": "image_caption",
115
+ "bbox": [
116
+ 0.516,
117
+ 0.52,
118
+ 0.915,
119
+ 0.577
120
+ ],
121
+ "angle": 0,
122
+ "content": "Figure 1: Overview of the proposed method, which we denote as FPD. In addition to class-level prototypes, we distill representative detailed features into fine-grained prototypes, enabling more robust novel object detection."
123
+ },
124
+ {
125
+ "type": "text",
126
+ "bbox": [
127
+ 0.515,
128
+ 0.609,
129
+ 0.915,
130
+ 0.845
131
+ ],
132
+ "angle": 0,
133
+ "content": "Therefore, the key question is how to transfer the knowledge learnt from base classes to the novel classes. Transfer learning based methods (Wang et al. 2020; Cao et al. 2021; Qiao et al. 2021) focus on fine-tuning the model more effectively. They use the same architecture as generic object detection, additionally with advanced techniques such as parameter freezing and gradient decoupling to improve performance. Meta-learning based methods (Kang et al. 2019; Wang, Ramanan, and Hebert 2019; Yan et al. 2019; Han et al. 2023), instead, follow the idea: learn how to learn the new tasks rapidly. As illustrated in Figure 2, an additional support branch is incorporated to encode support images into class-level prototypes, which function as dynamic parameters to interact with the query branch. In this way, the connections between novel examples and the model predictions are enhanced, thereby improving the generalization ability and learning the new tasks more quickly."
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.516,
139
+ 0.847,
140
+ 0.913,
141
+ 0.889
142
+ ],
143
+ "angle": 0,
144
+ "content": "This work studies the meta-learning based FSOD and aims at realizing a more effective method. In general, features from the two branches are fused on top of the framework to make"
145
+ },
146
+ {
147
+ "type": "page_footnote",
148
+ "bbox": [
149
+ 0.082,
150
+ 0.851,
151
+ 0.48,
152
+ 0.89
153
+ ],
154
+ "angle": 0,
155
+ "content": "*Corresponding author. Copyright © 2024, Association for the Advancement of Artificial Intelligence (www.aaai.org). All rights reserved."
156
+ }
157
+ ],
158
+ [
159
+ {
160
+ "type": "image",
161
+ "bbox": [
162
+ 0.092,
163
+ 0.069,
164
+ 0.918,
165
+ 0.276
166
+ ],
167
+ "angle": 0,
168
+ "content": null
169
+ },
170
+ {
171
+ "type": "image_caption",
172
+ "bbox": [
173
+ 0.149,
174
+ 0.288,
175
+ 0.846,
176
+ 0.304
177
+ ],
178
+ "angle": 0,
179
+ "content": "Figure 2: The overall architecture of our method. FFA and NLF are proposed to improve the performance."
180
+ },
181
+ {
182
+ "type": "text",
183
+ "bbox": [
184
+ 0.082,
185
+ 0.329,
186
+ 0.48,
187
+ 0.399
188
+ ],
189
+ "angle": 0,
190
+ "content": "the final prediction (Kang et al. 2019; Yan et al. 2019; Xiao and Marlet 2020), while most of the layers are separated and do not exchange information. This hinders the model from learning the correlations among detailed features especially in data-scarce scenarios."
191
+ },
192
+ {
193
+ "type": "text",
194
+ "bbox": [
195
+ 0.082,
196
+ 0.4,
197
+ 0.481,
198
+ 0.581
199
+ ],
200
+ "angle": 0,
201
+ "content": "DCNet (Hu et al. 2021) proposes to directly match the mid-level support features into query features in a pixel-wise manner, which enables the relation modeling of detailed local context. However, this approach has its limitations in terms of effect and implementation. First, the mid-level features with an extensive range of patterns are intricate and complex, thus the model might struggle to capture the most critical details. Second, directly matching between dense feature maps is inefficiency and will cost more computational resources. Third, this approach has difficulty in transitioning seamlessly from the training phase to the testing phase, as it can not integrate the mid-level support features across different shots to boost the performance."
202
+ },
203
+ {
204
+ "type": "text",
205
+ "bbox": [
206
+ 0.082,
207
+ 0.583,
208
+ 0.48,
209
+ 0.832
210
+ ],
211
+ "angle": 0,
212
+ "content": "To address the aforementioned issues, we propose a novel Fine-Grained Feature Aggregation (FFA) module to aggregate the mid-level features. As illustrated in Figure 3, different from DCNet, we propose to distill features into fine-grained prototypes. These prototypes, which reside in a highly refined and reduced feature space, embody the most distinctive and representative details of the support images. Specifically, we employ a set of embeddings following the object queries in DETR (Carion et al. 2020) to distill prototypes. Rather than being encoded with positional information and representing specific objects, the embeddings here function within the feature space and thereby are denoted as feature queries. We give each class a unique set of feature queries to distill prototypes independently. It can avoid confusion and is a key factor for our method to work. The distilled prototypes are then assigned into query feature map based on the matching results, modeling the fine-grained relations and highlighting the features with similar details."
213
+ },
214
+ {
215
+ "type": "text",
216
+ "bbox": [
217
+ 0.082,
218
+ 0.833,
219
+ 0.482,
220
+ 0.89
221
+ ],
222
+ "angle": 0,
223
+ "content": "The proposed FFA enables a more effective feature aggregation by focusing on the key information encapsulated within prototypes. This method also reduces the computational complexity by avoiding the directly matching between"
224
+ },
225
+ {
226
+ "type": "text",
227
+ "bbox": [
228
+ 0.516,
229
+ 0.33,
230
+ 0.913,
231
+ 0.386
232
+ ],
233
+ "angle": 0,
234
+ "content": "dense feature maps. Furthermore, it can naturally transition into the testing phase through a weighted sum of prototypes across different shots, preserving the full potential derived from the training phase."
235
+ },
236
+ {
237
+ "type": "text",
238
+ "bbox": [
239
+ 0.516,
240
+ 0.387,
241
+ 0.915,
242
+ 0.692
243
+ ],
244
+ "angle": 0,
245
+ "content": "In terms of high-level feature aggregation, we revisit the previous methods and propose two improvements from different perspectives. First, we propose Balanced Class-Agnostic Sampling (B-CAS) strategy to control the ratio of support classes aggregated with query features. Meta R-CNN (Yan et al. 2019) adopts a simple class-specific aggregation scheme where only the features having the same classes are aggregated. While VFA (Han et al. 2023) proposes a class-agnostic aggregation scheme which randomly selects the support classes to reduce class bias. Our insight is that different support classes are served as positive and negative samples, thereby the balanced sampling is required to keep the most important positive samples from being overwhelmed. Second, many works (Kang et al. 2019; Yan et al. 2019; Han et al. 2023) employ element-wise multiplication to explore the relations within the same classes. However, it is not compatible with our proposed B-CAS which incorporates the feature aggregation between different classes. To solve this issue, we propose a stronger Non-Linear Fusion (NLF) module motivated by (Han et al. 2022a; Xiao and Marlet 2020) to fuse features more effectively. Our contributions can be summarized as follows:"
246
+ },
247
+ {
248
+ "type": "text",
249
+ "bbox": [
250
+ 0.525,
251
+ 0.705,
252
+ 0.914,
253
+ 0.775
254
+ ],
255
+ "angle": 0,
256
+ "content": "- We propose to distill support features into fine-grained prototypes before being integrated into query feature maps, which can help the model grasp the key information. They are implemented in the Fine-Grained Feature Aggregation (FFA) module."
257
+ },
258
+ {
259
+ "type": "text",
260
+ "bbox": [
261
+ 0.525,
262
+ 0.783,
263
+ 0.915,
264
+ 0.84
265
+ ],
266
+ "angle": 0,
267
+ "content": "- We propose Balanced Class-Agnostic Sampling (B-CAS) strategy and Non-Linear Fusion (NLF) module. They are complementary to each other and can fuse high-level features more effectively."
268
+ },
269
+ {
270
+ "type": "text",
271
+ "bbox": [
272
+ 0.525,
273
+ 0.847,
274
+ 0.915,
275
+ 0.889
276
+ ],
277
+ "angle": 0,
278
+ "content": "- Extensive experiments illustrate that our method significantly improves the performance and achieves state-of-the-art results on the two widely used FSOD benchmarks."
279
+ },
280
+ {
281
+ "type": "list",
282
+ "bbox": [
283
+ 0.525,
284
+ 0.705,
285
+ 0.915,
286
+ 0.889
287
+ ],
288
+ "angle": 0,
289
+ "content": null
290
+ }
291
+ ],
292
+ [
293
+ {
294
+ "type": "title",
295
+ "bbox": [
296
+ 0.218,
297
+ 0.068,
298
+ 0.347,
299
+ 0.083
300
+ ],
301
+ "angle": 0,
302
+ "content": "Related Works"
303
+ },
304
+ {
305
+ "type": "title",
306
+ "bbox": [
307
+ 0.085,
308
+ 0.091,
309
+ 0.284,
310
+ 0.108
311
+ ],
312
+ "angle": 0,
313
+ "content": "General Object Detection"
314
+ },
315
+ {
316
+ "type": "text",
317
+ "bbox": [
318
+ 0.082,
319
+ 0.113,
320
+ 0.481,
321
+ 0.253
322
+ ],
323
+ "angle": 0,
324
+ "content": "Deep learning based object detection has been extensively studied in recent years. The well-established object detectors can be categorized into one-stage and two-stage methods. One-stage detectors (Redmon et al. 2016; Liu et al. 2016; Lin et al. 2017b) directly make predictions upon the CNN feature maps. While two-stage detectors (Ren et al. 2017; He et al. 2017) additionally employ a Region Proposal Network (RPN) to generate object proposals, which will be further refined into the final predictions. Both of them require the predefiend dense anchors to generate candidates."
325
+ },
326
+ {
327
+ "type": "text",
328
+ "bbox": [
329
+ 0.082,
330
+ 0.253,
331
+ 0.481,
332
+ 0.363
333
+ ],
334
+ "angle": 0,
335
+ "content": "Recently, anchor-free detectors DETR (Carion et al. 2020) and Deformable DETR (Zhu et al. 2020) have been developed and are drawing more attention. They use a CNN backbone combining with Transformer encoder-decoders (Vaswani et al. 2017) for end-to-end object detection. A set of object queries are proposed to replace the anchor boxes. They will be refined into the detected objects layer by layer through Transformer decoders."
336
+ },
337
+ {
338
+ "type": "text",
339
+ "bbox": [
340
+ 0.084,
341
+ 0.365,
342
+ 0.481,
343
+ 0.408
344
+ ],
345
+ "angle": 0,
346
+ "content": "We employ the two-stage Faster R-CNN (Ren et al. 2017) framework to build our FSOD detector, and draw inspirations from DETR (Carion et al. 2020) into our approach."
347
+ },
348
+ {
349
+ "type": "title",
350
+ "bbox": [
351
+ 0.085,
352
+ 0.422,
353
+ 0.295,
354
+ 0.438
355
+ ],
356
+ "angle": 0,
357
+ "content": "Few-Shot Object Detection"
358
+ },
359
+ {
360
+ "type": "text",
361
+ "bbox": [
362
+ 0.082,
363
+ 0.444,
364
+ 0.481,
365
+ 0.612
366
+ ],
367
+ "angle": 0,
368
+ "content": "Few-Shot Object Detection (FSOD), which studies the detection task in data-scarce situations, has been attracting an increased interest recently. LSTD (Chen et al. 2018) first proposes a transfer learning based approach to detect novel objects in a FSOD data setting. TFA (Wang et al. 2020) utilizes a cosine similarity based classifier and only fine-tunes the last layer with novel examples, achieving a comparable results with other complex methods. DeFRCN (Qiao et al. 2021) employs advanced gradient decoupling technique into the Faster R-CNN framework and integrates an offline prototypical calibration block to refine the classification results, which achieves an impressive performance."
369
+ },
370
+ {
371
+ "type": "text",
372
+ "bbox": [
373
+ 0.082,
374
+ 0.612,
375
+ 0.483,
376
+ 0.89
377
+ ],
378
+ "angle": 0,
379
+ "content": "The meta-learning is also a promising paradigm for FSOD. FSRW (Kang et al. 2019) proposes to re-weight the YOLOv2 feature maps along channel dimension using proposed reweighting vectors, which can highlight the relevant features. Meta R-CNN (Yan et al. 2019) adopts the Faster R-CNN framework to build a two-branch based siamese network. It processes query and support images in parallel to produce the Region of Interest (RoI) features and class prototypes, which are then fused to make predictions. Instead of learning a softmax-based classifier for all classes, (Han et al. 2022a) constructs a meta-classifier through feature alignment and non-linear matching. It calculates the similarity between query-support feature maps, producing binary classification results for novel classes. VFA (Han et al. 2023) introduces variational feature learning into Meta R-CNN, further boosting its performance. Recently, there are some works incorporate meta-learning into other advanced frameworks. Meta-DETR (Zhang et al. 2022) employs Deformable DETR (Zhu et al. 2020) to build a few-shot detector. (Han et al. 2022b) utilizes PVT (Wang et al. 2021) to construct"
380
+ },
381
+ {
382
+ "type": "text",
383
+ "bbox": [
384
+ 0.516,
385
+ 0.07,
386
+ 0.912,
387
+ 0.096
388
+ ],
389
+ "angle": 0,
390
+ "content": "a fully cross transformer for few-shot detection. They all achieve remarkable results."
391
+ },
392
+ {
393
+ "type": "text",
394
+ "bbox": [
395
+ 0.516,
396
+ 0.098,
397
+ 0.913,
398
+ 0.18
399
+ ],
400
+ "angle": 0,
401
+ "content": "A two stage training paradigm has been widely adopted in both transfer learning and meta-learning based methods due to its effectiveness. At the base training stage, the model is trained on abundant base class examples. While at the fine-tuning stage, the model is fine-tuned only with \\( K \\)-shot examples for each base and novel class."
402
+ },
403
+ {
404
+ "type": "text",
405
+ "bbox": [
406
+ 0.516,
407
+ 0.181,
408
+ 0.913,
409
+ 0.223
410
+ ],
411
+ "angle": 0,
412
+ "content": "Our approach is based on Meta R-CNN and we propose to distill fine-grained prototypes for effectively exploiting the relations between detailed features."
413
+ },
414
+ {
415
+ "type": "title",
416
+ "bbox": [
417
+ 0.651,
418
+ 0.239,
419
+ 0.78,
420
+ 0.256
421
+ ],
422
+ "angle": 0,
423
+ "content": "Our Approach"
424
+ },
425
+ {
426
+ "type": "text",
427
+ "bbox": [
428
+ 0.516,
429
+ 0.26,
430
+ 0.913,
431
+ 0.303
432
+ ],
433
+ "angle": 0,
434
+ "content": "In this section, we first introduce the task definition and the overall architecture of our model. Then we will elaborate the fine-grained and high-level feature aggregation."
435
+ },
436
+ {
437
+ "type": "title",
438
+ "bbox": [
439
+ 0.517,
440
+ 0.315,
441
+ 0.64,
442
+ 0.33
443
+ ],
444
+ "angle": 0,
445
+ "content": "Task Definition"
446
+ },
447
+ {
448
+ "type": "text",
449
+ "bbox": [
450
+ 0.516,
451
+ 0.335,
452
+ 0.913,
453
+ 0.433
454
+ ],
455
+ "angle": 0,
456
+ "content": "We adopt the standard FSOD setting following (Kang et al. 2019; Wang et al. 2020). Specifically, given a dataset \\(\\mathcal{D}\\) with two sets of classes \\(C_{base}\\) and \\(C_{novel}\\), where each class in \\(C_{base}\\) has abundant training data while each class in \\(C_{novel}\\) has only \\(K\\)-shot annotated objects, FSOD aims at detecting the objects of \\(C_{base} \\cup C_{novel}\\) using the detector trained on \\(\\mathcal{D}\\). Please note that \\(C_{base} \\cap C_{novel} = \\emptyset\\)."
457
+ },
458
+ {
459
+ "type": "title",
460
+ "bbox": [
461
+ 0.517,
462
+ 0.446,
463
+ 0.708,
464
+ 0.461
465
+ ],
466
+ "angle": 0,
467
+ "content": "The Model Architecture"
468
+ },
469
+ {
470
+ "type": "text",
471
+ "bbox": [
472
+ 0.516,
473
+ 0.466,
474
+ 0.915,
475
+ 0.69
476
+ ],
477
+ "angle": 0,
478
+ "content": "As illustrated in Figure 2, our model is based on Meta R-CNN, which is a siamese network with query branch and support branch that share a same backbone. Typically, we use the first three stages of ResNet-50/101 backbone (He et al. 2016) to extract mid-level features for both query images and support images. Then our proposed FFA module is employed to distill the fine-grained prototypes and assign them into the query branch. Subsequently, we use the last stage (i.e. stage four) of the backbone to extract high-level features for both branches, which produces RoI features and class-level prototypes, respectively. They are further processed by the proposed NLF module, following by the detection head to make the final prediction. We would like to mention that the RPN is fed with the query features which have already interacted with the support branch. It gives the RPN more ability learning to identify the new instances."
479
+ },
480
+ {
481
+ "type": "title",
482
+ "bbox": [
483
+ 0.517,
484
+ 0.702,
485
+ 0.788,
486
+ 0.718
487
+ ],
488
+ "angle": 0,
489
+ "content": "Fine-Grained Feature Aggregation"
490
+ },
491
+ {
492
+ "type": "text",
493
+ "bbox": [
494
+ 0.516,
495
+ 0.722,
496
+ 0.915,
497
+ 0.89
498
+ ],
499
+ "angle": 0,
500
+ "content": "The Fine-Grained Feature Aggregation (FFA) module is the key component of our proposed method, which is a class-agnostic aggregator that matches all classes of support features into query features. It models inter-class relations in the early stage of the detection framework where the features are low-level and have more detailed information. Instead of directly performing feature matching, we propose to distill the representative support features into fine-grained prototypes. These prototypes are then assigned into query feature maps based on the matching results. FFA can help the model distinguish foreground from background and learn the similarities and differences between object classes. We will elaborate the"
501
+ }
502
+ ],
503
+ [
504
+ {
505
+ "type": "text",
506
+ "bbox": [
507
+ 0.083,
508
+ 0.069,
509
+ 0.483,
510
+ 0.126
511
+ ],
512
+ "angle": 0,
513
+ "content": "prototypes distillation and feature assignment in the following subsections. We also discuss our strategy to transfer this method to novel classes, as well as test-time natural integration of prototypes across different shots."
514
+ },
515
+ {
516
+ "type": "text",
517
+ "bbox": [
518
+ 0.082,
519
+ 0.131,
520
+ 0.48,
521
+ 0.257
522
+ ],
523
+ "angle": 0,
524
+ "content": "Prototypes Distillation Inspired by DETR, we incorporate a new component which is a set of learnable embeddings to distill prototypes. Different from object queries in DETR, which are encoded with positional information and are refined into a specific instance layer by layer, the embeddings here work as a guidance to refine the entire support feature space into a set of representative features. It can filter out the noise and ease the training. We refer to these embeddings as feature queries."
525
+ },
526
+ {
527
+ "type": "text",
528
+ "bbox": [
529
+ 0.083,
530
+ 0.256,
531
+ 0.481,
532
+ 0.344
533
+ ],
534
+ "angle": 0,
535
+ "content": "We employ the cross-attention mechanism to perform the prototypes distillation. Specifically, given a support feature map \\( X_{s} \\in \\mathbb{R}^{hw \\times d} \\) and a set of feature queries \\( q \\in \\mathbb{R}^{n \\times d'} \\), where \\( hw \\) denote the height and width, \\( d \\) and \\( d' \\) is the feature dimension, and \\( n \\) is the number of feature queries, the affinity matrix is calculated through a matching operation:"
536
+ },
537
+ {
538
+ "type": "equation",
539
+ "bbox": [
540
+ 0.188,
541
+ 0.349,
542
+ 0.48,
543
+ 0.384
544
+ ],
545
+ "angle": 0,
546
+ "content": "\\[\nA = \\operatorname {s o f t m a x} \\left(\\frac {q \\left(X _ {s} W\\right) ^ {T}}{\\sqrt {d ^ {\\prime}}}\\right) \\tag {1}\n\\]"
547
+ },
548
+ {
549
+ "type": "text",
550
+ "bbox": [
551
+ 0.083,
552
+ 0.389,
553
+ 0.482,
554
+ 0.446
555
+ ],
556
+ "angle": 0,
557
+ "content": "where \\(W\\) is a linear projection to project \\(X_{s}\\) in to the latent space with dimensionality \\(d^{\\prime}\\), and the softmax function is performed along \\(hw\\) dimension. Subsequently, the fine-grained prototypes can be distilled from \\(X_{s}\\) via:"
558
+ },
559
+ {
560
+ "type": "equation",
561
+ "bbox": [
562
+ 0.222,
563
+ 0.453,
564
+ 0.48,
565
+ 0.469
566
+ ],
567
+ "angle": 0,
568
+ "content": "\\[\np = A X _ {s} + E _ {c l s} \\tag {2}\n\\]"
569
+ },
570
+ {
571
+ "type": "text",
572
+ "bbox": [
573
+ 0.083,
574
+ 0.476,
575
+ 0.48,
576
+ 0.531
577
+ ],
578
+ "angle": 0,
579
+ "content": "where the affinity matrix is applied directly on the support feature map. We do not project \\( X_{s} \\) to keep feature space the same. An additional class embedding \\( E_{cls} \\) is added to retain the class information."
580
+ },
581
+ {
582
+ "type": "text",
583
+ "bbox": [
584
+ 0.083,
585
+ 0.531,
586
+ 0.481,
587
+ 0.63
588
+ ],
589
+ "angle": 0,
590
+ "content": "We would like to mention that each class has its exclusive feature queries. This is different from object queries in DETR and is a crucial factor for our method to work. It means that \\( q \\) is the feature queries of one class and is part of \\( Q \\in \\mathbb{R}^{nc \\times d} \\), where \\( Q \\) denotes the feature queries of all classes. This setting makes feature queries class-relevant and avoids them getting overwhelmed and confused by too many object classes."
591
+ },
592
+ {
593
+ "type": "text",
594
+ "bbox": [
595
+ 0.083,
596
+ 0.636,
597
+ 0.483,
598
+ 0.76
599
+ ],
600
+ "angle": 0,
601
+ "content": "Prototypes Assignment We densely match the fine-grained prototypes into query feature map to achieve the prototypes assignment. Considering that the background area should not be matched to any prototypes that represent salient object features, we incorporate a set of embeddings to serve as background prototypes. We also use the cross-attention mechanism to assign prototypes. Specifically, given a query feature map \\( X_{q} \\in \\mathbb{R}^{HW \\times d} \\), prototypes assignment is performed via:"
602
+ },
603
+ {
604
+ "type": "equation",
605
+ "bbox": [
606
+ 0.163,
607
+ 0.765,
608
+ 0.48,
609
+ 0.8
610
+ ],
611
+ "angle": 0,
612
+ "content": "\\[\nA ^ {\\prime} = \\operatorname {s o f t m a x} \\left(\\frac {\\left(X _ {q} W ^ {\\prime}\\right) \\left(P W ^ {\\prime}\\right) ^ {T}}{\\sqrt {d ^ {\\prime}}}\\right) \\tag {3}\n\\]"
613
+ },
614
+ {
615
+ "type": "equation",
616
+ "bbox": [
617
+ 0.178,
618
+ 0.805,
619
+ 0.48,
620
+ 0.822
621
+ ],
622
+ "angle": 0,
623
+ "content": "\\[\nP = \\operatorname {c o n c a t} \\left(p _ {1}, p _ {2}, \\dots , p _ {c}, p _ {b g}\\right) \\tag {4}\n\\]"
624
+ },
625
+ {
626
+ "type": "equation",
627
+ "bbox": [
628
+ 0.213,
629
+ 0.824,
630
+ 0.48,
631
+ 0.842
632
+ ],
633
+ "angle": 0,
634
+ "content": "\\[\nX _ {q} ^ {\\prime} = X _ {q} + \\alpha \\cdot A ^ {\\prime} P \\tag {5}\n\\]"
635
+ },
636
+ {
637
+ "type": "text",
638
+ "bbox": [
639
+ 0.083,
640
+ 0.846,
641
+ 0.481,
642
+ 0.89
643
+ ],
644
+ "angle": 0,
645
+ "content": "where \\( P \\in \\mathbb{R}^{(nc + n_{bg}) \\times d} \\) is the prototypes of \\( c \\) support classes with additional \\( n_{bg} \\) background classes, and \\( W' \\) is a linear projection shared by \\( X_q \\) and \\( P \\) which projects them into the"
646
+ },
647
+ {
648
+ "type": "image",
649
+ "bbox": [
650
+ 0.527,
651
+ 0.071,
652
+ 0.903,
653
+ 0.432
654
+ ],
655
+ "angle": 0,
656
+ "content": null
657
+ },
658
+ {
659
+ "type": "image_caption",
660
+ "bbox": [
661
+ 0.516,
662
+ 0.447,
663
+ 0.916,
664
+ 0.492
665
+ ],
666
+ "angle": 0,
667
+ "content": "Figure 3: The architecture of the Fine-Grained Feature Aggregation (FFA) module. It can be divided into Prototypes, Distillation and Prototypes Assignment."
668
+ },
669
+ {
670
+ "type": "text",
671
+ "bbox": [
672
+ 0.516,
673
+ 0.515,
674
+ 0.914,
675
+ 0.571
676
+ ],
677
+ "angle": 0,
678
+ "content": "same latent space. The prototypes are assigned into query feature map based on the affinity matrix \\( A' \\), which produces the aggregated query features. The \\( \\alpha \\) is a learnable parameter initialized as zero to help stabilize the training."
679
+ },
680
+ {
681
+ "type": "text",
682
+ "bbox": [
683
+ 0.516,
684
+ 0.577,
685
+ 0.915,
686
+ 0.746
687
+ ],
688
+ "angle": 0,
689
+ "content": "Transferring to Novel Classes At the base training stage, the feature queries of base classes are randomly initialized and well trained. However, at the fine-tuning stage, training the feature queries from scratch becomes challenging due to the limited novel class examples, which means that an effective knowledge transfer method is required. To address this issue, we propose to duplicate the most compatible feature queries from the base classes to serve as those in the novel classes. To be specific, given feature queries of base classes \\( Q \\in \\mathbb{R}^{nc \\times d'} \\) and support feature map of a novel class \\( X_{ns} \\in \\mathbb{R}^{hw \\times d} \\), the compatibility matrix and the weight of each feature query can be obtained via:"
690
+ },
691
+ {
692
+ "type": "equation",
693
+ "bbox": [
694
+ 0.63,
695
+ 0.75,
696
+ 0.913,
697
+ 0.77
698
+ ],
699
+ "angle": 0,
700
+ "content": "\\[\nC = \\operatorname {t o p k} \\left(Q \\left(X _ {n s} W\\right) ^ {T}\\right) \\tag {6}\n\\]"
701
+ },
702
+ {
703
+ "type": "equation",
704
+ "bbox": [
705
+ 0.599,
706
+ 0.775,
707
+ 0.913,
708
+ 0.817
709
+ ],
710
+ "angle": 0,
711
+ "content": "\\[\nw e i g h t _ {i} = \\sum_ {j = 0} ^ {k} C _ {i j}, i = 1, 2, \\dots , n c \\tag {7}\n\\]"
712
+ },
713
+ {
714
+ "type": "text",
715
+ "bbox": [
716
+ 0.516,
717
+ 0.819,
718
+ 0.914,
719
+ 0.89
720
+ ],
721
+ "angle": 0,
722
+ "content": "where \\( \\text{topk} \\) is performed along \\( h w \\) dimension to filter out irrelevant locations. We select \\( n \\) feature queries for each novel class based on the largest weight. Instead of sharing the same fecture queries with base classes, they are created as a duplicate and can be trained independently."
723
+ }
724
+ ],
725
+ [
726
+ {
727
+ "type": "table",
728
+ "bbox": [
729
+ 0.084,
730
+ 0.066,
731
+ 0.916,
732
+ 0.379
733
+ ],
734
+ "angle": 0,
735
+ "content": "<table><tr><td rowspan=\"2\">Method / shot</td><td colspan=\"5\">Novel Set 1</td><td colspan=\"5\">Novel Set 2</td><td colspan=\"5\">Novel Set 3</td></tr><tr><td>1</td><td>2</td><td>3</td><td>5</td><td>10</td><td>1</td><td>2</td><td>3</td><td>5</td><td>10</td><td>1</td><td>2</td><td>3</td><td>5</td><td>10</td></tr><tr><td colspan=\"16\">Single run results:</td></tr><tr><td>FSRW (Kang et al. 2019)</td><td>14.8</td><td>15.5</td><td>26.7</td><td>33.9</td><td>47.2</td><td>15.7</td><td>15.3</td><td>22.7</td><td>30.1</td><td>40.5</td><td>21.3</td><td>25.6</td><td>28.4</td><td>42.8</td><td>45.9</td></tr><tr><td>Meta R-CNN (Yan et al. 2019)</td><td>19.9</td><td>25.5</td><td>35.0</td><td>45.7</td><td>51.5</td><td>10.4</td><td>19.4</td><td>29.6</td><td>34.8</td><td>45.4</td><td>14.3</td><td>18.2</td><td>27.5</td><td>41.2</td><td>48.1</td></tr><tr><td>TFA w/ cos (Wang et al. 2020)</td><td>39.8</td><td>36.1</td><td>44.7</td><td>55.7</td><td>56.0</td><td>23.5</td><td>26.9</td><td>34.1</td><td>35.1</td><td>39.1</td><td>30.8</td><td>34.8</td><td>42.8</td><td>49.5</td><td>49.8</td></tr><tr><td>MPSR (Wu et al. 2020)</td><td>41.7</td><td>42.5</td><td>51.4</td><td>55.2</td><td>61.8</td><td>24.4</td><td>29.3</td><td>39.2</td><td>39.9</td><td>47.8</td><td>35.6</td><td>41.8</td><td>42.3</td><td>48.0</td><td>49.7</td></tr><tr><td>Retentive (Fan et al. 2021)</td><td>42.4</td><td>45.8</td><td>45.9</td><td>53.7</td><td>56.1</td><td>21.7</td><td>27.8</td><td>35.2</td><td>37.0</td><td>40.3</td><td>30.2</td><td>37.6</td><td>43.0</td><td>49.7</td><td>50.1</td></tr><tr><td>FSCE (Sun et al. 2021)</td><td>44.2</td><td>43.8</td><td>51.4</td><td>61.9</td><td>63.4</td><td>27.3</td><td>29.5</td><td>43.5</td><td>44.2</td><td>50.2</td><td>37.2</td><td>41.9</td><td>47.5</td><td>54.6</td><td>58.5</td></tr><tr><td>Meta FR-CNN (Han et al. 2022a)</td><td>43.0</td><td>54.5</td><td>60.6</td><td>66.1</td><td>65.4</td><td>27.7</td><td>35.5</td><td>46.1</td><td>47.8</td><td>51.4</td><td>40.6</td><td>46.4</td><td>53.4</td><td>59.9</td><td>58.6</td></tr><tr><td>Meta-DETR (Zhang et al. 2022)</td><td>40.6</td><td>51.4</td><td>58.0</td><td>59.2</td><td>63.6</td><td>37.0</td><td>36.6</td><td>43.7</td><td>49.1</td><td>54.6</td><td>41.6</td><td>45.9</td><td>52.7</td><td>58.9</td><td>60.6</td></tr><tr><td>FCT (Han et al. 2022b)</td><td>49.9</td><td>57.1</td><td>57.9</td><td>63.2</td><td>67.1</td><td>27.6</td><td>34.5</td><td>43.7</td><td>49.2</td><td>51.2</td><td>39.5</td><td>54.7</td><td>52.3</td><td>57.0</td><td>58.7</td></tr><tr><td>VFA (Han et al. 2023)</td><td>57.7</td><td>64.6</td><td>64.7</td><td>67.2</td><td>67.4</td><td>41.4</td><td>46.2</td><td>51.1</td><td>51.8</td><td>51.6</td><td>48.9</td><td>54.8</td><td>56.6</td><td>59.0</td><td>58.9</td></tr><tr><td>FPD(Ours)</td><td>48.1</td><td>62.2</td><td>64.0</td><td>67.6</td><td>68.4</td><td>29.8</td><td>43.2</td><td>47.7</td><td>52.0</td><td>53.9</td><td>44.9</td><td>53.8</td><td>58.1</td><td>61.6</td><td>62.9</td></tr><tr><td colspan=\"16\">Average results over multiple runs:</td></tr><tr><td>FSDetView (Xiao and Marlet 2020)</td><td>24.2</td><td>35.3</td><td>42.2</td><td>49.1</td><td>57.4</td><td>21.6</td><td>24.6</td><td>31.9</td><td>37.0</td><td>45.7</td><td>21.2</td><td>30.0</td><td>37.2</td><td>43.8</td><td>49.6</td></tr><tr><td>DCNet (Hu et al. 
2021)</td><td>33.9</td><td>37.4</td><td>43.7</td><td>51.1</td><td>59.6</td><td>23.2</td><td>24.8</td><td>30.6</td><td>36.7</td><td>46.6</td><td>32.3</td><td>34.9</td><td>39.7</td><td>42.6</td><td>50.7</td></tr><tr><td>Meta-DETR (Zhang et al. 2022)</td><td>35.1</td><td>49.0</td><td>53.2</td><td>57.4</td><td>62.0</td><td>27.9</td><td>32.3</td><td>38.4</td><td>43.2</td><td>51.8</td><td>34.9</td><td>41.8</td><td>47.1</td><td>54.1</td><td>58.2</td></tr><tr><td>DeFRCN (Qiao et al. 2021)</td><td>40.2</td><td>53.6</td><td>58.2</td><td>63.6</td><td>66.5</td><td>29.5</td><td>39.7</td><td>43.4</td><td>48.1</td><td>52.8</td><td>35.0</td><td>38.3</td><td>52.9</td><td>57.7</td><td>60.8</td></tr><tr><td>FCT (Han et al. 2022b)</td><td>38.5</td><td>49.6</td><td>53.5</td><td>59.8</td><td>64.3</td><td>25.9</td><td>34.2</td><td>40.1</td><td>44.9</td><td>47.4</td><td>34.7</td><td>43.9</td><td>49.3</td><td>53.1</td><td>56.3</td></tr><tr><td>VFA (Han et al. 2023)</td><td>47.4</td><td>54.4</td><td>58.5</td><td>64.5</td><td>66.5</td><td>33.7</td><td>38.2</td><td>43.5</td><td>48.3</td><td>52.4</td><td>43.8</td><td>48.9</td><td>53.3</td><td>58.1</td><td>60.0</td></tr><tr><td>FPD(Ours)</td><td>41.5</td><td>52.8</td><td>58.4</td><td>64.9</td><td>67.1</td><td>28.2</td><td>38.7</td><td>43.8</td><td>50.3</td><td>53.6</td><td>34.9</td><td>48.6</td><td>54.0</td><td>58.4</td><td>61.5</td></tr></table>"
736
+ },
737
+ {
738
+ "type": "table_caption",
739
+ "bbox": [
740
+ 0.082,
741
+ 0.388,
742
+ 0.913,
743
+ 0.418
744
+ ],
745
+ "angle": 0,
746
+ "content": "Table 1: FSOD results (AP50) on the three splits of Pascal VOC dataset. We report both single run and multiple run results. Bold and Underline indicate the best and the second best results."
747
+ },
748
+ {
749
+ "type": "text",
750
+ "bbox": [
751
+ 0.082,
752
+ 0.444,
753
+ 0.481,
754
+ 0.557
755
+ ],
756
+ "angle": 0,
757
+ "content": "Test-Time Natural Integration A simple method to integrate fine-grained prototypes across different shots is to take the average. However, the detailed features represented by a feature query may not appear in some support images. Directly averaging might hurt the performance. Therefore, we compute a weighted sum using the aforementioned weight. Specifically, given \\( K \\) shot support images in a class, which produces \\( K \\) prototypes, the integration is performed via:"
758
+ },
759
+ {
760
+ "type": "equation",
761
+ "bbox": [
762
+ 0.197,
763
+ 0.564,
764
+ 0.48,
765
+ 0.605
766
+ ],
767
+ "angle": 0,
768
+ "content": "\\[\np _ {a v g} = \\sum_ {s = 1} ^ {K} w e i g h t _ {s} ^ {*} \\cdot p _ {s} \\tag {8}\n\\]"
769
+ },
770
+ {
771
+ "type": "text",
772
+ "bbox": [
773
+ 0.082,
774
+ 0.613,
775
+ 0.481,
776
+ 0.682
777
+ ],
778
+ "angle": 0,
779
+ "content": "where weight* denote the weight after the softmax operation across different shot, \\( p_{avg} \\) is the integrated prototypes. This approach effectively filters out the prototypes that are not compatible with the current feature query, improving the robustness of our detector."
780
+ },
781
+ {
782
+ "type": "title",
783
+ "bbox": [
784
+ 0.084,
785
+ 0.695,
786
+ 0.337,
787
+ 0.71
788
+ ],
789
+ "angle": 0,
790
+ "content": "High-Level Feature Aggregation"
791
+ },
792
+ {
793
+ "type": "text",
794
+ "bbox": [
795
+ 0.082,
796
+ 0.714,
797
+ 0.48,
798
+ 0.785
799
+ ],
800
+ "angle": 0,
801
+ "content": "Feature aggregation between RoI features and class-level prototypes is a crucial step for meta-learning based FSOD, where the high-level semantic information is aligned to make the final prediction. We revisit the conventional methods and propose two improvements from different perspectives."
802
+ },
803
+ {
804
+ "type": "text",
805
+ "bbox": [
806
+ 0.082,
807
+ 0.791,
808
+ 0.481,
809
+ 0.89
810
+ ],
811
+ "angle": 0,
812
+ "content": "Balanced Class-Agnostic Sampling Meta R-CNN adopts a simple class-specific aggregation scheme where the RoI features are aggregated only with the prototypes of the same class. While VFA proposes a class-agnostic aggregation scheme which aggregates RoI features with randomly selected class prototypes to reduce class bias. Nonetheless, we argue that the completely random sampling might disturb"
813
+ },
814
+ {
815
+ "type": "text",
816
+ "bbox": [
817
+ 0.516,
818
+ 0.444,
819
+ 0.915,
820
+ 0.569
821
+ ],
822
+ "angle": 0,
823
+ "content": "the model from focusing on the most crucial positive prototypes and thus hurt the performance. Instead, we propose a balanced sampling strategy named B-CAS which selects a pair of positive and negative prototypes to aggregate with RoI features in parallel. The B-CAS not only enables the relation modeling between different classes but also keeps the positive prototype from being overwhelmed by too many negative examples, and therefore can learn the high-level semantic relations more effectively."
824
+ },
825
+ {
826
+ "type": "text",
827
+ "bbox": [
828
+ 0.516,
829
+ 0.57,
830
+ 0.914,
831
+ 0.682
832
+ ],
833
+ "angle": 0,
834
+ "content": "(Fan et al. 2020) employs a more complex training strategy which divides training pairs into three types and maintains a ratio of 1:2:1. Additionally, a matching loss is computed to align RoI features with prototypes. However, we find it instead hurts the performance. A plausible reason is that FFA introduces the asymmetry upon two branches, making the matching loss no longer beneficial. Consequently, a simple yet effective method B-CAS is adopted in our experiments."
835
+ },
836
+ {
837
+ "type": "text",
838
+ "bbox": [
839
+ 0.516,
840
+ 0.693,
841
+ 0.915,
842
+ 0.804
843
+ ],
844
+ "angle": 0,
845
+ "content": "Non-Linear Fusion Module Many previous meta-learning based methods use element-wise multiplication to handle the feature fusion. We argue that while this approach learns the similarities within the same class effectively, it struggles to capture the class differences. Therefore it is not compatible with the proposed B-CAS. To solve this problem, we employ a novel non-linear fusion network following (Han et al. 2022a; Xiao and Marlet 2020) with modifications."
846
+ },
847
+ {
848
+ "type": "text",
849
+ "bbox": [
850
+ 0.516,
851
+ 0.805,
852
+ 0.915,
853
+ 0.89
854
+ ],
855
+ "angle": 0,
856
+ "content": "Specifically, features after element-wise multiplication, subtraction and concatenation are processed independently to refine their relation to the new feature. Then they are concatenated with the vanilla RoI features and further refined before fed into the detection head. Given RoI feature \\( f_{roi} \\in \\mathbb{R}^{1 \\times 2d} \\) and class prototype \\( p_{cls} \\in \\mathbb{R}^{1 \\times 2d} \\), the aggregation"
857
+ }
858
+ ],
859
+ [
860
+ {
861
+ "type": "table",
862
+ "bbox": [
863
+ 0.084,
864
+ 0.066,
865
+ 0.482,
866
+ 0.429
867
+ ],
868
+ "angle": 0,
869
+ "content": "<table><tr><td rowspan=\"2\"></td><td rowspan=\"2\">Method</td><td rowspan=\"2\">Framework</td><td colspan=\"2\">shot</td></tr><tr><td>10</td><td>30</td></tr><tr><td colspan=\"5\">Single run results:</td></tr><tr><td rowspan=\"5\">T</td><td>TFA w/ cos (Wang et al. 2020)</td><td>FR-CNN</td><td>10.0</td><td>13.7</td></tr><tr><td>Retentive (Fan et al. 2021)</td><td>FR-CNN</td><td>10.5</td><td>13.8</td></tr><tr><td>FSCE (Sun et al. 2021)</td><td>FR-CNN</td><td>11.9</td><td>16.4</td></tr><tr><td>FADI (Cao et al. 2021)</td><td>FR-CNN</td><td>12.2</td><td>16.1</td></tr><tr><td>DeFRCN (Qiao et al. 2021)</td><td>FR-CNN</td><td>18.5</td><td>22.6</td></tr><tr><td>M*</td><td>FCT (Han et al. 2022b)</td><td>Transformer</td><td>17.1</td><td>21.4</td></tr><tr><td rowspan=\"6\">M</td><td>FSRW (Kang et al. 2019)</td><td>YOLOv2</td><td>5.6</td><td>9.1</td></tr><tr><td>Meta R-CNN (Yan et al. 2019)</td><td>FR-CNN</td><td>8.7</td><td>12.4</td></tr><tr><td>FSDetView (Xiao and Marlet 2020)</td><td>FR-CNN</td><td>12.5</td><td>14.7</td></tr><tr><td>Meta FR-CNN (Han et al. 2022a)</td><td>FR-CNN</td><td>12.7</td><td>16.6</td></tr><tr><td>VFA (Han et al. 2023)</td><td>FR-CNN</td><td>16.2</td><td>18.9</td></tr><tr><td>FPD(ours)</td><td>FR-CNN</td><td>16.5</td><td>20.1</td></tr><tr><td colspan=\"5\">Average results over multiple runs:</td></tr><tr><td rowspan=\"2\">T</td><td>TFA w/ cos (Wang et al. 2020)</td><td>FR-CNN</td><td>9.1</td><td>12.1</td></tr><tr><td>DeFRCN (Qiao et al. 2021)</td><td>FR-CNN</td><td>16.8</td><td>21.2</td></tr><tr><td rowspan=\"2\">M*</td><td>FCT (Han et al. 2022b)</td><td>Transformer</td><td>15.3</td><td>20.2</td></tr><tr><td>Meta-DETR (Zhang et al. 2022)</td><td>Def DETR</td><td>19.0</td><td>22.2</td></tr><tr><td rowspan=\"4\">M</td><td>FSDetView (Xiao and Marlet 2020)</td><td>FR-CNN</td><td>10.7</td><td>15.9</td></tr><tr><td>DCNet (Hu et al. 2021)</td><td>FR-CNN</td><td>12.8</td><td>18.6</td></tr><tr><td>VFA (Han et al. 2023)</td><td>FR-CNN</td><td>15.9</td><td>18.4</td></tr><tr><td>FPD(ours)</td><td>FR-CNN</td><td>15.9</td><td>19.3</td></tr></table>"
870
+ },
871
+ {
872
+ "type": "table_caption",
873
+ "bbox": [
874
+ 0.082,
875
+ 0.438,
876
+ 0.482,
877
+ 0.482
878
+ ],
879
+ "angle": 0,
880
+ "content": "Table 2: FSOD results (AP) on the MS COCO dataset. T: Transfer-learning based methods. M: Meta-learning based methods. \\(\\mathbf{M}^{*}\\): Meta-learning with advanced framework."
881
+ },
882
+ {
883
+ "type": "text",
884
+ "bbox": [
885
+ 0.083,
886
+ 0.507,
887
+ 0.23,
888
+ 0.521
889
+ ],
890
+ "angle": 0,
891
+ "content": "can be formulated as:"
892
+ },
893
+ {
894
+ "type": "equation",
895
+ "bbox": [
896
+ 0.084,
897
+ 0.526,
898
+ 0.478,
899
+ 0.555
900
+ ],
901
+ "angle": 0,
902
+ "content": "\\[\nf ^ {\\prime} = \\left[ \\mathcal {F} _ {1} \\left(f _ {r o i} \\odot p _ {c l s}\\right), \\mathcal {F} _ {2} \\left(f _ {r o i} - p _ {c l s}\\right), \\mathcal {F} _ {3} \\left[ f _ {r o i}, p _ {c l s} \\right], f _ {r o i} \\right] \\tag {9}\n\\]"
903
+ },
904
+ {
905
+ "type": "equation",
906
+ "bbox": [
907
+ 0.234,
908
+ 0.556,
909
+ 0.479,
910
+ 0.574
911
+ ],
912
+ "angle": 0,
913
+ "content": "\\[\nf = \\mathcal {F} _ {a g g} \\left(f ^ {\\prime}\\right) \\tag {10}\n\\]"
914
+ },
915
+ {
916
+ "type": "text",
917
+ "bbox": [
918
+ 0.082,
919
+ 0.575,
920
+ 0.482,
921
+ 0.688
922
+ ],
923
+ "angle": 0,
924
+ "content": "where \\(\\mathcal{F}_1\\), \\(\\mathcal{F}_2\\) and \\(\\mathcal{F}_3\\) represent independent fully-connected layer followed by ReLU activation function, and \\(\\mathcal{F}_{agg}\\) denote a pure fully-connected layer. This formulation provides a stronger capability to thoroughly explore the relations between high-level features. In addition, an exclusive path for RoI features is reserved to propagate the original RoI information, which reduces the noise introduced by random prototypes and can be used to regress the object location."
925
+ },
926
+ {
927
+ "type": "title",
928
+ "bbox": [
929
+ 0.226,
930
+ 0.699,
931
+ 0.338,
932
+ 0.716
933
+ ],
934
+ "angle": 0,
935
+ "content": "Experiments"
936
+ },
937
+ {
938
+ "type": "title",
939
+ "bbox": [
940
+ 0.084,
941
+ 0.719,
942
+ 0.186,
943
+ 0.733
944
+ ],
945
+ "angle": 0,
946
+ "content": "Benchmarks"
947
+ },
948
+ {
949
+ "type": "text",
950
+ "bbox": [
951
+ 0.082,
952
+ 0.736,
953
+ 0.481,
954
+ 0.792
955
+ ],
956
+ "angle": 0,
957
+ "content": "We evaluate our method on two widely-used FSOD benchmarks PASCAL VOC (Everingham et al. 2010) and MS COCO (Lin et al. 2014), using exactly the same class partitions and few-shot examples as in (Wang et al. 2020)."
958
+ },
959
+ {
960
+ "type": "text",
961
+ "bbox": [
962
+ 0.082,
963
+ 0.792,
964
+ 0.482,
965
+ 0.89
966
+ ],
967
+ "angle": 0,
968
+ "content": "PASCAL VOC. The 20 PASCAL VOC classes are split into 15 base classes and 5 novel classes. There are three different class partitions for a more comprehensive evaluation. The VOC07 and VOC12 train/val sets are used for training and the VOC07 test set is used for evaluation. The Mean Average Precision at IoU=0.5 (AP50) is reported under \\( K = \\{1, 2, 3, 5, 10\\} \\) shot settings."
969
+ },
970
+ {
971
+ "type": "table",
972
+ "bbox": [
973
+ 0.521,
974
+ 0.066,
975
+ 0.912,
976
+ 0.172
977
+ ],
978
+ "angle": 0,
979
+ "content": "<table><tr><td></td><td>B-CAS</td><td>NLF</td><td>FFA</td><td colspan=\"3\">shot</td></tr><tr><td></td><td></td><td></td><td></td><td>3</td><td>5</td><td>10</td></tr><tr><td>Baseline</td><td></td><td></td><td></td><td>56.7</td><td>58.3</td><td>61.4</td></tr><tr><td rowspan=\"3\">Ours</td><td>✓</td><td></td><td></td><td>61.2</td><td>64.7</td><td>64.9</td></tr><tr><td>✓</td><td>✓</td><td></td><td>62.8</td><td>67.1</td><td>66.3</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>64.0</td><td>67.6</td><td>68.4</td></tr></table>"
980
+ },
981
+ {
982
+ "type": "table_caption",
983
+ "bbox": [
984
+ 0.553,
985
+ 0.182,
986
+ 0.877,
987
+ 0.197
988
+ ],
989
+ "angle": 0,
990
+ "content": "Table 3: Ablation study of different components."
991
+ },
992
+ {
993
+ "type": "text",
994
+ "bbox": [
995
+ 0.516,
996
+ 0.221,
997
+ 0.915,
998
+ 0.293
999
+ ],
1000
+ "angle": 0,
1001
+ "content": "MS COCO. For MS COCO, the 20 PASCAL VOC classes are used as novel classes, the other 60 classes are used as base classes. The 5k images from COCO2017 val are used for evaluation and the rest are used for training. We report the AP at IoU=0.5:0.95 under \\( K = \\{10,30\\} \\) shot settings."
1002
+ },
1003
+ {
1004
+ "type": "title",
1005
+ "bbox": [
1006
+ 0.517,
1007
+ 0.303,
1008
+ 0.704,
1009
+ 0.318
1010
+ ],
1011
+ "angle": 0,
1012
+ "content": "Implementation Details"
1013
+ },
1014
+ {
1015
+ "type": "text",
1016
+ "bbox": [
1017
+ 0.516,
1018
+ 0.32,
1019
+ 0.915,
1020
+ 0.417
1021
+ ],
1022
+ "angle": 0,
1023
+ "content": "Our method is implemented with MMDetection (Chen et al. 2019). We adopt ResNet-101 (He et al. 2016) pretrained on ImageNet (Russakovsky et al. 2015) as the backbone. The single scale feature map is used for detection without FPN (Lin et al. 2017a). We resize the query images to a maximum of \\(1333 \\times 800\\) pixels, and the cropped instances from support images are resized to \\(224 \\times 224\\) pixels."
1024
+ },
1025
+ {
1026
+ "type": "text",
1027
+ "bbox": [
1028
+ 0.516,
1029
+ 0.417,
1030
+ 0.915,
1031
+ 0.514
1032
+ ],
1033
+ "angle": 0,
1034
+ "content": "Our model is trained on 2x3090 Nvidia GPUs with a total batch size of 8, using the SGD optimizer. In the base training stage, the model is trained on VOC and COCO datasets for \\(20\\mathrm{k} / 110\\mathrm{k}\\) iterations. The learning rate is set to 0.004 and decayed at \\(17\\mathrm{k} / 92\\mathrm{k}\\) iteration by a factor of 0.1. In the finetuning stage, the learning rate is set to 0.001. We use exactly the same loss functions with Meta R-CNN."
1035
+ },
1036
+ {
1037
+ "type": "title",
1038
+ "bbox": [
1039
+ 0.517,
1040
+ 0.525,
1041
+ 0.881,
1042
+ 0.54
1043
+ ],
1044
+ "angle": 0,
1045
+ "content": "Comparison with the State-of-the-Art Methods"
1046
+ },
1047
+ {
1048
+ "type": "text",
1049
+ "bbox": [
1050
+ 0.516,
1051
+ 0.542,
1052
+ 0.915,
1053
+ 0.736
1054
+ ],
1055
+ "angle": 0,
1056
+ "content": "PASCAL VOC. We show both the single run results and the average results over multiple runs of PASCAL VOC in Table 1. It can be seen that FPD significantly outperforms previous methods, achieving the state-of-the-art performance in most settings. Specifically, FPD outperforms previous best method by \\(1.5\\%\\), \\(4.4\\%\\), and \\(6.8\\%\\) on the three data splits under \\(K = 10\\) shot setting, respectively. We notice that under \\(K = \\{1, 2\\}\\) shot settings, our method is less effective than VFA, which is a strong FSOD detector utilizing a variational autoencoder to estimate class distributions. Our analysis suggests that in extremely data-scarce scenarios, it is more challenging for the FFA to capture the representative and common features across different shots, therefore it fails to achieve the expected effect under \\(K = \\{1, 2\\}\\) shot settings."
1057
+ },
1058
+ {
1059
+ "type": "text",
1060
+ "bbox": [
1061
+ 0.516,
1062
+ 0.737,
1063
+ 0.915,
1064
+ 0.89
1065
+ ],
1066
+ "angle": 0,
1067
+ "content": "MS COCO. Table 2 shows the results of MS COCO. It can be seen that FPD outperforms all of the meta-learning based methods adopting the Faster R-CNN framework. For example, FPD improves performance by \\(6.3\\%\\) compared to previous best result under \\(K = 30\\) shot setting. FPD ranks fourth among all the methods. Please note that our method focuses on the three proposed components, without using advanced frameworks or techniques such as DETR, Transformer or gradient decoupled layer. Given the challenging nature of the MS COCO dataset, we believe that the performance can be further improved with more refinements."
1068
+ }
1069
+ ],
1070
+ [
1071
+ {
1072
+ "type": "image",
1073
+ "bbox": [
1074
+ 0.136,
1075
+ 0.066,
1076
+ 0.865,
1077
+ 0.238
1078
+ ],
1079
+ "angle": 0,
1080
+ "content": null
1081
+ },
1082
+ {
1083
+ "type": "image_caption",
1084
+ "bbox": [
1085
+ 0.288,
1086
+ 0.247,
1087
+ 0.707,
1088
+ 0.262
1089
+ ],
1090
+ "angle": 0,
1091
+ "content": "Figure 4: Visualization of the detection results on novel classes."
1092
+ },
1093
+ {
1094
+ "type": "table",
1095
+ "bbox": [
1096
+ 0.089,
1097
+ 0.286,
1098
+ 0.481,
1099
+ 0.378
1100
+ ],
1101
+ "angle": 0,
1102
+ "content": "<table><tr><td>Method</td><td>Directly Match</td><td>FFA</td><td>3</td><td>shot 5</td><td>10</td></tr><tr><td>Baseline*</td><td></td><td></td><td>62.8</td><td>67.1</td><td>66.3</td></tr><tr><td rowspan=\"2\">Ours</td><td>✓</td><td></td><td>63.2</td><td>67.0</td><td>67.5</td></tr><tr><td></td><td>✓</td><td>64.0</td><td>67.6</td><td>68.4</td></tr></table>"
1103
+ },
1104
+ {
1105
+ "type": "table_caption",
1106
+ "bbox": [
1107
+ 0.133,
1108
+ 0.386,
1109
+ 0.429,
1110
+ 0.402
1111
+ ],
1112
+ "angle": 0,
1113
+ "content": "Table 4: Comparison with directly matching."
1114
+ },
1115
+ {
1116
+ "type": "title",
1117
+ "bbox": [
1118
+ 0.084,
1119
+ 0.426,
1120
+ 0.204,
1121
+ 0.441
1122
+ ],
1123
+ "angle": 0,
1124
+ "content": "Ablation Study"
1125
+ },
1126
+ {
1127
+ "type": "text",
1128
+ "bbox": [
1129
+ 0.082,
1130
+ 0.444,
1131
+ 0.479,
1132
+ 0.485
1133
+ ],
1134
+ "angle": 0,
1135
+ "content": "We conduct comprehensive experiments on the Novel Set 1 of PASCAL VOC under \\( K = \\{3,5,10\\} \\) shot settings, which demonstrates the effectiveness of our proposed method."
1136
+ },
1137
+ {
1138
+ "type": "text",
1139
+ "bbox": [
1140
+ 0.082,
1141
+ 0.485,
1142
+ 0.48,
1143
+ 0.555
1144
+ ],
1145
+ "angle": 0,
1146
+ "content": "Effect of Different Components. We show the results with different components in Table 3. It can be seen that B-CAS and NLF together improve the performance by about \\(10\\%\\) over the baseline. Based on this, our FFA can further boost the results, achieving the state-of-the-art performance."
1147
+ },
1148
+ {
1149
+ "type": "text",
1150
+ "bbox": [
1151
+ 0.082,
1152
+ 0.555,
1153
+ 0.481,
1154
+ 0.665
1155
+ ],
1156
+ "angle": 0,
1157
+ "content": "Effect of the FFA. FFA differs from DCNet in that it distills the fine-grained prototypes to aggregate with query branch. To demonstrate the superiority of this method, we re-implement the DRD module following DCNet to directly match dense feature maps for aggregation. We show the experimental results in Table 4. It can be seen that FFA consistently achieves better performance than directly matching, which validates the effectiveness of our method."
1158
+ },
1159
+ {
1160
+ "type": "text",
1161
+ "bbox": [
1162
+ 0.082,
1163
+ 0.665,
1164
+ 0.481,
1165
+ 0.72
1166
+ ],
1167
+ "angle": 0,
1168
+ "content": "Effect of Feature Queries. We assign each class a set of feature queries, which are the key guidance to distill fine-grained prototypes. The number of feature queries for a class is set to 5 by default. Figure 5 shows the effect of this number."
1169
+ },
1170
+ {
1171
+ "type": "text",
1172
+ "bbox": [
1173
+ 0.082,
1174
+ 0.721,
1175
+ 0.481,
1176
+ 0.833
1177
+ ],
1178
+ "angle": 0,
1179
+ "content": "Moreover, to explore the fundamental working mechanism, we visualize the attention heatmap of feature queries on support images. As shown in Figure 6, two feature queries from person category are listed. They are prone to focus on the specific details, e.g., head and hand, which conforms to our expectations. Please note that the generated heat maps has a resolution of \\(14 \\times 14\\). It is not absolutely aligned with the original images."
1180
+ },
1181
+ {
1182
+ "type": "title",
1183
+ "bbox": [
1184
+ 0.084,
1185
+ 0.843,
1186
+ 0.295,
1187
+ 0.857
1188
+ ],
1189
+ "angle": 0,
1190
+ "content": "Visualize Detection Results"
1191
+ },
1192
+ {
1193
+ "type": "text",
1194
+ "bbox": [
1195
+ 0.082,
1196
+ 0.861,
1197
+ 0.48,
1198
+ 0.889
1199
+ ],
1200
+ "angle": 0,
1201
+ "content": "We show the detection results in Figure 4. The model is trained on the Novel Set 3 of PASCAL VOC under 10 shot"
1202
+ },
1203
+ {
1204
+ "type": "image",
1205
+ "bbox": [
1206
+ 0.548,
1207
+ 0.288,
1208
+ 0.885,
1209
+ 0.433
1210
+ ],
1211
+ "angle": 0,
1212
+ "content": null
1213
+ },
1214
+ {
1215
+ "type": "image_caption",
1216
+ "bbox": [
1217
+ 0.526,
1218
+ 0.445,
1219
+ 0.903,
1220
+ 0.461
1221
+ ],
1222
+ "angle": 0,
1223
+ "content": "Figure 5: Ablation study on the number of feature queries."
1224
+ },
1225
+ {
1226
+ "type": "image",
1227
+ "bbox": [
1228
+ 0.534,
1229
+ 0.477,
1230
+ 0.903,
1231
+ 0.614
1232
+ ],
1233
+ "angle": 0,
1234
+ "content": null
1235
+ },
1236
+ {
1237
+ "type": "image_caption",
1238
+ "bbox": [
1239
+ 0.516,
1240
+ 0.624,
1241
+ 0.913,
1242
+ 0.654
1243
+ ],
1244
+ "angle": 0,
1245
+ "content": "Figure 6: Attention heatmap of feature queries. Please find more discussion and results in Appendix."
1246
+ },
1247
+ {
1248
+ "type": "text",
1249
+ "bbox": [
1250
+ 0.516,
1251
+ 0.682,
1252
+ 0.914,
1253
+ 0.74
1254
+ ],
1255
+ "angle": 0,
1256
+ "content": "setting and tested on the VOC07 test set. It can be seen that many of the novel instances are effectively detected, even though the detected bboxes are not perfectly aligned. This results demonstrate the promising potential of our method."
1257
+ },
1258
+ {
1259
+ "type": "title",
1260
+ "bbox": [
1261
+ 0.666,
1262
+ 0.755,
1263
+ 0.765,
1264
+ 0.77
1265
+ ],
1266
+ "angle": 0,
1267
+ "content": "Conclusion"
1268
+ },
1269
+ {
1270
+ "type": "text",
1271
+ "bbox": [
1272
+ 0.516,
1273
+ 0.777,
1274
+ 0.915,
1275
+ 0.89
1276
+ ],
1277
+ "angle": 0,
1278
+ "content": "This paper studies the meta-learning based FSOD. We propose a novel FFA module which can distill fine-grained prototypes in addition to class-level ones. It enables more robust novel object detection by focusing on the detailed features. We also propose B-CAS strategy and NLF module to aggregate high-level features more effectively. Both quantitative and qualitative results demonstrate the effectiveness of our method and the promising prospect of FSOD."
1279
+ }
1280
+ ],
1281
+ [
1282
+ {
1283
+ "type": "title",
1284
+ "bbox": [
1285
+ 0.204,
1286
+ 0.068,
1287
+ 0.362,
1288
+ 0.084
1289
+ ],
1290
+ "angle": 0,
1291
+ "content": "Acknowledgments"
1292
+ },
1293
+ {
1294
+ "type": "text",
1295
+ "bbox": [
1296
+ 0.082,
1297
+ 0.088,
1298
+ 0.482,
1299
+ 0.158
1300
+ ],
1301
+ "angle": 0,
1302
+ "content": "This work was supported in part by the Overseas Students Science and Technology Activities Project (No. 2018024), by the National Natural Science Foundation of China (No. 61502389), by the Natural Science Basic Research Program of Shaanxi Province, China (No. 2023-JC-YB-508)."
1303
+ },
1304
+ {
1305
+ "type": "title",
1306
+ "bbox": [
1307
+ 0.234,
1308
+ 0.172,
1309
+ 0.331,
1310
+ 0.188
1311
+ ],
1312
+ "angle": 0,
1313
+ "content": "References"
1314
+ },
1315
+ {
1316
+ "type": "ref_text",
1317
+ "bbox": [
1318
+ 0.085,
1319
+ 0.193,
1320
+ 0.48,
1321
+ 0.235
1322
+ ],
1323
+ "angle": 0,
1324
+ "content": "Cao, Y.; Wang, J.; Jin, Y.; Wu, T.; Chen, K.; Liu, Z.; and Lin, D. 2021. Few-Shot Object Detection via Association and Discrimination. In NeurIPS, 16570-16581."
1325
+ },
1326
+ {
1327
+ "type": "ref_text",
1328
+ "bbox": [
1329
+ 0.085,
1330
+ 0.239,
1331
+ 0.482,
1332
+ 0.281
1333
+ ],
1334
+ "angle": 0,
1335
+ "content": "Carion, N.; Massa, F.; Synnaeve, G.; Usunier, N.; Kirillov, A.; and Zagoruyko, S. 2020. End-to-end object detection with transformers. In ECCV, 213-229."
1336
+ },
1337
+ {
1338
+ "type": "ref_text",
1339
+ "bbox": [
1340
+ 0.085,
1341
+ 0.284,
1342
+ 0.482,
1343
+ 0.326
1344
+ ],
1345
+ "angle": 0,
1346
+ "content": "Chen, H.; Wang, Y.; Wang, G.; and Qiao, Y. 2018. LSTD: A low-shot transfer detector for object detection. In AAAI, 6066-6073."
1347
+ },
1348
+ {
1349
+ "type": "ref_text",
1350
+ "bbox": [
1351
+ 0.085,
1352
+ 0.33,
1353
+ 0.482,
1354
+ 0.414
1355
+ ],
1356
+ "angle": 0,
1357
+ "content": "Chen, K.; Wang, J.; Pang, J.; Cao, Y.; Xiong, Y.; Li, X.; Sun, S.; Feng, W.; Liu, Z.; Xu, J.; Zhang, Z.; Cheng, D.; Zhu, C.; Cheng, T.; Zhao, Q.; Li, B.; Lu, X.; Zhu, R.; Wu, Y.; Dai, J.; Wang, J.; Shi, J.; Ouyang, W.; Loy, C. C.; and Lin, D. 2019. MMDetection: Open MMLab Detection Toolbox and Benchmark. arXiv preprint arXiv:1906.07155."
1358
+ },
1359
+ {
1360
+ "type": "ref_text",
1361
+ "bbox": [
1362
+ 0.085,
1363
+ 0.418,
1364
+ 0.48,
1365
+ 0.46
1366
+ ],
1367
+ "angle": 0,
1368
+ "content": "Everingham, M.; Van Gool, L.; Williams, C. K.; Winn, J.; and Zisserman, A. 2010. The pascal visual object classes (voc) challenge. *IJCV*, 88(2): 303-338."
1369
+ },
1370
+ {
1371
+ "type": "ref_text",
1372
+ "bbox": [
1373
+ 0.085,
1374
+ 0.463,
1375
+ 0.48,
1376
+ 0.505
1377
+ ],
1378
+ "angle": 0,
1379
+ "content": "Fan, Q.; Zhuo, W.; Tang, C.-K.; and Tai, Y.-W. 2020. Few-shot object detection with attention-RPN and multi-relation detector. In CVPR, 4013-4022."
1380
+ },
1381
+ {
1382
+ "type": "ref_text",
1383
+ "bbox": [
1384
+ 0.085,
1385
+ 0.509,
1386
+ 0.482,
1387
+ 0.55
1388
+ ],
1389
+ "angle": 0,
1390
+ "content": "Fan, Z.; Ma, Y.; Li, Z.; and Sun, J. 2021. Generalized few-shot object detection without forgetting. In CVPR, 4527-4536."
1391
+ },
1392
+ {
1393
+ "type": "ref_text",
1394
+ "bbox": [
1395
+ 0.085,
1396
+ 0.554,
1397
+ 0.48,
1398
+ 0.597
1399
+ ],
1400
+ "angle": 0,
1401
+ "content": "Han, G.; Huang, S.; Ma, J.; He, Y.; and Chang, S.-F. 2022a. Meta faster r-cnn: Towards accurate few-shot object detection with attentive feature alignment. In AAAI, 780-789."
1402
+ },
1403
+ {
1404
+ "type": "ref_text",
1405
+ "bbox": [
1406
+ 0.085,
1407
+ 0.601,
1408
+ 0.48,
1409
+ 0.642
1410
+ ],
1411
+ "angle": 0,
1412
+ "content": "Han, G.; Ma, J.; Huang, S.; Chen, L.; and Chang, S.-F. 2022b. Few-shot object detection with fully cross-transformer. In CVPR, 5321-5330."
1413
+ },
1414
+ {
1415
+ "type": "ref_text",
1416
+ "bbox": [
1417
+ 0.085,
1418
+ 0.646,
1419
+ 0.48,
1420
+ 0.688
1421
+ ],
1422
+ "angle": 0,
1423
+ "content": "Han, J.; Ren, Y.; Ding, J.; Yan, K.; and Xia, G.-S. 2023. Few-Shot Object Detection via Variational Feature Aggregation. In AAAI, 755-763."
1424
+ },
1425
+ {
1426
+ "type": "ref_text",
1427
+ "bbox": [
1428
+ 0.085,
1429
+ 0.692,
1430
+ 0.48,
1431
+ 0.72
1432
+ ],
1433
+ "angle": 0,
1434
+ "content": "He, K.; Gkioxari, G.; Dollar, P.; and Girshick, R. 2017. Mask r-cnn. In ICCV, 2961-2969."
1435
+ },
1436
+ {
1437
+ "type": "ref_text",
1438
+ "bbox": [
1439
+ 0.085,
1440
+ 0.724,
1441
+ 0.48,
1442
+ 0.753
1443
+ ],
1444
+ "angle": 0,
1445
+ "content": "He, K.; Zhang, X.; Ren, S.; and Sun, J. 2016. Deep residual learning for image recognition. In CVPR, 770-778."
1446
+ },
1447
+ {
1448
+ "type": "ref_text",
1449
+ "bbox": [
1450
+ 0.085,
1451
+ 0.756,
1452
+ 0.48,
1453
+ 0.798
1454
+ ],
1455
+ "angle": 0,
1456
+ "content": "Hu, H.; Bai, S.; Li, A.; Cui, J.; and Wang, L. 2021. Dense relation distillation with context-aware aggregation for few-shot object detection. In CVPR, 10185-10194."
1457
+ },
1458
+ {
1459
+ "type": "ref_text",
1460
+ "bbox": [
1461
+ 0.085,
1462
+ 0.802,
1463
+ 0.48,
1464
+ 0.843
1465
+ ],
1466
+ "angle": 0,
1467
+ "content": "Kang, B.; Liu, Z.; Wang, X.; Yu, F.; Feng, J.; and Darrell, T. 2019. Few-shot object detection via feature reweighting. In ICCV, 8420-8429."
1468
+ },
1469
+ {
1470
+ "type": "ref_text",
1471
+ "bbox": [
1472
+ 0.085,
1473
+ 0.847,
1474
+ 0.48,
1475
+ 0.889
1476
+ ],
1477
+ "angle": 0,
1478
+ "content": "Lin, T.-Y.; Dólar, P.; Girshick, R.; He, K.; Hariharan, B.; and Belongie, S. 2017a. Feature pyramid networks for object detection. In CVPR, 2117-2125."
1479
+ },
1480
+ {
1481
+ "type": "list",
1482
+ "bbox": [
1483
+ 0.085,
1484
+ 0.193,
1485
+ 0.482,
1486
+ 0.889
1487
+ ],
1488
+ "angle": 0,
1489
+ "content": null
1490
+ },
1491
+ {
1492
+ "type": "ref_text",
1493
+ "bbox": [
1494
+ 0.518,
1495
+ 0.068,
1496
+ 0.916,
1497
+ 0.11
1498
+ ],
1499
+ "angle": 0,
1500
+ "content": "Lin, T.-Y.; Goyal, P.; Girshick, R.; He, K.; and Dollar, P. 2017b. Focal Loss for Dense Object Detection. In ICCV, 2980-2988."
1501
+ },
1502
+ {
1503
+ "type": "ref_text",
1504
+ "bbox": [
1505
+ 0.518,
1506
+ 0.114,
1507
+ 0.915,
1508
+ 0.17
1509
+ ],
1510
+ "angle": 0,
1511
+ "content": "Lin, T.-Y.; Maire, M.; Belongie, S.; Hays, J.; Perona, P.; Ramanan, D.; Dollar, P.; and Zitnick, C. L. 2014. Microsoft coco: Common objects in context. In ECCV, 740-755. Springer."
1512
+ },
1513
+ {
1514
+ "type": "ref_text",
1515
+ "bbox": [
1516
+ 0.519,
1517
+ 0.173,
1518
+ 0.914,
1519
+ 0.214
1520
+ ],
1521
+ "angle": 0,
1522
+ "content": "Liu, W.; Anguelov, D.; Erhan, D.; Szegedy, C.; Reed, S.; Fu, C.-Y.; and Berg, A. C. 2016. SSD: Single shot multibox detector. In ECCV, 21-37."
1523
+ },
1524
+ {
1525
+ "type": "ref_text",
1526
+ "bbox": [
1527
+ 0.519,
1528
+ 0.218,
1529
+ 0.914,
1530
+ 0.259
1531
+ ],
1532
+ "angle": 0,
1533
+ "content": "Qiao, L.; Zhao, Y.; Li, Z.; Qiu, X.; Wu, J.; and Zhang, C. 2021. DeFRCN: Decoupled Faster R-CNN for Few-Shot Object Detection. In ICCV, 8681-8690."
1534
+ },
1535
+ {
1536
+ "type": "ref_text",
1537
+ "bbox": [
1538
+ 0.519,
1539
+ 0.262,
1540
+ 0.914,
1541
+ 0.303
1542
+ ],
1543
+ "angle": 0,
1544
+ "content": "Redmon, J.; Divvala, S.; Girshick, R.; and Farhadi, A. 2016. You only look once: Unified, real-time object detection. In CVPR, 779-788."
1545
+ },
1546
+ {
1547
+ "type": "ref_text",
1548
+ "bbox": [
1549
+ 0.519,
1550
+ 0.307,
1551
+ 0.914,
1552
+ 0.348
1553
+ ],
1554
+ "angle": 0,
1555
+ "content": "Ren, S.; He, K.; Girshick, R.; and Sun, J. 2017. Faster R-CNN: Towards real-time object detection with region proposal networks. IEEE TPAMI, 39(6): 1137-1149."
1556
+ },
1557
+ {
1558
+ "type": "ref_text",
1559
+ "bbox": [
1560
+ 0.519,
1561
+ 0.352,
1562
+ 0.914,
1563
+ 0.407
1564
+ ],
1565
+ "angle": 0,
1566
+ "content": "Russakovsky, O.; Deng, J.; Su, H.; Krause, J.; Satheesh, S.; Ma, S.; Huang, Z.; Karpathy, A.; Khosla, A.; Bernstein, M.; et al. 2015. Imagenet large scale visual recognition challenge. IJCV, 115(3): 211-252."
1567
+ },
1568
+ {
1569
+ "type": "ref_text",
1570
+ "bbox": [
1571
+ 0.519,
1572
+ 0.41,
1573
+ 0.914,
1574
+ 0.451
1575
+ ],
1576
+ "angle": 0,
1577
+ "content": "Sun, B.; Li, B.; Cai, S.; Yuan, Y.; and Zhang, C. 2021. Fsce: Few-shot object detection via contrastive proposal encoding. In CVPR, 7352-7362."
1578
+ },
1579
+ {
1580
+ "type": "ref_text",
1581
+ "bbox": [
1582
+ 0.519,
1583
+ 0.454,
1584
+ 0.914,
1585
+ 0.496
1586
+ ],
1587
+ "angle": 0,
1588
+ "content": "Vaswani, A.; Shazeer, N.; Parmar, N.; Uszkoreit, J.; Jones, L.; Gomez, A. N.; Kaiser, L.; and Polosukhin, I. 2017. Attention is all you need. In NeurIPS, 5998-6008."
1589
+ },
1590
+ {
1591
+ "type": "ref_text",
1592
+ "bbox": [
1593
+ 0.519,
1594
+ 0.499,
1595
+ 0.914,
1596
+ 0.554
1597
+ ],
1598
+ "angle": 0,
1599
+ "content": "Wang, W.; Xie, E.; Li, X.; Fan, D.-P.; Song, K.; Liang, D.; Lu, T.; Luo, P.; and Shao, L. 2021. Pyramid vision transformer: A versatile backbone for dense prediction without convolutions. In ICCV, 568-578."
1600
+ },
1601
+ {
1602
+ "type": "ref_text",
1603
+ "bbox": [
1604
+ 0.519,
1605
+ 0.557,
1606
+ 0.914,
1607
+ 0.599
1608
+ ],
1609
+ "angle": 0,
1610
+ "content": "Wang, X.; Huang, T. E.; Darrell, T.; Gonzalez, J. E.; and Yu, F. 2020. Frustratingly simple few-shot object detection. arXiv preprint arXiv:2003.06957."
1611
+ },
1612
+ {
1613
+ "type": "ref_text",
1614
+ "bbox": [
1615
+ 0.519,
1616
+ 0.602,
1617
+ 0.914,
1618
+ 0.631
1619
+ ],
1620
+ "angle": 0,
1621
+ "content": "Wang, Y.-X.; Ramanan, D.; and Hebert, M. 2019. Meta-learning to detect rare objects. In ICCV, 9925-9934."
1622
+ },
1623
+ {
1624
+ "type": "ref_text",
1625
+ "bbox": [
1626
+ 0.519,
1627
+ 0.633,
1628
+ 0.914,
1629
+ 0.674
1630
+ ],
1631
+ "angle": 0,
1632
+ "content": "Wu, J.; Liu, S.; Huang, D.; and Wang, Y. 2020. Multi-scale positive sample refinement for few-shot object detection. In ECCV, 456-472."
1633
+ },
1634
+ {
1635
+ "type": "ref_text",
1636
+ "bbox": [
1637
+ 0.519,
1638
+ 0.678,
1639
+ 0.914,
1640
+ 0.72
1641
+ ],
1642
+ "angle": 0,
1643
+ "content": "Xiao, Y.; and Marlet, R. 2020. Few-shot object detection and viewpoint estimation for objects in the wild. In ECCV, 192-210. Springer."
1644
+ },
1645
+ {
1646
+ "type": "ref_text",
1647
+ "bbox": [
1648
+ 0.519,
1649
+ 0.723,
1650
+ 0.914,
1651
+ 0.764
1652
+ ],
1653
+ "angle": 0,
1654
+ "content": "Yan, X.; Chen, Z.; Xu, A.; Wang, X.; Liang, X.; and Lin, L. 2019. Meta r-cnn: Towards general solver for instance-level low-shot learning. In ICCV, 9577-9586."
1655
+ },
1656
+ {
1657
+ "type": "ref_text",
1658
+ "bbox": [
1659
+ 0.519,
1660
+ 0.767,
1661
+ 0.914,
1662
+ 0.822
1663
+ ],
1664
+ "angle": 0,
1665
+ "content": "Zhang, G.; Luo, Z.; Cui, K.; Lu, S.; and Xing, E. P. 2022. Meta-DETR: Image-Level Few-Shot Detection with InterClass Correlation Exploitation. IEEE TPAMI, 45(11): 12832-12843."
1666
+ },
1667
+ {
1668
+ "type": "ref_text",
1669
+ "bbox": [
1670
+ 0.519,
1671
+ 0.825,
1672
+ 0.914,
1673
+ 0.868
1674
+ ],
1675
+ "angle": 0,
1676
+ "content": "Zhu, X.; Su, W.; Lu, L.; Li, B.; Wang, X.; and Dai, J. 2020. Deformable detr: Deformable transformers for end-to-end object detection. arXiv preprint arXiv:2010.04159."
1677
+ },
1678
+ {
1679
+ "type": "list",
1680
+ "bbox": [
1681
+ 0.518,
1682
+ 0.068,
1683
+ 0.916,
1684
+ 0.868
1685
+ ],
1686
+ "angle": 0,
1687
+ "content": null
1688
+ }
1689
+ ],
1690
+ [
1691
+ {
1692
+ "type": "title",
1693
+ "bbox": [
1694
+ 0.24,
1695
+ 0.068,
1696
+ 0.327,
1697
+ 0.086
1698
+ ],
1699
+ "angle": 0,
1700
+ "content": "Appendix"
1701
+ },
1702
+ {
1703
+ "type": "title",
1704
+ "bbox": [
1705
+ 0.084,
1706
+ 0.088,
1707
+ 0.275,
1708
+ 0.102
1709
+ ],
1710
+ "angle": 0,
1711
+ "content": "Additional Visualization"
1712
+ },
1713
+ {
1714
+ "type": "text",
1715
+ "bbox": [
1716
+ 0.082,
1717
+ 0.107,
1718
+ 0.48,
1719
+ 0.218
1720
+ ],
1721
+ "angle": 0,
1722
+ "content": "Attention Heatmap of Feature Queries. We show more attention heatmaps of feature queries upon support images in Figure 7. We can see that the feature query 2 from dog category is prone to capture the detailed features of head. The feature query 1, 2 from horse category are focus on head and legs, respectively. The feature queries are more likely to capture the different details, rather than collapse to a trivial solution."
1723
+ },
1724
+ {
1725
+ "type": "text",
1726
+ "bbox": [
1727
+ 0.082,
1728
+ 0.218,
1729
+ 0.481,
1730
+ 0.331
1731
+ ],
1732
+ "angle": 0,
1733
+ "content": "Feature Map of Query Images. The feature map of a query image \\( X_{q} \\in \\mathbb{R}^{H\\bar{W} \\times d} \\) are summed alone dimension \\( d \\) and then normalized to [0, 1] to produce the heatmap. We show the results of original query features and the assigned prototypes in Figure 10. It can be seen that the assigned prototypes can highlight the representative features to facilitate the model prediction. All these evidences demonstrate the effectiveness of our proposed FFA."
1734
+ },
1735
+ {
1736
+ "type": "title",
1737
+ "bbox": [
1738
+ 0.084,
1739
+ 0.342,
1740
+ 0.355,
1741
+ 0.357
1742
+ ],
1743
+ "angle": 0,
1744
+ "content": "Additional Implementation Details"
1745
+ },
1746
+ {
1747
+ "type": "text",
1748
+ "bbox": [
1749
+ 0.082,
1750
+ 0.361,
1751
+ 0.481,
1752
+ 0.473
1753
+ ],
1754
+ "angle": 0,
1755
+ "content": "Our method follows the two-stage training paradigm. At the base training stage, we train all of the model parameters (the first few layers of ResNet are frozened conventionally). At the fine-tuning stage, we freeze the backbone and only train the RPN, FFA and NLF module. Fine-tuning of the FFA together with RPN can help to produce high-quality proposals of the novel classes. Under \\( K = \\{1, 2\\} \\) shot settings, we freeze the RPN to avoid overfitting."
1756
+ },
1757
+ {
1758
+ "type": "title",
1759
+ "bbox": [
1760
+ 0.084,
1761
+ 0.485,
1762
+ 0.245,
1763
+ 0.5
1764
+ ],
1765
+ "angle": 0,
1766
+ "content": "Computational Cost"
1767
+ },
1768
+ {
1769
+ "type": "text",
1770
+ "bbox": [
1771
+ 0.082,
1772
+ 0.503,
1773
+ 0.481,
1774
+ 0.575
1775
+ ],
1776
+ "angle": 0,
1777
+ "content": "Table 5 shows the computational cost of different methods at inference time. We conduct the experiments on a single Nvidia 3090 GPU. The batch size is set to 1. It can be seen that our method has a better trade-off between the performance and computational efficiency."
1778
+ },
1779
+ {
1780
+ "type": "table",
1781
+ "bbox": [
1782
+ 0.086,
1783
+ 0.587,
1784
+ 0.48,
1785
+ 0.699
1786
+ ],
1787
+ "angle": 0,
1788
+ "content": "<table><tr><td>Dataset</td><td>Method</td><td>Params(MB)</td><td>FLOPs(GB)</td><td>FPS(img/s)</td></tr><tr><td rowspan=\"3\">VOC (20 class)</td><td>Baseline</td><td>45.99</td><td>709.76</td><td>16.2</td></tr><tr><td>FPD(Ours)</td><td>65.68</td><td>818.10</td><td>14.8</td></tr><tr><td>Directly Match</td><td>69.58</td><td>956.72</td><td>14.5</td></tr><tr><td rowspan=\"3\">COCO (80 class)</td><td>Baseline</td><td>46.72</td><td>766.36</td><td>7.3</td></tr><tr><td>FPD(Ours)</td><td>66.5</td><td>1309.50</td><td>6.5</td></tr><tr><td>Directly Match</td><td>70.32</td><td>1466.25</td><td>5.3</td></tr></table>"
1789
+ },
1790
+ {
1791
+ "type": "table_caption",
1792
+ "bbox": [
1793
+ 0.103,
1794
+ 0.708,
1795
+ 0.458,
1796
+ 0.723
1797
+ ],
1798
+ "angle": 0,
1799
+ "content": "Table 5: The computational cost at the inference time."
1800
+ },
1801
+ {
1802
+ "type": "title",
1803
+ "bbox": [
1804
+ 0.21,
1805
+ 0.754,
1806
+ 0.354,
1807
+ 0.769
1808
+ ],
1809
+ "angle": 0,
1810
+ "content": "More Discussion"
1811
+ },
1812
+ {
1813
+ "type": "text",
1814
+ "bbox": [
1815
+ 0.082,
1816
+ 0.773,
1817
+ 0.482,
1818
+ 0.817
1819
+ ],
1820
+ "angle": 0,
1821
+ "content": "Our proposed FFA module has similarities with DCNet and Meta-DETR. In this part, we provide a more detailed comparison among these methods."
1822
+ },
1823
+ {
1824
+ "type": "title",
1825
+ "bbox": [
1826
+ 0.084,
1827
+ 0.828,
1828
+ 0.257,
1829
+ 0.843
1830
+ ],
1831
+ "angle": 0,
1832
+ "content": "Compare with DCNet"
1833
+ },
1834
+ {
1835
+ "type": "text",
1836
+ "bbox": [
1837
+ 0.082,
1838
+ 0.847,
1839
+ 0.481,
1840
+ 0.89
1841
+ ],
1842
+ "angle": 0,
1843
+ "content": "Figure 11 illustrates the DRD module of DCNet, which densely matches all classes of support features into the query feature map. There are two main differences between DRD"
1844
+ },
1845
+ {
1846
+ "type": "image",
1847
+ "bbox": [
1848
+ 0.526,
1849
+ 0.066,
1850
+ 0.912,
1851
+ 0.281
1852
+ ],
1853
+ "angle": 0,
1854
+ "content": null
1855
+ },
1856
+ {
1857
+ "type": "image_caption",
1858
+ "bbox": [
1859
+ 0.516,
1860
+ 0.29,
1861
+ 0.915,
1862
+ 0.318
1863
+ ],
1864
+ "angle": 0,
1865
+ "content": "Figure 7: Additional attention heatmap of feature queries. The model is trained on Novel Set 3 of PASCAL VOC."
1866
+ },
1867
+ {
1868
+ "type": "image",
1869
+ "bbox": [
1870
+ 0.525,
1871
+ 0.332,
1872
+ 0.912,
1873
+ 0.543
1874
+ ],
1875
+ "angle": 0,
1876
+ "content": null
1877
+ },
1878
+ {
1879
+ "type": "image_caption",
1880
+ "bbox": [
1881
+ 0.539,
1882
+ 0.554,
1883
+ 0.892,
1884
+ 0.57
1885
+ ],
1886
+ "angle": 0,
1887
+ "content": "Figure 8: Attention heatmap of feature queries (bird)."
1888
+ },
1889
+ {
1890
+ "type": "text",
1891
+ "bbox": [
1892
+ 0.516,
1893
+ 0.596,
1894
+ 0.915,
1895
+ 0.749
1896
+ ],
1897
+ "angle": 0,
1898
+ "content": "and our FFA (as shown in Figure 3). First, FFA utilizes feature queries to distill fine-grained prototypes, enabling the model to focus on the most representative detailed features and to reduce computational costs (see Table 5). It also enhances inference efficiency (see subsec. Test-Time Natural Integration). Second, FFA employs a residual connection for the original query features, and the prototypes are directly assigned to the query feature map without any extra projection. This maintains the query-support branches in the same feature space, which is crucial for the subsequent high-level feature fusion operation."
1899
+ },
1900
+ {
1901
+ "type": "title",
1902
+ "bbox": [
1903
+ 0.517,
1904
+ 0.759,
1905
+ 0.733,
1906
+ 0.775
1907
+ ],
1908
+ "angle": 0,
1909
+ "content": "Compare with Meta-DETR"
1910
+ },
1911
+ {
1912
+ "type": "text",
1913
+ "bbox": [
1914
+ 0.516,
1915
+ 0.778,
1916
+ 0.915,
1917
+ 0.89
1918
+ ],
1919
+ "angle": 0,
1920
+ "content": "Meta-DETR incorporates meta-learning and attention mechanism into the DETR framework. It utilizes the cross attention operation to aggregate query-support features. As shown in Figure 12, CAM performs global average pooling to generate the class-level prototypes. They are matched with query features and then assigned into query features based on the matching results. Instead of performing element-wise addition, the element-wise multiplication operation is used to"
1921
+ }
1922
+ ],
1923
+ [
1924
+ {
1925
+ "type": "image",
1926
+ "bbox": [
1927
+ 0.087,
1928
+ 0.066,
1929
+ 0.48,
1930
+ 0.202
1931
+ ],
1932
+ "angle": 0,
1933
+ "content": null
1934
+ },
1935
+ {
1936
+ "type": "image_caption",
1937
+ "bbox": [
1938
+ 0.092,
1939
+ 0.215,
1940
+ 0.472,
1941
+ 0.231
1942
+ ],
1943
+ "angle": 0,
1944
+ "content": "Figure 9: Attention heatmap of feature queries (airplane)."
1945
+ },
1946
+ {
1947
+ "type": "image",
1948
+ "bbox": [
1949
+ 0.092,
1950
+ 0.244,
1951
+ 0.476,
1952
+ 0.525
1953
+ ],
1954
+ "angle": 0,
1955
+ "content": null
1956
+ },
1957
+ {
1958
+ "type": "image_caption",
1959
+ "bbox": [
1960
+ 0.146,
1961
+ 0.539,
1962
+ 0.416,
1963
+ 0.555
1964
+ ],
1965
+ "angle": 0,
1966
+ "content": "Figure 10: Feature map of query images."
1967
+ },
1968
+ {
1969
+ "type": "text",
1970
+ "bbox": [
1971
+ 0.083,
1972
+ 0.582,
1973
+ 0.48,
1974
+ 0.747
1975
+ ],
1976
+ "angle": 0,
1977
+ "content": "rewight the query feature map along the channel dimension. CAM differs from our method in three main aspects. First, it focuses on high-level feature aggregation, while our FFA is used to aggregate detailed features. FFA utilizes feature queries and an additional cross attention layer to refine the important local context into the fine-grained prototypes. Second, CAM employs sigmoid and multiplication operations to reweight the query feature map, while FFA directly adds the assigned prototypes to it, preserving more information and potential in the early stages. Third, CAM incorporates a novel and effective encoding matching task to predict object classes."
1978
+ },
1979
+ {
1980
+ "type": "title",
1981
+ "bbox": [
1982
+ 0.084,
1983
+ 0.76,
1984
+ 0.251,
1985
+ 0.774
1986
+ ],
1987
+ "angle": 0,
1988
+ "content": "Revised Performance"
1989
+ },
1990
+ {
1991
+ "type": "text",
1992
+ "bbox": [
1993
+ 0.083,
1994
+ 0.777,
1995
+ 0.483,
1996
+ 0.834
1997
+ ],
1998
+ "angle": 0,
1999
+ "content": "After carefully re-examining our code, we found some unintentional discrepancies that have impacted the performance metrics. These mistakes do not compromise the main contributions of this work. Table 6 shows the revised results."
2000
+ },
2001
+ {
2002
+ "type": "image",
2003
+ "bbox": [
2004
+ 0.529,
2005
+ 0.07,
2006
+ 0.852,
2007
+ 0.421
2008
+ ],
2009
+ "angle": 0,
2010
+ "content": null
2011
+ },
2012
+ {
2013
+ "type": "image_caption",
2014
+ "bbox": [
2015
+ 0.518,
2016
+ 0.432,
2017
+ 0.914,
2018
+ 0.448
2019
+ ],
2020
+ "angle": 0,
2021
+ "content": "Figure 11: The Dense Relation Distillation module of DCNet."
2022
+ },
2023
+ {
2024
+ "type": "image",
2025
+ "bbox": [
2026
+ 0.53,
2027
+ 0.463,
2028
+ 0.899,
2029
+ 0.84
2030
+ ],
2031
+ "angle": 0,
2032
+ "content": null
2033
+ },
2034
+ {
2035
+ "type": "image_caption",
2036
+ "bbox": [
2037
+ 0.517,
2038
+ 0.853,
2039
+ 0.918,
2040
+ 0.881
2041
+ ],
2042
+ "angle": 0,
2043
+ "content": "Figure 12: The Correlational Aggregation Module of MetaDETR."
2044
+ }
2045
+ ],
2046
+ [
2047
+ {
2048
+ "type": "table",
2049
+ "bbox": [
2050
+ 0.091,
2051
+ 0.389,
2052
+ 0.907,
2053
+ 0.539
2054
+ ],
2055
+ "angle": 0,
2056
+ "content": "<table><tr><td rowspan=\"2\">Method / shot</td><td colspan=\"5\">Novel Set 1</td><td colspan=\"5\">Novel Set 2</td><td colspan=\"5\">Novel Set 3</td></tr><tr><td>1</td><td>2</td><td>3</td><td>5</td><td>10</td><td>1</td><td>2</td><td>3</td><td>5</td><td>10</td><td>1</td><td>2</td><td>3</td><td>5</td><td>10</td></tr><tr><td colspan=\"16\">Single run results:</td></tr><tr><td>FSCE (Sun et al. 2021)</td><td>44.2</td><td>43.8</td><td>51.4</td><td>61.9</td><td>63.4</td><td>27.3</td><td>29.5</td><td>43.5</td><td>44.2</td><td>50.2</td><td>37.2</td><td>41.9</td><td>47.5</td><td>54.6</td><td>58.5</td></tr><tr><td>Meta FR-CNN (Han et al. 2022a)</td><td>43.0</td><td>54.5</td><td>60.6</td><td>66.1</td><td>65.4</td><td>27.7</td><td>35.5</td><td>46.1</td><td>47.8</td><td>51.4</td><td>40.6</td><td>46.4</td><td>53.4</td><td>59.9</td><td>58.6</td></tr><tr><td>Meta-DETR (Zhang et al. 2022)</td><td>40.6</td><td>51.4</td><td>58.0</td><td>59.2</td><td>63.6</td><td>37.0</td><td>36.6</td><td>43.7</td><td>49.1</td><td>54.6</td><td>41.6</td><td>45.9</td><td>52.7</td><td>58.9</td><td>60.6</td></tr><tr><td>FCT (Han et al. 2022b)</td><td>49.9</td><td>57.1</td><td>57.9</td><td>63.2</td><td>67.1</td><td>27.6</td><td>34.5</td><td>43.7</td><td>49.2</td><td>51.2</td><td>39.5</td><td>54.7</td><td>52.3</td><td>57.0</td><td>58.7</td></tr><tr><td>VFA (Han et al. 2023)</td><td>57.7</td><td>64.6</td><td>64.7</td><td>67.2</td><td>67.4</td><td>41.4</td><td>46.2</td><td>51.1</td><td>51.8</td><td>51.6</td><td>48.9</td><td>54.8</td><td>56.6</td><td>59.0</td><td>58.9</td></tr><tr><td>FPD(Previous)</td><td>46.5</td><td>62.3</td><td>65.4</td><td>68.2</td><td>69.3</td><td>32.2</td><td>43.6</td><td>50.3</td><td>52.5</td><td>56.1</td><td>43.2</td><td>53.3</td><td>56.7</td><td>62.1</td><td>64.1</td></tr><tr><td>FPD(Revised)</td><td>48.1</td><td>62.2</td><td>64.0</td><td>67.6</td><td>68.4</td><td>29.8</td><td>43.2</td><td>47.7</td><td>52.0</td><td>53.9</td><td>44.9</td><td>53.8</td><td>58.1</td><td>61.6</td><td>62.9</td></tr></table>"
2057
+ },
2058
+ {
2059
+ "type": "table_caption",
2060
+ "bbox": [
2061
+ 0.232,
2062
+ 0.548,
2063
+ 0.763,
2064
+ 0.563
2065
+ ],
2066
+ "angle": 0,
2067
+ "content": "Table 6: Revised FSOD results (AP50) on the three splits of Pascal VOC dataset."
2068
+ }
2069
+ ]
2070
+ ]
2401.07xxx/2401.07629/742e5603-ff9f-4acc-8856-c8c986d94821_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6546fc5e3dcdd2986871cb94e85ba09a5e96ed042a59324f0dba11bf25d77199
3
+ size 14735903
2401.07xxx/2401.07629/full.md ADDED
@@ -0,0 +1,337 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Fine-Grained Prototypes Distillation for Few-Shot Object Detection
2
+
3
+ Zichen Wang, Bo Yang*, Haonan Yue, Zhenghao Ma
4
+
5
+ School of Automation, Northwestern Polytechnical University, Xi'an, China {wangchen1801, hnyue, mazh0819}@mail.nwpu.edu.cn, byang@nwpu.edu.cn
6
+
7
+ # Abstract
8
+
9
+ Few-shot object detection (FSOD) aims at extending a generic detector for novel object detection with only a few training examples. It has attracted great attention recently due to its practical significance. Meta-learning has been demonstrated to be an effective paradigm for this task. In general, methods based on meta-learning employ an additional support branch to encode novel examples (a.k.a. support images) into class prototypes, which are then fused with the query branch to facilitate the model prediction. However, the class-level prototypes are difficult to precisely generate, and they also lack detailed information, leading to instability in performance. New methods are required to capture the distinctive local context for more robust novel object detection. To this end, we propose to distill the most representative support features into fine-grained prototypes. These prototypes are then assigned into query feature maps based on the matching results, modeling the detailed feature relations between the two branches. This process is realized by our Fine-Grained Feature Aggregation (FFA) module. Moreover, in terms of high-level feature fusion, we propose the Balanced Class-Agnostic Sampling (B-CAS) strategy and the Non-Linear Fusion (NLF) module from different perspectives. They are complementary to each other and depict the high-level feature relations more effectively. Extensive experiments on the PASCAL VOC and MS COCO benchmarks show that our method sets a new state-of-the-art performance in most settings. Our code is available at https://github.com/wangchen1801/FPD.
10
+
11
+ # Introduction
12
+
13
+ Object detection is a fundamental task in computer vision, and methods based on deep learning have been well established over the past few years (Redmon et al. 2016; Ren et al. 2017; Carion et al. 2020; Liu et al. 2016). While remarkable achievements have been made, most of these methods require a large amount of labeled data to obtain satisfactory performance; otherwise they are prone to overfitting and hardly generalize to unknown data.
14
+
15
+ Few-shot object detection (FSOD) is a more challenging task that aims to detect objects specifically in data-scarce scenarios. FSOD assumes that there is a sufficient amount of examples for the base classes but only k-shot examples for each novel class.
16
+
17
+ ![](images/b935493fcbf62f604e81e02bf346146026283704f0323f942389a5cfd0dff28c.jpg)
18
+ Figure 1: Overview of the proposed method, which we denote as FPD. In addition to class-level prototypes, we distill representative detailed features into fine-grained prototypes, enabling more robust novel object detection.
19
+
20
+ Therefore, the key question is how to transfer the knowledge learnt from base classes to the novel classes. Transfer learning based methods (Wang et al. 2020; Cao et al. 2021; Qiao et al. 2021) focus on fine-tuning the model more effectively. They use the same architecture as generic object detection, additionally with advanced techniques such as parameter freezing and gradient decoupling to improve performance. Meta-learning based methods (Kang et al. 2019; Wang, Ramanan, and Hebert 2019; Yan et al. 2019; Han et al. 2023), instead, follow the idea: learn how to learn the new tasks rapidly. As illustrated in Figure 2, an additional support branch is incorporated to encode support images into class-level prototypes, which function as dynamic parameters to interact with the query branch. In this way, the connections between novel examples and the model predictions are enhanced, thereby improving the generalization ability and learning the new tasks more quickly.
21
+
22
+ This work studies the meta-learning based FSOD and aims at realizing a more effective method. In general, features from the two branches are fused on top of the framework to make
23
+
24
+ ![](images/a1b9a117bc5c26cd7a03cbb3296495b0916b60260e9692f1c56ae7b5727818fa.jpg)
25
+ Figure 2: The overall architecture of our method. FFA and NLF are proposed to improve the performance.
26
+
27
+ the final prediction (Kang et al. 2019; Yan et al. 2019; Xiao and Marlet 2020), while most of the layers are separated and do not exchange information. This hinders the model from learning the correlations among detailed features especially in data-scarce scenarios.
28
+
29
+ DCNet (Hu et al. 2021) proposes to directly match the mid-level support features into query features in a pixel-wise manner, which enables the relation modeling of detailed local context. However, this approach has limitations in terms of both effect and implementation. First, the mid-level features cover an extensive range of patterns and are intricate and complex, so the model might struggle to capture the most critical details. Second, direct matching between dense feature maps is inefficient and costs more computational resources. Third, this approach has difficulty transitioning seamlessly from the training phase to the testing phase, as it cannot integrate the mid-level support features across different shots to boost performance.
30
+
31
+ To address the aforementioned issues, we propose a novel Fine-Grained Feature Aggregation (FFA) module to aggregate the mid-level features. As illustrated in Figure 3, different from DCNet, we propose to distill features into fine-grained prototypes. These prototypes, which reside in a highly refined and reduced feature space, embody the most distinctive and representative details of the support images. Specifically, we employ a set of embeddings following the object queries in DETR (Carion et al. 2020) to distill prototypes. Rather than being encoded with positional information and representing specific objects, the embeddings here function within the feature space and thereby are denoted as feature queries. We give each class a unique set of feature queries to distill prototypes independently. It can avoid confusion and is a key factor for our method to work. The distilled prototypes are then assigned into query feature map based on the matching results, modeling the fine-grained relations and highlighting the features with similar details.
32
+
33
+ The proposed FFA enables a more effective feature aggregation by focusing on the key information encapsulated within prototypes. This method also reduces the computational complexity by avoiding the directly matching between
34
+
35
+ dense feature maps. Furthermore, it can naturally transition into the testing phase through a weighted sum of prototypes across different shots, preserving the full potential derived from the training phase.
36
+
37
+ In terms of high-level feature aggregation, we revisit the previous methods and propose two improvements from different perspectives. First, we propose the Balanced Class-Agnostic Sampling (B-CAS) strategy to control the ratio of support classes aggregated with query features. Meta R-CNN (Yan et al. 2019) adopts a simple class-specific aggregation scheme where only features of the same class are aggregated, while VFA (Han et al. 2023) proposes a class-agnostic aggregation scheme which randomly selects the support classes to reduce class bias. Our insight is that different support classes serve as positive and negative samples, so balanced sampling is required to keep the most important positive samples from being overwhelmed. Second, many works (Kang et al. 2019; Yan et al. 2019; Han et al. 2023) employ element-wise multiplication to explore the relations within the same classes. However, it is not compatible with our proposed B-CAS, which incorporates feature aggregation between different classes. To solve this issue, we propose a stronger Non-Linear Fusion (NLF) module motivated by (Han et al. 2022a; Xiao and Marlet 2020) to fuse features more effectively. Our contributions can be summarized as follows:
38
+
39
+ - We propose to distill support features into fine-grained prototypes before being integrated into query feature maps, which can help the model grasp the key information. They are implemented in the Fine-Grained Feature Aggregation (FFA) module.
40
+ - We propose Balanced Class-Agnostic Sampling (B-CAS) strategy and Non-Linear Fusion (NLF) module. They are complementary to each other and can fuse high-level features more effectively.
41
+ - Extensive experiments illustrate that our method significantly improves the performance and achieves state-of-the-art results on the two widely used FSOD benchmarks.
42
+
43
+ # Related Works
44
+
45
+ # General Object Detection
46
+
47
+ Deep learning based object detection has been extensively studied in recent years. The well-established object detectors can be categorized into one-stage and two-stage methods. One-stage detectors (Redmon et al. 2016; Liu et al. 2016; Lin et al. 2017b) directly make predictions upon the CNN feature maps, while two-stage detectors (Ren et al. 2017; He et al. 2017) additionally employ a Region Proposal Network (RPN) to generate object proposals, which are further refined into the final predictions. Both of them require predefined dense anchors to generate candidates.
48
+
49
+ Recently, anchor-free detectors DETR (Carion et al. 2020) and Deformable DETR (Zhu et al. 2020) have been developed and are drawing more attention. They use a CNN backbone combining with Transformer encoder-decoders (Vaswani et al. 2017) for end-to-end object detection. A set of object queries are proposed to replace the anchor boxes. They will be refined into the detected objects layer by layer through Transformer decoders.
50
+
51
+ We employ the two-stage Faster R-CNN (Ren et al. 2017) framework to build our FSOD detector, and draw inspirations from DETR (Carion et al. 2020) into our approach.
52
+
53
+ # Few-Shot Object Detection
54
+
55
+ Few-Shot Object Detection (FSOD), which studies the detection task in data-scarce situations, has been attracting increased interest recently. LSTD (Chen et al. 2018) first proposes a transfer learning based approach to detect novel objects in an FSOD data setting. TFA (Wang et al. 2020) utilizes a cosine similarity based classifier and only fine-tunes the last layer with novel examples, achieving results comparable to other more complex methods. DeFRCN (Qiao et al. 2021) introduces an advanced gradient decoupling technique into the Faster R-CNN framework and integrates an offline prototypical calibration block to refine the classification results, achieving impressive performance.
56
+
57
+ Meta-learning is also a promising paradigm for FSOD. FSRW (Kang et al. 2019) proposes to re-weight the YOLOv2 feature maps along the channel dimension using the proposed reweighting vectors, which can highlight the relevant features. Meta R-CNN (Yan et al. 2019) adopts the Faster R-CNN framework to build a two-branch siamese network. It processes query and support images in parallel to produce the Region of Interest (RoI) features and class prototypes, which are then fused to make predictions. Instead of learning a softmax-based classifier for all classes, (Han et al. 2022a) constructs a meta-classifier through feature alignment and non-linear matching. It calculates the similarity between query-support feature maps, producing binary classification results for novel classes. VFA (Han et al. 2023) introduces variational feature learning into Meta R-CNN, further boosting its performance. Recently, some works have incorporated meta-learning into other advanced frameworks. Meta-DETR (Zhang et al. 2022) employs Deformable DETR (Zhu et al. 2020) to build a few-shot detector. (Han et al. 2022b) utilizes PVT (Wang et al. 2021) to construct
58
+
59
+ a fully cross transformer for few-shot detection. They all achieve remarkable results.
60
+
61
+ A two-stage training paradigm has been widely adopted in both transfer learning and meta-learning based methods due to its effectiveness. At the base training stage, the model is trained on abundant base class examples, while at the fine-tuning stage, the model is fine-tuned only with $K$ -shot examples for each base and novel class.
62
+
63
+ Our approach is based on Meta R-CNN and we propose to distill fine-grained prototypes for effectively exploiting the relations between detailed features.
64
+
65
+ # Our Approach
66
+
67
+ In this section, we first introduce the task definition and the overall architecture of our model. Then we will elaborate on the fine-grained and high-level feature aggregation.
68
+
69
+ # Task Definition
70
+
71
+ We adopt the standard FSOD setting following (Kang et al. 2019; Wang et al. 2020). Specifically, given a dataset $\mathcal{D}$ with two sets of classes $C_{base}$ and $C_{novel}$ , where each class in $C_{base}$ has abundant training data while each class in $C_{novel}$ has only $K$ -shot annotated objects, FSOD aims at detecting the objects of $C_{base} \cup C_{novel}$ using the detector trained on $\mathcal{D}$ . Please note that $C_{base} \cap C_{novel} = \emptyset$ .
72
+
73
+ # The Model Architecture
74
+
75
+ As illustrated in Figure 2, our model is based on Meta R-CNN, which is a siamese network with a query branch and a support branch that share the same backbone. Typically, we use the first three stages of a ResNet-50/101 backbone (He et al. 2016) to extract mid-level features for both query images and support images. Then our proposed FFA module is employed to distill the fine-grained prototypes and assign them into the query branch. Subsequently, we use the last stage (i.e., stage four) of the backbone to extract high-level features for both branches, which produces RoI features and class-level prototypes, respectively. They are further processed by the proposed NLF module, followed by the detection head to make the final prediction. We would like to mention that the RPN is fed with the query features that have already interacted with the support branch, which gives the RPN a greater ability to learn to identify new instances.
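+ To make the data flow above easier to follow, the snippet below gives a structural sketch of the two-branch pipeline in PyTorch. It is not the authors' released code: all submodules (`backbone_stage1_3`, `backbone_stage4`, `ffa`, `rpn`, `roi_align`, `nlf`, `head`) are placeholders injected by the caller, and only the ordering of operations follows the description above.
+
+ ```python
+ from torch import nn
+
+ class FPDPipelineSketch(nn.Module):
+     """Illustrative forward pass of the described architecture; every
+     submodule is a stand-in supplied by the caller."""
+
+     def __init__(self, backbone_stage1_3, backbone_stage4, ffa, rpn, roi_align, nlf, head):
+         super().__init__()
+         self.stage1_3 = backbone_stage1_3  # shared ResNet stages 1-3 (mid-level features)
+         self.stage4 = backbone_stage4      # shared ResNet stage 4 (high-level features)
+         self.ffa = ffa                     # Fine-Grained Feature Aggregation
+         self.rpn = rpn                     # region proposal network
+         self.roi_align = roi_align         # RoI feature extraction
+         self.nlf = nlf                     # Non-Linear Fusion of RoI features and prototypes
+         self.head = head                   # classification / box regression head
+
+     def forward(self, query_img, support_imgs, support_labels):
+         # Mid-level features from the shared backbone.
+         q_mid = self.stage1_3(query_img)
+         s_mid = self.stage1_3(support_imgs)
+
+         # FFA distills fine-grained prototypes from the support features and
+         # assigns them to the query feature map (Eqs. 1-5).
+         q_mid = self.ffa(q_mid, s_mid, support_labels)
+
+         # The RPN sees query features that already interacted with the support branch.
+         proposals = self.rpn(q_mid)
+
+         # High-level features: RoI features and class-level prototypes.
+         q_high = self.stage4(q_mid)
+         s_high = self.stage4(s_mid)
+         roi_feats = self.roi_align(q_high, proposals)
+         class_prototypes = s_high.mean(dim=(-2, -1))  # global average pooling
+
+         # B-CAS pairing and NLF fusion happen here, followed by the detection head.
+         fused = self.nlf(roi_feats, class_prototypes)
+         return self.head(fused, proposals)
+ ```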
76
+
77
+ # Fine-Grained Feature Aggregation
78
+
79
+ The Fine-Grained Feature Aggregation (FFA) module is the key component of our proposed method, which is a class-agnostic aggregator that matches all classes of support features into query features. It models inter-class relations in the early stage of the detection framework where the features are low-level and have more detailed information. Instead of directly performing feature matching, we propose to distill the representative support features into fine-grained prototypes. These prototypes are then assigned to query feature maps based on the matching results. FFA can help the model distinguish foreground from background and learn the similarities and differences between object classes. We will elaborate on the
80
+
81
+ prototypes distillation and feature assignment in the following subsections. We also discuss our strategy to transfer this method to novel classes, as well as test-time natural integration of prototypes across different shots.
82
+
83
+ Prototypes Distillation Inspired by DETR, we incorporate a new component which is a set of learnable embeddings to distill prototypes. Different from object queries in DETR, which are encoded with positional information and are refined into a specific instance layer by layer, the embeddings here work as a guidance to refine the entire support feature space into a set of representative features. It can filter out the noise and ease the training. We refer to these embeddings as feature queries.
84
+
85
+ We employ the cross-attention mechanism to perform the prototypes distillation. Specifically, given a support feature map $X_{s} \in \mathbb{R}^{hw \times d}$ and a set of feature queries $q \in \mathbb{R}^{n \times d'}$ , where $h$ and $w$ denote the height and width, $d$ and $d'$ are the feature dimensions, and $n$ is the number of feature queries, the affinity matrix is calculated through a matching operation:
86
+
87
+ $$
88
+ A = \operatorname {s o f t m a x} \left(\frac {q \left(X _ {s} W\right) ^ {T}}{\sqrt {d ^ {\prime}}}\right) \tag {1}
89
+ $$
90
+
91
+ where $W$ is a linear projection that projects $X_{s}$ into the latent space with dimensionality $d^{\prime}$ , and the softmax function is performed along the $hw$ dimension. Subsequently, the fine-grained prototypes can be distilled from $X_{s}$ via:
92
+
93
+ $$
94
+ p = A X _ {s} + E _ {c l s} \tag {2}
95
+ $$
96
+
97
+ where the affinity matrix is applied directly to the support feature map. We do not project $X_{s}$, so that the feature space remains unchanged. An additional class embedding $E_{cls}$ is added to retain the class information.
98
+
99
+ We would like to mention that each class has its exclusive feature queries. This is different from object queries in DETR and is a crucial factor for our method to work. It means that $q$ is the feature queries of one class and is part of $Q \in \mathbb{R}^{nc \times d}$ , where $Q$ denotes the feature queries of all classes. This setting makes feature queries class-relevant and avoids them getting overwhelmed and confused by too many object classes.
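+ As a concrete reading of Eqs. (1)-(2), the following minimal PyTorch sketch distills the fine-grained prototypes of one class from a single support image. The function name `distill_prototypes` and the toy sizes are ours, and the class embedding is assumed to broadcast over the $n$ prototypes; this is an illustration, not the released implementation.
+
+ ```python
+ import math
+ import torch
+
+ def distill_prototypes(q, X_s, W, E_cls):
+     """Distill fine-grained prototypes for one class (Eqs. 1-2).
+
+     q     : (n, d')  feature queries of this class
+     X_s   : (h*w, d) support feature map, flattened spatially
+     W     : (d, d')  linear projection applied to the support features
+     E_cls : (1, d)   class embedding retaining the class identity
+     """
+     d_prime = q.shape[-1]
+     # Eq. (1): affinity between feature queries and projected support features,
+     # softmax taken over the spatial (h*w) dimension.
+     A = torch.softmax(q @ (X_s @ W).T / math.sqrt(d_prime), dim=-1)  # (n, h*w)
+     # Eq. (2): prototypes are weighted sums of the *unprojected* support
+     # features (so they stay in the original feature space) plus E_cls.
+     return A @ X_s + E_cls                                           # (n, d)
+
+ # Toy usage with assumed sizes: n=5 queries, d=1024, d'=256, a 14x14 support map.
+ p = distill_prototypes(torch.randn(5, 256), torch.randn(196, 1024),
+                        torch.randn(1024, 256), torch.randn(1, 1024))
+ print(p.shape)  # torch.Size([5, 1024])
+ ```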
100
+
101
+ Prototypes Assignment We densely match the fine-grained prototypes into the query feature map to achieve the prototypes assignment. Considering that the background area should not be matched to any prototype that represents salient object features, we incorporate a set of embeddings to serve as background prototypes. We also use the cross-attention mechanism to assign prototypes. Specifically, given a query feature map $X_{q} \in \mathbb{R}^{HW \times d}$ , prototypes assignment is performed via:
102
+
103
+ $$
104
+ A ^ {\prime} = \operatorname {s o f t m a x} \left(\frac {\left(X _ {q} W ^ {\prime}\right) \left(P W ^ {\prime}\right) ^ {T}}{\sqrt {d ^ {\prime}}}\right) \tag {3}
105
+ $$
106
+
107
+ $$
108
+ P = \operatorname {c o n c a t} \left(p _ {1}, p _ {2}, \dots , p _ {c}, p _ {b g}\right) \tag {4}
109
+ $$
110
+
111
+ $$
112
+ X _ {q} ^ {\prime} = X _ {q} + \alpha \cdot A ^ {\prime} P \tag {5}
113
+ $$
114
+
115
+ where $P \in \mathbb{R}^{(nc + n_{bg}) \times d}$ denotes the prototypes of the $c$ support classes together with $n_{bg}$ additional background prototypes, and $W'$ is a linear projection shared by $X_q$ and $P$ which projects them into the
116
+
117
+ ![](images/8e854cf5f5dcd793fccc5c5b2fbaa2121f03a8f27ba0c7a807ebf3f06b54dc4b.jpg)
118
+ Figure 3: The architecture of the Fine-Grained Feature Aggregation (FFA) module. It can be divided into Prototypes Distillation and Prototypes Assignment.
119
+
120
+ same latent space. The prototypes are assigned to the query feature map based on the affinity matrix $A'$ , which produces the aggregated query features. The scalar $\alpha$ is a learnable parameter initialized to zero to help stabilize the training.
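+ The assignment step of Eqs. (3)-(5) can be sketched in the same style. The snippet below is our reading of those equations (softmax taken over the prototype dimension), with the concatenation of Eq. (4) assumed to have been done beforehand; it is illustrative rather than the released code.
+
+ ```python
+ import math
+ import torch
+
+ def assign_prototypes(X_q, P, W_proj, alpha):
+     """Assign fine-grained prototypes to the query feature map (Eqs. 3-5).
+
+     X_q    : (H*W, d)         query feature map, flattened spatially
+     P      : (n*c + n_bg, d)  concatenated class and background prototypes (Eq. 4)
+     W_proj : (d, d')          projection shared by X_q and P
+     alpha  : scalar tensor    learnable gate, initialized to zero
+     """
+     d_prime = W_proj.shape[-1]
+     # Eq. (3): affinity between projected query locations and projected prototypes.
+     A = torch.softmax((X_q @ W_proj) @ (P @ W_proj).T / math.sqrt(d_prime), dim=-1)
+     # Eq. (5): residual update of the query features with the matched prototypes.
+     return X_q + alpha * (A @ P)
+
+ # Toy usage: a 50x38 query map, 20 classes x 5 queries, 3 background prototypes.
+ X_q_new = assign_prototypes(torch.randn(50 * 38, 1024), torch.randn(20 * 5 + 3, 1024),
+                             torch.randn(1024, 256), torch.tensor(0.0))
+ print(X_q_new.shape)  # torch.Size([1900, 1024])
+ ```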
121
+
122
+ Transferring to Novel Classes At the base training stage, the feature queries of base classes are randomly initialized and well trained. However, at the fine-tuning stage, training the feature queries from scratch becomes challenging due to the limited novel class examples, which means that an effective knowledge transfer method is required. To address this issue, we propose to duplicate the most compatible feature queries from the base classes to serve as those in the novel classes. To be specific, given feature queries of base classes $Q \in \mathbb{R}^{nc \times d'}$ and support feature map of a novel class $X_{ns} \in \mathbb{R}^{hw \times d}$ , the compatibility matrix and the weight of each feature query can be obtained via:
123
+
124
+ $$
125
+ C = \operatorname {t o p k} \left(Q \left(X _ {n s} W\right) ^ {T}\right) \tag {6}
126
+ $$
127
+
128
+ $$
129
+ w e i g h t _ {i} = \sum_ {j = 0} ^ {k} C _ {i j}, i = 1, 2, \dots , n c \tag {7}
130
+ $$
131
+
132
+ where $\text{topk}$ is performed along the $hw$ dimension to filter out irrelevant locations. We select the $n$ feature queries with the largest weights for each novel class. Instead of sharing the same feature queries with the base classes, they are created as duplicates and can be trained independently.
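+ A minimal sketch of this transfer step (Eqs. 6-7) is given below. The helper name `select_novel_queries`, the choice of `k`, and the toy sizes are ours; the released code may organize the computation differently.
+
+ ```python
+ import torch
+
+ def select_novel_queries(Q_base, X_ns, W, n_per_class=5, k=5):
+     """Duplicate the most compatible base-class feature queries for a novel class.
+
+     Q_base : (n*c_base, d') all base-class feature queries
+     X_ns   : (h*w, d)       support feature map of the novel class
+     W      : (d, d')        projection used during distillation
+     """
+     # Eq. (6): compatibility between every base query and the novel support map,
+     # keeping only the top-k spatial locations per query.
+     scores = Q_base @ (X_ns @ W).T              # (n*c_base, h*w)
+     C = scores.topk(k, dim=-1).values           # (n*c_base, k)
+     # Eq. (7): per-query weight is the sum over the retained locations.
+     weight = C.sum(dim=-1)                      # (n*c_base,)
+     # Duplicate the n_per_class queries with the largest weights.
+     idx = weight.topk(n_per_class).indices
+     return Q_base[idx].clone(), weight
+
+ # Toy usage: 15 base classes x 5 queries of dimension 256, a 14x14 support map.
+ Q_novel, w = select_novel_queries(torch.randn(75, 256), torch.randn(196, 1024),
+                                   torch.randn(1024, 256))
+ print(Q_novel.shape)  # torch.Size([5, 256])
+ ```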
133
+
134
+ <table><tr><td rowspan="2">Method / shot</td><td colspan="5">Novel Set 1</td><td colspan="5">Novel Set 2</td><td colspan="5">Novel Set 3</td></tr><tr><td>1</td><td>2</td><td>3</td><td>5</td><td>10</td><td>1</td><td>2</td><td>3</td><td>5</td><td>10</td><td>1</td><td>2</td><td>3</td><td>5</td><td>10</td></tr><tr><td colspan="16">Single run results:</td></tr><tr><td>FSRW (Kang et al. 2019)</td><td>14.8</td><td>15.5</td><td>26.7</td><td>33.9</td><td>47.2</td><td>15.7</td><td>15.3</td><td>22.7</td><td>30.1</td><td>40.5</td><td>21.3</td><td>25.6</td><td>28.4</td><td>42.8</td><td>45.9</td></tr><tr><td>Meta R-CNN (Yan et al. 2019)</td><td>19.9</td><td>25.5</td><td>35.0</td><td>45.7</td><td>51.5</td><td>10.4</td><td>19.4</td><td>29.6</td><td>34.8</td><td>45.4</td><td>14.3</td><td>18.2</td><td>27.5</td><td>41.2</td><td>48.1</td></tr><tr><td>TFA w/ cos (Wang et al. 2020)</td><td>39.8</td><td>36.1</td><td>44.7</td><td>55.7</td><td>56.0</td><td>23.5</td><td>26.9</td><td>34.1</td><td>35.1</td><td>39.1</td><td>30.8</td><td>34.8</td><td>42.8</td><td>49.5</td><td>49.8</td></tr><tr><td>MPSR (Wu et al. 2020)</td><td>41.7</td><td>42.5</td><td>51.4</td><td>55.2</td><td>61.8</td><td>24.4</td><td>29.3</td><td>39.2</td><td>39.9</td><td>47.8</td><td>35.6</td><td>41.8</td><td>42.3</td><td>48.0</td><td>49.7</td></tr><tr><td>Retentive (Fan et al. 2021)</td><td>42.4</td><td>45.8</td><td>45.9</td><td>53.7</td><td>56.1</td><td>21.7</td><td>27.8</td><td>35.2</td><td>37.0</td><td>40.3</td><td>30.2</td><td>37.6</td><td>43.0</td><td>49.7</td><td>50.1</td></tr><tr><td>FSCE (Sun et al. 2021)</td><td>44.2</td><td>43.8</td><td>51.4</td><td>61.9</td><td>63.4</td><td>27.3</td><td>29.5</td><td>43.5</td><td>44.2</td><td>50.2</td><td>37.2</td><td>41.9</td><td>47.5</td><td>54.6</td><td>58.5</td></tr><tr><td>Meta FR-CNN (Han et al. 2022a)</td><td>43.0</td><td>54.5</td><td>60.6</td><td>66.1</td><td>65.4</td><td>27.7</td><td>35.5</td><td>46.1</td><td>47.8</td><td>51.4</td><td>40.6</td><td>46.4</td><td>53.4</td><td>59.9</td><td>58.6</td></tr><tr><td>Meta-DETR (Zhang et al. 2022)</td><td>40.6</td><td>51.4</td><td>58.0</td><td>59.2</td><td>63.6</td><td>37.0</td><td>36.6</td><td>43.7</td><td>49.1</td><td>54.6</td><td>41.6</td><td>45.9</td><td>52.7</td><td>58.9</td><td>60.6</td></tr><tr><td>FCT (Han et al. 2022b)</td><td>49.9</td><td>57.1</td><td>57.9</td><td>63.2</td><td>67.1</td><td>27.6</td><td>34.5</td><td>43.7</td><td>49.2</td><td>51.2</td><td>39.5</td><td>54.7</td><td>52.3</td><td>57.0</td><td>58.7</td></tr><tr><td>VFA (Han et al. 2023)</td><td>57.7</td><td>64.6</td><td>64.7</td><td>67.2</td><td>67.4</td><td>41.4</td><td>46.2</td><td>51.1</td><td>51.8</td><td>51.6</td><td>48.9</td><td>54.8</td><td>56.6</td><td>59.0</td><td>58.9</td></tr><tr><td>FPD(Ours)</td><td>48.1</td><td>62.2</td><td>64.0</td><td>67.6</td><td>68.4</td><td>29.8</td><td>43.2</td><td>47.7</td><td>52.0</td><td>53.9</td><td>44.9</td><td>53.8</td><td>58.1</td><td>61.6</td><td>62.9</td></tr><tr><td colspan="16">Average results over multiple runs:</td></tr><tr><td>FSDetView (Xiao and Marlet 2020)</td><td>24.2</td><td>35.3</td><td>42.2</td><td>49.1</td><td>57.4</td><td>21.6</td><td>24.6</td><td>31.9</td><td>37.0</td><td>45.7</td><td>21.2</td><td>30.0</td><td>37.2</td><td>43.8</td><td>49.6</td></tr><tr><td>DCNet (Hu et al. 2021)</td><td>33.9</td><td>37.4</td><td>43.7</td><td>51.1</td><td>59.6</td><td>23.2</td><td>24.8</td><td>30.6</td><td>36.7</td><td>46.6</td><td>32.3</td><td>34.9</td><td>39.7</td><td>42.6</td><td>50.7</td></tr><tr><td>Meta-DETR (Zhang et al. 
2022)</td><td>35.1</td><td>49.0</td><td>53.2</td><td>57.4</td><td>62.0</td><td>27.9</td><td>32.3</td><td>38.4</td><td>43.2</td><td>51.8</td><td>34.9</td><td>41.8</td><td>47.1</td><td>54.1</td><td>58.2</td></tr><tr><td>DeFRCN (Qiao et al. 2021)</td><td>40.2</td><td>53.6</td><td>58.2</td><td>63.6</td><td>66.5</td><td>29.5</td><td>39.7</td><td>43.4</td><td>48.1</td><td>52.8</td><td>35.0</td><td>38.3</td><td>52.9</td><td>57.7</td><td>60.8</td></tr><tr><td>FCT (Han et al. 2022b)</td><td>38.5</td><td>49.6</td><td>53.5</td><td>59.8</td><td>64.3</td><td>25.9</td><td>34.2</td><td>40.1</td><td>44.9</td><td>47.4</td><td>34.7</td><td>43.9</td><td>49.3</td><td>53.1</td><td>56.3</td></tr><tr><td>VFA (Han et al. 2023)</td><td>47.4</td><td>54.4</td><td>58.5</td><td>64.5</td><td>66.5</td><td>33.7</td><td>38.2</td><td>43.5</td><td>48.3</td><td>52.4</td><td>43.8</td><td>48.9</td><td>53.3</td><td>58.1</td><td>60.0</td></tr><tr><td>FPD(Ours)</td><td>41.5</td><td>52.8</td><td>58.4</td><td>64.9</td><td>67.1</td><td>28.2</td><td>38.7</td><td>43.8</td><td>50.3</td><td>53.6</td><td>34.9</td><td>48.6</td><td>54.0</td><td>58.4</td><td>61.5</td></tr></table>
135
+
136
+ Table 1: FSOD results (AP50) on the three splits of the Pascal VOC dataset. We report both single-run and multiple-run results. Bold and underline indicate the best and the second-best results.
137
+
138
+ Test-Time Natural Integration A simple way to integrate fine-grained prototypes across different shots is to take their average. However, the detailed feature represented by a feature query may not appear in some support images, so direct averaging can hurt performance. Therefore, we compute a weighted sum using the aforementioned weight. Specifically, given $K$ support images of a class, which produce $K$ prototypes, the integration is performed via:
139
+
140
+ $$
141
+ p_{avg} = \sum_{s=1}^{K} \mathrm{weight}_{s}^{*} \cdot p_{s} \tag{8}
142
+ $$
143
+
144
+ where $\mathrm{weight}^{*}$ denotes the weight after a softmax operation across the different shots, and $p_{avg}$ is the integrated prototype. This approach effectively filters out prototypes that are not compatible with the current feature query, improving the robustness of our detector.
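+
+ For concreteness, a minimal PyTorch sketch of Eq. (8) is given below; the tensor shapes and variable names are illustrative assumptions.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def integrate_prototypes(prototypes, raw_weights):
+     """Test-time integration of per-shot fine-grained prototypes, Eq. (8).
+
+     prototypes:  (K, d) one fine-grained prototype per support shot.
+     raw_weights: (K,)   matching weight of each shot for the current
+                  feature query, before the softmax.
+     """
+     weights = F.softmax(raw_weights, dim=0)             # weight*_s across shots
+     return (weights.unsqueeze(-1) * prototypes).sum(0)  # p_avg, shape (d,)
+
+ # Toy usage: 5-shot support with 256-dimensional prototypes.
+ p_avg = integrate_prototypes(torch.randn(5, 256), torch.randn(5))
+ ```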
145
+
146
+ # High-Level Feature Aggregation
147
+
148
+ Feature aggregation between RoI features and class-level prototypes is a crucial step for meta-learning based FSOD, where the high-level semantic information is aligned to make the final prediction. We revisit the conventional methods and propose two improvements from different perspectives.
149
+
150
+ Balanced Class-Agnostic Sampling Meta R-CNN adopts a simple class-specific aggregation scheme in which RoI features are aggregated only with prototypes of the same class, whereas VFA proposes a class-agnostic scheme that aggregates RoI features with randomly selected class prototypes to reduce class bias. Nonetheless, we argue that completely random sampling might distract
151
+
152
+ the model from focusing on the most crucial positive prototypes and thus hurt performance. Instead, we propose a balanced sampling strategy named B-CAS, which selects a pair of positive and negative prototypes to aggregate with RoI features in parallel. B-CAS not only enables relation modeling between different classes but also keeps the positive prototype from being overwhelmed by too many negative examples, and therefore learns high-level semantic relations more effectively.
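+
+ A minimal sketch of the B-CAS sampling step is given below. The one-positive-plus-one-negative pairing follows the description above, while the data structures and helper name are illustrative assumptions.
+
+ ```python
+ import random
+ import torch
+
+ def bcas_sample(roi_labels, prototypes):
+     """For each RoI, pick its positive prototype and one random negative.
+
+     roi_labels: list of class ids, one per RoI.
+     prototypes: dict {class_id: (d,) tensor} of class-level prototypes.
+     """
+     class_ids = list(prototypes.keys())
+     pos, neg = [], []
+     for label in roi_labels:
+         pos.append(prototypes[label])
+         neg.append(prototypes[random.choice([c for c in class_ids if c != label])])
+     return torch.stack(pos), torch.stack(neg)
+
+ # Toy usage: 3 classes, 4 RoIs, 2048-dimensional prototypes.
+ protos = {c: torch.randn(2048) for c in range(3)}
+ pos_p, neg_p = bcas_sample([0, 2, 1, 0], protos)
+ ```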
153
+
154
+ (Fan et al. 2020) employs a more complex training strategy that divides training pairs into three types and maintains a 1:2:1 ratio. Additionally, a matching loss is computed to align RoI features with prototypes. However, we find that this instead hurts performance. A plausible reason is that FFA introduces asymmetry between the two branches, making the matching loss no longer beneficial. Consequently, the simple yet effective B-CAS is adopted in our experiments.
155
+
156
+ Non-Linear Fusion Module Many previous meta-learning based methods use element-wise multiplication to handle feature fusion. We argue that while this approach learns similarities within the same class effectively, it struggles to capture differences between classes, and is therefore not compatible with the proposed B-CAS. To solve this problem, we employ a novel non-linear fusion network following (Han et al. 2022a; Xiao and Marlet 2020), with modifications.
157
+
158
+ Specifically, the features obtained by element-wise multiplication, subtraction and concatenation are processed independently to refine their relations into new features. They are then concatenated with the vanilla RoI features and further refined before being fed into the detection head. Given the RoI feature $f_{roi} \in \mathbb{R}^{1 \times 2d}$ and the class prototype $p_{cls} \in \mathbb{R}^{1 \times 2d}$, the aggregation
159
+
160
+ <table><tr><td rowspan="2"></td><td rowspan="2">Method</td><td rowspan="2">Framework</td><td colspan="2">shot</td></tr><tr><td>10</td><td>30</td></tr><tr><td colspan="5">Single run results:</td></tr><tr><td rowspan="5">T</td><td>TFA w/ cos (Wang et al. 2020)</td><td>FR-CNN</td><td>10.0</td><td>13.7</td></tr><tr><td>Retentive (Fan et al. 2021)</td><td>FR-CNN</td><td>10.5</td><td>13.8</td></tr><tr><td>FSCE (Sun et al. 2021)</td><td>FR-CNN</td><td>11.9</td><td>16.4</td></tr><tr><td>FADI (Cao et al. 2021)</td><td>FR-CNN</td><td>12.2</td><td>16.1</td></tr><tr><td>DeFRCN (Qiao et al. 2021)</td><td>FR-CNN</td><td>18.5</td><td>22.6</td></tr><tr><td>M*</td><td>FCT (Han et al. 2022b)</td><td>Transformer</td><td>17.1</td><td>21.4</td></tr><tr><td rowspan="6">M</td><td>FSRW (Kang et al. 2019)</td><td>YOLOv2</td><td>5.6</td><td>9.1</td></tr><tr><td>Meta R-CNN (Yan et al. 2019)</td><td>FR-CNN</td><td>8.7</td><td>12.4</td></tr><tr><td>FSDetView (Xiao and Marlet 2020)</td><td>FR-CNN</td><td>12.5</td><td>14.7</td></tr><tr><td>Meta FR-CNN (Han et al. 2022a)</td><td>FR-CNN</td><td>12.7</td><td>16.6</td></tr><tr><td>VFA (Han et al. 2023)</td><td>FR-CNN</td><td>16.2</td><td>18.9</td></tr><tr><td>FPD(ours)</td><td>FR-CNN</td><td>16.5</td><td>20.1</td></tr><tr><td colspan="5">Average results over multiple runs:</td></tr><tr><td rowspan="2">T</td><td>TFA w/ cos (Wang et al. 2020)</td><td>FR-CNN</td><td>9.1</td><td>12.1</td></tr><tr><td>DeFRCN (Qiao et al. 2021)</td><td>FR-CNN</td><td>16.8</td><td>21.2</td></tr><tr><td rowspan="2">M*</td><td>FCT (Han et al. 2022b)</td><td>Transformer</td><td>15.3</td><td>20.2</td></tr><tr><td>Meta-DETR (Zhang et al. 2022)</td><td>Def DETR</td><td>19.0</td><td>22.2</td></tr><tr><td rowspan="4">M</td><td>FSDetView (Xiao and Marlet 2020)</td><td>FR-CNN</td><td>10.7</td><td>15.9</td></tr><tr><td>DCNet (Hu et al. 2021)</td><td>FR-CNN</td><td>12.8</td><td>18.6</td></tr><tr><td>VFA (Han et al. 2023)</td><td>FR-CNN</td><td>15.9</td><td>18.4</td></tr><tr><td>FPD(ours)</td><td>FR-CNN</td><td>15.9</td><td>19.3</td></tr></table>
161
+
162
+ can be formulated as:
163
+
164
+ $$
165
+ f^{\prime} = \left[ \mathcal{F}_{1}\left(f_{roi} \odot p_{cls}\right), \mathcal{F}_{2}\left(f_{roi} - p_{cls}\right), \mathcal{F}_{3}\left(\left[ f_{roi}, p_{cls} \right]\right), f_{roi} \right] \tag{9}
166
+ $$
167
+
168
+ $$
169
+ f = \mathcal{F}_{agg}\left(f^{\prime}\right) \tag{10}
170
+ $$
171
+
172
+ where $\mathcal{F}_1$, $\mathcal{F}_2$ and $\mathcal{F}_3$ denote independent fully-connected layers, each followed by a ReLU activation, and $\mathcal{F}_{agg}$ denotes a plain fully-connected layer. This formulation provides a stronger capability to thoroughly explore the relations between high-level features. In addition, an exclusive path for the RoI features is reserved to propagate the original RoI information, which reduces the noise introduced by random prototypes and can be used to regress the object location.
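+
+ A minimal PyTorch sketch of Eqs. (9) and (10) is given below; the hidden widths of $\mathcal{F}_1$-$\mathcal{F}_3$ and $\mathcal{F}_{agg}$ are assumptions, since only their layer types are specified above.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class NonLinearFusion(nn.Module):
+     """Sketch of the non-linear fusion module, Eqs. (9)-(10)."""
+
+     def __init__(self, dim):  # dim corresponds to 2d in the paper's notation
+         super().__init__()
+         self.f1 = nn.Sequential(nn.Linear(dim, dim), nn.ReLU())      # product branch
+         self.f2 = nn.Sequential(nn.Linear(dim, dim), nn.ReLU())      # difference branch
+         self.f3 = nn.Sequential(nn.Linear(2 * dim, dim), nn.ReLU())  # concatenation branch
+         self.f_agg = nn.Linear(4 * dim, dim)                         # plain FC aggregation
+
+     def forward(self, f_roi, p_cls):
+         f_prime = torch.cat([
+             self.f1(f_roi * p_cls),
+             self.f2(f_roi - p_cls),
+             self.f3(torch.cat([f_roi, p_cls], dim=-1)),
+             f_roi,                                  # exclusive path for the RoI feature
+         ], dim=-1)                                  # Eq. (9)
+         return self.f_agg(f_prime)                  # Eq. (10)
+
+ # Toy usage: 8 RoIs with 2d = 2048 channels.
+ fused = NonLinearFusion(2048)(torch.randn(8, 2048), torch.randn(8, 2048))
+ ```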
173
+
174
+ # Experiments
175
+
176
+ # Benchmarks
177
+
178
+ We evaluate our method on two widely-used FSOD benchmarks PASCAL VOC (Everingham et al. 2010) and MS COCO (Lin et al. 2014), using exactly the same class partitions and few-shot examples as in (Wang et al. 2020).
179
+
180
+ PASCAL VOC. The 20 PASCAL VOC classes are split into 15 base classes and 5 novel classes. There are three different class partitions for a more comprehensive evaluation. The VOC07 and VOC12 train/val sets are used for training and the VOC07 test set is used for evaluation. The Mean Average Precision at IoU=0.5 (AP50) is reported under $K = \{1, 2, 3, 5, 10\}$ shot settings.
181
+
182
+ Table 2: FSOD results (AP) on the MS COCO dataset. T: Transfer-learning based methods. M: Meta-learning based methods. $\mathbf{M}^{*}$ : Meta-learning with advanced framework.
183
+
184
+ <table><tr><td></td><td>B-CAS</td><td>NLF</td><td>FFA</td><td colspan="3">shot</td></tr><tr><td></td><td></td><td></td><td></td><td>3</td><td>5</td><td>10</td></tr><tr><td>Baseline</td><td></td><td></td><td></td><td>56.7</td><td>58.3</td><td>61.4</td></tr><tr><td rowspan="3">Ours</td><td>✓</td><td></td><td></td><td>61.2</td><td>64.7</td><td>64.9</td></tr><tr><td>✓</td><td>✓</td><td></td><td>62.8</td><td>67.1</td><td>66.3</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>64.0</td><td>67.6</td><td>68.4</td></tr></table>
185
+
186
+ Table 3: Ablation study of different components.
187
+
188
+ MS COCO. For MS COCO, the 20 PASCAL VOC classes are used as novel classes, the other 60 classes are used as base classes. The 5k images from COCO2017 val are used for evaluation and the rest are used for training. We report the AP at IoU=0.5:0.95 under $K = \{10,30\}$ shot settings.
189
+
190
+ # Implementation Details
191
+
192
+ Our method is implemented with MMDetection (Chen et al. 2019). We adopt ResNet-101 (He et al. 2016) pretrained on ImageNet (Russakovsky et al. 2015) as the backbone. A single-scale feature map is used for detection, without FPN (Lin et al. 2017a). We resize the query images to a maximum of $1333 \times 800$ pixels, and the instances cropped from support images are resized to $224 \times 224$ pixels.
193
+
194
+ Our model is trained on two Nvidia RTX 3090 GPUs with a total batch size of 8, using the SGD optimizer. In the base training stage, the model is trained for $20\mathrm{k}$ / $110\mathrm{k}$ iterations on the VOC and COCO datasets, respectively. The learning rate is set to 0.004 and decayed by a factor of 0.1 at the $17\mathrm{k}$ / $92\mathrm{k}$ iteration. In the fine-tuning stage, the learning rate is set to 0.001. We use exactly the same loss functions as Meta R-CNN.
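+
+ For reference, the VOC base-training schedule above roughly corresponds to the following optimizer and step-decay setup (a sketch only; the momentum and weight-decay values are assumptions, and the actual configuration lives in MMDetection config files):
+
+ ```python
+ import torch
+
+ # Placeholder parameters; in practice these come from the detector.
+ params = [torch.nn.Parameter(torch.randn(10))]
+
+ # Base training on VOC: lr 0.004 for 20k iterations, decayed x0.1 at 17k.
+ optimizer = torch.optim.SGD(params, lr=0.004, momentum=0.9, weight_decay=1e-4)
+ scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[17_000], gamma=0.1)
+ # scheduler.step() is called once per training iteration.
+
+ # Fine-tuning stage: the learning rate is lowered to 0.001.
+ finetune_optimizer = torch.optim.SGD(params, lr=0.001, momentum=0.9, weight_decay=1e-4)
+ ```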
195
+
196
+ # Comparison with the State-of-the-Art Methods
197
+
198
+ PASCAL VOC. We show both the single-run results and the average results over multiple runs on PASCAL VOC in Table 1. FPD significantly outperforms previous methods, achieving state-of-the-art performance in most settings. Specifically, FPD outperforms the previous best method by $1.5\%$, $4.4\%$, and $6.8\%$ on the three data splits under the $K = 10$ shot setting, respectively. We notice that under the $K = \{1, 2\}$ shot settings, our method is less effective than VFA, a strong FSOD detector that utilizes a variational autoencoder to estimate class distributions. Our analysis suggests that in extremely data-scarce scenarios it is more challenging for the FFA to capture representative and common features across different shots, so it fails to achieve the expected effect under the $K = \{1, 2\}$ shot settings.
199
+
200
+ MS COCO. Table 2 shows the results on MS COCO. FPD outperforms all of the meta-learning based methods adopting the Faster R-CNN framework. For example, FPD improves performance by $6.3\%$ over the previous best result under the $K = 30$ shot setting, and ranks fourth among all methods. Please note that our method focuses on the three proposed components, without using advanced frameworks or techniques such as DETR, Transformers, or a gradient decoupled layer. Given the challenging nature of the MS COCO dataset, we believe that the performance can be further improved with more refinements.
201
+
202
+ ![](images/7f0ccaea920602384302290e962d86d4ec116290314ce14041fc85efb9b2bfe8.jpg)
203
+ Figure 4: Visualization of the detection results on novel classes.
204
+
205
+ <table><tr><td rowspan="2">Method</td><td rowspan="2">Directly Match</td><td rowspan="2">FFA</td><td colspan="3">shot</td></tr><tr><td>3</td><td>5</td><td>10</td></tr><tr><td>Baseline*</td><td></td><td></td><td>62.8</td><td>67.1</td><td>66.3</td></tr><tr><td rowspan="2">Ours</td><td>✓</td><td></td><td>63.2</td><td>67.0</td><td>67.5</td></tr><tr><td></td><td>✓</td><td>64.0</td><td>67.6</td><td>68.4</td></tr></table>
206
+
207
+ Table 4: Comparison with directly matching.
208
+
209
+ # Ablation Study
210
+
211
+ We conduct comprehensive experiments on Novel Set 1 of PASCAL VOC under the $K = \{3,5,10\}$ shot settings to demonstrate the effectiveness of our proposed method.
212
+
213
+ Effect of Different Components. We show the results with different components in Table 3. B-CAS and NLF together improve performance by about $10\%$ over the baseline. On top of this, our FFA further boosts the results, achieving state-of-the-art performance.
214
+
215
+ Effect of the FFA. FFA differs from DCNet in that it distills fine-grained prototypes to aggregate with the query branch. To demonstrate the superiority of this design, we re-implement the DRD module following DCNet to directly match dense feature maps for aggregation. The experimental results are shown in Table 4. FFA consistently achieves better performance than direct matching, which validates the effectiveness of our method.
216
+
217
+ Effect of Feature Queries. We assign each class a set of feature queries, which provide the key guidance for distilling fine-grained prototypes. The number of feature queries per class is set to 5 by default. Figure 5 shows the effect of this number.
218
+
219
+ Moreover, to explore the underlying working mechanism, we visualize the attention heatmaps of feature queries on support images. As shown in Figure 6, two feature queries from the person category are listed. They tend to focus on specific details, e.g., the head and the hand, which conforms to our expectations. Please note that the generated heatmaps have a resolution of $14 \times 14$ and are therefore not perfectly aligned with the original images.
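+
+ Such a heatmap can be produced with a few lines. The sketch below assumes the feature query attends to the flattened $14 \times 14$ support feature map via softmax attention; the exact attention form and variable names are assumptions.
+
+ ```python
+ import torch
+
+ def query_attention_heatmap(feature_query, support_feat, h=14, w=14):
+     """Attention of one feature query (d,) over a flattened support map (hw, d)."""
+     attn = torch.softmax(support_feat @ feature_query, dim=0)       # (hw,)
+     attn = (attn - attn.min()) / (attn.max() - attn.min() + 1e-6)   # scale to [0, 1]
+     return attn.reshape(h, w)
+
+ heatmap = query_attention_heatmap(torch.randn(1024), torch.randn(14 * 14, 1024))
+ ```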
220
+
221
+ # Visualize Detection Results
222
+
223
+ We show the detection results in Figure 4. The model is trained on Novel Set 3 of PASCAL VOC under the 10-shot
224
+
225
+ ![](images/5d579454c57487418b903b99c3011751c793e09b7c04a1f60b2f03a1e15e9e62.jpg)
226
+ Figure 5: Ablation study on the number of feature queries.
227
+
228
+ ![](images/c1c2f880669506cb635eeb17e44d2080684dde28e1c5fb671bb637072cd667ee.jpg)
229
+ Figure 6: Attention heatmap of feature queries. Please find more discussion and results in Appendix.
230
+
231
+ setting and tested on the VOC07 test set. Many of the novel instances are effectively detected, even though the detected bounding boxes are not perfectly aligned. These results demonstrate the promising potential of our method.
232
+
233
+ # Conclusion
234
+
235
+ This paper studies meta-learning based FSOD. We propose a novel FFA module which distills fine-grained prototypes in addition to class-level ones, enabling more robust novel object detection by focusing on detailed features. We also propose the B-CAS strategy and the NLF module to aggregate high-level features more effectively. Both quantitative and qualitative results demonstrate the effectiveness of our method and the promising prospects of FSOD.
236
+
237
+ # Acknowledgments
238
+
239
+ This work was supported in part by the Overseas Students Science and Technology Activities Project (No. 2018024), by the National Natural Science Foundation of China (No. 61502389), by the Natural Science Basic Research Program of Shaanxi Province, China (No. 2023-JC-YB-508).
240
+
241
+ # References
242
+
243
+ Cao, Y.; Wang, J.; Jin, Y.; Wu, T.; Chen, K.; Liu, Z.; and Lin, D. 2021. Few-Shot Object Detection via Association and Discrimination. In NeurIPS, 16570-16581.
244
+ Carion, N.; Massa, F.; Synnaeve, G.; Usunier, N.; Kirillov, A.; and Zagoruyko, S. 2020. End-to-end object detection with transformers. In ECCV, 213-229.
245
+ Chen, H.; Wang, Y.; Wang, G.; and Qiao, Y. 2018. LSTD: A low-shot transfer detector for object detection. In AAAI, 6066-6073.
246
+ Chen, K.; Wang, J.; Pang, J.; Cao, Y.; Xiong, Y.; Li, X.; Sun, S.; Feng, W.; Liu, Z.; Xu, J.; Zhang, Z.; Cheng, D.; Zhu, C.; Cheng, T.; Zhao, Q.; Li, B.; Lu, X.; Zhu, R.; Wu, Y.; Dai, J.; Wang, J.; Shi, J.; Ouyang, W.; Loy, C. C.; and Lin, D. 2019. MMDetection: Open MMLab Detection Toolbox and Benchmark. arXiv preprint arXiv:1906.07155.
247
+ Everingham, M.; Van Gool, L.; Williams, C. K.; Winn, J.; and Zisserman, A. 2010. The pascal visual object classes (voc) challenge. IJCV, 88(2): 303-338.
248
+ Fan, Q.; Zhuo, W.; Tang, C.-K.; and Tai, Y.-W. 2020. Few-shot object detection with attention-RPN and multi-relation detector. In CVPR, 4013-4022.
249
+ Fan, Z.; Ma, Y.; Li, Z.; and Sun, J. 2021. Generalized few-shot object detection without forgetting. In CVPR, 4527-4536.
250
+ Han, G.; Huang, S.; Ma, J.; He, Y.; and Chang, S.-F. 2022a. Meta faster r-cnn: Towards accurate few-shot object detection with attentive feature alignment. In AAAI, 780-789.
251
+ Han, G.; Ma, J.; Huang, S.; Chen, L.; and Chang, S.-F. 2022b. Few-shot object detection with fully cross-transformer. In CVPR, 5321-5330.
252
+ Han, J.; Ren, Y.; Ding, J.; Yan, K.; and Xia, G.-S. 2023. Few-Shot Object Detection via Variational Feature Aggregation. In AAAI, 755-763.
253
+ He, K.; Gkioxari, G.; Dollar, P.; and Girshick, R. 2017. Mask r-cnn. In ICCV, 2961-2969.
254
+ He, K.; Zhang, X.; Ren, S.; and Sun, J. 2016. Deep residual learning for image recognition. In CVPR, 770-778.
255
+ Hu, H.; Bai, S.; Li, A.; Cui, J.; and Wang, L. 2021. Dense relation distillation with context-aware aggregation for few-shot object detection. In CVPR, 10185-10194.
256
+ Kang, B.; Liu, Z.; Wang, X.; Yu, F.; Feng, J.; and Darrell, T. 2019. Few-shot object detection via feature reweighting. In ICCV, 8420-8429.
257
+ Lin, T.-Y.; Dollar, P.; Girshick, R.; He, K.; Hariharan, B.; and Belongie, S. 2017a. Feature pyramid networks for object detection. In CVPR, 2117-2125.
258
+
259
+ Lin, T.-Y.; Goyal, P.; Girshick, R.; He, K.; and Dollar, P. 2017b. Focal Loss for Dense Object Detection. In ICCV, 2980-2988.
260
+ Lin, T.-Y.; Maire, M.; Belongie, S.; Hays, J.; Perona, P.; Ramanan, D.; Dollar, P.; and Zitnick, C. L. 2014. Microsoft coco: Common objects in context. In ECCV, 740-755. Springer.
261
+ Liu, W.; Anguelov, D.; Erhan, D.; Szegedy, C.; Reed, S.; Fu, C.-Y.; and Berg, A. C. 2016. SSD: Single shot multibox detector. In ECCV, 21-37.
262
+ Qiao, L.; Zhao, Y.; Li, Z.; Qiu, X.; Wu, J.; and Zhang, C. 2021. DeFRCN: Decoupled Faster R-CNN for Few-Shot Object Detection. In ICCV, 8681-8690.
263
+ Redmon, J.; Divvala, S.; Girshick, R.; and Farhadi, A. 2016. You only look once: Unified, real-time object detection. In CVPR, 779-788.
264
+ Ren, S.; He, K.; Girshick, R.; and Sun, J. 2017. Faster R-CNN: Towards real-time object detection with region proposal networks. IEEE TPAMI, 39(6): 1137-1149.
265
+ Russakovsky, O.; Deng, J.; Su, H.; Krause, J.; Satheesh, S.; Ma, S.; Huang, Z.; Karpathy, A.; Khosla, A.; Bernstein, M.; et al. 2015. Imagenet large scale visual recognition challenge. IJCV, 115(3): 211-252.
266
+ Sun, B.; Li, B.; Cai, S.; Yuan, Y.; and Zhang, C. 2021. Fsce: Few-shot object detection via contrastive proposal encoding. In CVPR, 7352-7362.
267
+ Vaswani, A.; Shazeer, N.; Parmar, N.; Uszkoreit, J.; Jones, L.; Gomez, A. N.; Kaiser, L.; and Polosukhin, I. 2017. Attention is all you need. In NeurIPS, 5998-6008.
268
+ Wang, W.; Xie, E.; Li, X.; Fan, D.-P.; Song, K.; Liang, D.; Lu, T.; Luo, P.; and Shao, L. 2021. Pyramid vision transformer: A versatile backbone for dense prediction without convolutions. In ICCV, 568-578.
269
+ Wang, X.; Huang, T. E.; Darrell, T.; Gonzalez, J. E.; and Yu, F. 2020. Frustratingly simple few-shot object detection. arXiv preprint arXiv:2003.06957.
270
+ Wang, Y.-X.; Ramanan, D.; and Hebert, M. 2019. Meta-learning to detect rare objects. In ICCV, 9925-9934.
271
+ Wu, J.; Liu, S.; Huang, D.; and Wang, Y. 2020. Multi-scale positive sample refinement for few-shot object detection. In ECCV, 456-472.
272
+ Xiao, Y.; and Marlet, R. 2020. Few-shot object detection and viewpoint estimation for objects in the wild. In ECCV, 192-210. Springer.
273
+ Yan, X.; Chen, Z.; Xu, A.; Wang, X.; Liang, X.; and Lin, L. 2019. Meta r-cnn: Towards general solver for instance-level low-shot learning. In ICCV, 9577-9586.
274
+ Zhang, G.; Luo, Z.; Cui, K.; Lu, S.; and Xing, E. P. 2022. Meta-DETR: Image-Level Few-Shot Detection with InterClass Correlation Exploitation. IEEE TPAMI, 45(11): 12832-12843.
275
+ Zhu, X.; Su, W.; Lu, L.; Li, B.; Wang, X.; and Dai, J. 2020. Deformable detr: Deformable transformers for end-to-end object detection. arXiv preprint arXiv:2010.04159.
276
+
277
+ # Appendix
278
+
279
+ # Additional Visualization
280
+
281
+ Attention Heatmap of Feature Queries. We show more attention heatmaps of feature queries on support images in Figure 7. We can see that feature query 2 from the dog category tends to capture detailed features of the head, while feature queries 1 and 2 from the horse category focus on the head and the legs, respectively. The feature queries are more likely to capture different details rather than collapse to a trivial solution.
282
+
283
+ Feature Map of Query Images. The feature map of a query image $X_{q} \in \mathbb{R}^{HW \times d}$ is summed along dimension $d$ and then normalized to $[0, 1]$ to produce the heatmap. We show the results for the original query features and the assigned prototypes in Figure 10. The assigned prototypes highlight the representative features and thus facilitate the model prediction. All this evidence demonstrates the effectiveness of our proposed FFA.
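+
+ This visualization is literally a channel-wise sum followed by min-max normalization; a minimal sketch (with toy shapes) is:
+
+ ```python
+ import torch
+
+ def feature_heatmap(x_q, h, w):
+     """Sum a (HW, d) feature map over d and min-max normalize to [0, 1]."""
+     heat = x_q.sum(dim=-1)                                        # (HW,)
+     heat = (heat - heat.min()) / (heat.max() - heat.min() + 1e-6)
+     return heat.reshape(h, w)
+
+ heatmap = feature_heatmap(torch.randn(32 * 24, 1024), h=32, w=24)
+ ```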
284
+
285
+ # Additional Implementation Details
286
+
287
+ Our method follows the two-stage training paradigm. In the base training stage, we train all of the model parameters (the first few layers of ResNet are frozen, following common practice). In the fine-tuning stage, we freeze the backbone and only train the RPN, FFA and NLF modules. Fine-tuning the FFA together with the RPN helps to produce high-quality proposals for the novel classes. Under the $K = \{1, 2\}$ shot settings, we freeze the RPN to avoid overfitting.
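+
+ In code, the fine-tuning stage amounts to toggling requires_grad on the relevant sub-modules. The module names below are illustrative stand-ins, not the actual MMDetection model structure:
+
+ ```python
+ import torch.nn as nn
+
+ class ToyDetector(nn.Module):
+     """Stand-in with the same top-level parts as our detector (names illustrative)."""
+     def __init__(self):
+         super().__init__()
+         self.backbone = nn.Linear(8, 8)
+         self.rpn = nn.Linear(8, 8)
+         self.ffa = nn.Linear(8, 8)
+         self.nlf = nn.Linear(8, 8)
+
+ detector = ToyDetector()
+
+ # Fine-tuning stage: freeze the backbone, keep RPN, FFA and NLF trainable.
+ detector.backbone.requires_grad_(False)
+ trainable = [p for p in detector.parameters() if p.requires_grad]
+ ```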
288
+
289
+ # Computational Cost
290
+
291
+ Table 5 shows the computational cost of different methods at inference time. We conduct the experiments on a single Nvidia RTX 3090 GPU with a batch size of 1. Our method achieves a better trade-off between performance and computational efficiency.
292
+
293
+ <table><tr><td>Dataset</td><td>Method</td><td>Params(MB)</td><td>FLOPs(GB)</td><td>FPS(img/s)</td></tr><tr><td rowspan="3">VOC (20 class)</td><td>Baseline</td><td>45.99</td><td>709.76</td><td>16.2</td></tr><tr><td>FPD(Ours)</td><td>65.68</td><td>818.10</td><td>14.8</td></tr><tr><td>Directly Match</td><td>69.58</td><td>956.72</td><td>14.5</td></tr><tr><td rowspan="3">COCO (80 class)</td><td>Baseline</td><td>46.72</td><td>766.36</td><td>7.3</td></tr><tr><td>FPD(Ours)</td><td>66.5</td><td>1309.50</td><td>6.5</td></tr><tr><td>Directly Match</td><td>70.32</td><td>1466.25</td><td>5.3</td></tr></table>
294
+
295
+ Table 5: Computational cost at inference time.
296
+
297
+ # More Discussion
298
+
299
+ Our proposed FFA module has similarities with DCNet and Meta-DETR. In this section, we provide a more detailed comparison with these methods.
300
+
301
+ # Compare with DCNet
302
+
303
+ Figure 11 illustrates the DRD module of DCNet, which densely matches support features of all classes to the query feature map. There are two main differences between DRD
304
+
305
+ ![](images/0c32c46312a2c51e1411b6ba6d8dd492caf22c216cd38804182a6300489f7748.jpg)
306
+ Figure 7: Additional attention heatmap of feature queries. The model is trained on Novel Set 3 of PASCAL VOC.
307
+
308
+ ![](images/f97f8da43427a73f317469f375044628f8017782bec5df983d1c31194fba296c.jpg)
309
+ Figure 8: Attention heatmap of feature queries (bird).
310
+
311
+ and our FFA (as shown in Figure 3). First, FFA utilizes feature queries to distill fine-grained prototypes, enabling the model to focus on the most representative detailed features while reducing computational costs (see Table 5). It also improves inference efficiency (see the subsection Test-Time Natural Integration). Second, FFA employs a residual connection for the original query features, and the prototypes are directly assigned to the query feature map without any extra projection. This keeps the query and support branches in the same feature space, which is crucial for the subsequent high-level feature fusion.
312
+
313
+ # Compare with Meta-DETR
314
+
315
+ Meta-DETR incorporates meta-learning and an attention mechanism into the DETR framework. It utilizes cross attention to aggregate query-support features. As shown in Figure 12, CAM performs global average pooling to generate class-level prototypes. They are matched with the query features and then assigned to the query feature map based on the matching results. Instead of element-wise addition, element-wise multiplication is used to
316
+
317
+ ![](images/076df7b2369b78bf61d3c69d2c3df92fbfd2a790850dccd2dd9a50d1987b9578.jpg)
318
+ Figure 9: Attention heatmap of feature queries (airplane).
319
+
320
+ ![](images/cd3515b979cb12637a4eb90425b5b71226212dbd967e4b9963da9c5d2fc3ceca.jpg)
321
+ Figure 10: Feature map of query images.
322
+
323
+ reweight the query feature map along the channel dimension. CAM differs from our method in three main aspects. First, it focuses on high-level feature aggregation, while our FFA aggregates detailed features; FFA utilizes feature queries and an additional cross attention layer to refine the important local context into fine-grained prototypes. Second, CAM employs sigmoid and multiplication operations to reweight the query feature map, while FFA directly adds the assigned prototypes to it, preserving more information and potential in the early stages. Third, CAM incorporates a novel and effective encoding matching task to predict object classes.
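+
+ To make the second difference concrete, the two assignment styles can be contrasted as follows (a schematic sketch with illustrative shapes, not the exact implementation of either method):
+
+ ```python
+ import torch
+
+ hw, d = 196, 256
+ query_map = torch.randn(hw, d)
+
+ # CAM-style (Meta-DETR): a matched class-level prototype gates the channels.
+ class_proto = torch.randn(d)
+ cam_out = query_map * torch.sigmoid(class_proto)   # channel-wise reweighting
+
+ # FFA-style (ours): location-wise assigned prototypes are added residually.
+ assigned_protos = torch.randn(hw, d)
+ ffa_out = query_map + assigned_protos               # preserves the original features
+ ```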
324
+
325
+ # Revised Performance
326
+
327
+ After carefully re-examining our code, we found some unintentional discrepancies that have impacted the performance metrics. These mistakes do not compromise the main contributions of this work. Table 6 shows the revised results.
328
+
329
+ ![](images/ba9a84c99239a12f73b15b5cfc44d6c829045a1947d76cac5466b8d172cf5b97.jpg)
330
+ Figure 11: The Dense Relation Distillation module of DCNet.
331
+
332
+ ![](images/b8a6a1683837d19915e195f7cff8580a58c514a734073801ef25fd64e0a4c61d.jpg)
333
+ Figure 12: The Correlational Aggregation Module of MetaDETR.
334
+
335
+ <table><tr><td rowspan="2">Method / shot</td><td colspan="5">Novel Set 1</td><td colspan="5">Novel Set 2</td><td colspan="5">Novel Set 3</td></tr><tr><td>1</td><td>2</td><td>3</td><td>5</td><td>10</td><td>1</td><td>2</td><td>3</td><td>5</td><td>10</td><td>1</td><td>2</td><td>3</td><td>5</td><td>10</td></tr><tr><td colspan="16">Single run results:</td></tr><tr><td>FSCE (Sun et al. 2021)</td><td>44.2</td><td>43.8</td><td>51.4</td><td>61.9</td><td>63.4</td><td>27.3</td><td>29.5</td><td>43.5</td><td>44.2</td><td>50.2</td><td>37.2</td><td>41.9</td><td>47.5</td><td>54.6</td><td>58.5</td></tr><tr><td>Meta FR-CNN (Han et al. 2022a)</td><td>43.0</td><td>54.5</td><td>60.6</td><td>66.1</td><td>65.4</td><td>27.7</td><td>35.5</td><td>46.1</td><td>47.8</td><td>51.4</td><td>40.6</td><td>46.4</td><td>53.4</td><td>59.9</td><td>58.6</td></tr><tr><td>Meta-DETR (Zhang et al. 2022)</td><td>40.6</td><td>51.4</td><td>58.0</td><td>59.2</td><td>63.6</td><td>37.0</td><td>36.6</td><td>43.7</td><td>49.1</td><td>54.6</td><td>41.6</td><td>45.9</td><td>52.7</td><td>58.9</td><td>60.6</td></tr><tr><td>FCT (Han et al. 2022b)</td><td>49.9</td><td>57.1</td><td>57.9</td><td>63.2</td><td>67.1</td><td>27.6</td><td>34.5</td><td>43.7</td><td>49.2</td><td>51.2</td><td>39.5</td><td>54.7</td><td>52.3</td><td>57.0</td><td>58.7</td></tr><tr><td>VFA (Han et al. 2023)</td><td>57.7</td><td>64.6</td><td>64.7</td><td>67.2</td><td>67.4</td><td>41.4</td><td>46.2</td><td>51.1</td><td>51.8</td><td>51.6</td><td>48.9</td><td>54.8</td><td>56.6</td><td>59.0</td><td>58.9</td></tr><tr><td>FPD(Previous)</td><td>46.5</td><td>62.3</td><td>65.4</td><td>68.2</td><td>69.3</td><td>32.2</td><td>43.6</td><td>50.3</td><td>52.5</td><td>56.1</td><td>43.2</td><td>53.3</td><td>56.7</td><td>62.1</td><td>64.1</td></tr><tr><td>FPD(Revised)</td><td>48.1</td><td>62.2</td><td>64.0</td><td>67.6</td><td>68.4</td><td>29.8</td><td>43.2</td><td>47.7</td><td>52.0</td><td>53.9</td><td>44.9</td><td>53.8</td><td>58.1</td><td>61.6</td><td>62.9</td></tr></table>
336
+
337
+ Table 6: Revised FSOD results (AP50) on the three splits of the Pascal VOC dataset.
2401.07xxx/2401.07629/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:86fae8e2f7e7d10bc1f92f41c70b4ec98d8b034d83ed1536841d9044c7446bb4
3
+ size 1144982
2401.07xxx/2401.07629/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.07xxx/2401.07654/6b889230-d2d7-4fe0-bed7-e8514acfd0b1_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.07xxx/2401.07654/6b889230-d2d7-4fe0-bed7-e8514acfd0b1_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.07xxx/2401.07654/6b889230-d2d7-4fe0-bed7-e8514acfd0b1_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf59b9e005421536a33d651c7bef0902ed9875ca85ce9092455c46cd82e84c23
3
+ size 4955527
2401.07xxx/2401.07654/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2401.07xxx/2401.07654/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eaeb8a88c1a495175c6d6c76f20e4b5a7c817b7ba6eb364651e7b2cbc0b4917f
3
+ size 1105980
2401.07xxx/2401.07654/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.07xxx/2401.07680/398bd3f2-d838-4e1b-a207-704a7ecefba8_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.07xxx/2401.07680/398bd3f2-d838-4e1b-a207-704a7ecefba8_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.07xxx/2401.07680/398bd3f2-d838-4e1b-a207-704a7ecefba8_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da4093f8f0bcd667ed929a8f5b571b96db8ce2468e0e5aa2e9ed0d8cca61bc09
3
+ size 813826
2401.07xxx/2401.07680/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2401.07xxx/2401.07680/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aa3a05c36848128228728b2e3d2d0d36ffc7197ca5c11081944f8e98e193ecd7
3
+ size 527436
2401.07xxx/2401.07680/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.07xxx/2401.07745/c334f9ce-49db-4245-8e6b-ae023176c14c_content_list.json ADDED
@@ -0,0 +1,1573 @@
 
 
 
 
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "MaskClustering: View Consensus based Mask Graph Clustering for Open-Vocabulary 3D Instance Segmentation",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 158,
8
+ 128,
9
+ 810,
10
+ 176
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Mi Yan1,2",
17
+ "bbox": [
18
+ 251,
19
+ 210,
20
+ 330,
21
+ 227
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Jiazhao Zhang<sup>1,2</sup>",
28
+ "bbox": [
29
+ 351,
30
+ 210,
31
+ 483,
32
+ 227
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "Yan Zhu",
39
+ "bbox": [
40
+ 506,
41
+ 210,
42
+ 581,
43
+ 227
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "He Wang $^{1,2,3,\\dagger}$",
50
+ "bbox": [
51
+ 606,
52
+ 210,
53
+ 715,
54
+ 229
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "<sup>1</sup>CFCS, School of CS, Peking University <sup>2</sup>Beijing Academy of Artificial Intelligence <sup>3</sup>Galbot",
61
+ "bbox": [
62
+ 107,
63
+ 229,
64
+ 857,
65
+ 247
66
+ ],
67
+ "page_idx": 0
68
+ },
69
+ {
70
+ "type": "image",
71
+ "img_path": "images/4c2dab6542e9f80cf2759012b6e984efb7dcecd3708b39adce27e329a4249efe.jpg",
72
+ "image_caption": [
73
+ "Instance segmentation Input point cloud"
74
+ ],
75
+ "image_footnote": [],
76
+ "bbox": [
77
+ 102,
78
+ 262,
79
+ 259,
80
+ 334
81
+ ],
82
+ "page_idx": 0
83
+ },
84
+ {
85
+ "type": "image",
86
+ "img_path": "images/0e41d75b3dd83100e17b959a76b2b6f2901aa624bf179ec37d39d9a37a1ed3c0.jpg",
87
+ "image_caption": [
88
+ "Building"
89
+ ],
90
+ "image_footnote": [],
91
+ "bbox": [
92
+ 102,
93
+ 345,
94
+ 254,
95
+ 412
96
+ ],
97
+ "page_idx": 0
98
+ },
99
+ {
100
+ "type": "image",
101
+ "img_path": "images/5bc4efb281d4032c0ad660e865bb724fa0628cd5f0e57669ed70307c8170c712.jpg",
102
+ "image_caption": [],
103
+ "image_footnote": [],
104
+ "bbox": [
105
+ 261,
106
+ 260,
107
+ 392,
108
+ 333
109
+ ],
110
+ "page_idx": 0
111
+ },
112
+ {
113
+ "type": "image",
114
+ "img_path": "images/7b86db3e1082383d4f36c41bc045a3093413ac06c02fc0b18ead7fe0fdc6604f.jpg",
115
+ "image_caption": [
116
+ "First floor"
117
+ ],
118
+ "image_footnote": [],
119
+ "bbox": [
120
+ 258,
121
+ 347,
122
+ 387,
123
+ 416
124
+ ],
125
+ "page_idx": 0
126
+ },
127
+ {
128
+ "type": "image",
129
+ "img_path": "images/55a33e3e61a77e4aa187de01cfdc205f0a173764960fbc743f7f460c859961b3.jpg",
130
+ "image_caption": [
131
+ "Zoom-in View"
132
+ ],
133
+ "image_footnote": [],
134
+ "bbox": [
135
+ 395,
136
+ 263,
137
+ 550,
138
+ 335
139
+ ],
140
+ "page_idx": 0
141
+ },
142
+ {
143
+ "type": "image",
144
+ "img_path": "images/5ebce06a0f65a46814fb3f98c82f8d7bb0b895bafb172249312b9045be4616a2.jpg",
145
+ "image_caption": [
146
+ "Second floor",
147
+ "Figure 1. Our method tackles the challenges of open-vocabulary instance segmentation. It achieves detailed segmentation across objects of varying scales and can query these objects using open-vocabulary text."
148
+ ],
149
+ "image_footnote": [],
150
+ "bbox": [
151
+ 393,
152
+ 348,
153
+ 544,
154
+ 416
155
+ ],
156
+ "page_idx": 0
157
+ },
158
+ {
159
+ "type": "image",
160
+ "img_path": "images/b25514ab33de7dc0ab3cb19d9e5c040fa00242958645738ec3b5da407c708a5a.jpg",
161
+ "image_caption": [
162
+ "Open-vocabulary queries"
163
+ ],
164
+ "image_footnote": [],
165
+ "bbox": [
166
+ 584,
167
+ 267,
168
+ 728,
169
+ 330
170
+ ],
171
+ "page_idx": 0
172
+ },
173
+ {
174
+ "type": "image",
175
+ "img_path": "images/f5e3dbef75cd30c8f3db4551a835f8836268592064c13b373f89f84e1038c926.jpg",
176
+ "image_caption": [],
177
+ "image_footnote": [],
178
+ "bbox": [
179
+ 733,
180
+ 266,
181
+ 879,
182
+ 330
183
+ ],
184
+ "page_idx": 0
185
+ },
186
+ {
187
+ "type": "image",
188
+ "img_path": "images/2d7afccd276410d1a49075941e3aa5f0f270266cb3e0f7979b44dd826390082c.jpg",
189
+ "image_caption": [
190
+ "\"Green plant on the marble table\"",
191
+ "\"White clothes on the sofa chair\""
192
+ ],
193
+ "image_footnote": [],
194
+ "bbox": [
195
+ 733,
196
+ 339,
197
+ 885,
198
+ 415
199
+ ],
200
+ "page_idx": 0
201
+ },
202
+ {
203
+ "type": "text",
204
+ "text": "Abstract",
205
+ "text_level": 1,
206
+ "bbox": [
207
+ 233,
208
+ 483,
209
+ 312,
210
+ 500
211
+ ],
212
+ "page_idx": 0
213
+ },
214
+ {
215
+ "type": "text",
216
+ "text": "Open-vocabulary 3D instance segmentation is cutting-edge for its ability to segment 3D instances without predefined categories. However, progress in 3D lags behind its 2D counterpart due to limited annotated 3D data. To address this, recent works first generate 2D open-vocabulary masks through 2D models and then merge them into 3D instances based on metrics calculated between two neighboring frames. In contrast to these local metrics, we propose a novel metric, view consensus rate, to enhance the utilization of multi-view observations. The key insight is that two 2D masks should be deemed part of the same 3D instance if a significant number of other 2D masks from different views contain both these two masks. Using this metric as edge weight, we construct a global mask graph where each mask is a node. Through iterative clustering of masks showing high view consensus, we generate a series of clusters, each representing a distinct 3D instance. Notably, our model is training-free. Through extensive experiments on publicly available datasets, including ScanNet++, ScanNet200 and MatterPort3D, we demonstrate that our method achieves state-of-the-art performance in open-vocabulary 3D instance segmentation. Our project",
217
+ "bbox": [
218
+ 73,
219
+ 536,
220
+ 472,
221
+ 869
222
+ ],
223
+ "page_idx": 0
224
+ },
225
+ {
226
+ "type": "text",
227
+ "text": "page is at https://pku-epic.github.io/MaskClustering.",
228
+ "bbox": [
229
+ 500,
230
+ 484,
231
+ 846,
232
+ 501
233
+ ],
234
+ "page_idx": 0
235
+ },
236
+ {
237
+ "type": "text",
238
+ "text": "1. Introduction",
239
+ "text_level": 1,
240
+ "bbox": [
241
+ 500,
242
+ 527,
243
+ 630,
244
+ 542
245
+ ],
246
+ "page_idx": 0
247
+ },
248
+ {
249
+ "type": "text",
250
+ "text": "Open-vocabulary 3D instance segmentation tackles the problem of predicting 3D object instance masks and their corresponding categories from reconstructed 3D scenes, without relying on a predefined list of categories. This is an essential task for 3D scene understanding [4, 12, 35], robotics [8, 15, 50] and VR/AR applications [22, 45]. However, this task is more challenging than its established 2D counterpart, open-vocabulary 2D instance segmentation [11, 20, 39, 43, 44], primarily due to the lack of large-scale open-world 3D data. Consequently, most current methods [18, 28, 37] in this field divide this task into two stages: zero-shot 3D instance mask prediction, followed by open-vocabulary semantic queries. In this work, we primarily focuses on obtaining high-quality, zero-shot 3D instance masks.",
251
+ "bbox": [
252
+ 496,
253
+ 551,
254
+ 890,
255
+ 777
256
+ ],
257
+ "page_idx": 0
258
+ },
259
+ {
260
+ "type": "text",
261
+ "text": "Existing approaches for zero-shot 3D instance mask prediction primarily follow two paths. 3D-to-2D projection-based methods [18, 19] leverage existing 3D instance segmentation algorithms to generate 3D masks. However, this approach is fundamentally constrained by the quality of 3D reconstructions and the relatively modest capabilities of current 3D instance segmentation tools. As a result, these methods often struggle to accurately segment small",
262
+ "bbox": [
263
+ 496,
264
+ 779,
265
+ 892,
266
+ 901
267
+ ],
268
+ "page_idx": 0
269
+ },
270
+ {
271
+ "type": "aside_text",
272
+ "text": "arXiv:2401.07745v2 [cs.CV] 10 Apr 2024",
273
+ "bbox": [
274
+ 22,
275
+ 260,
276
+ 60,
277
+ 705
278
+ ],
279
+ "page_idx": 0
280
+ },
281
+ {
282
+ "type": "page_footnote",
283
+ "text": "†: He Wang is the corresponding author.",
284
+ "bbox": [
285
+ 101,
286
+ 886,
287
+ 316,
288
+ 900
289
+ ],
290
+ "page_idx": 0
291
+ },
292
+ {
293
+ "type": "text",
294
+ "text": "objects, leading to a significant loss of detail in complex scenes. In contrast, 2D-to-3D region grow-based methods [28, 46] leverage 2D segmentation models to process frames sequentially and update a list of 3D instances simultaneously. They merge new 2D masks with existing 3D instances based on geometric overlap and semantic similarity for each frame. However, we find that such online processing lacks global optimality across all frames, often resulting in incorrect merging.",
295
+ "bbox": [
296
+ 75,
297
+ 90,
298
+ 472,
299
+ 227
300
+ ],
301
+ "page_idx": 1
302
+ },
303
+ {
304
+ "type": "text",
305
+ "text": "To address these limitations, we propose a novel approach that improves global consistency via multi-view verification, inspired by bundle adjustment [38]. Unlike prior methods that rely on local metrics calculated between adjacent frames to decide whether a mask pair should be merged, our method introduces a new global metric, the view consensus rate, which measures the proportion of frames supporting their merging. Here, a frame $t$ supports merging only if another 2D mask within frame $t$ contains this mask pair. In this way, the same-instance relationship of two view-consensus masks are indeed supported by multiview observation.",
306
+ "bbox": [
307
+ 75,
308
+ 227,
309
+ 472,
310
+ 407
311
+ ],
312
+ "page_idx": 1
313
+ },
314
+ {
315
+ "type": "text",
316
+ "text": "Utilizing the same-instance relationship, we build a global mask graph wherein each node is a mask, with edges added between high view consensus mask pairs. Following this, mask pairs exhibiting high view consensus are prioritized for merging into a mask cluster, and the view consensus between this mask cluster and other mask clusters will be updated. This iterative clustering and updating process yields a final list of clusters, each containing multiple masks and denoting a 3D instance. For each 3D instance, its point cloud and semantic feature are the aggregated partial point clouds and open-vocabulary features derived from individual 2D masks, respectively.",
317
+ "bbox": [
318
+ 75,
319
+ 407,
320
+ 472,
321
+ 589
322
+ ],
323
+ "page_idx": 1
324
+ },
325
+ {
326
+ "type": "text",
327
+ "text": "Our method, validated on ScanNet++ [47], Matterport3D [1], and ScanNet200 [35] benchmarks, achieves state-of-the-art results in zero-shot mask prediction and open-vocabulary instance understanding, surpassing existing methods, especially in segmenting fine-grained objects.",
328
+ "bbox": [
329
+ 75,
330
+ 589,
331
+ 472,
332
+ 666
333
+ ],
334
+ "page_idx": 1
335
+ },
336
+ {
337
+ "type": "text",
338
+ "text": "Our contributions can be concluded as follows:",
339
+ "bbox": [
340
+ 96,
341
+ 666,
342
+ 408,
343
+ 679
344
+ ],
345
+ "page_idx": 1
346
+ },
347
+ {
348
+ "type": "list",
349
+ "sub_type": "text",
350
+ "list_items": [
351
+ "- A novel graph clustering based methodology to merge 2D masks for 3D open-vocabulary instance segmentation.",
352
+ "- A novel view consensus metric for evaluating the relationship between 2D masks, effectively leveraging global information from input image sequences.",
353
+ "- A SOTA open-vocabulary 3D instance segmentation method, which demonstrates superior performance on many publicly available datasets."
354
+ ],
355
+ "bbox": [
356
+ 76,
357
+ 680,
358
+ 468,
359
+ 800
360
+ ],
361
+ "page_idx": 1
362
+ },
363
+ {
364
+ "type": "text",
365
+ "text": "2. Related Works",
366
+ "text_level": 1,
367
+ "bbox": [
368
+ 76,
369
+ 814,
370
+ 225,
371
+ 830
372
+ ],
373
+ "page_idx": 1
374
+ },
375
+ {
376
+ "type": "text",
377
+ "text": "Closed-set 3D instance segmentation. Since the emergence of 3D scene datasets [4, 11], the computer vision community has witnessed a large literature of 3D segmentation methods [3, 9, 13, 14, 16, 26, 34, 36, 41, 42]. These",
378
+ "bbox": [
379
+ 75,
380
+ 839,
381
+ 470,
382
+ 901
383
+ ],
384
+ "page_idx": 1
385
+ },
386
+ {
387
+ "type": "text",
388
+ "text": "methods tackle this problem either in online [17, 27, 29, 49, 51] or offline [34, 36, 41, 42] manner, representing the scene as points cloud, voxels, and more recently neural field [40, 52]. Though significant progress has been made, these methods are limited to a closed-set category list which is pre-defined in certain dataset, suffering poor performance in open-vocabulary settings as tail classes that have few or no training examples. In contrast, our method aims to tackle open-vocabulary 3D instance segmentation that segment objects of arbitrary category.",
389
+ "bbox": [
390
+ 496,
391
+ 90,
392
+ 893,
393
+ 243
394
+ ],
395
+ "page_idx": 1
396
+ },
397
+ {
398
+ "type": "text",
399
+ "text": "Open-vocabulary 2D instance segmentation. The recent advances in large visual foundation models [2, 7, 24, 25, 32, 33] have enabled a remarkable level of robustness of 2D understanding tasks. Typical tasks include zero-shot 2D segmentation [2, 24, 32], open-vocabulary 2D image understanding [7, 25, 33], and open-vocabulary 2D object detection [21, 23, 53]. Recently, many works [11, 20, 39, 43, 44] focus on the open-vocabulary 2D segmentation task, which requires predicting the open-vocabulary feature at the pixel level. These methods encode 2D images and align open-vocabulary pixel features with them. However, due to the lack of large-scale 3D annotated data, end-to-end open-vocabulary 3D instance segmentation is in slow progress. In this work, we tackle the open-vocabulary 3D instance segmentation by leveraging the prior from large 2D vision-language models.",
400
+ "bbox": [
401
+ 496,
402
+ 244,
403
+ 893,
404
+ 487
405
+ ],
406
+ "page_idx": 1
407
+ },
408
+ {
409
+ "type": "text",
410
+ "text": "Open-vocabulary 3D instance segmentation. There are two types of methods: (1) 3D-to-2D projection methods and (2) 2D-to-3D region grow-based methods. (1) 3D-to-2D projection methods [19, 31, 37] directly conduct 3D instance segmentation [19, 36] on 3D indoor scene input. They project the 3D instance objects to 2D frames, and extract open-vocabulary features for final aggregation. However, these types of methods are limited to well-reconstructed scene and detailed objects are easily missed if the geometry details are poor. (2) 2D-to-3D region grow-based methods [8, 28] propose to online fuse 2D observation to 3D instance segmentation. By back-projecting the 2D mask to 3D point cloud, these methods leverage clustering algorithm [5] or geometry overlapping to find corresponding 3D instances. The open-vocabulary feature is also aggregated during the back-projection. However, these types of methods consider the associations between historical constructed 3D instances with live frame, lacking a global understanding of all observed frames.",
411
+ "bbox": [
412
+ 496,
413
+ 489,
414
+ 893,
415
+ 776
416
+ ],
417
+ "page_idx": 1
418
+ },
419
+ {
420
+ "type": "text",
421
+ "text": "Concurrently, SAI3D[48] and Open3DIS[30] propose merging 3D superpoints[6] guided by predictions from SAM[24], showing robust performance in open-vocabulary 3D instance segmentation. However, we diverge from their approach by avoiding reliance on 3D superpoints, which face challenges in distinguishing geometrically-homogeneous objects like posters on walls or rows of similar medicine boxes.",
422
+ "bbox": [
423
+ 496,
424
+ 779,
425
+ 893,
426
+ 900
427
+ ],
428
+ "page_idx": 1
429
+ },
430
+ {
431
+ "type": "image",
432
+ "img_path": "images/19720a9eae5667977e2dac640f4300d6c6367fe836f324c0a16fff3e38b6396a.jpg",
433
+ "image_caption": [
434
+ "Figure 2. Overview pipeline of our method: a) We take segmented image sequences as input and b) extract all 2D masks from the input. c) To merge them, we build a global graph with each node as a mask. We use the view consensus rate, which is defined as the proportion of frames supporting the merging, to add edges between nodes. Each frame supports the merging only if there is a mask in this frame containing both nodes. d) Each mask cluster is merged into a 3D instance. For clarity, we only visualize three objects in the figure."
435
+ ],
436
+ "image_footnote": [],
437
+ "bbox": [
438
+ 81,
439
+ 85,
440
+ 313,
441
+ 397
442
+ ],
443
+ "page_idx": 2
444
+ },
445
+ {
446
+ "type": "image",
447
+ "img_path": "images/ad4a3795a306c5a315af6c19de16ecdb683dd879fbe20cb08a76ff202536e937.jpg",
448
+ "image_caption": [],
449
+ "image_footnote": [],
450
+ "bbox": [
451
+ 318,
452
+ 85,
453
+ 689,
454
+ 397
455
+ ],
456
+ "page_idx": 2
457
+ },
458
+ {
459
+ "type": "image",
460
+ "img_path": "images/169e2a3bb01ac41a2850bfd4d512f242fd6ff1a5fc5504dc49706cd21d40ce97.jpg",
461
+ "image_caption": [],
462
+ "image_footnote": [],
463
+ "bbox": [
464
+ 694,
465
+ 85,
466
+ 890,
467
+ 383
468
+ ],
469
+ "page_idx": 2
470
+ },
471
+ {
472
+ "type": "text",
473
+ "text": "3. Method",
474
+ "text_level": 1,
475
+ "bbox": [
476
+ 76,
477
+ 489,
478
+ 166,
479
+ 503
480
+ ],
481
+ "page_idx": 2
482
+ },
483
+ {
484
+ "type": "text",
485
+ "text": "3.1. Problem Formulation and Method Overview",
486
+ "text_level": 1,
487
+ "bbox": [
488
+ 76,
489
+ 513,
490
+ 454,
491
+ 529
492
+ ],
493
+ "page_idx": 2
494
+ },
495
+ {
496
+ "type": "text",
497
+ "text": "Given a set of posed color images $\\{I_1^c, I_2^c, \\ldots, I_T^c\\}$ , their corresponding depths $\\{I_1^d, I_2^d, \\ldots, I_T^d\\}$ , and the reconstructed point cloud $P$ of a scene, our algorithm outputs a list of 3D instances along with their open-vocabulary semantics fused from 2D mask proposals.",
498
+ "bbox": [
499
+ 75,
500
+ 537,
501
+ 468,
502
+ 613
503
+ ],
504
+ "page_idx": 2
505
+ },
506
+ {
507
+ "type": "text",
508
+ "text": "We initially employ an off-the-shelf, class-agnostic mask predictor to process each color image $I_{t}^{c}$ and derive the 2D masks $\\{m_{t,i} \\mid i = 1,2,\\dots,n_t\\}$ where $n_t$ denotes the number of masks in frame $t$ . We assume the mask predictor to generate entity-level panoptic segmentation masks, indicating that each mask approximates one object with nearly all pixels assigned to a single mask. This assumption aligns with capabilities of advanced segmentation tools like CropFormer[32].",
509
+ "bbox": [
510
+ 75,
511
+ 614,
512
+ 468,
513
+ 750
514
+ ],
515
+ "page_idx": 2
516
+ },
517
+ {
518
+ "type": "text",
519
+ "text": "The overview pipeline of our method is illustrated in Fig. 2. To fuse these 2D masks from different frames into 3D instances, we propose to construct a mask graph $G = (V,E)$ . Each node in $V$ corresponds to a mask $m_{t,i}$ , and an edge in $E$ indicates that two masks are part of the same instance and should be merged. To assess edge connectivity, we propose to leverage consensus cues from multi-view observations and therefore introduce view consensus rate as a criterion (Sec. 3.2).",
520
+ "bbox": [
521
+ 75,
522
+ 750,
523
+ 470,
524
+ 883
525
+ ],
526
+ "page_idx": 2
527
+ },
528
+ {
529
+ "type": "text",
530
+ "text": "Once the mask graph is established, we initiate an it-",
531
+ "bbox": [
532
+ 96,
533
+ 885,
534
+ 468,
535
+ 900
536
+ ],
537
+ "page_idx": 2
538
+ },
539
+ {
540
+ "type": "text",
541
+ "text": "erative process to cluster masks and update edges, with a priority on merging mask pairs displaying solid view consensus (Sec. 3.3). The result of this iterative process is a list of clusters, each denoting a 3D instance and containing multiple masks. Within such a cluster, we aggregate the corresponding partial point clouds from the individual masks to form the ultimate 3D instance. Building on these correspondences between 2D masks and 3D instances, we perform feature fusion for a more comprehensive representation, which aids in open-vocabulary semantic prediction (Sec. 3.4).",
542
+ "bbox": [
543
+ 496,
544
+ 491,
545
+ 892,
546
+ 657
547
+ ],
548
+ "page_idx": 2
549
+ },
550
+ {
551
+ "type": "text",
552
+ "text": "3.2. Mask Graph Construction",
553
+ "text_level": 1,
554
+ "bbox": [
555
+ 500,
556
+ 665,
557
+ 740,
558
+ 681
559
+ ],
560
+ "page_idx": 2
561
+ },
562
+ {
563
+ "type": "text",
564
+ "text": "In this subsection, we introduce view consensus rate, which serves as the criterion to determine edge connectivity between two masks (Sec.3.2.1). We then propose an efficient method for calculating this rate (Sec.3.2.2) and leverage this rate to filter out under-segmented masks (Sec.3.2.3).",
565
+ "bbox": [
566
+ 496,
567
+ 688,
568
+ 890,
569
+ 777
570
+ ],
571
+ "page_idx": 2
572
+ },
573
+ {
574
+ "type": "text",
575
+ "text": "Notations and Definitions Given the reconstructed point cloud $P$ and frame index $t$ , for a mask $m_{t,i}$ , we can obtain the mask point cloud $P_{t,i}$ by projecting onto $P$ the backprojected point cloud of $m_{t,i}$ from $I_t^d$ . Then we define the frame point cloud $P_{t}$ as the union of all $P_{t,i}$ s for $i = 1,2,\\dots,n_t$ , yielding $P_{t,i}\\subset P_t\\subset P$ . We define a point $p$ to be visible at frame $t$ if $p\\in P_t$ . We then define a mask $m_{t',i}$ to be visible at frame $t$ if at least $\\tau_{vis} = 0.3$ of its total",
576
+ "bbox": [
577
+ 496,
578
+ 779,
579
+ 892,
580
+ 900
581
+ ],
582
+ "page_idx": 2
583
+ },
584
+ {
585
+ "type": "text",
586
+ "text": "points from $P_{t',i}$ are visible and denote the visible part as $P_{t',i}^{t}$ . We denote the set of frames where $m_{t',i}$ is visible as $F(m_{t',i})$ . Finally, we define the approximate containment relationship of one point clouds $P_{i}$ by another point cloud $P_{j}$ as $P_{i} \\subset P_{j}$ , if at least $\\tau_{\\text{contain}} = 0.8$ of the total points in $P_{i}$ lie within $P_{j}$ .",
587
+ "bbox": [
588
+ 76,
589
+ 90,
590
+ 472,
591
+ 183
592
+ ],
593
+ "page_idx": 3
594
+ },
595
+ {
596
+ "type": "text",
597
+ "text": "3.2.1 View Consensus Rate",
598
+ "text_level": 1,
599
+ "bbox": [
600
+ 76,
601
+ 188,
602
+ 279,
603
+ 203
604
+ ],
605
+ "page_idx": 3
606
+ },
607
+ {
608
+ "type": "text",
609
+ "text": "The cornerstone of our method lies in determining if two masks belong to the same instance by utilizing 2D predictions across all frames. In this context, we propose to leverage view consensus cues, as detailed below.",
610
+ "bbox": [
611
+ 75,
612
+ 212,
613
+ 468,
614
+ 272
615
+ ],
616
+ "page_idx": 3
617
+ },
618
+ {
619
+ "type": "text",
620
+ "text": "To assess the relationship between two masks, specifically $m_{t',i}$ and $m_{t'',j}$ , where $t'$ and $t''$ may be the same or different frames, we utilize the masks $\\{m_{t,k}\\}$ from relevant views. The goal is to check if there is substantial consensus among the views supporting that these two masks represent the same 3D instance.",
621
+ "bbox": [
622
+ 75,
623
+ 273,
624
+ 470,
625
+ 362
626
+ ],
627
+ "page_idx": 3
628
+ },
629
+ {
630
+ "type": "text",
631
+ "text": "To be more specific, we first find all the frames $O$ in which both of the two masks are visible, serving as the observers to the two masks, i.e., $O(m_{t',i},m_{t'',j}) = F(m_{t',i})\\cap F(m_{t'',j})$ . And we denote the number of observers in $O$ as $n(m_{t',i},m_{t'',j}) = |O(m_{t',i},m_{t'',j})|$ , where $|\\cdot|$ represents the cardinality of the set.",
632
+ "bbox": [
633
+ 76,
634
+ 363,
635
+ 470,
636
+ 453
637
+ ],
638
+ "page_idx": 3
639
+ },
640
+ {
641
+ "type": "text",
642
+ "text": "We then check whether an observer frame $t \\in O$ supports the merging of these two masks. For an observer frame $t \\in O$ , if there exists a mask $m_{t,k}$ whose corresponding point cloud $P_{t,k}$ approximately contains both the point clouds $P_{t',i}^{t}$ of $m_{t',i}$ and $P_{t'',j}^{t}$ of $m_{t'',j}$ , i.e., $P_{t',i}^{t} \\subset P_{t,k}$ and $P_{t'',j}^{t} \\subset P_{t,k}$ , then this observer supports that the two masks are components of the same instance. The total number of supporters would be $n_{supporter}(m_{t',i}, m_{t'',j}) = |\\{t \\in O(m_{t',i}, m_{t'',j}) | \\exists k, s.t. P_{t',i}^{t}, P_{t'',j}^{t} \\subset P_{t,k}\\}|$ . The proportion of supporters among all observers is subsequently defined as the view consensus rate $c$ , as illustrated below:",
643
+ "bbox": [
644
+ 75,
645
+ 454,
646
+ 470,
647
+ 635
648
+ ],
649
+ "page_idx": 3
650
+ },
651
+ {
652
+ "type": "equation",
653
+ "text": "\n$$\nc \\left(m _ {t ^ {\\prime}, i}, m _ {t ^ {\\prime \\prime}, j}\\right) = \\frac {n _ {\\text {s u p p o r t e r}} \\left(m _ {t ^ {\\prime} , i} , m _ {t ^ {\\prime \\prime} , j}\\right)}{n \\left(m _ {t ^ {\\prime} , i} , m _ {t ^ {\\prime \\prime} , j}\\right)} \\tag {1}\n$$\n",
654
+ "text_format": "latex",
655
+ "bbox": [
656
+ 127,
657
+ 642,
658
+ 468,
659
+ 676
660
+ ],
661
+ "page_idx": 3
662
+ },
663
+ {
664
+ "type": "text",
665
+ "text": "An illustration of this view consensus rate can be found in Fig. 3.",
666
+ "bbox": [
667
+ 76,
668
+ 683,
669
+ 468,
670
+ 712
671
+ ],
672
+ "page_idx": 3
673
+ },
674
+ {
675
+ "type": "text",
676
+ "text": "Employing the consensus rate as a criterion, we connect edges between mask pairs whose view consensus rates exceeding a predefined threshold $\\tau_{rate} = 0.9$ . This procedure yields the set of edges $E$ as follows:",
677
+ "bbox": [
678
+ 75,
679
+ 713,
680
+ 468,
681
+ 773
682
+ ],
683
+ "page_idx": 3
684
+ },
685
+ {
686
+ "type": "equation",
687
+ "text": "\n$$\nE = \\left\\{\\left\\{m _ {t ^ {\\prime}, i ^ {\\prime}}, m _ {t ^ {\\prime \\prime}, i ^ {\\prime \\prime}} \\right\\} \\mid c \\left(m _ {t ^ {\\prime}, i ^ {\\prime}}, m _ {t ^ {\\prime \\prime}, i ^ {\\prime \\prime}}\\right) \\geq \\tau_ {\\text {r a t e}} \\right\\} \\tag {2}\n$$\n",
688
+ "text_format": "latex",
689
+ "bbox": [
690
+ 94,
691
+ 784,
692
+ 468,
693
+ 801
694
+ ],
695
+ "page_idx": 3
696
+ },
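A hedged sketch of how the view consensus rate (Eq. 1) and the edge set (Eq. 2) could be computed directly from these definitions, reusing the set-of-point-indices representation from the earlier sketch; `frames_of`, `masks_in_frame`, and `visible_part_of` are assumed lookup tables, not structures from the paper.

```python
from itertools import combinations

TAU_CONTAIN = 0.8
TAU_RATE = 0.9

def approx_contains(container: set, contained: set) -> bool:
    return bool(contained) and len(contained & container) >= TAU_CONTAIN * len(contained)

def view_consensus_rate(m_a, m_b, frames_of, masks_in_frame, visible_part_of):
    """Eq. 1: fraction of common observer frames in which one mask of that frame
    approximately contains the visible parts of both m_a and m_b."""
    observers = frames_of[m_a] & frames_of[m_b]
    if not observers:
        return 0.0
    supporters = sum(
        1 for t in observers
        if any(approx_contains(p, visible_part_of[(m_a, t)]) and
               approx_contains(p, visible_part_of[(m_b, t)])
               for p in masks_in_frame[t].values())
    )
    return supporters / len(observers)

def build_edges(all_masks, frames_of, masks_in_frame, visible_part_of):
    """Eq. 2: connect mask pairs whose consensus rate reaches TAU_RATE."""
    return {
        frozenset((a, b))
        for a, b in combinations(all_masks, 2)
        if view_consensus_rate(a, b, frames_of, masks_in_frame, visible_part_of) >= TAU_RATE
    }
```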
697
+ {
698
+ "type": "text",
699
+ "text": "Leveraging predictions across the entire sequence of images, our criterion shows enhanced robustness against over-segmentation errors compared to approaches that solely depend on local geometric overlap. Illustrated in Fig. 3, the two masks exhibit low geometric overlap despite belonging to the same armchair. However, our approach identify a",
700
+ "bbox": [
701
+ 75,
702
+ 809,
703
+ 470,
704
+ 901
705
+ ],
706
+ "page_idx": 3
707
+ },
708
+ {
709
+ "type": "image",
710
+ "img_path": "images/ef6f976c5494a22e62a6bbd8fa3de89335f3d0f264a070274f31a063b7c408e7.jpg",
711
+ "image_caption": [
712
+ "Figure 3. View consensus rate. Masks $m_{t^{\\prime},i}$ and $m_{t^{\\prime \\prime},j}$ (side and frontal view of an armchair) are both visible in three frames, with two supporting them belonging to the same instance, resulting in a 2/3 consensus rate. Each mask is accompanied by its respective mask point cloud, displayed on the right. All point clouds are rendered under a consistent camera pose for clarity."
713
+ ],
714
+ "image_footnote": [],
715
+ "bbox": [
716
+ 506,
717
+ 87,
718
+ 890,
719
+ 281
720
+ ],
721
+ "page_idx": 3
722
+ },
723
+ {
724
+ "type": "text",
725
+ "text": "high consensus rate for them. This is attributed to the outstanding overall performance of modern mask predictors, which consistently segment this armchair comprehensively in most frames, encompassing both parts and thus yielding a high view consensus rate.",
726
+ "bbox": [
727
+ 496,
728
+ 402,
729
+ 892,
730
+ 479
731
+ ],
732
+ "page_idx": 3
733
+ },
734
+ {
735
+ "type": "text",
736
+ "text": "3.2.2 Efficient Computation of View Consensus Rate",
737
+ "text_level": 1,
738
+ "bbox": [
739
+ 498,
740
+ 500,
741
+ 879,
742
+ 515
743
+ ],
744
+ "page_idx": 3
745
+ },
746
+ {
747
+ "type": "text",
748
+ "text": "Naively computing view consensus rates for all mask pairs can be untractable with a time complexity of $\\mathcal{O}(N^2 T)$ , where $N$ represents the total number of masks, i.e., $N = \\sum_{t} n_{t}$ . To speed up, we initially calculate and store the intermediate result to eliminate redundant computations.",
749
+ "bbox": [
750
+ 496,
751
+ 523,
752
+ 890,
753
+ 599
754
+ ],
755
+ "page_idx": 3
756
+ },
757
+ {
758
+ "type": "text",
759
+ "text": "Specifically, for each mask $m_{t',i}$ , we first find $F(m_{t',i})$ and then identify all the masks that approximately contain it, denoted as $M(m_{t',i}) = \\{m_{t,k} \\mid t \\in F(m_{t',i})$ and $P_{t',i}^t \\sqsubset P_{t,k}\\}$ . With these intermediate results, the computation of equation 1 can be simplified as,",
760
+ "bbox": [
761
+ 496,
762
+ 599,
763
+ 890,
764
+ 676
765
+ ],
766
+ "page_idx": 3
767
+ },
768
+ {
769
+ "type": "equation",
770
+ "text": "\n$$\nc \\left(m _ {t ^ {\\prime}, i}, m _ {t ^ {\\prime \\prime}, j}\\right) = \\frac {\\left| M \\left(m _ {t ^ {\\prime} , i}\\right) \\cap M \\left(m _ {t ^ {\\prime \\prime} , j}\\right) \\right|}{\\left| F \\left(m _ {t ^ {\\prime} , i}\\right) \\cap F \\left(m _ {t ^ {\\prime \\prime} , j}\\right) \\right|} \\tag {3}\n$$\n",
771
+ "text_format": "latex",
772
+ "bbox": [
773
+ 550,
774
+ 686,
775
+ 890,
776
+ 722
777
+ ],
778
+ "page_idx": 3
779
+ },
780
+ {
781
+ "type": "text",
782
+ "text": "In this way, all the operations in this expression have been simplified to simple set intersection operations involving only a few dozen elements, leading to a significant reduction in computational complexity.",
783
+ "bbox": [
784
+ 496,
785
+ 732,
786
+ 890,
787
+ 792
788
+ ],
789
+ "page_idx": 3
790
+ },
791
+ {
792
+ "type": "text",
793
+ "text": "We now introduce the efficient computation of $M(m_{t',i})$ . Initially, we examine the mask ID distribution of $P_{t',i}^{t}$ at frame $t$ . If this distribution is concentrated, with more than $\\tau_{\\text{contain}} = 0.8$ of elements equalling $k$ , it indicates that $P_{t',i}^{t}$ primarily constitutes a part of the $k$ -th instance at frame $t$ . By definition, $P_{t',i}^{t} \\subset P_{t,k}$ . The mask ID distribution is denoted as $d(m_{t',i}, t)$ , and we elaborate",
794
+ "bbox": [
795
+ 496,
796
+ 792,
797
+ 892,
798
+ 901
799
+ ],
800
+ "page_idx": 3
801
+ },
802
+ {
803
+ "type": "text",
804
+ "text": "on its efficient calculation through a space-time trade-off in the supplementary material.",
805
+ "bbox": [
806
+ 76,
807
+ 90,
808
+ 468,
809
+ 121
810
+ ],
811
+ "page_idx": 4
812
+ },
813
+ {
814
+ "type": "text",
815
+ "text": "3.2.3 Under-Segment Mask Filtering",
816
+ "text_level": 1,
817
+ "bbox": [
818
+ 76,
819
+ 142,
820
+ 349,
821
+ 159
822
+ ],
823
+ "page_idx": 4
824
+ },
825
+ {
826
+ "type": "text",
827
+ "text": "We can also identify whether a mask is under-segmented based on the mask ID distribution $d(m_{t',i}, t)$ . If $d(m_{t',i}, t)$ exhibits a very diverse distribution, it signifies that $P_{t',i}$ comprises multiple instances at frame $t$ , making it highly likely that $m_{t',i}$ is an under-segmented mask. Assuming most 2D mask predictor outputs are correct, we ignore the alternative explanation that $m_{t',i}$ is accurate but the mask predictor over-segments this object consistently in other views.",
828
+ "bbox": [
829
+ 75,
830
+ 167,
831
+ 468,
832
+ 303
833
+ ],
834
+ "page_idx": 4
835
+ },
836
+ {
837
+ "type": "text",
838
+ "text": "Therefore, under-segmentation is marked by frequent distinction of $P_{t',i}$ into parts. We track the frequency of such occurrences (number of frames with diverse distributions in $d(m_{t',i},t) / |F(m_{t',i})|$ ). If this frequency exceeds $\\tau_{filter} = 0.2$ , we classify the mask as under-segmented and filter it out. Specifically, we remove it from the mask graph. Additionally, to prevent this mask from erroneously inflating the consensus rate between two masks belonging to different instances, we also eliminate it from all $M(m_{t'',j})$ and remove $t'$ from $F(m_{t'',j})$ .",
839
+ "bbox": [
840
+ 75,
841
+ 304,
842
+ 468,
843
+ 455
844
+ ],
845
+ "page_idx": 4
846
+ },
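A sketch of the filtering rule described above: for each observer frame, inspect how a mask's visible points distribute over that frame's mask IDs, flag the frame as "diverse" when no single ID reaches the containment threshold, and drop the mask when the fraction of diverse frames exceeds $\tau_{filter}$. The lookup tables `visible_points_per_frame` and `mask_id_of_point` are illustrative assumptions.

```python
from collections import Counter

TAU_CONTAIN = 0.8
TAU_FILTER = 0.2

def is_diverse(point_mask_ids) -> bool:
    """True if no single mask ID covers at least TAU_CONTAIN of the visible points."""
    counts = Counter(i for i in point_mask_ids if i is not None)
    total = len(point_mask_ids)
    return bool(total) and (not counts or counts.most_common(1)[0][1] < TAU_CONTAIN * total)

def is_under_segmented(frames, visible_points_per_frame, mask_id_of_point) -> bool:
    """frames: F(m); visible_points_per_frame[t]: this mask's visible points at frame t;
    mask_id_of_point[t][p]: mask ID that point p belongs to at frame t (None if uncovered)."""
    if not frames:
        return False
    diverse = sum(
        1 for t in frames
        if is_diverse([mask_id_of_point[t].get(p) for p in visible_points_per_frame[t]])
    )
    return diverse / len(frames) > TAU_FILTER
```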
847
+ {
848
+ "type": "text",
849
+ "text": "3.3. Iterative Graph Clustering",
850
+ "text_level": 1,
851
+ "bbox": [
852
+ 76,
853
+ 465,
854
+ 320,
855
+ 483
856
+ ],
857
+ "page_idx": 4
858
+ },
859
+ {
860
+ "type": "text",
861
+ "text": "Building upon the mask graph, we introduce an iterative graph clustering technique to merge masks and update the graph structure alternately. In the last iteration, each cluster denotes an instance.",
862
+ "bbox": [
863
+ 75,
864
+ 489,
865
+ 468,
866
+ 549
867
+ ],
868
+ "page_idx": 4
869
+ },
870
+ {
871
+ "type": "text",
872
+ "text": "When determining which masks to merge, we consider two strategies: 1) merging each maximal clique (where a clique is a subset of the graph with an edge between every pair of nodes); 2) merging each connected component (where a connected component is a subset of the graph with a path between every pair of nodes). The first approach, though precise, tends to be overly stringent, often leading to insufficient merging and excessive over-segmentation. The second approach, more permissive, relies on the correctness of every pair-wise identified same-instance relationship, which can be less reliable when the number of observers $n$ —the denominator of $c$ —is low.",
873
+ "bbox": [
874
+ 75,
875
+ 551,
876
+ 468,
877
+ 732
878
+ ],
879
+ "page_idx": 4
880
+ },
881
+ {
882
+ "type": "text",
883
+ "text": "To balance these strategies, we modify the second approach to prioritize merging masks with a high number of observers first, postponing less reliable connections to later iterations.",
884
+ "bbox": [
885
+ 75,
886
+ 733,
887
+ 468,
888
+ 792
889
+ ],
890
+ "page_idx": 4
891
+ },
892
+ {
893
+ "type": "text",
894
+ "text": "As illustrated in Fig.4, in each iteration $k$ , we set an observer threshold $n_k$ and edges with $n < n_k$ are disconnected. We then identify connected components in the graph and merge them into new nodes. For a newly formed mask $m_{new}$ from a set of masks $\\{m_{t_1,i_1},m_{t_2,i_2},\\ldots ,m_{t_s,i_s}\\}$ , its point cloud $P_{new}$ is the union of $\\{P_{t_1,i_1},P_{t_2,i_2},\\ldots ,P_{t_s,i_s}\\}$ .",
895
+ "bbox": [
896
+ 76,
897
+ 795,
898
+ 468,
899
+ 902
900
+ ],
901
+ "page_idx": 4
902
+ },
903
+ {
904
+ "type": "image",
905
+ "img_path": "images/1eaed856b6a301d73f57fdb59796685ebeb77f2ecb4e0958c190d00bd2d36e82.jpg",
906
+ "image_caption": [
907
+ "Figure 4. Illustration of iterative clustering. Node pairs with more observers are prioritized clustered $(G_{k})$ . Then, view consensus of grouped masks is updated for the next clustering with more confident view consensus measurements. The text on the edge means $n_{support} / n$ ."
908
+ ],
909
+ "image_footnote": [],
910
+ "bbox": [
911
+ 504,
912
+ 90,
913
+ 591,
914
+ 189
915
+ ],
916
+ "page_idx": 4
917
+ },
918
+ {
919
+ "type": "image",
920
+ "img_path": "images/c66e22ca1f2a5153fcb2c6e0b226150b465708b51f737fcf13786736e932ebba.jpg",
921
+ "image_caption": [],
922
+ "image_footnote": [],
923
+ "bbox": [
924
+ 604,
925
+ 90,
926
+ 712,
927
+ 189
928
+ ],
929
+ "page_idx": 4
930
+ },
931
+ {
932
+ "type": "image",
933
+ "img_path": "images/1337595537128f265e3818d35ae95b39c2660d363ebbdef94976b99f37d3653b.jpg",
934
+ "image_caption": [],
935
+ "image_footnote": [],
936
+ "bbox": [
937
+ 718,
938
+ 103,
939
+ 794,
940
+ 190
941
+ ],
942
+ "page_idx": 4
943
+ },
944
+ {
945
+ "type": "image",
946
+ "img_path": "images/d359b8a10da18db1f489813c02b3625df03b0223e3cbc7b0862b325f0640015e.jpg",
947
+ "image_caption": [],
948
+ "image_footnote": [],
949
+ "bbox": [
950
+ 812,
951
+ 101,
952
+ 888,
953
+ 190
954
+ ],
955
+ "page_idx": 4
956
+ },
957
+ {
958
+ "type": "text",
959
+ "text": "Subsequent to these node merging operations, updating edges requires recalculating the view consensus rate for the new mask in relation to others. Referring to equation 3, we calculate $F(m_{new})$ and $M(m_{new})$ . While these two sets can be computed using the same technique as introduced in Sec.3.2.2, we propose a method to accelerate this calculation while achieving comparable results through a straightforward approximation. Specifically, we approximate $F(m_{new})$ as $F(m_{t_1,i_1}) \\cup F(m_{t_2,i_2}) \\ldots \\cup F(m_{t_s,i_s})$ and $M(m_{new})$ as $M(m_{t_1,i_1}) \\cup M(m_{t_2,i_2}) \\ldots \\cup M(m_{t_s,i_s})$ . This approximation is justified since masks merged due to high consensus rates often share containment by the same mask in frames where they both appear. The quantitative impact of this approximation is presented in Table 4.",
960
+ "bbox": [
961
+ 496,
962
+ 287,
963
+ 890,
964
+ 498
965
+ ],
966
+ "page_idx": 4
967
+ },
968
+ {
969
+ "type": "text",
970
+ "text": "After each iteration $k$ , a new graph $G_{k+1}$ is formed. The observer threshold $n_k$ is adjusted downwards over several iterations to avoid neglecting smaller objects visible in fewer frames. We adopt a decreasing $n_k$ schedule, ranging from the top 5%, 10%, to 95% of observer counts across all mask pairs.",
971
+ "bbox": [
972
+ 496,
973
+ 500,
974
+ 890,
975
+ 589
976
+ ],
977
+ "page_idx": 4
978
+ },
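The iterative clustering loop might be sketched as follows, combining Eq. 3 with the observer-count schedule and the union approximation described above; `networkx` is used for connected components, and the number of iterations and the exact percentile schedule are illustrative assumptions rather than the paper's settings.

```python
import networkx as nx
import numpy as np

TAU_RATE = 0.9

def iterative_clustering(masks, F, M, n_iters=5):
    """masks: initial mask IDs; F[m]: observer frames of m; M[m]: masks containing m.
    Returns a list of clusters, each a frozenset of original mask IDs (one 3D instance)."""
    clusters = {m: frozenset([m]) for m in masks}
    top_fracs = np.linspace(5, 95, n_iters)      # top 5% ... top 95% of observer counts
    for k in range(n_iters):
        nodes = list(clusters)
        edges, observer_counts = [], []
        for i, a in enumerate(nodes):
            for b in nodes[i + 1:]:
                obs = F[a] & F[b]
                if obs and len(M[a] & M[b]) / len(obs) >= TAU_RATE:
                    edges.append((a, b))
                    observer_counts.append(len(obs))
        if not edges:
            break
        # keep only edges whose observer count falls in the current "top" fraction
        n_k = np.percentile(observer_counts, 100 - top_fracs[k])
        g = nx.Graph()
        g.add_nodes_from(nodes)
        g.add_edges_from(e for e, n in zip(edges, observer_counts) if n >= n_k)
        new_clusters, new_F, new_M = {}, {}, {}
        for comp in map(list, nx.connected_components(g)):
            rep = comp[0]                        # representative ID for the merged node
            new_clusters[rep] = frozenset().union(*(clusters[c] for c in comp))
            new_F[rep] = set().union(*(F[c] for c in comp))   # union approximation (Sec. 3.3)
            new_M[rep] = set().union(*(M[c] for c in comp))
        clusters, F, M = new_clusters, new_F, new_M
    return list(clusters.values())
```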
979
+ {
980
+ "type": "text",
981
+ "text": "3.4. Open-Vocabulary Feature Aggregation",
982
+ "text_level": 1,
983
+ "bbox": [
984
+ 500,
985
+ 599,
986
+ 833,
987
+ 617
988
+ ],
989
+ "page_idx": 4
990
+ },
991
+ {
992
+ "type": "text",
993
+ "text": "After multiple iterations of clustering, we have obtained a conclusive list where each entry represents a 3D instance proposal. Simultaneously, we maintain a corresponding list of masks associated with each instance. This 2D-3D relationship allows us to directly select representative masks and fuse their semantic features to create an open-vocabulary feature for this instance. Following OpenMask3D[37], we first pick the top-5 masks that best cover the instance. Subsequently, we crop the original RGB image at multiple scales around each mask and input these image crops into CLIP[33] to extract open-vocabulary features. The final instance feature is derived from the average pooling result of these features.",
994
+ "bbox": [
995
+ 496,
996
+ 625,
997
+ 890,
998
+ 820
999
+ ],
1000
+ "page_idx": 4
1001
+ },
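One possible realization of this feature aggregation step is sketched below, assuming the `open_clip` package with a ViT-H-14 checkpoint as a stand-in for the CLIP ViT-H model mentioned later, and a simple bounding-box-based multi-scale crop heuristic; none of the specific names, checkpoints, or crop parameters come from the paper.

```python
import numpy as np
import torch
import open_clip
from PIL import Image

model, _, preprocess = open_clip.create_model_and_transforms(
    "ViT-H-14", pretrained="laion2b_s32b_b79k")
model.eval()

def instance_clip_feature(instance_masks, rgb_images, top_k=5, scales=(1.0, 1.5, 2.0)):
    """instance_masks: list of (frame_id, boolean HxW mask, coverage score) for one instance."""
    best = sorted(instance_masks, key=lambda m: m[2], reverse=True)[:top_k]
    feats = []
    for frame_id, mask, _ in best:
        ys, xs = np.nonzero(mask)
        cy, cx = ys.mean(), xs.mean()
        half = max(ys.max() - ys.min(), xs.max() - xs.min()) / 2 + 1
        img = Image.fromarray(rgb_images[frame_id])
        for s in scales:                                  # multi-scale crops around the mask
            r = half * s
            box = (int(max(0, cx - r)), int(max(0, cy - r)),
                   int(min(img.width, cx + r)), int(min(img.height, cy + r)))
            crop = preprocess(img.crop(box)).unsqueeze(0)
            with torch.no_grad():
                f = model.encode_image(crop)
            feats.append(f / f.norm(dim=-1, keepdim=True))
    return torch.cat(feats).mean(dim=0)                   # average-pooled instance feature
```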
1002
+ {
1003
+ "type": "text",
1004
+ "text": "3.5. Implementation Details",
1005
+ "text_level": 1,
1006
+ "bbox": [
1007
+ 500,
1008
+ 830,
1009
+ 715,
1010
+ 847
1011
+ ],
1012
+ "page_idx": 4
1013
+ },
1014
+ {
1015
+ "type": "text",
1016
+ "text": "In order to obtain object-level masks, we use CropFormer [32] as our 2D mask predictor. For open-vocabulary feature extraction, we use CLIP[33] ViT-H. To get mask point cloud",
1017
+ "bbox": [
1018
+ 498,
1019
+ 854,
1020
+ 890,
1021
+ 900
1022
+ ],
1023
+ "page_idx": 4
1024
+ },
1025
+ {
1026
+ "type": "table",
1027
+ "img_path": "images/d777b053e9668ec7e1e5997369b852915790d36ce1c85d50217ffd3cf8dafebf.jpg",
1028
+ "table_caption": [
1029
+ "Table 1. Zero-shot 3D instance segmentation results on ScanNet++ and MatterPort3D. We report both semantic and class-agnostic performance. Our method outperform all baselines on all metrics significantly."
1030
+ ],
1031
+ "table_footnote": [],
1032
+ "table_body": "<table><tr><td>Model</td><td colspan=\"6\">ScanNet++</td><td colspan=\"6\">MatterPort3D</td></tr><tr><td></td><td colspan=\"3\">Class-agnostic</td><td colspan=\"3\">Semantic</td><td colspan=\"3\">Class-agnostic</td><td colspan=\"3\">Semantic</td></tr><tr><td></td><td>AP</td><td>AP50</td><td>AP25</td><td>AP</td><td>AP50</td><td>AP25</td><td>AP</td><td>AP50</td><td>AP25</td><td>AP</td><td>AP50</td><td>AP25</td></tr><tr><td>Mask3D</td><td>22.8</td><td>33.3</td><td>45.7</td><td>3.6</td><td>5.1</td><td>6.7</td><td>4.4</td><td>9.8</td><td>20.6</td><td>2.6</td><td>4.7</td><td>7.0</td></tr><tr><td>OpenMask3D</td><td>22.8</td><td>33.3</td><td>45.7</td><td>2.0</td><td>2.7</td><td>3.4</td><td>4.4</td><td>9.8</td><td>20.6</td><td>4.6</td><td>8.5</td><td>13.0</td></tr><tr><td>OVIR-3D</td><td>19.4</td><td>34.1</td><td>46.5</td><td>3.6</td><td>5.7</td><td>7.3</td><td>5.9</td><td>13.9</td><td>24.6</td><td>6.8</td><td>17.5</td><td>26.4</td></tr><tr><td>Ours</td><td>27.9</td><td>42.8</td><td>54.7</td><td>7.8</td><td>10.7</td><td>12.1</td><td>9.1</td><td>19.5</td><td>35.3</td><td>11.1</td><td>21.1</td><td>31.2</td></tr></table>",
1033
+ "bbox": [
1034
+ 176,
1035
+ 128,
1036
+ 794,
1037
+ 229
1038
+ ],
1039
+ "page_idx": 5
1040
+ },
1041
+ {
1042
+ "type": "text",
1043
+ "text": "$P_{t,i}$ , we first back-project each mask to get the raw point cloud and then ball query the reconstructed point cloud with a radius equal to $3\\mathrm{cm}$ . We adopt the post-processing approach from OVIR-3D[28] to refine the output 3D instances by using DBSCAN algorithm to separate disconnected point clusters into distinct instances.",
1044
+ "bbox": [
1045
+ 75,
1046
+ 253,
1047
+ 472,
1048
+ 345
1049
+ ],
1050
+ "page_idx": 5
1051
+ },
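The lifting and post-processing steps described here could look roughly like the following, with `scipy` providing the 3 cm ball query and scikit-learn's DBSCAN standing in for the OVIR-3D-style refinement; the camera conventions and the DBSCAN parameters are assumptions, not values reported by the paper.

```python
import numpy as np
from scipy.spatial import cKDTree
from sklearn.cluster import DBSCAN

def backproject_mask(depth, mask, K, cam_to_world):
    """Back-project masked depth pixels (depth in meters) into world coordinates."""
    v, u = np.nonzero(mask & (depth > 0))
    z = depth[v, u]
    x = (u - K[0, 2]) * z / K[0, 0]
    y = (v - K[1, 2]) * z / K[1, 1]
    pts_cam = np.stack([x, y, z, np.ones_like(z)], axis=1)
    return (cam_to_world @ pts_cam.T).T[:, :3]

def mask_point_cloud(scene_points, raw_mask_points, radius=0.03):
    """Indices of reconstructed points within `radius` (3 cm) of any back-projected point."""
    hits = cKDTree(scene_points).query_ball_point(raw_mask_points, r=radius)
    idx = [np.asarray(h, dtype=int) for h in hits if len(h) > 0]
    return np.unique(np.concatenate(idx)) if idx else np.empty(0, dtype=int)

def split_disconnected(instance_points, eps=0.05, min_samples=5):
    """DBSCAN post-processing: split spatially disconnected clusters into separate instances."""
    labels = DBSCAN(eps=eps, min_samples=min_samples).fit_predict(instance_points)
    return [instance_points[labels == l] for l in sorted(set(labels)) if l != -1]
```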
1052
+ {
1053
+ "type": "text",
1054
+ "text": "4. Experiments",
1055
+ "text_level": 1,
1056
+ "bbox": [
1057
+ 76,
1058
+ 358,
1059
+ 209,
1060
+ 376
1061
+ ],
1062
+ "page_idx": 5
1063
+ },
1064
+ {
1065
+ "type": "text",
1066
+ "text": "In this section, we extensively evaluate our proposed method by comparing it with previous state-of-the-art methods on publicly available 3D instance segmentation benchmarks. The experimental setup is detailed in Section 4.1, and the statistics are comprehensively analyzed in Section 4.2. Following that, we showcase the remarkable visual outcomes of our approach across a diverse range of complex scenes in Section 4.4. The validation of all the components of our method is presented in Section 4.3.",
1067
+ "bbox": [
1068
+ 75,
1069
+ 383,
1070
+ 468,
1071
+ 518
1072
+ ],
1073
+ "page_idx": 5
1074
+ },
1075
+ {
1076
+ "type": "text",
1077
+ "text": "4.1. Experimental setup",
1078
+ "text_level": 1,
1079
+ "bbox": [
1080
+ 76,
1081
+ 529,
1082
+ 264,
1083
+ 546
1084
+ ],
1085
+ "page_idx": 5
1086
+ },
1087
+ {
1088
+ "type": "text",
1089
+ "text": "Dataset ScanNet++[47] is a recently released high-quality benchmark that comprises 1554 classes with fine-grained annotation, making it an optimal choice for assessing open-vocabulary 3D instance segmentation. We also assess our method on two widely-used benchmarks: ScanNet200[4, 35], which focuses on room-level evaluations, and MatterPort3D[1], designed for building-level evaluations with sparser viewpoints. We utilize the validation sets of ScanNet++ and ScanNet, along with the testing set of MatterPort3D.",
1090
+ "bbox": [
1091
+ 75,
1092
+ 551,
1093
+ 468,
1094
+ 703
1095
+ ],
1096
+ "page_idx": 5
1097
+ },
1098
+ {
1099
+ "type": "text",
1100
+ "text": "Baselines We select the recent SOTA methods on both supervised closed-set 3D instance segmentation and open-vocabulary 3D instance segmentation. Mask3D [36] stands out as a state-of-the-art method which requires supervised training on ScanNet200. OpenMask3D [37] leverages supervised mask proposals from Mask3D and employs CLIP for open-vocabulary semantics aggregation. Different from our setting, both of them rely on supervised mask. OVIR-3D [28] utilize both zero-shot masks and semantics, merging zero-shot 2D masks with large geometric and semantic overlap and using K-Means to choose the most representative features from the per-frame semantic feature.",
1101
+ "bbox": [
1102
+ 75,
1103
+ 704,
1104
+ 468,
1105
+ 885
1106
+ ],
1107
+ "page_idx": 5
1108
+ },
1109
+ {
1110
+ "type": "text",
1111
+ "text": "Metrics We report the standard Average Precision (AP) at",
1112
+ "bbox": [
1113
+ 76,
1114
+ 885,
1115
+ 468,
1116
+ 901
1117
+ ],
1118
+ "page_idx": 5
1119
+ },
1120
+ {
1121
+ "type": "text",
1122
+ "text": "25% and 50% IoU and the mean of AP from 50% to 95% at 5% intervals. In addition to the conventional semantic instance segmentation setting, we also test in a class-agnostic setting, disregarding semantic labels and solely assessing mask quality. This setting offers a precise assessment of the zero-shot mask prediction capability.",
1123
+ "bbox": [
1124
+ 496,
1125
+ 253,
1126
+ 890,
1127
+ 345
1128
+ ],
1129
+ "page_idx": 5
1130
+ },
1131
+ {
1132
+ "type": "table",
1133
+ "img_path": "images/a12d2a4a8c742d0acc743f9c83a4f1554c93f60c761748bb591acf70e4eb20af.jpg",
1134
+ "table_caption": [
1135
+ "Table 2. 3D instance segmentation results on ScanNet200. Mask3D and OpenMask3D both require supervised (sup.) training on ScanNet200. In fully zero shot (z.s.) setting, our method surpass OVIR-3D by a large margin on all metrics."
1136
+ ],
1137
+ "table_footnote": [],
1138
+ "table_body": "<table><tr><td rowspan=\"2\">Model</td><td colspan=\"3\">Class-agnostic</td><td colspan=\"3\">Semantic</td></tr><tr><td>AP</td><td>AP50</td><td>AP25</td><td>AP</td><td>AP50</td><td>AP25</td></tr><tr><td>sup. mask + sup. semantic Mask3D</td><td>39.7</td><td>53.6</td><td>62.5</td><td>26.9</td><td>36.2</td><td>41.4</td></tr><tr><td>sup. mask + z.s. semantic OpenMask3D</td><td>39.7</td><td>53.6</td><td>62.5</td><td>15.1</td><td>19.6</td><td>22.6</td></tr><tr><td>z.s. mask + z.s. semantic OVIR-3D</td><td>14.4</td><td>27.5</td><td>38.8</td><td>9.3</td><td>18.7</td><td>25.0</td></tr><tr><td>Ours</td><td>19.2</td><td>36.6</td><td>51.7</td><td>12.0</td><td>23.3</td><td>30.1</td></tr></table>",
1139
+ "bbox": [
1140
+ 503,
1141
+ 422,
1142
+ 890,
1143
+ 537
1144
+ ],
1145
+ "page_idx": 5
1146
+ },
1147
+ {
1148
+ "type": "text",
1149
+ "text": "4.2. Quantitative Comparison.",
1150
+ "text_level": 1,
1151
+ "bbox": [
1152
+ 498,
1153
+ 560,
1154
+ 736,
1155
+ 577
1156
+ ],
1157
+ "page_idx": 5
1158
+ },
1159
+ {
1160
+ "type": "text",
1161
+ "text": "ScanNet++ and MatterPort3D. We directly test all methods on ScanNet++ and MatterPort3D in a zero-shot manner. As shown in Table 1, our method outperforms all baselines by a large margin. In comparison to OVIR-3D, the most akin work to ours, we achieve $+4.1\\%$ and $+4.7\\%$ AP on ScanNet++ semantic and class-agnostic setting, respectively. Similarly, we demonstrate $+3.0\\%$ and $+2.0\\%$ AP on MatterPort3D in the same settings, validating our globally optimal association design.",
1162
+ "bbox": [
1163
+ 496,
1164
+ 583,
1165
+ 890,
1166
+ 718
1167
+ ],
1168
+ "page_idx": 5
1169
+ },
1170
+ {
1171
+ "type": "text",
1172
+ "text": "OpenMask3D shares Mask3D's mask predictor, rendering their performance identical in the class-agnostic setting. We observe that this mask predictor, trained on ScanNet200, has limited generalizability. While it shows impressive results within the confines of ScanNet200, its performance suffers significantly when evaluated on new benchmarks, as demonstrated in Table 1 and Fig. 5. Additionally, it exhibits sensitivity to point distribution patterns. For instance, in the class-agnostic setting of ScanNet++, its AP is a mere $13.6\\%$ when using the raw point cloud as input. Interestingly, a simple preprocessing step such as uniform sampling significantly boosts performance to $22.8\\%$ .",
1173
+ "bbox": [
1174
+ 496,
1175
+ 719,
1176
+ 890,
1177
+ 900
1178
+ ],
1179
+ "page_idx": 5
1180
+ },
1181
+ {
1182
+ "type": "text",
1183
+ "text": "ScanNet200. As the mask predictor and semantic head of Mask3D are trained specifically on ScanNet200, we classify methods according to their train-test settings. In comparison to the fully zero-shot method, OVIR-3D, our approach exhibits a significant performance advantage, surpassing it by $+5.3\\%$ in average precision (AP), $+8.9\\%$ in $AP_{50}$ , and $+12.6\\%$ in $AP_{25}$ in the class-agnostic setting. This further underscores the effectiveness of our proposed globally optimal merging mechanism. Moreover, our method even outperforms OpenMask3D, which relies on a supervised mask predictor, by a substantial margin of $+3.7\\%$ in $AP_{50}$ and $+7.5\\%$ in $AP_{25}$ .",
1184
+ "bbox": [
1185
+ 75,
1186
+ 90,
1187
+ 472,
1188
+ 272
1189
+ ],
1190
+ "page_idx": 6
1191
+ },
1192
+ {
1193
+ "type": "text",
1194
+ "text": "4.3. Ablation Studies",
1195
+ "text_level": 1,
1196
+ "bbox": [
1197
+ 76,
1198
+ 281,
1199
+ 241,
1200
+ 296
1201
+ ],
1202
+ "page_idx": 6
1203
+ },
1204
+ {
1205
+ "type": "text",
1206
+ "text": "In Table 3, we analyze key components of our method on ScanNet200—under-segment mask filtering and iterative clustering. Starting with a baseline using view consensus rate, we merge masks within connected components. This simple approach matches OVIR-3D baseline performance. Upon adding under-segment mask filtering and iterative clustering, performance steadily rises from $10.0\\%$ AP to $11.7\\%$ AP, reaching peak performance when both modules are combined.",
1207
+ "bbox": [
1208
+ 75,
1209
+ 305,
1210
+ 468,
1211
+ 441
1212
+ ],
1213
+ "page_idx": 6
1214
+ },
1215
+ {
1216
+ "type": "table",
1217
+ "img_path": "images/75b057edd24d8dd4c64a6d0d1c85216a9d52217bb9ee2d3538e1513cf8a9c1b6.jpg",
1218
+ "table_caption": [
1219
+ "Table 3. Ablation study on under-segment mask filtering and iterative clustering on ScanNet200."
1220
+ ],
1221
+ "table_footnote": [],
1222
+ "table_body": "<table><tr><td>under. filtering</td><td>iter. clustering</td><td>AP</td><td>AP50</td><td>AP25</td></tr><tr><td>x</td><td>x</td><td>10.0</td><td>19.1</td><td>24.2</td></tr><tr><td>✓</td><td>x</td><td>11.0</td><td>21.2</td><td>27.5</td></tr><tr><td>x</td><td>✓</td><td>11.7</td><td>22.3</td><td>29.2</td></tr><tr><td>✓</td><td>✓</td><td>12.0</td><td>23.3</td><td>30.1</td></tr></table>",
1223
+ "bbox": [
1224
+ 99,
1225
+ 491,
1226
+ 446,
1227
+ 571
1228
+ ],
1229
+ "page_idx": 6
1230
+ },
1231
+ {
1232
+ "type": "text",
1233
+ "text": "We compare various clustering algorithms, including clustering cliques or connected components as discussed in Section 3.3, and also clustering a relaxation of cliques using the Highly Connected Sub-graphs (HCS) algorithm [10]. We also show the impact of the approximation introduced in Section 3.3. As shown in Table 4, our proposed iterative clustering method outperforms all other trials. Comprehensive statistics are available in the supplementary material.",
1234
+ "bbox": [
1235
+ 75,
1236
+ 579,
1237
+ 468,
1238
+ 700
1239
+ ],
1240
+ "page_idx": 6
1241
+ },
1242
+ {
1243
+ "type": "table",
1244
+ "img_path": "images/c42a3b25c5b7392ef3abd1ae5f46e77933a8515ae565b9658ebb2d507fa65d64.jpg",
1245
+ "table_caption": [
1246
+ "Table 4. Ablation study on clustering methods."
1247
+ ],
1248
+ "table_footnote": [],
1249
+ "table_body": "<table><tr><td>Clustering Algorithm</td><td>AP</td><td>AP50</td><td>AP25</td></tr><tr><td>Connected component</td><td>11.0</td><td>21.2</td><td>27.5</td></tr><tr><td>Clique</td><td>11.3</td><td>22.0</td><td>29.4</td></tr><tr><td>Quasi-Clique (HCS)</td><td>11.9</td><td>22.9</td><td>29.7</td></tr><tr><td>Ours w/o approximation</td><td>11.8</td><td>23.1</td><td>30.4</td></tr><tr><td>Ours</td><td>12.0</td><td>23.3</td><td>30.1</td></tr></table>",
1250
+ "bbox": [
1251
+ 120,
1252
+ 732,
1253
+ 426,
1254
+ 825
1255
+ ],
1256
+ "page_idx": 6
1257
+ },
1258
+ {
1259
+ "type": "text",
1260
+ "text": "We conducted additional evaluations to assess the robustness of our algorithm to variations in hyperparameters. For the mask visibility threshold $\\tau_{vis}$ ranging from 0.6 to 0.8, the under-segment mask filtering threshold $\\tau_{filter}$ ranging",
1261
+ "bbox": [
1262
+ 75,
1263
+ 839,
1264
+ 468,
1265
+ 902
1266
+ ],
1267
+ "page_idx": 6
1268
+ },
1269
+ {
1270
+ "type": "table",
1271
+ "img_path": "images/eab72b7df8a8344d5490a776bcf85e90165e81a0c27d4e0a66c523fc488cd97e.jpg",
1272
+ "table_caption": [
1273
+ "Table 5. Ablation study on Hyperparameters on ScanNet200."
1274
+ ],
1275
+ "table_footnote": [],
1276
+ "table_body": "<table><tr><td></td><td>AP</td><td>AP50</td><td>AP25</td></tr><tr><td>τvis(0.6 - 0.8)</td><td>11.9 ± 0.06</td><td>23.2 ± 0.09</td><td>30.1 ± 0.07</td></tr><tr><td>τfilter(0.2 - 0.4)</td><td>11.9 ± 0.05</td><td>23.3 ± 0.19</td><td>30.0 ± 0.18</td></tr><tr><td>τrate(0.8 - 1)</td><td>11.8 ± 0.20</td><td>22.7 ± 0.52</td><td>28.9 ± 0.83</td></tr><tr><td>τcontain(0.7 - 0.9)</td><td>11.9±0.10</td><td>23.3± 0.22</td><td>30.3± 0.20</td></tr></table>",
1277
+ "bbox": [
1278
+ 504,
1279
+ 114,
1280
+ 888,
1281
+ 194
1282
+ ],
1283
+ "page_idx": 6
1284
+ },
1285
+ {
1286
+ "type": "text",
1287
+ "text": "from 0.2 to 0.4, the consensus rate threshold $\\tau_{rate}$ ranging from 0.8 to 1 and the approximate containment threshold $\\tau_{contain}$ ranging from 0.7 to 0.9, our method consistently demonstrates satisfying performance.",
1288
+ "bbox": [
1289
+ 498,
1290
+ 219,
1291
+ 890,
1292
+ 280
1293
+ ],
1294
+ "page_idx": 6
1295
+ },
1296
+ {
1297
+ "type": "text",
1298
+ "text": "4.4. Qualitative Results.",
1299
+ "text_level": 1,
1300
+ "bbox": [
1301
+ 500,
1302
+ 289,
1303
+ 687,
1304
+ 306
1305
+ ],
1306
+ "page_idx": 6
1307
+ },
1308
+ {
1309
+ "type": "text",
1310
+ "text": "In Fig. 6, we present the similarity heatmaps for a wide range of open-vocabulary queries, showcasing the remarkable capabilities of our open-vocabulary segmentation system. Additionally, in Fig. 5, we offer a visual comparison of our algorithm against all baseline methods. Our method shows excellent ability to segment small objects, e.g., items on the counter in ScanNet a), boxes on the shelf in ScanNet b). These small objects are simply labeled as part of its containers in the ground truth, which cause the AP at higher IoU threshold of our method drops severely.",
1311
+ "bbox": [
1312
+ 496,
1313
+ 313,
1314
+ 890,
1315
+ 464
1316
+ ],
1317
+ "page_idx": 6
1318
+ },
1319
+ {
1320
+ "type": "text",
1321
+ "text": "Compared to OVIR-3D, our method has two main advantages: i) OVIR-3D can't merges masks that have low geometric overlap but correspond to a same object well. For example, in ScanNet (b), items on the coffee table split the table point cloud into two pieces, making OVIR-3D fail to merge these two parts together. So do the sofa chair in ScanNet b) and the right rug in the MatterPort3D example. In the contrary, our method merges these objects well based on view consensus as explained in Section 3.2.1. ii) The strict filtering process in OVIR-3D falsely filter out many objects, e.g., counter and pictures in ScanNet a) while our method only conservatively filter out under-segment masks.",
1322
+ "bbox": [
1323
+ 496,
1324
+ 464,
1325
+ 890,
1326
+ 647
1327
+ ],
1328
+ "page_idx": 6
1329
+ },
1330
+ {
1331
+ "type": "text",
1332
+ "text": "4.5. Limitations",
1333
+ "text_level": 1,
1334
+ "bbox": [
1335
+ 500,
1336
+ 656,
1337
+ 625,
1338
+ 671
1339
+ ],
1340
+ "page_idx": 6
1341
+ },
1342
+ {
1343
+ "type": "text",
1344
+ "text": "While our approach demonstrates remarkable performance, it is important to acknowledge two notable limitations. Firstly, this work assumes near-perfect 2D segmentation and 2D-3D correspondence, which may not always be the case in certain applications. Presently, we only generate object-level masks, whereas real-world applications may necessitate multi-level masks spanning from parts and objects to clusters.",
1345
+ "bbox": [
1346
+ 496,
1347
+ 679,
1348
+ 890,
1349
+ 800
1350
+ ],
1351
+ "page_idx": 6
1352
+ },
1353
+ {
1354
+ "type": "text",
1355
+ "text": "5. Conclusion",
1356
+ "text_level": 1,
1357
+ "bbox": [
1358
+ 500,
1359
+ 814,
1360
+ 617,
1361
+ 830
1362
+ ],
1363
+ "page_idx": 6
1364
+ },
1365
+ {
1366
+ "type": "text",
1367
+ "text": "In this work, we propose a view consensus based mask graph clustering algorithm for open-vocabulary 3D instance segmentation. Specifically, our method constructs a global mask graph and leverages the view consensus to cluster",
1368
+ "bbox": [
1369
+ 496,
1370
+ 839,
1371
+ 890,
1372
+ 901
1373
+ ],
1374
+ "page_idx": 6
1375
+ },
1376
+ {
1377
+ "type": "image",
1378
+ "img_path": "images/f882d3366b7b5ff71981c7cd26fc781cc1cf487210e8b5f7c7eae06348cc0087.jpg",
1379
+ "image_caption": [
1380
+ "Figure 5. Comparison of 3D zero-shot segmentation performance. We compare our methods with OpenMask3D [37] and OVIR-3D [28] on ScanNet [4] and Matterport3D [1]."
1381
+ ],
1382
+ "image_footnote": [],
1383
+ "bbox": [
1384
+ 81,
1385
+ 87,
1386
+ 890,
1387
+ 571
1388
+ ],
1389
+ "page_idx": 7
1390
+ },
1391
+ {
1392
+ "type": "image",
1393
+ "img_path": "images/a5c366843025c9d196716edc40e442d002cfa6e80b91f75c064451eace98a414.jpg",
1394
+ "image_caption": [
1395
+ "Figure 6. Open-vocabulary queries of different shapes, colors and contents."
1396
+ ],
1397
+ "image_footnote": [],
1398
+ "bbox": [
1399
+ 99,
1400
+ 643,
1401
+ 450,
1402
+ 847
1403
+ ],
1404
+ "page_idx": 7
1405
+ },
1406
+ {
1407
+ "type": "text",
1408
+ "text": "the masks belonging to the same 3D instances. Besides, the mask clustering guided the clustering of the open-vocabulary features for text queries. The results demonstrate that our method achieves SOTA performance on zero-shot mask prediction and open-vocabulary understating. In the future, we would like to investigate the application of the proposed method on robotic tasks, such as open-vocabulary object navigation.",
1409
+ "bbox": [
1410
+ 498,
1411
+ 637,
1412
+ 890,
1413
+ 758
1414
+ ],
1415
+ "page_idx": 7
1416
+ },
1417
+ {
1418
+ "type": "text",
1419
+ "text": "6. Acknowledgements",
1420
+ "text_level": 1,
1421
+ "bbox": [
1422
+ 500,
1423
+ 771,
1424
+ 687,
1425
+ 789
1426
+ ],
1427
+ "page_idx": 7
1428
+ },
1429
+ {
1430
+ "type": "text",
1431
+ "text": "This work was supported in part by National Key R&D Program of China 2022ZD0160801.",
1432
+ "bbox": [
1433
+ 500,
1434
+ 797,
1435
+ 890,
1436
+ 827
1437
+ ],
1438
+ "page_idx": 7
1439
+ },
1440
+ {
1441
+ "type": "text",
1442
+ "text": "References",
1443
+ "text_level": 1,
1444
+ "bbox": [
1445
+ 78,
1446
+ 89,
1447
+ 173,
1448
+ 104
1449
+ ],
1450
+ "page_idx": 8
1451
+ },
1452
+ {
1453
+ "type": "list",
1454
+ "sub_type": "ref_text",
1455
+ "list_items": [
1456
+ "[1] Angel Chang, Angela Dai, Thomas Funkhouser, Maciej Halber, Matthias Niessner, Manolis Savva, Shuran Song, Andy Zeng, and Yinda Zhang. Matterport3d: Learning from rgb-d data in indoor environments. International Conference on 3D Vision (3DV), 2017. 2, 6, 8",
1457
+ "[2] Bowen Cheng, Ishan Misra, Alexander G Schwing, Alexander Kirillov, and Rohit Girdhar. Masked-attention mask transformer for universal image segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1290–1299, 2022. 2",
1458
+ "[3] Christopher Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3075-3084, 2019. 2",
1459
+ "[4] Angela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5828-5839, 2017. 1, 2, 6, 8",
1460
+ "[5] Martin Ester, Hans-Peter Kriegel, Jörg Sander, Xiaowei Xu, et al. A density-based algorithm for discovering clusters in large spatial databases with noise. In kdd, pages 226-231, 1996. 2",
1461
+ "[6] Pedro F Felzenszwalb and Daniel P Huttenlocher. Efficient graph-based image segmentation. International journal of computer vision, 59:167-181, 2004. 2",
1462
+ "[7] Golnaz Ghiasi, Xiuye Gu, Yin Cui, and Tsung-Yi Lin. Scaling open-vocabulary image segmentation with image-level labels. In European Conference on Computer Vision, 2021. 2",
1463
+ "[8] Qiao Gu, Ali Kuwajerwala, Sacha Morin, Krishna Murthy Jatavallabhula, Bipasha Sen, Aditya Agarwal, Corban Rivera, William Paul, Kirsty Ellis, Ramalingam Chellappa, Chuang Gan, Celso Miguel de Melo, Joshua B. Tenenbaum, Antonio Torralba, Florian Shkurti, and Liam Paull. Conceptgraphs: Open-vocabulary 3d scene graphs for perception and planning. ArXiv, abs/2309.16650, 2023. 1, 2",
1464
+ "[9] Lei Han, Tian Zheng, Lan Xu, and Lu Fang. Occuseg: Occupancy-aware 3d instance segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2940-2949, 2020. 2",
1465
+ "[10] Erez Hartuv and Ron Shamir. A clustering algorithm based on graph connectivity. Information processing letters, 76(4-6):175-181, 2000. 7",
1466
+ "[11] Shuting He, Henghui Ding, and Wei Jiang. Semantic-promoted debiasing and background disambiguation for zero-shot instance segmentation. 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 19498-19507, 2023. 1, 2",
1467
+ "[12] Ji Hou, Angela Dai, and Matthias Nießner. 3d-sis: 3d semantic instance segmentation of rgb-d scans. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4421-4430, 2019. 1",
1468
+ "[13] Wenbo Hu, Hengshuang Zhao, Li Jiang, Jiaya Jia, and Tien-Tsin Wong. Bidirectional projection network for cross"
1469
+ ],
1470
+ "bbox": [
1471
+ 78,
1472
+ 114,
1473
+ 470,
1474
+ 901
1475
+ ],
1476
+ "page_idx": 8
1477
+ },
1478
+ {
1479
+ "type": "list",
1480
+ "sub_type": "ref_text",
1481
+ "list_items": [
1482
+ "dimension scene understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14373-14382, 2021. 2",
1483
+ "[14] Zeyu Hu, Xuyang Bai, Jiaxiang Shang, Runze Zhang, Jiayu Dong, Xin Wang, Guangyuan Sun, Hongbo Fu, and Chiew-Lan Tai. Vmnet: Voxel-mesh network for geodesic-aware 3d semantic segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15488-15498, 2021. 2",
1484
+ "[15] Chenguang Huang, Oier Mees, Andy Zeng, and Wolfram Burgard. Visual language maps for robot navigation. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pages 10608-10615. IEEE, 2023. 1",
1485
+ "[16] Jingwei Huang, Haotian Zhang, Li Yi, Thomas Funkhouser, Matthias Nießner, and Leonidas J Guibas. Texturenet: Consistent local parametrizations for learning from high-resolution signals on meshes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4440-4449, 2019. 2",
1486
+ "[17] Shi-Sheng Huang, Ze-Yu Ma, Tai-Jiang Mu, Hongbo Fu, and Shi-Min Hu. Supervoxel convolution for online 3d semantic segmentation. ACM Transactions on Graphics (TOG), 40(3): 1-15, 2021. 2",
1487
+ "[18] Zhening Huang, Xiaoyang Wu, Xi Chen, Hengshuang Zhao, Lei Zhu, and Joan Lasenby. Openins3d: Snap and lookup for 3d open-vocabulary instance segmentation. ArXiv, abs/2309.00616, 2023. 1",
1488
+ "[19] Zhening Huang, Xiaoyang Wu, Xi Chen, Hengshuang Zhao, Lei Zhu, and Joan Lasenby. Openins3d: Snap and lookup for 3d open-vocabulary instance segmentation. arXiv preprint arXiv:2309.00616, 2023. 1, 2",
1489
+ "[20] Dat T. Huynh, Jason Kuen, Zhe nan Lin, Jiumiang Gu, and Ehsan Elhamifar. Open-vocabulary instance segmentation via robust cross-modal pseudo-labeling. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7010-7021, 2021. 1, 2",
1490
+ "[21] Prannay Kaul, Weidi Xie, and Andrew Zisserman. Multimodal classifiers for open-vocabulary object detection. In International Conference on Machine Learning, pages 15946-15969. PMLR, 2023. 2",
1491
+ "[22] Justin Kerr, Chung Min Kim, Ken Goldberg, Angjoo Kanazawa, and Matthew Tancik. Leref: Language embedded radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 19729-19739, 2023. 1",
1492
+ "[23] Dahun Kim, Anelia Angelova, and Weicheng Kuo. Region-aware pretraining for open-vocabulary object detection with vision transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11144-11154, 2023. 2",
1493
+ "[24] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4015-4026, 2023. 2",
1494
+ "[25] Boyi Li, Kilian Q Weinberger, Serge Belongie, Vladlen Koltun, and Rene Ranftl. Language-driven semantic seg-"
1495
+ ],
1496
+ "bbox": [
1497
+ 503,
1498
+ 92,
1499
+ 893,
1500
+ 901
1501
+ ],
1502
+ "page_idx": 8
1503
+ },
1504
+ {
1505
+ "type": "list",
1506
+ "sub_type": "ref_text",
1507
+ "list_items": [
1508
+ "mentation. In International Conference on Learning Representations, 2022. 2",
1509
+ "[26] Jinke Li, Xiao He, Yang Wen, Yuan Gao, Xiaogiang Cheng, and Dan Zhang. Panoptic-phnet: Towards real-time and high-precision lidar panoptic segmentation via clustering pseudo heatmap. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11809–11818, 2022. 2",
1510
+ "[27] Leyao Liu, Tian Zheng, Yun-Jou Lin, Kai Ni, and Lu Fang. Ins-conv: Incremental sparse convolution for online 3d segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18975–18984, 2022. 2",
1511
+ "[28] Shiyang Lu, Haonan Chang, Eric Pu Jing, Abdeslam Boularias, and Kostas Bekris. Ovir-3d: Open-vocabulary 3d instance retrieval without training on 3d data. In Conference on Robot Learning, pages 1610–1620. PMLR, 2023. 1, 2, 6, 8",
1512
+ "[29] Gaku Narita, Takashi Seno, Tomoya Ishikawa, and Yohsuke Kaji. Panopticfusion: Online volumetric semantic mapping at the level of stuff and things. In 2019 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 4205-4212. IEEE, 2019. 2",
1513
+ "[30] Phuc DA Nguyen, Tuan Duc Ngo, Chuang Gan, Evangelos Kalogerakis, Anh Tran, Cuong Pham, and Khoi Nguyen. Open3dis: Open-vocabulary 3d instance segmentation with 2d mask guidance. arXiv preprint arXiv:2312.10671, 2023. 2",
1514
+ "[31] Songyou Peng, Kyle Genova, Chiyu Jiang, Andrea Tagliasacchi, Marc Pollefeys, Thomas Funkhouser, et al. Openscene: 3d scene understanding with open vocabularies. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 815-824, 2023. 2",
1515
+ "[32] Lu Qi, Jason Kuen, Tiancheng Shen, Jiuxiang Gu, Wenbo Li, Weidong Guo, Jiaya Jia, Zhe Lin, and Ming-Hsuan Yang. High quality entity segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4047-4056, 2023. 2, 3, 5",
1516
+ "[33] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 2, 5",
1517
+ "[34] Damien Robert, Bruno Vallet, and Loic Landrieu. Learning multi-view aggregation in the wild for large-scale 3d semantic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5575-5584, 2022. 2",
1518
+ "[35] David Rozenberszki, Or Litany, and Angela Dai. Language-grounded indoor 3d semantic segmentation in the wild. In European Conference on Computer Vision, pages 125-141. Springer, 2022. 1, 2, 6",
1519
+ "[36] Jonas Schult, Francis Engelmann, Alexander Hermans, Or Litany, Siyu Tang, and Bastian Leibe. Mask3d: Mask transformer for 3d semantic instance segmentation. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pages 8216-8223. IEEE, 2023. 2, 6"
1520
+ ],
1521
+ "bbox": [
1522
+ 78,
1523
+ 90,
1524
+ 468,
1525
+ 898
1526
+ ],
1527
+ "page_idx": 9
1528
+ },
1529
+ {
1530
+ "type": "list",
1531
+ "sub_type": "ref_text",
1532
+ "list_items": [
1533
+ "[37] Ayca Takmaz, Elisabetta Fedele, Robert W. Sumner, Marc Pollefeys, Federico Tombari, and Francis Engelmann. OpenMask3D: Open-Vocabulary 3D Instance Segmentation. In Advances in Neural Information Processing Systems (NeurIPS), 2023. 1, 2, 5, 6, 8",
1534
+ "[38] Bill Triggs, Philip F McLauchlan, Richard I Hartley, and Andrew W Fitzgibbon. Bundle adjustment—a modern synthesis. In Vision Algorithms: Theory and Practice: International Workshop on Vision Algorithms Corfu, Greece, September 21–22, 1999 Proceedings, pages 298–372. Springer, 2000. 2",
1535
+ "[39] VS Vibashan, Ning Yu, Chen Xing, Can Qin, Mingfei Gao, Juan Carlos Niebles, Vishal M. Patel, and Ran Xu. Mask-free ovis: Open-vocabulary instance segmentation without manual mask annotations. 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 23539-23549, 2023. 1, 2",
1536
+ "[40] Suhani Vora, Noha Radwan, Klaus Greff, Henning Meyer, Kyle Genova, Mehdi SM Sajjadi, Etienne Pot, Andrea Tagliasacchi, and Daniel Duckworth. Nesf: Neural semantic fields for generalizable semantic segmentation of 3d scenes. arXiv preprint arXiv:2111.13260, 2021. 2",
1537
+ "[41] Thang Vu, Kookhoi Kim, Tung M Luu, Thanh Nguyen, Junyeong Kim, and Chang D Yoo. Softgroup++: Scalable 3d instance segmentation with octree pyramid grouping. arXiv preprint arXiv:2209.08263, 2022. 2",
1538
+ "[42] Thang Vu, Kookhoi Kim, Tung M Luu, Thanh Nguyen, and Chang D Yoo. Softgroup for 3d instance segmentation on point clouds. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2708-2717, 2022. 2",
1539
+ "[43] Haoxiang Wang, Pavan Kumar Anasosalu Vasu, Fartash Faghri, Raviteja Vemulapalli, Mehrdad Farajtabar, Sachin Mehta, Mohammad Rastegari, Oncel Tuzel, and Hadi Pouransari. Sam-clip: Merging vision foundation models towards semantic and spatial understanding. ArXiv, abs/2310.15308, 2023. 1, 2",
1540
+ "[44] Jianzong Wu, Xiangtai Li, Henghui Ding, Xia Li, Guangliang Cheng, Yunhai Tong, and Chen Change Loy. Betrayed by captions: Joint caption grounding and generation for open vocabulary instance segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 21938-21948, 2023. 1, 2",
1541
+ "[45] Kashu Yamazaki, Taisei Hanyu, Khoa Vo, Thang Pham, Minh Tran, Gianfranco Doretto, Anh Nguyen, and Ngan Le. Open-fusion: Real-time open-vocabulary 3d mapping and queryable scene representation. arXiv preprint arXiv:2310.03923, 2023. 1",
1542
+ "[46] Yunhan Yang, Xiaoyang Wu, Tong He, Hengshuang Zhao, and Xihui Liu. Sam3d: Segment anything in 3d scenes. arXiv preprint arXiv:2306.03908, 2023. 2",
1543
+ "[47] Chandan Yeshwanth, Yueh-Cheng Liu, Matthias Nießner, and Angela Dai. Scannet++: A high-fidelity dataset of 3d indoor scenes. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12-22, 2023. 2, 6",
1544
+ "[48] Yingda Yin, Yuzheng Liu, Yang Xiao, Daniel Cohen-Or, Jingwei Huang, and Baoquan Chen. Sai3d: Segment any in"
1545
+ ],
1546
+ "bbox": [
1547
+ 501,
1548
+ 92,
1549
+ 890,
1550
+ 898
1551
+ ],
1552
+ "page_idx": 9
1553
+ },
1554
+ {
1555
+ "type": "list",
1556
+ "sub_type": "ref_text",
1557
+ "list_items": [
1558
+ "stance in 3d scenes. arXiv preprint arXiv:2312.11557, 2023. 2",
1559
+ "[49] Jiazhao Zhang, Chenyang Zhu, Lintao Zheng, and Kai Xu. Fusion-aware point convolution for online semantic 3d scene segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4534-4543, 2020. 2",
1560
+ "[50] Jiazhao Zhang, Liu Dai, Fanpeng Meng, Qingnan Fan, Xuelin Chen, Kai Xu, and He Wang. 3d-aware object goal navigation via simultaneous exploration and identification. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6672-6682, 2023. 1",
1561
+ "[51] Lintao Zheng, Chenyang Zhu, Jiazhao Zhang, Hang Zhao, Hui Huang, Matthias Niessner, and Kai Xu. Active scene understanding via online semantic reconstruction. In Computer Graphics Forum, pages 103-114. Wiley Online Library, 2019. 2",
1562
+ "[52] Shuaifeng Zhi, Tristan Laidlow, Stefan Leutenegger, and Andrew J Davison. In-place scene labelling and understanding with implicit scene representation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15838-15847, 2021. 2",
1563
+ "[53] Xingyi Zhou, Rohit Girdhar, Armand Joulin, Philipp Krahenbuhl, and Ishan Misra. Detecting twenty-thousand classes using image-level supervision. In European Conference on Computer Vision, pages 350-368. Springer, 2022. 2"
1564
+ ],
1565
+ "bbox": [
1566
+ 78,
1567
+ 90,
1568
+ 468,
1569
+ 468
1570
+ ],
1571
+ "page_idx": 10
1572
+ }
1573
+ ]