SlowGuess committed
Commit b3266bb · verified · 1 Parent(s): 2f51ae3

Add Batch dc3bee90-994c-4818-a846-1d9be2f1e66c

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50)
  1. .gitattributes +63 -0
  2. 2303.01xxx/2303.01037/092875b4-b785-48f9-b77b-c286523da2d0_content_list.json +0 -0
  3. 2303.01xxx/2303.01037/092875b4-b785-48f9-b77b-c286523da2d0_model.json +0 -0
  4. 2303.01xxx/2303.01037/092875b4-b785-48f9-b77b-c286523da2d0_origin.pdf +3 -0
  5. 2303.01xxx/2303.01037/full.md +475 -0
  6. 2303.01xxx/2303.01037/images.zip +3 -0
  7. 2303.01xxx/2303.01037/layout.json +0 -0
  8. 2303.01xxx/2303.01047/bb789d82-765d-47ed-82b3-a11998226e8d_content_list.json +1163 -0
  9. 2303.01xxx/2303.01047/bb789d82-765d-47ed-82b3-a11998226e8d_model.json +1760 -0
  10. 2303.01xxx/2303.01047/bb789d82-765d-47ed-82b3-a11998226e8d_origin.pdf +3 -0
  11. 2303.01xxx/2303.01047/full.md +256 -0
  12. 2303.01xxx/2303.01047/images.zip +3 -0
  13. 2303.01xxx/2303.01047/layout.json +0 -0
  14. 2303.01xxx/2303.01055/2545cb8d-2683-4793-9ae1-7a592b42bc74_content_list.json +0 -0
  15. 2303.01xxx/2303.01055/2545cb8d-2683-4793-9ae1-7a592b42bc74_model.json +0 -0
  16. 2303.01xxx/2303.01055/2545cb8d-2683-4793-9ae1-7a592b42bc74_origin.pdf +3 -0
  17. 2303.01xxx/2303.01055/full.md +618 -0
  18. 2303.01xxx/2303.01055/images.zip +3 -0
  19. 2303.01xxx/2303.01055/layout.json +0 -0
  20. 2303.01xxx/2303.01067/4a89950d-6ff6-43c5-9ea0-d219ee099751_content_list.json +1706 -0
  21. 2303.01xxx/2303.01067/4a89950d-6ff6-43c5-9ea0-d219ee099751_model.json +0 -0
  22. 2303.01xxx/2303.01067/4a89950d-6ff6-43c5-9ea0-d219ee099751_origin.pdf +3 -0
  23. 2303.01xxx/2303.01067/full.md +318 -0
  24. 2303.01xxx/2303.01067/images.zip +3 -0
  25. 2303.01xxx/2303.01067/layout.json +0 -0
  26. 2303.01xxx/2303.01177/820e6330-cce6-4c34-a45a-1398d2625e6f_content_list.json +1798 -0
  27. 2303.01xxx/2303.01177/820e6330-cce6-4c34-a45a-1398d2625e6f_model.json +2209 -0
  28. 2303.01xxx/2303.01177/820e6330-cce6-4c34-a45a-1398d2625e6f_origin.pdf +3 -0
  29. 2303.01xxx/2303.01177/full.md +355 -0
  30. 2303.01xxx/2303.01177/images.zip +3 -0
  31. 2303.01xxx/2303.01177/layout.json +0 -0
  32. 2303.01xxx/2303.01179/3e5857a0-f202-447f-a88c-4abb51daea13_content_list.json +0 -0
  33. 2303.01xxx/2303.01179/3e5857a0-f202-447f-a88c-4abb51daea13_model.json +0 -0
  34. 2303.01xxx/2303.01179/3e5857a0-f202-447f-a88c-4abb51daea13_origin.pdf +3 -0
  35. 2303.01xxx/2303.01179/full.md +0 -0
  36. 2303.01xxx/2303.01179/images.zip +3 -0
  37. 2303.01xxx/2303.01179/layout.json +0 -0
  38. 2303.01xxx/2303.01211/8f863c6c-6773-4847-97f6-bf8177a76d73_content_list.json +728 -0
  39. 2303.01xxx/2303.01211/8f863c6c-6773-4847-97f6-bf8177a76d73_model.json +969 -0
  40. 2303.01xxx/2303.01211/8f863c6c-6773-4847-97f6-bf8177a76d73_origin.pdf +3 -0
  41. 2303.01xxx/2303.01211/full.md +152 -0
  42. 2303.01xxx/2303.01211/images.zip +3 -0
  43. 2303.01xxx/2303.01211/layout.json +0 -0
  44. 2303.01xxx/2303.01212/77689b23-f58b-481c-a4dd-45092d52ee6d_content_list.json +0 -0
  45. 2303.01xxx/2303.01212/77689b23-f58b-481c-a4dd-45092d52ee6d_model.json +0 -0
  46. 2303.01xxx/2303.01212/77689b23-f58b-481c-a4dd-45092d52ee6d_origin.pdf +3 -0
  47. 2303.01xxx/2303.01212/full.md +0 -0
  48. 2303.01xxx/2303.01212/images.zip +3 -0
  49. 2303.01xxx/2303.01212/layout.json +0 -0
  50. 2303.01xxx/2303.01217/ff487c30-0260-4593-894c-b42a3537b180_content_list.json +1284 -0
.gitattributes CHANGED
@@ -10393,3 +10393,66 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
10393
  2308.15xxx/2308.15745/f395a497-6f70-4b8a-83cc-e0faeb192441_origin.pdf filter=lfs diff=lfs merge=lfs -text
10394
  2309.15xxx/2309.15467/d02c2f03-8b4e-444a-a94c-1dbcf5f10982_origin.pdf filter=lfs diff=lfs merge=lfs -text
10395
  2309.15xxx/2309.15492/04cb662c-ac88-4e61-a628-eb9f34e12559_origin.pdf filter=lfs diff=lfs merge=lfs -text
10396
+ 2303.01xxx/2303.01037/092875b4-b785-48f9-b77b-c286523da2d0_origin.pdf filter=lfs diff=lfs merge=lfs -text
10397
+ 2303.01xxx/2303.01047/bb789d82-765d-47ed-82b3-a11998226e8d_origin.pdf filter=lfs diff=lfs merge=lfs -text
10398
+ 2303.01xxx/2303.01055/2545cb8d-2683-4793-9ae1-7a592b42bc74_origin.pdf filter=lfs diff=lfs merge=lfs -text
10399
+ 2303.01xxx/2303.01067/4a89950d-6ff6-43c5-9ea0-d219ee099751_origin.pdf filter=lfs diff=lfs merge=lfs -text
10400
+ 2303.01xxx/2303.01177/820e6330-cce6-4c34-a45a-1398d2625e6f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10401
+ 2303.01xxx/2303.01179/3e5857a0-f202-447f-a88c-4abb51daea13_origin.pdf filter=lfs diff=lfs merge=lfs -text
10402
+ 2303.01xxx/2303.01211/8f863c6c-6773-4847-97f6-bf8177a76d73_origin.pdf filter=lfs diff=lfs merge=lfs -text
10403
+ 2303.01xxx/2303.01212/77689b23-f58b-481c-a4dd-45092d52ee6d_origin.pdf filter=lfs diff=lfs merge=lfs -text
10404
+ 2303.01xxx/2303.01217/ff487c30-0260-4593-894c-b42a3537b180_origin.pdf filter=lfs diff=lfs merge=lfs -text
10405
+ 2303.01xxx/2303.01237/f862e8c8-723e-4ee5-86ae-2deac25e9855_origin.pdf filter=lfs diff=lfs merge=lfs -text
10406
+ 2303.01xxx/2303.01267/bcb527bc-cc38-4469-84ce-2ea201c034ed_origin.pdf filter=lfs diff=lfs merge=lfs -text
10407
+ 2303.01xxx/2303.01272/5f53256c-3bc6-476c-8504-44791ae5a0a4_origin.pdf filter=lfs diff=lfs merge=lfs -text
10408
+ 2303.01xxx/2303.01274/b03cce5f-2a45-4ad9-aa0d-118b2439b1d4_origin.pdf filter=lfs diff=lfs merge=lfs -text
10409
+ 2303.01xxx/2303.01276/36d07a80-a75e-432a-9119-e36f6bd88f2d_origin.pdf filter=lfs diff=lfs merge=lfs -text
10410
+ 2303.01xxx/2303.01284/ae9f87d1-b79e-4238-8f62-6d9bdc24d9eb_origin.pdf filter=lfs diff=lfs merge=lfs -text
10411
+ 2303.01xxx/2303.01325/943bbf40-9b75-4f9a-82a9-0d45f6bd38f8_origin.pdf filter=lfs diff=lfs merge=lfs -text
10412
+ 2303.01xxx/2303.01372/6d8c84fe-9c9e-4d14-a80d-e5fe1bc68935_origin.pdf filter=lfs diff=lfs merge=lfs -text
10413
+ 2303.01xxx/2303.01381/6f05fb2f-8837-4e44-b993-583828f88627_origin.pdf filter=lfs diff=lfs merge=lfs -text
10414
+ 2303.01xxx/2303.01416/9bd70d98-18f5-4936-bfee-bbb252ea19f8_origin.pdf filter=lfs diff=lfs merge=lfs -text
10415
+ 2303.01xxx/2303.01418/50f8b633-6305-44cf-a9f2-d888a1b0d9c9_origin.pdf filter=lfs diff=lfs merge=lfs -text
10416
+ 2303.01xxx/2303.01432/763037bd-f44c-456d-8930-26a2d8c41bc9_origin.pdf filter=lfs diff=lfs merge=lfs -text
10417
+ 2303.01xxx/2303.01469/26842c2a-641f-48f2-ba5a-8e0a43dd91d4_origin.pdf filter=lfs diff=lfs merge=lfs -text
10418
+ 2303.01xxx/2303.01480/d73a4f70-8618-4b2e-bb02-024b122a48ec_origin.pdf filter=lfs diff=lfs merge=lfs -text
10419
+ 2303.01xxx/2303.01486/a22081ef-05bd-4bfb-b214-7158d156ef73_origin.pdf filter=lfs diff=lfs merge=lfs -text
10420
+ 2303.01xxx/2303.01494/330a205b-a20b-4229-ab8d-34b35d1a050e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10421
+ 2303.01xxx/2303.01497/00eccbe6-a3c6-4147-8f90-89e8d4c227cd_origin.pdf filter=lfs diff=lfs merge=lfs -text
10422
+ 2303.01xxx/2303.01500/63d818c3-4cef-49f5-bcbb-090a5a8b6e35_origin.pdf filter=lfs diff=lfs merge=lfs -text
10423
+ 2303.01xxx/2303.01560/6fea8816-d619-42a7-afed-ec127159cd16_origin.pdf filter=lfs diff=lfs merge=lfs -text
10424
+ 2303.01xxx/2303.01610/f047f48a-4a01-4753-ac9f-43f3907eeeda_origin.pdf filter=lfs diff=lfs merge=lfs -text
10425
+ 2303.01xxx/2303.01614/b642fc29-5dd5-4631-b618-e4614fa4748b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10426
+ 2303.01xxx/2303.01633/26407586-12b1-4959-886e-1258b852e7e4_origin.pdf filter=lfs diff=lfs merge=lfs -text
10427
+ 2303.01xxx/2303.01664/f6046c15-fe2e-48ed-8f98-d88f5e1703bf_origin.pdf filter=lfs diff=lfs merge=lfs -text
10428
+ 2303.01xxx/2303.01679/b3ec48c8-1732-4fd5-b338-c08552f7f787_origin.pdf filter=lfs diff=lfs merge=lfs -text
10429
+ 2303.01xxx/2303.01710/ffdc7c0b-c7fa-468e-a7ce-510783c34bcb_origin.pdf filter=lfs diff=lfs merge=lfs -text
10430
+ 2303.01xxx/2303.01758/3b46a19f-2449-424a-a7a7-700d23d17f00_origin.pdf filter=lfs diff=lfs merge=lfs -text
10431
+ 2303.01xxx/2303.01818/e45087a8-1c84-442e-8f75-34cb72747c6b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10432
+ 2303.01xxx/2303.01859/0038786b-ae4f-49b3-b065-b39805fa0d71_origin.pdf filter=lfs diff=lfs merge=lfs -text
10433
+ 2303.01xxx/2303.01861/80b6c663-f31a-406e-860f-df30bb01ee76_origin.pdf filter=lfs diff=lfs merge=lfs -text
10434
+ 2303.01xxx/2303.01870/9b05ab73-21b7-4bb5-8e36-654e9d40980b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10435
+ 2303.01xxx/2303.01871/ab205ed3-a90a-4218-af7b-95942cfa0314_origin.pdf filter=lfs diff=lfs merge=lfs -text
10436
+ 2303.01xxx/2303.01904/86e5f4d6-5191-4007-8ea2-b7111550fe80_origin.pdf filter=lfs diff=lfs merge=lfs -text
10437
+ 2303.01xxx/2303.01911/3fa7d336-b6be-42c0-b187-ba0bb398d69e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10438
+ 2303.01xxx/2303.01943/e3eb1ee4-2464-49de-bcb2-2365fafce702_origin.pdf filter=lfs diff=lfs merge=lfs -text
10439
+ 2303.02xxx/2303.02001/cc2f7413-fd62-43ad-8ed0-53e712ff486f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10440
+ 2303.02xxx/2303.02011/26bc766f-555a-4528-a459-c33bce53a764_origin.pdf filter=lfs diff=lfs merge=lfs -text
10441
+ 2303.02xxx/2303.02045/223511af-7625-4a73-9a1c-e27c21307c9a_origin.pdf filter=lfs diff=lfs merge=lfs -text
10442
+ 2303.02xxx/2303.02063/ef9db8fd-99ca-44ea-9353-4417d307672a_origin.pdf filter=lfs diff=lfs merge=lfs -text
10443
+ 2303.02xxx/2303.02091/e4a91fba-7d49-42c4-ae0b-79c871a0661d_origin.pdf filter=lfs diff=lfs merge=lfs -text
10444
+ 2303.02xxx/2303.02151/a06739b2-9f44-461d-9ded-7780417f90b3_origin.pdf filter=lfs diff=lfs merge=lfs -text
10445
+ 2303.02xxx/2303.02153/a2de2467-9ecc-4e55-bef4-dab85072117e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10446
+ 2303.02xxx/2303.02190/75081598-bae2-4b1a-bb14-175b0fdede00_origin.pdf filter=lfs diff=lfs merge=lfs -text
10447
+ 2303.02xxx/2303.02213/115e4adc-499c-4318-aeea-6ff66e9bf77a_origin.pdf filter=lfs diff=lfs merge=lfs -text
10448
+ 2303.02xxx/2303.02269/b09f3beb-b123-40c3-8252-775c19be885d_origin.pdf filter=lfs diff=lfs merge=lfs -text
10449
+ 2303.02xxx/2303.02314/5669cde5-7bb0-480a-a61b-277fb214846d_origin.pdf filter=lfs diff=lfs merge=lfs -text
10450
+ 2303.02xxx/2303.02328/b0cf3b0e-f182-42a2-bc70-dc6bcff6f669_origin.pdf filter=lfs diff=lfs merge=lfs -text
10451
+ 2303.02xxx/2303.02346/81a74bcb-a689-4949-a854-8a7710022b78_origin.pdf filter=lfs diff=lfs merge=lfs -text
10452
+ 2303.02xxx/2303.02389/f6fa60ec-7182-44ea-b322-ab389dbe986e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10453
+ 2303.03xxx/2303.03108/71aa46df-4e8e-4539-bd3b-403ff6c42779_origin.pdf filter=lfs diff=lfs merge=lfs -text
10454
+ 2303.03xxx/2303.03186/19426b3e-a2d0-49c3-a088-c43512efe1f0_origin.pdf filter=lfs diff=lfs merge=lfs -text
10455
+ 2303.03xxx/2303.03329/7942c3b2-490e-4a6e-ae0e-e58f4cd012d4_origin.pdf filter=lfs diff=lfs merge=lfs -text
10456
+ 2303.03xxx/2303.03875/f4d2309f-e1d1-4fbd-b928-329455a8e403_origin.pdf filter=lfs diff=lfs merge=lfs -text
10457
+ 2303.04xxx/2303.04741/bf98aa81-d234-438b-af75-4a077950a071_origin.pdf filter=lfs diff=lfs merge=lfs -text
10458
+ 2303.05xxx/2303.05398/ef2b6bbf-4244-4f81-93d5-b68aeda66fed_origin.pdf filter=lfs diff=lfs merge=lfs -text
2303.01xxx/2303.01037/092875b4-b785-48f9-b77b-c286523da2d0_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.01xxx/2303.01037/092875b4-b785-48f9-b77b-c286523da2d0_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.01xxx/2303.01037/092875b4-b785-48f9-b77b-c286523da2d0_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6e5e6c89446a195d33c6ca77f8c9d21862059097bd698912986cf95ecbdd42fa
3
+ size 1175786
2303.01xxx/2303.01037/full.md ADDED
@@ -0,0 +1,475 @@
1
+ # Google USM: Scaling Automatic Speech Recognition Beyond 100 Languages
2
+
3
+ Yu Zhang Wei Han James Qin Yongqiang Wang Ankur Bapna Zhehuai Chen
4
+
5
+ Nanxin Chen Bo Li Vera Axelrod Gary Wang Zhong Meng Ke Hu
6
+
7
+ Andrew Rosenberg Rohit Prabhavalkar Daniel S. Park Parisa Haghani
8
+
9
+ Jason Riesa Ginger Perng Hagen Soltau Trevor Strohman
10
+
11
+ Bhuvana Ramabhadran Tara Sainath Pedro Moreno Chung-Cheng Chiu
12
+
13
+ Johan Schalkwyk Françoise Beaufays Yonghui Wu
14
+
15
+ # Abstract
16
+
17
+ We introduce the Universal Speech Model (USM), a single large model that performs automatic speech recognition (ASR) across 100+ languages. This is achieved by pre-training the encoder of the model on a large unlabeled multilingual dataset of 12 million (M) hours spanning over 300 languages, and fine-tuning on a smaller labeled dataset. We use multilingual pre-training with random-projection quantization and speech-text modality matching to achieve state-of-the-art performance on downstream multilingual ASR and speech-to-text translation tasks. We also demonstrate that despite using a labeled training set 1/7-th the size of that used for the Whisper model [1], our model exhibits comparable or better performance on both in-domain and out-of-domain speech recognition tasks across many languages.
18
+
19
+ # 1 Introduction
20
+
21
+ Recent advances in self-supervised learning have ushered in a new era for speech recognition. Whereas previous works focused mostly on improving the quality of monolingual models for mainstream languages, recent studies have increasingly turned to "universal" models [1-4]. These may take the form of a single model that performs well on multiple tasks [1,2], or one that covers multiple domains [2,3], or one that supports multiple languages [1,5]. In this work, we explore the frontiers of language expansion. Our long-term goal is to train a universal ASR model that covers all the spoken languages in the world.
22
+
23
+ A fundamental challenge in scaling speech technologies to many languages is obtaining enough data to train high-quality models. With conventional supervised training approaches, audio data needs to be manually transcribed, which is lengthy and expensive, or collected from existing transcribed sources which are hard to find for tail languages. While transcribed speech may be scarce in many
24
+
25
+ languages, untranscribed speech and text data are practically unlimited. Recent developments in semi-supervised algorithms for speech recognition make it possible to leverage such data for pre-training and to produce high-quality speech models with a limited amount of transcribed data [3,6].
26
+
27
+ Moreover, recent studies have shown that a single large model can utilize large data sets more effectively than smaller models [1,4]. This all points to a promising direction where large amounts of unpaired multilingual speech and text data and smaller amounts of transcribed data can contribute to training a single large universal ASR model.
28
+
29
+ # 1.1 Our approach
30
+
31
+ We produce large "Universal Speech Models" (USMs) through a training pipeline that utilizes three types of datasets:
32
+
33
+ - Unpaired Audio:
34
+
35
+ - YT-NTL-U: A large unlabeled multilingual dataset consisting of 12M hours of YouTube-based audio covering over 300 languages.
36
+ - Pub-U: 429k hours of unlabeled speech in 51 languages based on public datasets.
37
+
38
+ - Unpaired Text:
39
+
40
+ - Web-NTL: A large multilingual text-only corpus with 28B sentences spanning over 1140 languages.
41
+
42
+ - Paired ASR Data: We utilize two corpora of paired audio-text data with O(10k) hours of audio for supervised training.
43
+
44
+ - YT-SUP+: 90k hours of labeled multilingual data covering 73 languages and 100k hours of en-US pseudo-labeled data generated by noisy student training (NST) [7,8] from YT-NTL-U.
45
+ - Pub-S: 10k hours of labeled multi-domain en-US public data and 10k hours of labeled multilingual public data covering 102 languages.
46
+
47
+ 2B-parameter Conformer [9] models are built using these datasets through the following steps:
48
+
49
+ 1. Unsupervised Pre-training: BEST-RQ (BERT-based Speech pre-Training with Random-projection Quantizer) [10] is used to pre-train the encoder of the model with YT-NTL-U.
50
+ 2. MOST (Multi-Objective Supervised pre-Training): The model can optionally be further prepared by a multi-objective supervised pre-training pipeline that utilizes all three kinds of datasets: YT-NTL-U, Pub-U, Web-NTL and Pub-S. Here, a weighted sum of the BEST-RQ masked language model loss [11], along with the text-injection losses (including the supervised ASR loss and modality matching losses) [12, 13] is optimized during training.
51
+ 3. Supervised ASR Training: We produce generic ASR models trained with connectionist temporal classification (CTC) [14] and Listen, Attend, and Spell (LAS) [15] transducers for downstream tasks.
52
+
53
+ Two types of models are produced through this pipeline—pre-trained models that can be fine-tuned on downstream tasks, and generic ASR models for which we assume no downstream fine-tuning occurs. The generic ASR models are trained with chunk-wise attention, which we introduce later in this report.
54
+
55
+ Table 1: USM models prepared in this work. The generic ASR models are trained on a large "upstream" ASR corpus and not finetuned further, while the pre-trained models are fine-tuned on downstream tasks.
56
+
57
+ <table><tr><td>Model</td><td>BEST-RQ</td><td>MOST</td><td>Model-Type</td><td>Decoder</td><td>Upstream ASR Dataset</td><td>Chunk-wise Attention</td></tr><tr><td>USM</td><td rowspan="4">YT-NTL-U</td><td>N</td><td>Pre-trained</td><td>Downstream Dependent</td><td>-</td><td>N</td></tr><tr><td>USM-M</td><td>Y</td><td>Pre-trained</td><td>Downstream Dependent</td><td>-</td><td>N</td></tr><tr><td>USM-LAS</td><td>N</td><td>Generic ASR</td><td>LAS</td><td>YT-SUP+</td><td>Y</td></tr><tr><td>USM-CTC</td><td>N</td><td>Generic ASR</td><td>CTC</td><td>YT-SUP+</td><td>Y</td></tr></table>
58
+
59
+ We denote the pre-trained models USM and USM-M, where the suffix -M indicates that MOST has been utilized for the preparation of the model. The USM and USM-M models can be further fine-tuned on the downstream task of choice with an appropriate transducer unit, which can be a CTC, LAS or RNN transducer (RNN-T) unit. We evaluate our USM models on two types of benchmarks:
60
+
61
+ - Automatic Speech Recognition (ASR): We use YouTube data to train USMs for YouTube (e.g., closed captions). We evaluate the USMs on two public benchmarks, SpeechStew [2] and FLEURS [16]. We also report results on the long-form test set CORAAL [17] for which only the evaluation set is available.
62
+ - Automatic Speech Translation (AST): We test AST performance on CoVoST 2 [18].
63
+
64
+ As indicated in Table 1, the generic ASR models are trained with YT-SUP+ and not fine-tuned on domain-specific datasets for downstream ASR tasks. We, however, explore the possibility of attaching additional "adapter" units [19] to both generic and pre-trained ASR models and training adapter weights while keeping the rest of the model frozen.
65
+
66
+ ![](images/31c5294cfd8e5cab1d70f1e7e82942d052cac27c78c8a3da4c02ef98e4993836.jpg)
67
+ Figure 1: An overview of our approach. Training is split into three stages. (i) The first stage trains a conformer backbone on a large unlabeled speech dataset, optimizing for the BEST-RQ objective. (ii) We continue training this speech representation learning model while optimizing for multiple objectives, the BEST-RQ objective on unlabeled speech, the modality matching, supervised ASR and duration modeling losses on paired speech and transcript data and the text reconstruction objective with an RNN-T decoder on unlabeled text. (iii) The third stage fine-tunes this pre-trained encoder on the ASR or AST tasks.
68
+
69
+ The overall training pipeline of our models is summarized in Fig. 1. In our design, once a large amount of compute is expended in the pre-training stages, the downstream application can be conveniently fine-tuned from a model trained from stage-1 or stage-2 with a task-specific transducer. Our experimental results demonstrate that this pipelined training framework enables us to build both generic multilingual ASR systems and domain specific models with state-of-the-art performance.
70
+
71
+ We next present the key findings of our research, provide an overall view of the report, and review related work.
72
+
73
+ # 1.2 Key Findings
74
+
75
+ SoTA results for downstream multilingual speech tasks: Our USM models achieve state-of-the-art performance for multilingual ASR and AST for multiple datasets in multiple domains. This includes SpeechStew (mono-lingual ASR) [2], CORAAL (African American Vernacular English (AAVE) ASR) [17], FLEURS (multi-lingual ASR) [16], YT (multilingual long-form ASR), and CoVoST (AST from English to multiple languages). We depict our model's performance in the first panel of Fig. 2. We also build an ASR model for YouTube captioning – i.e., the transcription of speech in YouTube videos, that achieves $< 30\%$ WER on 73 languages. With only 90k hours of supervised data, this model performs better than Whisper [1], a strong general ASR system trained on more than
76
+
77
+ ![](images/61d931e15898ab8d4123ee301febdd63fea2a673878d8716a0dddaff9cf379b0.jpg)
78
+ Figure 2: $(\text{Left})^{\dagger}$ WERs (\%) for our language expansion effort to support more languages on YouTube (73 languages) and to extend to $100+$ languages on the public dataset (FLEURS). Lower is better. To the best of our knowledge, no published model can successfully decode all 73 languages from our YouTube set, so we only list our results. $(\text{Middle})^{\dagger}$ Our results on ASR benchmarks, with or without in-domain data. Lower is better. (Right) SoTA results on public speech translation tasks. Results are presented for high/mid/low-resource languages as defined in [20]. Higher is better.
79
+
80
+ ![](images/fc6aef4d89dd2709a196e4f7084db72a207fc69bb050a1d2cf2f78b190010c4e.jpg)
81
+
82
+ ![](images/c0179f6245575d7f8ebad9d3f601342f57e02a3164eb3a8cb3ac989982e05606.jpg)
83
+
84
+ 400k hours of transcribed data (we select 18 languages that Whisper can successfully decode with lower than $40\%$ WER). The second panel of Fig. 2 demonstrates that our YouTube captions model generalizes well to unseen domains.
85
+
86
+ BEST-RQ is a scalable speech representation learner: We find that BEST-RQ pre-training can effectively scale to the very large data regime with a 2B parameter Conformer-based backbone, comparing favorably against Wav2Vec 2.0 [6] and W2v-BERT [21] in this setting.
87
+
88
+ MOST (BEST-RQ + text-injection) is a scalable speech and text representation learner: We demonstrate that MOST is an effective method for utilizing large scale text data for improving quality on downstream speech tasks, as demonstrated by quality gains exhibited for the FLEURS and CoVoST 2 tasks. Fig. 2 depicts USM's performance, establishing a new state-of-the-art on the FLEURS benchmark across 102 languages for ASR and on CoVoST 2 across 21 languages on AST.
89
+
90
+ Representations from MOST (BEST-RQ + text-injection) can quickly adapt to new domains: We find that it is possible to obtain powerful downstream ASR/AST models by attaching and training light-weight residual adapter modules, which only add $2\%$ of additional parameters, while keeping the rest of the model frozen.
91
+
92
+ Chunk-wise attention for robust long-form speech recognition: We introduce chunk-wise attention, an effective, scalable method for extending the performance of ASR models trained on shorter utterances to very long speech inputs. We find that the USM-CTC/LAS models trained with chunk-wise attention are able to produce high-quality transcripts for very long utterances in the YouTube evaluation sets.
93
+
94
+ # 1.3 Outline
95
+
96
+ The outline of this report is as follows:
97
+
98
+ Methods: We review the architecture and the methods used in the paper. We provide brief summaries of the Conformer [9], BEST-RQ [10], text-injection [12, 13] used for MOST, and Noisy Student Training (NST) [7,8]. We also introduce chunk-wise attention for scalable training on long utterances.
99
+
100
+ Data: We describe the four types of datasets used to train our models: the unlabeled multilingual speech dataset YT-NTL-U, the multilingual text corpus Web-NTL, labeled datasets, and pseudolabeled datasets.
101
+
102
+ Key Results: We present the performance of our USM models on downstream ASR and AST tasks. We demonstrate that USM establishes a new state of the art on several speech understanding benchmarks.
103
+
104
+ Analysis and Ablations: We present analysis of the effects of the key components of our work and compare their performance against existing methods.
105
+
106
+ # 1.4 Related Work
107
+
108
+ There is extensive literature on pre-training [6, 12, 22-33] and self-training [8, 34-44] for ASR. Large speech models trained on large datasets have been studied previously in both monolingual [3] and multilingual contexts [1, 4]. Large multi-modal speech models have been explored in [13, 20, 45-54]. Various unsupervised pre-training methods for speech models have been proposed and applied in [6, 10, 21].
109
+
110
+ Our work is an extension of a host of recent research efforts [3, 10, 13, 53, 55] that have studied semi-supervised learning for ASR in the context of deep-learning. Large speech models ( $>1\mathrm{B}$ ) were first studied in [3]; we expand upon this approach to train multilingual speech models in this work. We improve the methods used in [3] by employing a more scalable self-supervised learning algorithm (BEST-RQ) and additionally applying multi-modal pre-training (text-injection) to prepare the models. We introduce an improvement to BEST-RQ [10] by utilizing a multi-softmax loss. We also incorporate Multi-Objective Supervised Training (BEST-RQ with text-injection) to improve the quality of speech representations learnt during pre-training, by utilizing transcribed data and unlabeled text. Long-form ASR has been studied in [1, 56, 57]; we propose chunk-wise attention as an alternative solution to chunk-based decoding.
111
+
112
+ In this paper, we propose a scalable self-supervised training framework for multilingual ASR which extends to hundreds of languages. In particular:
113
+
114
+ - We demonstrate that USMs pre-trained on 300 languages can successfully adapt to both ASR and AST tasks in new languages with a small amount of supervised data.
115
+ - We build a generic ASR model on 73 languages by fine-tuning pre-trained models on 90k hours of supervised data. We show that the generic ASR models can carry out inference efficiently on TPUs and can reliably transcribe hours-long audio on YouTube Caption ASR benchmarks.
116
+ - We conduct a systematic study on the effects of pre-training, noisy student training, text injection, and model size for multilingual ASR.
117
+
118
+ # 2 Methods
119
+
120
+ # 2.1 Model Architecture: Conformer
121
+
122
+ We use the convolution-augmented transformer [9], or Conformer, with relative attention [58] as an encoder model. For downstream speech tasks such as ASR or AST, the features produced by the Conformer are either used as an input to a connectionist temporal classification (CTC) [14], RNN transducer (RNN-T) [59] or a Listen, Attend, and Spell (LAS) [15] unit after additional projection. As will be discussed further, BEST-RQ pre-training is exclusively applied to the encoder, while other forms of training (e.g., T5 [60]) train the entire task network as a whole.
123
+
124
+ For our experiments, we consider two models with 600M and 2B parameters respectively. While the main results presented have been obtained using the 2B model, the 600M model is utilized for ablation studies and observing model scaling behavior. Some features of the models are listed in Table 2.
125
+
126
+ Table 2: Conformer model parameters.
127
+
128
+ <table><tr><td>Model</td><td># Params (B)</td><td># Layers</td><td>Dimension</td><td>Att. Heads</td><td>Conv. Kernel Size</td></tr><tr><td>Conformer-0.6</td><td>0.6</td><td>24</td><td>1024</td><td>8</td><td>5</td></tr><tr><td>Conformer-2B</td><td>2.0</td><td>32</td><td>1536</td><td>16</td><td>5</td></tr></table>
129
+
130
+ # 2.2 Pre-training: BEST-RQ
131
+
132
+ We select BEST-RQ [10] as the method to pre-train our networks with speech audio. BEST-RQ provides a simple framework with a small number of hyperparameters for unsupervised training on
133
+
134
+ ![](images/53ef1f7c83a5f82e808a89821e36c94181092dcf274f06359832272c3e8c19a6.jpg)
135
+ Figure 3: BEST-RQ based pre-training with conformer encoder.
136
+
137
+ large-scale unlabeled audio data. We discuss the comparative advantage of BEST-RQ against other pre-training methods in section 5.3.
138
+
139
+ BEST-RQ employs a BERT-style training task for the audio input that attempts to predict masked speech features. To make the task compatible with BERT-style training, the original speech features corresponding to the masked frames are quantized, and the task requires predicting the quantized label of these features. For a given number of quantization targets $c$ , random "codebook" vectors $v_{0},\dots ,v_{c - 1}$ are chosen in an embedding space. The discrete label of the speech feature is obtained by first projecting the feature into the embedding space by a randomly initialized, frozen projection matrix and then finding the closest codebook vector. The index of this codebook vector is identified as the label of the speech feature. Cosine similarity is used as the distance measure for determining the code.
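To make the quantization step concrete, here is a minimal NumPy sketch of a random-projection quantizer of the kind described above; the feature dimension, embedding dimension, codebook size, and all names are illustrative assumptions rather than the paper's exact configuration.

```python
import numpy as np

rng = np.random.default_rng(0)

# Illustrative sizes only (not the paper's configuration).
feat_dim, embed_dim, num_codes = 128, 16, 4096

# Both the projection matrix and the codebook are randomly initialized and kept frozen.
projection = rng.normal(size=(feat_dim, embed_dim))
codebook = rng.normal(size=(num_codes, embed_dim))

def bestrq_labels(features):
    """Map speech features [T, feat_dim] to discrete codebook indices [T]."""
    projected = features @ projection                                   # [T, embed_dim]
    # Cosine similarity is the dot product of L2-normalized vectors.
    proj_n = projected / np.linalg.norm(projected, axis=-1, keepdims=True)
    code_n = codebook / np.linalg.norm(codebook, axis=-1, keepdims=True)
    return (proj_n @ code_n.T).argmax(axis=-1)                          # nearest code per frame

labels = bestrq_labels(rng.normal(size=(50, feat_dim)))                 # e.g., labels for 50 frames
```

During pre-training, the encoder sees only the masked input and is trained with a cross-entropy loss to predict these frozen labels at the masked positions.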
140
+
141
+ We note that while w2v-BERT [21] pre-training has proven to be an effective method for unsupervised pre-training, it requires an additional quantization module which introduces more complexity. As we increase the model size and language coverage, the learnt codebook module proves costly to tune and can impede progress of model development. Meanwhile, the BEST-RQ algorithm does not require such a module, making it a more scalable method for pre-training.
142
+
143
+ # 2.2.1 Multi-softmax
144
+
145
+ Instead of utilizing a single codebook [10], we use multiple codebooks to improve BEST-RQ training in this study. More precisely, we use $N$ softmax layers to produce $N$ probability predictions from the output of the encoder to compare against $N$ independent quantization targets obtained from the masked speech features. We train the network with equal weights for each softmax layer. The use of multiple codebooks improves the stability and convergence of the model.
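As a rough sketch of the multi-softmax objective (the number of codebooks, shapes, and names below are placeholders), the loss is the unweighted mean of N independent cross-entropy terms, one per codebook:

```python
import numpy as np

def multi_softmax_loss(logits_per_codebook, labels_per_codebook):
    """logits_per_codebook: list of N arrays [T, num_codes] (one per codebook head);
    labels_per_codebook: list of N arrays [T] of target code indices."""
    losses = []
    for logits, labels in zip(logits_per_codebook, labels_per_codebook):
        logits = logits - logits.max(axis=-1, keepdims=True)            # numerical stability
        log_probs = logits - np.log(np.exp(logits).sum(axis=-1, keepdims=True))
        losses.append(-log_probs[np.arange(len(labels)), labels].mean())
    return np.mean(losses)                                              # equal weight per codebook
```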
146
+
147
+ # 2.3 Self-training: Noisy Student Training
148
+
149
+ We utilize noisy student training (NST) [7, 8] to generate pseudo-labeled data to augment supervised training. This is done by first training a teacher model with augmentation on a supervised set, then using that teacher to generate transcripts for unlabeled audio data. A heuristic filtering method based on the ratio between the number of words and audio length is used to filter the pseudo-labeled data. The pseudo-labeled data is mixed with supervised data to train the student model.
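A minimal sketch of the kind of length-ratio filter mentioned above; the report does not publish the thresholds, so the words-per-second bounds here are purely illustrative.

```python
def keep_pseudo_label(transcript, audio_seconds, min_wps=0.3, max_wps=4.0):
    """Keep a pseudo-labeled utterance only if its words-per-second ratio looks plausible.

    The min/max bounds are assumptions for illustration, not the paper's values."""
    words_per_second = len(transcript.split()) / max(audio_seconds, 1e-6)
    return min_wps <= words_per_second <= max_wps

# Example: a 10-second clip whose pseudo-label is a single word is likely a bad transcript.
print(keep_pseudo_label("hello", 10.0))   # False under these assumed bounds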
150
+
151
+ # 2.4 Chunk-wise Attention for Long-form ASR
152
+
153
+ In many real-world applications, ASR systems are required to transcribe minutes- or hours-long audio. This poses significant challenges to many end-to-end ASR systems, as these ASR systems
154
+
155
+ are usually trained on much shorter segments, typically less than 30 seconds. For systems that use attention-based encoders, it is impractical to use global attention to attend to the entire audio. Local self attention, which only attends to the fixed length of left and right context, is thus widely used. For example, in BEST-RQ pre-training, only 128 left and 128 right context frames are used for local self attention. However, stacking many local self attention layers creates a significant receptive field mismatch between training and inference. The left figure in Fig. 4 illustrates this issue with a network consisting of 4 local self attention layers, each using only 1 left and 1 right context frames. Since the context is leaked in every layer, the receptive field width grows linearly with respect to the number of layers; for a big encoder like that of the Conformer-2B, this means that the receptive field width for the encoder output is longer than 327 seconds. During training, the model is trained with at most 30 seconds speech segments, while at inference time, when minutes or hours long audio is fed to the model, the encoder needs to process over 300 seconds of audio to produce one encoder output—a pattern it has never trained on. Our empirical observations demonstrate that, under this train-test mismatch, these models with deep architectures and high capacity suffer from high deletion errors. We henceforth refer to this problem as the "long-form (performance) degradation" problem.
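The receptive-field arithmetic behind the 327-second figure can be reproduced directly; the 40 ms encoder frame duration below is an assumption consistent with 4x subsampling of 10 ms input frames, not a number stated in the report.

```python
num_layers = 32           # Conformer-2B encoder depth
context_each_side = 128   # local self-attention context frames per side
frame_seconds = 0.04      # assumed encoder frame duration (40 ms)

# Each layer leaks `context_each_side` extra frames of context on each side,
# so the receptive field grows linearly with depth.
receptive_frames = 2 * num_layers * context_each_side + 1
print(receptive_frames * frame_seconds)   # ~327.7 seconds
```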
156
+
157
+ ![](images/7ec50a64eb6e037686ccaa250d30e7ee825d78244a640df1bfe06eece0908fc5.jpg)
158
+ Local Self-Attention
159
+
160
+ ![](images/44760374f8868fb7bf2a33114a46d8ae4e23250a922d323282e2b4dd8377c7a9.jpg)
161
+ Receptive field for y0..y3
162
+ Figure 4: Comparing the receptive fields of two networks with 4 layers of local self-attention and chunk-wise attention.
163
+
164
+ ![](images/aab26532fbca0e79c1714a964a7d247a9d5ec6d29eea0e042bdf0360726fab64.jpg)
165
+ Chunk-wise Self-Attention
166
+ Receptive field for y4..y8
167
+
168
+ To solve this problem, we propose a simple modification to the attention mechanism; the attention is restricted to audio chunks. This is illustrated on the right side of Fig. 4, in which 8 frames are divided into 2 chunks, and the attention is performed within each chunk. In this case, there is no context leaking in the attention layer, and thus the receptive field width is independent of the number of layers. In our experiments an 8-second chunk resulted in the best recognition quality vs. computational cost trade-off.
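A minimal sketch of a chunk-wise attention mask (the frame count and chunk size mirror the 8-frame, 2-chunk example of Fig. 4; the function and variable names are ours): each query may only attend to keys within its own chunk, so the receptive field no longer depends on depth.

```python
import numpy as np

def chunk_attention_mask(num_frames, chunk_size):
    """Boolean [num_frames, num_frames] mask; True where attention is allowed."""
    chunk_ids = np.arange(num_frames) // chunk_size
    return chunk_ids[:, None] == chunk_ids[None, :]

# Frames 0-3 and 4-7 form two chunks that cannot attend to each other.
mask = chunk_attention_mask(num_frames=8, chunk_size=4)
```

In an encoder, such a mask would be applied to the attention logits of every self-attention layer.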
169
+
170
+ It is worthwhile to note there are a few other works in the literature which also modify the attention pattern to deal with the long-form audio in ASR, e.g., [61-66]. Though conceptually similar to block processing (e.g. [65, 66]), chunk-wise attention is more flexible. Block processing is performed at the input feature level, which limits the encoder layers to the context frame at the current chunk. On the other hand, chunk-wise attention allows other layers in the encoder (e.g., convolution layers) to process contextual frames beyond the current chunk. Compared with Whisper [1], which segments the audio into 30 second chunks and uses a heuristic process to carry the decoder states over, we only chunk the attention state, and allow the decoder to access the entire encoder output. We also use either a CTC or RNN-T decoder to decode on long-form audio, neither of which have been observed to hallucinate compared to attention-based sequence-to-sequence decoders. We observe our systems are robust on long-form ASR tasks with a simpler decoding process on long-form speech signals.
171
+
172
+ ![](images/a7569696cbd824e494cd6e3f43d82b3ac35ec2cd37c0228cc055c9c254fdef21.jpg)
173
+ Figure 5: Overview of MOST text injection. The left-most panel depicts MOST training on unlabeled speech input; the center panel depicts training on paired speech and text input; the right-most panel depicts training on unlabeled text data.
174
+
175
+ # 2.5 Multi-Objective Supervised Pre-training: BEST-RQ + text-injection
176
+
177
+ In addition to pre-training with unlabeled speech, we add an additional stage of Multi-Objective Supervised pre-Training (MOST) as shown in Fig. 5, where we train the model jointly on unlabeled speech, unlabeled text and paired speech and text data. The training loss for this procedure is based on the text-injection loss including duration modeling and consistency regularization as in [13], to which we add a weighted BEST-RQ loss for the encoder of the model. MOST yields two benefits: (i) Training with paired speech and text data with alignment losses results in learning speech representations that are better aligned with text, improving quality on tasks like ASR and AST that require mapping the acoustics of the speech signal to text. (ii) Training simultaneously on unlabeled text in a model that learns speech and text representations jointly improves the robustness of learned representations, especially on low resource languages and domains, also generalizing to new languages with no paired data seen during training [67].
178
+
179
+ The key architectural components for constructing the text-injection loss as utilized in our approach include: (i) A speech-only encoder that utilizes a convolutional sub-sampling feature encoder and a single conformer layer. For continued pre-training the feature encoder is initialized from the BEST-RQ pre-trained checkpoint while the conformer layer is initialized randomly. (ii) A text-only encoder that consists of an embedding layer, an upsampler, and a conformer layer block. The upsampler used in this work is a learned duration based upsampling model [13], though a fixed or random repetition upsampler can also be used for text-injection [47, 53]. All components are initialized randomly. (iii) A shared conformer encoder initialized from the pre-trained BEST-RQ speech encoder. (iv) The BEST-RQ speech softmax layers initialized from the BEST-RQ checkpoint. (v) The decoder unit which is initialized randomly.
180
+
181
+ The main idea of text-injection (e.g. [13,53,54]) is to produce joint, co-aligned embeddings of speech and text as sequences in the same embedding space. Given this embedding space, text data with no associated audio can contribute to improving the speech task. The speech and text encoders presented above are intended to produce these embeddings, which need to be matched in the embedding space and are also required to be co-aligned in the time dimension. The embeddings enable the text data to contribute to preparing the model for downstream tasks.
182
+
183
+ To achieve these objectives, the architecture as presented above is trained using three types of data, each contributing to different types of losses:
184
+
185
+ 1. The unlabeled speech passes through the shared encoder and the BEST-RQ softmax layers to contribute to the BEST-RQ loss.
186
+
187
+ 2. The paired speech-text data serves multiple functions.
188
+
189
+ - The labeled speech flows through the speech encoder, the shared encoder and the decoder unit and contributes to the standard ASR loss computed against the paired text. Here, the speech-text alignments of the paired data are extracted from the decoder unit and used to train the duration upsampler within the text encoder.
190
+ - The text of the paired data also passes through the text encoder. The encoded text sequence is used to compute a consistency loss against the encoded speech sequence. This loss is used to train solely the text encoder—the speech encoder weights are frozen for this particular forward-propagation.
191
+
192
+ 3. The unlabeled text data contributes to a reconstruction loss. This loss is constructed by passing the text through the text encoder, then masking chunks of the feature sequence produced. These masked text features live in the same embedding space as masked speech features, and thus can be passed through the shared encoder and the decoder unit to compute the ASR loss against the original text. This is the reconstruction loss used to train the model.
193
+
194
+ For training stability, MOST proceeds in two stages—we first train solely on paired data to learn stable decoder alignments for 20k steps. We then train the duration upsampler and activate the losses for unlabeled text. We refer the reader to [13] for further details.
195
+
196
+ When fine-tuning for ASR, we initialize the feature encoder of the ASR model with the speech feature encoder, initialize the conformer block with the shared conformer encoder, and add a randomly initialized task-specific transducer.
197
+
198
+ In the MOST set-up, the speech and text representations live in a shared representation space, thereby allowing us to utilize text machine translation (MT) data during the fine-tuning stage of AST tasks. We follow the same approach described in [13, 20] and report the AST results with joint fine-tuning for models prepared with MOST.
199
+
200
+ # 2.6 Residual Adaptation with a Frozen Encoder
201
+
202
+ Ideally, the fine-tuning process should scale with the number of downstream tasks; in reality, fine-tuning the pre-trained USM individually for various domains and tasks becomes prohibitively expensive. In order to mitigate this issue, we explore a lightweight alternative [19] to training the full network, where residual adapters with a small number of parameters are added for each individual language while the pre-trained USM is kept entirely frozen during fine-tuning. We experiment with adding two parallel adapters to each Conformer block, whose parameter count amounts to $2\%$ of the original pre-trained USM, and fine-tune the adapters on downstream language tasks. When serving the model, the adapter is dynamically loaded according to the language of the input batch [68, 69]. This enables inference on $100+$ languages while keeping the total number of parameters manageable, since the same parameters and computation are re-used for the majority of the time. We also find that training the adapters rather than fine-tuning the entire model can reduce over-fitting, especially when the training data is limited.
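As a minimal sketch of one such per-language residual adapter (the bottleneck width, activation, and initialization are assumptions; the report only states that two parallel adapters per Conformer block add roughly $2\%$ of the parameters):

```python
import numpy as np

class ResidualAdapter:
    """Bottleneck adapter: down-project, nonlinearity, up-project, residual add."""

    def __init__(self, model_dim=1536, bottleneck_dim=256, seed=0):
        rng = np.random.default_rng(seed)
        self.w_down = rng.normal(scale=0.02, size=(model_dim, bottleneck_dim))
        self.w_up = np.zeros((bottleneck_dim, model_dim))   # zero-init: starts as an identity mapping

    def __call__(self, x):
        hidden = np.maximum(x @ self.w_down, 0.0)           # ReLU bottleneck (assumed)
        return x + hidden @ self.w_up                       # residual connection

# One adapter set per language; the frozen USM encoder is shared, and the adapter
# is swapped in according to the language of the input batch at serving time.
adapters = {"ha": ResidualAdapter(), "kk": ResidualAdapter()}
```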
203
+
204
+ # 2.7 Training Details
205
+
206
+ Data Processing: The audio is uniformly sampled to $16\mathrm{kHz}$ quality—any audio with a different native sampling rate is either up-sampled or down-sampled. The audio is then featurized into 128-dimensional log-mel filterbank coefficients. Graphemes are used to tokenize the text for FLEURS in-domain fine-tuning, while word-piece models (WPMs) [70] are used for tokenization for all other tasks.
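For illustration, the featurization might look like the sketch below (librosa is used only as an example tool; the report specifies 16 kHz audio and 128-dimensional log-mel filterbanks, while the 25 ms window and 10 ms hop are assumptions):

```python
import numpy as np
import librosa

def log_mel_features(path):
    # Resample everything to 16 kHz, as described above.
    audio, sr = librosa.load(path, sr=16000)
    mel = librosa.feature.melspectrogram(
        y=audio, sr=sr, n_fft=400, hop_length=160, n_mels=128)   # 25 ms window, 10 ms hop (assumed)
    return np.log(mel + 1e-6).T                                  # [num_frames, 128] log-mel coefficients
```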
207
+
208
+ BEST-RQ: We follow default masking and quantization parameters of BEST-RQ as in [10]. We use a 16 codebook multi-softmax loss to stabilize training and improve performance as described in 5.1. We do not use EMA for pre-training.
209
+
210
+ MOST: We follow the text encoder and decoder architecture described in [13] but use 4k sentence-piece models (SPMs). We use a single 1536-dimensional Conformer layer as the speech encoder and Conformer-2B encoder as the shared encoder. We mix un-transcribed speech, unspoken text, and transcribed speech in each batch with fixed batch sizes of, respectively, 4096, 8192, and 1024. The model is initialized with the BEST-RQ pre-trained encoder. MOST employs a curriculum learning
211
+
212
+ schedule where training initially is conducted with un-transcribed speech and paired speech-text data, and unspoken text is utilized only after 20k steps. The joint training employing all three types of data lasts for another 100K steps.
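Read as pseudocode, the MOST objective is a weighted sum of per-data-type losses with a curriculum gate on the unspoken-text term; the weights, dictionary keys, and function names in this sketch are placeholders, since the report does not publish the mixing weights.

```python
def most_loss(step, batches, losses, w_bestrq=1.0, w_asr=1.0, w_duration=1.0,
              w_consistency=1.0, w_text=1.0, text_start_step=20_000):
    """batches: dict with 'unlabeled_speech', 'paired', and 'unlabeled_text' entries;
    losses: dict of callables for each loss term (placeholders, not a real API)."""
    total = w_bestrq * losses["bestrq"](batches["unlabeled_speech"])
    total += w_asr * losses["asr"](batches["paired"])
    total += w_duration * losses["duration"](batches["paired"])
    total += w_consistency * losses["consistency"](batches["paired"])
    if step >= text_start_step:   # unspoken text enters only after 20k steps
        total += w_text * losses["text_reconstruction"](batches["unlabeled_text"])
    return total
```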
213
+
214
+ Supervised Training: We use two separate optimizers for the encoder parameters and the decoder parameters of the network [71]. For USM-CTC and USM-LAS, we train the model for 100k steps with 2048 batch size. For in-domain experiments, the checkpoint is selected based on development set performance.
215
+
216
+ Training Large Models: We use the GShard [72] framework with the GSPMD backend [73] to train our large models on TPUs.
217
+
218
+ # 3 Datasets
219
+
220
+ # 3.1 Audio Data
221
+
222
+ ![](images/28341b174248990474eee4fee8181d6d4c9a31186c53dfc161cc4621f32d3162.jpg)
223
+ Figure 6: The video category and length distribution of YT-513-U.
224
+
225
+ The following audio datasets are used in this report to train our models:
226
+
227
+ - YouTube SUPervised Plus (YT-SUP+):
228
+
229
+ - YT-SUP: 90k hours of segmented, labeled audio across 75 languages.
230
+ - YT-Pseudo-Labeled: 100k hours of segmented, pseudo-labeled en-US audio from YT-NTL-U. The pseudo-labels are generated by a 600M CTC model trained on YT-SUP en-US data.
231
+
232
+ - YouTube Next Thousand Languages Unsupervised (YT-NTL-U): 12.1M hours of segmented, unlabeled audio, including:
233
+
234
+ - YT-55-U: 12M hours of segmented, unlabeled audio on 55 rich resource languages identified by YouTube production language id models.
235
+ - YT-513-U: 100k hours of segmented, unlabeled audio across 513 tail languages not covered by YouTube production language id models. These languages are identified by vendors.
236
+
237
+ Let us expand upon how each dataset has been constructed.
238
+
239
+ YT-SUP+: YT-SUP is a dataset with audio from videos that have user-uploaded transcripts from 75 languages. We group consecutive segments into a longer unit similar to [57]. The maximal sequence length for training is 30 seconds. The total amount of training data is 90k hours, ranging from English (en-US) (3.5k hours) to Amharic (Am-Et) (150 hours). We also introduce an additional 100k hours of en-US audio from YT-NTL-U to YT-SUP. We choose to generate pseudo-labels on this dataset using a 600M-parameter CTC YT teacher model trained on YT-SUP. Each audio is randomly segmented between 5 to 15 seconds.
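The grouping of consecutive segments into at most 30-second training units can be sketched as a greedy merge (a rough sketch; the exact grouping logic of [57] may differ):

```python
def group_segments(segment_seconds, max_seconds=30.0):
    """Greedily merge consecutive segment durations into units of at most max_seconds."""
    groups, current = [], []
    for seg in segment_seconds:
        if current and sum(current) + seg > max_seconds:
            groups.append(current)
            current = []
        current.append(seg)
    if current:
        groups.append(current)
    return groups

# Example: [12, 10, 9, 5] seconds -> [[12, 10], [9, 5]] under the 30-second cap.
print(group_segments([12, 10, 9, 5]))
```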
240
+
241
+ YT-55-U: YT-55-U is built by first randomly collecting 3 million hours of audio from "speech-heavy" YouTube videos, filtered by language. The 3 million hours of audio is then further segmented by the YT teacher model. Instead of using a teacher model as in [3], the non-speech segments identified by a Voice Activity Detection (VAD) model are removed to yield approximately 1 million hours of
242
+
243
+ unlabeled audio data. Later, we use a YouTube production language identification model to select 55 languages from that audio.
244
+
245
+ YT-513-U: We create an additional dataset called YT-513-U to ensure coverage of lower resource languages in our pre-training dataset. We reached out to vendors and native speakers to identify YT videos containing speech in specific long tail languages, collecting a dataset of unlabeled speech in 513 languages. Vendors were tasked with ensuring a variety of domains, voices, and content in the videos that are collected in each language. These videos are segmented into speech segments using a VAD model, resulting in a total of $102\mathrm{k}$ hours of speech. Our final YT-513-U dataset contains 88 languages with over 500 hours of speech each, 237 languages with between 100-500 hours, and 188 languages with less than 100 hours of data. The languages chosen for this collection are wide-ranging, with a majority of our data corresponding to languages from South Asia, Southeast Asia, West Africa, and East Africa. The distribution of video categories and lengths in our dataset are depicted in Figure 6.
246
+
247
+ In addition to YouTube data, we also include public data for MOST training:
248
+
249
+ - Public Unsupervised (Pub-U): Following [20], we use approximately 429k hours of unlabeled speech data in 51 languages. It includes: 372k hours of speech data spanning 23 languages from VoxPopuli [74], read speech data in 25 languages drawn from the v6.1 release of Common Voice [75], 50k hours of read books data in eight European languages from Multilingual LibriSpeech [76] and 1k hours of telephonic conversation data spanning 17 African and Asian languages from BABEL [77].
250
+ - Public Supervised (Pub-S): Similar to [20], our public supervised set includes approximately 1.3k hours of speech and transcript data spanning 14 languages from VoxPopuli, 10 hour training splits for each of the 8 MLS languages, and $1k$ hours of data spanning 17 languages from the Babel ASR task.
251
+
252
+ Note that the public data is only used for in-domain pre-training and is excluded for training the generic USM-LAS/CTC models. This allows us to treat the public task performance as out-of-domain benchmarks for the USM-LAS/CTC models.
253
+
254
+ # 3.2 Text Data
255
+
256
+ Web-NTL: For pre-training with unlabeled text, we use a web-crawled corpus of monolingual text containing over 28B sentences [78]. The dataset spans 1140 languages, 205 of which have over $1M$ sentences and 199 of which have between $100k$ and $1M$ sentences. We up-sample lower resource languages using temperature-based sampling [79] with $T = 3.0$ . More details about the dataset and the mining approach have been described in Section 2 of [78].
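Temperature-based sampling with $T = 3.0$ flattens the language distribution toward low-resource languages; a minimal sketch (the corpus sizes in the example are made up):

```python
import numpy as np

def temperature_sampling_probs(sentence_counts, temperature=3.0):
    """Sample language l with probability proportional to (n_l / N) ** (1 / T)."""
    counts = np.asarray(sentence_counts, dtype=float)
    probs = (counts / counts.sum()) ** (1.0 / temperature)
    return probs / probs.sum()

# A 1000:1 gap in corpus size shrinks to roughly 10:1 sampling odds at T = 3.
print(temperature_sampling_probs([1_000_000_000, 1_000_000]))
```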
257
+
258
+ # 3.3 Downstream Benchmarks
259
+
260
+ # 3.3.1 Speech Recognition (ASR)
261
+
262
+ We present our results on two public tasks, SpeechStew [2] and FLEURS [16], and an internal benchmark on YouTube.
263
+
264
+ The SpeechStew [2] dataset is assembled by putting together seven public speech corpora—AMI [80], Common Voice [81], English Broadcast News $^3$ , LibriSpeech [82], Switchboard/Fisher $^4$ , TED-LIUM v3 [83, 84] and Wall Street Journal $^5$ , which are all standard benchmarks [85–87] covering different domains in en-US.
265
+
266
+ The FLEURS [16] dataset is a publicly available, multi-way parallel dataset of 10 hours of read speech in 102 languages spanning 7 geo-groups. We restrict our use of the dataset to its ASR benchmark. Among the 102 languages present in the FLEURS benchmark, we select 62 to serve as a sub-group to compare our generic ASR system with Whisper [1], as those languages are covered by the training sets of both models. We also report full results for in-domain fine-tuning and adaptation. Unlike [16], we report both WER and CER metrics, as CER is inappropriate as an indicator of
267
+
268
+ Table 3: WERs (%) across multiple tasks for multiple settings compared against pre-existing baselines, with the exception of CoVoST 2, for which the BLEU score is presented. For the YouTube long-form set, we select the top-25 languages Whisper was trained on and exclude all languages for which Whisper produces $>40\%$ WER to reduce the noise introduced by LAS hallucination in the Whisper model. For FLEURS, we report both the WER and the CER for our models. Results omitted for the Whisper-shortform model on the YouTube long-form dataset as the model has a high deletion problem on this set. The Whisper-shortform model uses segmented decoding to reduce its hallucination problem on CORAAL. Our adapter setup adds about $2.3\%$ of the total parameters while keeping the encoder frozen from pre-training.
269
+
270
+ <table><tr><td>Task</td><td colspan="4">Multilingual Long-form ASR</td><td>Multidomain en-US</td><td colspan="2">Multilingual ASR</td><td>AST</td></tr><tr><td>Dataset</td><td colspan="2">YouTube</td><td colspan="2">CORAAL</td><td>SpeechStew</td><td colspan="2">FLEURS</td><td>CoVoST 2</td></tr><tr><td>Langauges</td><td>en-US</td><td>18</td><td>en-US</td><td>en-US</td><td>en-US</td><td>62</td><td>102</td><td>21</td></tr><tr><td>Prior Work (single model)</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Whisper-longform</td><td>17.7</td><td>27.8</td><td>-</td><td>23.9</td><td>12.8</td><td></td><td></td><td></td></tr><tr><td>Whisper-shortform†</td><td>-</td><td>-</td><td>-</td><td>13.2‡</td><td>11.5</td><td>36.6</td><td>-</td><td>29.1</td></tr><tr><td>Our Work (single model)</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>USM-LAS</td><td>14.4</td><td>19.0</td><td>29.8</td><td>11.2</td><td>10.5</td><td>12.5</td><td>-</td><td>-</td></tr><tr><td>USM-CTC</td><td>13.7</td><td>18.7</td><td>26.7</td><td>12.1</td><td>10.8</td><td>15.5</td><td>-</td><td>-</td></tr><tr><td>Prior Work (in-domain fine-tuning)</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>BigSSL [3]</td><td>14.8</td><td>-</td><td>-</td><td>-</td><td>7.5</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Maestro [67]</td><td></td><td></td><td></td><td></td><td>7.2</td><td></td><td></td><td>25.2</td></tr><tr><td>Maestro-U [67]</td><td></td><td></td><td></td><td></td><td></td><td></td><td>26.0 (8.7)</td><td></td></tr><tr><td>Our Work (in-domain fine-tuning)</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>USM</td><td>13.2</td><td>-</td><td>-</td><td>-</td><td>7.4</td><td>13.5</td><td>19.2 (6.9)</td><td>28.7</td></tr><tr><td>USM-M</td><td>12.5</td><td>-</td><td>-</td><td>-</td><td>7.0</td><td>11.8</td><td>17.4 (6.5)</td><td>30.7</td></tr><tr><td>Our Work (frozen encoder)</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>USM-M-adapter§</td><td>-</td><td>-</td><td>-</td><td>-</td><td>7.5</td><td>12.4</td><td>17.6 (6.7)</td><td>29.6</td></tr></table>
271
+
272
+ performance for some languages. When presenting the error rate metrics, we use CER for Chinese, Japanese, Thai, Lao, and Burmese to be consistent with Whisper [1].
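For reference, WER and CER are the same edit-distance rate computed over different token units (words vs. characters); the following is a minimal implementation, not the scoring tool used in the paper:

```python
def edit_distance(ref, hyp):
    """Levenshtein distance between two token sequences (single-row DP)."""
    dp = list(range(len(hyp) + 1))
    for i, r in enumerate(ref, 1):
        prev, dp[0] = dp[0], i
        for j, h in enumerate(hyp, 1):
            cur = dp[j]
            dp[j] = min(dp[j] + 1,          # deletion
                        dp[j - 1] + 1,      # insertion
                        prev + (r != h))    # substitution (0 cost on match)
            prev = cur
    return dp[-1]

def wer(ref, hyp):
    return edit_distance(ref.split(), hyp.split()) / max(len(ref.split()), 1)

def cer(ref, hyp):
    return edit_distance(list(ref), list(hyp)) / max(len(ref), 1)
```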
273
+
274
+ The test set for the YouTube domain consists of utterances from 73 languages with an average of 15 hours of audio per language, the audio length for each individual language ranging from 1 to 24 hours. The audio is transcribed manually from popular YouTube videos, each with a duration of up to 30 minutes.
275
+
276
+ # 3.3.2 Speech Translation (AST)
277
+
278
+ Following [20], we use CoVoST 2 [18] to benchmark multilingual speech translation. We evaluate the multilingual XX-to-English task that covers translation from 21 source languages into English. Depending on the language, the training data ranges in size from 1 - 264 hours.
279
+
280
+ Besides speech translation data, we also add text-to-text translation data for training the model as in [20]. This dataset includes the text translation data from CoVoST 2 combined with all data from either WMT or TED Talks, as available.
281
+
282
+ # 4 Key Results
283
+
284
+ # 4.1 Robust Speech Recognition for Massively Multilingual Tasks
285
+
286
+ In this section, we compare the performance of our models against public baselines, including Whisper large-v2 $^6$ [1], which has been trained on 680k hours of weakly supervised data across 100 languages.
287
+
288
+ For the massively multilingual speech recognition test dataset from YouTube, we observe that Whisper hallucinates in many languages, resulting in a WER exceeding $100\%$ . For a reasonable comparison, we restrict the language set on which we compare the performance of USM against Whisper by first selecting the top-25 languages from the training data for Whisper and further excluding languages for which Whisper produces $>40\%$ WER. We also use segmented decoding for Whisper with 30-second segments to further reduce the effect of hallucinations. As shown in Table 3, our USM-LAS and
289
+
290
+ USM-CTC models outperform Whisper by a wide margin on YouTube en-US, despite training on significantly less supervised data (3.5k hours versus Whisper's 400k hours [1]). While the USM-LAS model also requires segmented decoding to reduce long-form degradation as discussed in section 2.4, it is far more robust, out-performing Whisper by a relative $30\%$ WER on those 18 languages. USM-CTC does not exhibit long-form performance degradation and achieves the best performance on YouTube.
291
+
292
+ On the out-of-domain long-form CORAAL set, both USM-CTC and USM-LAS outperform Whisper by more than $10\%$ relative WER. USM-CTC and USM-LAS similarly outperform Whisper on SpeechStew, whose training data the models have not had access to.
293
+
294
+ We further compare the multilingual performance of the models on the held-out set from FLEURS. As shown in Table 3, USM-LAS and USM-CTC both outperform Whisper by $66\%$ relative WER, despite using a smaller amount of multilingual supervised data (90k versus Whisper's 117k, when en-US is excluded). USM-LAS consistently outperforms USM-CTC for short-form ASR tasks.
295
+
296
+ # 4.2 Massively Multilingual Results Beyond 100 Languages
297
+
298
+ The lower part of Table 3 shows our results for in-domain fine-tuning. Our pre-trained model improves the FLEURS benchmark significantly, even when using only 10 hours per language. Compared to the previous SoTA in [67], our model achieves a $30\%$ relative improvement in terms of WER across 102 languages. Our results show that while generic speech models can be powerful, performance is still maximized by in-domain fine-tuning.
299
+
300
+ # 4.3 MOST Produces Robust Representations that Generalize to New Domains
301
+
302
+ MOST training aligns the representations of speech and text by training simultaneously on the two modalities. We investigate whether MOST representations are useful for adapting the model to new domains by freezing the entire learned encoder produced by MOST and training only a small number of parameters added to the network through residual adapters. As shown in Table 3, by adding only $2\%$ to the total number of parameters, the MOST representation model (USM-M-adapter) performs only slightly worse than the fine-tuning baselines and still shows competitive performance on downstream ASR and AST tasks. The small number of parameters being trained in this approach makes it feasible to extend our system to a large number of new domains and tasks, even with a limited amount of training data, such as in FLEURS.
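As a rough illustration of this setup, the sketch below shows one common form of residual adapter: a small bottleneck module added on top of a frozen layer, so that only the adapter weights are trained. The exact adapter architecture and dimensions used for USM-M-adapter are not specified here, so the module below and its sizes should be read as assumptions.

```python
import torch
import torch.nn as nn

class ResidualAdapter(nn.Module):
    """Bottleneck adapter inserted after a frozen encoder layer; only these
    weights are trained, keeping the added parameter count to a few percent."""
    def __init__(self, d_model: int, bottleneck: int = 64):
        super().__init__()
        self.norm = nn.LayerNorm(d_model)
        self.down = nn.Linear(d_model, bottleneck)
        self.up = nn.Linear(bottleneck, d_model)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # The residual connection leaves the frozen representation intact,
        # so the adapter only learns a small task-specific correction.
        return x + self.up(torch.relu(self.down(self.norm(x))))
```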
303
+
304
+ # 4.4 Pushing the Quality of ASR on Unseen Languages
305
+
306
+ Table 4: Noisy student training for unseen languages. WERs (%) for the teacher adapter models and the student models are presented. The relative improvement (%) of the student models can be found in the last column.
307
+
308
+ <table><tr><td>Languages</td><td>Whisper-v2</td><td># hrs in YT-NTL</td><td>USM-LAS-Adapter</td><td>USM-M + pseudo label</td><td>Rel. Improv.</td></tr><tr><td>Hausa (ha)</td><td>88.9</td><td>2175.0</td><td>24.5</td><td>22.8</td><td>7.5</td></tr><tr><td>Kazakh (kk)</td><td>37.7</td><td>196.0</td><td>11.8</td><td>10.9</td><td>8.3</td></tr><tr><td>Shona (sn)</td><td>121.0</td><td>247.0</td><td>29.1</td><td>22.2</td><td>31.1</td></tr><tr><td>Pashto (ps)</td><td>93.7</td><td>254.0</td><td>36.0</td><td>35.4</td><td>1.7</td></tr><tr><td>Yoruba (yo)</td><td>94.8</td><td>1292.0</td><td>33.4</td><td>30.6</td><td>9.2</td></tr></table>
309
+
310
+ Tail languages often do not have paired transcriptions for supervised learning—we refer to these languages as unseen languages, as the model has not seen paired data for these languages during training. To create pseudo-labels for these languages, we first build a USM-LAS-Adapter by attaching residual adapters to USM-LAS and training them using FLEURS data. By using the USM-LAS-Adapter as a teacher, we can now transcribe the unlabeled data in the unseen languages as part of the YT-NTL dataset. As shown in Table 4, we observe consistent wins for all languages on the FLEURS benchmark. For some languages, the improvement is larger than $30\%$. This further demonstrates the robustness of the USM-LAS model—despite using only 10 hours of out-of-domain data from FLEURS, the USM-LAS-Adapter is able to transcribe YouTube data to produce meaningful recognition results that lead to these improvements. We find the approach of training adapter models
311
+
312
+ on small datasets and utilizing them for pseudo-labeling to be a promising route for scaling up the number of languages that can be transcribed by USMs.
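Conceptually, this pseudo-labeling step reduces to transcribing unlabeled audio with the small adapter-based teacher and adding the resulting pairs to the training set of the student. The sketch below is illustrative only; `teacher_transcribe`, the confidence score, and the filtering threshold are assumptions rather than details taken from the paper.

```python
# Hedged sketch of teacher-based pseudo-labeling for an unseen language.
def build_pseudo_labels(unlabeled_audio, teacher_transcribe, min_confidence=0.0):
    """Transcribe unlabeled utterances with a teacher model and keep the
    (audio, text) pairs whose confidence clears an optional threshold."""
    pseudo_labeled = []
    for utterance in unlabeled_audio:
        text, confidence = teacher_transcribe(utterance)
        if confidence >= min_confidence:
            pseudo_labeled.append((utterance, text))
    return pseudo_labeled
```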
313
+
314
+ # 4.5 USMs are Strong AST Models
315
+
316
+ The multilingual speech translation performance of fine-tuned USMs is shown in Table 3. We find that we are already comparable to the CoVoST 2 SoTA BLEU score by fine-tuning the speech-only USM. We note that the previous SoTA uses $125\mathrm{k}$ hours of supervised speech translation data compared to the 859 hours of data used by the USM. After MOST training, USM-M can use both speech and text as training input. By introducing text-to-text machine translation (MT) data during fine-tuning, USM-M is able to achieve an unprecedented $>30$ BLEU on CoVoST (a 1 BLEU increase over the SoTA).
317
+
318
+ # 5 Analysis and Ablations
319
+
320
+ # 5.1 Multi-Softmax Loss for BEST-RQ
321
+
322
+ We observe a consistent $>5\%$ relative improvement on ASR and AST benchmarks by increasing the number of softmax groups in the multi-softmax loss for BEST-RQ training from 1 to 16, as shown in Table 5. We also find that using multiple softmax groups significantly reduces performance variation across different pre-training runs and improves convergence speed.
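A minimal sketch of what such a multi-softmax objective can look like is given below: each softmax group has its own projection head and its own set of quantized targets, and the per-group cross-entropy losses are averaged. This is our simplified reading of the idea, not the exact USM implementation; the tensor shapes and the use of separate linear heads are assumptions.

```python
import torch
import torch.nn.functional as F

def multi_softmax_loss(features, targets, heads):
    """Average cross-entropy over several softmax groups (sketch).

    features: (batch, time, d_model) encoder outputs at masked positions.
    targets:  (groups, batch, time) integer code indices, one set per group.
    heads:    list of nn.Linear layers, one per group, d_model -> codebook size.
    """
    losses = []
    for head, group_targets in zip(heads, targets):
        logits = head(features)  # (batch, time, codebook_size)
        losses.append(F.cross_entropy(logits.flatten(0, 1), group_targets.flatten()))
    return torch.stack(losses).mean()
```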
323
+
324
+ Table 5: YT-55 versus YT-NTL across different domains, with and without multi-softmax groups. For simplicity, we report CER for FLEURS. For CoVoST, we report the BLEU score. YT-NTL covers 27 additional languages not covered in YT-55.
325
+
326
+ <table><tr><td rowspan="2">Model</td><td rowspan="2">Pre-train Set</td><td rowspan="2"># Params (B)</td><td rowspan="2"># Softmax</td><td colspan="2">FLEURS (CER)</td><td rowspan="2">CoVoST (BLEU)</td></tr><tr><td>102 langs</td><td>27 langs</td></tr><tr><td>Conformer-0.6B</td><td>YT-55</td><td>0.6</td><td>1</td><td>9.5</td><td>-</td><td>20.9</td></tr><tr><td>Conformer-2B</td><td>YT-55</td><td>2.0</td><td>1</td><td>7.9</td><td>9.5</td><td>26.6</td></tr><tr><td>Conformer-2B</td><td>YT-NTL-U</td><td>2.0</td><td>1</td><td>7.4</td><td>8.5</td><td>27.5</td></tr><tr><td>Conformer-2B</td><td>YT-NTL-U</td><td>2.0</td><td>16</td><td>6.9</td><td>8.1</td><td>28.7</td></tr></table>
327
+
328
+ # 5.2 Model and Language Scaling
329
+
330
+ We find that scaling up the model size and increasing the language coverage of the pre-training dataset greatly benefit the performance of the USMs, as demonstrated in Table 5. In particular, we find a $10\%$ relative improvement in ASR and AST performance by using YT-NTL instead of YT-55 for pre-training, despite the fact that each newly added language in YT-NTL contains approximately 500 hours of speech—a relatively small amount. As expected, the relative gains on the newly covered languages are more substantial than those on the other languages.
331
+
332
+ # 5.3 BEST-RQ is a Scalable Self-supervised Learner
333
+
334
+ In the work that introduced it [10], BEST-RQ was shown to match or outperform other prominent pre-training methods for speech recognition, including wav2vec 2.0 and W2v-BERT. Here we investigate its comparative performance and scaling properties, similar to what has been done for wav2vec 2.0 in [3] and W2v-BERT in [20]. We use the set-up of pre-training the model on YT-55 and fine-tuning it on CoVoST 2. As shown in Table 6, our results indicate that for Conformer-0.6B, W2v-BERT and BEST-RQ perform similarly, but BEST-RQ obtains greater gains when scaled up. A contributing factor may be that W2v-BERT is more prone to codebook collapse and training instabilities at the 2B scale, whereas BEST-RQ, by construction, does not suffer from codebook collapse.
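The codebook-collapse argument follows from how BEST-RQ [10] constructs its targets: a random projection matrix and a random codebook are sampled once and kept frozen, so there are no codebook parameters that could collapse during training. The sketch below illustrates that target computation under our assumptions (an L2-normalized nearest-neighbor lookup); it is not the reference implementation.

```python
import torch.nn.functional as F

def random_projection_targets(speech_features, projection, codebook):
    """Sketch of BEST-RQ-style target indices (after [10]).

    speech_features: (batch, time, d_in) frame-level features.
    projection:      (d_in, d_code) frozen random matrix.
    codebook:        (num_codes, d_code) frozen random codebook.
    Returns (batch, time) indices of the nearest codebook vector per frame.
    """
    projected = F.normalize(speech_features @ projection, dim=-1)
    codes = F.normalize(codebook, dim=-1)
    # With unit-norm vectors, the nearest code by L2 distance is the one
    # with the highest cosine similarity.
    similarity = projected @ codes.t()  # (batch, time, num_codes)
    return similarity.argmax(dim=-1)
```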
335
+
336
+ # 5.4 Chunk-wise Attention for Robust Long-form Speech Recognition
337
+
338
+ Fig. 7 depicts the long-form performance degradation issue as described in section 2.4. In the figure, we see that for the shallow Conformer model with 17 layers, using a small local self attention context
339
+
340
+ Table 6: BLEU scores for the CoVoST 2 $\mathrm{X}\rightarrow \mathrm{En}$ task to compare BEST-RQ against W2v-BERT. Higher is better.
341
+
342
+ <table><tr><td>X → English</td><td>high</td><td>mid</td><td>low</td><td>all</td></tr><tr><td colspan="5">Previous Work</td></tr><tr><td>XLS-R (0.3B) [33]</td><td>30.6</td><td>18.9</td><td>5.1</td><td>13.2</td></tr><tr><td>XLS-R (1B) [33]</td><td>34.3</td><td>25.5</td><td>11.7</td><td>19.3</td></tr><tr><td>XLS-R (2B) [33]</td><td>36.1</td><td>27.7</td><td>15.1</td><td>22.1</td></tr><tr><td colspan="5">Conformer-0.6B</td></tr><tr><td>W2v-BERT</td><td>35.6</td><td>25.3</td><td>13.4</td><td>20.4</td></tr><tr><td>BEST-RQ</td><td>32.5</td><td>25.6</td><td>14.7</td><td>20.7</td></tr><tr><td colspan="5">Conformer-2B</td></tr><tr><td>W2v-BERT</td><td>36.0</td><td>27.8</td><td>15.6</td><td>22.4</td></tr><tr><td>BEST-RQ</td><td>35.8</td><td>31.3</td><td>21.5</td><td>26.6</td></tr></table>
343
+
344
+ ![](images/6beb9dfcc4c01dcbadc074a69dee31064fe2b9e2e7c45ed05bddc2b3b2089c34.jpg)
345
+ Figure 7: The word error rate measured on the YouTube en-US long-form test set for Conformer models with varying depth.
346
+
347
+ length, the word error rate measured on the long-form test set gradually improves as training progresses. With a deeper model that has 48 layers but roughly the same number of parameters, however, the larger receptive field creates a stronger train-test length mismatch, and the test WER increases as training proceeds.
348
+
349
+ Table 7 demonstrates that chunk-wise attention addresses the long-form degradation issue and shows robust performance across four different languages—en-US (English), ru-RU (Russian), ko-KR (Korean), and uk-UA (Ukrainian). We compare chunk-wise attention models with an 8-second chunk size (CW-8s in Table 7) against local self attention models which use 128 context frames in each Conformer layer (LSA-128). We note that further increasing the context window size of the local self attention model results in high deletion error rates on all languages of the YouTube long-form test sets. These results show that the chunk-wise attention models do not exhibit long-form performance degradation and are able to improve upon the performance of the local self attention models operating at the maximum allowed receptive field length.
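One simple way to realize chunk-wise attention is through a self-attention mask that lets each frame attend only to frames inside its own fixed-duration chunk, so the receptive field stays constant no matter how many layers are stacked. The sketch below builds such a mask; converting an 8-second chunk into a number of frames depends on the model's frame rate, so `chunk_size` is left as a parameter and the helper is our own illustration.

```python
import torch

def chunk_attention_mask(num_frames: int, chunk_size: int) -> torch.Tensor:
    """Boolean (num_frames, num_frames) mask that is True only where the query
    and key frames fall into the same fixed-size chunk."""
    chunk_ids = torch.arange(num_frames) // chunk_size
    return chunk_ids.unsqueeze(0) == chunk_ids.unsqueeze(1)
```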
350
+
351
+ Table 7: Chunk-wise attention. WER (%) is reported on the YouTube long-form set.
352
+
353
+ <table><tr><td>Model</td><td># Params (B)</td><td># Layers</td><td>en-US</td><td>ru-RU</td><td>ko-KR</td><td>uk-UA</td></tr><tr><td>LSA-128</td><td>0.6</td><td>24</td><td>16.2</td><td>16.6</td><td>26.2</td><td>15.5</td></tr><tr><td>CW-8s</td><td>0.6</td><td>24</td><td>12.5</td><td>14.7</td><td>19.5</td><td>15.3</td></tr></table>
354
+
355
+ # 5.5 TPU Serving Capacity of USM-CTC Models
356
+
357
+ In section 4, we have demonstrated that USM-CTC models are powerful generic ASR models with reliable long-form transcription performance and excellent generalization properties. Here we
358
+
359
+ Table 8: RTF for USM-2B.
360
+
361
+ <table><tr><td>Model</td><td>bf-16</td><td>Streaming</td><td># Params (B)</td><td>TPU [88]</td><td>Batch Size</td><td>1.0/RTF</td></tr><tr><td>Conformer-0.1B</td><td>Y</td><td>Y</td><td>0.1</td><td>TPUv4i</td><td>64</td><td>3047</td></tr><tr><td>Conformer-0.6B</td><td>N</td><td>N</td><td>0.6</td><td>TPUv4i</td><td>64</td><td>1920</td></tr><tr><td>Conformer-2B</td><td>N</td><td>N</td><td>2.0</td><td>TPUv4i</td><td>32</td><td>827</td></tr></table>
362
+
363
+ measure the serving capacity of the USM-CTC model as represented by the real-time factor (RTF) in an ideal setup where we assume that each batch sent to the TPU is fully packed along the time axis. The results of these measurements are presented in Table 8. Surprisingly, we find that the 2B-parameter USM-CTC model is only $3.9 \times$ slower than the 100M-parameter streaming model [89], primarily because our models operate in batch-processing mode. This result demonstrates that USM-CTC can be used efficiently as an offline transcriber on TPUs (or GPUs).
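For reference, the 1.0/RTF column in Table 8 can be read as "seconds of audio processed per second of wall-clock compute", so larger is better. A tiny helper (our own, not from the paper) makes the arithmetic explicit:

```python
def inverse_rtf(audio_seconds: float, wall_clock_seconds: float) -> float:
    """1/RTF: seconds of audio transcribed per second of compute time.
    For example, a value of 827 means a fully packed batch moves through
    roughly 827 seconds of audio for every second of TPU time."""
    return audio_seconds / wall_clock_seconds
```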
364
+
365
+ # 6 Discussion
366
+
367
+ In this report, we put forward a practical and flexible approach for training speech understanding models capable of scaling speech recognition to hundreds of languages. We conclude the report by summarizing the insights gained in the process:
368
+
369
+ Unlabeled versus weakly labeled data: We believe that diverse unlabeled data is more practical to acquire than weakly labeled data for building usable ASR for tail languages. We have demonstrated that collaborating with native speakers to identify unsupervised data in hundreds of tail languages can be an effective route to improving recognition performance on low-resource languages.
370
+
371
+ In-domain data is best: We have demonstrated that we can build a robust ASR system across many domains by utilizing a large amount of unsupervised data and a small amount of labeled data. Our results, however, also confirm that the most effective way to optimize the performance for a given domain is to use in-domain data to fine-tune the model.
372
+
373
+ CTC vs RNN-T vs LAS: The best transducer depends on the downstream task. A large pre-trained model with a frozen encoder can allow experimenters to test different transducers quickly and select the optimal transducer for their purpose.
374
+
375
+ # Acknowledgments
376
+
377
+ We would like to thank Alexis Conneau, Min Ma, Shikhar Bharadwaj, Sid Dalmia, Jiahui Yu, Jian Cheng, Paul Rubenstein, Ye Jia, Justin Snyder, Vincent Tsang, Yuanzhong Xu, Tao Wang, Anusha Ramesh, Calum Barnes, and Salem Haykal for useful discussions.
378
+
379
+ We appreciate valuable feedback and support from Eli Collins, Jeff Dean, Sissie Hsiao, and Zoubin Ghahramani. Special thanks to Austin Tarango, Lara Tumeh, and Jason Porta for their guidance around responsible AI practices.
380
+
381
+ # References
382
+
383
+ [1] A. Radford, J. W. Kim, T. Xu, G. Brockman, C. McLeavey, and I. Sutskever, “Robust speech recognition via large-scale weak supervision,” arXiv preprint arXiv:2212.04356, 2022.
384
+ [2] W. Chan, D. Park, C. Lee, Y. Zhang, Q. Le, and M. Norouzi, "Speechstew: Simply mix all available speech recognition data to train one large neural network," arXiv preprint arXiv:2104.02133, 2021.
385
+ [3] Y. Zhang, D. S. Park, W. Han, J. Qin, A. Gulati, J. Shor, A. Jansen, Y. Xu, Y. Huang, S. Wang, Z. Zhou, B. Li, M. Ma, W. Chan, J. Yu, Y. Wang, L. Cao, K. C. Sim, B. Ramabhadran, T. N. Sainath, F. Beaufays, Z. Chen, Q. V. Le, C.-C. Chiu, R. Pang, and Y. Wu, "Bigssl: Exploring the frontier of large-scale semi-supervised learning for automatic speech recognition," IEEE Journal of Selected Topics in Signal Processing, vol. 16, no. 6, pp. 1519-1532, 2022.
386
+ [4] B. Li, R. Pang, T. N. Sainath, A. Gulati, Y. Zhang, J. Qin, P. Haghani, W. R. Huang, and M. Ma, "Scaling end-to-end models for large-scale multilingual asr," arXiv preprint arXiv:2104.14830, 2021.
387
+
388
+ [5] X. Li, F. Metze, D. R. Mortensen, A. W. Black, and S. Watanabe, "Asr2k: Speech recognition for around 2000 languages without audio," arXiv preprint arXiv:2209.02842, 2022.
389
+ [6] A. Baevski, H. Zhou, A. Mohamed, and M. Auli, "wav2vec 2.0: A framework for self-supervised learning of speech representations," arXiv preprint arXiv:2006.11477, 2020.
390
+ [7] Q. Xie, M.-T. Luong, E. Hovy, and Q. V. Le, "Self-training with noisy student improves imagenet classification," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2020, pp. 10687-10698.
391
+ [8] D. S. Park, Y. Zhang, Y. Jia, W. Han, C.-C. Chiu, B. Li, Y. Wu, and Q. V. Le, "Improved noisy student training for automatic speech recognition," arXiv preprint arXiv:2005.09629, 2020.
392
+ [9] A. Gulati, J. Qin, C.-C. Chiu, N. Parmar, Y. Zhang, J. Yu, W. Han, S. Wang, Z. Zhang, Y. Wu et al., "Conformer: Convolution-augmented transformer for speech recognition," arXiv preprint arXiv:2005.08100, 2020.
393
+ [10] C.-C. Chiu, J. Qin, Y. Zhang, J. Yu, and Y. Wu, "Self-supervised learning with random-projection quantizer for speech recognition," in Proceedings of the 39th International Conference on Machine Learning, ser. Proceedings of Machine Learning Research, K. Chaudhuri, S. Jegelka, L. Song, C. Szepesvari, G. Niu, and S. Sabato, Eds., vol. 162. PMLR, 17-23 Jul 2022, pp. 3915-3924. [Online]. Available: https://proceedings.mlr.press/v162/chiu22a.html
394
+ [11] J. Devlin, M.-W. Chang, K. Lee, and K. Toutanova, “Bert: Pre-training of deep bidirectional transformers for language understanding,” arXiv preprint arXiv:1810.04805, 2018.
395
+ [12] Z. Chen, Y. Zhang, A. Rosenberg, B. Ramabhadran, G. Wang, and P. Moreno, "Injecting text in self-supervised speech pretraining," arXiv preprint arXiv:2108.12226, 2021.
396
+ [13] Z. Chen, Y. Zhang, A. Rosenberg, B. Ramabhadran, P. Moreno, A. Bapna, and H. Zen, “Maestro: Matched speech text representations through modality matching,” arXiv preprint arXiv:2204.03409, 2022.
397
+ [14] A. Graves, S. Fernández, F. Gomez, and J. Schmidhuber, “Connectionist temporal classification: labelling unsegmented sequence data with recurrent neural networks,” in Proceedings of the 23rd international conference on Machine learning, 2006, pp. 369–376.
398
+ [15] W. Chan, N. Jaitly, Q. Le, and O. Vinyals, "Listen, attend and spell: A neural network for large vocabulary conversational speech recognition," in 2016 IEEE international conference on acoustics, speech and signal processing (ICASSP). IEEE, 2016, pp. 4960-4964.
399
+ [16] A. Conneau, M. Ma, S. Khanuja, Y. Zhang, V. Axelrod, S. Dalmia, J. Riesa, C. Rivera, and A. Bapna, “Fleurs: Few-shot learning evaluation of universal representations of speech,” arXiv preprint arXiv:2205.12446, 2022.
400
+ [17] T. Kendall and C. Farrington, “The corpus of regional african american language. version 2021.07. eugene, or: The online resources for african american language project,” 2021.
401
+ [18] C. Wang, A. Wu, and J. Pino, “CoVoST 2 and massively multilingual speech-to-text translation,” in interspeech, 2021.
402
+ [19] J. He, C. Zhou, X. Ma, T. Berg-Kirkpatrick, and G. Neubig, "Towards a unified view of parameter-efficient transfer learning," in International Conference on Learning Representations, 2022. [Online]. Available: https://openreview.net/forum?id=0RDcd5Axok
403
+ [20] A. Bapna, C. Cherry, Y. Zhang, Y. Jia, M. Johnson, Y. Cheng, S. Khanuja, J. Riesa, and A. Conneau, "mslam: Massively multilingual joint pre-training for speech and text," arXiv preprint arXiv:2202.01374, 2022.
404
+ [21] Y.-A. Chung, Y. Zhang, W. Han, C.-C. Chiu, J. Qin, R. Pang, and Y. Wu, "W2v-bert: Combining contrastive learning and masked language modeling for self-supervised speech pre-training," in 2021 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU). IEEE, 2021, pp. 244-250.
405
+ [22] W.-N. Hsu and J. Glass, "Extracting domain invariant features by unsupervised learning for robust automatic speech recognition," in 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2018, pp. 5614-5618.
406
+ [23] Y.-A. Chung and J. Glass, "Speech2vec: A sequence-to-sequence framework for learning word embeddings from speech," arXiv preprint arXiv:1803.08976, 2018.
407
+ [24] A. v. d. Oord, Y. Li, and O. Vinyals, “Representation learning with contrastive predictive coding,” arXiv preprint arXiv:1807.03748, 2018.
408
+ [25] Y.-A. Chung, W.-N. Hsu, H. Tang, and J. Glass, "An unsupervised autoregressive model for speech representation learning," arXiv preprint arXiv:1904.03240, 2019.
409
+ [26] J. Chorowski, R. J. Weiss, S. Bengio, and A. van den Oord, "Unsupervised speech representation learning using wavenet autoencoders," IEEE/ACM transactions on audio, speech, and language processing, vol. 27, no. 12, pp. 2041-2053, 2019.
410
+
411
+ [27] S. Schneider, A. Baevski, R. Collobert, and M. Auli, "wav2vec: Unsupervised pre-training for speech recognition," arXiv preprint arXiv:1904.05862, 2019.
412
+ [28] A. Baevski, S. Schneider, and M. Auli, "vq-wav2vec: Self-supervised learning of discrete speech representations," arXiv preprint arXiv:1910.05453, 2019.
413
+ [29] S. Ling, Y. Liu, J. Salazar, and K. Kirchhoff, “Deep contextualized acoustic representations for semi-supervised speech recognition,” in ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2020, pp. 6429-6433.
414
+ [30] A. Baevski, M. Auli, and A. Mohamed, "Effectiveness of self-supervised pre-training for speech recognition," arXiv preprint arXiv:1911.03912, 2019.
415
+ [31] M. Riviere, A. Joulin, P.-E. Mazaré, and E. Dupoux, "Unsupervised pretraining transfers well across languages," in ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2020, pp. 7414-7418.
416
+ [32] K. Kawakami, L. Wang, C. Dyer, P. Blunsom, and A. v. d. Oord, "Learning robust and multilingual speech representations," arXiv preprint arXiv:2001.11128, 2020.
417
+ [33] A. Babu, C. Wang, A. Tjandra, K. Lakhotia, Q. Xu, N. Goyal, K. Singh, P. von Platen, Y. Saraf, J. Pino et al., "Xls-r: Self-supervised cross-lingual speech representation learning at scale," arXiv preprint arXiv:2111.09296, 2021.
418
+ [34] G. Zavaliagkos and T. Colthurst, “Utilizing untranscribed training data to improve performance,” in DARPA Broadcast News Transcription and Understanding Workshop, Landsdowne, 1998, pp. 301–305.
419
+ [35] L. Lamel, J.-L. Gauvain, and G. Adda, “Lightly supervised acoustic model training,” in Proc. ISCA ITRW ASR2000, 2000, pp. 150–154.
420
+ [36] S. Novotney and R. Schwartz, “Analysis of low-resource acoustic model self-training,” in Tenth Annual Conference of the International Speech Communication Association, 2009.
421
+ [37] S. Thomas, M. L. Seltzer, K. Church, and H. Hermansky, “Deep neural network features and semi-supervised training for low resource speech recognition,” in 2013 IEEE international conference on acoustics, speech and signal processing. IEEE, 2013, pp. 6704–6708.
422
+ [38] B. Li, T. N. Sainath, R. Pang, and Z. Wu, "Semi-supervised training for end-to-end models via weak distillation," in ICASSP 2019-2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2019, pp. 2837-2841.
423
+ [39] J. Kahn, A. Lee, and A. Hannun, "Self-training for end-to-end speech recognition," in ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2020, pp. 7084-7088.
424
+ [40] G. Synnaeve, Q. Xu, J. Kahn, T. Likhomanenko, E. Grave, V. Pratap, A. Sriram, V. Liptchinsky, and R. Collobert, "End-to-end asr: from supervised to semi-supervised learning with modern architectures," in arXiv, 2019.
425
+ [41] S. H. K. Parthasarathi and N. Strom, "Lessons from building acoustic models with a million hours of speech," in ICASSP 2019-2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2019, pp. 6670-6674.
426
+ [42] W.-N. Hsu, A. Lee, G. Synnaeve, and A. Hannun, "Semi-supervised speech recognition via local prior matching," arXiv preprint arXiv:2002.10336, 2020.
427
+ [43] Q. Xu, T. Likhomanenko, J. Kahn, A. Hannun, G. Synnaeve, and R. Collobert, "Iterative pseudo-labeling for speech recognition," arXiv preprint arXiv:2005.09267, 2020.
428
+ [44] Z. Chen, A. Rosenberg, Y. Zhang, H. Zen, M. Ghodsi, Y. Huang, J. Emond, G. Wang, B. Ramabhadran, and P. J. Moreno, "Semi-Supervision in ASR: Sequential MixMatch and Factorized TTS-Based Augmentation," in Proc. Interspeech 2021, 2021, pp. 736-740.
429
+ [45] A. Renduchintala, S. Ding, M. Wiesner, and S. Watanabe, "Multi-modal data augmentation for end-to-end asr," arXiv preprint arXiv:1803.10299, 2018.
430
+ [46] A. Bapna, Y.-a. Chung, N. Wu, A. Gulati, Y. Jia, J. H. Clark, M. Johnson, J. Riesa, A. Conneau, and Y. Zhang, "Slam: A unified encoder for speech and language modeling via speech-text joint pre-training," arXiv preprint arXiv:2110.10329, 2021.
431
+ [47] S. Thomas, B. Kingsbury, G. Saon, and H.-K. J. Kuo, "Integrating text inputs for training and adapting RNN transducer ASR models," in ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2022, pp. 8127-8131.
432
+ [48] Y. Cheng, Y. Zhang, M. Johnson, W. Macherey, and A. Bapna, "Mu² slam: Multitask, multilingual speech and language models," arXiv preprint arXiv:2212.09553, 2022.
433
+
434
+ [49] Z.-H. Zhang, L. Zhou, J. Ao, S. Liu, L. Dai, J. Li, and F. Wei, "Speechut: Bridging speech and text with hidden-unit for encoder-decoder based speech-text pre-training," in Conference on Empirical Methods in Natural Language Processing, 2022.
435
+ [50] Z.-H. Zhang, S. Chen, L. Zhou, Y. Wu, S. Ren, S. Liu, Z. Yao, X. Gong, L. Dai, J. Li, and F. Wei, "Speechlm: Enhanced speech pre-training with unpaired textual data," ArXiv, vol. abs/2209.15329, 2022.
436
+ [51] S. Khurana, A. Laurent, and J. R. Glass, "SAMU-XLSR: Semantically-aligned multimodal utterance-level cross-lingual speech representation," IEEE Journal of Selected Topics in Signal Processing, vol. 16, pp. 1493–1504, 2022.
437
+ [52] X. Zhou, J. Wang, Z. Cui, S. Zhang, Z. Yan, J. Zhou, and C. Zhou, "Mmspeech: Multi-modal multi-task encoder-decoder pre-training for speech recognition," ArXiv, vol. abs/2212.00500, 2022.
438
+ [53] T. N. Sainath, R. Prabhavalkar, A. Bapna, Y. Zhang, Z. Huo, Z. Chen, B. Li, W. Wang, and T. Strohman, "Joist: A joint speech and text streaming model for asr," in 2022 IEEE Spoken Language Technology Workshop (SLT). IEEE, 2023, pp. 52-59.
439
+ [54] Z. Meng, W. Wang, R. Prabhavalkar, T. N. Sainath, T. Chen, E. Variani, Y. Zhang, B. Li, A. Rosenberg, and B. Ramabhadran, "Jeit: Joint end-to-end model and internal language model training for speech recognition," in ICASSP, 2023, 2023.
440
+ [55] Z. Meng, T. Chen, R. Prabhavalkar, Y. Zhang, G. Wang, K. Audhkhasi, J. Emond, T. Strohman, B. Ramabhadran, W. R. Huang et al., "Modular hybrid autoregressive transducer," in 2022 IEEE Spoken Language Technology Workshop (SLT). IEEE, 2023, pp. 197-204.
441
+ [56] C.-C. Chiu, W. Han, Y. Zhang, R. Pang, S. Kishchenko, P. Nguyen, A. Narayanan, H. Liao, S. Zhang, A. Kannan et al., “A comparison of end-to-end models for long-form speech recognition,” in 2019 IEEE automatic speech recognition and understanding workshop (ASRU). IEEE, 2019, pp. 889–896.
442
+ [57] Z. Lu, Y. Pan, T. Doutre, P. Haghani, L. Cao, R. Prabhavalkar, C. Zhang, and T. Strohman, "Input length matters: Improving RNN-T and MWER training for long-form telephony speech recognition," arXiv preprint arXiv:2110.03841, 2021.
443
+ [58] Z. Dai, Z. Yang, Y. Yang, J. Carbonell, Q. V. Le, and R. Salakhutdinov, "Transformer-xl: Attentive language models beyond a fixed-length context," arXiv preprint arXiv:1901.02860, 2019.
444
+ [59] A. Graves, "Sequence transduction with recurrent neural networks," arXiv preprint arXiv:1211.3711, 2012.
445
+ [60] C. Raffel, N. Shazeer, A. Roberts, K. Lee, S. Narang, M. Matena, Y. Zhou, W. Li, and P. J. Liu, "Exploring the limits of transfer learning with a unified text-to-text transformer," The Journal of Machine Learning Research, vol. 21, no. 1, pp. 5485-5551, 2020.
446
+ [61] B. Ramabhadran, K. Audhkhasi, P. J. M. Mengibar, and T. Chen, "Mixture model attention: Flexible streaming and non-streaming automatic speech recognition," in Proceedings of Interspeech, 2021, 2021.
447
+ [62] L. Lu, C. Liu, J. Li, and Y. Gong, "Exploring transformers for large-scale speech recognition," arXiv preprint arXiv:2005.09684, 2020.
448
+ [63] X. Chen, Y. Wu, Z. Wang, S. Liu, and J. Li, "Developing real-time streaming transformer transducer for speech recognition on large-scale dataset," in International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2021, pp. 5904-5908.
449
+ [64] C. Wu, Y. Wang, Y. Shi, C.-F. Yeh, and F. Zhang, "Streaming transformer-based acoustic models using self-attention with augmented memory," arXiv preprint arXiv:2005.08042, 2020.
450
+ [65] Y. Shi, Y. Wang, C. Wu, C.-F. Yeh, J. Chan, F. Zhang, D. Le, and M. Seltzer, "Emformer: Efficient memory transformer based acoustic model for low latency streaming speech recognition," in International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2021, pp. 6783-6787.
451
+ [66] E. Tsunoo, Y. Kashiwagi, T. Kumakura, and S. Watanabe, "Transformer asr with contextual block processing," in 2019 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU). IEEE, 2019, pp. 427-433.
452
+ [67] Z. Chen, A. Bapna, A. Rosenberg, Y. Zhang, B. Ramabhadran, P. Moreno, and N. Chen, “Maestro-U: Leveraging joint speech-text representation learning for zero supervised speech asr,” arXiv preprint arXiv:2210.10027, 2022.
453
+ [68] F. Biadsy, Y. Chen, X. Zhang, O. Rybakov, A. Rosenberg, and P. J. Moreno, "A scalable model specialization framework for training and inference using submodels and its application to speech model personalization," in Proc. Interspeech 2022. ISCA, 2022, pp. 5125-5129.
454
+ [69] K. Tomanek, V. Zayats, D. Padfield, K. Vaillancourt, and F. Biadsy, “Residual adapters for parameter-efficient asr adaptation to atypical and accented speech,” in Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, 2021, pp. 6751–6760.
455
+ [70] M. Schuster and K. Nakajima, "Japanese and korean voice search," in 2012 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2012, pp. 5149-5152.
456
+
457
+ [71] Y. Zhang, J. Qin, D. S. Park, W. Han, C.-C. Chiu, R. Pang, Q. V. Le, and Y. Wu, “Pushing the limits of semi-supervised learning for automatic speech recognition,” arXiv preprint arXiv:2010.10504, 2020.
458
+ [72] D. Lepikhin, H. Lee, Y. Xu, D. Chen, O. Firat, Y. Huang, M. Krikun, N. Shazeer, and Z. Chen, "GShard: Scaling giant models with conditional computation and automatic sharding," CoRR, vol. abs/2006.16668, 2020. [Online]. Available: https://arxiv.org/abs/2006.16668
459
+ [73] Y. Xu, H. Lee, D. Chen, B. A. Hechtman, Y. Huang, R. Joshi, M. Krikun, D. Lepikhin, A. Ly, M. Maggioni, R. Pang, N. Shazeer, S. Wang, T. Wang, Y. Wu, and Z. Chen, "GSPMD: general and scalable parallelization for ML computation graphs," CoRR, vol. abs/2105.04663, 2021. [Online]. Available: https://arxiv.org/abs/2105.04663
460
+ [74] C. Wang, M. Rivière, A. Lee, A. Wu, C. Talnikar, D. Haziza, M. Williamson, J. Pino, and E. Dupoux, "Voxpopuli: A large-scale multilingual speech corpus for representation learning, semi-supervised learning and interpretation," arXiv preprint arXiv:2101.00390, 2021.
461
+ [75] R. Ardila, M. Branson, K. Davis, M. Henretty, M. Kohler, J. Meyer, R. Morais, L. Saunders, F. M. Tyers, and G. Weber, "Common voice: A massively-multilingual speech corpus," arXiv preprint arXiv:1912.06670, 2019.
462
+ [76] V. Pratap, Q. Xu, A. Sriram, G. Synnaeve, and R. Collobert, "Mls: A large-scale multilingual dataset for speech research," arXiv preprint arXiv:2012.03411, 2020.
463
+ [77] M. J. F. Gales, K. Knill, A. Ragni, and S. P. Rath, "Speech recognition and keyword spotting for low-resource languages: Babel project research at cued," in SLTU, 2014.
464
+ [78] A. Bapna, I. Caswell, J. Kreutzer, O. Firat, D. van Esch, A. Siddhant, M. Niu, P. Baljekar, X. Garcia, W. Macherey et al., "Building machine translation systems for the next thousand languages," arXiv preprint arXiv:2205.03983, 2022.
465
+ [79] N. Arivazhagan, A. Bapna, O. Firat, D. Lepikhin, M. Johnson, M. Krikun, M. X. Chen, Y. Cao, G. Foster, C. Cherry et al., "Massively multilingual neural machine translation in the wild: Findings and challenges," arXiv preprint arXiv:1907.05019, 2019.
466
+ [80] J. Carletta, S. Ashby, S. Bourban, M. Flynn, M. Guillemot, T. Hain, J. Kadlec, V. Karaiskos, W. Kraaij, M. Kronenthal et al., "The AMI meeting corpus: A pre-announcement," in International workshop on machine learning for multimodal interaction. Springer, 2005, pp. 28-39.
467
+ [81] R. Ardila, M. Branson, K. Davis, M. Henretty, M. Kohler, J. Meyer, R. Morais, L. Saunders, F. M. Tyers, and G. Weber, "Common voice: A massively-multilingual speech corpus," arXiv preprint arXiv:1912.06670, 2019.
468
+ [82] V. Panayotov, G. Chen, D. Povey, and S. Khudanpur, "Librispeech: an asr corpus based on public domain audio books," in 2015 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2015, pp. 5206-5210.
469
+ [83] A. Rousseau, P. Deléglise, and Y. Esteve, “Ted-lium: an automatic speech recognition dedicated corpus.” in LREC, 2012, pp. 125–129.
470
+ [84] F. Hernandez, V. Nguyen, S. Ghannay, N. Tomashenko, and Y. Esteve, “Ted-lium 3: twice as much data and corpus repartition for experiments on speaker adaptation,” in International conference on speech and computer. Springer, 2018, pp. 198–208.
471
+ [85] J.-L. Gauvain, L. F. Lamel, G. Adda, and M. Adda-Decker, "The limsi continuous speech dictation system: evaluation on the arpa wall street journal task," in Proceedings of ICASSP'94. IEEE International Conference on Acoustics, Speech and Signal Processing, vol. 1. IEEE, 1994, pp. I-557.
472
+ [86] F. Kubala, J. Davenport, H. Jin, D. Liu, T. Leek, S. Matsoukas, D. Miller, L. Nguyen, F. Richardson, R. Schwartz et al., "The 1997 bbn byblos system applied to broadcast news transcription," in Proc. DARPA Broadcast News Transcription and Understanding Workshop. Morgan Kaufmann, 1998, pp. 35-40.
473
+ [87] S. Chen, M. Gales, P. Gopalakrishnan, R. Gopinath, H. Printz, D. Kanevsky, P. Olsen, and L. Polymenakos, "Ibm's lvcsr system for transcription of broadcast news used in the 1997 hub4 english evaluation," in Proceedings of the Speech Recognition Workshop. Citeseer, 1998.
474
+ [88] N. P. Jouppi, D. H. Yoon, M. Ashcraft, M. Gottscho, T. B. Jablin, G. Kurian, J. Laudon, S. Li, P. Ma, X. Ma et al., “Ten lessons from three generations shaped google's tpuv4i: Industrial product,” in 2021 ACM/IEEE 48th Annual International Symposium on Computer Architecture (ISCA). IEEE, 2021, pp. 1-14.
475
+ [89] B. Li, A. Gulati, J. Yu, T. N. Sainath, C.-C. Chiu, A. Narayanan, S.-Y. Chang, R. Pang, Y. He, J. Qin et al., “A better and faster end-to-end model for streaming asr,” in ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2021, pp. 5634-5638.
2303.01xxx/2303.01037/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8daecb32a9d50ad97288dca283f53c553b02a70ff1a7d8d80fb2f82bd111aca4
3
+ size 512035
2303.01xxx/2303.01037/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.01xxx/2303.01047/bb789d82-765d-47ed-82b3-a11998226e8d_content_list.json ADDED
@@ -0,0 +1,1163 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Task-Specific Context Decoupling for Object Detection",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 209,
8
+ 130,
9
+ 759,
10
+ 152
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Jiayuan Zhuang $^{1}$ Zheng Qin $^{1}$ Hao Yu $^{2}$ Xucan Chen $^{1}$",
17
+ "bbox": [
18
+ 251,
19
+ 179,
20
+ 715,
21
+ 198
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "$^{1}$ National University of Defense Technology $^{2}$ Technical University of Munich",
28
+ "bbox": [
29
+ 166,
30
+ 198,
31
+ 800,
32
+ 217
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "alpc111@163.com qinzheng12@nudt.edu.cn hao.yu@tum.de xcchen18@139.com",
39
+ "bbox": [
40
+ 165,
41
+ 219,
42
+ 800,
43
+ 233
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "Abstract",
50
+ "text_level": 1,
51
+ "bbox": [
52
+ 233,
53
+ 268,
54
+ 313,
55
+ 284
56
+ ],
57
+ "page_idx": 0
58
+ },
59
+ {
60
+ "type": "text",
61
+ "text": "Classification and localization are two main sub-tasks in object detection. Nonetheless, these two tasks have inconsistent preferences for feature context, i.e., localization expects more boundary-aware features to accurately regress the bounding box, while more semantic context is preferred for object classification. Existing methods usually leverage disentangled heads to learn different feature context for each task. However, the heads are still applied on the same input features, which leads to an imperfect balance between classification and localization. In this work, we propose a novel Task-Specific COntext DEcoupling (TSCODE) head which further disentangles the feature encoding for two tasks. For classification, we generate spatially-coarse but semantically-strong feature encoding. For localization, we provide high-resolution feature map containing more edge information to better regress object boundaries. TSCODE is plug-and-play and can be easily incorporated into existing detection pipelines. Extensive experiments demonstrate that our method stably improves different detectors by over 1.0 AP with less computational cost. Our code and models will be publicly released.",
62
+ "bbox": [
63
+ 75,
64
+ 300,
65
+ 473,
66
+ 618
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "1. Introduction",
73
+ "text_level": 1,
74
+ "bbox": [
75
+ 76,
76
+ 633,
77
+ 209,
78
+ 648
79
+ ],
80
+ "page_idx": 0
81
+ },
82
+ {
83
+ "type": "text",
84
+ "text": "Object detection aims to recognize and localize objects existing in a natural image, which is a fundamental but challenging task in many computer vision applications. Recent advances in object detection have been predominated by deep learning-based methods [15, 18, 26, 33, 36], where the task is typically formulated as the classification of regions of interest and the localization of bounding boxes.",
85
+ "bbox": [
86
+ 75,
87
+ 659,
88
+ 468,
89
+ 763
90
+ ],
91
+ "page_idx": 0
92
+ },
93
+ {
94
+ "type": "text",
95
+ "text": "Semantic context encoded in the high-level semantic-rich features plays a crucial role in both the classification and localization task of object detection [1, 31]. Feature pyramid network (FPN) [25] distills the semantic context from the high-level semantic-rich features and fuses it into the low-level detail-preserving feature maps, where small-scale objects can be better detected. This design effectively provides more semantic context for early-stage features and facilitates detecting objects in various scales. Early",
96
+ "bbox": [
97
+ 75,
98
+ 763,
99
+ 470,
100
+ 902
101
+ ],
102
+ "page_idx": 0
103
+ },
104
+ {
105
+ "type": "image",
106
+ "img_path": "images/4ccf1fe52c8b470cec41e5c9038f088e3377cea4f88bbb2b05c9b493c90c734a.jpg",
107
+ "image_caption": [],
108
+ "image_footnote": [],
109
+ "bbox": [
110
+ 501,
111
+ 267,
112
+ 893,
113
+ 404
114
+ ],
115
+ "page_idx": 0
116
+ },
117
+ {
118
+ "type": "image",
119
+ "img_path": "images/db750b9af10cc38edcf3ef381f4366249fcb0e970638bf121564303111d1e677.jpg",
120
+ "image_caption": [
121
+ "Figure 1. Top: Inference from original FCOS [43]. Bottom: Inference from FCOS with our TSCODE. Results are shown before NMS. The bounding box that has the highest IoU with the ground truth is shown in green, while the top-3 bounding-boxes with the highest classification scores are marked in other colors. The competition between the two tasks in FCOS can be clearly observed in the top figure, i.e., the bounding box with the best IoU (green) has lower classification confidence, while that with the best classification score (blue) has a poor bounding box prediction. Thanks to our TSCODE, the competition issue is addressed in the bottom figure, where the blue bounding box with the most confident classification prediction also has a great IoU with the ground truth."
122
+ ],
123
+ "image_footnote": [],
124
+ "bbox": [
125
+ 501,
126
+ 406,
127
+ 893,
128
+ 542
129
+ ],
130
+ "page_idx": 0
131
+ },
132
+ {
133
+ "type": "text",
134
+ "text": "works [17, 33, 36] usually tackle the detection problem by attaching a head network shared by the two tasks on each level of feature map for a specific scale of objects, though the two tasks are semantically misaligned which is noted by Double-Head R-CNN [45] and TSD [40] afterwards. Typically, bounding box regression expects more texture details and edge information to accurately localize the object boundaries, while more semantic context is required to facilitate the classification of objects [6, 7]. Based on this insight, they propose to use two decoupled head branches for different tasks on each feature level from FPN. In this pa",
135
+ "bbox": [
136
+ 496,
137
+ 734,
138
+ 893,
139
+ 902
140
+ ],
141
+ "page_idx": 0
142
+ },
143
+ {
144
+ "type": "aside_text",
145
+ "text": "arXiv:2303.01047v1 [cs.CV] 2 Mar 2023",
146
+ "bbox": [
147
+ 22,
148
+ 265,
149
+ 58,
150
+ 700
151
+ ],
152
+ "page_idx": 0
153
+ },
154
+ {
155
+ "type": "text",
156
+ "text": "per, we name this decoupling strategy the parameter decoupling, which relies on separate heads to encode task-specific semantic context from the same feature map.",
157
+ "bbox": [
158
+ 75,
159
+ 90,
160
+ 468,
161
+ 136
162
+ ],
163
+ "page_idx": 1
164
+ },
165
+ {
166
+ "type": "text",
167
+ "text": "However, we observe that disentangling classification and localization only in the parameter level leads to an imperfect trade-off between two tasks. Fig. 1 (top) visualizes the top-3 (for better view) bounding boxes with highest confidence scores and the box that has the highest intersection-over-union (IoU) with corresponding ground-truth table box before non-maximum suppression (NMS) [38] predicted by FCOS [43]. FCOS has already extracted accurate enough boxes (see the green one), but they are suppressed during NMS due to relatively low confidence scores. We owe this to the intrinsic competition between the two tasks, and merely relying on the learning ability of head networks to provide the task-specific context from a shared feature map usually shows bias towards one task, while suppressing the other. This phenomenon has been noticed in [6, 40], but still without proper solution being proposed. Some works [13, 28, 41, 42] attempt to incorporate more semantic context to improve the detection results. However, more is not always better, and there are still three problems: 1) The generality of these methods is limited and only a small number of detectors can benefit from them; 2) More computation overhead is introduced, which requires long training schedules and harms the inference efficiency; 3) More importantly, the essential problem still exists, i.e., the shared feature map is still jointly optimized for two tasks that compete with each other.",
168
+ "bbox": [
169
+ 75,
170
+ 138,
171
+ 472,
172
+ 530
173
+ ],
174
+ "page_idx": 1
175
+ },
176
+ {
177
+ "type": "text",
178
+ "text": "To address these issues, we propose to directly disentangle the feature encoding for classification and localization, namely Task-Specific Context Decoupling (TSCODE for short), so that more suitable semantic context is selected for resolving specific tasks. For the classification branch, it usually demands features with rich semantic context contained in the picture to infer the category of objects, thus we adopt spatially-coarse but semantically-strong feature encoding for it. For the localization branch, it usually requires more boundary information around objects, thus we provide it with high-resolution feature map containing more edge information for finer regression of object boundaries. Moreover, the feature fusion in two branches is designed in an efficient fashion and further boosts the performance. Benefiting from our disentangled design, incoherent context preference of the two tasks is alleviated, which allows the detector to converge faster and achieve better performance. As shown in Fig. 1 (bottom), with TSCODE, the competition issue is addressed and the bounding box with the most confident classification prediction also has a great IoU with the ground truth.",
179
+ "bbox": [
180
+ 75,
181
+ 532,
182
+ 472,
183
+ 848
184
+ ],
185
+ "page_idx": 1
186
+ },
187
+ {
188
+ "type": "text",
189
+ "text": "Our main contributions are summarized as follows:",
190
+ "bbox": [
191
+ 96,
192
+ 849,
193
+ 436,
194
+ 864
195
+ ],
196
+ "page_idx": 1
197
+ },
198
+ {
199
+ "type": "text",
200
+ "text": "- We delve into the different roles of semantic contexts in classification and localization tasks and discover the",
201
+ "bbox": [
202
+ 94,
203
+ 869,
204
+ 468,
205
+ 898
206
+ ],
207
+ "page_idx": 1
208
+ },
209
+ {
210
+ "type": "text",
211
+ "text": "source of their inherent conflict.",
212
+ "bbox": [
213
+ 532,
214
+ 90,
215
+ 746,
216
+ 104
217
+ ],
218
+ "page_idx": 1
219
+ },
220
+ {
221
+ "type": "list",
222
+ "sub_type": "text",
223
+ "list_items": [
224
+ "- We propose a novel TSCODE head to deal with the tangled context conflict, where different feature encodings with task-specific context can be generated.",
225
+ "- Extensive experiments show that TSCODE is plug-and-play and can easily boosts the performance of existing detectors with even less computational cost."
226
+ ],
227
+ "bbox": [
228
+ 517,
229
+ 111,
230
+ 893,
231
+ 208
232
+ ],
233
+ "page_idx": 1
234
+ },
235
+ {
236
+ "type": "text",
237
+ "text": "2. Related Work",
238
+ "text_level": 1,
239
+ "bbox": [
240
+ 500,
241
+ 238,
242
+ 640,
243
+ 255
244
+ ],
245
+ "page_idx": 1
246
+ },
247
+ {
248
+ "type": "text",
249
+ "text": "Object Detection. The current mainstream CNN-based object detectors can be divided into two-stage [17, 36] and one-stage [29, 33]. Most of them use the feature pyramid network (FPN) [25] to cope with the large-scale variation [39, 42] of objects. Compared with works [3, 5] that merely leverage a single feature map for detecting all the scale-varying objects, FPN takes advantage of the Convolutional Neural Networks (CNN) hierarchy, i.e., it not only fuses detail-preserving low-level features and semantic-rich high-level features, but also distributes the classification and localization tasks of different objects to corresponding feature maps according to their scale on images. While the insightful point [28] that the feature maps in low layers strongly respond to edges or instance parts manifests the necessity of augmenting a bottom-up path to propagate features with strong boundary information and enhance all features with reasonable localization capability. Recent works [41, 42] try to stack this feature fusion structure several times for sufficient information exchange between high-level features and low-level features. These designs maybe a compromise to the inaccurate localization information contained in high-level feature maps and the insufficient semantic context contained in low-level feature maps.",
250
+ "bbox": [
251
+ 496,
252
+ 263,
253
+ 890,
254
+ 611
255
+ ],
256
+ "page_idx": 1
257
+ },
258
+ {
259
+ "type": "text",
260
+ "text": "Decoupled head. Decoupled head has long been the standard configuration of the one-stage detectors [24, 26, 43, 50]. Recent works, Double-Head R-CNN [45] and TSD [40] revisit the specialized sibling head that is widely used in R-CNN family [2, 14, 15, 17, 32, 36] and finally figure out the essence of the tasks misalignment between classification and localization. YOLOX [12] also points out that the coupled detection head may harm the performance, it introduces decoupled head to the YOLO family [33-35] for the first time and greatly improves the converging speed and boosts the permanformance. Base on the decoupled head, DDOD [6] proposes to use deformable convolutions to learn separate convolutional offset for each branch, aiming to adaptively select specific spatial features for each head. These works demonstrate the importance of decoupling between classification and localization tasks. However, as mentioned in section 1, the decoupling for classification and localization only in the parameter level leads to an imperfect trade-off between two tasks.",
261
+ "bbox": [
262
+ 496,
263
+ 613,
264
+ 890,
265
+ 900
266
+ ],
267
+ "page_idx": 1
268
+ },
269
+ {
270
+ "type": "image",
271
+ "img_path": "images/a6bf0e29b012f77dff64c79136ac9cb2e44a4d4575231f402050dba6f18288e5.jpg",
272
+ "image_caption": [
273
+ "Figure 2. An illustration of ours novel Task-Specific Context Decoupling (TSCODE). Detector head at the $l^{th}$ pyramid level receive feature maps $P_{l+1}, P_l$ and $P_{l-1}$ from FPN [25], TSCODE then further disentangles the feature encoding for classification and localization tasks."
274
+ ],
275
+ "image_footnote": [],
276
+ "bbox": [
277
+ 78,
278
+ 85,
279
+ 488,
280
+ 303
281
+ ],
282
+ "page_idx": 2
283
+ },
284
+ {
285
+ "type": "image",
286
+ "img_path": "images/c5fa428a3e357bb3ae64480eb61b2e683be6dbf42f5b5c1c789b07eba47e6cc3.jpg",
287
+ "image_caption": [],
288
+ "image_footnote": [],
289
+ "bbox": [
290
+ 491,
291
+ 85,
292
+ 893,
293
+ 304
294
+ ],
295
+ "page_idx": 2
296
+ },
297
+ {
298
+ "type": "text",
299
+ "text": "3. Method",
300
+ "text_level": 1,
301
+ "bbox": [
302
+ 76,
303
+ 358,
304
+ 166,
305
+ 373
306
+ ],
307
+ "page_idx": 2
308
+ },
309
+ {
310
+ "type": "text",
311
+ "text": "3.1. Motivation and Framework",
312
+ "text_level": 1,
313
+ "bbox": [
314
+ 76,
315
+ 383,
316
+ 326,
317
+ 398
318
+ ],
319
+ "page_idx": 2
320
+ },
321
+ {
322
+ "type": "text",
323
+ "text": "Classification and localization are two highly related but still contradictory tasks in object detection. For each object, classification is more coarse-grained which requires richer semantic context, while localization is rather fine-grained and demands more on detailed boundary information. For this reason, mainstream detectors [11, 24, 26, 40, 45, 50] apply decoupled head to cope with this conflict. Specifically, given an ground-truth object assigned to a specific pyramid level $l$ , with bounding box $\\mathcal{B}$ and class label $c$ , the detectors with the typical decoupled head minimize the classification and localization loss based on the same feature map $P_l$ :",
324
+ "bbox": [
325
+ 75,
326
+ 406,
327
+ 470,
328
+ 571
329
+ ],
330
+ "page_idx": 2
331
+ },
332
+ {
333
+ "type": "equation",
334
+ "text": "\n$$\n\\mathcal {L} = \\mathcal {L} _ {c l s} \\left(\\mathcal {F} _ {c} \\left(P _ {l}\\right), c\\right) + \\mathcal {L} _ {l o c} \\left(\\mathcal {F} _ {r} \\left(P _ {l}\\right), \\mathcal {B}\\right), \\tag {1}\n$$\n",
335
+ "text_format": "latex",
336
+ "bbox": [
337
+ 135,
338
+ 585,
339
+ 468,
340
+ 602
341
+ ],
342
+ "page_idx": 2
343
+ },
344
+ {
345
+ "type": "text",
346
+ "text": "where $\\mathcal{F}_c(\\cdot) = \\{f_{cls}(\\cdot),\\mathcal{C}(\\cdot)\\}$ , $\\mathcal{F}_r(\\cdot) = \\{f_{loc}(\\cdot),\\mathcal{R}(\\cdot)\\}$ , are the classification and localization branches. $f_{cls}(\\cdot)$ and $f_{loc}(\\cdot)$ are the feature projection functions for classification and localization, while $\\mathcal{C}(\\cdot)$ and $\\mathcal{R}(\\cdot)$ are the final layers in two branches which decode features to classification scores and bounding box positions. In the common decoupled-head design, $f_{cls}(\\cdot)$ and $f_{loc}(\\cdot)$ share the same structure but are learned with separate parameters to provide each task with different feature contexts, i.e., parameter decoupling. However, this simplistic design cannot fully solve this problem as the semantic context has been largely determined by the shared input features $P_l$ . Although recent work [6] attempts to learn features with more flexible context for each task with deformable convolutions, the fact that they still originate from the same features, however, limits its effectiveness. Therefore, the conflict between classification and localization imposes opposite preferences of context in $P_l$ , leading to an imperfect balance between the two tasks.",
347
+ "bbox": [
348
+ 75,
349
+ 612,
350
+ 468,
351
+ 883
352
+ ],
353
+ "page_idx": 2
354
+ },
355
+ {
356
+ "type": "text",
357
+ "text": "To address this issue, our TSCODE decouples the fea",
358
+ "bbox": [
359
+ 96,
360
+ 885,
361
+ 468,
362
+ 900
363
+ ],
364
+ "page_idx": 2
365
+ },
366
+ {
367
+ "type": "text",
368
+ "text": "ture encoding for the two tasks at the source and leverages feature maps with different semantic context in the two branches. Instead of using $P_{l}$ as the common input, we feed the two branches with task-specific input features, i.e., $G_{l}^{cls}$ and $G_{l}^{loc}$ . To this end, Eq. (1) can be written as:",
369
+ "bbox": [
370
+ 496,
371
+ 359,
372
+ 890,
373
+ 436
374
+ ],
375
+ "page_idx": 2
376
+ },
377
+ {
378
+ "type": "equation",
379
+ "text": "\n$$\n\\mathcal {L} = \\mathcal {L} _ {c l s} \\left(\\mathcal {F} _ {c} \\left(G _ {l} ^ {c l s}\\right), c\\right) + \\lambda \\mathcal {L} _ {l o c} \\left(\\mathcal {F} _ {r} \\left(G _ {l} ^ {l o c}\\right), \\mathcal {B}\\right). \\tag {2}\n$$\n",
380
+ "text_format": "latex",
381
+ "bbox": [
382
+ 540,
383
+ 446,
384
+ 890,
385
+ 465
386
+ ],
387
+ "page_idx": 2
388
+ },
389
+ {
390
+ "type": "text",
391
+ "text": "For the classification branch, we generate spatially coarser but semantically richer feature maps. While for the localization branch, we provide it with feature maps contain more detailed texture and boundary information.",
392
+ "bbox": [
393
+ 496,
394
+ 476,
395
+ 890,
396
+ 535
397
+ ],
398
+ "page_idx": 2
399
+ },
400
+ {
401
+ "type": "text",
402
+ "text": "As illustrated in Fig. 2, our method follows the common one-stage detection framework, which is composed of the backbone, the feature pyramid and the detection head. The backbone and feature pyramid generate multi-scale feature maps from the input images. Our TSCODE head then receives three levels of feature maps and generate decoupled feature maps for classification and localization. More importantly, TSCODE is plug-and-play and can be easily incorporated into most of the popular detectors, whether it is anchor-based or anchor-free.",
403
+ "bbox": [
404
+ 496,
405
+ 536,
406
+ 890,
407
+ 686
408
+ ],
409
+ "page_idx": 2
410
+ },
411
+ {
412
+ "type": "text",
413
+ "text": "3.2. Semantic Context Encoding for Classification",
414
+ "text_level": 1,
415
+ "bbox": [
416
+ 500,
417
+ 696,
418
+ 883,
419
+ 712
420
+ ],
421
+ "page_idx": 2
422
+ },
423
+ {
424
+ "type": "text",
425
+ "text": "In object detection, classification is a relatively coarse-grained task which recognizes what an bounding box covers. On the one hand, as mentioned in [6, 10, 20, 40], the classification can often be pinned down by the features of its key or salient parts of an object, i.e., local focus. However, existing methods ignore that the salient areas could be sparsely distributed, indicating that there could be feature redundancy in the classification branch. We find in the experiments that, feeding the classification branch with down-sampled feature maps witnesses almost no performance drop, but reduces the computational cost (results can be seen in Sec. 4.1). On the other hand, the category of an",
426
+ "bbox": [
427
+ 496,
428
+ 719,
429
+ 890,
430
+ 900
431
+ ],
432
+ "page_idx": 2
433
+ },
434
+ {
435
+ "type": "image",
436
+ "img_path": "images/938b0d411e59bef5f8f5691f9aa5405982194bf150571247fce6c99b24fa1963.jpg",
437
+ "image_caption": [
438
+ "Figure 3. Semantic context encoding for classification."
439
+ ],
440
+ "image_footnote": [],
441
+ "bbox": [
442
+ 76,
443
+ 85,
444
+ 472,
445
+ 227
446
+ ],
447
+ "page_idx": 3
448
+ },
449
+ {
450
+ "type": "text",
451
+ "text": "object could be inferred from its surrounding environment, e.g., a chair is more likely to co-occur with a table, or an occluded table can be recognized from a larger region. This means the global abstraction from a richer semantic context could facilitate the classification task.",
452
+ "bbox": [
453
+ 75,
454
+ 268,
455
+ 468,
456
+ 343
457
+ ],
458
+ "page_idx": 3
459
+ },
460
+ {
461
+ "type": "text",
462
+ "text": "Based on these insights, we devise Semantic Context Encoding (SCE) for efficient and accurate classification. Specifically, at each pyramid level $l$ , SCE leverages the feature maps from two levels, i.e., $P_{l}$ and $P_{l + 1}$ , to generate a semantically-rich feature map for classification. As shown in Fig. 3, we first downsample $P_{l}$ by a factor of 2 and concatenate it with $P_{l + 1}$ to generate the final $G_{l}^{cls}$ :",
463
+ "bbox": [
464
+ 75,
465
+ 344,
466
+ 472,
467
+ 450
468
+ ],
469
+ "page_idx": 3
470
+ },
471
+ {
472
+ "type": "equation",
473
+ "text": "\n$$\nG _ {l} ^ {c l s} = \\operatorname {C o n c a t} \\left(\\mathrm {D C o n v} \\left(P _ {l}\\right), P _ {l + 1}\\right), \\tag {3}\n$$\n",
474
+ "text_format": "latex",
475
+ "bbox": [
476
+ 153,
477
+ 458,
478
+ 468,
479
+ 477
480
+ ],
481
+ "page_idx": 3
482
+ },
483
+ {
484
+ "type": "text",
485
+ "text": "where $\\mathrm{Concat}(\\cdot)$ and $\\mathrm{DConv}(\\cdot)$ represent concatenation and a shared downsampling convolutional layer. Note that $G_{l}^{cls}$ is in the $1/2$ resolution of $P_{l}$ . $G_{l}^{cls}$ is then passed into $\\mathcal{F}_{c}(\\cdot) = \\{f_{cls}(\\cdot), \\mathcal{C}(\\cdot)\\}$ to predict the classification scores. Instead of using 4 convolutional layers with 256 channels, we adopt a shallow-but-wide design of $f_{cls}(\\cdot)$ using 2 convolutional layers with 512 channels. We argue that this design can encode more semantic information than the deep-but-narrow one, which requires little extra computational cost but facilitates more accurate classification. Since $G_{l}^{cls}$ is $2 \\times$ downsampled than $P_{l}$ , each location $(x,y)$ in $G_{l}^{cls}$ predicts the classification scores of its four nearest neighbors in $P_{l}$ , denoted as $\\tilde{C} \\in \\mathbb{R}^{H_{l+1} \\times W_{l+1} \\times 4N}$ , and $N$ is the number of categories. $\\tilde{C}$ is then rearranged to $\\hat{C} \\in \\mathbb{R}^{H_{l} \\times W_{l} \\times N}$ to recover the resolution:",
486
+ "bbox": [
487
+ 75,
488
+ 484,
489
+ 472,
490
+ 712
491
+ ],
492
+ "page_idx": 3
493
+ },
494
+ {
495
+ "type": "equation",
496
+ "text": "\n$$\n\\hat {C} [ 2 x + i, 2 y + j, c ] = \\tilde {C} [ x, y, (2 i + j) c ], \\forall i, j \\in \\{0, 1 \\}. \\tag {4}\n$$\n",
497
+ "text_format": "latex",
498
+ "bbox": [
499
+ 89,
500
+ 719,
501
+ 468,
502
+ 739
503
+ ],
504
+ "page_idx": 3
505
+ },
506
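As a concrete illustration of Eq. (3) and Eq. (4), the following PyTorch-style sketch (our own illustration, not the released code; layer names such as down_conv and cls_subnet are assumptions, and the channel ordering of the rearrangement is assumed to match pixel_shuffle, which may differ from the paper's exact indexing) builds the coarser but semantically richer classification input from $P_l$ and $P_{l+1}$ and unpacks the $4N$-channel prediction back to the resolution of $P_l$:

```python
# Sketch of Semantic Context Encoding (SCE), assuming PyTorch.
import torch
import torch.nn as nn
import torch.nn.functional as F

class SCE(nn.Module):
    def __init__(self, channels: int = 256, num_classes: int = 80):
        super().__init__()
        # shared stride-2 conv playing the role of DConv(.) in Eq. (3)
        self.down_conv = nn.Conv2d(channels, channels, 3, stride=2, padding=1)
        # shallow-but-wide classification subnet: 2 convs with 512 channels
        self.cls_subnet = nn.Sequential(
            nn.Conv2d(2 * channels, 512, 3, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, 3, padding=1), nn.ReLU(inplace=True))
        # each coarse location predicts scores for its 4 children in P_l
        self.cls_pred = nn.Conv2d(512, 4 * num_classes, 3, padding=1)

    def forward(self, p_l: torch.Tensor, p_l_plus_1: torch.Tensor) -> torch.Tensor:
        # Eq. (3): G_l^cls = Concat(DConv(P_l), P_{l+1})
        g_cls = torch.cat([self.down_conv(p_l), p_l_plus_1], dim=1)
        coarse_scores = self.cls_pred(self.cls_subnet(g_cls))    # (B, 4N, H/2, W/2)
        # Eq. (4): unpack each 2x2 neighborhood back to P_l's resolution
        return F.pixel_shuffle(coarse_scores, upscale_factor=2)  # (B, N, H, W)
```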
+ {
507
+ "type": "text",
508
+ "text": "By this way, we can not only leverage the sparsity of the salient features (from $P_l$ ), but also benefit from the rich semantic context in higher pyramid level (from $P_{l + 1}$ ). This helps infer object categories more effectively, especially for those texture-less objects or those with severe occlusion.",
509
+ "bbox": [
510
+ 75,
511
+ 747,
512
+ 468,
513
+ 823
514
+ ],
515
+ "page_idx": 3
516
+ },
517
+ {
518
+ "type": "text",
519
+ "text": "3.3. Detail-Preserving Encoding for Localization",
520
+ "text_level": 1,
521
+ "bbox": [
522
+ 76,
523
+ 830,
524
+ 452,
525
+ 848
526
+ ],
527
+ "page_idx": 3
528
+ },
529
+ {
530
+ "type": "text",
531
+ "text": "Unlike classification, localization is a more fine-grained task which relies on more texture details and boundary information to predict the corners of an object. However,",
532
+ "bbox": [
533
+ 75,
534
+ 854,
535
+ 470,
536
+ 901
537
+ ],
538
+ "page_idx": 3
539
+ },
540
+ {
541
+ "type": "image",
542
+ "img_path": "images/c24c6b3dacfd74b1414432f9d327f21c9db373c4077ba6aecb5f7c92a454dd22.jpg",
543
+ "image_caption": [
544
+ "Figure 4. Detail-preserving encoding for localization."
545
+ ],
546
+ "image_footnote": [],
547
+ "bbox": [
548
+ 498,
549
+ 85,
550
+ 893,
551
+ 250
552
+ ],
553
+ "page_idx": 3
554
+ },
555
+ {
556
+ "type": "text",
557
+ "text": "existing methods usually regress the object corners from a single-scale feature map $P_{l}$ . The feature maps in lower pyramid levels have stronger response to contour, edge and detailed texture of objects. This can further benefit the localization task but often comes with huge extra computational cost [28, 47]. QueryDet [47] uses sparse convolution [16] to reduce computational cost on low-level feature maps, but it still requires an extra auxiliary branch with specific supervision signal. On the other hand, we further note that high-level feature maps are also important for localization as it helps see the whole object as completely as possible, which provides more information to infer the overall shape of objects.",
558
+ "bbox": [
559
+ 496,
560
+ 295,
561
+ 890,
562
+ 491
563
+ ],
564
+ "page_idx": 3
565
+ },
566
+ {
567
+ "type": "text",
568
+ "text": "Based on these observations, we propose Detail-Preseving Encoding (DPE) to achieve accurate localization without sacrificing efficiency. In each pyramid level $l$ , our DPE accepts the feature maps from three pyramid levels, i.e., $P_{l - 1}$ , $P_l$ and $P_{l + 1}$ . $P_{l - 1}$ provides more detail and edge features while $P_{l + 1}$ provides a more thorough perspective of objects. We demonstrate that each pyramid level is mainly related to the two neighboring levels and further levels could even harm the performance. The structure of DPE is shown in Fig. 4. For computational efficiency, we adopt a simplistic U-Net [37] to fuse $P_{l - 1}$ and $P_{l + 1}$ . $P_l$ is first upsampled by a factor of 2 and then aggregated with $P_{l - 1}$ . And a $3\\times 3$ convolutional layer with a stride of 2 downsamples it to the resolution of $P_l$ . This design effectively preserves the detail information in $P_{l - 1}$ with little extra computational cost. At last, $P_{l + 1}$ is upsampled and aggregated to generate the final $G_l^{loc}$ . The computation can be written as:",
569
+ "bbox": [
570
+ 496,
571
+ 492,
572
+ 892,
573
+ 763
574
+ ],
575
+ "page_idx": 3
576
+ },
577
+ {
578
+ "type": "equation",
579
+ "text": "\n$$\nG _ {l} ^ {\\text {l o c}} = P _ {l} + \\mu \\left(P _ {l + 1}\\right) + \\mathrm {D C o n v} \\left(\\mu \\left(P _ {l}\\right) + P _ {l - 1}\\right) \\tag {5}\n$$\n",
580
+ "text_format": "latex",
581
+ "bbox": [
582
+ 519,
583
+ 777,
584
+ 890,
585
+ 796
586
+ ],
587
+ "page_idx": 3
588
+ },
589
+ {
590
+ "type": "text",
591
+ "text": "where $\\mu (\\cdot)$ represents upsampling and DConv $(\\cdot)$ is another shared downsampling convolutional layer. Speically, we compute $G_{3}^{loc}$ with $C_2$ , $P_{3}$ and $P_{4}$ , as computing $P_{2}$ through FPN induces huge computational cost. Afterwards, $\\mathcal{F}_r(\\cdot) = \\{f_{los}(\\cdot),\\mathcal{R}(\\cdot)\\}$ further predicts the bounding boxes in the $l^{\\mathrm{th}}$ pyramid level based on $G_{l}^{loc}$ .",
592
+ "bbox": [
593
+ 496,
594
+ 809,
595
+ 893,
596
+ 902
597
+ ],
598
+ "page_idx": 3
599
+ },
600
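A rough PyTorch-style sketch of the DPE aggregation in Eq. (5) follows (again an illustration under assumptions, not the released implementation; module and argument names are made up and the resolution handling is simplified with an extra interpolation guard):

```python
# Sketch of Detail-Preserving Encoding (DPE), assuming PyTorch.
import torch
import torch.nn as nn
import torch.nn.functional as F

class DPE(nn.Module):
    def __init__(self, channels: int = 256):
        super().__init__()
        # shared 3x3 stride-2 conv playing the role of DConv(.) in Eq. (5)
        self.down_conv = nn.Conv2d(channels, channels, 3, stride=2, padding=1)

    @staticmethod
    def _up(x: torch.Tensor, like: torch.Tensor) -> torch.Tensor:
        # mu(.) in Eq. (5): upsample x to the spatial size of `like`
        return F.interpolate(x, size=like.shape[-2:], mode="nearest")

    def forward(self, p_lm1: torch.Tensor, p_l: torch.Tensor,
                p_lp1: torch.Tensor) -> torch.Tensor:
        # Eq. (5): G_l^loc = P_l + mu(P_{l+1}) + DConv(mu(P_l) + P_{l-1})
        detail = self.down_conv(self._up(p_l, p_lm1) + p_lm1)
        detail = F.interpolate(detail, size=p_l.shape[-2:], mode="nearest")  # guard odd sizes
        return p_l + self._up(p_lp1, p_l) + detail
```

In a detector with an FPN, p_lm1, p_l and p_lp1 would be the pyramid features $P_{l-1}$, $P_l$ and $P_{l+1}$ feeding level $l$'s localization branch.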
+ {
601
+ "type": "table",
602
+ "img_path": "images/fa7110de5491a43605b27b2f9bd8810ec818184a12104af0cf1c6e9c68ab2934.jpg",
603
+ "table_caption": [],
604
+ "table_footnote": [],
605
+ "table_body": "<table><tr><td>Method</td><td>SCE</td><td>DPE</td><td>AP</td><td>\\( AP_{50} \\)</td><td>\\( AP_{75} \\)</td><td>\\( AP_S \\)</td><td>\\( AP_M \\)</td><td>\\( AP_L \\)</td><td>\\( AR_S \\)</td><td>\\( AR_M \\)</td><td>\\( AR_L \\)</td><td>GFLOPs</td></tr><tr><td rowspan=\"4\">FCOS [43]</td><td rowspan=\"3\">✓</td><td></td><td>38.7</td><td>57.4</td><td>41.8</td><td>22.9</td><td>42.5</td><td>50.1</td><td>36.8</td><td>61.7</td><td>73.2</td><td>200.59</td></tr><tr><td></td><td>\\( 39.3_{+0.6} \\)</td><td>58.2</td><td>42.8</td><td>23.3</td><td>43.3</td><td>50.8</td><td>37.5</td><td>62.1</td><td>73.2</td><td>182.62</td></tr><tr><td>✓</td><td>\\( 38.9_{+0.2} \\)</td><td>57.5</td><td>41.8</td><td>22.9</td><td>42.8</td><td>50.3</td><td>37.0</td><td>62.4</td><td>73.2</td><td>213.19</td></tr><tr><td>✓</td><td>✓</td><td>\\( 40.0_{+1.3} \\)</td><td>58.7</td><td>43.1</td><td>23.7</td><td>44.0</td><td>51.8</td><td>38.0</td><td>62.7</td><td>73.3</td><td>195.22</td></tr><tr><td rowspan=\"4\">ATSS [50]</td><td rowspan=\"3\">✓</td><td></td><td>39.4</td><td>57.6</td><td>42.8</td><td>23.6</td><td>42.9</td><td>50.3</td><td>38.2</td><td>63.5</td><td>73.6</td><td>205.30</td></tr><tr><td></td><td>\\( 40.2_{+0.8} \\)</td><td>58.6</td><td>43.8</td><td>23.9</td><td>44.0</td><td>52.2</td><td>38.9</td><td>63.7</td><td>74.0</td><td>187.35</td></tr><tr><td>✓</td><td>\\( 39.8_{+0.4} \\)</td><td>57.8</td><td>42.9</td><td>23.7</td><td>43.1</td><td>50.9</td><td>38.0</td><td>64.0</td><td>75.3</td><td>217.89</td></tr><tr><td>✓</td><td>✓</td><td>\\( 40.8_{+1.4} \\)</td><td>59.0</td><td>44.4</td><td>23.7</td><td>44.6</td><td>52.7</td><td>38.7</td><td>64.4</td><td>75.4</td><td>199.94</td></tr><tr><td rowspan=\"4\">GFL [24]</td><td rowspan=\"3\">✓</td><td></td><td>40.2</td><td>58.4</td><td>43.3</td><td>23.3</td><td>44.0</td><td>52.2</td><td>38.0</td><td>62.9</td><td>74.1</td><td>208.39</td></tr><tr><td></td><td>\\( 41.1_{+0.9} \\)</td><td>59.3</td><td>44.5</td><td>24.0</td><td>44.6</td><td>53.8</td><td>37.7</td><td>63.4</td><td>75.2</td><td>190.44</td></tr><tr><td>✓</td><td>\\( 40.6_{+0.4} \\)</td><td>58.4</td><td>43.8</td><td>23.5</td><td>44.2</td><td>53.2</td><td>37.2</td><td>63.9</td><td>74.6</td><td>220.99</td></tr><tr><td>✓</td><td>✓</td><td>\\( 41.6_{+1.4} \\)</td><td>59.8</td><td>44.9</td><td>23.8</td><td>45.3</td><td>54.8</td><td>38.4</td><td>64.2</td><td>75.0</td><td>203.04</td></tr></table>",
606
+ "bbox": [
607
+ 122,
608
+ 88,
609
+ 848,
610
+ 282
611
+ ],
612
+ "page_idx": 4
613
+ },
614
+ {
615
+ "type": "text",
616
+ "text": "Table 1. Ablation studies on COCO mini-val set. SCE stands for using of Semantic Context Encoding on classification; DPE stands for using of Detail-Preserving Encoding on localization.",
617
+ "bbox": [
618
+ 75,
619
+ 295,
620
+ 892,
621
+ 324
622
+ ],
623
+ "page_idx": 4
624
+ },
625
+ {
626
+ "type": "text",
627
+ "text": "4. Experiments",
628
+ "text_level": 1,
629
+ "bbox": [
630
+ 76,
631
+ 345,
632
+ 209,
633
+ 363
634
+ ],
635
+ "page_idx": 4
636
+ },
637
+ {
638
+ "type": "text",
639
+ "text": "Dataset and Evaluation Metric. Our experiments are conducted on the large-scale detection benchmark MSCOCO 2017 [27]. Following common practice [24, 43, 50], we use COCO trainval35k split (115K images) for training and minival split (5K images) for validation. We report our main results and compare with previous detectors on the test-dev split (20K images) by uploading the detection results to the evaluation server.",
640
+ "bbox": [
641
+ 75,
642
+ 369,
643
+ 468,
644
+ 489
645
+ ],
646
+ "page_idx": 4
647
+ },
648
+ {
649
+ "type": "text",
650
+ "text": "Implementation and Training Details. We implement our TSCODE with MMDetection [4] and cvpods [51]. Unless otherwise noted, we utilize a ResNet-50 [19] backbone network which is pretrained on ImageNet [9]. As TSCODE can be applied as a plug-and-play head to other basic detectors, we follow the original settings in the basic detectors (including loss function, bounding box parameterization, label assignment strategy and hyper-parameters) and merely replace the head part with TSCODE. The input images are resized to a maximum scale of $1333 \\times 800$ during testing without changing the aspect ratio. The computational cost (measured in FLOPs) is calculated under the input size of $1280 \\times 800$ . We set the batch size to 16 and use 8 V100 GPUs (2 images per GPU) to train our models. Following common practice, we apply the standard $1 \\times$ training schedule in the ablation studies. The learning rate starts from 0.01 and decayed by a factor of 10 after 8 and 11 epochs, respectively. We also compare our method with state-of-the-art detectors with different backbone networks, where we adopt $2 \\times$ training schedule and multi-scale training. Please refer to Sec. 4.3 for more details.",
651
+ "bbox": [
652
+ 75,
653
+ 491,
654
+ 470,
655
+ 809
656
+ ],
657
+ "page_idx": 4
658
+ },
659
+ {
660
+ "type": "text",
661
+ "text": "4.1. Ablation Studies",
662
+ "text_level": 1,
663
+ "bbox": [
664
+ 76,
665
+ 816,
666
+ 241,
667
+ 832
668
+ ],
669
+ "page_idx": 4
670
+ },
671
+ {
672
+ "type": "text",
673
+ "text": "We first conduct extensive ablation studies to evaluate the efficacy of our design on COCO minival. We use ResNet-50 as the backbone network and all the models are trained for 12 epochs following $1 \\times$ schedule.",
674
+ "bbox": [
675
+ 75,
676
+ 839,
677
+ 470,
678
+ 902
679
+ ],
680
+ "page_idx": 4
681
+ },
682
+ {
683
+ "type": "image",
684
+ "img_path": "images/9cbe7a719300cd4f1b984b9b786fb35fe9550637fab54521bf65cb0ec7b688e5.jpg",
685
+ "image_caption": [
686
+ "Figure 5. Comparison of classification training loss between FCOS with and without TSCODE. TSCODE can accelerate the training and contributes to better convergence."
687
+ ],
688
+ "image_footnote": [],
689
+ "bbox": [
690
+ 501,
691
+ 344,
692
+ 870,
693
+ 554
694
+ ],
695
+ "page_idx": 4
696
+ },
697
+ {
698
+ "type": "text",
699
+ "text": "Effects of Individual Components. We first study the effectiveness of each component of TSCODE. In the experiments, we ablate our method with three basic detectors, i.e., FCOS [43], ATSS [50] and GFL [24], to evaluate the generality of our designs. For fair comparison, we retrain the baseline models strictly following the original papers with similar or slightly better results.",
700
+ "bbox": [
701
+ 496,
702
+ 627,
703
+ 890,
704
+ 733
705
+ ],
706
+ "page_idx": 4
707
+ },
708
+ {
709
+ "type": "text",
710
+ "text": "As demonstrated in Tab. 1, applying only Semantic Context Encoding (SCE) improves the AP of the baseline models by $0.6 \\sim 0.9$ points, while reducing the computational cost (FLOPs) by $9\\%$ . And note that more significant improvements are observed for large and medium objects. This is consistent with our motivation to leverage more semantic context for the classification task. Moreover, better classification accuracy also facilitates find more objects, and thus SCE achieves better AR results on all the basic detectors.",
711
+ "bbox": [
712
+ 496,
713
+ 734,
714
+ 890,
715
+ 883
716
+ ],
717
+ "page_idx": 4
718
+ },
719
+ {
720
+ "type": "text",
721
+ "text": "When using Detail-Preserving Encoding (DPE) alone,",
722
+ "bbox": [
723
+ 517,
724
+ 885,
725
+ 890,
726
+ 901
727
+ ],
728
+ "page_idx": 4
729
+ },
730
+ {
731
+ "type": "table",
732
+ "img_path": "images/d830e708cef008c7fbac3d46e23063e7ea166e2c890065257422f1eff6c34798.jpg",
733
+ "table_caption": [],
734
+ "table_footnote": [],
735
+ "table_body": "<table><tr><td>Layer</td><td>Kernel</td><td>Pl+1</td><td>AP</td><td>AP50</td><td>AP75</td><td>GFLOPs</td></tr><tr><td>-</td><td>-</td><td></td><td>38.7</td><td>57.4</td><td>41.8</td><td>200.59</td></tr><tr><td>conv</td><td>3 × 3</td><td></td><td>38.6</td><td>57.3</td><td>41.8</td><td>165.99</td></tr><tr><td>avg</td><td></td><td>✓</td><td>39.1</td><td>57.8</td><td>42.7</td><td>179.47</td></tr><tr><td>max</td><td>3 × 3</td><td>✓</td><td>39.1</td><td>57.9</td><td>42.5</td><td>179.47</td></tr><tr><td>conv</td><td></td><td>✓</td><td>39.3</td><td>58.2</td><td>42.8</td><td>182.62</td></tr><tr><td rowspan=\"3\">conv</td><td>3 × 3</td><td>✓</td><td>39.3</td><td>58.2</td><td>42.8</td><td>182.62</td></tr><tr><td>5 × 5</td><td>✓</td><td>39.2</td><td>58.1</td><td>42.8</td><td>188.23</td></tr><tr><td>7 × 7</td><td>✓</td><td>39.2</td><td>58.2</td><td>42.3</td><td>196.65</td></tr></table>",
736
+ "bbox": [
737
+ 81,
738
+ 89,
739
+ 465,
740
+ 227
741
+ ],
742
+ "page_idx": 5
743
+ },
744
+ {
745
+ "type": "text",
746
+ "text": "we also observe consistent improvements on all basic detectors, especially on large objects. However, the gains are less than those from SCE, indicating that the feature context in the original decoupled-head design tends to lean to the localization task.",
747
+ "bbox": [
748
+ 75,
749
+ 280,
750
+ 467,
751
+ 354
752
+ ],
753
+ "page_idx": 5
754
+ },
755
+ {
756
+ "type": "text",
757
+ "text": "At last, we observe significant improvements on all metrics with our full model. The models with TSCODE consistently outperforms the baseline models by $1.3\\sim 1.4$ AP points, which is higher than the sum of the individual improvements. Benefitting from the decoupled feature contexts, the classification branch can learn richer semantic information to infer the category of objects, while the localization branch can benefit from more edge details to accurately predict the object boundaries. Moreover, our method also slightly reduces the computational cost, demonstrating the good efficiency of our design. We further visualize the classification loss when training FCOS [43] with and without TSCODE in Fig. 5. TSCODE can accelerate the training and contributes to better convergence. And similar results can be observed in other basic detectors.",
758
+ "bbox": [
759
+ 75,
760
+ 357,
761
+ 467,
762
+ 580
763
+ ],
764
+ "page_idx": 5
765
+ },
766
+ {
767
+ "type": "text",
768
+ "text": "Semantic Context Encoding. Next, we further study the influence of different ways to conduct SCE with FCOS. The results are shown in Tab. 2. We first feed the classification branch with merely the downsampled $P_{l}$ . It is observed that this model achieves similar results with the baseline but with significantly less computation, indicating the feature redundancy in the classification branch. Next, we put the high-level $P_{l+1}$ into SCE and vary the operation to down-sample $P_{l}$ . The kernel size is fixed to $3 \\times 3$ and the stride is 2 for all operations. As observed in Tab. 2, average pooling and max pooling performs slightly worse than convolution, albeit with less computational cost. We assume that average pooling is inefficient in extracting sharp classification signals, while the sparse connections caused by max pooling harms the convergence of the models. At last, we study the influence of the kernel size in convolutions. And a large kernel does not necessarily brings better results. We argue that a large kernel may include too much noise signals, which harms the classification performance. However, the performance of the detector is still improved regardless of how SCE is generated.",
769
+ "bbox": [
770
+ 75,
771
+ 583,
772
+ 467,
773
+ 900
774
+ ],
775
+ "page_idx": 5
776
+ },
777
+ {
778
+ "type": "table",
779
+ "img_path": "images/061115e3f8570049d0cb085154d87707d2c8af82bff12033f7098ebb369c1363.jpg",
780
+ "table_caption": [
781
+ "Table 2. Performance of different ways to generate Semantic Context Encoding (SCE) for classification branch on FCOS [43]."
782
+ ],
783
+ "table_footnote": [],
784
+ "table_body": "<table><tr><td>Pl+2</td><td>Pl+1</td><td>Pl</td><td>P1-1</td><td>AP</td><td>AP50</td><td>AP75</td><td>APS</td><td>APM</td><td>APL</td></tr><tr><td></td><td></td><td>✓</td><td></td><td>39.3</td><td>58.2</td><td>42.8</td><td>23.3</td><td>43.3</td><td>50.8</td></tr><tr><td></td><td>✓</td><td>✓</td><td></td><td>39.4</td><td>58.3</td><td>42.6</td><td>23.3</td><td>43.2</td><td>51.0</td></tr><tr><td></td><td>✓</td><td></td><td>✓</td><td>39.6</td><td>58.4</td><td>42.8</td><td>23.9</td><td>43.3</td><td>51.4</td></tr><tr><td></td><td></td><td>✓</td><td>✓</td><td>39.7</td><td>58.1</td><td>43.1</td><td>23.2</td><td>43.7</td><td>51.8</td></tr><tr><td></td><td>✓</td><td>✓</td><td>✓</td><td>40.0</td><td>58.7</td><td>43.1</td><td>23.7</td><td>44.0</td><td>51.8</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>39.9</td><td>58.6</td><td>43.2</td><td>23.1</td><td>43.7</td><td>52.1</td></tr></table>",
785
+ "bbox": [
786
+ 504,
787
+ 89,
788
+ 888,
789
+ 202
790
+ ],
791
+ "page_idx": 5
792
+ },
793
+ {
794
+ "type": "table",
795
+ "img_path": "images/95eaa197eed2377a616648365b10d48f5053d0feebecc0f027caadf7ba86d55c.jpg",
796
+ "table_caption": [
797
+ "Table 3. Performance of different ways to generate Detail-Preserving Encoding (DPE) for localization branch on FCOS [43]."
798
+ ],
799
+ "table_footnote": [],
800
+ "table_body": "<table><tr><td>Method</td><td>AP</td><td>\\( AP_{50} \\)</td><td>\\( AP_{75} \\)</td><td>\\( AP_S \\)</td><td>\\( AP_M \\)</td><td>\\( AP_L \\)</td></tr><tr><td>FCOS [43]</td><td>38.7</td><td>57.4</td><td>41.8</td><td>22.9</td><td>42.5</td><td>50.1</td></tr><tr><td>FCOS (+ours)</td><td>40.0+1.3</td><td>58.7</td><td>43.1</td><td>23.7</td><td>44.0</td><td>51.8</td></tr><tr><td>ATSS [50]</td><td>39.4</td><td>57.6</td><td>42.8</td><td>23.6</td><td>42.9</td><td>50.3</td></tr><tr><td>ATSS (+ours)</td><td>40.8+1.4</td><td>59.0</td><td>44.4</td><td>23.7</td><td>44.6</td><td>52.7</td></tr><tr><td>GFL [24]</td><td>40.2</td><td>58.4</td><td>43.3</td><td>23.3</td><td>44.0</td><td>52.2</td></tr><tr><td>GFL (+ours)</td><td>41.6+1.4</td><td>59.8</td><td>44.9</td><td>23.8</td><td>45.3</td><td>54.8</td></tr><tr><td>AutoAssign [52]</td><td>40.4</td><td>59.6</td><td>43.7</td><td>22.7</td><td>44.1</td><td>52.9</td></tr><tr><td>AutoAssign (+ours)</td><td>41.1+0.7</td><td>60.2</td><td>44.1</td><td>23.0</td><td>45.0</td><td>54.2</td></tr><tr><td>DDOD [6]</td><td>41.6</td><td>59.9</td><td>45.2</td><td>23.9</td><td>44.9</td><td>54.4</td></tr><tr><td>DDOD (+ours)</td><td>42.4+0.8</td><td>60.2</td><td>46.3</td><td>24.5</td><td>45.5</td><td>56.0</td></tr><tr><td>DeFCN* [44]</td><td>41.4</td><td>59.5</td><td>45.6</td><td>26.1</td><td>44.9</td><td>52.0</td></tr><tr><td>DeFCN* (+ours)</td><td>42.3+0.9</td><td>60.7</td><td>46.7</td><td>27.0</td><td>45.5</td><td>53.6</td></tr><tr><td>OTA [11]</td><td>40.7</td><td>58.4</td><td>44.3</td><td>23.2</td><td>45.0</td><td>53.7</td></tr><tr><td>OTA (+ours)</td><td>41.5+0.8</td><td>59.2</td><td>45.3</td><td>23.4</td><td>45.2</td><td>55.7</td></tr><tr><td>DW [22]</td><td>41.5</td><td>59.8</td><td>44.8</td><td>23.4</td><td>44.9</td><td>54.8</td></tr><tr><td>DW (+ours)</td><td>42.0+0.5</td><td>60.3</td><td>45.1</td><td>24.1</td><td>45.5</td><td>56.0</td></tr></table>",
801
+ "bbox": [
802
+ 503,
803
+ 252,
804
+ 888,
805
+ 502
806
+ ],
807
+ "page_idx": 5
808
+ },
809
+ {
810
+ "type": "text",
811
+ "text": "Table 4. Applying TSCODE into various popular dense object detectors. Method with * means training for $3 \\times$ schedule follow its official repository.",
812
+ "bbox": [
813
+ 498,
814
+ 507,
815
+ 890,
816
+ 550
817
+ ],
818
+ "page_idx": 5
819
+ },
820
+ {
821
+ "type": "text",
822
+ "text": "Detail-Preserving Encoding. At last, we investigate the efficacy of our design of DPE by ablating the feature maps from different levels in Tab. 3, where SCE is used in all the models by default. Individually aggregating the high-level feature map $P_{l+1}$ with $P_l$ only achieves marginal improvements, but the incorporation of $P_{l-1}$ contributes to more significant performance gains as more detail and edge information is obtained. Interestingly, the model with $P_{l+1}$ and $P_{l-1}$ outperforms the one with $P_{l+1}$ and $P_l$ , which again demonstrates the importance of detail information. And the model with all three feature maps achieves significant improvements, especially on large and medium objects. Note that the gains from three levels are greater than the sum of the individual gains, which means $P_{l+1}$ could provide more benefit if there are adequate detail information. At last, we further add $P_{l+2}$ and find a slight decay in the results, suggesting that too much environmental information may be useless or even harmful for bounding box regression.",
823
+ "bbox": [
824
+ 496,
825
+ 565,
826
+ 890,
827
+ 837
828
+ ],
829
+ "page_idx": 5
830
+ },
831
+ {
832
+ "type": "text",
833
+ "text": "4.2. Generality to Different Detectors",
834
+ "text_level": 1,
835
+ "bbox": [
836
+ 500,
837
+ 847,
838
+ 790,
839
+ 863
840
+ ],
841
+ "page_idx": 5
842
+ },
843
+ {
844
+ "type": "text",
845
+ "text": "To evaluate the generality of TSCODE, we further apply it to 8 recent popular one-stage detectors [6, 11, 22, 24, 43,",
846
+ "bbox": [
847
+ 498,
848
+ 869,
849
+ 890,
850
+ 900
851
+ ],
852
+ "page_idx": 5
853
+ },
854
+ {
855
+ "type": "table",
856
+ "img_path": "images/2dae580d6afbf1b2944a259fc2a929e740c81543e7351574379b7ca864c84cf9.jpg",
857
+ "table_caption": [],
858
+ "table_footnote": [
859
+ "Table 5. Performance comparison with state-of-the-art detectors (single-model and single-scale results except the last row) on COCO test-dev. TSCODE consistently outperforms the strong baseline GFL [24] by $1.6 \\sim 1.8$ AP and even outperforms its improved version GFLV2 [23]. DCN: Deformable Convolutional Network [8]."
860
+ ],
861
+ "table_body": "<table><tr><td>Method</td><td>Backbone</td><td>AP</td><td>\\( AP_{50} \\)</td><td>\\( AP_{75} \\)</td><td>\\( AP_S \\)</td><td>\\( AP_M \\)</td><td>\\( AP_L \\)</td><td>Reference</td></tr><tr><td>ATSS [50]</td><td>ResNet-101</td><td>43.6</td><td>62.1</td><td>47.4</td><td>26.1</td><td>47.0</td><td>53.6</td><td>CVPR20</td></tr><tr><td>PAA [21]</td><td>ResNet-101</td><td>44.8</td><td>63.3</td><td>48.7</td><td>26.5</td><td>48.8</td><td>56.3</td><td>ECCV20</td></tr><tr><td>GFL [24]</td><td>ResNet-101</td><td>45.0</td><td>63.7</td><td>48.9</td><td>27.2</td><td>48.8</td><td>54.5</td><td>NeurIPS20</td></tr><tr><td>GFLV2 [23]</td><td>ResNet-101</td><td>46.2</td><td>64.3</td><td>50.5</td><td>27.8</td><td>49.9</td><td>57.0</td><td>CVPR21</td></tr><tr><td>OTA [11]</td><td>ResNet-101</td><td>45.3</td><td>63.5</td><td>49.3</td><td>26.9</td><td>48.8</td><td>56.1</td><td>CVPR21</td></tr><tr><td>IQDet [30]</td><td>ResNet-101</td><td>45.1</td><td>63.4</td><td>49.3</td><td>26.7</td><td>48.5</td><td>56.6</td><td>CVPR21</td></tr><tr><td>ObjectBox [48]</td><td>ResNet-101</td><td>46.1</td><td>65.0</td><td>48.3</td><td>26.0</td><td>48.7</td><td>57.3</td><td>ECCV22</td></tr><tr><td>TSCODE (ours) + [24]</td><td>ResNet-101</td><td>46.7</td><td>65.1</td><td>50.8</td><td>28.2</td><td>50.3</td><td>57.7</td><td>-</td></tr><tr><td>ATSS [50]</td><td>ResNeXt-101-32x8d</td><td>45.1</td><td>63.9</td><td>49.1</td><td>27.9</td><td>48.2</td><td>54.6</td><td>CVPR20</td></tr><tr><td>PAA [21]</td><td>ResNeXt-101-64x4d</td><td>46.6</td><td>65.6</td><td>50.8</td><td>28.8</td><td>50.4</td><td>57.9</td><td>ECCV20</td></tr><tr><td>GFL [24]</td><td>ResNeXt-101-32x4d</td><td>46.0</td><td>65.1</td><td>50.1</td><td>28.2</td><td>49.6</td><td>56.0</td><td>NeurIPS20</td></tr><tr><td>GFLV2 [23]</td><td>ResNeXt-101-32x4d</td><td>47.2</td><td>65.7</td><td>51.7</td><td>29.1</td><td>50.8</td><td>58.2</td><td>CVPR21</td></tr><tr><td>OTA [11]</td><td>ResNeXt-101-64x4d</td><td>47.0</td><td>65.8</td><td>51.1</td><td>29.2</td><td>50.4</td><td>57.9</td><td>CVPR21</td></tr><tr><td>IQDet [30]</td><td>ResNeXt-101-64x4d</td><td>47.0</td><td>65.7</td><td>51.1</td><td>29.1</td><td>50.5</td><td>57.9</td><td>CVPR21</td></tr><tr><td>TSCODE (ours) + [24]</td><td>ResNeXt-101-32x4d</td><td>47.6</td><td>66.3</td><td>51.8</td><td>29.5</td><td>51.1</td><td>58.6</td><td>-</td></tr><tr><td>TSCODE (ours) + [24]</td><td>ResNeXt-101-64x4d</td><td>48.3</td><td>67.1</td><td>52.6</td><td>30.0</td><td>51.9</td><td>59.9</td><td>-</td></tr><tr><td>ATSS [50]</td><td>ResNeXt-101-32x8d-DCN</td><td>47.7</td><td>66.6</td><td>52.1</td><td>29.3</td><td>50.8</td><td>59.7</td><td>CVPR20</td></tr><tr><td>PAA [21]</td><td>ResNeXt-101-64x4d-DCN</td><td>49.0</td><td>67.8</td><td>53.3</td><td>30.2</td><td>52.8</td><td>62.2</td><td>ECCV20</td></tr><tr><td>GFL [24]</td><td>ResNeXt-101-32x4d-DCN</td><td>48.2</td><td>67.4</td><td>52.6</td><td>29.2</td><td>51.7</td><td>60.2</td><td>NeurIPS20</td></tr><tr><td>GFLV2 [23]</td><td>ResNeXt-101-32x4d-DCN</td><td>49.0</td><td>67.6</td><td>53.5</td><td>29.7</td><td>52.4</td><td>61.4</td><td>CVPR21</td></tr><tr><td>OTA [11]</td><td>ResNeXt-101-64x4d-DCN</td><td>49.2</td><td>67.6</td><td>53.5</td><td>30.0</td><td>52.5</td><td>62.3</td><td>CVPR21</td></tr><tr><td>IQDet [30]</td><td>ResNeXt-101-64x4d-DCN</td><td>49.0</td><td>67.5</td><td>53.1</td><td>30.0</td><td>52.3</td><td>62.0</td><td>CVPR21</td></tr><tr><td>TSCODE (ours) + 
[24]</td><td>ResNeXt-101-32x4d-DCN</td><td>50.0</td><td>68.5</td><td>54.6</td><td>31.0</td><td>53.4</td><td>62.6</td><td>-</td></tr><tr><td>TSCODE (ours) + [24]</td><td>ResNeXt-101-64x4d-DCN</td><td>50.8</td><td>69.3</td><td>55.3</td><td>31.7</td><td>54.2</td><td>63.7</td><td>-</td></tr></table>",
862
+ "bbox": [
863
+ 122,
864
+ 85,
865
+ 841,
866
+ 529
867
+ ],
868
+ "page_idx": 6
869
+ },
870
+ {
871
+ "type": "text",
872
+ "text": "44,50,52] and evaluate the performance on the COCO minival. As shown in Tab. 4, TSCODE consistently improves the performance of different baseline detectors by $0.5\\sim 1.4$ AP points. Notably, TSCODE achieves improvements of 0.8 AP on the detector DDOD [6] which is designed with a specific disentanglement structure for the classification and localization tasks. Moreover, our method provides the latest detector DW [22] with a performance gain of 0.5 AP, which further demonstrates the efficacy of TSCODE.",
873
+ "bbox": [
874
+ 75,
875
+ 593,
876
+ 470,
877
+ 731
878
+ ],
879
+ "page_idx": 6
880
+ },
881
+ {
882
+ "type": "text",
883
+ "text": "Moreover, DPE can also be regarded as a simple feature fusion module if its output features are directly sent to the classification branch (without context decoupling). To this end, we compare it with the popular PAFPN [28]. They differ in two aspects: First, we leverage a semantic context augmentation from $P_{l}$ to $P_{l-1}$ before bottom-up aggregation ( $P_{l-1}$ back to $P_{l}$ ) which can enrich the feature representation and benefit the classification and localization tasks. Second, the weight of convolution downsampling is shared between all the head, which not only makes the detector parameter-efficient but improves the detection per",
884
+ "bbox": [
885
+ 75,
886
+ 734,
887
+ 470,
888
+ 902
889
+ ],
890
+ "page_idx": 6
891
+ },
892
+ {
893
+ "type": "text",
894
+ "text": "formance. Third, for each pyramid level, we only aggregate the features from two neighboring levels as we assume that features in a lower level may be helpless for a higher level. We compare DPE and PAFPN on four basic detectors [10,24,43,50]. As observed in Tab. 6, our DPE achieves more performance improvements than PAFPN [28]. DPE outperforms the baselines by about $0.5\\sim 0.8$ AP with negligible extra computational cost, but PAFPN only achieves marginal improvements (about 0.2), indicating the strong generality of our DPE. However, as the context is not decoupled, the improvements are still limited.",
895
+ "bbox": [
896
+ 496,
897
+ 593,
898
+ 893,
899
+ 762
900
+ ],
901
+ "page_idx": 6
902
+ },
903
+ {
904
+ "type": "text",
905
+ "text": "4.3. Comparisons with State-of-the-arts",
906
+ "text_level": 1,
907
+ "bbox": [
908
+ 498,
909
+ 770,
910
+ 807,
911
+ 787
912
+ ],
913
+ "page_idx": 6
914
+ },
915
+ {
916
+ "type": "text",
917
+ "text": "At last, we compare TSCODE with recent state-of-the-art detectors on COCO test-dev. Here we select GFL [24] as our baseline. Following common practice, we adopt $2 \\times$ schedule to train our models with standard multi-scale training strategy. The results are reported with single-model single-scale testing for all methods.",
918
+ "bbox": [
919
+ 496,
920
+ 794,
921
+ 890,
922
+ 883
923
+ ],
924
+ "page_idx": 6
925
+ },
926
+ {
927
+ "type": "text",
928
+ "text": "As shown in Tab. 5, our method improves the perfor",
929
+ "bbox": [
930
+ 517,
931
+ 885,
932
+ 890,
933
+ 901
934
+ ],
935
+ "page_idx": 6
936
+ },
937
+ {
938
+ "type": "image",
939
+ "img_path": "images/2fae02773a97ba2878776dc1bd337085bf709e974762c3046a3ff3c4a3622e8b.jpg",
940
+ "image_caption": [
941
+ "Figure 6. Detection results before and after NMS predicted by FCOS [43] with and without TSCODE. TOP: Before NMS, the bounding boxes correspond to that in Fig. 1. Bottom: After NMS, the green boxes here mean ground-truth bounding boxes."
942
+ ],
943
+ "image_footnote": [],
944
+ "bbox": [
945
+ 76,
946
+ 85,
947
+ 890,
948
+ 338
949
+ ],
950
+ "page_idx": 7
951
+ },
952
+ {
953
+ "type": "text",
954
+ "text": "mance of GFL to 46.7 AP and 47.6 AP with ResNet-101 [19] and ResNeXt-101-32x4d [46] backbones, respectively, outperforming all other methods [11, 21, 23, 30, 50]. When using ResNeXt-101-64x4d [46] backbone, the performance of TSCODE can be further improved to 48.3 AP. We also conduct more experiments by adopting Deformable Convolutional Networks (DCN) [8] to ResNeXt backbones. Following [49], we replace the standard convolution in the last layer before prediction with deformable convolutions. Without bells and whistles, TSCODE achieves the AP of 50.8 points, demonstrating the strong compatibility of our method with the advanced techniques in object detection.",
955
+ "bbox": [
956
+ 75,
957
+ 395,
958
+ 472,
959
+ 577
960
+ ],
961
+ "page_idx": 7
962
+ },
963
+ {
964
+ "type": "text",
965
+ "text": "4.4. Qualitative Results",
966
+ "text_level": 1,
967
+ "bbox": [
968
+ 76,
969
+ 589,
970
+ 261,
971
+ 604
972
+ ],
973
+ "page_idx": 7
974
+ },
975
+ {
976
+ "type": "text",
977
+ "text": "In Fig. 6, we qualitatively demonstrate how TSCODE helps detectors improve their performance. For simplicity, we take FCOS [43] as an example. In the typical decoupled-head design, the classification branch and the localization branch share the same input features. However, their different preferences for context causes severe competition. As demonstrated before, the feature context could lean to localization task, and thus detectors is prone to predict a poor classification confidence for the bounding box with high IoU. As a result, these high-quality bounding boxes is unlikely to survive after NMS. This phenomenon is more significant on objects with texture-less surfaces, e.g., dark couch (the $1^{\\mathrm{st}}$ column) or shiny spoon (the $2^{\\mathrm{nd}}$ column). Another example is the train (the $3^{\\mathrm{rd}}$ column), where more global context is required to successfully recognize the train. However, it is difficult to accurately localize the boundary of the train from a coarse feature map as the typical decoupled head does. On the contrary, our TSCODE disentangles the feature context and generates feature inputs",
978
+ "bbox": [
979
+ 75,
980
+ 613,
981
+ 470,
982
+ 900
983
+ ],
984
+ "page_idx": 7
985
+ },
986
+ {
987
+ "type": "table",
988
+ "img_path": "images/b423e6138d26a4390c9e981aad047121a9b3b7f022607fbedcec56faf57bdd45.jpg",
989
+ "table_caption": [],
990
+ "table_footnote": [],
991
+ "table_body": "<table><tr><td>Method</td><td>AP</td><td>\\( AP_{50} \\)</td><td>\\( AP_{75} \\)</td><td>GFLOPs</td></tr><tr><td>FCOS [43]</td><td>38.7</td><td>57.4</td><td>41.8</td><td>200.59</td></tr><tr><td>FCOS w/ PAFPN</td><td>38.7</td><td>57.4</td><td>41.7</td><td>206.49</td></tr><tr><td>FCOS w/ DPE</td><td>\\( 39.2_{+0.5} \\)</td><td>58.0</td><td>42.4</td><td>213.62</td></tr><tr><td>ATSS [50]</td><td>39.4</td><td>57.6</td><td>42.8</td><td>205.30</td></tr><tr><td>ATSS w/ PAFPN</td><td>39.6</td><td>58.3</td><td>42.2</td><td>211.20</td></tr><tr><td>ATSS w/ DPE</td><td>\\( 40.2_{+0.8} \\)</td><td>58.2</td><td>42.3</td><td>217.89</td></tr><tr><td>GFL [24]</td><td>40.2</td><td>58.4</td><td>43.3</td><td>208.39</td></tr><tr><td>GFL w/ PAFPN</td><td>40.4</td><td>58.6</td><td>43.6</td><td>214.29</td></tr><tr><td>GFL w/ DPE</td><td>\\( 40.8_{+0.6} \\)</td><td>58.8</td><td>43.9</td><td>220.99</td></tr><tr><td>TOOD [10]</td><td>42.4</td><td>59.8</td><td>46.1</td><td>-</td></tr><tr><td>TOOD w/ PAFPN</td><td>42.6</td><td>59.9</td><td>46.2</td><td>-</td></tr><tr><td>TOOD w/ DPE</td><td>\\( 43.0_{+0.6} \\)</td><td>60.3</td><td>46.4</td><td>-</td></tr></table>",
992
+ "bbox": [
993
+ 509,
994
+ 392,
995
+ 883,
996
+ 588
997
+ ],
998
+ "page_idx": 7
999
+ },
1000
+ {
1001
+ "type": "text",
1002
+ "text": "Table 6. Comparison of our DPE and PAFPN [28]. FLOPs are measured on the input image size of $1280 \\times 800$ . Since TOOD [10] contains complex deformable sampling [8], we don't report its computational cost.",
1003
+ "bbox": [
1004
+ 498,
1005
+ 598,
1006
+ 892,
1007
+ 655
1008
+ ],
1009
+ "page_idx": 7
1010
+ },
1011
+ {
1012
+ "type": "text",
1013
+ "text": "with specific feature context for each task. As a result, we can leverage more semantic context for classification and more detail and edge information for localization.",
1014
+ "bbox": [
1015
+ 498,
1016
+ 672,
1017
+ 890,
1018
+ 718
1019
+ ],
1020
+ "page_idx": 7
1021
+ },
1022
+ {
1023
+ "type": "text",
1024
+ "text": "5. Conclusion",
1025
+ "text_level": 1,
1026
+ "bbox": [
1027
+ 500,
1028
+ 737,
1029
+ 617,
1030
+ 753
1031
+ ],
1032
+ "page_idx": 7
1033
+ },
1034
+ {
1035
+ "type": "text",
1036
+ "text": "In this paper, we have delved into the root causes of the inherent competition between classification and localization tasks and proposed a novel TSCODE to eliminate this competition. It decouples the semantic context of the two tasks through two efficient designs, SCE and DPE, and brings the features with richer semantic information for classification and with more edge information for localization. Extensive experiments on the MS COCO benchmark demonstrate the effectiveness the strong generality of TSCODE.",
1037
+ "bbox": [
1038
+ 496,
1039
+ 763,
1040
+ 890,
1041
+ 900
1042
+ ],
1043
+ "page_idx": 7
1044
+ },
1045
+ {
1046
+ "type": "text",
1047
+ "text": "References",
1048
+ "text_level": 1,
1049
+ "bbox": [
1050
+ 78,
1051
+ 89,
1052
+ 173,
1053
+ 104
1054
+ ],
1055
+ "page_idx": 8
1056
+ },
1057
+ {
1058
+ "type": "list",
1059
+ "sub_type": "ref_text",
1060
+ "list_items": [
1061
+ "[1] Sean Bell, C Lawrence Zitnick, Kavita Bala, and Ross Girshick. Inside-outside net: Detecting objects in context with skip pooling and recurrent neural networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2874-2883, 2016. 1",
1062
+ "[2] Zhaowei Cai and Nuno Vasconcelos. Cascade r-cnn: Delving into high quality object detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6154-6162, 2018. 2",
1063
+ "[3] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229. Springer, 2020. 2",
1064
+ "[4] Kai Chen, Jiaqi Wang, Jiangmiao Pang, Yuhang Cao, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jiarui Xu, et al. Mmdetection: Open mmlab detection toolbox and benchmark. arXiv preprint arXiv:1906.07155, 2019. 5",
1065
+ "[5] Qiang Chen, Yingming Wang, Tong Yang, Xiangyu Zhang, Jian Cheng, and Jian Sun. You only look one-level feature. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 13039-13048, 2021. 2",
1066
+ "[6] Zehui Chen, Chenhongyi Yang, Qiaofei Li, Feng Zhao, Zheng-Jun Zha, and Feng Wu. Disentangle your dense object detector. In Proceedings of the 29th ACM International Conference on Multimedia, pages 4939-4948, 2021. 1, 2, 3, 6, 7",
1067
+ "[7] Jifeng Dai, Yi Li, Kaiming He, and Jian Sun. R-fcn: Object detection via region-based fully convolutional networks. Advances in neural information processing systems, 29, 2016. 1",
1068
+ "[8] Jifeng Dai, Haozhi Qi, Yuwen Xiong, Yi Li, Guodong Zhang, Han Hu, and Yichen Wei. Deformable convolutional networks. In Proceedings of the IEEE international conference on computer vision, pages 764-773, 2017. 7, 8",
1069
+ "[9] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248–255. IEEE, 2009. 5",
1070
+ "[10] Chengjian Feng, Yujie Zhong, Yu Gao, Matthew R Scott, and Weilin Huang. Toed: Task-aligned one-stage object detection. In 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 3490-3499. IEEE Computer Society, 2021. 3, 7, 8",
1071
+ "[11] Zheng Ge, Songtao Liu, Zeming Li, Osamu Yoshie, and Jian Sun. Ota: Optimal transport assignment for object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 303-312, 2021. 3, 6, 7, 8",
1072
+ "[12] Zheng Ge, Songtao Liu, Feng Wang, Zeming Li, and Jian Sun. Yolox: Exceeding yolo series in 2021. arXiv preprint arXiv:2107.08430, 2021. 2",
1073
+ "[13] Golnaz Ghiasi, Tsung-Yi Lin, and Quoc V Le. Nas-fpn: Learning scalable feature pyramid architecture for object detection. In Proceedings of the IEEE/CVF conference on"
1074
+ ],
1075
+ "bbox": [
1076
+ 76,
1077
+ 114,
1078
+ 470,
1079
+ 901
1080
+ ],
1081
+ "page_idx": 8
1082
+ },
1083
+ {
1084
+ "type": "list",
1085
+ "sub_type": "ref_text",
1086
+ "list_items": [
1087
+ "computer vision and pattern recognition, pages 7036-7045, 2019. 2",
1088
+ "[14] Ross Girshick. Fast r-cnn. In Proceedings of the IEEE international conference on computer vision, pages 1440-1448, 2015. 2",
1089
+ "[15] Ross Girshick, Jeff Donahue, Trevor Darrell, and Jitendra Malik. Rich feature hierarchies for accurate object detection and semantic segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 580-587, 2014. 1, 2",
1090
+ "[16] Benjamin Graham, Martin Engelcke, and Laurens Van Der Maaten. 3d semantic segmentation with submanifold sparse convolutional networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 9224-9232, 2018. 4",
1091
+ "[17] Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE international conference on computer vision, pages 2961-2969, 2017. 1, 2",
1092
+ "[18] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Spatial pyramid pooling in deep convolutional networks for visual recognition. IEEE transactions on pattern analysis and machine intelligence, 37(9):1904-1916, 2015. 1",
1093
+ "[19] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 5, 8",
1094
+ "[20] Borui Jiang, Ruixuan Luo, Jiayuan Mao, Tete Xiao, and Yun-ting Jiang. Acquisition of localization confidence for accurate object detection. In Proceedings of the European conference on computer vision (ECCV), pages 784–799, 2018. 3",
1095
+ "[21] Kang Kim and Hee Seok Lee. Probabilistic anchor assignment with iou prediction for object detection. In European Conference on Computer Vision, pages 355-371. Springer, 2020. 7, 8",
1096
+ "[22] Shuai Li, Chenhang He, Ruihuang Li, and Lei Zhang. A dual weighting label assignment scheme for object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9387-9396, 2022. 6, 7",
1097
+ "[23] Xiang Li, Wenhai Wang, Xiaolin Hu, Jun Li, Jinhui Tang, and Jian Yang. Generalized focal loss v2: Learning reliable localization quality estimation for dense object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11632-11641, 2021. 7, 8",
1098
+ "[24] Xiang Li, Wenhai Wang, Lijun Wu, Shuo Chen, Xiaolin Hu, Jun Li, Jinhui Tang, and Jian Yang. Generalized focal loss: Learning qualified and distributed bounding boxes for dense object detection. Advances in Neural Information Processing Systems, 33:21002-21012, 2020. 2, 3, 5, 6, 7, 8",
1099
+ "[25] Tsung-Yi Lin, Piotr Dollar, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Belongie. Feature pyramid networks for object detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2117-2125, 2017. 1, 2, 3"
1100
+ ],
1101
+ "bbox": [
1102
+ 501,
1103
+ 92,
1104
+ 890,
1105
+ 901
1106
+ ],
1107
+ "page_idx": 8
1108
+ },
1109
+ {
1110
+ "type": "list",
1111
+ "sub_type": "ref_text",
1112
+ "list_items": [
1113
+ "[26] Tsung-Yi Lin, Priya Goyal, Ross Girshick, Kaiming He, and Piotr Dólár. Focal loss for dense object detection. In Proceedings of the IEEE international conference on computer vision, pages 2980-2988, 2017. 1, 2, 3",
1114
+ "[27] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dálár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. 5",
1115
+ "[28] Shu Liu, Lu Qi, Haifang Qin, Jianping Shi, and Jiaya Jia. Path aggregation network for instance segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8759-8768, 2018. 2, 4, 7, 8",
1116
+ "[29] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang Fu, and Alexander C Berg. Ssd: Single shot multibox detector. In European conference on computer vision, pages 21-37. Springer, 2016. 2",
1117
+ "[30] Yuchen Ma, Songtao Liu, Zeming Li, and Jian Sun. Iqdet: Instance-wise quality distribution sampling for object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1717-1725, 2021. 7, 8",
1118
+ "[31] Roozbeh Mottaghi, Xianjie Chen, Xiaobai Liu, Nam-Gyu Cho, Seong-Whan Lee, Sanja Fidler, Raquel Urtasun, and Alan Yuille. The role of context for object detection and semantic segmentation in the wild. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2014. 1",
1119
+ "[32] Jiangmiao Pang, Kai Chen, Jianping Shi, Huajun Feng, Wanli Ouyang, and Dahua Lin. Libra r-cnn: Towards balanced learning for object detection. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 821-830, 2019. 2",
1120
+ "[33] Joseph Redmon, Santosh Divvala, Ross Girshick, and Ali Farhadi. You only look once: Unified, real-time object detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 779-788, 2016. 1, 2",
1121
+ "[34] Joseph Redmon and Ali Farhadi. Yolo9000: better, faster, stronger. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7263-7271, 2017. 2",
1122
+ "[35] Joseph Redmon and Ali Farhadi. Yolov3: An incremental improvement. arXiv preprint arXiv:1804.02767, 2018. 2",
1123
+ "[36] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems, 28, 2015. 1, 2",
1124
+ "[37] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In International Conference on Medical image computing and computer-assisted intervention, pages 234-241. Springer, 2015. 4",
1125
+ "[38] Rasmus Rothe, Matthieu Guillaumin, and Luc Van Gool. Non-maximum suppression for object detection by passing messages between windows. In Asian conference on computer vision, pages 290-306. Springer, 2014. 2",
1126
+ "[39] Bharat Singh and Larry S Davis. An analysis of scale invariance in object detection snip. In Proceedings of the"
1127
+ ],
1128
+ "bbox": [
1129
+ 78,
1130
+ 90,
1131
+ 468,
1132
+ 900
1133
+ ],
1134
+ "page_idx": 9
1135
+ },
1136
+ {
1137
+ "type": "list",
1138
+ "sub_type": "ref_text",
1139
+ "list_items": [
1140
+ "IEEE conference on computer vision and pattern recognition, pages 3578-3587, 2018. 2",
1141
+ "[40] Guanglu Song, Yu Liu, and Xiaogang Wang. Revisiting the sibling head in object detector. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11563-11572, 2020. 1, 2, 3",
1142
+ "[41] Mingxing Tan, Ruoming Pang, and Quoc V Le. Efficientdet: Scalable and efficient object detection. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10781-10790, 2020. 2",
1143
+ "[42] Zhiyu Tan, Junyan Wang, Xiuyu Sun, Ming Lin, Hao Li, et al. Giraffedet: A heavy-neck paradigm for object detection. In International Conference on Learning Representations, 2021. 2",
1144
+ "[43] Zhi Tian, Chunhua Shen, Hao Chen, and Tong He. Fcos: Fully convolutional one-stage object detection. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9627-9636, 2019. 1, 2, 5, 6, 7, 8",
1145
+ "[44] Jianfeng Wang, Lin Song, Zeming Li, Hongbin Sun, Jian Sun, and Nanning Zheng. End-to-end object detection with fully convolutional network. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 15849-15858, 2021. 6",
1146
+ "[45] Yue Wu, Yinpeng Chen, Lu Yuan, Zicheng Liu, Lijuan Wang, Hongzhi Li, and Yun Fu. Rethinking classification and localization for object detection. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10186-10195, 2020. 1, 2, 3",
1147
+ "[46] Saining Xie, Ross Girshick, Piotr Dólár, Zhuowen Tu, and Kaiming He. Aggregated residual transformations for deep neural networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1492-1500, 2017. 8",
1148
+ "[47] Chenhongyi Yang, Zehao Huang, and Naiyan Wang. Querydet: Cascaded sparse query for accelerating high-resolution small object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13668-13677, 2022. 4",
1149
+ "[48] Mohsen Zand, Ali Etemad, and Michael Greenspan. Object-box: From centers to boxes for anchor-free object detection. arXiv preprint arXiv:2207.06985, 2022. 7",
1150
+ "[49] Haoyang Zhang, Ying Wang, Feras Dayoub, and Niko Sunderhauf. Varifocalnet: An iou-aware dense object detector. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8514-8523, 2021. 8",
1151
+ "[50] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z Li. Bridging the gap between anchor-based and anchor-free detection via adaptive training sample selection. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9759-9768, 2020. 2, 3, 5, 6, 7, 8",
1152
+ "[51] Benjin Zhu*, Feng Wang*, Jianfeng Wang, Siwei Yang, Jianhu Chen, and Zeming Li. cvpods: All-in-one toolbox for computer vision research, 2020. 5",
1153
+ "[52] Benjin Zhu, Jianfeng Wang, Zhengkai Jiang, Fuhang Zong, Songtao Liu, Zeming Li, and Jian Sun. Autoassign: Differentiable label assignment for dense object detection. arXiv preprint arXiv:2007.03496, 2020. 6"
1154
+ ],
1155
+ "bbox": [
1156
+ 503,
1157
+ 92,
1158
+ 893,
1159
+ 896
1160
+ ],
1161
+ "page_idx": 9
1162
+ }
1163
+ ]
2303.01xxx/2303.01047/bb789d82-765d-47ed-82b3-a11998226e8d_model.json ADDED
@@ -0,0 +1,1760 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ [
3
+ {
4
+ "type": "title",
5
+ "bbox": [
6
+ 0.21,
7
+ 0.131,
8
+ 0.761,
9
+ 0.154
10
+ ],
11
+ "angle": 0,
12
+ "content": "Task-Specific Context Decoupling for Object Detection"
13
+ },
14
+ {
15
+ "type": "text",
16
+ "bbox": [
17
+ 0.252,
18
+ 0.18,
19
+ 0.717,
20
+ 0.199
21
+ ],
22
+ "angle": 0,
23
+ "content": "Jiayuan Zhuang\\(^{1}\\) Zheng Qin\\(^{1}\\) Hao Yu\\(^{2}\\) Xucan Chen\\(^{1}\\)"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.168,
29
+ 0.199,
30
+ 0.802,
31
+ 0.218
32
+ ],
33
+ "angle": 0,
34
+ "content": "\\(^{1}\\)National University of Defense Technology \\(^{2}\\)Technical University of Munich"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.166,
40
+ 0.22,
41
+ 0.802,
42
+ 0.234
43
+ ],
44
+ "angle": 0,
45
+ "content": "alpc111@163.com qinzheng12@nudt.edu.cn hao.yu@tum.de xcchen18@139.com"
46
+ },
47
+ {
48
+ "type": "title",
49
+ "bbox": [
50
+ 0.235,
51
+ 0.269,
52
+ 0.314,
53
+ 0.285
54
+ ],
55
+ "angle": 0,
56
+ "content": "Abstract"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.076,
62
+ 0.301,
63
+ 0.474,
64
+ 0.619
65
+ ],
66
+ "angle": 0,
67
+ "content": "Classification and localization are two main sub-tasks in object detection. Nonetheless, these two tasks have inconsistent preferences for feature context, i.e., localization expects more boundary-aware features to accurately regress the bounding box, while more semantic context is preferred for object classification. Existing methods usually leverage disentangled heads to learn different feature context for each task. However, the heads are still applied on the same input features, which leads to an imperfect balance between classification and localization. In this work, we propose a novel Task-Specific COntext DEcoupling (TSCODE) head which further disentangles the feature encoding for two tasks. For classification, we generate spatially-coarse but semantically-strong feature encoding. For localization, we provide high-resolution feature map containing more edge information to better regress object boundaries. TSCODE is plug-and-play and can be easily incorporated into existing detection pipelines. Extensive experiments demonstrate that our method stably improves different detectors by over 1.0 AP with less computational cost. Our code and models will be publicly released."
68
+ },
69
+ {
70
+ "type": "title",
71
+ "bbox": [
72
+ 0.078,
73
+ 0.634,
74
+ 0.21,
75
+ 0.649
76
+ ],
77
+ "angle": 0,
78
+ "content": "1. Introduction"
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.076,
84
+ 0.66,
85
+ 0.47,
86
+ 0.765
87
+ ],
88
+ "angle": 0,
89
+ "content": "Object detection aims to recognize and localize objects existing in a natural image, which is a fundamental but challenging task in many computer vision applications. Recent advances in object detection have been predominated by deep learning-based methods [15, 18, 26, 33, 36], where the task is typically formulated as the classification of regions of interest and the localization of bounding boxes."
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.076,
95
+ 0.765,
96
+ 0.471,
97
+ 0.903
98
+ ],
99
+ "angle": 0,
100
+ "content": "Semantic context encoded in the high-level semantic-rich features plays a crucial role in both the classification and localization task of object detection [1, 31]. Feature pyramid network (FPN) [25] distills the semantic context from the high-level semantic-rich features and fuses it into the low-level detail-preserving feature maps, where small-scale objects can be better detected. This design effectively provides more semantic context for early-stage features and facilitates detecting objects in various scales. Early"
101
+ },
102
+ {
103
+ "type": "image",
104
+ "bbox": [
105
+ 0.503,
106
+ 0.268,
107
+ 0.895,
108
+ 0.405
109
+ ],
110
+ "angle": 0,
111
+ "content": null
112
+ },
113
+ {
114
+ "type": "image",
115
+ "bbox": [
116
+ 0.502,
117
+ 0.407,
118
+ 0.895,
119
+ 0.543
120
+ ],
121
+ "angle": 0,
122
+ "content": null
123
+ },
124
+ {
125
+ "type": "image_caption",
126
+ "bbox": [
127
+ 0.498,
128
+ 0.553,
129
+ 0.895,
130
+ 0.721
131
+ ],
132
+ "angle": 0,
133
+ "content": "Figure 1. Top: Inference from original FCOS [43]. Bottom: Inference from FCOS with our TSCODE. Results are shown before NMS. The bounding box that has the highest IoU with the ground truth is shown in green, while the top-3 bounding-boxes with the highest classification scores are marked in other colors. The competition between the two tasks in FCOS can be clearly observed in the top figure, i.e., the bounding box with the best IoU (green) has lower classification confidence, while that with the best classification score (blue) has a poor bounding box prediction. Thanks to our TSCODE, the competition issue is addressed in the bottom figure, where the blue bounding box with the most confident classification prediction also has a great IoU with the ground truth."
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.498,
139
+ 0.735,
140
+ 0.895,
141
+ 0.903
142
+ ],
143
+ "angle": 0,
144
+ "content": "works [17, 33, 36] usually tackle the detection problem by attaching a head network shared by the two tasks on each feature-map level for a specific scale of objects, though the two tasks are semantically misaligned, as later noted by Double-Head R-CNN [45] and TSD [40]. Typically, bounding box regression expects more texture details and edge information to accurately localize the object boundaries, while more semantic context is required to facilitate the classification of objects [6, 7]. Based on this insight, they propose to use two decoupled head branches for different tasks on each feature level from FPN. In this pa"
145
+ },
146
+ {
147
+ "type": "aside_text",
148
+ "bbox": [
149
+ 0.023,
150
+ 0.266,
151
+ 0.059,
152
+ 0.701
153
+ ],
154
+ "angle": 270,
155
+ "content": "arXiv:2303.01047v1 [cs.CV] 2 Mar 2023"
156
+ }
157
+ ],
158
+ [
159
+ {
160
+ "type": "text",
161
+ "bbox": [
162
+ 0.076,
163
+ 0.092,
164
+ 0.47,
165
+ 0.137
166
+ ],
167
+ "angle": 0,
168
+ "content": "per, we name this decoupling strategy the parameter decoupling, which relies on separate heads to encode task-specific semantic context from the same feature map."
169
+ },
170
+ {
171
+ "type": "text",
172
+ "bbox": [
173
+ 0.076,
174
+ 0.139,
175
+ 0.473,
176
+ 0.531
177
+ ],
178
+ "angle": 0,
179
+ "content": "However, we observe that disentangling classification and localization only at the parameter level leads to an imperfect trade-off between the two tasks. Fig. 1 (top) visualizes the top-3 (for a clearer view) bounding boxes with the highest confidence scores and the box that has the highest intersection-over-union (IoU) with the corresponding ground-truth table box before non-maximum suppression (NMS) [38], as predicted by FCOS [43]. FCOS has already extracted accurate enough boxes (see the green one), but they are suppressed during NMS due to relatively low confidence scores. We attribute this to the intrinsic competition between the two tasks: merely relying on the learning ability of head networks to provide the task-specific context from a shared feature map usually shows bias towards one task, while suppressing the other. This phenomenon has been noticed in [6, 40], but no proper solution has been proposed. Some works [13, 28, 41, 42] attempt to incorporate more semantic context to improve the detection results. However, more is not always better, and there are still three problems: 1) The generality of these methods is limited and only a small number of detectors can benefit from them; 2) More computational overhead is introduced, which requires long training schedules and harms the inference efficiency; 3) More importantly, the essential problem still exists, i.e., the shared feature map is still jointly optimized for two tasks that compete with each other."
180
+ },
181
+ {
182
+ "type": "text",
183
+ "bbox": [
184
+ 0.076,
185
+ 0.533,
186
+ 0.473,
187
+ 0.849
188
+ ],
189
+ "content": "To address these issues, we propose to directly disentangle the feature encoding for classification and localization, namely Task-Specific Context Decoupling (TSCODE for short), so that more suitable semantic context is selected for resolving each specific task. The classification branch usually demands features with rich semantic context from the picture to infer the category of objects, so we adopt a spatially-coarse but semantically-strong feature encoding for it. The localization branch usually requires more boundary information around objects, so we provide it with a high-resolution feature map containing more edge information for finer regression of object boundaries. Moreover, the feature fusion in the two branches is designed in an efficient fashion and further boosts the performance. Benefiting from our disentangled design, the incoherent context preferences of the two tasks are alleviated, which allows the detector to converge faster and achieve better performance. As shown in Fig. 1 (bottom), with TSCODE, the competition issue is addressed and the bounding box with the most confident classification prediction also has a great IoU with the ground truth."
190
+ "content": "To address these issues, we propose to directly disentangle the feature encoding for classification and localization, namely Task-Specific Context Decoupling (TSCODE for short), so that more suitable semantic context is selected for resolving specific tasks. For the classification branch, it usually demands features with rich semantic context contained in the picture to infer the category of objects, thus we adopt spatially-coarse but semantically-strong feature encoding for it. For the localization branch, it usually requires more boundary information around objects, thus we provide it with high-resolution feature map containing more edge information for finer regression of object boundaries. Moreover, the feature fusion in two branches is designed in an efficient fashion and further boosts the performance. Benefiting from our disentangled design, incoherent context preference of the two tasks is alleviated, which allows the detector to converge faster and achieve better performance. As shown in Fig. 1 (bottom), with TSCODE, the competition issue is addressed and the bounding box with the most confident classification prediction also has a great IoU with the ground truth."
191
+ },
192
+ {
193
+ "type": "text",
194
+ "bbox": [
195
+ 0.097,
196
+ 0.851,
197
+ 0.437,
198
+ 0.866
199
+ ],
200
+ "angle": 0,
201
+ "content": "Our main contributions are summarized as follows:"
202
+ },
203
+ {
204
+ "type": "text",
205
+ "bbox": [
206
+ 0.096,
207
+ 0.871,
208
+ 0.47,
209
+ 0.9
210
+ ],
211
+ "angle": 0,
212
+ "content": "- We delve into the different roles of semantic contexts in classification and localization tasks and discover the"
213
+ },
214
+ {
215
+ "type": "text",
216
+ "bbox": [
217
+ 0.533,
218
+ 0.092,
219
+ 0.747,
220
+ 0.106
221
+ ],
222
+ "angle": 0,
223
+ "content": "source of their inherent conflict."
224
+ },
225
+ {
226
+ "type": "text",
227
+ "bbox": [
228
+ 0.518,
229
+ 0.112,
230
+ 0.892,
231
+ 0.158
232
+ ],
233
+ "angle": 0,
234
+ "content": "- We propose a novel TSCODE head to deal with the tangled context conflict, where different feature encodings with task-specific context can be generated."
235
+ },
236
+ {
237
+ "type": "text",
238
+ "bbox": [
239
+ 0.518,
240
+ 0.163,
241
+ 0.894,
242
+ 0.209
243
+ ],
244
+ "angle": 0,
245
+ "content": "- Extensive experiments show that TSCODE is plug-and-play and can easily boost the performance of existing detectors with even less computational cost."
246
+ },
247
+ {
248
+ "type": "list",
249
+ "bbox": [
250
+ 0.518,
251
+ 0.112,
252
+ 0.894,
253
+ 0.209
254
+ ],
255
+ "angle": 0,
256
+ "content": null
257
+ },
258
+ {
259
+ "type": "title",
260
+ "bbox": [
261
+ 0.5,
262
+ 0.239,
263
+ 0.642,
264
+ 0.256
265
+ ],
266
+ "angle": 0,
267
+ "content": "2. Related Work"
268
+ },
269
+ {
270
+ "type": "text",
271
+ "bbox": [
272
+ 0.498,
273
+ 0.265,
274
+ 0.892,
275
+ 0.612
276
+ ],
277
+ "angle": 0,
278
+ "content": "Object Detection. The current mainstream CNN-based object detectors can be divided into two-stage [17, 36] and one-stage [29, 33]. Most of them use the feature pyramid network (FPN) [25] to cope with the large-scale variation [39, 42] of objects. Compared with works [3, 5] that merely leverage a single feature map for detecting all the scale-varying objects, FPN takes advantage of the Convolutional Neural Networks (CNN) hierarchy, i.e., it not only fuses detail-preserving low-level features and semantic-rich high-level features, but also distributes the classification and localization tasks of different objects to corresponding feature maps according to their scale on images. The insight [28] that feature maps in low layers respond strongly to edges or instance parts further manifests the necessity of augmenting a bottom-up path to propagate features with strong boundary information and enhance all features with reasonable localization capability. Recent works [41, 42] stack this feature fusion structure several times for sufficient information exchange between high-level and low-level features. These designs may be a compromise for the inaccurate localization information contained in high-level feature maps and the insufficient semantic context contained in low-level feature maps."
279
+ },
280
+ {
281
+ "type": "text",
282
+ "bbox": [
283
+ 0.498,
284
+ 0.614,
285
+ 0.892,
286
+ 0.901
287
+ ],
288
+ "angle": 0,
289
+ "content": "Decoupled head. The decoupled head has long been the standard configuration of one-stage detectors [24, 26, 43, 50]. Recent works, Double-Head R-CNN [45] and TSD [40], revisit the specialized sibling head that is widely used in the R-CNN family [2, 14, 15, 17, 32, 36] and figure out the essence of the task misalignment between classification and localization. YOLOX [12] also points out that the coupled detection head may harm the performance; it introduces the decoupled head to the YOLO family [33-35] for the first time, which greatly improves the convergence speed and boosts the performance. Based on the decoupled head, DDOD [6] proposes to use deformable convolutions to learn separate convolutional offsets for each branch, aiming to adaptively select specific spatial features for each head. These works demonstrate the importance of decoupling the classification and localization tasks. However, as mentioned in Section 1, decoupling classification and localization only at the parameter level leads to an imperfect trade-off between the two tasks."
290
+ }
291
+ ],
292
+ [
293
+ {
294
+ "type": "image",
295
+ "bbox": [
296
+ 0.08,
297
+ 0.087,
298
+ 0.49,
299
+ 0.304
300
+ ],
301
+ "angle": 0,
302
+ "content": null
303
+ },
304
+ {
305
+ "type": "image",
306
+ "bbox": [
307
+ 0.493,
308
+ 0.087,
309
+ 0.895,
310
+ 0.305
311
+ ],
312
+ "angle": 0,
313
+ "content": null
314
+ },
315
+ {
316
+ "type": "image_caption",
317
+ "bbox": [
318
+ 0.076,
319
+ 0.315,
320
+ 0.893,
321
+ 0.345
322
+ ],
323
+ "angle": 0,
324
+ "content": "Figure 2. An illustration of our novel Task-Specific Context Decoupling (TSCODE). The detector head at the \\( l^{th} \\) pyramid level receives feature maps \\( P_{l+1}, P_l \\) and \\( P_{l-1} \\) from FPN [25]; TSCODE then further disentangles the feature encoding for the classification and localization tasks."
325
+ },
326
+ {
327
+ "type": "title",
328
+ "bbox": [
329
+ 0.077,
330
+ 0.359,
331
+ 0.168,
332
+ 0.374
333
+ ],
334
+ "angle": 0,
335
+ "content": "3. Method"
336
+ },
337
+ {
338
+ "type": "title",
339
+ "bbox": [
340
+ 0.078,
341
+ 0.384,
342
+ 0.327,
343
+ 0.399
344
+ ],
345
+ "angle": 0,
346
+ "content": "3.1. Motivation and Framework"
347
+ },
348
+ {
349
+ "type": "text",
350
+ "bbox": [
351
+ 0.076,
352
+ 0.407,
353
+ 0.471,
354
+ 0.573
355
+ ],
356
+ "angle": 0,
357
+ "content": "Classification and localization are two highly related but still contradictory tasks in object detection. For each object, classification is more coarse-grained and requires richer semantic context, while localization is rather fine-grained and demands more detailed boundary information. For this reason, mainstream detectors [11, 24, 26, 40, 45, 50] apply a decoupled head to cope with this conflict. Specifically, given a ground-truth object assigned to a specific pyramid level \\( l \\), with bounding box \\( \\mathcal{B} \\) and class label \\( c \\), detectors with the typical decoupled head minimize the classification and localization losses based on the same feature map \\( P_l \\):"
358
+ },
359
+ {
360
+ "type": "equation",
361
+ "bbox": [
362
+ 0.137,
363
+ 0.586,
364
+ 0.47,
365
+ 0.603
366
+ ],
367
+ "angle": 0,
368
+ "content": "\\[\n\\mathcal {L} = \\mathcal {L} _ {c l s} \\left(\\mathcal {F} _ {c} \\left(P _ {l}\\right), c\\right) + \\mathcal {L} _ {l o c} \\left(\\mathcal {F} _ {r} \\left(P _ {l}\\right), \\mathcal {B}\\right), \\tag {1}\n\\]"
369
+ },
370
+ {
371
+ "type": "text",
372
+ "bbox": [
373
+ 0.076,
374
+ 0.613,
375
+ 0.47,
376
+ 0.885
377
+ ],
378
+ "angle": 0,
379
+ "content": "where \\(\\mathcal{F}_c(\\cdot) = \\{f_{cls}(\\cdot),\\mathcal{C}(\\cdot)\\}\\) and \\(\\mathcal{F}_r(\\cdot) = \\{f_{loc}(\\cdot),\\mathcal{R}(\\cdot)\\}\\) are the classification and localization branches. \\(f_{cls}(\\cdot)\\) and \\(f_{loc}(\\cdot)\\) are the feature projection functions for classification and localization, while \\(\\mathcal{C}(\\cdot)\\) and \\(\\mathcal{R}(\\cdot)\\) are the final layers of the two branches, which decode features to classification scores and bounding box positions. In the common decoupled-head design, \\(f_{cls}(\\cdot)\\) and \\(f_{loc}(\\cdot)\\) share the same structure but are learned with separate parameters to provide each task with different feature contexts, i.e., parameter decoupling. However, this simplistic design cannot fully solve the problem, as the semantic context has been largely determined by the shared input features \\(P_l\\). Recent work [6] attempts to learn features with more flexible context for each task with deformable convolutions, but the fact that they still originate from the same features limits its effectiveness. Therefore, the conflict between classification and localization imposes opposite preferences of context on \\(P_l\\), leading to an imperfect balance between the two tasks."
380
+ },
381
+ {
382
+ "type": "text",
383
+ "bbox": [
384
+ 0.097,
385
+ 0.886,
386
+ 0.469,
387
+ 0.901
388
+ ],
389
+ "angle": 0,
390
+ "content": "To address this issue, our TSCODE decouples the fea"
391
+ },
392
+ {
393
+ "type": "text",
394
+ "bbox": [
395
+ 0.498,
396
+ 0.361,
397
+ 0.892,
398
+ 0.437
399
+ ],
400
+ "angle": 0,
401
+ "content": "ture encoding for the two tasks at the source and leverages feature maps with different semantic context in the two branches. Instead of using \\( P_{l} \\) as the common input, we feed the two branches with task-specific input features, i.e., \\( G_{l}^{cls} \\) and \\( G_{l}^{loc} \\). To this end, Eq. (1) can be written as:"
402
+ },
403
+ {
404
+ "type": "equation",
405
+ "bbox": [
406
+ 0.541,
407
+ 0.447,
408
+ 0.892,
409
+ 0.466
410
+ ],
411
+ "angle": 0,
412
+ "content": "\\[\n\\mathcal {L} = \\mathcal {L} _ {c l s} \\left(\\mathcal {F} _ {c} \\left(G _ {l} ^ {c l s}\\right), c\\right) + \\lambda \\mathcal {L} _ {l o c} \\left(\\mathcal {F} _ {r} \\left(G _ {l} ^ {l o c}\\right), \\mathcal {B}\\right). \\tag {2}\n\\]"
413
+ },
414
+ {
415
+ "type": "text",
416
+ "bbox": [
417
+ 0.498,
418
+ 0.477,
419
+ 0.892,
420
+ 0.536
421
+ ],
422
+ "angle": 0,
423
+ "content": "For the classification branch, we generate spatially coarser but semantically richer feature maps, while for the localization branch, we provide it with feature maps that contain more detailed texture and boundary information."
424
+ },
425
+ {
426
+ "type": "text",
427
+ "bbox": [
428
+ 0.498,
429
+ 0.537,
430
+ 0.892,
431
+ 0.687
432
+ ],
433
+ "angle": 0,
434
+ "content": "As illustrated in Fig. 2, our method follows the common one-stage detection framework, which is composed of the backbone, the feature pyramid and the detection head. The backbone and feature pyramid generate multi-scale feature maps from the input images. Our TSCODE head then receives three levels of feature maps and generates decoupled feature maps for classification and localization. More importantly, TSCODE is plug-and-play and can be easily incorporated into most popular detectors, whether anchor-based or anchor-free."
435
+ },
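To make the decoupling in Eq. (2) concrete, below is a minimal PyTorch sketch (not the authors' released code) in which the classification and localization branches read different encodings instead of a shared \(P_l\); the branch depths, channel widths, dummy targets and placeholder loss functions are illustrative assumptions only.

```python
# Sketch of Eq. (2): each branch consumes its own task-specific encoding.
# Channel sizes, branch structures and losses are assumptions for illustration;
# real detectors use focal/GIoU-style losses and proper label assignment.
import torch
import torch.nn as nn
import torch.nn.functional as F

C, num_classes = 256, 80
cls_branch = nn.Sequential(nn.Conv2d(C, 512, 3, padding=1), nn.ReLU(),
                           nn.Conv2d(512, num_classes, 3, padding=1))
loc_branch = nn.Sequential(nn.Conv2d(C, C, 3, padding=1), nn.ReLU(),
                           nn.Conv2d(C, 4, 3, padding=1))  # (l, t, r, b) distances

# Task-specific encodings at pyramid level l (random stand-ins here):
G_cls = torch.randn(2, C, 32, 32)   # coarser, semantically richer (SCE output)
G_loc = torch.randn(2, C, 64, 64)   # finer, detail-preserving (DPE output)

# Dummy dense targets with matching spatial sizes.
cls_target = torch.randint(0, 2, (2, num_classes, 32, 32)).float()
box_target = torch.rand(2, 4, 64, 64)

lam = 1.0  # the loss-balancing weight in Eq. (2)
loss = F.binary_cross_entropy_with_logits(cls_branch(G_cls), cls_target) \
     + lam * F.l1_loss(loc_branch(G_loc), box_target)
loss.backward()
```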
436
+ {
437
+ "type": "title",
438
+ "bbox": [
439
+ 0.5,
440
+ 0.697,
441
+ 0.884,
442
+ 0.713
443
+ ],
444
+ "angle": 0,
445
+ "content": "3.2. Semantic Context Encoding for Classification"
446
+ },
447
+ {
448
+ "type": "text",
449
+ "bbox": [
450
+ 0.498,
451
+ 0.72,
452
+ 0.892,
453
+ 0.901
454
+ ],
455
+ "angle": 0,
456
+ "content": "In object detection, classification is a relatively coarse-grained task which recognizes what a bounding box covers. On the one hand, as mentioned in [6, 10, 20, 40], the classification of an object can often be pinned down by the features of its key or salient parts, i.e., local focus. However, existing methods ignore that the salient areas could be sparsely distributed, indicating that there could be feature redundancy in the classification branch. We find in our experiments that feeding the classification branch with down-sampled feature maps causes almost no performance drop but reduces the computational cost (results can be seen in Sec. 4.1). On the other hand, the category of an"
457
+ }
458
+ ],
459
+ [
460
+ {
461
+ "type": "image",
462
+ "bbox": [
463
+ 0.078,
464
+ 0.087,
465
+ 0.473,
466
+ 0.228
467
+ ],
468
+ "angle": 0,
469
+ "content": null
470
+ },
471
+ {
472
+ "type": "image_caption",
473
+ "bbox": [
474
+ 0.11,
475
+ 0.237,
476
+ 0.438,
477
+ 0.251
478
+ ],
479
+ "angle": 0,
480
+ "content": "Figure 3. Semantic context encoding for classification."
481
+ },
482
+ {
483
+ "type": "text",
484
+ "bbox": [
485
+ 0.076,
486
+ 0.269,
487
+ 0.47,
488
+ 0.344
489
+ ],
490
+ "angle": 0,
491
+ "content": "object could be inferred from its surrounding environment, e.g., a chair is more likely to co-occur with a table, or an occluded table can be recognized from a larger region. This means the global abstraction from a richer semantic context could facilitate the classification task."
492
+ },
493
+ {
494
+ "type": "text",
495
+ "bbox": [
496
+ 0.076,
497
+ 0.345,
498
+ 0.473,
499
+ 0.451
500
+ ],
501
+ "angle": 0,
502
+ "content": "Based on these insights, we devise Semantic Context Encoding (SCE) for efficient and accurate classification. Specifically, at each pyramid level \\( l \\), SCE leverages the feature maps from two levels, i.e., \\( P_{l} \\) and \\( P_{l + 1} \\), to generate a semantically-rich feature map for classification. As shown in Fig. 3, we first downsample \\( P_{l} \\) by a factor of 2 and concatenate it with \\( P_{l + 1} \\) to generate the final \\( G_{l}^{cls} \\):"
503
+ },
504
+ {
505
+ "type": "equation",
506
+ "bbox": [
507
+ 0.154,
508
+ 0.459,
509
+ 0.47,
510
+ 0.478
511
+ ],
512
+ "angle": 0,
513
+ "content": "\\[\nG _ {l} ^ {c l s} = \\operatorname {C o n c a t} \\left(\\mathrm {D C o n v} \\left(P _ {l}\\right), P _ {l + 1}\\right), \\tag {3}\n\\]"
514
+ },
515
+ {
516
+ "type": "text",
517
+ "bbox": [
518
+ 0.076,
519
+ 0.486,
520
+ 0.473,
521
+ 0.713
522
+ ],
523
+ "angle": 0,
524
+ "content": "where \\(\\mathrm{Concat}(\\cdot)\\) and \\(\\mathrm{DConv}(\\cdot)\\) represent concatenation and a shared downsampling convolutional layer. Note that \\(G_{l}^{cls}\\) is at \\(1/2\\) the resolution of \\(P_{l}\\). \\(G_{l}^{cls}\\) is then passed into \\(\\mathcal{F}_{c}(\\cdot) = \\{f_{cls}(\\cdot), \\mathcal{C}(\\cdot)\\}\\) to predict the classification scores. Instead of using 4 convolutional layers with 256 channels, we adopt a shallow-but-wide design of \\(f_{cls}(\\cdot)\\) using 2 convolutional layers with 512 channels. We argue that this design can encode more semantic information than the deep-but-narrow one, which requires little extra computational cost but facilitates more accurate classification. Since \\(G_{l}^{cls}\\) is downsampled by \\(2 \\times\\) relative to \\(P_{l}\\), each location \\((x,y)\\) in \\(G_{l}^{cls}\\) predicts the classification scores of its four nearest neighbors in \\(P_{l}\\), denoted as \\(\\tilde{C} \\in \\mathbb{R}^{H_{l+1} \\times W_{l+1} \\times 4N}\\), where \\(N\\) is the number of categories. \\(\\tilde{C}\\) is then rearranged to \\(\\hat{C} \\in \\mathbb{R}^{H_{l} \\times W_{l} \\times N}\\) to recover the resolution:"
525
+ },
526
+ {
527
+ "type": "equation",
528
+ "bbox": [
529
+ 0.091,
530
+ 0.72,
531
+ 0.47,
532
+ 0.74
533
+ ],
534
+ "angle": 0,
535
+ "content": "\\[\n\\hat {C} [ 2 x + i, 2 y + j, c ] = \\tilde {C} [ x, y, (2 i + j) c ], \\forall i, j \\in \\{0, 1 \\}. \\tag {4}\n\\]"
536
+ },
537
+ {
538
+ "type": "text",
539
+ "bbox": [
540
+ 0.076,
541
+ 0.748,
542
+ 0.47,
543
+ 0.824
544
+ ],
545
+ "angle": 0,
546
+ "content": "In this way, we not only leverage the sparsity of the salient features (from \\( P_l \\)), but also benefit from the rich semantic context in the higher pyramid level (from \\( P_{l + 1} \\)). This helps infer object categories more effectively, especially for texture-less objects or those with severe occlusion."
547
+ },
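The following is a minimal PyTorch sketch of how Eqs. (3)-(4) could be realized. The layer widths follow the shallow-but-wide description above (two 3x3 convolutions with 512 channels), but the channel ordering of the 4N scores, and hence the use of pixel_shuffle for the rearrangement in Eq. (4), is an assumption rather than the authors' implementation.

```python
# Sketch of Semantic Context Encoding (Eq. 3) and score rearrangement (Eq. 4).
# Assumes the 4N score channels are grouped per class so that a 2x pixel
# shuffle recovers the P_l resolution; widths/kernels are illustrative.
import torch
import torch.nn as nn
import torch.nn.functional as F

C, num_classes = 256, 80
down  = nn.Conv2d(C, C, 3, stride=2, padding=1)                    # DConv(.) in Eq. (3)
f_cls = nn.Sequential(nn.Conv2d(2 * C, 512, 3, padding=1), nn.ReLU(),
                      nn.Conv2d(512, 512, 3, padding=1), nn.ReLU())  # shallow-but-wide
head  = nn.Conv2d(512, 4 * num_classes, 3, padding=1)              # 4N scores per coarse cell

P_l   = torch.randn(1, C, 64, 64)      # level-l FPN feature
P_lp1 = torch.randn(1, C, 32, 32)      # level-(l+1) FPN feature

G_cls   = torch.cat([down(P_l), P_lp1], dim=1)   # Eq. (3): half the resolution of P_l
scores4 = head(f_cls(G_cls))                     # (1, 4N, H/2, W/2)
scores  = F.pixel_shuffle(scores4, 2)            # (1, N, H, W): each coarse cell spreads its
                                                 # four predictions to its neighbors (Eq. 4)
assert scores.shape == (1, num_classes, 64, 64)
```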
548
+ {
549
+ "type": "title",
550
+ "bbox": [
551
+ 0.077,
552
+ 0.832,
553
+ 0.453,
554
+ 0.849
555
+ ],
556
+ "angle": 0,
557
+ "content": "3.3. Detail-Preserving Encoding for Localization"
558
+ },
559
+ {
560
+ "type": "text",
561
+ "bbox": [
562
+ 0.076,
563
+ 0.856,
564
+ 0.471,
565
+ 0.902
566
+ ],
567
+ "angle": 0,
568
+ "content": "Unlike classification, localization is a more fine-grained task which relies on more texture details and boundary information to predict the corners of an object. However,"
569
+ },
570
+ {
571
+ "type": "image",
572
+ "bbox": [
573
+ 0.499,
574
+ 0.087,
575
+ 0.895,
576
+ 0.25
577
+ ],
578
+ "angle": 0,
579
+ "content": null
580
+ },
581
+ {
582
+ "type": "image_caption",
583
+ "bbox": [
584
+ 0.536,
585
+ 0.26,
586
+ 0.856,
587
+ 0.274
588
+ ],
589
+ "angle": 0,
590
+ "content": "Figure 4. Detail-preserving encoding for localization."
591
+ },
592
+ {
593
+ "type": "text",
594
+ "bbox": [
595
+ 0.498,
596
+ 0.296,
597
+ 0.892,
598
+ 0.492
599
+ ],
600
+ "angle": 0,
601
+ "content": "existing methods usually regress the object corners from a single-scale feature map \\( P_{l} \\). The feature maps in lower pyramid levels have stronger responses to the contours, edges and detailed textures of objects. This can further benefit the localization task but often comes with huge extra computational cost [28, 47]. QueryDet [47] uses sparse convolution [16] to reduce the computational cost on low-level feature maps, but it still requires an extra auxiliary branch with a specific supervision signal. On the other hand, we further note that high-level feature maps are also important for localization, as they help see the whole object as completely as possible, which provides more information to infer the overall shape of objects."
602
+ },
603
+ {
604
+ "type": "text",
605
+ "bbox": [
606
+ 0.498,
607
+ 0.493,
608
+ 0.893,
609
+ 0.765
610
+ ],
611
+ "angle": 0,
612
+ "content": "Based on these observations, we propose Detail-Preserving Encoding (DPE) to achieve accurate localization without sacrificing efficiency. At each pyramid level \\( l \\), our DPE accepts the feature maps from three pyramid levels, i.e., \\( P_{l - 1} \\), \\( P_l \\) and \\( P_{l + 1} \\). \\( P_{l - 1} \\) provides more detail and edge features while \\( P_{l + 1} \\) provides a more thorough perspective of objects. We demonstrate that each pyramid level is mainly related to its two neighboring levels and further levels could even harm the performance. The structure of DPE is shown in Fig. 4. For computational efficiency, we adopt a simple U-Net [37] to fuse \\( P_{l - 1} \\) and \\( P_{l + 1} \\). \\( P_l \\) is first upsampled by a factor of 2 and then aggregated with \\( P_{l - 1} \\). Then a \\( 3\\times 3 \\) convolutional layer with a stride of 2 downsamples it to the resolution of \\( P_l \\). This design effectively preserves the detail information in \\( P_{l - 1} \\) with little extra computational cost. Finally, \\( P_{l + 1} \\) is upsampled and aggregated to generate the final \\( G_l^{loc} \\). The computation can be written as:"
613
+ },
614
+ {
615
+ "type": "equation",
616
+ "bbox": [
617
+ 0.52,
618
+ 0.779,
619
+ 0.892,
620
+ 0.797
621
+ ],
622
+ "angle": 0,
623
+ "content": "\\[\nG _ {l} ^ {\\text {l o c}} = P _ {l} + \\mu \\left(P _ {l + 1}\\right) + \\mathrm {D C o n v} \\left(\\mu \\left(P _ {l}\\right) + P _ {l - 1}\\right) \\tag {5}\n\\]"
624
+ },
625
+ {
626
+ "type": "text",
627
+ "bbox": [
628
+ 0.498,
629
+ 0.81,
630
+ 0.895,
631
+ 0.903
632
+ ],
633
+ "angle": 0,
634
+ "content": "where \\(\\mu (\\cdot)\\) represents upsampling and DConv \\((\\cdot)\\) is another shared downsampling convolutional layer. Specifically, we compute \\(G_{3}^{loc}\\) with \\(C_2\\), \\(P_{3}\\) and \\(P_{4}\\), as computing \\(P_{2}\\) through FPN induces a huge computational cost. Afterwards, \\(\\mathcal{F}_r(\\cdot) = \\{f_{loc}(\\cdot),\\mathcal{R}(\\cdot)\\}\\) further predicts the bounding boxes in the \\(l^{\\mathrm{th}}\\) pyramid level based on \\(G_{l}^{loc}\\)."
635
+ }
636
+ ],
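For reference, Eq. (5) can be prototyped in a few lines of PyTorch; the nearest-neighbour upsampling used for mu(.) and the single shared stride-2 convolution are assumptions consistent with the description above, not necessarily the authors' exact choices.

```python
# Sketch of Detail-Preserving Encoding (Eq. 5):
#   G_loc = P_l + up(P_{l+1}) + DConv(up(P_l) + P_{l-1})
# Upsampling mode and layer hyper-parameters are illustrative assumptions.
import torch
import torch.nn as nn
import torch.nn.functional as F

C = 256
dconv = nn.Conv2d(C, C, 3, stride=2, padding=1)    # shared downsampling conv, DConv(.)

def up2(x):                                         # mu(.): 2x nearest-neighbour upsampling
    return F.interpolate(x, scale_factor=2, mode="nearest")

P_lm1 = torch.randn(1, C, 128, 128)                 # P_{l-1}: finer, edge-preserving
P_l   = torch.randn(1, C, 64, 64)                   # P_l
P_lp1 = torch.randn(1, C, 32, 32)                   # P_{l+1}: coarser, more context

G_loc = P_l + up2(P_lp1) + dconv(up2(P_l) + P_lm1)  # Eq. (5), at the resolution of P_l
assert G_loc.shape == P_l.shape
```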
637
+ [
638
+ {
639
+ "type": "table",
640
+ "bbox": [
641
+ 0.124,
642
+ 0.089,
643
+ 0.849,
644
+ 0.284
645
+ ],
646
+ "angle": 0,
647
+ "content": "<table><tr><td>Method</td><td>SCE</td><td>DPE</td><td>AP</td><td>\\( AP_{50} \\)</td><td>\\( AP_{75} \\)</td><td>\\( AP_S \\)</td><td>\\( AP_M \\)</td><td>\\( AP_L \\)</td><td>\\( AR_S \\)</td><td>\\( AR_M \\)</td><td>\\( AR_L \\)</td><td>GFLOPs</td></tr><tr><td rowspan=\"4\">FCOS [43]</td><td></td><td></td><td>38.7</td><td>57.4</td><td>41.8</td><td>22.9</td><td>42.5</td><td>50.1</td><td>36.8</td><td>61.7</td><td>73.2</td><td>200.59</td></tr><tr><td>✓</td><td></td><td>\\( 39.3_{+0.6} \\)</td><td>58.2</td><td>42.8</td><td>23.3</td><td>43.3</td><td>50.8</td><td>37.5</td><td>62.1</td><td>73.2</td><td>182.62</td></tr><tr><td></td><td>✓</td><td>\\( 38.9_{+0.2} \\)</td><td>57.5</td><td>41.8</td><td>22.9</td><td>42.8</td><td>50.3</td><td>37.0</td><td>62.4</td><td>73.2</td><td>213.19</td></tr><tr><td>✓</td><td>✓</td><td>\\( 40.0_{+1.3} \\)</td><td>58.7</td><td>43.1</td><td>23.7</td><td>44.0</td><td>51.8</td><td>38.0</td><td>62.7</td><td>73.3</td><td>195.22</td></tr><tr><td rowspan=\"4\">ATSS [50]</td><td></td><td></td><td>39.4</td><td>57.6</td><td>42.8</td><td>23.6</td><td>42.9</td><td>50.3</td><td>38.2</td><td>63.5</td><td>73.6</td><td>205.30</td></tr><tr><td>✓</td><td></td><td>\\( 40.2_{+0.8} \\)</td><td>58.6</td><td>43.8</td><td>23.9</td><td>44.0</td><td>52.2</td><td>38.9</td><td>63.7</td><td>74.0</td><td>187.35</td></tr><tr><td></td><td>✓</td><td>\\( 39.8_{+0.4} \\)</td><td>57.8</td><td>42.9</td><td>23.7</td><td>43.1</td><td>50.9</td><td>38.0</td><td>64.0</td><td>75.3</td><td>217.89</td></tr><tr><td>✓</td><td>✓</td><td>\\( 40.8_{+1.4} \\)</td><td>59.0</td><td>44.4</td><td>23.7</td><td>44.6</td><td>52.7</td><td>38.7</td><td>64.4</td><td>75.4</td><td>199.94</td></tr><tr><td rowspan=\"4\">GFL [24]</td><td></td><td></td><td>40.2</td><td>58.4</td><td>43.3</td><td>23.3</td><td>44.0</td><td>52.2</td><td>38.0</td><td>62.9</td><td>74.1</td><td>208.39</td></tr><tr><td>✓</td><td></td><td>\\( 41.1_{+0.9} \\)</td><td>59.3</td><td>44.5</td><td>24.0</td><td>44.6</td><td>53.8</td><td>37.7</td><td>63.4</td><td>75.2</td><td>190.44</td></tr><tr><td></td><td>✓</td><td>\\( 40.6_{+0.4} \\)</td><td>58.4</td><td>43.8</td><td>23.5</td><td>44.2</td><td>53.2</td><td>37.2</td><td>63.9</td><td>74.6</td><td>220.99</td></tr><tr><td>✓</td><td>✓</td><td>\\( 41.6_{+1.4} \\)</td><td>59.8</td><td>44.9</td><td>23.8</td><td>45.3</td><td>54.8</td><td>38.4</td><td>64.2</td><td>75.0</td><td>203.04</td></tr></table>"
648
+ },
649
+ {
650
+ "type": "table_caption",
651
+ "bbox": [
652
+ 0.076,
653
+ 0.296,
654
+ 0.893,
655
+ 0.325
656
+ ],
657
+ "angle": 0,
658
+ "content": "Table 1. Ablation studies on the COCO mini-val set. SCE denotes using Semantic Context Encoding for classification; DPE denotes using Detail-Preserving Encoding for localization."
659
+ },
660
+ {
661
+ "type": "title",
662
+ "bbox": [
663
+ 0.077,
664
+ 0.346,
665
+ 0.21,
666
+ 0.364
667
+ ],
668
+ "angle": 0,
669
+ "content": "4. Experiments"
670
+ },
671
+ {
672
+ "type": "text",
673
+ "bbox": [
674
+ 0.076,
675
+ 0.371,
676
+ 0.47,
677
+ 0.491
678
+ ],
679
+ "angle": 0,
680
+ "content": "Dataset and Evaluation Metric. Our experiments are conducted on the large-scale detection benchmark MSCOCO 2017 [27]. Following common practice [24, 43, 50], we use COCO trainval35k split (115K images) for training and minival split (5K images) for validation. We report our main results and compare with previous detectors on the test-dev split (20K images) by uploading the detection results to the evaluation server."
681
+ },
682
+ {
683
+ "type": "text",
684
+ "bbox": [
685
+ 0.076,
686
+ 0.492,
687
+ 0.471,
688
+ 0.81
689
+ ],
690
+ "angle": 0,
691
+ "content": "Implementation and Training Details. We implement our TSCODE with MMDetection [4] and cvpods [51]. Unless otherwise noted, we utilize a ResNet-50 [19] backbone network which is pretrained on ImageNet [9]. As TSCODE can be applied as a plug-and-play head to other basic detectors, we follow the original settings in the basic detectors (including loss function, bounding box parameterization, label assignment strategy and hyper-parameters) and merely replace the head part with TSCODE. The input images are resized to a maximum scale of \\( 1333 \\times 800 \\) during testing without changing the aspect ratio. The computational cost (measured in FLOPs) is calculated under the input size of \\( 1280 \\times 800 \\). We set the batch size to 16 and use 8 V100 GPUs (2 images per GPU) to train our models. Following common practice, we apply the standard \\( 1 \\times \\) training schedule in the ablation studies. The learning rate starts from 0.01 and is decayed by a factor of 10 after 8 and 11 epochs, respectively. We also compare our method with state-of-the-art detectors with different backbone networks, where we adopt the \\( 2 \\times \\) training schedule and multi-scale training. Please refer to Sec. 4.3 for more details."
692
+ },
693
+ {
694
+ "type": "title",
695
+ "bbox": [
696
+ 0.077,
697
+ 0.818,
698
+ 0.242,
699
+ 0.833
700
+ ],
701
+ "angle": 0,
702
+ "content": "4.1. Ablation Studies"
703
+ },
704
+ {
705
+ "type": "text",
706
+ "bbox": [
707
+ 0.076,
708
+ 0.84,
709
+ 0.471,
710
+ 0.903
711
+ ],
712
+ "angle": 0,
713
+ "content": "We first conduct extensive ablation studies to evaluate the efficacy of our design on COCO minival. We use ResNet-50 as the backbone network and all the models are trained for 12 epochs following \\(1 \\times\\) schedule."
714
+ },
715
+ {
716
+ "type": "image",
717
+ "bbox": [
718
+ 0.502,
719
+ 0.345,
720
+ 0.871,
721
+ 0.555
722
+ ],
723
+ "angle": 0,
724
+ "content": null
725
+ },
726
+ {
727
+ "type": "image_caption",
728
+ "bbox": [
729
+ 0.499,
730
+ 0.566,
731
+ 0.892,
732
+ 0.609
733
+ ],
734
+ "angle": 0,
735
+ "content": "Figure 5. Comparison of classification training loss between FCOS with and without TSCODE. TSCODE can accelerate the training and contributes to better convergence."
736
+ },
737
+ {
738
+ "type": "text",
739
+ "bbox": [
740
+ 0.498,
741
+ 0.628,
742
+ 0.892,
743
+ 0.734
744
+ ],
745
+ "angle": 0,
746
+ "content": "Effects of Individual Components. We first study the effectiveness of each component of TSCODE. In the experiments, we ablate our method with three basic detectors, i.e., FCOS [43], ATSS [50] and GFL [24], to evaluate the generality of our designs. For fair comparison, we retrain the baseline models strictly following the original papers with similar or slightly better results."
747
+ },
748
+ {
749
+ "type": "text",
750
+ "bbox": [
751
+ 0.498,
752
+ 0.735,
753
+ 0.892,
754
+ 0.884
755
+ ],
756
+ "angle": 0,
757
+ "content": "As demonstrated in Tab. 1, applying only Semantic Context Encoding (SCE) improves the AP of the baseline models by \\(0.6 \\sim 0.9\\) points, while reducing the computational cost (FLOPs) by \\(9\\%\\). Note that more significant improvements are observed for large and medium objects. This is consistent with our motivation to leverage more semantic context for the classification task. Moreover, better classification accuracy also helps find more objects, and thus SCE achieves better AR results on all the basic detectors."
758
+ },
759
+ {
760
+ "type": "text",
761
+ "bbox": [
762
+ 0.519,
763
+ 0.886,
764
+ 0.892,
765
+ 0.902
766
+ ],
767
+ "angle": 0,
768
+ "content": "When using Detail-Preserving Encoding (DPE) alone,"
769
+ }
770
+ ],
771
+ [
772
+ {
773
+ "type": "table",
774
+ "bbox": [
775
+ 0.082,
776
+ 0.09,
777
+ 0.467,
778
+ 0.228
779
+ ],
780
+ "angle": 0,
781
+ "content": "<table><tr><td>Layer</td><td>Kernel</td><td>Pl+1</td><td>AP</td><td>AP50</td><td>AP75</td><td>GFLOPs</td></tr><tr><td>-</td><td>-</td><td></td><td>38.7</td><td>57.4</td><td>41.8</td><td>200.59</td></tr><tr><td>conv</td><td>3 × 3</td><td></td><td>38.6</td><td>57.3</td><td>41.8</td><td>165.99</td></tr><tr><td>avg</td><td>3 × 3</td><td>✓</td><td>39.1</td><td>57.8</td><td>42.7</td><td>179.47</td></tr><tr><td>max</td><td>3 × 3</td><td>✓</td><td>39.1</td><td>57.9</td><td>42.5</td><td>179.47</td></tr><tr><td>conv</td><td>3 × 3</td><td>✓</td><td>39.3</td><td>58.2</td><td>42.8</td><td>182.62</td></tr><tr><td rowspan=\"3\">conv</td><td>3 × 3</td><td>✓</td><td>39.3</td><td>58.2</td><td>42.8</td><td>182.62</td></tr><tr><td>5 × 5</td><td>✓</td><td>39.2</td><td>58.1</td><td>42.8</td><td>188.23</td></tr><tr><td>7 × 7</td><td>✓</td><td>39.2</td><td>58.2</td><td>42.3</td><td>196.65</td></tr></table>"
782
+ },
783
+ {
784
+ "type": "table_caption",
785
+ "bbox": [
786
+ 0.077,
787
+ 0.235,
788
+ 0.468,
789
+ 0.264
790
+ ],
791
+ "angle": 0,
792
+ "content": "Table 2. Performance of different ways to generate Semantic Context Encoding (SCE) for classification branch on FCOS [43]."
793
+ },
794
+ {
795
+ "type": "text",
796
+ "bbox": [
797
+ 0.076,
798
+ 0.281,
799
+ 0.468,
800
+ 0.356
801
+ ],
802
+ "angle": 0,
803
+ "content": "we also observe consistent improvements on all basic detectors, especially on large objects. However, the gains are smaller than those from SCE, indicating that the feature context in the original decoupled-head design tends to lean toward the localization task."
804
+ },
805
+ {
806
+ "type": "text",
807
+ "bbox": [
808
+ 0.076,
809
+ 0.358,
810
+ 0.468,
811
+ 0.581
812
+ ],
813
+ "angle": 0,
814
+ "content": "Finally, we observe significant improvements on all metrics with our full model. The models with TSCODE consistently outperform the baseline models by \\(1.3\\sim 1.4\\) AP points, which is higher than the sum of the individual improvements. Benefiting from the decoupled feature contexts, the classification branch can learn richer semantic information to infer the category of objects, while the localization branch can benefit from more edge details to accurately predict the object boundaries. Moreover, our method also slightly reduces the computational cost, demonstrating the good efficiency of our design. We further visualize the classification loss when training FCOS [43] with and without TSCODE in Fig. 5. TSCODE can accelerate the training and contributes to better convergence. Similar results can be observed on the other basic detectors."
815
+ },
816
+ {
817
+ "type": "text",
818
+ "bbox": [
819
+ 0.076,
820
+ 0.584,
821
+ 0.468,
822
+ 0.901
823
+ ],
824
+ "angle": 0,
825
+ "content": "Semantic Context Encoding. Next, we further study the influence of different ways to conduct SCE with FCOS. The results are shown in Tab. 2. We first feed the classification branch with merely the downsampled \\( P_{l} \\). It is observed that this model achieves similar results to the baseline but with significantly less computation, indicating the feature redundancy in the classification branch. Next, we put the high-level \\( P_{l+1} \\) into SCE and vary the operation to down-sample \\( P_{l} \\). The kernel size is fixed to \\( 3 \\times 3 \\) and the stride is 2 for all operations. As observed in Tab. 2, average pooling and max pooling perform slightly worse than convolution, albeit with less computational cost. We assume that average pooling is inefficient in extracting sharp classification signals, while the sparse connections caused by max pooling harm the convergence of the models. Finally, we study the influence of the kernel size in convolutions, and a large kernel does not necessarily bring better results. We argue that a large kernel may include too many noisy signals, which harms the classification performance. However, the performance of the detector is still improved regardless of how SCE is generated."
826
+ },
827
+ {
828
+ "type": "table",
829
+ "bbox": [
830
+ 0.505,
831
+ 0.09,
832
+ 0.889,
833
+ 0.203
834
+ ],
835
+ "angle": 0,
836
+ "content": "<table><tr><td>Pl+2</td><td>Pl+1</td><td>Pl</td><td>Pl-1</td><td>AP</td><td>AP50</td><td>AP75</td><td>APS</td><td>APM</td><td>APL</td></tr><tr><td></td><td></td><td>✓</td><td></td><td>39.3</td><td>58.2</td><td>42.8</td><td>23.3</td><td>43.3</td><td>50.8</td></tr><tr><td></td><td>✓</td><td>✓</td><td></td><td>39.4</td><td>58.3</td><td>42.6</td><td>23.3</td><td>43.2</td><td>51.0</td></tr><tr><td></td><td>✓</td><td></td><td>✓</td><td>39.6</td><td>58.4</td><td>42.8</td><td>23.9</td><td>43.3</td><td>51.4</td></tr><tr><td></td><td></td><td>✓</td><td>✓</td><td>39.7</td><td>58.1</td><td>43.1</td><td>23.2</td><td>43.7</td><td>51.8</td></tr><tr><td></td><td>✓</td><td>✓</td><td>✓</td><td>40.0</td><td>58.7</td><td>43.1</td><td>23.7</td><td>44.0</td><td>51.8</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>39.9</td><td>58.6</td><td>43.2</td><td>23.1</td><td>43.7</td><td>52.1</td></tr></table>"
837
+ },
838
+ {
839
+ "type": "table_caption",
840
+ "bbox": [
841
+ 0.499,
842
+ 0.211,
843
+ 0.892,
844
+ 0.24
845
+ ],
846
+ "angle": 0,
847
+ "content": "Table 3. Performance of different ways to generate Detail-Preserving Encoding (DPE) for localization branch on FCOS [43]."
848
+ },
849
+ {
850
+ "type": "table",
851
+ "bbox": [
852
+ 0.504,
853
+ 0.253,
854
+ 0.889,
855
+ 0.503
856
+ ],
857
+ "angle": 0,
858
+ "content": "<table><tr><td>Method</td><td>AP</td><td>\\( AP_{50} \\)</td><td>\\( AP_{75} \\)</td><td>\\( AP_S \\)</td><td>\\( AP_M \\)</td><td>\\( AP_L \\)</td></tr><tr><td>FCOS [43]</td><td>38.7</td><td>57.4</td><td>41.8</td><td>22.9</td><td>42.5</td><td>50.1</td></tr><tr><td>FCOS (+ours)</td><td>40.0+1.3</td><td>58.7</td><td>43.1</td><td>23.7</td><td>44.0</td><td>51.8</td></tr><tr><td>ATSS [50]</td><td>39.4</td><td>57.6</td><td>42.8</td><td>23.6</td><td>42.9</td><td>50.3</td></tr><tr><td>ATSS (+ours)</td><td>40.8+1.4</td><td>59.0</td><td>44.4</td><td>23.7</td><td>44.6</td><td>52.7</td></tr><tr><td>GFL [24]</td><td>40.2</td><td>58.4</td><td>43.3</td><td>23.3</td><td>44.0</td><td>52.2</td></tr><tr><td>GFL (+ours)</td><td>41.6+1.4</td><td>59.8</td><td>44.9</td><td>23.8</td><td>45.3</td><td>54.8</td></tr><tr><td>AutoAssign [52]</td><td>40.4</td><td>59.6</td><td>43.7</td><td>22.7</td><td>44.1</td><td>52.9</td></tr><tr><td>AutoAssign (+ours)</td><td>41.1+0.7</td><td>60.2</td><td>44.1</td><td>23.0</td><td>45.0</td><td>54.2</td></tr><tr><td>DDOD [6]</td><td>41.6</td><td>59.9</td><td>45.2</td><td>23.9</td><td>44.9</td><td>54.4</td></tr><tr><td>DDOD (+ours)</td><td>42.4+0.8</td><td>60.2</td><td>46.3</td><td>24.5</td><td>45.5</td><td>56.0</td></tr><tr><td>DeFCN* [44]</td><td>41.4</td><td>59.5</td><td>45.6</td><td>26.1</td><td>44.9</td><td>52.0</td></tr><tr><td>DeFCN* (+ours)</td><td>42.3+0.9</td><td>60.7</td><td>46.7</td><td>27.0</td><td>45.5</td><td>53.6</td></tr><tr><td>OTA [11]</td><td>40.7</td><td>58.4</td><td>44.3</td><td>23.2</td><td>45.0</td><td>53.7</td></tr><tr><td>OTA (+ours)</td><td>41.5+0.8</td><td>59.2</td><td>45.3</td><td>23.4</td><td>45.2</td><td>55.7</td></tr><tr><td>DW [22]</td><td>41.5</td><td>59.8</td><td>44.8</td><td>23.4</td><td>44.9</td><td>54.8</td></tr><tr><td>DW (+ours)</td><td>42.0+0.5</td><td>60.3</td><td>45.1</td><td>24.1</td><td>45.5</td><td>56.0</td></tr></table>"
859
+ },
860
+ {
861
+ "type": "table_caption",
862
+ "bbox": [
863
+ 0.499,
864
+ 0.508,
865
+ 0.892,
866
+ 0.551
867
+ ],
868
+ "angle": 0,
869
+ "content": "Table 4. Applying TSCODE to various popular dense object detectors. A method with * is trained with the \\(3 \\times\\) schedule following its official repository."
870
+ },
871
+ {
872
+ "type": "text",
873
+ "bbox": [
874
+ 0.498,
875
+ 0.566,
876
+ 0.892,
877
+ 0.838
878
+ ],
879
+ "angle": 0,
880
+ "content": "Detail-Preserving Encoding. Finally, we investigate the efficacy of our DPE design by ablating the feature maps from different levels in Tab. 3, where SCE is used in all the models by default. Individually aggregating the high-level feature map \\( P_{l+1} \\) with \\( P_l \\) only achieves marginal improvements, but the incorporation of \\( P_{l-1} \\) contributes to more significant performance gains as more detail and edge information is obtained. Interestingly, the model with \\( P_{l+1} \\) and \\( P_{l-1} \\) outperforms the one with \\( P_{l+1} \\) and \\( P_l \\), which again demonstrates the importance of detail information. The model with all three feature maps achieves significant improvements, especially on large and medium objects. Note that the gains from three levels are greater than the sum of the individual gains, which means \\( P_{l+1} \\) could provide more benefit if there is adequate detail information. We further add \\( P_{l+2} \\) and observe a slight drop in the results, suggesting that too much environmental information may be useless or even harmful for bounding box regression."
881
+ },
882
+ {
883
+ "type": "title",
884
+ "bbox": [
885
+ 0.5,
886
+ 0.848,
887
+ 0.791,
888
+ 0.864
889
+ ],
890
+ "angle": 0,
891
+ "content": "4.2. Generality to Different Detectors"
892
+ },
893
+ {
894
+ "type": "text",
895
+ "bbox": [
896
+ 0.499,
897
+ 0.871,
898
+ 0.892,
899
+ 0.901
900
+ ],
901
+ "angle": 0,
902
+ "content": "To evaluate the generality of TSCODE, we further apply it to 8 recent popular one-stage detectors [6, 11, 22, 24, 43,"
903
+ }
904
+ ],
905
+ [
906
+ {
907
+ "type": "table",
908
+ "bbox": [
909
+ 0.123,
910
+ 0.086,
911
+ 0.842,
912
+ 0.53
913
+ ],
914
+ "angle": 0,
915
+ "content": "<table><tr><td>Method</td><td>Backbone</td><td>AP</td><td>\\( AP_{50} \\)</td><td>\\( AP_{75} \\)</td><td>\\( AP_S \\)</td><td>\\( AP_M \\)</td><td>\\( AP_L \\)</td><td>Reference</td></tr><tr><td>ATSS [50]</td><td>ResNet-101</td><td>43.6</td><td>62.1</td><td>47.4</td><td>26.1</td><td>47.0</td><td>53.6</td><td>CVPR20</td></tr><tr><td>PAA [21]</td><td>ResNet-101</td><td>44.8</td><td>63.3</td><td>48.7</td><td>26.5</td><td>48.8</td><td>56.3</td><td>ECCV20</td></tr><tr><td>GFL [24]</td><td>ResNet-101</td><td>45.0</td><td>63.7</td><td>48.9</td><td>27.2</td><td>48.8</td><td>54.5</td><td>NeurIPS20</td></tr><tr><td>GFLV2 [23]</td><td>ResNet-101</td><td>46.2</td><td>64.3</td><td>50.5</td><td>27.8</td><td>49.9</td><td>57.0</td><td>CVPR21</td></tr><tr><td>OTA [11]</td><td>ResNet-101</td><td>45.3</td><td>63.5</td><td>49.3</td><td>26.9</td><td>48.8</td><td>56.1</td><td>CVPR21</td></tr><tr><td>IQDet [30]</td><td>ResNet-101</td><td>45.1</td><td>63.4</td><td>49.3</td><td>26.7</td><td>48.5</td><td>56.6</td><td>CVPR21</td></tr><tr><td>ObjectBox [48]</td><td>ResNet-101</td><td>46.1</td><td>65.0</td><td>48.3</td><td>26.0</td><td>48.7</td><td>57.3</td><td>ECCV22</td></tr><tr><td>TSCODE (ours) + [24]</td><td>ResNet-101</td><td>46.7</td><td>65.1</td><td>50.8</td><td>28.2</td><td>50.3</td><td>57.7</td><td>-</td></tr><tr><td>ATSS [50]</td><td>ResNeXt-101-32x8d</td><td>45.1</td><td>63.9</td><td>49.1</td><td>27.9</td><td>48.2</td><td>54.6</td><td>CVPR20</td></tr><tr><td>PAA [21]</td><td>ResNeXt-101-64x4d</td><td>46.6</td><td>65.6</td><td>50.8</td><td>28.8</td><td>50.4</td><td>57.9</td><td>ECCV20</td></tr><tr><td>GFL [24]</td><td>ResNeXt-101-32x4d</td><td>46.0</td><td>65.1</td><td>50.1</td><td>28.2</td><td>49.6</td><td>56.0</td><td>NeurIPS20</td></tr><tr><td>GFLV2 [23]</td><td>ResNeXt-101-32x4d</td><td>47.2</td><td>65.7</td><td>51.7</td><td>29.1</td><td>50.8</td><td>58.2</td><td>CVPR21</td></tr><tr><td>OTA [11]</td><td>ResNeXt-101-64x4d</td><td>47.0</td><td>65.8</td><td>51.1</td><td>29.2</td><td>50.4</td><td>57.9</td><td>CVPR21</td></tr><tr><td>IQDet [30]</td><td>ResNeXt-101-64x4d</td><td>47.0</td><td>65.7</td><td>51.1</td><td>29.1</td><td>50.5</td><td>57.9</td><td>CVPR21</td></tr><tr><td>TSCODE (ours) + [24]</td><td>ResNeXt-101-32x4d</td><td>47.6</td><td>66.3</td><td>51.8</td><td>29.5</td><td>51.1</td><td>58.6</td><td>-</td></tr><tr><td>TSCODE (ours) + [24]</td><td>ResNeXt-101-64x4d</td><td>48.3</td><td>67.1</td><td>52.6</td><td>30.0</td><td>51.9</td><td>59.9</td><td>-</td></tr><tr><td>ATSS [50]</td><td>ResNeXt-101-32x8d-DCN</td><td>47.7</td><td>66.6</td><td>52.1</td><td>29.3</td><td>50.8</td><td>59.7</td><td>CVPR20</td></tr><tr><td>PAA [21]</td><td>ResNeXt-101-64x4d-DCN</td><td>49.0</td><td>67.8</td><td>53.3</td><td>30.2</td><td>52.8</td><td>62.2</td><td>ECCV20</td></tr><tr><td>GFL [24]</td><td>ResNeXt-101-32x4d-DCN</td><td>48.2</td><td>67.4</td><td>52.6</td><td>29.2</td><td>51.7</td><td>60.2</td><td>NeurIPS20</td></tr><tr><td>GFLV2 [23]</td><td>ResNeXt-101-32x4d-DCN</td><td>49.0</td><td>67.6</td><td>53.5</td><td>29.7</td><td>52.4</td><td>61.4</td><td>CVPR21</td></tr><tr><td>OTA [11]</td><td>ResNeXt-101-64x4d-DCN</td><td>49.2</td><td>67.6</td><td>53.5</td><td>30.0</td><td>52.5</td><td>62.3</td><td>CVPR21</td></tr><tr><td>IQDet [30]</td><td>ResNeXt-101-64x4d-DCN</td><td>49.0</td><td>67.5</td><td>53.1</td><td>30.0</td><td>52.3</td><td>62.0</td><td>CVPR21</td></tr><tr><td>TSCODE (ours) + 
[24]</td><td>ResNeXt-101-32x4d-DCN</td><td>50.0</td><td>68.5</td><td>54.6</td><td>31.0</td><td>53.4</td><td>62.6</td><td>-</td></tr><tr><td>TSCODE (ours) + [24]</td><td>ResNeXt-101-64x4d-DCN</td><td>50.8</td><td>69.3</td><td>55.3</td><td>31.7</td><td>54.2</td><td>63.7</td><td>-</td></tr></table>"
916
+ },
917
+ {
918
+ "type": "table_footnote",
919
+ "bbox": [
920
+ 0.076,
921
+ 0.533,
922
+ 0.895,
923
+ 0.577
924
+ ],
925
+ "angle": 0,
926
+ "content": "Table 5. Performance comparison with state-of-the-art detectors (single-model and single-scale results except the last row) on COCO test-dev. TSCODE consistently outperforms the strong baseline GFL [24] by \\(1.6 \\sim 1.8\\) AP and even outperforms its improved version GFLV2 [23]. DCN: Deformable Convolutional Network [8]."
927
+ },
928
+ {
929
+ "type": "text",
930
+ "bbox": [
931
+ 0.076,
932
+ 0.594,
933
+ 0.471,
934
+ 0.732
935
+ ],
936
+ "angle": 0,
937
+ "content": "44,50,52] and evaluate the performance on the COCO minival. As shown in Tab. 4, TSCODE consistently improves the performance of different baseline detectors by \\(0.5\\sim 1.4\\) AP points. Notably, TSCODE achieves improvements of 0.8 AP on the detector DDOD [6] which is designed with a specific disentanglement structure for the classification and localization tasks. Moreover, our method provides the latest detector DW [22] with a performance gain of 0.5 AP, which further demonstrates the efficacy of TSCODE."
938
+ },
939
+ {
940
+ "type": "text",
941
+ "bbox": [
942
+ 0.076,
943
+ 0.735,
944
+ 0.471,
945
+ 0.903
946
+ ],
947
+ "angle": 0,
948
+ "content": "Moreover, DPE can also be regarded as a simple feature fusion module if its output features are directly sent to the classification branch (without context decoupling). To this end, we compare it with the popular PAFPN [28]. They differ in two aspects: First, we leverage a semantic context augmentation from \\(P_{l}\\) to \\(P_{l-1}\\) before bottom-up aggregation (\\(P_{l-1}\\) back to \\(P_{l}\\)) which can enrich the feature representation and benefit the classification and localization tasks. Second, the weight of convolution downsampling is shared between all the head, which not only makes the detector parameter-efficient but improves the detection per"
949
+ },
950
+ {
951
+ "type": "text",
952
+ "bbox": [
953
+ 0.498,
954
+ 0.594,
955
+ 0.895,
956
+ 0.763
957
+ ],
958
+ "angle": 0,
959
+ "content": "formance. Third, for each pyramid level, we only aggregate the features from two neighboring levels as we assume that features in a lower level may be helpless for a higher level. We compare DPE and PAFPN on four basic detectors [10,24,43,50]. As observed in Tab. 6, our DPE achieves more performance improvements than PAFPN [28]. DPE outperforms the baselines by about \\(0.5\\sim 0.8\\) AP with negligible extra computational cost, but PAFPN only achieves marginal improvements (about 0.2), indicating the strong generality of our DPE. However, as the context is not decoupled, the improvements are still limited."
960
+ },
961
+ {
962
+ "type": "title",
963
+ "bbox": [
964
+ 0.499,
965
+ 0.771,
966
+ 0.808,
967
+ 0.789
968
+ ],
969
+ "angle": 0,
970
+ "content": "4.3. Comparisons with State-of-the-arts"
971
+ },
972
+ {
973
+ "type": "text",
974
+ "bbox": [
975
+ 0.498,
976
+ 0.795,
977
+ 0.892,
978
+ 0.884
979
+ ],
980
+ "angle": 0,
981
+ "content": "At last, we compare TSCODE with recent state-of-the-art detectors on COCO test-dev. Here we select GFL [24] as our baseline. Following common practice, we adopt \\( 2 \\times \\) schedule to train our models with standard multi-scale training strategy. The results are reported with single-model single-scale testing for all methods."
982
+ },
983
+ {
984
+ "type": "text",
985
+ "bbox": [
986
+ 0.518,
987
+ 0.886,
988
+ 0.892,
989
+ 0.902
990
+ ],
991
+ "angle": 0,
992
+ "content": "As shown in Tab. 5, our method improves the perfor"
993
+ }
994
+ ],
995
+ [
996
+ {
997
+ "type": "image",
998
+ "bbox": [
999
+ 0.077,
1000
+ 0.087,
1001
+ 0.891,
1002
+ 0.339
1003
+ ],
1004
+ "angle": 0,
1005
+ "content": null
1006
+ },
1007
+ {
1008
+ "type": "image_caption",
1009
+ "bbox": [
1010
+ 0.076,
1011
+ 0.349,
1012
+ 0.893,
1013
+ 0.379
1014
+ ],
1015
+ "angle": 0,
1016
+ "content": "Figure 6. Detection results before and after NMS predicted by FCOS [43] with and without TSCODE. TOP: Before NMS, the bounding boxes correspond to that in Fig. 1. Bottom: After NMS, the green boxes here mean ground-truth bounding boxes."
1017
+ },
1018
+ {
1019
+ "type": "text",
1020
+ "bbox": [
1021
+ 0.076,
1022
+ 0.396,
1023
+ 0.473,
1024
+ 0.578
1025
+ ],
1026
+ "angle": 0,
1027
+ "content": "mance of GFL to 46.7 AP and 47.6 AP with ResNet-101 [19] and ResNeXt-101-32x4d [46] backbones, respectively, outperforming all other methods [11, 21, 23, 30, 50]. When using ResNeXt-101-64x4d [46] backbone, the performance of TSCODE can be further improved to 48.3 AP. We also conduct more experiments by adopting Deformable Convolutional Networks (DCN) [8] to ResNeXt backbones. Following [49], we replace the standard convolution in the last layer before prediction with deformable convolutions. Without bells and whistles, TSCODE achieves the AP of 50.8 points, demonstrating the strong compatibility of our method with the advanced techniques in object detection."
1028
+ },
1029
+ {
1030
+ "type": "title",
1031
+ "bbox": [
1032
+ 0.077,
1033
+ 0.59,
1034
+ 0.262,
1035
+ 0.606
1036
+ ],
1037
+ "angle": 0,
1038
+ "content": "4.4. Qualitative Results"
1039
+ },
1040
+ {
1041
+ "type": "text",
1042
+ "bbox": [
1043
+ 0.076,
1044
+ 0.614,
1045
+ 0.471,
1046
+ 0.901
1047
+ ],
1048
+ "angle": 0,
1049
+ "content": "In Fig. 6, we qualitatively demonstrate how TSCODE helps detectors improve their performance. For simplicity, we take FCOS [43] as an example. In the typical decoupled-head design, the classification branch and the localization branch share the same input features. However, their different preferences for context causes severe competition. As demonstrated before, the feature context could lean to localization task, and thus detectors is prone to predict a poor classification confidence for the bounding box with high IoU. As a result, these high-quality bounding boxes is unlikely to survive after NMS. This phenomenon is more significant on objects with texture-less surfaces, e.g., dark couch (the \\(1^{\\mathrm{st}}\\) column) or shiny spoon (the \\(2^{\\mathrm{nd}}\\) column). Another example is the train (the \\(3^{\\mathrm{rd}}\\) column), where more global context is required to successfully recognize the train. However, it is difficult to accurately localize the boundary of the train from a coarse feature map as the typical decoupled head does. On the contrary, our TSCODE disentangles the feature context and generates feature inputs"
1050
+ },
1051
+ {
1052
+ "type": "table",
1053
+ "bbox": [
1054
+ 0.51,
1055
+ 0.393,
1056
+ 0.885,
1057
+ 0.589
1058
+ ],
1059
+ "angle": 0,
1060
+ "content": "<table><tr><td>Method</td><td>AP</td><td>\\( AP_{50} \\)</td><td>\\( AP_{75} \\)</td><td>GFLOPs</td></tr><tr><td>FCOS [43]</td><td>38.7</td><td>57.4</td><td>41.8</td><td>200.59</td></tr><tr><td>FCOS w/ PAFPN</td><td>38.7</td><td>57.4</td><td>41.7</td><td>206.49</td></tr><tr><td>FCOS w/ DPE</td><td>\\( 39.2_{+0.5} \\)</td><td>58.0</td><td>42.4</td><td>213.62</td></tr><tr><td>ATSS [50]</td><td>39.4</td><td>57.6</td><td>42.8</td><td>205.30</td></tr><tr><td>ATSS w/ PAFPN</td><td>39.6</td><td>58.3</td><td>42.2</td><td>211.20</td></tr><tr><td>ATSS w/ DPE</td><td>\\( 40.2_{+0.8} \\)</td><td>58.2</td><td>42.3</td><td>217.89</td></tr><tr><td>GFL [24]</td><td>40.2</td><td>58.4</td><td>43.3</td><td>208.39</td></tr><tr><td>GFL w/ PAFPN</td><td>40.4</td><td>58.6</td><td>43.6</td><td>214.29</td></tr><tr><td>GFL w/ DPE</td><td>\\( 40.8_{+0.6} \\)</td><td>58.8</td><td>43.9</td><td>220.99</td></tr><tr><td>TOOD [10]</td><td>42.4</td><td>59.8</td><td>46.1</td><td>-</td></tr><tr><td>TOOD w/ PAFPN</td><td>42.6</td><td>59.9</td><td>46.2</td><td>-</td></tr><tr><td>TOOD w/ DPE</td><td>\\( 43.0_{+0.6} \\)</td><td>60.3</td><td>46.4</td><td>-</td></tr></table>"
1061
+ },
1062
+ {
1063
+ "type": "table_caption",
1064
+ "bbox": [
1065
+ 0.499,
1066
+ 0.599,
1067
+ 0.893,
1068
+ 0.656
1069
+ ],
1070
+ "angle": 0,
1071
+ "content": "Table 6. Comparison of our DPE and PAFPN [28]. FLOPs are measured on the input image size of \\(1280 \\times 800\\). Since TOOD [10] contains complex deformable sampling [8], we don't report its computational cost."
1072
+ },
1073
+ {
1074
+ "type": "text",
1075
+ "bbox": [
1076
+ 0.499,
1077
+ 0.674,
1078
+ 0.892,
1079
+ 0.719
1080
+ ],
1081
+ "angle": 0,
1082
+ "content": "with specific feature context for each task. As a result, we can leverage more semantic context for classification and more detail and edge information for localization."
1083
+ },
1084
+ {
1085
+ "type": "title",
1086
+ "bbox": [
1087
+ 0.5,
1088
+ 0.738,
1089
+ 0.619,
1090
+ 0.754
1091
+ ],
1092
+ "angle": 0,
1093
+ "content": "5. Conclusion"
1094
+ },
1095
+ {
1096
+ "type": "text",
1097
+ "bbox": [
1098
+ 0.498,
1099
+ 0.765,
1100
+ 0.892,
1101
+ 0.901
1102
+ ],
1103
+ "angle": 0,
1104
+ "content": "In this paper, we have delved into the root causes of the inherent competition between classification and localization tasks and proposed a novel TSCODE to eliminate this competition. It decouples the semantic context of the two tasks through two efficient designs, SCE and DPE, and brings the features with richer semantic information for classification and with more edge information for localization. Extensive experiments on the MS COCO benchmark demonstrate the effectiveness the strong generality of TSCODE."
1105
+ }
1106
+ ],
1107
+ [
1108
+ {
1109
+ "type": "title",
1110
+ "bbox": [
1111
+ 0.08,
1112
+ 0.09,
1113
+ 0.174,
1114
+ 0.105
1115
+ ],
1116
+ "angle": 0,
1117
+ "content": "References"
1118
+ },
1119
+ {
1120
+ "type": "ref_text",
1121
+ "bbox": [
1122
+ 0.087,
1123
+ 0.116,
1124
+ 0.47,
1125
+ 0.186
1126
+ ],
1127
+ "angle": 0,
1128
+ "content": "[1] Sean Bell, C Lawrence Zitnick, Kavita Bala, and Ross Girshick. Inside-outside net: Detecting objects in context with skip pooling and recurrent neural networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2874-2883, 2016. 1"
1129
+ },
1130
+ {
1131
+ "type": "ref_text",
1132
+ "bbox": [
1133
+ 0.087,
1134
+ 0.187,
1135
+ 0.472,
1136
+ 0.243
1137
+ ],
1138
+ "angle": 0,
1139
+ "content": "[2] Zhaowei Cai and Nuno Vasconcelos. Cascade r-cnn: Delving into high quality object detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6154-6162, 2018. 2"
1140
+ },
1141
+ {
1142
+ "type": "ref_text",
1143
+ "bbox": [
1144
+ 0.087,
1145
+ 0.245,
1146
+ 0.47,
1147
+ 0.302
1148
+ ],
1149
+ "angle": 0,
1150
+ "content": "[3] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229. Springer, 2020. 2"
1151
+ },
1152
+ {
1153
+ "type": "ref_text",
1154
+ "bbox": [
1155
+ 0.087,
1156
+ 0.302,
1157
+ 0.47,
1158
+ 0.37
1159
+ ],
1160
+ "angle": 0,
1161
+ "content": "[4] Kai Chen, Jiaqi Wang, Jiangmiao Pang, Yuhang Cao, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jiarui Xu, et al. Mmdetection: Open mmlab detection toolbox and benchmark. arXiv preprint arXiv:1906.07155, 2019. 5"
1162
+ },
1163
+ {
1164
+ "type": "ref_text",
1165
+ "bbox": [
1166
+ 0.087,
1167
+ 0.373,
1168
+ 0.469,
1169
+ 0.429
1170
+ ],
1171
+ "angle": 0,
1172
+ "content": "[5] Qiang Chen, Yingming Wang, Tong Yang, Xiangyu Zhang, Jian Cheng, and Jian Sun. You only look one-level feature. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 13039-13048, 2021. 2"
1173
+ },
1174
+ {
1175
+ "type": "ref_text",
1176
+ "bbox": [
1177
+ 0.087,
1178
+ 0.43,
1179
+ 0.47,
1180
+ 0.5
1181
+ ],
1182
+ "angle": 0,
1183
+ "content": "[6] Zehui Chen, Chenhongyi Yang, Qiaofei Li, Feng Zhao, Zheng-Jun Zha, and Feng Wu. Disentangle your dense object detector. In Proceedings of the 29th ACM International Conference on Multimedia, pages 4939-4948, 2021. 1, 2, 3, 6, 7"
1184
+ },
1185
+ {
1186
+ "type": "ref_text",
1187
+ "bbox": [
1188
+ 0.087,
1189
+ 0.501,
1190
+ 0.47,
1191
+ 0.557
1192
+ ],
1193
+ "angle": 0,
1194
+ "content": "[7] Jifeng Dai, Yi Li, Kaiming He, and Jian Sun. R-fcn: Object detection via region-based fully convolutional networks. Advances in neural information processing systems, 29, 2016. 1"
1195
+ },
1196
+ {
1197
+ "type": "ref_text",
1198
+ "bbox": [
1199
+ 0.087,
1200
+ 0.559,
1201
+ 0.47,
1202
+ 0.615
1203
+ ],
1204
+ "angle": 0,
1205
+ "content": "[8] Jifeng Dai, Haozhi Qi, Yuwen Xiong, Yi Li, Guodong Zhang, Han Hu, and Yichen Wei. Deformable convolutional networks. In Proceedings of the IEEE international conference on computer vision, pages 764-773, 2017. 7, 8"
1206
+ },
1207
+ {
1208
+ "type": "ref_text",
1209
+ "bbox": [
1210
+ 0.087,
1211
+ 0.616,
1212
+ 0.47,
1213
+ 0.673
1214
+ ],
1215
+ "angle": 0,
1216
+ "content": "[9] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248–255. IEEE, 2009. 5"
1217
+ },
1218
+ {
1219
+ "type": "ref_text",
1220
+ "bbox": [
1221
+ 0.078,
1222
+ 0.673,
1223
+ 0.47,
1224
+ 0.743
1225
+ ],
1226
+ "angle": 0,
1227
+ "content": "[10] Chengjian Feng, Yujie Zhong, Yu Gao, Matthew R Scott, and Weilin Huang. Toed: Task-aligned one-stage object detection. In 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 3490-3499. IEEE Computer Society, 2021. 3, 7, 8"
1228
+ },
1229
+ {
1230
+ "type": "ref_text",
1231
+ "bbox": [
1232
+ 0.078,
1233
+ 0.744,
1234
+ 0.47,
1235
+ 0.813
1236
+ ],
1237
+ "angle": 0,
1238
+ "content": "[11] Zheng Ge, Songtao Liu, Zeming Li, Osamu Yoshie, and Jian Sun. Ota: Optimal transport assignment for object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 303-312, 2021. 3, 6, 7, 8"
1239
+ },
1240
+ {
1241
+ "type": "ref_text",
1242
+ "bbox": [
1243
+ 0.078,
1244
+ 0.814,
1245
+ 0.47,
1246
+ 0.857
1247
+ ],
1248
+ "angle": 0,
1249
+ "content": "[12] Zheng Ge, Songtao Liu, Feng Wang, Zeming Li, and Jian Sun. Yolox: Exceeding yolo series in 2021. arXiv preprint arXiv:2107.08430, 2021. 2"
1250
+ },
1251
+ {
1252
+ "type": "ref_text",
1253
+ "bbox": [
1254
+ 0.078,
1255
+ 0.858,
1256
+ 0.47,
1257
+ 0.902
1258
+ ],
1259
+ "angle": 0,
1260
+ "content": "[13] Golnaz Ghiasi, Tsung-Yi Lin, and Quoc V Le. Nas-fpn: Learning scalable feature pyramid architecture for object detection. In Proceedings of the IEEE/CVF conference on"
1261
+ },
1262
+ {
1263
+ "type": "list",
1264
+ "bbox": [
1265
+ 0.078,
1266
+ 0.116,
1267
+ 0.472,
1268
+ 0.902
1269
+ ],
1270
+ "angle": 0,
1271
+ "content": null
1272
+ },
1273
+ {
1274
+ "type": "ref_text",
1275
+ "bbox": [
1276
+ 0.533,
1277
+ 0.093,
1278
+ 0.892,
1279
+ 0.12
1280
+ ],
1281
+ "angle": 0,
1282
+ "content": "computer vision and pattern recognition, pages 7036-7045, 2019. 2"
1283
+ },
1284
+ {
1285
+ "type": "ref_text",
1286
+ "bbox": [
1287
+ 0.503,
1288
+ 0.122,
1289
+ 0.892,
1290
+ 0.164
1291
+ ],
1292
+ "angle": 0,
1293
+ "content": "[14] Ross Girshick. Fast r-cnn. In Proceedings of the IEEE international conference on computer vision, pages 1440-1448, 2015. 2"
1294
+ },
1295
+ {
1296
+ "type": "ref_text",
1297
+ "bbox": [
1298
+ 0.503,
1299
+ 0.166,
1300
+ 0.892,
1301
+ 0.236
1302
+ ],
1303
+ "angle": 0,
1304
+ "content": "[15] Ross Girshick, Jeff Donahue, Trevor Darrell, and Jitendra Malik. Rich feature hierarchies for accurate object detection and semantic segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 580-587, 2014. 1, 2"
1305
+ },
1306
+ {
1307
+ "type": "ref_text",
1308
+ "bbox": [
1309
+ 0.503,
1310
+ 0.238,
1311
+ 0.892,
1312
+ 0.309
1313
+ ],
1314
+ "angle": 0,
1315
+ "content": "[16] Benjamin Graham, Martin Engelcke, and Laurens Van Der Maaten. 3d semantic segmentation with submanifold sparse convolutional networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 9224-9232, 2018. 4"
1316
+ },
1317
+ {
1318
+ "type": "ref_text",
1319
+ "bbox": [
1320
+ 0.503,
1321
+ 0.311,
1322
+ 0.892,
1323
+ 0.366
1324
+ ],
1325
+ "angle": 0,
1326
+ "content": "[17] Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE international conference on computer vision, pages 2961-2969, 2017. 1, 2"
1327
+ },
1328
+ {
1329
+ "type": "ref_text",
1330
+ "bbox": [
1331
+ 0.503,
1332
+ 0.368,
1333
+ 0.892,
1334
+ 0.425
1335
+ ],
1336
+ "angle": 0,
1337
+ "content": "[18] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Spatial pyramid pooling in deep convolutional networks for visual recognition. IEEE transactions on pattern analysis and machine intelligence, 37(9):1904-1916, 2015. 1"
1338
+ },
1339
+ {
1340
+ "type": "ref_text",
1341
+ "bbox": [
1342
+ 0.503,
1343
+ 0.427,
1344
+ 0.892,
1345
+ 0.485
1346
+ ],
1347
+ "angle": 0,
1348
+ "content": "[19] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 5, 8"
1349
+ },
1350
+ {
1351
+ "type": "ref_text",
1352
+ "bbox": [
1353
+ 0.503,
1354
+ 0.485,
1355
+ 0.892,
1356
+ 0.543
1357
+ ],
1358
+ "angle": 0,
1359
+ "content": "[20] Borui Jiang, Ruixuan Luo, Jiayuan Mao, Tete Xiao, and Yun-ting Jiang. Acquisition of localization confidence for accurate object detection. In Proceedings of the European conference on computer vision (ECCV), pages 784–799, 2018. 3"
1360
+ },
1361
+ {
1362
+ "type": "ref_text",
1363
+ "bbox": [
1364
+ 0.503,
1365
+ 0.543,
1366
+ 0.892,
1367
+ 0.599
1368
+ ],
1369
+ "angle": 0,
1370
+ "content": "[21] Kang Kim and Hee Seok Lee. Probabilistic anchor assignment with iou prediction for object detection. In European Conference on Computer Vision, pages 355-371. Springer, 2020. 7, 8"
1371
+ },
1372
+ {
1373
+ "type": "ref_text",
1374
+ "bbox": [
1375
+ 0.503,
1376
+ 0.601,
1377
+ 0.892,
1378
+ 0.671
1379
+ ],
1380
+ "angle": 0,
1381
+ "content": "[22] Shuai Li, Chenhang He, Ruihuang Li, and Lei Zhang. A dual weighting label assignment scheme for object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9387-9396, 2022. 6, 7"
1382
+ },
1383
+ {
1384
+ "type": "ref_text",
1385
+ "bbox": [
1386
+ 0.503,
1387
+ 0.673,
1388
+ 0.892,
1389
+ 0.757
1390
+ ],
1391
+ "angle": 0,
1392
+ "content": "[23] Xiang Li, Wenhai Wang, Xiaolin Hu, Jun Li, Jinhui Tang, and Jian Yang. Generalized focal loss v2: Learning reliable localization quality estimation for dense object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11632-11641, 2021. 7, 8"
1393
+ },
1394
+ {
1395
+ "type": "ref_text",
1396
+ "bbox": [
1397
+ 0.503,
1398
+ 0.758,
1399
+ 0.892,
1400
+ 0.83
1401
+ ],
1402
+ "angle": 0,
1403
+ "content": "[24] Xiang Li, Wenhai Wang, Lijun Wu, Shuo Chen, Xiaolin Hu, Jun Li, Jinhui Tang, and Jian Yang. Generalized focal loss: Learning qualified and distributed bounding boxes for dense object detection. Advances in Neural Information Processing Systems, 33:21002-21012, 2020. 2, 3, 5, 6, 7, 8"
1404
+ },
1405
+ {
1406
+ "type": "ref_text",
1407
+ "bbox": [
1408
+ 0.503,
1409
+ 0.831,
1410
+ 0.892,
1411
+ 0.902
1412
+ ],
1413
+ "angle": 0,
1414
+ "content": "[25] Tsung-Yi Lin, Piotr Dollar, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Belongie. Feature pyramid networks for object detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2117-2125, 2017. 1, 2, 3"
1415
+ },
1416
+ {
1417
+ "type": "list",
1418
+ "bbox": [
1419
+ 0.503,
1420
+ 0.093,
1421
+ 0.892,
1422
+ 0.902
1423
+ ],
1424
+ "angle": 0,
1425
+ "content": null
1426
+ }
1427
+ ],
1428
+ [
1429
+ {
1430
+ "type": "ref_text",
1431
+ "bbox": [
1432
+ 0.08,
1433
+ 0.092,
1434
+ 0.47,
1435
+ 0.147
1436
+ ],
1437
+ "angle": 0,
1438
+ "content": "[26] Tsung-Yi Lin, Priya Goyal, Ross Girshick, Kaiming He, and Piotr Dólár. Focal loss for dense object detection. In Proceedings of the IEEE international conference on computer vision, pages 2980-2988, 2017. 1, 2, 3"
1439
+ },
1440
+ {
1441
+ "type": "ref_text",
1442
+ "bbox": [
1443
+ 0.081,
1444
+ 0.15,
1445
+ 0.47,
1446
+ 0.219
1447
+ ],
1448
+ "angle": 0,
1449
+ "content": "[27] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dálár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. 5"
1450
+ },
1451
+ {
1452
+ "type": "ref_text",
1453
+ "bbox": [
1454
+ 0.081,
1455
+ 0.221,
1456
+ 0.469,
1457
+ 0.276
1458
+ ],
1459
+ "angle": 0,
1460
+ "content": "[28] Shu Liu, Lu Qi, Haifang Qin, Jianping Shi, and Jiaya Jia. Path aggregation network for instance segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8759-8768, 2018. 2, 4, 7, 8"
1461
+ },
1462
+ {
1463
+ "type": "ref_text",
1464
+ "bbox": [
1465
+ 0.081,
1466
+ 0.278,
1467
+ 0.469,
1468
+ 0.332
1469
+ ],
1470
+ "angle": 0,
1471
+ "content": "[29] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang Fu, and Alexander C Berg. Ssd: Single shot multibox detector. In European conference on computer vision, pages 21-37. Springer, 2016. 2"
1472
+ },
1473
+ {
1474
+ "type": "ref_text",
1475
+ "bbox": [
1476
+ 0.081,
1477
+ 0.334,
1478
+ 0.469,
1479
+ 0.402
1480
+ ],
1481
+ "angle": 0,
1482
+ "content": "[30] Yuchen Ma, Songtao Liu, Zeming Li, and Jian Sun. Iqdet: Instance-wise quality distribution sampling for object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1717-1725, 2021. 7, 8"
1483
+ },
1484
+ {
1485
+ "type": "ref_text",
1486
+ "bbox": [
1487
+ 0.081,
1488
+ 0.405,
1489
+ 0.469,
1490
+ 0.487
1491
+ ],
1492
+ "angle": 0,
1493
+ "content": "[31] Roozbeh Mottaghi, Xianjie Chen, Xiaobai Liu, Nam-Gyu Cho, Seong-Whan Lee, Sanja Fidler, Raquel Urtasun, and Alan Yuille. The role of context for object detection and semantic segmentation in the wild. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2014. 1"
1494
+ },
1495
+ {
1496
+ "type": "ref_text",
1497
+ "bbox": [
1498
+ 0.081,
1499
+ 0.489,
1500
+ 0.469,
1501
+ 0.558
1502
+ ],
1503
+ "angle": 0,
1504
+ "content": "[32] Jiangmiao Pang, Kai Chen, Jianping Shi, Huajun Feng, Wanli Ouyang, and Dahua Lin. Libra r-cnn: Towards balanced learning for object detection. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 821-830, 2019. 2"
1505
+ },
1506
+ {
1507
+ "type": "ref_text",
1508
+ "bbox": [
1509
+ 0.081,
1510
+ 0.561,
1511
+ 0.469,
1512
+ 0.615
1513
+ ],
1514
+ "angle": 0,
1515
+ "content": "[33] Joseph Redmon, Santosh Divvala, Ross Girshick, and Ali Farhadi. You only look once: Unified, real-time object detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 779-788, 2016. 1, 2"
1516
+ },
1517
+ {
1518
+ "type": "ref_text",
1519
+ "bbox": [
1520
+ 0.081,
1521
+ 0.617,
1522
+ 0.469,
1523
+ 0.657
1524
+ ],
1525
+ "angle": 0,
1526
+ "content": "[34] Joseph Redmon and Ali Farhadi. Yolo9000: better, faster, stronger. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7263-7271, 2017. 2"
1527
+ },
1528
+ {
1529
+ "type": "ref_text",
1530
+ "bbox": [
1531
+ 0.081,
1532
+ 0.66,
1533
+ 0.469,
1534
+ 0.687
1535
+ ],
1536
+ "angle": 0,
1537
+ "content": "[35] Joseph Redmon and Ali Farhadi. Yolov3: An incremental improvement. arXiv preprint arXiv:1804.02767, 2018. 2"
1538
+ },
1539
+ {
1540
+ "type": "ref_text",
1541
+ "bbox": [
1542
+ 0.081,
1543
+ 0.689,
1544
+ 0.469,
1545
+ 0.743
1546
+ ],
1547
+ "angle": 0,
1548
+ "content": "[36] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems, 28, 2015. 1, 2"
1549
+ },
1550
+ {
1551
+ "type": "ref_text",
1552
+ "bbox": [
1553
+ 0.081,
1554
+ 0.746,
1555
+ 0.469,
1556
+ 0.814
1557
+ ],
1558
+ "angle": 0,
1559
+ "content": "[37] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In International Conference on Medical image computing and computer-assisted intervention, pages 234-241. Springer, 2015. 4"
1560
+ },
1561
+ {
1562
+ "type": "ref_text",
1563
+ "bbox": [
1564
+ 0.081,
1565
+ 0.817,
1566
+ 0.469,
1567
+ 0.871
1568
+ ],
1569
+ "angle": 0,
1570
+ "content": "[38] Rasmus Rothe, Matthieu Guillaumin, and Luc Van Gool. Non-maximum suppression for object detection by passing messages between windows. In Asian conference on computer vision, pages 290-306. Springer, 2014. 2"
1571
+ },
1572
+ {
1573
+ "type": "ref_text",
1574
+ "bbox": [
1575
+ 0.081,
1576
+ 0.874,
1577
+ 0.469,
1578
+ 0.901
1579
+ ],
1580
+ "angle": 0,
1581
+ "content": "[39] Bharat Singh and Larry S Davis. An analysis of scale invariance in object detection snip. In Proceedings of the"
1582
+ },
1583
+ {
1584
+ "type": "list",
1585
+ "bbox": [
1586
+ 0.08,
1587
+ 0.092,
1588
+ 0.47,
1589
+ 0.901
1590
+ ],
1591
+ "angle": 0,
1592
+ "content": null
1593
+ },
1594
+ {
1595
+ "type": "ref_text",
1596
+ "bbox": [
1597
+ 0.535,
1598
+ 0.093,
1599
+ 0.892,
1600
+ 0.12
1601
+ ],
1602
+ "angle": 0,
1603
+ "content": "IEEE conference on computer vision and pattern recognition, pages 3578-3587, 2018. 2"
1604
+ },
1605
+ {
1606
+ "type": "ref_text",
1607
+ "bbox": [
1608
+ 0.504,
1609
+ 0.122,
1610
+ 0.894,
1611
+ 0.176
1612
+ ],
1613
+ "angle": 0,
1614
+ "content": "[40] Guanglu Song, Yu Liu, and Xiaogang Wang. Revisiting the sibling head in object detector. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11563-11572, 2020. 1, 2, 3"
1615
+ },
1616
+ {
1617
+ "type": "ref_text",
1618
+ "bbox": [
1619
+ 0.504,
1620
+ 0.177,
1621
+ 0.894,
1622
+ 0.231
1623
+ ],
1624
+ "angle": 0,
1625
+ "content": "[41] Mingxing Tan, Ruoming Pang, and Quoc V Le. Efficientdet: Scalable and efficient object detection. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10781-10790, 2020. 2"
1626
+ },
1627
+ {
1628
+ "type": "ref_text",
1629
+ "bbox": [
1630
+ 0.504,
1631
+ 0.233,
1632
+ 0.892,
1633
+ 0.285
1634
+ ],
1635
+ "angle": 0,
1636
+ "content": "[42] Zhiyu Tan, Junyan Wang, Xiuyu Sun, Ming Lin, Hao Li, et al. Giraffedet: A heavy-neck paradigm for object detection. In International Conference on Learning Representations, 2021. 2"
1637
+ },
1638
+ {
1639
+ "type": "ref_text",
1640
+ "bbox": [
1641
+ 0.504,
1642
+ 0.288,
1643
+ 0.892,
1644
+ 0.342
1645
+ ],
1646
+ "angle": 0,
1647
+ "content": "[43] Zhi Tian, Chunhua Shen, Hao Chen, and Tong He. Fcos: Fully convolutional one-stage object detection. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9627-9636, 2019. 1, 2, 5, 6, 7, 8"
1648
+ },
1649
+ {
1650
+ "type": "ref_text",
1651
+ "bbox": [
1652
+ 0.504,
1653
+ 0.343,
1654
+ 0.892,
1655
+ 0.411
1656
+ ],
1657
+ "angle": 0,
1658
+ "content": "[44] Jianfeng Wang, Lin Song, Zeming Li, Hongbin Sun, Jian Sun, and Nanning Zheng. End-to-end object detection with fully convolutional network. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 15849-15858, 2021. 6"
1659
+ },
1660
+ {
1661
+ "type": "ref_text",
1662
+ "bbox": [
1663
+ 0.504,
1664
+ 0.413,
1665
+ 0.892,
1666
+ 0.48
1667
+ ],
1668
+ "angle": 0,
1669
+ "content": "[45] Yue Wu, Yinpeng Chen, Lu Yuan, Zicheng Liu, Lijuan Wang, Hongzhi Li, and Yun Fu. Rethinking classification and localization for object detection. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10186-10195, 2020. 1, 2, 3"
1670
+ },
1671
+ {
1672
+ "type": "ref_text",
1673
+ "bbox": [
1674
+ 0.504,
1675
+ 0.482,
1676
+ 0.892,
1677
+ 0.548
1678
+ ],
1679
+ "angle": 0,
1680
+ "content": "[46] Saining Xie, Ross Girshick, Piotr Dólár, Zhuowen Tu, and Kaiming He. Aggregated residual transformations for deep neural networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1492-1500, 2017. 8"
1681
+ },
1682
+ {
1683
+ "type": "ref_text",
1684
+ "bbox": [
1685
+ 0.504,
1686
+ 0.551,
1687
+ 0.892,
1688
+ 0.619
1689
+ ],
1690
+ "angle": 0,
1691
+ "content": "[47] Chenhongyi Yang, Zehao Huang, and Naiyan Wang. Querydet: Cascaded sparse query for accelerating high-resolution small object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13668-13677, 2022. 4"
1692
+ },
1693
+ {
1694
+ "type": "ref_text",
1695
+ "bbox": [
1696
+ 0.504,
1697
+ 0.621,
1698
+ 0.892,
1699
+ 0.66
1700
+ ],
1701
+ "angle": 0,
1702
+ "content": "[48] Mohsen Zand, Ali Etemad, and Michael Greenspan. Object-box: From centers to boxes for anchor-free object detection. arXiv preprint arXiv:2207.06985, 2022. 7"
1703
+ },
1704
+ {
1705
+ "type": "ref_text",
1706
+ "bbox": [
1707
+ 0.504,
1708
+ 0.662,
1709
+ 0.892,
1710
+ 0.716
1711
+ ],
1712
+ "angle": 0,
1713
+ "content": "[49] Haoyang Zhang, Ying Wang, Feras Dayoub, and Niko Sunderhauf. Varifocalnet: An iou-aware dense object detector. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8514-8523, 2021. 8"
1714
+ },
1715
+ {
1716
+ "type": "ref_text",
1717
+ "bbox": [
1718
+ 0.504,
1719
+ 0.717,
1720
+ 0.892,
1721
+ 0.798
1722
+ ],
1723
+ "angle": 0,
1724
+ "content": "[50] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z Li. Bridging the gap between anchor-based and anchor-free detection via adaptive training sample selection. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9759-9768, 2020. 2, 3, 5, 6, 7, 8"
1725
+ },
1726
+ {
1727
+ "type": "ref_text",
1728
+ "bbox": [
1729
+ 0.504,
1730
+ 0.8,
1731
+ 0.892,
1732
+ 0.841
1733
+ ],
1734
+ "angle": 0,
1735
+ "content": "[51] Benjin Zhu*, Feng Wang*, Jianfeng Wang, Siwei Yang, Jianhu Chen, and Zeming Li. cvpods: All-in-one toolbox for computer vision research, 2020. 5"
1736
+ },
1737
+ {
1738
+ "type": "ref_text",
1739
+ "bbox": [
1740
+ 0.504,
1741
+ 0.842,
1742
+ 0.892,
1743
+ 0.897
1744
+ ],
1745
+ "angle": 0,
1746
+ "content": "[52] Benjin Zhu, Jianfeng Wang, Zhengkai Jiang, Fuhang Zong, Songtao Liu, Zeming Li, and Jian Sun. Autoassign: Differentiable label assignment for dense object detection. arXiv preprint arXiv:2007.03496, 2020. 6"
1747
+ },
1748
+ {
1749
+ "type": "list",
1750
+ "bbox": [
1751
+ 0.504,
1752
+ 0.093,
1753
+ 0.894,
1754
+ 0.897
1755
+ ],
1756
+ "angle": 0,
1757
+ "content": null
1758
+ }
1759
+ ]
1760
+ ]
2303.01xxx/2303.01047/bb789d82-765d-47ed-82b3-a11998226e8d_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:28f8b3dadb4606f8a517e17aa4006c63e01447ea1627edf489f8ecedf11a040f
3
+ size 6282629
2303.01xxx/2303.01047/full.md ADDED
@@ -0,0 +1,256 @@
 
1
+ # Task-Specific Context Decoupling for Object Detection
2
+
3
+ Jiayuan Zhuang $^{1}$ Zheng Qin $^{1}$ Hao Yu $^{2}$ Xucan Chen $^{1}$
4
+
5
+ $^{1}$ National University of Defense Technology $^{2}$ Technical University of Munich
6
+
7
+ alpc111@163.com qinzheng12@nudt.edu.cn hao.yu@tum.de xcchen18@139.com
8
+
9
+ # Abstract
10
+
11
+ Classification and localization are two main sub-tasks in object detection. Nonetheless, these two tasks have inconsistent preferences for feature context, i.e., localization expects more boundary-aware features to accurately regress the bounding box, while more semantic context is preferred for object classification. Existing methods usually leverage disentangled heads to learn different feature context for each task. However, the heads are still applied on the same input features, which leads to an imperfect balance between classification and localization. In this work, we propose a novel Task-Specific COntext DEcoupling (TSCODE) head which further disentangles the feature encoding for the two tasks. For classification, we generate a spatially-coarse but semantically-strong feature encoding. For localization, we provide a high-resolution feature map containing more edge information to better regress object boundaries. TSCODE is plug-and-play and can be easily incorporated into existing detection pipelines. Extensive experiments demonstrate that our method consistently improves different detectors by over 1.0 AP with less computational cost. Our code and models will be publicly released.
12
+
13
+ # 1. Introduction
14
+
15
+ Object detection aims to recognize and localize objects existing in a natural image, which is a fundamental but challenging task in many computer vision applications. Recent advances in object detection have been dominated by deep learning-based methods [15, 18, 26, 33, 36], where the task is typically formulated as the classification of regions of interest and the localization of bounding boxes.
16
+
17
+ Semantic context encoded in the high-level semantic-rich features plays a crucial role in both the classification and localization tasks of object detection [1, 31]. Feature pyramid network (FPN) [25] distills the semantic context from the high-level semantic-rich features and fuses it into the low-level detail-preserving feature maps, where small-scale objects can be better detected. This design effectively provides more semantic context for early-stage features and facilitates detecting objects at various scales.
18
+
19
+ ![](images/4ccf1fe52c8b470cec41e5c9038f088e3377cea4f88bbb2b05c9b493c90c734a.jpg)
20
+
21
+ ![](images/db750b9af10cc38edcf3ef381f4366249fcb0e970638bf121564303111d1e677.jpg)
22
+ Figure 1. Top: Inference from original FCOS [43]. Bottom: Inference from FCOS with our TSCODE. Results are shown before NMS. The bounding box that has the highest IoU with the ground truth is shown in green, while the top-3 bounding-boxes with the highest classification scores are marked in other colors. The competition between the two tasks in FCOS can be clearly observed in the top figure, i.e., the bounding box with the best IoU (green) has lower classification confidence, while that with the best classification score (blue) has a poor bounding box prediction. Thanks to our TSCODE, the competition issue is addressed in the bottom figure, where the blue bounding box with the most confident classification prediction also has a great IoU with the ground truth.
23
+
24
+ Early works [17, 33, 36] usually tackle the detection problem by attaching a head network shared by the two tasks on each level of feature map for a specific scale of objects, though the two tasks are semantically misaligned, as noted later by Double-Head R-CNN [45] and TSD [40]. Typically, bounding box regression expects more texture details and edge information to accurately localize the object boundaries, while more semantic context is required to facilitate the classification of objects [6, 7]. Based on this insight, they propose to use two decoupled head branches for different tasks on each feature level from FPN. In this paper, we name this decoupling strategy the parameter decoupling, which relies on separate heads to encode task-specific semantic context from the same feature map.
25
+
26
+
27
+
28
+ However, we observe that disentangling classification and localization only at the parameter level leads to an imperfect trade-off between the two tasks. Fig. 1 (top) visualizes the top-3 (for a better view) bounding boxes with the highest confidence scores and the box that has the highest intersection-over-union (IoU) with the corresponding ground-truth table box before non-maximum suppression (NMS) [38] predicted by FCOS [43]. FCOS has already extracted sufficiently accurate boxes (see the green one), but they are suppressed during NMS due to relatively low confidence scores. We attribute this to the intrinsic competition between the two tasks: merely relying on the learning ability of head networks to provide the task-specific context from a shared feature map usually shows bias towards one task while suppressing the other. This phenomenon has been noticed in [6, 40], but no proper solution has been proposed. Some works [13, 28, 41, 42] attempt to incorporate more semantic context to improve the detection results. However, more is not always better, and there are still three problems: 1) The generality of these methods is limited and only a small number of detectors can benefit from them; 2) More computational overhead is introduced, which requires long training schedules and harms the inference efficiency; 3) More importantly, the essential problem still exists, i.e., the shared feature map is still jointly optimized for two tasks that compete with each other.
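The competition can be reproduced with a toy example. The boxes, scores and IoU threshold below are made up for illustration (they are not taken from the paper), but they show how standard hard NMS, here via `torchvision.ops`, discards the better-localized box simply because its classification score is lower:

```python
import torch
from torchvision.ops import nms, box_iou

# Hypothetical ground-truth box and two candidate predictions in (x1, y1, x2, y2).
gt = torch.tensor([[100., 100., 200., 200.]])
boxes = torch.tensor([
    [102., 102., 198., 198.],   # well-localized box, but low classification score
    [ 85.,  95., 195., 205.],   # sloppier box with a confident score
])
scores = torch.tensor([0.35, 0.80])

print(box_iou(boxes, gt).squeeze(1))      # ~0.92 vs ~0.75 IoU against the ground truth
keep = nms(boxes, scores, iou_threshold=0.6)
print(keep)                               # tensor([1]): only the confident, sloppier box survives
```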
29
+
30
+ To address these issues, we propose to directly disentangle the feature encoding for classification and localization, namely Task-Specific Context Decoupling (TSCODE for short), so that more suitable semantic context is selected for resolving specific tasks. For the classification branch, it usually demands features with rich semantic context contained in the picture to infer the category of objects, thus we adopt spatially-coarse but semantically-strong feature encoding for it. For the localization branch, it usually requires more boundary information around objects, thus we provide it with high-resolution feature map containing more edge information for finer regression of object boundaries. Moreover, the feature fusion in two branches is designed in an efficient fashion and further boosts the performance. Benefiting from our disentangled design, incoherent context preference of the two tasks is alleviated, which allows the detector to converge faster and achieve better performance. As shown in Fig. 1 (bottom), with TSCODE, the competition issue is addressed and the bounding box with the most confident classification prediction also has a great IoU with the ground truth.
31
+
32
+ Our main contributions are summarized as follows:
33
+
34
+ - We delve into the different roles of semantic contexts in classification and localization tasks and discover the source of their inherent conflict.
35
+
36
+
37
+
38
+ - We propose a novel TSCODE head to deal with the tangled context conflict, where different feature encodings with task-specific context can be generated.
39
+ - Extensive experiments show that TSCODE is plug-and-play and can easily boost the performance of existing detectors with even less computational cost.
40
+
41
+ # 2. Related Work
42
+
43
+ Object Detection. The current mainstream CNN-based object detectors can be divided into two-stage [17, 36] and one-stage [29, 33] methods. Most of them use the feature pyramid network (FPN) [25] to cope with the large-scale variation [39, 42] of objects. Compared with works [3, 5] that merely leverage a single feature map for detecting all the scale-varying objects, FPN takes advantage of the Convolutional Neural Network (CNN) hierarchy, i.e., it not only fuses detail-preserving low-level features and semantic-rich high-level features, but also distributes the classification and localization tasks of different objects to the corresponding feature maps according to their scale on images. Meanwhile, the insightful observation [28] that feature maps in low layers strongly respond to edges or instance parts manifests the necessity of augmenting a bottom-up path to propagate features with strong boundary information and enhance all features with reasonable localization capability. Recent works [41, 42] try to stack this feature fusion structure several times for sufficient information exchange between high-level and low-level features. These designs may be a compromise for the inaccurate localization information contained in high-level feature maps and the insufficient semantic context contained in low-level feature maps.
44
+
45
+ Decoupled head. The decoupled head has long been the standard configuration of one-stage detectors [24, 26, 43, 50]. Recent works, Double-Head R-CNN [45] and TSD [40], revisit the specialized sibling head that is widely used in the R-CNN family [2, 14, 15, 17, 32, 36] and figure out the essence of the task misalignment between classification and localization. YOLOX [12] also points out that the coupled detection head may harm the performance; it introduces the decoupled head to the YOLO family [33-35] for the first time, greatly improving the convergence speed and boosting the performance. Based on the decoupled head, DDOD [6] proposes to use deformable convolutions to learn a separate convolutional offset for each branch, aiming to adaptively select specific spatial features for each head. These works demonstrate the importance of decoupling the classification and localization tasks. However, as mentioned in Section 1, decoupling classification and localization only at the parameter level leads to an imperfect trade-off between the two tasks.
46
+
47
+ ![](images/a6bf0e29b012f77dff64c79136ac9cb2e44a4d4575231f402050dba6f18288e5.jpg)
48
+ Figure 2. An illustration of our novel Task-Specific Context Decoupling (TSCODE). The detector head at the $l^{th}$ pyramid level receives feature maps $P_{l+1}, P_l$ and $P_{l-1}$ from FPN [25]; TSCODE then further disentangles the feature encoding for the classification and localization tasks.
49
+
50
+ ![](images/c5fa428a3e357bb3ae64480eb61b2e683be6dbf42f5b5c1c789b07eba47e6cc3.jpg)
51
+
52
+ # 3. Method
53
+
54
+ # 3.1. Motivation and Framework
55
+
56
+ Classification and localization are two highly related but still contradictory tasks in object detection. For each object, classification is more coarse-grained and requires richer semantic context, while localization is rather fine-grained and demands more detailed boundary information. For this reason, mainstream detectors [11, 24, 26, 40, 45, 50] apply a decoupled head to cope with this conflict. Specifically, given a ground-truth object assigned to a specific pyramid level $l$ , with bounding box $\mathcal{B}$ and class label $c$ , the detectors with the typical decoupled head minimize the classification and localization loss based on the same feature map $P_l$ :
57
+
58
+ $$
59
+ \mathcal {L} = \mathcal {L} _ {c l s} \left(\mathcal {F} _ {c} \left(P _ {l}\right), c\right) + \mathcal {L} _ {l o c} \left(\mathcal {F} _ {r} \left(P _ {l}\right), \mathcal {B}\right), \tag {1}
60
+ $$
61
+
62
+ where $\mathcal{F}_c(\cdot) = \{f_{cls}(\cdot),\mathcal{C}(\cdot)\}$ , $\mathcal{F}_r(\cdot) = \{f_{loc}(\cdot),\mathcal{R}(\cdot)\}$ , are the classification and localization branches. $f_{cls}(\cdot)$ and $f_{loc}(\cdot)$ are the feature projection functions for classification and localization, while $\mathcal{C}(\cdot)$ and $\mathcal{R}(\cdot)$ are the final layers in two branches which decode features to classification scores and bounding box positions. In the common decoupled-head design, $f_{cls}(\cdot)$ and $f_{loc}(\cdot)$ share the same structure but are learned with separate parameters to provide each task with different feature contexts, i.e., parameter decoupling. However, this simplistic design cannot fully solve this problem as the semantic context has been largely determined by the shared input features $P_l$ . Although recent work [6] attempts to learn features with more flexible context for each task with deformable convolutions, the fact that they still originate from the same features, however, limits its effectiveness. Therefore, the conflict between classification and localization imposes opposite preferences of context in $P_l$ , leading to an imperfect balance between the two tasks.
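As a reference point, a minimal PyTorch sketch of this conventional parameter-decoupled head is given below. It is not the released implementation: the tower depth (4 convs) and width (256 channels) follow the common configuration mentioned later in Sec. 3.2, and the 4-channel box output is an illustrative anchor-free assumption. The key point is that both towers read the very same $P_l$.

```python
import torch
import torch.nn as nn

class DecoupledHead(nn.Module):
    """Typical decoupled head: separate parameters, but one shared input feature map P_l."""
    def __init__(self, in_ch=256, num_classes=80, num_convs=4):
        super().__init__()
        def tower():
            layers = []
            for _ in range(num_convs):
                layers += [nn.Conv2d(in_ch, in_ch, 3, padding=1), nn.ReLU(inplace=True)]
            return nn.Sequential(*layers)
        self.f_cls, self.f_loc = tower(), tower()                  # parameter decoupling only
        self.cls_out = nn.Conv2d(in_ch, num_classes, 3, padding=1)
        self.reg_out = nn.Conv2d(in_ch, 4, 3, padding=1)

    def forward(self, p_l):
        # Both tasks see the same feature context, which is the source of the conflict.
        return self.cls_out(self.f_cls(p_l)), self.reg_out(self.f_loc(p_l))

scores, deltas = DecoupledHead()(torch.randn(1, 256, 50, 76))
print(scores.shape, deltas.shape)   # torch.Size([1, 80, 50, 76]) torch.Size([1, 4, 50, 76])
```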
63
+
64
+ To address this issue, our TSCODE decouples the feature encoding for the two tasks at the source and leverages feature maps with different semantic context in the two branches. Instead of using $P_{l}$ as the common input, we feed the two branches with task-specific input features, i.e., $G_{l}^{cls}$ and $G_{l}^{loc}$. To this end, Eq. (1) can be written as:
65
+
66
+
67
+
68
+ $$
69
+ \mathcal {L} = \mathcal {L} _ {c l s} \left(\mathcal {F} _ {c} \left(G _ {l} ^ {c l s}\right), c\right) + \lambda \mathcal {L} _ {l o c} \left(\mathcal {F} _ {r} \left(G _ {l} ^ {l o c}\right), \mathcal {B}\right). \tag {2}
70
+ $$
71
+
72
+ For the classification branch, we generate spatially coarser but semantically richer feature maps, while for the localization branch, we provide feature maps that contain more detailed texture and boundary information.
73
+
74
+ As illustrated in Fig. 2, our method follows the common one-stage detection framework, which is composed of the backbone, the feature pyramid and the detection head. The backbone and feature pyramid generate multi-scale feature maps from the input images. Our TSCODE head then receives three levels of feature maps and generates decoupled feature maps for classification and localization. More importantly, TSCODE is plug-and-play and can be easily incorporated into most popular detectors, whether they are anchor-based or anchor-free.
75
+
76
+ # 3.2. Semantic Context Encoding for Classification
77
+
78
+ In object detection, classification is a relatively coarse-grained task which recognizes what a bounding box covers. On the one hand, as mentioned in [6, 10, 20, 40], the classification can often be pinned down by the features of the key or salient parts of an object, i.e., local focus. However, existing methods ignore that the salient areas could be sparsely distributed, indicating that there could be feature redundancy in the classification branch. We find in our experiments that feeding the classification branch with down-sampled feature maps causes almost no performance drop, but reduces the computational cost (results can be seen in Sec. 4.1).
79
+
80
+ ![](images/938b0d411e59bef5f8f5691f9aa5405982194bf150571247fce6c99b24fa1963.jpg)
81
+ Figure 3. Semantic context encoding for classification.
82
+
83
+ On the other hand, the category of an object could be inferred from its surrounding environment, e.g., a chair is more likely to co-occur with a table, or an occluded table can be recognized from a larger region. This means the global abstraction from a richer semantic context could facilitate the classification task.
84
+
85
+ Based on these insights, we devise Semantic Context Encoding (SCE) for efficient and accurate classification. Specifically, at each pyramid level $l$ , SCE leverages the feature maps from two levels, i.e., $P_{l}$ and $P_{l + 1}$ , to generate a semantically-rich feature map for classification. As shown in Fig. 3, we first downsample $P_{l}$ by a factor of 2 and concatenate it with $P_{l + 1}$ to generate the final $G_{l}^{cls}$ :
86
+
87
+ $$
88
+ G _ {l} ^ {c l s} = \operatorname {C o n c a t} \left(\mathrm {D C o n v} \left(P _ {l}\right), P _ {l + 1}\right), \tag {3}
89
+ $$
90
+
91
+ where $\mathrm{Concat}(\cdot)$ and $\mathrm{DConv}(\cdot)$ represent concatenation and a shared downsampling convolutional layer. Note that $G_{l}^{cls}$ is at $1/2$ the resolution of $P_{l}$. $G_{l}^{cls}$ is then passed into $\mathcal{F}_{c}(\cdot) = \{f_{cls}(\cdot), \mathcal{C}(\cdot)\}$ to predict the classification scores. Instead of using 4 convolutional layers with 256 channels, we adopt a shallow-but-wide design of $f_{cls}(\cdot)$ using 2 convolutional layers with 512 channels. We argue that this design can encode more semantic information than the deep-but-narrow one, requires little extra computational cost, and facilitates more accurate classification. Since $G_{l}^{cls}$ is downsampled by $2 \times$ relative to $P_{l}$, each location $(x,y)$ in $G_{l}^{cls}$ predicts the classification scores of its four nearest neighbors in $P_{l}$, denoted as $\tilde{C} \in \mathbb{R}^{H_{l+1} \times W_{l+1} \times 4N}$, where $N$ is the number of categories. $\tilde{C}$ is then rearranged to $\hat{C} \in \mathbb{R}^{H_{l} \times W_{l} \times N}$ to recover the resolution:
92
+
93
+ $$
94
+ \hat {C} [ 2 x + i, 2 y + j, c ] = \tilde {C} [ x, y, (2 i + j) c ], \forall i, j \in \{0, 1 \}. \tag {4}
95
+ $$
96
+
97
+ In this way, we not only leverage the sparsity of the salient features (from $P_l$ ), but also benefit from the rich semantic context in the higher pyramid level (from $P_{l + 1}$ ). This helps infer object categories more effectively, especially for texture-less objects or those with severe occlusion.
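A minimal PyTorch sketch of SCE following Eqs. (3) and (4) is given below. It is an approximation rather than the released implementation: `PixelShuffle` realizes the Eq. (4)-style rearrangement under the assumption that the $4N$ logits are grouped per spatial offset, and the $3 \times 3$ strided convolution matches the default downsampling choice reported in Tab. 2.

```python
import torch
import torch.nn as nn

class SCE(nn.Module):
    """Sketch of Semantic Context Encoding: coarse but semantically rich classification
    features built from P_l and P_{l+1} (Eq. 3), decoded back to full resolution (Eq. 4)."""
    def __init__(self, in_ch=256, num_classes=80):
        super().__init__()
        self.down = nn.Conv2d(in_ch, in_ch, 3, stride=2, padding=1)   # DConv in Eq. (3)
        self.f_cls = nn.Sequential(                                    # shallow-but-wide: 2 convs, 512 ch
            nn.Conv2d(2 * in_ch, 512, 3, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, 3, padding=1), nn.ReLU(inplace=True),
        )
        self.cls_out = nn.Conv2d(512, 4 * num_classes, 3, padding=1)   # 4N logits per coarse location
        self.rearrange = nn.PixelShuffle(2)                            # Eq. (4)-style rearrangement

    def forward(self, p_l, p_lp1):                 # p_l: (B,C,H,W), p_lp1: (B,C,H/2,W/2)
        g_cls = torch.cat([self.down(p_l), p_lp1], dim=1)              # Eq. (3)
        logits = self.cls_out(self.f_cls(g_cls))                       # (B, 4N, H/2, W/2)
        return self.rearrange(logits)                                  # (B, N, H, W)

p4, p5 = torch.randn(1, 256, 100, 152), torch.randn(1, 256, 50, 76)
print(SCE()(p4, p5).shape)   # torch.Size([1, 80, 100, 152])
```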
98
+
99
+ # 3.3. Detail-Preserving Encoding for Localization
100
+
101
+ Unlike classification, localization is a more fine-grained task which relies on more texture details and boundary information to predict the corners of an object.
102
+
103
+ ![](images/c24c6b3dacfd74b1414432f9d327f21c9db373c4077ba6aecb5f7c92a454dd22.jpg)
104
+ Figure 4. Detail-preserving encoding for localization.
105
+
106
+ However, existing methods usually regress the object corners from a single-scale feature map $P_{l}$. The feature maps in lower pyramid levels have a stronger response to the contours, edges and detailed textures of objects. This can further benefit the localization task but often comes with huge extra computational cost [28, 47]. QueryDet [47] uses sparse convolution [16] to reduce the computational cost on low-level feature maps, but it still requires an extra auxiliary branch with a specific supervision signal. On the other hand, we further note that high-level feature maps are also important for localization as they help see the whole object as completely as possible, which provides more information to infer the overall shape of objects.
107
+
108
+ Based on these observations, we propose Detail-Preserving Encoding (DPE) to achieve accurate localization without sacrificing efficiency. At each pyramid level $l$ , our DPE accepts the feature maps from three pyramid levels, i.e., $P_{l - 1}$ , $P_l$ and $P_{l + 1}$ . $P_{l - 1}$ provides more detail and edge features while $P_{l + 1}$ provides a more thorough perspective of objects. We demonstrate that each pyramid level is mainly related to the two neighboring levels and farther levels could even harm the performance. The structure of DPE is shown in Fig. 4. For computational efficiency, we adopt a simplified U-Net [37] to fuse $P_{l - 1}$ and $P_{l + 1}$ . $P_l$ is first upsampled by a factor of 2 and then aggregated with $P_{l - 1}$ . A $3\times 3$ convolutional layer with a stride of 2 then downsamples it to the resolution of $P_l$ . This design effectively preserves the detail information in $P_{l - 1}$ with little extra computational cost. At last, $P_{l + 1}$ is upsampled and aggregated to generate the final $G_l^{loc}$ . The computation can be written as:
109
+
110
+ $$
111
+ G_{l}^{loc} = P_{l} + \mu \left(P_{l + 1}\right) + \mathrm{DConv}\left(\mu \left(P_{l}\right) + P_{l - 1}\right) \tag{5}
112
+ $$
113
+
114
+ where $\mu (\cdot)$ represents upsampling and $\mathrm{DConv}(\cdot)$ is another shared downsampling convolutional layer. Specifically, we compute $G_{3}^{loc}$ with $C_2$ , $P_{3}$ and $P_{4}$ , as computing $P_{2}$ through FPN induces huge computational cost. Afterwards, $\mathcal{F}_r(\cdot) = \{f_{loc}(\cdot),\mathcal{R}(\cdot)\}$ further predicts the bounding boxes in the $l^{\mathrm{th}}$ pyramid level based on $G_{l}^{loc}$ .
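A corresponding sketch of DPE following Eq. (5) is shown below. Nearest-neighbor upsampling for $\mu(\cdot)$ and the exact form of the shared stride-2 convolution are assumptions consistent with the description above, not the authors' released code.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class DPE(nn.Module):
    """Sketch of Detail-Preserving Encoding (Eq. 5): fold detail from P_{l-1} and
    context from P_{l+1} into a localization feature map at the stride of P_l."""
    def __init__(self, ch=256):
        super().__init__()
        self.down = nn.Conv2d(ch, ch, 3, stride=2, padding=1)   # shared DConv in Eq. (5)

    @staticmethod
    def up(x):                                                   # mu(.) in Eq. (5)
        return F.interpolate(x, scale_factor=2, mode="nearest")

    def forward(self, p_lm1, p_l, p_lp1):
        detail = self.down(self.up(p_l) + p_lm1)                 # detail branch back to stride of P_l
        return p_l + self.up(p_lp1) + detail                     # Eq. (5)

p3 = torch.randn(1, 256, 200, 304)
p4 = torch.randn(1, 256, 100, 152)
p5 = torch.randn(1, 256, 50, 76)
print(DPE()(p3, p4, p5).shape)   # torch.Size([1, 256, 100, 152])
```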
115
+
116
+ <table><tr><td>Method</td><td>SCE</td><td>DPE</td><td>AP</td><td>\( AP_{50} \)</td><td>\( AP_{75} \)</td><td>\( AP_S \)</td><td>\( AP_M \)</td><td>\( AP_L \)</td><td>\( AR_S \)</td><td>\( AR_M \)</td><td>\( AR_L \)</td><td>GFLOPs</td></tr><tr><td rowspan="4">FCOS [43]</td><td rowspan="3">✓</td><td></td><td>38.7</td><td>57.4</td><td>41.8</td><td>22.9</td><td>42.5</td><td>50.1</td><td>36.8</td><td>61.7</td><td>73.2</td><td>200.59</td></tr><tr><td></td><td>\( 39.3_{+0.6} \)</td><td>58.2</td><td>42.8</td><td>23.3</td><td>43.3</td><td>50.8</td><td>37.5</td><td>62.1</td><td>73.2</td><td>182.62</td></tr><tr><td>✓</td><td>\( 38.9_{+0.2} \)</td><td>57.5</td><td>41.8</td><td>22.9</td><td>42.8</td><td>50.3</td><td>37.0</td><td>62.4</td><td>73.2</td><td>213.19</td></tr><tr><td>✓</td><td>✓</td><td>\( 40.0_{+1.3} \)</td><td>58.7</td><td>43.1</td><td>23.7</td><td>44.0</td><td>51.8</td><td>38.0</td><td>62.7</td><td>73.3</td><td>195.22</td></tr><tr><td rowspan="4">ATSS [50]</td><td rowspan="3">✓</td><td></td><td>39.4</td><td>57.6</td><td>42.8</td><td>23.6</td><td>42.9</td><td>50.3</td><td>38.2</td><td>63.5</td><td>73.6</td><td>205.30</td></tr><tr><td></td><td>\( 40.2_{+0.8} \)</td><td>58.6</td><td>43.8</td><td>23.9</td><td>44.0</td><td>52.2</td><td>38.9</td><td>63.7</td><td>74.0</td><td>187.35</td></tr><tr><td>✓</td><td>\( 39.8_{+0.4} \)</td><td>57.8</td><td>42.9</td><td>23.7</td><td>43.1</td><td>50.9</td><td>38.0</td><td>64.0</td><td>75.3</td><td>217.89</td></tr><tr><td>✓</td><td>✓</td><td>\( 40.8_{+1.4} \)</td><td>59.0</td><td>44.4</td><td>23.7</td><td>44.6</td><td>52.7</td><td>38.7</td><td>64.4</td><td>75.4</td><td>199.94</td></tr><tr><td rowspan="4">GFL [24]</td><td rowspan="3">✓</td><td></td><td>40.2</td><td>58.4</td><td>43.3</td><td>23.3</td><td>44.0</td><td>52.2</td><td>38.0</td><td>62.9</td><td>74.1</td><td>208.39</td></tr><tr><td></td><td>\( 41.1_{+0.9} \)</td><td>59.3</td><td>44.5</td><td>24.0</td><td>44.6</td><td>53.8</td><td>37.7</td><td>63.4</td><td>75.2</td><td>190.44</td></tr><tr><td>✓</td><td>\( 40.6_{+0.4} \)</td><td>58.4</td><td>43.8</td><td>23.5</td><td>44.2</td><td>53.2</td><td>37.2</td><td>63.9</td><td>74.6</td><td>220.99</td></tr><tr><td>✓</td><td>✓</td><td>\( 41.6_{+1.4} \)</td><td>59.8</td><td>44.9</td><td>23.8</td><td>45.3</td><td>54.8</td><td>38.4</td><td>64.2</td><td>75.0</td><td>203.04</td></tr></table>
117
+
118
+ Table 1. Ablation studies on COCO mini-val set. SCE stands for using of Semantic Context Encoding on classification; DPE stands for using of Detail-Preserving Encoding on localization.
119
+
120
+ # 4. Experiments
121
+
122
+ Dataset and Evaluation Metric. Our experiments are conducted on the large-scale detection benchmark MSCOCO 2017 [27]. Following common practice [24, 43, 50], we use COCO trainval35k split (115K images) for training and minival split (5K images) for validation. We report our main results and compare with previous detectors on the test-dev split (20K images) by uploading the detection results to the evaluation server.
123
+
124
+ Implementation and Training Details. We implement our TSCODE with MMDetection [4] and cvpods [51]. Unless otherwise noted, we utilize a ResNet-50 [19] backbone network which is pretrained on ImageNet [9]. As TSCODE can be applied as a plug-and-play head to other basic detectors, we follow the original settings in the basic detectors (including loss function, bounding box parameterization, label assignment strategy and hyper-parameters) and merely replace the head part with TSCODE. The input images are resized to a maximum scale of $1333 \times 800$ during testing without changing the aspect ratio. The computational cost (measured in FLOPs) is calculated under the input size of $1280 \times 800$ . We set the batch size to 16 and use 8 V100 GPUs (2 images per GPU) to train our models. Following common practice, we apply the standard $1 \times$ training schedule in the ablation studies. The learning rate starts from 0.01 and is decayed by a factor of 10 after epochs 8 and 11, respectively. We also compare our method with state-of-the-art detectors with different backbone networks, where we adopt the $2 \times$ training schedule and multi-scale training. Please refer to Sec. 4.3 for more details.
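For reproduction, the schedule above roughly corresponds to the following MMDetection-style snippet. Momentum, weight decay and the warmup settings are the toolbox defaults rather than values stated in this paper, so treat it as an approximate sketch of the $1 \times$ configuration rather than the actual released config.

```python
# Approximate 1x schedule: SGD with lr 0.01, decayed 10x after epochs 8 and 11,
# 12 epochs in total, 2 images per GPU on 8 GPUs (batch size 16).
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
lr_config = dict(policy='step', warmup='linear', warmup_iters=500,
                 warmup_ratio=0.001, step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
data = dict(samples_per_gpu=2, workers_per_gpu=2)
```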
125
+
126
+ # 4.1. Ablation Studies
127
+
128
+ We first conduct extensive ablation studies to evaluate the efficacy of our design on COCO minival. We use ResNet-50 as the backbone network and all the models are trained for 12 epochs following $1 \times$ schedule.
129
+
130
+ ![](images/9cbe7a719300cd4f1b984b9b786fb35fe9550637fab54521bf65cb0ec7b688e5.jpg)
131
+ Figure 5. Comparison of classification training loss between FCOS with and without TSCODE. TSCODE can accelerate the training and contributes to better convergence.
132
+
133
+ Effects of Individual Components. We first study the effectiveness of each component of TSCODE. In the experiments, we ablate our method with three basic detectors, i.e., FCOS [43], ATSS [50] and GFL [24], to evaluate the generality of our designs. For fair comparison, we retrain the baseline models strictly following the original papers with similar or slightly better results.
134
+
135
+ As demonstrated in Tab. 1, applying only Semantic Context Encoding (SCE) improves the AP of the baseline models by $0.6 \sim 0.9$ points, while reducing the computational cost (FLOPs) by $9\%$. Note that more significant improvements are observed for large and medium objects. This is consistent with our motivation to leverage more semantic context for the classification task. Moreover, better classification accuracy also facilitates finding more objects, and thus SCE achieves better AR results on all the basic detectors.
136
+
137
+ When using Detail-Preserving Encoding (DPE) alone, we also observe consistent improvements on all basic detectors, especially on large objects.
138
+
139
+ <table><tr><td>Layer</td><td>Kernel</td><td>Pl+1</td><td>AP</td><td>AP50</td><td>AP75</td><td>GFLOPs</td></tr><tr><td>-</td><td>-</td><td></td><td>38.7</td><td>57.4</td><td>41.8</td><td>200.59</td></tr><tr><td>conv</td><td>3 × 3</td><td></td><td>38.6</td><td>57.3</td><td>41.8</td><td>165.99</td></tr><tr><td>avg</td><td></td><td>✓</td><td>39.1</td><td>57.8</td><td>42.7</td><td>179.47</td></tr><tr><td>max</td><td>3 × 3</td><td>✓</td><td>39.1</td><td>57.9</td><td>42.5</td><td>179.47</td></tr><tr><td>conv</td><td></td><td>✓</td><td>39.3</td><td>58.2</td><td>42.8</td><td>182.62</td></tr><tr><td rowspan="3">conv</td><td>3 × 3</td><td>✓</td><td>39.3</td><td>58.2</td><td>42.8</td><td>182.62</td></tr><tr><td>5 × 5</td><td>✓</td><td>39.2</td><td>58.1</td><td>42.8</td><td>188.23</td></tr><tr><td>7 × 7</td><td>✓</td><td>39.2</td><td>58.2</td><td>42.3</td><td>196.65</td></tr></table>
140
+
141
+ However, the gains are less than those from SCE, indicating that the feature context in the original decoupled-head design tends to lean towards the localization task.
142
+
143
+ At last, we observe significant improvements on all metrics with our full model. The models with TSCODE consistently outperform the baseline models by $1.3\sim 1.4$ AP points, which is higher than the sum of the individual improvements. Benefiting from the decoupled feature contexts, the classification branch can learn richer semantic information to infer the category of objects, while the localization branch can benefit from more edge details to accurately predict the object boundaries. Moreover, our method also slightly reduces the computational cost, demonstrating the good efficiency of our design. We further visualize the classification loss when training FCOS [43] with and without TSCODE in Fig. 5. TSCODE can accelerate the training and contributes to better convergence. Similar results can be observed in the other basic detectors.
144
+
145
+ Semantic Context Encoding. Next, we further study the influence of different ways to conduct SCE with FCOS. The results are shown in Tab. 2. We first feed the classification branch with merely the downsampled $P_{l}$ . It is observed that this model achieves similar results to the baseline but with significantly less computation, indicating the feature redundancy in the classification branch. Next, we put the high-level $P_{l+1}$ into SCE and vary the operation to down-sample $P_{l}$ . The kernel size is fixed to $3 \times 3$ and the stride is 2 for all operations. As observed in Tab. 2, average pooling and max pooling perform slightly worse than convolution, albeit with less computational cost. We assume that average pooling is inefficient in extracting sharp classification signals, while the sparse connections caused by max pooling harm the convergence of the models. At last, we study the influence of the kernel size of the convolution, and find that a larger kernel does not necessarily bring better results. We argue that a large kernel may include too many noisy signals, which harms the classification performance. However, the performance of the detector is still improved regardless of how SCE is generated.
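The three downsampling variants in Tab. 2 map onto standard PyTorch operators as in the short sketch below (the channel width is illustrative); all of them halve the spatial size of $P_l$, but only the strided convolution has learnable weights:

```python
import torch
import torch.nn as nn

x = torch.randn(1, 256, 100, 152)                          # a P_l-sized feature map
down_ops = {
    "avg":  nn.AvgPool2d(3, stride=2, padding=1),
    "max":  nn.MaxPool2d(3, stride=2, padding=1),
    "conv": nn.Conv2d(256, 256, 3, stride=2, padding=1),   # the variant adopted for SCE
}
for name, op in down_ops.items():
    print(name, tuple(op(x).shape))                         # all yield (1, 256, 50, 76)
```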
146
+
147
+ Table 2. Performance of different ways to generate Semantic Context Encoding (SCE) for classification branch on FCOS [43].
148
+
149
+ <table><tr><td>Pl+2</td><td>Pl+1</td><td>Pl</td><td>P1-1</td><td>AP</td><td>AP50</td><td>AP75</td><td>APS</td><td>APM</td><td>APL</td></tr><tr><td></td><td></td><td>✓</td><td></td><td>39.3</td><td>58.2</td><td>42.8</td><td>23.3</td><td>43.3</td><td>50.8</td></tr><tr><td></td><td>✓</td><td>✓</td><td></td><td>39.4</td><td>58.3</td><td>42.6</td><td>23.3</td><td>43.2</td><td>51.0</td></tr><tr><td></td><td>✓</td><td></td><td>✓</td><td>39.6</td><td>58.4</td><td>42.8</td><td>23.9</td><td>43.3</td><td>51.4</td></tr><tr><td></td><td></td><td>✓</td><td>✓</td><td>39.7</td><td>58.1</td><td>43.1</td><td>23.2</td><td>43.7</td><td>51.8</td></tr><tr><td></td><td>✓</td><td>✓</td><td>✓</td><td>40.0</td><td>58.7</td><td>43.1</td><td>23.7</td><td>44.0</td><td>51.8</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>39.9</td><td>58.6</td><td>43.2</td><td>23.1</td><td>43.7</td><td>52.1</td></tr></table>
150
+
151
+ Table 3. Performance of different ways to generate Detail-Preserving Encoding (DPE) for localization branch on FCOS [43].
152
+
153
+ <table><tr><td>Method</td><td>AP</td><td>\( AP_{50} \)</td><td>\( AP_{75} \)</td><td>\( AP_S \)</td><td>\( AP_M \)</td><td>\( AP_L \)</td></tr><tr><td>FCOS [43]</td><td>38.7</td><td>57.4</td><td>41.8</td><td>22.9</td><td>42.5</td><td>50.1</td></tr><tr><td>FCOS (+ours)</td><td>40.0+1.3</td><td>58.7</td><td>43.1</td><td>23.7</td><td>44.0</td><td>51.8</td></tr><tr><td>ATSS [50]</td><td>39.4</td><td>57.6</td><td>42.8</td><td>23.6</td><td>42.9</td><td>50.3</td></tr><tr><td>ATSS (+ours)</td><td>40.8+1.4</td><td>59.0</td><td>44.4</td><td>23.7</td><td>44.6</td><td>52.7</td></tr><tr><td>GFL [24]</td><td>40.2</td><td>58.4</td><td>43.3</td><td>23.3</td><td>44.0</td><td>52.2</td></tr><tr><td>GFL (+ours)</td><td>41.6+1.4</td><td>59.8</td><td>44.9</td><td>23.8</td><td>45.3</td><td>54.8</td></tr><tr><td>AutoAssign [52]</td><td>40.4</td><td>59.6</td><td>43.7</td><td>22.7</td><td>44.1</td><td>52.9</td></tr><tr><td>AutoAssign (+ours)</td><td>41.1+0.7</td><td>60.2</td><td>44.1</td><td>23.0</td><td>45.0</td><td>54.2</td></tr><tr><td>DDOD [6]</td><td>41.6</td><td>59.9</td><td>45.2</td><td>23.9</td><td>44.9</td><td>54.4</td></tr><tr><td>DDOD (+ours)</td><td>42.4+0.8</td><td>60.2</td><td>46.3</td><td>24.5</td><td>45.5</td><td>56.0</td></tr><tr><td>DeFCN* [44]</td><td>41.4</td><td>59.5</td><td>45.6</td><td>26.1</td><td>44.9</td><td>52.0</td></tr><tr><td>DeFCN* (+ours)</td><td>42.3+0.9</td><td>60.7</td><td>46.7</td><td>27.0</td><td>45.5</td><td>53.6</td></tr><tr><td>OTA [11]</td><td>40.7</td><td>58.4</td><td>44.3</td><td>23.2</td><td>45.0</td><td>53.7</td></tr><tr><td>OTA (+ours)</td><td>41.5+0.8</td><td>59.2</td><td>45.3</td><td>23.4</td><td>45.2</td><td>55.7</td></tr><tr><td>DW [22]</td><td>41.5</td><td>59.8</td><td>44.8</td><td>23.4</td><td>44.9</td><td>54.8</td></tr><tr><td>DW (+ours)</td><td>42.0+0.5</td><td>60.3</td><td>45.1</td><td>24.1</td><td>45.5</td><td>56.0</td></tr></table>
154
+
155
+ Table 4. Applying TSCODE into various popular dense object detectors. Method with * means training for $3 \times$ schedule follow its official repository.
156
+
157
+ Detail-Preserving Encoding. At last, we investigate the efficacy of our design of DPE by ablating the feature maps from different levels in Tab. 3, where SCE is used in all the models by default. Individually aggregating the high-level feature map $P_{l+1}$ with $P_l$ only achieves marginal improvements, but the incorporation of $P_{l-1}$ contributes to more significant performance gains as more detail and edge information is obtained. Interestingly, the model with $P_{l+1}$ and $P_{l-1}$ outperforms the one with $P_{l+1}$ and $P_l$ , which again demonstrates the importance of detail information. The model with all three feature maps achieves significant improvements, especially on large and medium objects. Note that the gains from three levels are greater than the sum of the individual gains, which means $P_{l+1}$ could provide more benefit if there is adequate detail information. Finally, we further add $P_{l+2}$ and find a slight decay in the results, suggesting that too much environmental information may be useless or even harmful for bounding box regression.
158
+
159
+ # 4.2. Generality to Different Detectors
160
+
161
+ To evaluate the generality of TSCODE, we further apply it to 8 recent popular one-stage detectors [6, 11, 22, 24, 43, 44, 50, 52] and evaluate the performance on the COCO minival split.
162
+
163
+ <table><tr><td>Method</td><td>Backbone</td><td>AP</td><td>\( AP_{50} \)</td><td>\( AP_{75} \)</td><td>\( AP_S \)</td><td>\( AP_M \)</td><td>\( AP_L \)</td><td>Reference</td></tr><tr><td>ATSS [50]</td><td>ResNet-101</td><td>43.6</td><td>62.1</td><td>47.4</td><td>26.1</td><td>47.0</td><td>53.6</td><td>CVPR20</td></tr><tr><td>PAA [21]</td><td>ResNet-101</td><td>44.8</td><td>63.3</td><td>48.7</td><td>26.5</td><td>48.8</td><td>56.3</td><td>ECCV20</td></tr><tr><td>GFL [24]</td><td>ResNet-101</td><td>45.0</td><td>63.7</td><td>48.9</td><td>27.2</td><td>48.8</td><td>54.5</td><td>NeurIPS20</td></tr><tr><td>GFLV2 [23]</td><td>ResNet-101</td><td>46.2</td><td>64.3</td><td>50.5</td><td>27.8</td><td>49.9</td><td>57.0</td><td>CVPR21</td></tr><tr><td>OTA [11]</td><td>ResNet-101</td><td>45.3</td><td>63.5</td><td>49.3</td><td>26.9</td><td>48.8</td><td>56.1</td><td>CVPR21</td></tr><tr><td>IQDet [30]</td><td>ResNet-101</td><td>45.1</td><td>63.4</td><td>49.3</td><td>26.7</td><td>48.5</td><td>56.6</td><td>CVPR21</td></tr><tr><td>ObjectBox [48]</td><td>ResNet-101</td><td>46.1</td><td>65.0</td><td>48.3</td><td>26.0</td><td>48.7</td><td>57.3</td><td>ECCV22</td></tr><tr><td>TSCODE (ours) + [24]</td><td>ResNet-101</td><td>46.7</td><td>65.1</td><td>50.8</td><td>28.2</td><td>50.3</td><td>57.7</td><td>-</td></tr><tr><td>ATSS [50]</td><td>ResNeXt-101-32x8d</td><td>45.1</td><td>63.9</td><td>49.1</td><td>27.9</td><td>48.2</td><td>54.6</td><td>CVPR20</td></tr><tr><td>PAA [21]</td><td>ResNeXt-101-64x4d</td><td>46.6</td><td>65.6</td><td>50.8</td><td>28.8</td><td>50.4</td><td>57.9</td><td>ECCV20</td></tr><tr><td>GFL [24]</td><td>ResNeXt-101-32x4d</td><td>46.0</td><td>65.1</td><td>50.1</td><td>28.2</td><td>49.6</td><td>56.0</td><td>NeurIPS20</td></tr><tr><td>GFLV2 [23]</td><td>ResNeXt-101-32x4d</td><td>47.2</td><td>65.7</td><td>51.7</td><td>29.1</td><td>50.8</td><td>58.2</td><td>CVPR21</td></tr><tr><td>OTA [11]</td><td>ResNeXt-101-64x4d</td><td>47.0</td><td>65.8</td><td>51.1</td><td>29.2</td><td>50.4</td><td>57.9</td><td>CVPR21</td></tr><tr><td>IQDet [30]</td><td>ResNeXt-101-64x4d</td><td>47.0</td><td>65.7</td><td>51.1</td><td>29.1</td><td>50.5</td><td>57.9</td><td>CVPR21</td></tr><tr><td>TSCODE (ours) + [24]</td><td>ResNeXt-101-32x4d</td><td>47.6</td><td>66.3</td><td>51.8</td><td>29.5</td><td>51.1</td><td>58.6</td><td>-</td></tr><tr><td>TSCODE (ours) + [24]</td><td>ResNeXt-101-64x4d</td><td>48.3</td><td>67.1</td><td>52.6</td><td>30.0</td><td>51.9</td><td>59.9</td><td>-</td></tr><tr><td>ATSS [50]</td><td>ResNeXt-101-32x8d-DCN</td><td>47.7</td><td>66.6</td><td>52.1</td><td>29.3</td><td>50.8</td><td>59.7</td><td>CVPR20</td></tr><tr><td>PAA [21]</td><td>ResNeXt-101-64x4d-DCN</td><td>49.0</td><td>67.8</td><td>53.3</td><td>30.2</td><td>52.8</td><td>62.2</td><td>ECCV20</td></tr><tr><td>GFL [24]</td><td>ResNeXt-101-32x4d-DCN</td><td>48.2</td><td>67.4</td><td>52.6</td><td>29.2</td><td>51.7</td><td>60.2</td><td>NeurIPS20</td></tr><tr><td>GFLV2 [23]</td><td>ResNeXt-101-32x4d-DCN</td><td>49.0</td><td>67.6</td><td>53.5</td><td>29.7</td><td>52.4</td><td>61.4</td><td>CVPR21</td></tr><tr><td>OTA [11]</td><td>ResNeXt-101-64x4d-DCN</td><td>49.2</td><td>67.6</td><td>53.5</td><td>30.0</td><td>52.5</td><td>62.3</td><td>CVPR21</td></tr><tr><td>IQDet [30]</td><td>ResNeXt-101-64x4d-DCN</td><td>49.0</td><td>67.5</td><td>53.1</td><td>30.0</td><td>52.3</td><td>62.0</td><td>CVPR21</td></tr><tr><td>TSCODE (ours) + 
[24]</td><td>ResNeXt-101-32x4d-DCN</td><td>50.0</td><td>68.5</td><td>54.6</td><td>31.0</td><td>53.4</td><td>62.6</td><td>-</td></tr><tr><td>TSCODE (ours) + [24]</td><td>ResNeXt-101-64x4d-DCN</td><td>50.8</td><td>69.3</td><td>55.3</td><td>31.7</td><td>54.2</td><td>63.7</td><td>-</td></tr></table>
164
+
165
+ Table 5. Performance comparison with state-of-the-art detectors (single-model and single-scale results except the last row) on COCO test-dev. TSCODE consistently outperforms the strong baseline GFL [24] by $1.6 \sim 1.8$ AP and even outperforms its improved version GFLV2 [23]. DCN: Deformable Convolutional Network [8].
166
+
167
+ 44, 50, 52] and evaluate the performance on COCO minival. As shown in Tab. 4, TSCODE consistently improves the performance of different baseline detectors by $0.5\sim 1.4$ AP points. Notably, TSCODE achieves an improvement of 0.8 AP on DDOD [6], a detector that is specifically designed with a disentanglement structure for the classification and localization tasks. Moreover, our method provides the recent detector DW [22] with a performance gain of 0.5 AP, which further demonstrates the efficacy of TSCODE.
168
+
169
+ Moreover, DPE can also be regarded as a simple feature fusion module if its output features are directly sent to the classification branch (without context decoupling). To this end, we compare it with the popular PAFPN [28]. They differ in three aspects: First, we leverage a semantic context augmentation from $P_{l}$ to $P_{l-1}$ before the bottom-up aggregation ($P_{l-1}$ back to $P_{l}$), which enriches the feature representation and benefits both the classification and localization tasks. Second, the weights of the downsampling convolutions are shared across all the heads, which not only makes the detector parameter-efficient but also improves the detection performance.
170
+
171
+ Third, for each pyramid level, we only aggregate features from the two neighboring levels, as we assume that features from a much lower level may be of little help to a higher level. We compare DPE and PAFPN on four basic detectors [10, 24, 43, 50]. As observed in Tab. 6, our DPE achieves larger performance improvements than PAFPN [28]. DPE outperforms the baselines by about $0.5\sim 0.8$ AP with negligible extra computational cost, whereas PAFPN only achieves marginal improvements (about 0.2 AP), indicating the strong generality of our DPE. However, as the context is not decoupled, the improvements are still limited.
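To make the aggregation scheme above concrete, the sketch below gives one plausible PyTorch-style reading of a DPE-like two-neighbor fusion: semantic context from the coarser level is upsampled into the current level, the finer level is folded back in through a downsampling convolution whose weights are shared across levels, and only immediate neighbors participate. The module name, channel width and fusion details are illustrative assumptions, not the authors' implementation.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class NeighborAggregation(nn.Module):
    """Illustrative two-neighbor pyramid fusion (not the paper's exact DPE)."""
    def __init__(self, channels=256):
        super().__init__()
        # one downsampling conv whose weights are reused for every pyramid level
        self.shared_down = nn.Conv2d(channels, channels, 3, stride=2, padding=1)
        self.fuse = nn.Conv2d(channels, channels, 3, padding=1)

    def forward(self, pyramid):
        # pyramid: FPN maps [P3, P4, P5, ...], finest resolution first
        enriched = []
        for l, p in enumerate(pyramid):
            feat = p
            if l + 1 < len(pyramid):
                # semantic context augmentation from the coarser neighbor
                feat = feat + F.interpolate(pyramid[l + 1], size=p.shape[-2:], mode="nearest")
            if l - 1 >= 0:
                # bottom-up aggregation of the finer neighbor with shared weights
                feat = feat + self.shared_down(pyramid[l - 1])
            enriched.append(self.fuse(feat))
        return enriched

# usage on dummy FPN maps
maps = [torch.randn(1, 256, s, s) for s in (64, 32, 16)]
print([o.shape for o in NeighborAggregation()(maps)])
```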
172
+
173
+ # 4.3. Comparisons with State-of-the-arts
174
+
175
+ Finally, we compare TSCODE with recent state-of-the-art detectors on COCO test-dev. Here, we select GFL [24] as our baseline. Following common practice, we adopt the $2 \times$ schedule to train our models with the standard multi-scale training strategy. The results are reported with single-model, single-scale testing for all methods.
176
+
177
+ As shown in Tab. 5, our method improves the performance
178
+
179
+ ![](images/2fae02773a97ba2878776dc1bd337085bf709e974762c3046a3ff3c4a3622e8b.jpg)
180
+ Figure 6. Detection results before and after NMS predicted by FCOS [43] with and without TSCODE. Top: Before NMS; the bounding boxes correspond to those in Fig. 1. Bottom: After NMS; the green boxes denote ground-truth bounding boxes.
181
+
182
+ of GFL to 46.7 AP and 47.6 AP with the ResNet-101 [19] and ResNeXt-101-32x4d [46] backbones, respectively, outperforming all other methods [11, 21, 23, 30, 50]. When using the ResNeXt-101-64x4d [46] backbone, the performance of TSCODE can be further improved to 48.3 AP. We also conduct more experiments by applying Deformable Convolutional Networks (DCN) [8] to the ResNeXt backbones. Following [49], we replace the standard convolution in the last layer before prediction with deformable convolutions. Without bells and whistles, TSCODE achieves an AP of 50.8 points, demonstrating the strong compatibility of our method with advanced techniques in object detection.
183
+
184
+ # 4.4. Qualitative Results
185
+
186
+ In Fig. 6, we qualitatively demonstrate how TSCODE helps detectors improve their performance. For simplicity, we take FCOS [43] as an example. In the typical decoupled-head design, the classification branch and the localization branch share the same input features. However, their different preferences for context cause severe competition. As demonstrated before, the feature context may lean toward the localization task, and thus the detector is prone to predicting a poor classification confidence for a bounding box with high IoU. As a result, these high-quality bounding boxes are unlikely to survive NMS. This phenomenon is more pronounced for objects with texture-less surfaces, e.g., the dark couch (the $1^{\mathrm{st}}$ column) or the shiny spoon (the $2^{\mathrm{nd}}$ column). Another example is the train (the $3^{\mathrm{rd}}$ column), where more global context is required to successfully recognize it. However, it is difficult to accurately localize the boundary of the train from a coarse feature map, as the typical decoupled head does. On the contrary, our TSCODE disentangles the feature context and generates feature inputs
187
+
188
+ <table><tr><td>Method</td><td>AP</td><td>\( AP_{50} \)</td><td>\( AP_{75} \)</td><td>GFLOPs</td></tr><tr><td>FCOS [43]</td><td>38.7</td><td>57.4</td><td>41.8</td><td>200.59</td></tr><tr><td>FCOS w/ PAFPN</td><td>38.7</td><td>57.4</td><td>41.7</td><td>206.49</td></tr><tr><td>FCOS w/ DPE</td><td>\( 39.2_{+0.5} \)</td><td>58.0</td><td>42.4</td><td>213.62</td></tr><tr><td>ATSS [50]</td><td>39.4</td><td>57.6</td><td>42.8</td><td>205.30</td></tr><tr><td>ATSS w/ PAFPN</td><td>39.6</td><td>58.3</td><td>42.2</td><td>211.20</td></tr><tr><td>ATSS w/ DPE</td><td>\( 40.2_{+0.8} \)</td><td>58.2</td><td>42.3</td><td>217.89</td></tr><tr><td>GFL [24]</td><td>40.2</td><td>58.4</td><td>43.3</td><td>208.39</td></tr><tr><td>GFL w/ PAFPN</td><td>40.4</td><td>58.6</td><td>43.6</td><td>214.29</td></tr><tr><td>GFL w/ DPE</td><td>\( 40.8_{+0.6} \)</td><td>58.8</td><td>43.9</td><td>220.99</td></tr><tr><td>TOOD [10]</td><td>42.4</td><td>59.8</td><td>46.1</td><td>-</td></tr><tr><td>TOOD w/ PAFPN</td><td>42.6</td><td>59.9</td><td>46.2</td><td>-</td></tr><tr><td>TOOD w/ DPE</td><td>\( 43.0_{+0.6} \)</td><td>60.3</td><td>46.4</td><td>-</td></tr></table>
189
+
190
+ Table 6. Comparison of our DPE and PAFPN [28]. FLOPs are measured with an input image size of $1280 \times 800$. Since TOOD [10] contains complex deformable sampling [8], we do not report its computational cost.
191
+
192
+ with specific feature context for each task. As a result, we can leverage more semantic context for classification and more detail and edge information for localization.
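The suppression effect discussed above can be reproduced with a generic, score-ranked NMS routine (sketched below; this is standard greedy NMS, not the detector's actual post-processing code): a box that overlaps the object well but carries a low classification score is discarded in favour of an overlapping, higher-scored but worse-localized box.

```python
import torch

def iou(box, boxes):
    """IoU of one box [4] against boxes [N, 4] in (x1, y1, x2, y2) format."""
    x1 = torch.maximum(box[0], boxes[:, 0]); y1 = torch.maximum(box[1], boxes[:, 1])
    x2 = torch.minimum(box[2], boxes[:, 2]); y2 = torch.minimum(box[3], boxes[:, 3])
    inter = (x2 - x1).clamp(min=0) * (y2 - y1).clamp(min=0)
    area = lambda b: (b[..., 2] - b[..., 0]) * (b[..., 3] - b[..., 1])
    return inter / (area(box) + area(boxes) - inter)

def nms(boxes, scores, iou_thr=0.5):
    """Greedy NMS: keep boxes in decreasing score order, drop heavy overlaps."""
    order = scores.argsort(descending=True)
    keep = []
    while order.numel() > 0:
        i = order[0]
        keep.append(i.item())
        if order.numel() == 1:
            break
        order = order[1:][iou(boxes[i], boxes[order[1:]]) <= iou_thr]
    return keep

# Toy example: box A is well localized but low-scored; box B overlaps A,
# is worse localized, yet more confident. Only B survives NMS.
boxes = torch.tensor([[10., 10., 110., 110.],   # A
                      [20., 20., 130., 125.]])  # B
scores = torch.tensor([0.30, 0.80])
print(nms(boxes, scores))   # -> [1]
```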
193
+
194
+ # 5. Conclusion
195
+
196
+ In this paper, we have delved into the root causes of the inherent competition between the classification and localization tasks and proposed a novel TSCODE to eliminate this competition. It decouples the semantic context of the two tasks through two efficient designs, SCE and DPE, providing features with richer semantic information for classification and with more edge information for localization. Extensive experiments on the MS COCO benchmark demonstrate the effectiveness and strong generality of TSCODE.
197
+
198
+ # References
199
+
200
+ [1] Sean Bell, C Lawrence Zitnick, Kavita Bala, and Ross Girshick. Inside-outside net: Detecting objects in context with skip pooling and recurrent neural networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2874-2883, 2016. 1
201
+ [2] Zhaowei Cai and Nuno Vasconcelos. Cascade r-cnn: Delving into high quality object detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6154-6162, 2018. 2
202
+ [3] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European conference on computer vision, pages 213-229. Springer, 2020. 2
203
+ [4] Kai Chen, Jiaqi Wang, Jiangmiao Pang, Yuhang Cao, Yu Xiong, Xiaoxiao Li, Shuyang Sun, Wansen Feng, Ziwei Liu, Jiarui Xu, et al. Mmdetection: Open mmlab detection toolbox and benchmark. arXiv preprint arXiv:1906.07155, 2019. 5
204
+ [5] Qiang Chen, Yingming Wang, Tong Yang, Xiangyu Zhang, Jian Cheng, and Jian Sun. You only look one-level feature. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 13039-13048, 2021. 2
205
+ [6] Zehui Chen, Chenhongyi Yang, Qiaofei Li, Feng Zhao, Zheng-Jun Zha, and Feng Wu. Disentangle your dense object detector. In Proceedings of the 29th ACM International Conference on Multimedia, pages 4939-4948, 2021. 1, 2, 3, 6, 7
206
+ [7] Jifeng Dai, Yi Li, Kaiming He, and Jian Sun. R-fcn: Object detection via region-based fully convolutional networks. Advances in neural information processing systems, 29, 2016. 1
207
+ [8] Jifeng Dai, Haozhi Qi, Yuwen Xiong, Yi Li, Guodong Zhang, Han Hu, and Yichen Wei. Deformable convolutional networks. In Proceedings of the IEEE international conference on computer vision, pages 764-773, 2017. 7, 8
208
+ [9] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248–255. IEEE, 2009. 5
209
+ [10] Chengjian Feng, Yujie Zhong, Yu Gao, Matthew R Scott, and Weilin Huang. Tood: Task-aligned one-stage object detection. In 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 3490-3499. IEEE Computer Society, 2021. 3, 7, 8
210
+ [11] Zheng Ge, Songtao Liu, Zeming Li, Osamu Yoshie, and Jian Sun. Ota: Optimal transport assignment for object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 303-312, 2021. 3, 6, 7, 8
211
+ [12] Zheng Ge, Songtao Liu, Feng Wang, Zeming Li, and Jian Sun. Yolox: Exceeding yolo series in 2021. arXiv preprint arXiv:2107.08430, 2021. 2
212
+ [13] Golnaz Ghiasi, Tsung-Yi Lin, and Quoc V Le. Nas-fpn: Learning scalable feature pyramid architecture for object detection. In Proceedings of the IEEE/CVF conference on
213
+
214
+ computer vision and pattern recognition, pages 7036-7045, 2019. 2
215
+ [14] Ross Girshick. Fast r-cnn. In Proceedings of the IEEE international conference on computer vision, pages 1440-1448, 2015. 2
216
+ [15] Ross Girshick, Jeff Donahue, Trevor Darrell, and Jitendra Malik. Rich feature hierarchies for accurate object detection and semantic segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 580-587, 2014. 1, 2
217
+ [16] Benjamin Graham, Martin Engelcke, and Laurens Van Der Maaten. 3d semantic segmentation with submanifold sparse convolutional networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 9224-9232, 2018. 4
218
+ [17] Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE international conference on computer vision, pages 2961-2969, 2017. 1, 2
219
+ [18] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Spatial pyramid pooling in deep convolutional networks for visual recognition. IEEE transactions on pattern analysis and machine intelligence, 37(9):1904-1916, 2015. 1
220
+ [19] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 5, 8
221
+ [20] Borui Jiang, Ruixuan Luo, Jiayuan Mao, Tete Xiao, and Yuning Jiang. Acquisition of localization confidence for accurate object detection. In Proceedings of the European conference on computer vision (ECCV), pages 784–799, 2018. 3
222
+ [21] Kang Kim and Hee Seok Lee. Probabilistic anchor assignment with iou prediction for object detection. In European Conference on Computer Vision, pages 355-371. Springer, 2020. 7, 8
223
+ [22] Shuai Li, Chenhang He, Ruihuang Li, and Lei Zhang. A dual weighting label assignment scheme for object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9387-9396, 2022. 6, 7
224
+ [23] Xiang Li, Wenhai Wang, Xiaolin Hu, Jun Li, Jinhui Tang, and Jian Yang. Generalized focal loss v2: Learning reliable localization quality estimation for dense object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11632-11641, 2021. 7, 8
225
+ [24] Xiang Li, Wenhai Wang, Lijun Wu, Shuo Chen, Xiaolin Hu, Jun Li, Jinhui Tang, and Jian Yang. Generalized focal loss: Learning qualified and distributed bounding boxes for dense object detection. Advances in Neural Information Processing Systems, 33:21002-21012, 2020. 2, 3, 5, 6, 7, 8
226
+ [25] Tsung-Yi Lin, Piotr Dollar, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Belongie. Feature pyramid networks for object detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2117-2125, 2017. 1, 2, 3
227
+
228
+ [26] Tsung-Yi Lin, Priya Goyal, Ross Girshick, Kaiming He, and Piotr Dollár. Focal loss for dense object detection. In Proceedings of the IEEE international conference on computer vision, pages 2980-2988, 2017. 1, 2, 3
229
+ [27] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In European conference on computer vision, pages 740-755. Springer, 2014. 5
230
+ [28] Shu Liu, Lu Qi, Haifang Qin, Jianping Shi, and Jiaya Jia. Path aggregation network for instance segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8759-8768, 2018. 2, 4, 7, 8
231
+ [29] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang Fu, and Alexander C Berg. Ssd: Single shot multibox detector. In European conference on computer vision, pages 21-37. Springer, 2016. 2
232
+ [30] Yuchen Ma, Songtao Liu, Zeming Li, and Jian Sun. Iqdet: Instance-wise quality distribution sampling for object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1717-1725, 2021. 7, 8
233
+ [31] Roozbeh Mottaghi, Xianjie Chen, Xiaobai Liu, Nam-Gyu Cho, Seong-Whan Lee, Sanja Fidler, Raquel Urtasun, and Alan Yuille. The role of context for object detection and semantic segmentation in the wild. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2014. 1
234
+ [32] Jiangmiao Pang, Kai Chen, Jianping Shi, Huajun Feng, Wanli Ouyang, and Dahua Lin. Libra r-cnn: Towards balanced learning for object detection. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 821-830, 2019. 2
235
+ [33] Joseph Redmon, Santosh Divvala, Ross Girshick, and Ali Farhadi. You only look once: Unified, real-time object detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 779-788, 2016. 1, 2
236
+ [34] Joseph Redmon and Ali Farhadi. Yolo9000: better, faster, stronger. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7263-7271, 2017. 2
237
+ [35] Joseph Redmon and Ali Farhadi. Yolov3: An incremental improvement. arXiv preprint arXiv:1804.02767, 2018. 2
238
+ [36] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems, 28, 2015. 1, 2
239
+ [37] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In International Conference on Medical image computing and computer-assisted intervention, pages 234-241. Springer, 2015. 4
240
+ [38] Rasmus Rothe, Matthieu Guillaumin, and Luc Van Gool. Non-maximum suppression for object detection by passing messages between windows. In Asian conference on computer vision, pages 290-306. Springer, 2014. 2
241
+ [39] Bharat Singh and Larry S Davis. An analysis of scale invariance in object detection snip. In Proceedings of the
242
+
243
+ IEEE conference on computer vision and pattern recognition, pages 3578-3587, 2018. 2
244
+ [40] Guanglu Song, Yu Liu, and Xiaogang Wang. Revisiting the sibling head in object detector. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11563-11572, 2020. 1, 2, 3
245
+ [41] Mingxing Tan, Ruoming Pang, and Quoc V Le. Efficientdet: Scalable and efficient object detection. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10781-10790, 2020. 2
246
+ [42] Zhiyu Tan, Junyan Wang, Xiuyu Sun, Ming Lin, Hao Li, et al. Giraffedet: A heavy-neck paradigm for object detection. In International Conference on Learning Representations, 2021. 2
247
+ [43] Zhi Tian, Chunhua Shen, Hao Chen, and Tong He. Fcos: Fully convolutional one-stage object detection. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9627-9636, 2019. 1, 2, 5, 6, 7, 8
248
+ [44] Jianfeng Wang, Lin Song, Zeming Li, Hongbin Sun, Jian Sun, and Nanning Zheng. End-to-end object detection with fully convolutional network. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 15849-15858, 2021. 6
249
+ [45] Yue Wu, Yinpeng Chen, Lu Yuan, Zicheng Liu, Lijuan Wang, Hongzhi Li, and Yun Fu. Rethinking classification and localization for object detection. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10186-10195, 2020. 1, 2, 3
250
+ [46] Saining Xie, Ross Girshick, Piotr Dollár, Zhuowen Tu, and Kaiming He. Aggregated residual transformations for deep neural networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1492-1500, 2017. 8
251
+ [47] Chenhongyi Yang, Zehao Huang, and Naiyan Wang. Querydet: Cascaded sparse query for accelerating high-resolution small object detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13668-13677, 2022. 4
252
+ [48] Mohsen Zand, Ali Etemad, and Michael Greenspan. Object-box: From centers to boxes for anchor-free object detection. arXiv preprint arXiv:2207.06985, 2022. 7
253
+ [49] Haoyang Zhang, Ying Wang, Feras Dayoub, and Niko Sunderhauf. Varifocalnet: An iou-aware dense object detector. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8514-8523, 2021. 8
254
+ [50] Shifeng Zhang, Cheng Chi, Yongqiang Yao, Zhen Lei, and Stan Z Li. Bridging the gap between anchor-based and anchor-free detection via adaptive training sample selection. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9759-9768, 2020. 2, 3, 5, 6, 7, 8
255
+ [51] Benjin Zhu*, Feng Wang*, Jianfeng Wang, Siwei Yang, Jianhu Chen, and Zeming Li. cvpods: All-in-one toolbox for computer vision research, 2020. 5
256
+ [52] Benjin Zhu, Jianfeng Wang, Zhengkai Jiang, Fuhang Zong, Songtao Liu, Zeming Li, and Jian Sun. Autoassign: Differentiable label assignment for dense object detection. arXiv preprint arXiv:2007.03496, 2020. 6
2303.01xxx/2303.01047/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:90efa6198f895ec8c6700569dfdc26aaa8e9a77f8b12a07e8c6b0c958e901168
3
+ size 878853
2303.01xxx/2303.01047/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.01xxx/2303.01055/2545cb8d-2683-4793-9ae1-7a592b42bc74_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.01xxx/2303.01055/2545cb8d-2683-4793-9ae1-7a592b42bc74_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.01xxx/2303.01055/2545cb8d-2683-4793-9ae1-7a592b42bc74_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:00b815c23ea3d5e3a4869530bef818c88ad7077c3208e9b53b616979c17e53fc
3
+ size 24338125
2303.01xxx/2303.01055/full.md ADDED
@@ -0,0 +1,618 @@
 
1
+ # Physics-informed neural networks for solving forward and inverse problems in complex beam systems
2
+
3
+ Taniya Kapoor, Hongrui Wang, Member, IEEE, Alfredo Núñez, Senior Member, IEEE, Rolf Dollevoet
4
+
5
+ Abstract—This paper proposes a new framework using physics-informed neural networks (PINNs) to simulate complex structural systems that consist of single and double beams based on Euler-Bernoulli and Timoshenko theory, where the double beams are connected with a Winkler foundation. In particular, forward and inverse problems for the Euler-Bernoulli and Timoshenko partial differential equations (PDEs) are solved using nondimensional equations with the physics-informed loss function. Higher-order complex beam PDEs are efficiently solved for forward problems to compute the transverse displacements and cross-sectional rotations with less than $1e - 3$ percent error. Furthermore, inverse problems are robustly solved to determine the unknown dimensionless model parameters and applied force in the entire space-time domain, even in the case of noisy data. The results suggest that PINNs are a promising strategy for solving problems in engineering structures and machines involving beam systems.
6
+
7
+ Index Terms—PINNs, complex system, Euler-Bernoulli beam, Timoshenko beam, double-beam system.
8
+
9
+ # I. INTRODUCTION
10
+
11
+ Complex engineering issues in real-life scenarios are often characterized by the connection between various subsystems and uncertainty in behavior caused by internal and external variables and their interactions. Furthermore, the design and maintenance of complex systems, such as engineering structures and machines, is made challenging by the unpredictable collective behaviors and properties of these concurrently operating and interacting components. These issues are typically difficult to analyze through conventional methods [1]. Most of these complex engineering systems are continuous, and partial differential equation (PDE) models are used to characterize and understand their behavior. These PDE models are used to simulate a wide range of engineering phenomena, ranging from multiple beam systems in suspension bridge cables (Timoshenko beam equations) [2] to catenary-pantograph interactions in railways (damped beam equations) [3] to simulating air turbulence that disrupts flight (Navier-Stokes equations) [4], [5], among many others [6]-[13]. Solutions to governing PDEs enable real challenges such as structural health monitoring [14]-[16] and optimal structural design [17], [18] to be addressed.
12
+
13
+ Manuscript submitted April 1, 2022, Revised February 24, 2023, May 19, 2023, July 22, 2023, Accepted August 23, 2023 (Corresponding author: Hongrui Wang) T. Kapoor, H. Wang, A. Nunez, R. Dollevoet are with the Section of Railway Engineering, Department of Engineering Structures, Delft University of Technology, The Netherlands. (e-mail: t.kapoor@tudelft.nl; h.wang-8@tudelft.nl; a.a.nunezvicencio@tudelft.nl; r.p.b.j.dollevoet@tudelft.nl).
14
+
15
+ The development of algorithms for diagnostics and prognosis is an issue in maintaining complex engineering systems [1]. Insights could be obtained by solving the forward and inverse problems for the governing PDEs of interest to forecast the system's behavior and minimize unexpected downtimes of complex systems. These equations range in complexity from being extremely nonlinear (Navier-Stokes equation [19]) to incorporating intricate higher-order boundary conditions (fourth-order beam equations [20]). In practice, these equations are too complicated to be solved analytically and must be solved numerically. Numerical methods such as the finite-difference and finite-element methods have been used to approximate the solutions of these PDEs. Despite their success in practice, these methods encounter some difficulties, such as mesh creation, which is more difficult for complex geometries in higher dimensions [21], [22].
16
+
17
+ In recent years, scientific machine learning, which combines scientific computing with machine learning methodologies to estimate PDEs solutions, has made remarkable developments and has emerged as a viable alternative to the aforementioned numerical methods. The review papers [21], [23], [24] extensively discuss state-of-the-art breakthroughs in scientific machine learning, including works on real-world engineering problems. However, data-driven methods require a large amount of data, which is possibly computationally expensive and susceptible to noise in some engineering systems [25]. One possible way to mitigate the effects of these problems is to utilize the known physical knowledge of the underlying system in the learning procedure [26]-[28]. Prior physical knowledge could be incorporated into the learning procedure by collocating the PDE residual at training points, similar to leveraging the physical equation in the training process. The underlying neural networks proposed in [25] are called physics-informed neural networks (PINNs).
18
+
19
+ PINNs utilize neural networks' universal function approximation property [29] and embed the well-posed physical equations modeled by PDEs in the loss function. Prior knowledge of physical principles works as a regularization agent in neural network training, restricting the space of admissible solutions and improving function approximation accuracy. As a result, given some knowledge of the physical features of the problem and some training data, PINN can be utilized to identify a high-fidelity solution. PINNs have already proven to be a very effective paradigm for approximating solutions of PDEs for real-world problems [30], [31], as discussed in the review papers [21], [23].
20
+
21
+ However, several challenges for PINNs have also been found [32]. One such challenge for PINNs is to learn relevant physical phenomena for more complex problems with large coefficients in the physical equation [33]. A sequence-to-sequence learning task was proposed in [33] as a remedy to this problem. However, this can be computationally expensive when the scale is large. In [34], the importance of using nondimensional equations in the PINN framework was highlighted for cardiovascular blood flow. We build on these works and address the challenge of multiscale complex beam systems. Accordingly, this paper uses nondimensional PDEs instead of dimensional PDEs in the loss function. This provides a way to simulate realistic physical equations with computational tractability.
22
+
23
+ Accurate prediction of the dynamics of structures [35] and structural elements, such as plates [36], and beams [37], [38], is crucial in the field of structural engineering. However, measuring quantities of interest in beam systems through lab experiments can prove to be difficult, as it necessitates specialized prototypes, training, and safety during the testing process, increasing the overall cost of the experiment. PINNs offer a simulation-based solution as a mesh-free method that does not require discretizing the domain into a finite number of elements, making it computationally inexpensive compared to numerical methods. PINNs can effectively integrate incomplete or noisy information with prior physical knowledge. The proposed framework converts dimensional PDEs to a nondimensionalized form, increasing the suitability for neural networks and enabling the prediction of deflections and rotations for any material, resulting in a more generalizable method.
24
+
25
+ This paper provides a framework to simulate complex structural systems consisting of two or more basic structural systems connected by an elastic layer. In particular, the forced vibration of two elastically connected beams is studied, which is commonly encountered in the mechanical, construction, and aeronautical industries [6]. These double-beam systems in engineering structures have received significant attention in the scientific community and are considered complex systems. Studies have been conducted to predict the dynamics of these systems under various loading and force conditions, such as those found in papers [39]–[47], among others. These studies include the use of analytical and closed-form solutions [43], [48]–[51]; however, analytical methods have limitations in applicability, as they may be useful only for specific types of problems and can become complex for systems with many variables or nonlinear equations. Other approaches, such as the state-space method presented in [45], [52], may also be computationally expensive for systems with a large number of states. Additionally, modal analysis methods as presented in [6], [53] have been used to study the natural frequencies and modes of vibration, but they do not provide information on the full response of the system and cannot be used to predict the time-domain response at any instant.
26
+
27
+ The considered governing equations are modeled using Euler-Bernoulli and Timoshenko theory. In addition to solving the forward problem and computing the physical quantities of interest, we also solve the inverse problem. For the inverse
28
+
29
+ problem, one may not necessarily have complete information about the inputs to the PDEs, such as initial or boundary data, coefficients [54]–[56] or applied forces. This lack of knowledge makes the forward problem ill-posed, and subsequently, the forward problem cannot be solved uniquely. In this paper, access to data for quantities of interest is leveraged to determine the PDEs' unknown inputs, for instance, the model parameters and applied forces.
30
+
31
+ The main contributions of the current paper are as follows,
32
+
33
+ - To the best of the authors' knowledge, this is the first work to use physics-informed machine learning to solve the forward and inverse problems of Euler-Bernoulli and Timoshenko complex beam models.
34
+ - We address a challenge for PINNs in solving multiscale complex beam PDEs and propose a framework for using nondimensional equations in the loss function.
35
+ - The proposed nondimensional PINN framework is employed to address ill-posed inverse problems for complex systems and to identify the unknown model parameters and the applied force on the beam components. This is achieved by utilizing data from indirect measurements such as the displacement and cross-sectional rotations of the beams.
36
+ - The presented methodology is robust to noise and can accommodate potential uncertainty in the measurement data, making it well suited for real-world applications where data are incomplete or uncertain.
37
+
38
+ The rest of the article is organized as follows. In Section II, the PINN method is presented to simulate the dimensional Euler-Bernoulli beam equation. Due to the limitations of PINNs in simulating the dimensional Euler-Bernoulli beam equation, an alternative approach of using nondimensional equations in the PINN's loss function is proposed and successfully used to solve the dimensionless Euler-Bernoulli equation in Section III. Section IV first applies the proposed framework to simulate the Timoshenko beam model for solving forward and inverse problems. The forward problem of the Euler-Bernoulli double-beam equation is then solved. Additionally, Section IV covers forward and inverse Timoshenko double-beam system problems. Section V concludes this paper.
39
+
40
+ # II. PINNS FOR DIMENSIONAL PDES
41
+
42
+ In this section, the method of PINNs to simulate PDEs is presented in brief using an abstract dimensional PDE. The method is then used to simulate the dimensional Euler-Bernoulli equation. The following abstract dimensional PDE is considered with implicit initial and boundary conditions:
43
+
44
+ $$
45
+ \bar {\mathcal {K}} (\bar {x}, \bar {t}) := \mathcal {D} [ \bar {u} ] (\bar {x}, \bar {t}; \bar {\lambda}) - \bar {f} (\bar {x}, \bar {t}) \quad \forall (\bar {x}, \bar {t}) \in \bar {\Omega} \times \bar {T} \subset \mathbb {R} ^ {\mathrm {d}} \times \mathbb {R} \tag {1}
46
+ $$
47
+
48
+ where $\mathcal{D}[\cdot]$ denotes the differential operator, $\bar{u}$ is the quantity of interest, $\bar{x}\in \bar{\Omega}\subset \mathbb{R}^{\mathrm{d}}$ and $\bar{t}\in \bar{T}\subset \mathbb{R}$ for $d\geq 1$, $\bar{\Omega}$ denotes the spatial domain contained in the d-dimensional Cartesian space, $\bar{T}$ denotes the temporal domain, $\bar{\lambda}\in \mathbb{R}$ is the model parameter, $\bar{f}(\bar{x},\bar{t})$ is the external force, and $\bar{\mathcal{K}}$ denotes the abstract physical equation (the PDE residual).
49
+
50
+ Deep neural networks are the core of PINNs, in which the inputs $(\bar{x},\bar{t})$ are mapped to the output $(\bar{u})$ through an iterative composition of hidden layers. The composition consists of weights $(w)$, biases $(b)$, and linear or nonlinear activation function(s) $(\sigma)$. Within a neuron, the inputs undergo a linear combination: they are multiplied by the respective weights and summed together with a bias term. Subsequently, this combined input is passed through a nonlinear activation function $(\sigma)$, as presented in Fig. 4. This introduces nonlinearity, enabling the network to capture intricate relationships between inputs and outputs.
51
+
52
+ To train the neural network, one needs training set $(\Delta)$ , consisting of spatial boundary points $(\Delta_{\mathrm{b}})$ , temporal boundary points $(\Delta_{\mathrm{i}})$ and interior points $(\Delta_{\mathrm{int}})$ . As a result, the training set can be written as $\Delta = \Delta_{\mathrm{i}} \cup \Delta_{\mathrm{b}} \cup \Delta_{\mathrm{int}}$ . In this work, $\Delta_{\mathrm{i}}, \Delta_{\mathrm{b}}$ , and $\Delta_{\mathrm{int}}$ are considered to have $N_{\mathrm{i}}, N_{\mathrm{b}}$ and $N_{\mathrm{int}}$ training points respectively. The total number of training points is denoted by $N_{\mathrm{train}}$ . To approximate the quantity of interest $\bar{u}$ , one needs to minimize the loss function containing the physical model in the form of a PDE with initial and boundary conditions of (1). No additional data are required in the loss function for forward problems. The loss function $\bar{\mathcal{L}}$ is defined as follows:
53
+
54
+ $$
55
+ \bar{\mathcal{L}}(\theta) = \operatorname*{Min}_{\theta} \left( \frac{1}{N_{\text{train}}} \sum_{n=1}^{N_{\text{train}}} \left\| \bar{\mathcal{K}}\left(\bar{x}_{\mathrm{n}}, \bar{t}_{\mathrm{n}}\right) \right\|^{2} \right) \tag{2}
56
+ $$
57
+
58
+ where $(\bar{x}_{\mathrm{n}},\bar{t}_{\mathrm{n}})$ represents the training tuple for each n. Minimizing this loss function using a suitable optimization algorithm provides optimal parameters $\theta = \{w,b\}$ .
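As an illustration of the ingredients just described, the following PyTorch-style sketch (not the authors' code; the names MLP, sample_points and physics_loss, and the layer sizes, are assumptions made here) builds a small fully connected tanh network mapping the space-time inputs to the quantity of interest, draws random interior, boundary and initial training points, and assembles a loss in the spirit of (2) from the squared PDE residual plus boundary and initial mismatches. The residual function itself is passed in as an argument; a concrete one for the nondimensional Euler-Bernoulli beam is sketched in Section III-B below.

```python
import torch

class MLP(torch.nn.Module):
    """Small fully connected tanh network mapping (x, t) -> u."""
    def __init__(self, hidden=20, layers=4):
        super().__init__()
        sizes = [2] + [hidden] * layers + [1]
        mods = []
        for i in range(len(sizes) - 1):
            mods.append(torch.nn.Linear(sizes[i], sizes[i + 1]))
            if i < len(sizes) - 2:
                mods.append(torch.nn.Tanh())
        self.net = torch.nn.Sequential(*mods)

    def forward(self, x, t):
        return self.net(torch.cat([x, t], dim=1))

def sample_points(n_int, n_b, n_i, x_max=1.0, t_max=1.0):
    """Random interior, spatial-boundary and initial-time collocation points."""
    interior = (torch.rand(n_int, 1) * x_max, torch.rand(n_int, 1) * t_max)
    boundary = (torch.randint(0, 2, (n_b, 1)).float() * x_max, torch.rand(n_b, 1) * t_max)
    initial  = (torch.rand(n_i, 1) * x_max, torch.zeros(n_i, 1))
    return interior, boundary, initial

def physics_loss(model, residual_fn, interior, boundary, initial, u0_fn):
    """Mean squared PDE residual plus boundary/initial mismatches, in the spirit of (2)."""
    r   = residual_fn(model, *interior)          # PDE residual at interior points
    u_b = model(*boundary)                       # e.g. zero displacement on the boundary
    u_i = model(*initial) - u0_fn(initial[0])    # initial-displacement mismatch
    return (r ** 2).mean() + (u_b ** 2).mean() + (u_i ** 2).mean()
```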
59
+
60
+ Now, we employ the PINN algorithm for the dimensional Euler-Bernoulli beam equation and evaluate the corresponding performance. The dynamic Euler-Bernoulli beam equation is given by
61
+
62
+ $$
63
+ \rho A \bar{u}_{\bar{t}\bar{t}} + EI \bar{u}_{\bar{x}\bar{x}\bar{x}\bar{x}} = \bar{f}(\bar{x}, \bar{t}) \quad \bar{x} \in [0, \bar{l}],\ \bar{t} \in [0, t_{\text{end}}] \tag{3}
64
+ $$
65
+
66
+ ![](images/38b0c2aca374fd8dc4393e5c6f05a213713abb00ded7f769d71b8afab6d921fa.jpg)
67
+ Fig. 1. Simply supported beam with varying transverse force.
68
+
69
+ Here, $\bar{l}$ and $t_{\mathrm{end}}$ refer to the length of the beam and final time, respectively. This equation models the transverse displacement of beam $\bar{u}$ in the space-time domain subject to the external transverse force $\bar{f}$ as shown in Fig. 1. This work considers a uniform cross-sectioned beam with constant material properties throughout the beam. The parameters $\rho$ and $A$ denote the density and cross-sectional area of the beam, respectively. The parameters $E$ and $I$ are Young's modulus and the moment of inertia of the beam, respectively. The external force $\bar{f}$ acts nonuniformly on the body, and $\bar{u}$ is the transverse displacement of the beam, which is the only unknown in the governing PDE. In addition, $u_{\mathrm{tt}}$ represents
70
+
71
+ the second order partial derivative of $u$ with respect to $t$ , and $u_{\mathrm{xxxx}}$ represents the fourth order partial derivative of $u$ with respect to $x$ . The goal of the forward problem is to compute the transverse displacement of the beam supplemented with the initial and boundary conditions. For this study, simply supported beams are considered, which rest on two supports and are free to move horizontally. Real-world applications of simply supported beams include railway tracks, and bridges, to name a few. Mathematically, the simply supported boundary condition for (3) is given by
72
+
73
+ $$
74
+ \bar {u} (0, \bar {t}) = \bar {u} (\bar {l}, \bar {t}) = \bar {u} _ {\bar {x} \bar {x}} (0, \bar {t}) = \bar {u} _ {\bar {x} \bar {x}} (\bar {l}, \bar {t}) = 0
75
+ $$
76
+
77
+ For the numerical experiment, parameter values of an aluminium-like material, which is widely used for making beams, are considered in the physical equation. The parameter values taken for the problem are $\rho = 2\times 10^{3}\,\mathrm{kg/m}^{3}$, $A = 5\times 10^{-2}\,\mathrm{m}^2$, $E = 10^{10}\,\mathrm{N/m}^2$, and $I = 4\times 10^{-4}\,\mathrm{m}^4$. Additionally, the beam is taken to be $\pi^2$ meters long, and the external force $\bar{f}$ is taken to be $EI(1 - 16\pi^2)\sin (\bar{x}/\pi)\cos (4c\bar{t}/\pi)/\bar{l}^3\,\mathrm{N}$, where $c = \sqrt{\frac{EI}{\rho A}}$. Taking the final time to be $\pi^2/200\,\mathrm{s}$, the PDE to be solved takes the form
78
+
79
+ $$
80
+ 10^{2} \bar{u}_{\bar{t}\bar{t}} + 4 \times 10^{6} \bar{u}_{\bar{x}\bar{x}\bar{x}\bar{x}} = 4 \times 10^{6} (1 - 16\pi^{2}) \sin(\bar{x}/\pi) \cos(800\bar{t}/\pi) / \pi^{3} \tag{4}
81
+ $$
82
+
83
+ in the domain $\bar{x} \in [0, \pi^2]$ and $\bar{t} \in [0, \pi^2/200]$ . For (4) to be well-posed the initial condition of the beam is taken to be $\sin (\bar{x}/l)$ with zero initial velocity, where $l = \sqrt{\bar{l}}$ .
84
+
85
+ For training the neural network, 16,000 random training points are generated with the distribution $N_{\mathrm{i}} = 2000$, $N_{\mathrm{b}} = 4000$, and $N_{\mathrm{int}} = 10000$. The neural network consists of 4 hidden layers with 20 neurons in each hidden layer. The tanh activation function, one of the most commonly used activation functions in the PINN literature as described in the review paper [23], is chosen. The loss function (2) consists of the initial condition, the boundary condition and the PDE. The PDE residual is regularized in the loss function with the residual parameter 0.1 [57]. The L-BFGS optimizer, which is again one of the most commonly used optimizers in the PINN literature [23], is used to minimize the loss function. As shown in Fig. 2, 15,000 epochs are performed. However, the figure clearly illustrates that the optimizer does not converge to the solution, and a very large training loss of about $10^{14}$ remains. Additionally, the graph shows that the optimizer is stuck in a local minimum and hence will not converge even if the number of epochs is increased for the same neural network configuration.
86
+
87
+ In [16], [58], the problem of free vibrations in the Euler-Bernoulli single-beam equation was successfully solved by PINNs, where the coefficients of the PDE were taken to be unity. This shows that PINNs can simulate the beam equations, and that the challenge lies in the multiscale coefficient values that arise when dealing with a real-life physical equation. The nonconvergence in our case is caused by the large coefficients of the dimensional form of the equation. Consequently, a pressing need arises to transform the dimensional form of the equation into a nondimensional form. It may be possible that
88
+
89
+ for some configurations containing hundreds of hidden layers and neurons, this problem could be solved without nondimensionalizing the PDE. However, nondimensionalization aims to provide computational tractability.
90
+
91
+ ![](images/464801bbbe4a7aa7646954365037b386902710ca8358a6d98aafce226a0d9cae.jpg)
92
+ Fig. 2. L-BFGS training loss vs. the number of epochs for the dimensional Euler-Bernoulli beam equation.
93
+
94
+ # III. PINNS FOR NONDIMENSIONAL PDES
95
+
96
+ This section presents the proposed framework of using nondimensional equations in the PINN loss function. The method for nondimensionalizing the governing PDE is described first. Then, the algorithms for forward and inverse problems using dimensionless equations in PINNs are presented. To nondimensionalize the abstract PDE given by (1), the following transformations are performed
97
+
98
+ $$
99
+ \bar {x} = \xi_ {1} (x); \quad \bar {t} = \xi_ {2} (t); \quad \bar {u} = \xi_ {3} (u); \quad \bar {f} = \xi_ {4} (f) \tag {5}
100
+ $$
101
+
102
+ where, $\xi_1$ , $\xi_2$ , $\xi_3$ , and $\xi_4$ are suitable functions that map the dimensional quantities $\bar{x}$ , $\bar{t}$ , $\bar{u}$ , and $\bar{f}$ to the corresponding nondimensional quantities. After substituting the above transformations in (1) and introducing the dimensionless parameter $\lambda$ , one obtains
103
+
104
+ $$
105
+ \mathcal {K} (x, t) := \mathcal {D} [ u ] (x, t; \lambda) - f (x, t) \quad \forall (x, t) \in \Omega \times T \subset \mathbb {R} ^ {d} \times \mathbb {R} \tag {6}
106
+ $$
107
+
108
+ The proposed framework uses dimensionless equations to simplify and stabilize the problem computationally. By nondimensionalizing the variables and parameters, they are kept within a specific range, resulting in improved performance and generalization of the neural network. Furthermore, dimensionless equations generate more interpretable solutions by eliminating the units of measure, making it easier to understand the underlying physical phenomena and to compare results across different physical systems in the form of ratios and parameters. Hence, using dimensionless equations in PINNs can enhance the neural network's computational stability, generalization, and interpretability.
109
+
110
+ # A. PINN Framework for Forward Problems
111
+
112
+ $\mathcal{K}$, the nondimensional PDE corresponding to the dimensional PDE $\bar{\mathcal{K}}$, is now used in the loss function $\mathcal{L}$, defined as follows:
113
+
114
+ $$
115
+ \mathcal{L}(\theta) = \operatorname*{Min}_{\theta} \left( \frac{1}{N_{\text{train}}} \sum_{n=1}^{N_{\text{train}}} \left\| \mathcal{K}\left(x_{\mathrm{n}}, t_{\mathrm{n}}\right) \right\|^{2} \right) \tag{7}
116
+ $$
117
+
118
+ A schematic representation of the proposed PINN-based framework is illustrated in Fig 4.
119
+
120
+ # B. Nondimensional Euler-Bernoulli Beam Equation
121
+
122
+ We now test the nondimensional equation in the PINN framework and evaluate the corresponding performance. To nondimensionalize (3), the following transformations are used:
123
+
124
+ $$
125
+ u = \bar {u} / l; \quad x = \bar {x} / l; \quad t = c \bar {t} / l ^ {2}; \quad f = \bar {f} l ^ {3} / (E I) \tag {8}
126
+ $$
127
+
128
+ Upon substituting these values in (3), one obtains
129
+
130
+ $$
131
+ u _ {\mathrm {t t}} + u _ {\mathrm {x x x x}} = f (x, t) \quad x \in [ 0, \pi ], t \in [ 0, 1 ] \tag {9}
132
+ $$
133
+
134
+ where $f(x,t) = (1 - 16\pi^2)\sin (x)\cos (4\pi t)$ , with initial and boundary conditions
135
+
136
+ $$
137
+ u (x, 0) = \sin (x), \quad u _ {t} (x, 0) = 0
138
+ $$
139
+
140
+ $$
141
+ u (0, t) = u (\pi , t) = u _ {\mathrm {x x}} (0, t) = u _ {\mathrm {x x}} (\pi , t) = 0
142
+ $$
143
+
144
+ For the error estimation, the relative percentage error $(\mathcal{R})$ used in [57] is chosen. Here, $u^{*}$ is the prediction and $u$ is the analytical solution.
145
+
146
+ $$
147
+ \mathcal {R} = \frac {\left| \left| u ^ {*} - u \right| \right| _ {2}}{\left| \left| u \right| \right| _ {2}} \times 1 0 0
148
+ $$
149
+
150
+ The same neural network architecture as in the previous case is chosen to solve the resulting nondimensional PDE. A low training loss is obtained, indicating that the PINN is trained successfully. The analytical solution for this case is $u(x,t) = \sin (x)\cos (4\pi t)$, which is used to quantify the error in the approximated solution. The nondimensional displacement of the Euler-Bernoulli beam is computed within $\mathcal{R} = 5.3e-4$ percent. The nondimensional displacement prediction using PINN is shown in Fig. 3.(a), and Fig. 3.(b) shows the absolute error between the exact and predicted solutions.
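For readers who wish to reproduce this kind of computation, the sketch below shows how the residual of (9) can be assembled with repeated automatic differentiation and how the relative percentage error $\mathcal{R}$ is evaluated against the analytical solution $u(x,t)=\sin(x)\cos(4\pi t)$. The network size, sample count and function names are illustrative assumptions rather than the authors' settings, and the network is untrained here, so the printed values are large; they become small only after minimizing the loss.

```python
import math
import torch

def grad(y, x):
    """dy/dx with the graph kept so higher-order derivatives can be taken."""
    return torch.autograd.grad(y, x, torch.ones_like(y), create_graph=True)[0]

def residual_eq9(model, x, t):
    """u_tt + u_xxxx - f(x, t) for the nondimensional Euler-Bernoulli beam (9)."""
    x = x.requires_grad_(True); t = t.requires_grad_(True)
    u = model(torch.cat([x, t], dim=1))
    u_tt = grad(grad(u, t), t)
    u_xxxx = grad(grad(grad(grad(u, x), x), x), x)
    f = (1 - 16 * math.pi ** 2) * torch.sin(x) * torch.cos(4 * math.pi * t)
    return u_tt + u_xxxx - f

def relative_percentage_error(u_pred, u_exact):
    return (u_pred - u_exact).norm() / u_exact.norm() * 100

# untrained toy network, for illustration only
model = torch.nn.Sequential(torch.nn.Linear(2, 20), torch.nn.Tanh(),
                            torch.nn.Linear(20, 20), torch.nn.Tanh(),
                            torch.nn.Linear(20, 1))

x = torch.rand(256, 1) * math.pi
t = torch.rand(256, 1)
print("mean squared residual:", residual_eq9(model, x, t).pow(2).mean().item())

u_exact = torch.sin(x) * torch.cos(4 * math.pi * t)
with torch.no_grad():
    u_pred = model(torch.cat([x, t], dim=1))
print("R (%):", relative_percentage_error(u_pred, u_exact).item())
```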
151
+
152
+ The contour plot for the approximate solution shows the dynamics of a simply supported beam under a force, where the x-axis represents the time, the y-axis represents the position along the length of the beam, and the colors represent the displacement of the beam. In Fig. 3.(b) the red regions indicate high displacement, while the blue regions indicate low displacement. There is a strong displacement at the position of the beam when a substantial force is applied, which is consistent with the known physics of this system. The network accurately captures the displacement behavior of the beam, which is evident by the smooth and continuous transition of colors across the plot.
153
+
154
+ The contour plot for the error in Fig. 3.(b) shows the difference between the approximate solution obtained from the network and the true solution. The x-axis represents the time, the y-axis represents the position along the length of the beam, and the colors represent the error. The red regions indicate high error, while the blue regions indicate low error. Regions with a low concentration of training points account for more of the error, while regions with a higher concentration of training points have relatively low error. One approach to reduce the error is to place more training points in the regions of high error. However, the overall error is low, which indicates that the network accurately captures the displacement behavior of the beam.
155
+
156
+ ![](images/223eaedaf166c0536fcc0a6743952448daf1c6c1b21cd96e13c5c079bb440170.jpg)
157
+ Fig. 3. Nondimensional Euler-Bernoulli beam equation. Color bar represents: Left: predicted solution $(u^{*})$; Right: absolute error in prediction $(|u - u^{*}|)$.
158
+
159
+ ![](images/1009c04bc35841c513b7633b474d8e730e6df96d0409f08054e73036a61e71f9.jpg)
160
+
161
+ ![](images/7551ea472579ccc5cd655a55444d69c788878dfe5539e54c5eece5f026e3569e.jpg)
162
+ Fig. 4. PINN framework for beam systems: For forward problems, the loss function comprises the nondimensional PDEs and the boundary and initial conditions. For inverse problems, the nondimensional PDEs are supplemented with extra data and potential initial/boundary conditions.
163
+
164
+ From Fig. 3.(b), the PINN is found to solve the dimensionless Euler-Bernoulli beam equation accurately, and hence, for all further experiments, nondimensional PDEs are simulated using PINNs. Additionally, the nondimensional displacement is henceforth referred to as displacement for conciseness. The presented methodology predicts the dimensionless quantities, and hence all the result plots and their associated error plots are dimensionless. Consequently, no units are mentioned in the plots of the presented results. Next, we describe the inverse problem-solving strategy using nondimensional equations.
165
+
166
+ # C. PINN Framework for Inverse Problems
167
+
168
+ The abstract dimensionless PDE described by (6) is well-posed, and the forward problem can be solved uniquely. However, in the case of an inverse problem, the problem is ill-posed and either the initial/boundary conditions or the parameters/forces are unknown. Hence the generic abstract PDE can be re-written as
169
+
170
+ $$
171
+ \mathcal {K} ^ {\prime} (x, t) := \mathcal {D} [ u ] (x, t; \lambda) - f (x, t) \quad \forall (x, t) \in \Omega \times T \subset \mathbb {R} ^ {d} \times \mathbb {R} \tag {10}
172
+ $$
173
+
174
+ The algorithm of the PINN framework for solving inverse problems is presented below.
175
+
176
+ # Algorithm 1 Inverse PINN algorithm
177
+
178
+ Goal: To predict the unknown parameter $\bar{\lambda}$ or function $\bar{f}(\bar{x},\bar{t})$ .
179
+ Step 1: Nondimensionalize the governing PDE to approximate the dimensionless parameter $\lambda$ or function $f(x,t)$ .
180
+ Step 2: Choose the training set from the space-time domain $\Omega \times T$ , and augment with $(x_{\mathrm{data}}, t_{\mathrm{data}})$ at which additional data $(u_{\mathrm{data}})$ are provided.
181
+ Step 3: Construct a feedforward deep neural network with inputs $(x,t)$ and outputs $u, \lambda$ or $f(x,t)$ .
182
+ Step 4: Minimize the loss function (11) with a suitable optimization algorithm, and find the optimal parameters.
183
+ Step 5: Use the optimal parameters to approximate the parameter $\lambda^{*}$ or the function $f^{*}(x,t)$ .
184
+
185
+ The aim of the inverse problem is to predict the unknown parameter $\lambda$ or the force function $f(x,t)$ when data are provided for the observable $u$ in some part of the training domain. In this paper, $u_{\mathrm{data}}$ denotes the data available for the inverse problem at $N_{\mathrm{data}}$ points. The prediction of the unknown parameter requires additional information in the loss function, as shown in Fig. 4. To guarantee a unique solution and ensure computational stability, the Jacobian matrix involved in the inverse operation with neural networks must be invertible (i.e., have a nonzero determinant) and possess a reasonable ratio between its largest and smallest eigenvalues. The algorithm for the inverse problem is the same as for the forward problem, with a minor modification in the loss function. In addition to the output $u$, the PINN now predicts the unknown parameters, forces, or initial/boundary conditions of the physical problem by leveraging the known data. The loss function for the inverse problem can be defined as
186
+
187
+ $$
188
+ \mathcal{L}^{\prime}(\theta) = \operatorname*{Min}_{\theta} \left( \frac{1}{N_{\text{train}}} \sum_{n=1}^{N_{\text{train}}} \left\| \mathcal{K}\left(x_{\mathrm{n}}, t_{\mathrm{n}}\right) \right\|^{2} + \frac{1}{N_{\text{data}}} \sum_{n=1}^{N_{\text{data}}} \left\| u_{\text{data}}\left(x_{\mathrm{n}}, t_{\mathrm{n}}\right) - u_{\text{pred}}\left(x_{\mathrm{n}}, t_{\mathrm{n}}\right) \right\|^{2} \right) \tag{11}
189
+ $$
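A minimal sketch of how (11) changes the training setup is given below (illustrative only; the class name, layer sizes and the initial guess of 0.5 for the unknown parameter are assumptions): the unknown dimensionless parameter is registered as an extra trainable scalar, the PDE residual is written in terms of it, and a data-misfit term over $(x_{\mathrm{data}}, t_{\mathrm{data}}, u_{\mathrm{data}})$ is added to the physics loss. Because the parameter lives on the module, any optimizer (e.g., L-BFGS) updates it jointly with the network weights.

```python
import torch

class InversePINN(torch.nn.Module):
    def __init__(self, hidden=20):
        super().__init__()
        self.net = torch.nn.Sequential(torch.nn.Linear(2, hidden), torch.nn.Tanh(),
                                       torch.nn.Linear(hidden, hidden), torch.nn.Tanh(),
                                       torch.nn.Linear(hidden, 1))
        # unknown dimensionless model parameter, learned jointly with the network
        self.lam = torch.nn.Parameter(torch.tensor(0.5))

    def forward(self, x, t):
        return self.net(torch.cat([x, t], dim=1))

def inverse_loss(model, residual_fn, x_int, t_int, x_d, t_d, u_data):
    res = residual_fn(model, x_int, t_int, model.lam)   # residual K depends on lambda
    misfit = model(x_d, t_d) - u_data                   # second term of (11)
    return (res ** 2).mean() + (misfit ** 2).mean()
```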
190
+
191
+ ![](images/1bfd9df7d0c809901ef1800d172fd715b098e8f5d63208b4b8b2849e2b268d74.jpg)
192
+ Fig. 5. Timoshenko single beam. Color bar represents: Left: cross-sectional rotation $(\theta^{*})$; Right: transverse displacement $(w^{*})$.
193
+
194
+ ![](images/32fd3cfa3623452aa548e7c73c254bcad514b396562b4d921159ecede50edd93.jpg)
195
+
196
+ Here, $u_{\mathrm{pred}}$ denotes the prediction of $u$ by the neural network. The next section implements the PINN algorithm for forward and inverse problems of the dimensionless beam equations.
197
+
198
+ # IV. NUMERICAL EXPERIMENTS AND DISCUSSION
199
+
200
+ In the following subsections, five numerical experiments are presented. The experiments are conducted in a progressive manner, beginning with simple models such as a single beam system and then progressing to more complex ones such as a double beam connected to a Winkler foundation. To verify the proposed method, we first investigate forward and inverse problems for a single beam, which serves as the proof of the concept. Then, we apply the method to more intricate cases of double-beam systems to simulate forward and inverse problems.
201
+
202
+ # A. Timoshenko Beam Forward Problem
203
+
204
+ The Euler-Bernoulli theory of beams is widely used in the literature and has been successfully applied in structures such as the Eiffel Tower and Ferris wheels. However, it does not consider the effects of transverse shear deformations, which are often significant in the vertical displacements of short and thick beams [59]. Timoshenko beam theory provides a mathematical framework for analyzing thick-beam bending [59]. According to Timoshenko theory, upon the action of an external force, the beam undergoes some cross-sectional rotation in addition to transverse displacement. Mathematically, the dynamics are modeled by a coupled system of PDEs with two variables: transverse displacement and cross-sectional rotation. The model is given by
205
+
206
+ $$
207
+ \begin{array}{l} \rho I \bar{\theta}_{\bar{t}\bar{t}} - EI \bar{\theta}_{\bar{x}\bar{x}} - kAG(\bar{w}_{\bar{x}} - \bar{\theta}) = 0 \\ \rho A \bar{w}_{\bar{t}\bar{t}} - kAG(\bar{w}_{\bar{x}\bar{x}} - \bar{\theta}_{\bar{x}}) = \bar{g}(\bar{x}, \bar{t}) \end{array} \tag{12}
208
+ $$
209
+
210
+ where $\rho, A, E$ and $I$ have the usual meaning as in the case of the Euler-Bernoulli beam; $k$ is called the Timoshenko shear coefficient; $G$ is the shear modulus; and $\bar{g}(\bar{x},\bar{t})$ is the external force acting on the beam. The transverse displacement is $\bar{w}(\bar{x},\bar{t})$ and $\bar{\theta}(\bar{x},\bar{t})$ is the cross-sectional rotation of the beam at position $\bar{x}$ and time $\bar{t}$ . After nondimensionalizing (12) and taking the resulting parameters [60] to be unity, the nondimensional equation can be written as follows:
211
+
212
+ $$
213
+ \begin{array}{l} \theta_ {\mathrm {t t}} - \theta_ {\mathrm {x x}} + (\theta - w _ {\mathrm {x}}) = 0 \tag {13} \\ w _ {\mathrm {t t}} + \left(\theta - w _ {\mathrm {x}}\right) _ {\mathrm {x}} = g (x, t) \\ \end{array}
214
+ $$
215
+
216
+ We consider the external force [61] to be $g(x,t) = \cos (t) - \frac{\pi}{2}\sin (x)\cos (t)$ and the computational domain to be $x\in [0,\pi ]$ and $t\in [0,1]$ . To make (13) well-posed, the initial and boundary conditions are supplemented as:
217
+
218
+ $$
219
+ \theta (x, 0) = \frac {\pi}{2} \cos (x) + \left(x - \frac {\pi}{2}\right), \quad \theta_ {t} (x, 0) = 0
220
+ $$
221
+
222
+ $$
223
+ w (x, 0) = \frac {\pi}{2} \sin (x), w _ {t} (x, 0) = 0
224
+ $$
225
+
226
+ $$
227
+ \theta (0, t) = \theta (\pi , t) = w (0, t) = w (\pi , t) = 0
228
+ $$
229
+
230
+ To estimate the error in the approximated solutions, the analytical solution for the considered problem is used, which is
231
+
232
+ $$
233
+ \begin{array}{l} \theta (x, t) = \left(\frac {\pi}{2} \cos (x) + \left(x - \frac {\pi}{2}\right)\right) \cos (t) \\ w (x, t) = \frac {\pi}{2} \sin (x) \cos (t) \\ \end{array}
234
+ $$
235
+
236
+ When analytical solutions are not available, there are various ways to validate the PINN solution. One approach is to compare the solutions with those obtained using numerical methods such as finite difference, finite element, finite volume or spectral methods. This can be done by comparing the predicted solutions from the PINNs with the solutions from the numerical simulation for the same physical equation. Another approach is to compare the solutions obtained through PINNs with experimental data. One can compare the predicted solutions from the PINNs with values experimentally measured over space and time. Finally, one can validate the solutions obtained through PINNs by checking if they satisfy the known physical constraints of the system. In summary, one can use available experimental data, numerical methods or physical constraints to evaluate the accuracy of the solution obtained using PINNs.
237
+
238
+ The difficulty of solving a system of PDEs is greater than that of solving a single PDE, but the neural network structure used for the Euler-Bernoulli equation is also successful in approximating solutions for Timoshenko beams. In particular, the transverse displacement of the beam is computed within $\mathcal{R} = 3.3e - 4$ percent, and the cross-sectional rotation is approximated within $\mathcal{R} = 2.8e - 3$ percent. Approximated solutions and absolute errors in predicting the transverse displacement and cross-sectional rotation are presented in Figs. 5 and 6. Fig. 5 demonstrates that when a sinusoidal force is applied to a Timoshenko beam, the beam bends more
239
+
240
+ ![](images/6b3560a9ae8f24a577bb60e60ab8ebe883a12fa5d86e4aab54ee8f7403afa8e2.jpg)
241
+ Fig. 6. Timoshenko single beam: absolute error in predictions. Left: $|\theta -\theta^{*}|$; Right: $|w - w^{*}|$.
242
+
243
+ ![](images/1bddb9f55962c751d311649ea43a28f392c9b25db5d0abe4c9cd818036090cbd.jpg)
244
+
245
+ than it rotates. As indicated by the scale in the figures, the maximum deflection is 1.44 and the maximum rotation is 0.32. Additionally, the low error in predictions demonstrates that even with the increase in the PDE complexity, the PINN successfully solves the Timoshenko PDE with comparable results to the Euler-Bernoulli equation.
246
+
247
+ We compare the results obtained from our method with three other methods. The first is the widely used numerical technique known as the finite difference method (FDM). The other two are neural network-based approaches, namely physics-guided neural networks (PGNN) [28], [62]–[65] and gradient-enhanced physics-informed neural networks (gPINN) [66]. First, for the FDM we employ a central difference scheme to approximate the space derivatives and a leapfrog scheme to approximate the time derivatives, which yields second-order accuracy in both space and time. The results for the Timoshenko beam show that PINNs can achieve a higher level of accuracy than the FDM even with a smaller number of training points: 30,000 points are used in the FDM scheme, whereas only 16,000 points are used for training the PINN, and Table I indicates that the PINN performs better than the FDM.
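A compact sketch of such a finite-difference scheme for (13) is given below; the grid resolution, time step, and starting-step treatment are illustrative choices rather than the exact settings used to produce Table I.

```python
import numpy as np

nx, T = 201, 1.0
x = np.linspace(0.0, np.pi, nx)
dx = x[1] - x[0]
dt = 0.25 * dx                       # conservative CFL-type choice (assumed)
nt = int(round(T / dt))

def g(xv, t):
    return np.cos(t) - 0.5 * np.pi * np.sin(xv) * np.cos(t)

def rhs(theta, w, t):
    """Central-difference approximation of theta_tt and w_tt from (13)."""
    r_t, r_w = np.zeros_like(theta), np.zeros_like(w)
    r_t[1:-1] = (theta[2:] - 2 * theta[1:-1] + theta[:-2]) / dx**2 \
        - (theta[1:-1] - (w[2:] - w[:-2]) / (2 * dx))
    r_w[1:-1] = (w[2:] - 2 * w[1:-1] + w[:-2]) / dx**2 \
        - (theta[2:] - theta[:-2]) / (2 * dx) + g(x[1:-1], t)
    return r_t, r_w

theta = 0.5 * np.pi * np.cos(x) + (x - 0.5 * np.pi)   # theta(x, 0)
w = 0.5 * np.pi * np.sin(x)                           # w(x, 0)

# first step uses a Taylor expansion with zero initial velocities
r_t, r_w = rhs(theta, w, 0.0)
theta_old, w_old = theta, w
theta, w = theta + 0.5 * dt**2 * r_t, w + 0.5 * dt**2 * r_w

for n in range(1, nt):                                # leapfrog in time
    r_t, r_w = rhs(theta, w, n * dt)
    theta_new = 2 * theta - theta_old + dt**2 * r_t
    w_new = 2 * w - w_old + dt**2 * r_w
    theta_new[[0, -1]] = 0.0                          # simply supported ends
    w_new[[0, -1]] = 0.0
    theta_old, w_old, theta, w = theta, w, theta_new, w_new
```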
248
+
249
+ Second, the performance of the PINN is compared to that of a neural network-based approach, PGNN, which leverages physical knowledge embedded in the available data, for instance, the relationship between beam acceleration and displacement for the Timoshenko beam problem. Accelerometers can be employed at discrete locations along the beam to obtain acceleration data. Acceleration data at five equidistant points along the beam are used, with 2000 data points at each location. This dataset is augmented with the boundary and initial conditions of the displacement to match the training data size of the PINN. The PGNN is a deep neural network-based architecture with inputs position $(x)$, time $(t)$, and acceleration, and with the displacement $(w)$ as its output. Training the PGNN with hyperparameters identical to those used for the PINN, the PGNN predicts the displacement $(w)$ with an error of approximately $0.002739\%$, as shown in Table I.
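Schematically, the first PGNN described above has the following input/output structure (the layer sizes are illustrative assumptions; only the choice of inputs and output follows the text):

```python
import torch.nn as nn

# Data-driven PGNN for the displacement: inputs are position, time and the
# measured acceleration; the output is the displacement w.  Sizes are assumed.
pgnn_w = nn.Sequential(
    nn.Linear(3, 20), nn.Tanh(),     # inputs: (x, t, measured acceleration)
    nn.Linear(20, 20), nn.Tanh(),
    nn.Linear(20, 1),                # output: displacement w
)
# Training uses the acceleration measurements together with the initial and
# boundary displacement data, as described in the text above.
```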
250
+
251
+ Furthermore, utilizing the displacement values $(w)$, the neural network's automatic differentiation, and (13), $\theta_{\mathrm{x}}$ is derived. Subsequently, a second neural network is constructed to predict $\theta$, with $\theta_{\mathrm{x}}$ used as the input. Boundary and initial conditions for the cross-sectional rotation $(\theta)$ are also used to guide the PGNN towards the optimal solution. After training the PGNN, the cross-sectional rotation is predicted with
252
+
253
+ TABLE I TIMOSHENKO BEAM: $\mathcal{R}$ AT $t = 1$
254
+
255
+ <table><tr><td>w,θ</td><td>PINN</td><td>FDM</td><td>PGNN</td><td>gPINN</td></tr><tr><td>w(%)</td><td>3.3e-4</td><td>0.005615</td><td>0.002739</td><td>0.249849</td></tr><tr><td>θ(%)</td><td>2.8e-3</td><td>0.004733</td><td>3.486727</td><td>5.498449</td></tr></table>
256
+
257
+ approximately $3.486727\%$ error. It can be inferred from Table I that both displacement and rotation predictions exhibited higher errors than PINN. This discrepancy can be attributed to the restricted availability of acceleration data at only discrete spatial locations within the interior domain rather than a random distribution across the entire domain. Furthermore, the second neural network, employed for rotation prediction, demonstrated inferior performance potentially due to error propagation.
258
+
259
+ Third, we perform another comparison with a neural network-based method to simulate PDEs, gradient-enhanced PINN (gPINN) [66], which differs from PINN in terms of the loss function. The acronym "gPINN" proposed in [66] is used in this work instead of "GPINN", as the latter refers to another method [67]. In addition to the loss function of PINN, gPINN leverages gradient information of the PDE residual and embeds this gradient into the loss function. For the Timoshenko beam problem, derivatives of the system of PDEs (13) with respect to space $(x)$ and time $(t)$ are supplemented in the loss function. Table I shows that gPINN exhibits higher relative error percentages in learning the displacement and cross-sectional rotation than PINN. The high-order derivatives of the physical equations in the loss function of gPINN make automatic differentiation [68] and backpropagation of the loss more challenging, resulting in poor predictions of deflection and rotation for the Timoshenko beam. Table I demonstrates that PINN outperforms FDM, PGNN, and gPINN in accurately predicting the displacement and cross-sectional rotation for the Timoshenko beam, emphasizing its superior performance compared to the three alternative methods.
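The gPINN modification can be summarized in a few lines. In this sketch, `residual` stands for any differentiable function returning the PDE residual at collocation points, and the weight `w_g` is an illustrative choice; neither is taken from [66] verbatim.

```python
import torch

def gpinn_loss(residual, x, t, w_g=0.1):
    """Gradient-enhanced residual loss: the PDE residual and its derivatives
    with respect to the inputs are both driven to zero (sketch of the idea
    in [66])."""
    r = residual(x, t)                       # x and t require gradients
    loss = torch.mean(r ** 2)
    for v in (x, t):
        dr = torch.autograd.grad(r, v, grad_outputs=torch.ones_like(r),
                                 create_graph=True)[0]
        loss = loss + w_g * torch.mean(dr ** 2)
    return loss
```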
260
+
261
+ # B. Timoshenko Beam Inverse Problem
262
+
263
+ This section addresses the inverse problem for the Timoshenko beam, with the aim of determining the material properties of a beam by leveraging the PDE and the beam's displacement and rotation data. In structural engineering, the inverse problem of a Timoshenko beam PDE is significant for determining the
264
+
265
+ beam system's structural behavior and for health monitoring. It helps engineers infer internal material properties and unknown forces from observed responses such as displacement and rotation measurements. The PINN solves this problem by combining physical knowledge with deep learning: a neural network learns the mapping between the unknown parameters of the PDE and the observed data, while the physics is enforced through the PDE constraints in the loss function. This parameter identification provides crucial information for structural diagnosis and repair and helps engineers ensure the safety and stability of structures. The Timoshenko model for parameter estimation is presented as follows.
266
+
267
+ ![](images/ef3e0e4e093be35e54f474eaf044309370d05dfc79260e793c1ee4c37f5774a3.jpg)
268
+
269
+ ![](images/36cb89ebc4925b9de37d92594ffde5aee2571267a3569bed6819a0b5579f652e.jpg)
270
+ Fig. 7. Data to learn the parameters for the Timoshenko single beam. Blue dots: collocation points. Red dots: additional data points of rotation $(\theta)$ and displacement $(w)$. Black dots: initial and boundary points.
271
+
272
+ $$
273
+ \begin{array}{l} \alpha \theta_{tt} - \theta_{xx} + (\theta - w_{x}) = 0 \tag {14} \\ w_{tt} + \left(\theta - w_{x}\right)_{x} = g(x, t) \\ \end{array}
274
+ $$
275
+
276
+ In the context of the inverse problem of the Timoshenko beam, the PINN is trained on the observed deflections and rotations of the beam, and the material properties are treated as the unknowns to be estimated. In this case, the force $g(x, t)$ applied to the beam is considered to be known, and the only unknown in the model is $\alpha$. This makes the problem ill-posed, requiring additional data a priori to predict the unknown parameter. For $\alpha = 1$, the transverse displacement and cross-sectional rotation data obtained from the forward problem are supplied to approximate the parameter value. These data are not error-free and come with $10^{-3}$ percent error for the transverse displacement and $10^{-4}$ percent error for the cross-sectional rotation. As shown in Fig. 7, the additional data are supplied on 5000 points (red dots) at five positions on the beam $(x = 0.2, 0.8, 1.8, 2.6, 3)$. In practice, these data can be collected using sensors installed at the corresponding locations on the beam, as shown in Fig. 7.
277
+
278
+ To solve the inverse problem, the neural network is trained on 1600 random training points with the distribution $N_{\mathrm{i}} = 200$, $N_{\mathrm{b}} = 400$, and $N_{\mathrm{int}} = 1000$. To regularize the PDE term in the loss function, a regularization parameter of 1 was
279
+
280
+ chosen [25]. Using the L-BFGS optimizer, 5000 iterations are performed, and the other parameters are kept the same as in the forward Timoshenko problem. At $t = 0.5$, the unknown parameter is learned as $\alpha = 1.0136$.
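In practice, the unknown coefficient is simply an extra trainable scalar that is optimized together with the network weights. The sketch below illustrates this; the initial guess, the optimizer settings, and the helpers `residual_eq14`, `data_loss`, and `ic_bc_loss` are hypothetical placeholders, and `net` refers to the solution network of the earlier sketch (outputting $\theta$ and $w$).

```python
import torch

alpha = torch.nn.Parameter(torch.tensor(0.5))   # unknown coefficient of (14)

optimizer = torch.optim.LBFGS(list(net.parameters()) + [alpha], max_iter=5000)

def closure():
    optimizer.zero_grad()
    r_theta, r_w = residual_eq14(alpha)          # residual of (14) with current alpha
    loss = (torch.mean(r_theta ** 2) + torch.mean(r_w ** 2)
            + data_loss()                        # mismatch with the sensor data of Fig. 7
            + ic_bc_loss())                      # initial and boundary conditions
    loss.backward()
    return loss

optimizer.step(closure)
print(float(alpha))                              # should approach the true value of 1
```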
281
+
282
+ We also compare the PINN with a standard deep neural network (DNN), since using a numerical iterative method for inverse problems is computationally expensive. The DNN uses the same architecture as the PINN and is employed to identify the parameter of the Timoshenko single beam; it predicts $\alpha = 0.6124$, whereas the PINN learns $\alpha = 1.0136$. The PINN is therefore considerably more accurate than the DNN for the inverse problem of beam systems.
283
+
284
+ However, several issues may need to be addressed when solving inverse problems through the presented framework. First, to avoid overfitting, the minimum number of training data points required to solve the problem should be determined empirically by gradually increasing the number of training points until the model's performance is satisfactory. Second, for some physical problems, noisy data may lead to nonconvergence of the optimization algorithm; hence, suitable filtering or preprocessing of the data may be required before using the PINN framework. Finally, every run of the neural network may yield a different parameter or function value because the optimizer can converge to different local minima; it may therefore be useful to estimate the statistics of the inverse-problem solution through multiple runs.
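The last point can be implemented as a simple loop over random seeds; `train_inverse_problem` below is a hypothetical helper that builds, trains, and returns the identified parameter.

```python
import numpy as np
import torch

estimates = []
for seed in range(10):
    torch.manual_seed(seed)
    estimates.append(train_inverse_problem())    # hypothetical training routine
print(np.mean(estimates), np.std(estimates))     # statistics over independent runs
```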
285
+
286
+ The experimental results for the single-beam equations illustrate that PINNs can efficiently solve forward and inverse problems for single beams. Next, we investigate the ability of PINNs to handle more complex systems, specifically double-beam systems connected by a Winkler foundation, as depicted in Fig. 10.
287
+
288
+ # C. Euler-Bernoulli Double-Beam Forward Problem
289
+
290
+ In this section, and for all further experiments, forced transverse vibrations of two parallel beams are studied. Structurally, two parallel beams of equal lengths joined by a Winkler massless foundation are considered. Both beams are considered slender and have homogeneous material properties. The transverse displacement of both beams is governed by the following system of PDEs [41]:
291
+
292
+ $$
293
+ \begin{array}{l} m_{1} \bar{w}_{1_{\bar{t}\bar{t}}} + K_{1} \bar{w}_{1_{\bar{x}\bar{x}\bar{x}\bar{x}}} + k (\bar{w}_{1} - \bar{w}_{2}) = \bar{f}_{1}(\bar{x}, \bar{t}) \tag {15} \\ m_{2} \bar{w}_{2_{\bar{t}\bar{t}}} + K_{2} \bar{w}_{2_{\bar{x}\bar{x}\bar{x}\bar{x}}} + k (\bar{w}_{2} - \bar{w}_{1}) = \bar{f}_{2}(\bar{x}, \bar{t}) \\ \end{array}
294
+ $$
295
+
296
+ Here, $\bar{w}_1$ and $\bar{w}_2$ are the beam displacements for the first and the second beams respectively. The distributed continuous forces acting transversely on the beams are $\bar{f}_1$ and $\bar{f}_2$ as shown in Fig. 10. The product of the density and the cross-sectional area of the beams is given by $m_1 = \rho_1A_1$ for the first beam and $m_2 = \rho_2A_2$ for the second beam. The parameters $K_{1}$ and $K_{2}$ denote the flexural rigidity of the beams and are given by $K_{1} = E_{1}I_{1}$ and $K_{2} = E_{2}I_{2}$ . The stiffness modulus of the Winkler elastic layer connecting both beams is given by $k$ . For simplicity, we consider $m_{1} = m_{2}$ , and $K_{1} = K_{2}$ , and nondimensionalize (15). After taking all the resulting parameters to be unity, the nondimensional equation has the
297
+
298
+ ![](images/254d5faef247df73e9b1a45196d7e8e88e3d4d898b4f04c4c320a9449f65fddf.jpg)
299
+ Fig. 8. Euler-Bernoulli double-beam: the color bar represents the absolute error in predictions. Left: $|w_{1} - w_{1}^{*}|$; Right: $|w_{2} - w_{2}^{*}|$.
300
+
301
+ ![](images/7718ca2f61148c3d70ea7fc57b0bacc429f59045455e06433b7581eedbd9e48e.jpg)
302
+
303
+ ![](images/3a06e799de411197fa296d39a55829447e9068fbff7c63ecb19e8510052e59e4.jpg)
304
+
305
+ ![](images/64efb18302910b4d43999f4a067b9864b05ca4900ab47f2c9dbe737b6cad2671.jpg)
306
+
307
+ ![](images/5bae6e7700464caeead2ac6d04a33d97294203e5fc853ac169dd9a3399bc3258.jpg)
308
+
309
+ ![](images/eba1e20899af7b3c785da442d9f99adca63fa53c290236ae43ead0522dfe53ea.jpg)
310
+ Fig. 9. Derived quantities for the Euler-Bernoulli double beam. Scattered points represent the exact solution and the continuous line refers to the derived solution. Top (first beam): Left, bending moment; Mid, velocity; Right, acceleration. Bottom (second beam): Left, bending moment; Mid, velocity; Right, acceleration.
311
+
312
+ ![](images/5fad07f4bb4bba7015ed805bfac67c4ee6ad5c4f56aabc930bebc24328662051.jpg)
313
+
314
+ ![](images/bf915ecbd6441ef0ab484e41f60e5057a3f05b1d3323c4de7ae18d0e0c937680.jpg)
315
+
316
+ ![](images/01dace0f61cb90e3a8f1a05c9309fa846b0815cde6618ec232c8eec6649e403c.jpg)
317
+ Fig. 10. Double beam system connected by a Winkler foundation.
318
+
319
+ same form as (15) with unit coefficients. The initial conditions are
320
+
321
+ $$
322
+ w _ {1} (x, 0) = \sin (x), \quad w _ {1 _ {\mathrm {t}}} (x, 0) = 0
323
+ $$
324
+
325
+ $$
326
+ w _ {2} (x, 0) = \frac {\pi}{2} \sin (x), \quad w _ {2 _ {\mathrm {t}}} (x, 0) = 0
327
+ $$
328
+
329
+ All four ends of the beams are assumed to be simply supported, expressed as
330
+
331
+ $$
332
+ w _ {1} (0, t) = w _ {1} (\pi , t) = w _ {1 _ {\mathrm {x x}}} (0, t) = w _ {1 _ {\mathrm {x x}}} (\pi , t) = 0
333
+ $$
334
+
335
+ $$
336
+ w _ {2} (0, t) = w _ {2} (\pi , t) = w _ {2 _ {\mathrm {x x}}} (0, t) = w _ {2 _ {\mathrm {x x}}} (\pi , t) = 0
337
+ $$
338
+
339
+ The external acting force is
340
+
341
+ $$
342
+ f _ {1} (x, t) = \left(1 - \frac {\pi}{2}\right) \sin (x) \cos (t)
343
+ $$
344
+
345
+ $$
346
+ f _ {2} (x, t) = \left(\frac {\pi}{2} - 1\right) \sin (x) \cos (t)
347
+ $$
348
+
349
+ For the considered problem, the analytical solution is given by
350
+
351
+ $$
352
+ w _ {1} (x, t) = \sin (x) \cos (t), \quad w _ {2} (x, t) = \frac {\pi}{2} \sin (x) \cos (t)
353
+ $$
354
+
355
+ In addition to computing the beam displacements, derived quantities such as the velocity, acceleration, and bending moment are also obtained for this problem. These derived quantities aid in the prognosis and diagnostics of the system. For instance, the bending moment quantifies the bending effect produced when an external force is applied to a structural element. The beam is the most common structural member vulnerable to bending
356
+
357
+ ![](images/2023851f5f87940c30dd062b9e8cb0fa901c8a4240098b20182ebb92d09bc1a1.jpg)
358
+
359
+ ![](images/4897195cac1b7d255640629d9306c90e63fe5163d6cbdb9469fba075a183afb3.jpg)
360
+
361
+ ![](images/ef5c07c969bf18b0cbb06e9b6b02b731173cf76d71a0c4535fbdb0ba77aa66b4.jpg)
362
+ Fig. 11. Timoshenko double beam. Scattered points represent the exact solution, and the continuous line refers to the predicted solution. Top (first beam): Left, displacement $(w_{1})$; Right, rotation $(\theta_{1})$. Bottom (second beam): Left, displacement $(w_{2})$; Right, rotation $(\theta_{2})$.
363
+
364
+ ![](images/bcf55aa43f5029ee5f3211b028b8075f356d354de9d3c9a2434ab8daf1310cdf.jpg)
365
+
366
+ moments because it can bend at any point along its length when subjected to an external force.
367
+
368
+ TABLE II EULER-BERNOULLI DOUBLE-BEAM: $\mathcal{R}$ AT $t = 1$
369
+
370
+ <table><tr><td></td><td>First beam</td><td>Second beam</td></tr><tr><td>Displacement (%)</td><td>1.9348 × 10<sup>-5</sup></td><td>4.3253 × 10<sup>-5</sup></td></tr><tr><td>Bending Moment (%)</td><td>9.6112 × 10<sup>-4</sup></td><td>6.5506 × 10<sup>-4</sup></td></tr><tr><td>Velocity (%)</td><td>1.9043 × 10<sup>-3</sup></td><td>2.0161 × 10<sup>-3</sup></td></tr><tr><td>Acceleration (%)</td><td>1.9011 × 10<sup>-2</sup></td><td>1.4442 × 10<sup>-2</sup></td></tr></table>
371
+
372
+ For simulating the Euler-Bernoulli double beam, the same neural network architecture as for the single Euler-Bernoulli beam is considered. The only change is in the residual parameter, which is 1 for this case. The results are illustrated in Figs. 8-9 and Table II. The absolute difference between the PINN-predicted solution and the exact solution is approximately $10^{-4}$ for the first beam and approximately $10^{-3}$ for the second beam, as shown in Fig. 8. The bending moment, velocity, and acceleration are computed using the neural network's automatic differentiation and backpropagation features. Table II reports the accuracy of these computed quantities at $t = 1$ for both beams. The relative percent error in computing the transverse displacement of the beams is on the order of $10^{-5}$, and for the acceleration this error is on the order of $10^{-2}$, which is very low and shows the potential of physics-informed learning. Fig. 9 illustrates the computed velocity, bending moment, and acceleration of both beams.
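A sketch of how such derived quantities follow from a trained displacement network via automatic differentiation is shown below (nondimensional form with unit coefficients, so the bending moment is proportional to $w_{xx}$; the function and argument names are illustrative).

```python
import torch

def derived_quantities(net_w, x, t):
    """Velocity, acceleration and bending moment of a beam obtained from a
    trained displacement network by automatic differentiation (sketch)."""
    x.requires_grad_(True)
    t.requires_grad_(True)
    w = net_w(torch.cat([x, t], dim=1))
    ones = torch.ones_like(w)
    w_t = torch.autograd.grad(w, t, ones, create_graph=True)[0]     # velocity
    w_tt = torch.autograd.grad(w_t, t, ones, create_graph=True)[0]  # acceleration
    w_x = torch.autograd.grad(w, x, ones, create_graph=True)[0]
    w_xx = torch.autograd.grad(w_x, x, ones, create_graph=True)[0]  # bending moment (up to -EI, unity here)
    return w_t, w_tt, w_xx
```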
373
+
374
+ # D. Timoshenko Double-Beam Forward Problem
375
+
376
+ The double-beam system modeled by Euler-Bernoulli theory can also be modeled using Timoshenko theory under the same assumptions as described for the single Timoshenko equations [40]. In addition to providing the transverse displacements of the beams, Timoshenko theory also provides the cross-sectional rotations of both beams through the system of PDEs [40] given by
377
+
378
+ $$
+ \begin{array}{l} k A_{1} G \left(\bar{\theta}_{1_{\bar{x}}} - \bar{w}_{1_{\bar{x}\bar{x}}}\right) + \rho A_{1} \bar{w}_{1_{\bar{t}\bar{t}}} + K \left(\bar{w}_{1} - \bar{w}_{2}\right) = \bar{f}_{1}(\bar{x}, \bar{t}) \\ E I_{1} \bar{\theta}_{1_{\bar{x}\bar{x}}} + G A_{1} k \left(\bar{w}_{1_{\bar{x}}} - \bar{\theta}_{1}\right) - \rho I_{1} \bar{\theta}_{1_{\bar{t}\bar{t}}} = 0 \\ k A_{2} G \left(\bar{\theta}_{2_{\bar{x}}} - \bar{w}_{2_{\bar{x}\bar{x}}}\right) + \rho A_{2} \bar{w}_{2_{\bar{t}\bar{t}}} + K \left(\bar{w}_{2} - \bar{w}_{1}\right) = \bar{f}_{2}(\bar{x}, \bar{t}) \\ E I_{2} \bar{\theta}_{2_{\bar{x}\bar{x}}} + G A_{2} k \left(\bar{w}_{2_{\bar{x}}} - \bar{\theta}_{2}\right) - \rho I_{2} \bar{\theta}_{2_{\bar{t}\bar{t}}} = 0 \tag {16} \\ \end{array}
+ $$
389
+
390
+ where $\bar{w}_{\mathrm{i}}(\bar{x},\bar{t})$ and $\bar{\theta}_{\mathrm{i}}(\bar{x},\bar{t})$, $i = 1,2$, denote the transverse displacement and cross-sectional rotation of the beams, respectively. $K$ is the stiffness modulus of the Winkler elastic layer, $G$ is the shear modulus, and $k$ is the Timoshenko shear coefficient. The rest of the parameters have the usual meanings as described earlier. For simplicity, we consider $A_{1} = A_{2}$ and $I_{1} = I_{2}$ and nondimensionalize (16). With some additional assumptions, the nondimensional equation has the same form as (16) with unit coefficients. For the numerical experiment, the initial state of the double-beam system is taken to be
391
+
392
+ ![](images/cdaee41833294891306ca3cef3f6df9fcca0aefb444ed73a8ec68ef19f219864.jpg)
393
+
394
+ ![](images/17828d50100fa6974fb6fcfcc5205c461695f93b0f6a49f54744ab168b839bed.jpg)
395
+
396
+ ![](images/b72ed204aee6f9c49b19981533757567e4b03ca5ce93097aa7dcd2c3e1a825b4.jpg)
397
+
398
+ ![](images/2516ddf31a3b53b35e9a54238d57cb83e17903da08b94ca1bf2cd626d08c37b5.jpg)
399
+
400
+ ![](images/6e8258552940ba351878364bc53cc6b1ed6e774d652bb5226a63bc5d95924ab9.jpg)
401
+ Fig. 12. Timoshenko double beam: absolute errors in prediction; $\theta_{1}$ and $w_{1}$ are the rotation and displacement of the first beam, $\theta_{2}$ and $w_{2}$ are the rotation and displacement of the second beam. Top (16000 training points): (a) $|\theta_1 - \theta_1^* |$; (b) $|w_{1} - w_{1}^{*}|$; (c) $|\theta_{2} - \theta_{2}^{*}|$; (d) $|w_{2} - w_{2}^{*}|$. Bottom (1600 training points): (a) $|\theta_{1} - \theta_{1}^{*}|$; (b) $|w_{1} - w_{1}^{*}|$; (c) $|\theta_{2} - \theta_{2}^{*}|$; (d) $|w_{2} - w_{2}^{*}|$.
402
+
403
+ ![](images/f4eb8880d19899a6e0e2df2ca83c4fd8389e1564a3a01bb0d0fcf7a0154600b0.jpg)
404
+
405
+ ![](images/9897569c649ec8e093daf33e7941bad057e5120561b6e3e5df196d5e7ffc7cc2.jpg)
406
+
407
+ ![](images/13c3efce451a975c83958dc306f0c6c248bcffb77f6c910ed18ca4edce3a38f4.jpg)
408
+
409
+ $$
410
+ \theta_ {1} (x, 0) = \left(\frac {\pi}{2} \cos (x) + \left(x - \frac {\pi}{2}\right)\right), \quad \theta_ {1 _ {t}} (x, 0) = 0
411
+ $$
412
+
413
+ $$
414
+ w _ {1} (x, 0) = \frac {\pi}{2} \sin (x), \quad w _ {1 _ {t}} (x, 0) = 0
415
+ $$
416
+
417
+ $$
418
+ \theta_ {2} (x, 0) = \frac {2}{\pi} \left(\frac {\pi}{2} \cos (x) + \left(x - \frac {\pi}{2}\right)\right), \quad \theta_ {2 _ {t}} (x, 0) = 0
419
+ $$
420
+
421
+ $$
422
+ w _ {2} (x, 0) = \sin (x), \quad w _ {2 _ {\mathrm {t}}} (x, 0) = 0
423
+ $$
424
+
425
+ Simply supported boundary conditions are provided to make the problem well-posed:
426
+
427
+ $$
428
+ \theta_ {1} (0, t) = \theta_ {1} (\pi , t) = w _ {1} (0, t) = w _ {1} (\pi , t) = 0
429
+ $$
430
+
431
+ $$
432
+ \theta_ {2} (0, t) = \theta_ {2} (\pi , t) = w _ {2} (0, t) = w _ {2} (\pi , t) = 0
433
+ $$
434
+
435
+ Here, $f_{1}(x,t)$ , $f_{2}(x,t)$ and the analytic solutions are as follows
436
+
437
+ $$
438
+ f _ {1} (x, t) = \cos (t) (1 - \sin (x))
439
+ $$
440
+
441
+ $$
442
+ f _ {2} (x, t) = \frac {2}{\pi} \cos (t) - \frac {\pi}{2} \sin (x) \cos (t)
443
+ $$
444
+
445
+ $$
446
+ \begin{array}{l} \theta_ {1} (x, t) = \left(\frac {\pi}{2} \cos (x) + \left(x - \frac {\pi}{2}\right)\right) \cos (t) \\ \theta_ {2} (x, t) = \frac {2}{\pi} \left(\frac {\pi}{2} \cos (x) + \left(x - \frac {\pi}{2}\right)\right) \cos (t) \\ w _ {1} (x, t) = \frac {\pi}{2} \sin (x) \cos (t), \quad w _ {2} (x, t) = \sin (x) \cos (t) \\ \end{array}
447
+ $$
448
+
449
+ TABLE III TIMOSHENKO DOUBLE-BEAM: HYPERPARAMETERS
450
+
451
+ <table><tr><td>No. of points</td><td>Ni</td><td>Nb</td><td>Nint</td><td>Layers</td><td>Neurons</td><td>Epochs</td></tr><tr><td>16000</td><td>2000</td><td>2000</td><td>10000</td><td>4</td><td>20</td><td>15K</td></tr><tr><td>1600</td><td>200</td><td>200</td><td>1000</td><td>4</td><td>20</td><td>15K</td></tr></table>
452
+
453
+ TABLE IV TIMOSHENKO DOUBLE-BEAM: $\mathcal{R}$ AT $t = 1$
454
+
455
+ <table><tr><td></td><td>16000 points</td><td>1600 points</td></tr><tr><td>θ1 (%)</td><td>1.6038 × 10<sup>-3</sup></td><td>2.6211 × 10<sup>-3</sup></td></tr><tr><td>w1 (%)</td><td>3.9302 × 10<sup>-5</sup></td><td>2.503 × 10<sup>-4</sup></td></tr><tr><td>θ2 (%)</td><td>1.0826 × 10<sup>-3</sup></td><td>4.9405 × 10<sup>-3</sup></td></tr><tr><td>w2 (%)</td><td>7.8614 × 10<sup>-5</sup></td><td>3.4904 × 10<sup>-4</sup></td></tr></table>
456
+
457
+ Two experiments are performed, varying the number of training points, as shown in Table III. Table IV shows
458
+
459
+ the relative percent error in approximating the transverse displacement and cross-sectional rotations for both beams. For cross-sectional rotations $\theta_{1}$ and $\theta_{2}$ , the magnitude of the percent error remains the same even for fewer training points.
460
+
461
+ Using a large number of training points can increase the training time and may not be feasible for problems with many parameters. In these cases, using fewer training points can lead to less accurate solutions, but these solutions can be obtained relatively quickly. This approach allows engineers to make informed decisions about the parameters, and once optimal parameters have been identified, the forward solutions can be recalculated with higher accuracy by using more training points. This is referred to as training with fewer points for the forward problem. The absolute difference between the predicted and exact solutions of $\theta_{1}$, $w_{1}$, $\theta_{2}$ and $w_{2}$, even for 1600 training points, is very small, as shown in Figs. 11 and 12. Fig. 11 presents the PINN predictions for a double Timoshenko beam. The scattered points refer to the exact solution, and the continuous line represents the predicted solution. The force is applied uniformly to both beams; however, the deflection and rotation of the first beam are greater than those of the second beam. The results in Fig. 12 indicate that, for the second beam, a larger number of training points (16000) results in a more accurate prediction of the deflection and rotation than a smaller number of training points (1600). Conversely, for the first beam, a smaller number of training points (1600) results in a more accurate prediction of the quantities of interest than a larger number of training points (16000). In any case, the difference in absolute error is relatively small, demonstrating that even with fewer training points, PINNs can still produce accurate predictions.
462
+
463
+ # E. Timoshenko Double-Beam Inverse Problem
464
+
465
+ The applied force on structural systems is critical for structural design and condition assessment. In design, control, and diagnosis, accurate estimation of dynamic forces acting on a structure is essential. These details can be used to evaluate the structural condition. For example, understanding the impact of heavy vehicles on bridge structures can aid in detecting early damage to them. Indirect force determination is of special interest when the applied forces cannot be measured directly, while the responses can be measured easily.
466
+
467
+ ![](images/6ea89819bf4546ff08328b0e07dc65205102320328143ead993cd065d5782093.jpg)
468
+ Fig. 13. Timoshenko double-beam inverse problem: absolute error in the prediction of the force when the additional rotation and deflection data are provided at five locations. Left: no noise; Right: 20 percent Gaussian noise.
469
+
470
+ ![](images/645a41f4dab091fe0d6613a9f132fd82d2b2b38004b8734151054554c555c01d.jpg)
471
+
472
+ For the inverse problem, three distinct experiments are performed on (16). First, the unknown parameter is learned from the Timoshenko double-beam system. We consider the unknown parameter to be $\rho A_{1}$ from (16). For the value of $\rho A_{1} = 1$ , the data for transverse displacement and cross-sectional rotation are provided at some points in the computational domain. Second, the unknown applied function on the first beam is learned by providing noise-free simulated displacement and cross-sectional rotation data. For this case, all other parameters, initial and boundary conditions are considered to be known, and only the function $f_{1}(x,t)$ is unknown. Third, the same force function is predicted by providing noisy displacement and cross-sectional rotation data. The data generated for learning the function in the second case are corrupted with noise to be used in the third case. The exact solution for the function to be learned in the second and third cases is $\cos(t)(1 - \sin(x))$ .
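In the second and third experiments, the unknown force is represented by an additional small network that is trained jointly with the solution network. The following sketch shows the idea for the first beam equation of (16) in its unit-coefficient form; `d_dx`, `d2_dx2`, and `d2_dt2` are hypothetical autodifferentiation helpers analogous to the `grad` helper in the earlier sketches, and the layer sizes are assumptions.

```python
import torch
import torch.nn as nn

force_net = nn.Sequential(nn.Linear(2, 20), nn.Tanh(), nn.Linear(20, 1))

def residual_first_beam(x, t, w1, theta1, w2):
    """Residual of the first (unit-coefficient) beam equation of (16) with the
    unknown force f1 supplied by a trainable network."""
    f1 = force_net(torch.cat([x, t], dim=1))
    return (d_dx(theta1, x) - d2_dx2(w1, x)) + d2_dt2(w1, t) + (w1 - w2) - f1

# The total loss combines this residual (and the other three of (16)) with the
# mismatch to the sensor data of Fig. 15 and the initial/boundary conditions;
# after training, force_net(x, t) approximates the exact force cos(t)(1 - sin(x)).
```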
473
+
474
+ ![](images/16ffd622bcc93198d96137446eb1ecd7a06f317234996162a20d415b700dc61f.jpg)
475
+ Fig. 14. Data to learn material properties for the Timoshenko double beam. Blue dots: collocation points. Red dots: additional data points of displacement and rotation for the double beam at one location. Black dots: initial and boundary points.
476
+
477
+ The inverse problem in engineering refers to the process of estimating unknown parameters or functions from a set of measured data. In PINNs, the inverse problem is usually solved by training a neural network to fit the measured data and the known physical laws. However, the measured data can be affected by various sources of noise, which can make estimation of the quantity of interest more challenging. The noise can make the measured data unreliable, and the neural network may not be able to accurately estimate the unknown
478
+
479
+ ![](images/3107acd5fff5b08051c45c61c8b18c52a919702339b76ed63a5b7474202a393d.jpg)
480
+ Fig. 15. Data to learn the force for the Timoshenko double beam. Blue dots: collocation points. Red dots: additional data points of displacement and rotation for the double beam at six different locations. Black dots: initial and boundary points.
481
+
482
+ parameters or functions. In such a scenario, the optimizer of the neural network does not necessarily converge to local minima.
483
+
484
+ The same neural network architecture is used as in the forward double-beam Timoshenko problem, with residual parameter 1 to regularize the physical equation in the loss function. Here, 2500 epochs are performed using the L-BFGS optimizer to train the neural network. For learning the parameter, 5000 data points are provided at $x = 1.8$ , as shown in Fig. 14. The exact value of the unknown parameter is $\rho A_1 = 1$ in (16), and the predicted value of the parameter using the PINN framework is 1.0208, which is close to the desired value. Even for a system of four PDEs, by only providing data at one particular beam location, the unknown parameter is learned successfully using PINNs. This shows that PINNs can handle large complex systems of PDEs efficiently.
485
+
486
+ TABLE V TIMOSHENKO DOUBLE-BEAM INVERSE PROBLEM: NOISE VS. $\mathcal{R}$
487
+
488
+ <table><tr><td>Noise percent (%)</td><td>Relative error percent (%)</td></tr><tr><td>0</td><td>4.3271 × 10<sup>-2</sup></td></tr><tr><td>10</td><td>4.8688 × 10<sup>-2</sup></td></tr><tr><td>20</td><td>1.1123 × 10<sup>-1</sup></td></tr></table>
489
+
490
+ The function $f_{1}(x,t)$, the applied force on the first beam, is predicted in the second experiment. As illustrated in Fig. 15, the data for transverse displacement and cross-sectional
491
+
492
+ rotation are provided at 6 different locations with 5000 data points at each location.
493
+
494
+ For the third experiment, the data provided for learning the unknown function $f_{1}(x,t)$ are corrupted with $10\%$ and $20\%$ Gaussian noise, and the corresponding performance in learning the function is shown in Table V. Even with $10\%$ and $20\%$ noise, the relative error percent between the analytic and predicted force remains low, as seen in Table V. Fig. 13 shows the force prediction along the beam when rotation and deflection observations are available at five points. The results demonstrate that the PINN is more precise in its predictions when the data are free from noise than when they are noisy. Despite the presence of noise in the data, the absolute error remains within the magnitude of $10^{-2}$, which is comparable to the error observed when the data are not noisy. To be more precise, Fig. 13 shows the absolute error between the PINN-predicted and exact force at $t = 0.5$ with 0 percent and 20 percent noise. Even with 20 percent noise, the unknown force is learned with less than $1\%$ error over the entire space-time domain, demonstrating that the PINN is an accurate and robust approach.
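The corruption of the measurement data used in the third experiment can be reproduced along the following lines; scaling the noise by the standard deviation of the signal is our assumption about the noise model, and `w_measured` is a hypothetical measurement array.

```python
import numpy as np

def add_gaussian_noise(data, percent, seed=0):
    """Corrupt measurements with zero-mean Gaussian noise whose standard
    deviation is the given percentage of the data's standard deviation."""
    rng = np.random.default_rng(seed)
    sigma = (percent / 100.0) * np.std(data)
    return data + rng.normal(0.0, sigma, size=data.shape)

w_noisy = add_gaussian_noise(w_measured, 20)   # 20 percent noise, as in Table V
```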
495
+
496
+ The minimum number of data points required to estimate the model parameters depends on several factors, such as the complexity of the physics, the number of physical parameters in the model, and the quality of the data. More data points and more complex physics require more neural network capacity, resulting in a larger neural network with more hyperparameters, and in practice an unnecessarily large number of data points can also lead to overfitting. The minimum number of training data points required for the PINN framework is therefore determined empirically by gradually increasing the number of training points until the model's performance is satisfactory.
497
+
498
+ Finally, a sensitivity analysis is carried out to examine the influence of the input variables, specifically the displacement and rotation, on the output variable, which is the force. The analysis involves adding $20\%$ Gaussian noise to the displacement data while leaving the rotation data noise-free, which yields a mean accuracy of the force of 0.14313413. In contrast, when $20\%$ noise is introduced to the rotation data with the displacement data unaltered, the mean accuracy of the force is 0.204627. These results show that the force prediction is more sensitive to the rotation data than to the displacement data.
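The sensitivity study amounts to perturbing one input channel at a time and retraining; in this sketch `identify_force` is a hypothetical helper returning the resulting mean force accuracy, and `w_data`/`theta_data` stand for the measured displacement and rotation arrays.

```python
# Perturb one input channel at a time (20% Gaussian noise) and compare the
# resulting force accuracy; helpers and data arrays are hypothetical.
acc_noisy_displacement = identify_force(add_gaussian_noise(w_data, 20), theta_data)
acc_noisy_rotation = identify_force(w_data, add_gaussian_noise(theta_data, 20))
print(acc_noisy_displacement, acc_noisy_rotation)
```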
499
+
500
+ # V. CONCLUSIONS
501
+
502
+ The design and maintenance of complex structural systems are challenging due to the multiscale interaction of their components. It is desirable to predict the behavior of these complex systems by solving the governing model of interest. Recently, PINNs have emerged as a viable method for simulating PDEs. In this work, we propose using the PINN algorithm, with a nondimensionalization step that aids the learning procedure, for complex beam systems. The PINN framework successfully solves the forward and inverse problems for nondimensional single- and double-beam systems. Based on the numerical experiments, the following conclusions are drawn.
503
+
504
+ First, the relative percent error in computing the beam displacement does not increase with increasing model complexity when solving the forward problem. In fact, for both
505
+
506
+ Euler-Bernoulli and Timoshenko theory, the error decreases by an order of magnitude for double-beam systems compared to single-beam systems. In addition, the error in computing the bending rotation is comparable for single and double Timoshenko beam systems. This nonincrease in error as the model complexity increases suggests that the PINN framework is appropriate for simulating large-scale systems with multiple connected components.
507
+
508
+ Second, it is demonstrated that PINNs precisely discover the unknown force function and model parameters through their inverse problem-solving capability. The proposed algorithm successfully learns the model parameter with less than $3\%$ error for the single Timoshenko beam. In addition, for the double beam Timoshenko system, the unknown function is approximated on the whole space-time domain with less than $0.05\%$ error, demonstrating the algorithm's effectiveness for solving inverse problems.
509
+
510
+ Third, physical quantities such as velocity, acceleration, and bending moment characterize the system's behavior. Even though the derived quantities are not directly trained in the neural network, they are approximated with less than $2e - 2\%$ error for the Euler-Bernoulli double-beam system.
511
+
512
+ Fourth, the algorithm's ability to use fewer training points in forward problems and to accommodate noisy data in inverse problems is exploited. The obtained results show that even with 1600 training points, the double Timoshenko beam displacement is predicted on the entire space-time domain with less than $5e - 3\%$ error. In the case of the inverse problem, the force function is discovered with less than $0.2\%$ error even when the data used in the learning procedure contains $20\%$ Gaussian noise. These findings imply that the algorithm is accurate and robust under the tested noise levels.
513
+
514
+ To summarize, PINNs enable the simulation of complex structural systems with multiple interacting components efficiently, accurately, and robustly. In the future, this approach could be extended to estimate displacements for various input forces and mechanical vibration modes and incorporate robust methods to account for stochasticities. Additionally, future works on PINNs could be focused on reducing the computational cost and developing methodologies to augment their generalizability, thereby expanding the applicability of PINNs beyond the training domain.
515
+
516
+ # ACKNOWLEDGMENT
517
+
518
+ The authors would like to express their appreciation to the anonymous reviewers and editors for their valuable comments and feedback, which have significantly improved the quality of this work. The authors extend their appreciation to Prof. Siddhartha Mishra (ETH Zürich) for his insightful suggestions in comparisons with numerical methods.
519
+
520
+ # REFERENCES
521
+
522
+ [1] K. A. Kobbacy, D. P. Murthy et al., Complex system maintenance handbook. Springer, 2008.
523
+ [2] M. Chatzis and G. Deodatis, "Modeling of very large interacting multiple-beam systems with application to suspension bridge cables," Journal of Structural Engineering, vol. 139, no. 9, pp. 1541-1554, 2013.
524
+
525
+ [3] Z. Liu, Y. Song, Y. Han, H. Wang, J. Zhang, and Z. Han, “Advances of research on high-speed railway catenary,” Journal of modern transportation, vol. 26, no. 1, pp. 1-23, 2018.
526
+ [4] C. Foias, O. Manley, R. Rosa, and R. Temam, Navier-Stokes equations and turbulence. Cambridge University Press, 2001, vol. 83.
527
+ [5] D. Lucor, A. Agrawal, and A. Sergent, "Simple computational strategies for more effective physics-informed neural networks modeling of turbulent natural convection," Journal of Computational Physics, vol. 456, p. 111022, 2022.
528
+ [6] V. Stojanović and P. Kozić, Vibrations and stability of complex beam systems. Springer, 2015.
529
+ [7] J. Mercieca and V. Kadirkamanathan, "Estimation and identification of spatio-temporal models with applications in engineering, healthcare and social science," Annual Reviews in Control, vol. 42, pp. 285-298, 2016.
530
+ [8] H.-X. Li and C. Qi, "Modeling of distributed parameter systems for applications—a synthesized review from time-space separation," Journal of Process Control, vol. 20, no. 8, pp. 891-901, 2010.
531
+ [9] T. Kapoor, H. Wang, A. Núñez, and R. Dollevoet, "Predicting traction return current in electric railway systems through physics-informed neural networks," in 2022 IEEE Symposium Series on Computational Intelligence (SSCI). IEEE, 2022, pp. 1460-1468.
532
+ [10] A. Chandra, M. Curti, K. Tiels, E. A. Lomonova, and D. M. Tartakovsky, "Physics-informed neural networks for modelling anisotropic and bi-anisotropic electromagnetic constitutive laws through indirect data," in 2022 IEEE Symposium Series on Computational Intelligence (SSCI). IEEE, 2022, pp. 1451-1459.
533
+ [11] L. Yuan, Y.-Q. Ni, X.-Y. Deng, and S. Hao, "A-PINN: Auxiliary physics informed neural networks for forward and inverse problems of nonlinear integro-differential equations," Journal of Computational Physics, vol. 462, p. 111260, 2022.
534
+ [12] A. Fallah and M. M. Aghdam, "Physics-informed neural network for bending and free vibration analysis of three-dimensional functionally graded porous beam resting on elastic foundation," Engineering with Computers, pp. 1-18, 2023.
535
+ [13] T. Kapoor, H. Wang, A. Nunez, and R. Dollevoet, "Physics-informed machine learning for moving load problems," arXiv preprint arXiv:2304.00369, 2023.
536
+ [14] M. G. Kapteyn, J. V. Pretorius, and K. E. Willcox, “A probabilistic graphical model foundation for enabling predictive digital twins at scale,” Nature Computational Science, vol. 1, no. 5, pp. 337–347, 2021.
537
+ [15] H. Wang, A. Núñez, Z. Liu, D. Zhang, and R. Dollevoet, "A Bayesian network approach for condition monitoring of high-speed railway catenaries," IEEE Transactions on Intelligent Transportation Systems, vol. 21, no. 10, pp. 4037-4051, 2019.
538
+ [16] F.-G. Yuan, S. A. Zargar, Q. Chen, and S. Wang, "Machine learning for structural health monitoring: challenges and opportunities," in Sensors and smart structures technologies for civil, mechanical, and aerospace systems 2020, vol. 11379. International Society for Optics and Photonics, 2020, p. 1137903.
539
+ [17] J. Borggaard and J. Burns, “A PDE sensitivity equation method for optimal aerodynamic design,” Journal of Computational Physics, vol. 136, no. 2, pp. 366-384, 1997.
540
+ [18] Z. Lai, C. Mylonas, S. Nagarajaiah, and E. Chatzi, "Structural identification with physics-informed neural ordinary differential equations," Journal of Sound and Vibration, vol. 508, p. 116196, 2021.
541
+ [19] M. Raissi and G. E. Karniadakis, “Hidden physics models: Machine learning of nonlinear partial differential equations,” Journal of Computational Physics, vol. 357, pp. 125–141, 2018.
542
+ [20] N. Abhyankar, E. Hall, and S. Hanagud, "Chaotic vibrations of beams: numerical solution of partial differential equations," 1993.
543
+ [21] G. E. Karniadakis, I. G. Kevrekidis, L. Lu, P. Perdikaris, S. Wang, and L. Yang, "Physics-informed machine learning," Nature Reviews Physics, vol. 3, no. 6, pp. 422-440, 2021.
544
+ [22] Z. Fang, "A high-efficient hybrid physics-informed neural networks based on convolutional neural network," IEEE Transactions on Neural Networks and Learning Systems, 2021.
545
+ [23] S. Cuomo, V. S. Di Cola, F. Giampaolo, G. Rozza, M. Raissi, and F. Piccialli, "Scientific machine learning through physics-informed neural networks: where we are and what's next," Journal of Scientific Computing, vol. 92, no. 3, p. 88, 2022.
546
+ [24] J. Blechschmidt and O. G. Ernst, “Three ways to solve partial differential equations with neural networks—a review,” GAMM-Mitteilungen, vol. 44, no. 2, p. e202100006, 2021.
547
+ [25] M. Raissi, P. Perdikaris, and G. E. Karniadakis, "Physics-informed neural networks: A deep learning framework for solving forward and inverse problems involving nonlinear partial differential equations," Journal of Computational Physics, vol. 378, pp. 686-707, 2019.
548
+
549
+ [26] Y. Meng, E. Rigall, X. Chen, F. Gao, J. Dong, and S. Chen, "Physics-guided generative adversarial networks for sea subsurface temperature prediction," IEEE Transactions on Neural Networks and Learning Systems, 2021.
+ [27] W. Xu, Z. Zhou, T. Li, C. Sun, X. Chen, and R. Yan, "Physics-constraint variational neural network for wear state assessment of external gear pump," IEEE Transactions on Neural Networks and Learning Systems, 2022.
551
+ [28] A. Karpatne, G. Atluri, J. H. Faghmous, M. Steinbach, A. Banerjee, A. Ganguly, S. Shekhar, N. Samatova, and V. Kumar, “Theory-guided data science: A new paradigm for scientific discovery from data,” IEEE Transactions on Knowledge and Data Engineering, vol. 29, no. 10, pp. 2318–2331, 2017.
552
+ [29] M. H. Hassoun et al., Fundamentals of artificial neural networks. MIT press, 1995.
553
+ [30] C. Oszkinat, S. E. Luczak, and I. G. Rosen, "Uncertainty quantification in estimating blood alcohol concentration from transdermal alcohol level with physics-informed neural networks," IEEE Transactions on Neural Networks and Learning Systems, 2022.
+ [31] J. Hua, Y. Li, C. Liu, P. Wan, and X. Liu, "Physics-informed neural networks with weighted losses by uncertainty evaluation for accurate and stable prediction of manufacturing systems," IEEE Transactions on Neural Networks and Learning Systems, 2023.
555
+ [32] S. Wang, X. Yu, and P. Perdikaris, "When and why PINNs fail to train: A neural tangent kernel perspective," Journal of Computational Physics, vol. 449, p. 110768, 2022.
556
+ [33] A. Krishnapriyan, A. Gholami, S. Zhe, R. Kirby, and M. W. Mahoney, "Characterizing possible failure modes in physics-informed neural networks," Advances in Neural Information Processing Systems, vol. 34, 2021.
557
+ [34] G. Kissas, Y. Yang, E. Hwuang, W. R. Witschey, J. A. Detre, and P. Perdikaris, "Machine learning in cardiovascular flows modeling: Predicting arterial blood pressure from non-invasive 4D flow MRI data using physics-informed neural networks," Computer Methods in Applied Mechanics and Engineering, vol. 358, p. 112623, 2020.
558
+ [35] J. Feng, Z. Liu, X. He, Q. Li, and W. He, "Vibration suppression of a high-rise building with adaptive iterative learning control," IEEE Transactions on Neural Networks and Learning Systems, pp. 1-12, 2021.
+ [36] A. Joghataie and O. O. Torghabehi, "Simulating dynamic plastic continuous neural networks by finite elements," IEEE Transactions on Neural Networks and Learning Systems, vol. 25, no. 8, pp. 1583-1587, 2013.
+ [37] Y. Liu, X. Wu, X. Yao, and J. Zhao, "Backstepping technology-based adaptive boundary ILC for an input-output-constrained flexible beam," IEEE Transactions on Neural Networks and Learning Systems, pp. 1-9, 2022.
+ [38] W. He, T. Meng, D. Huang, and X. Li, "Adaptive boundary iterative learning control for an Euler-Bernoulli beam system with input constraint," IEEE Transactions on Neural Networks and Learning Systems, vol. 29, no. 5, pp. 1539-1549, 2018.
562
+ [39] Z. Oniszczuk, "Free transverse vibrations of elastically connected simply supported double-beam complex system," Journal of sound and vibration, vol. 232, no. 2, pp. 387-403, 2000.
563
+ [40] V. Stojanović and P. Kozić, “Forced transverse vibration of Rayleigh and Timoshenko double-beam system with effect of compressive axial load,” International Journal of Mechanical Sciences, vol. 60, no. 1, pp. 59–71, 2012.
564
+ [41] Z. Oniszczuk, “Forced transverse vibrations of an elastically connected complex simply supported double-beam system,” Journal of sound and vibration, vol. 264, no. 2, pp. 273–286, 2003.
565
+ [42] M. Abu-Hilal, "Dynamic response of a double Euler-Bernoulli beam due to a moving constant load," Journal of sound and vibration, vol. 297, no. 3-5, pp. 477-491, 2006.
566
+ [43] X. Zhao, B. Chen, Y. Li, W. Zhu, F. Nkiegaing, and Y. Shao, "Forced vibration analysis of Timoshenko double-beam system under compressive axial load by means of green's functions," Journal of Sound and Vibration, vol. 464, p. 115001, 2020.
567
+ [44] J. Li and H. Hua, "Spectral finite element analysis of elastically connected double-beam systems," Finite Elements in Analysis and Design, vol. 43, no. 15, pp. 1155-1168, 2007.
568
+ [45] A. Palmeri and S. Adhikari, "A Galerkin-type state-space approach for transverse vibrations of slender double-beam systems with viscoelastic inner layer," Journal of Sound and Vibration, vol. 330, no. 26, pp. 6372-6386, 2011.
569
+
570
+ [46] Z. Ying and Y. Ni, “A response-adjustable sandwich beam with harmonic distribution parameters under stochastic excitations,” International Journal of Structural Stability and Dynamics, vol. 17, no. 07, p. 1750075, 2017.
571
+ [47] Z. Ying, Y. Ni, and R. Huan, "Vibration response characteristics of quasi-periodic sandwich beam with magnetorheological visco-elastomer core under random support excitations," Journal of Vibration and Acoustics, vol. 140, no. 5, 2018.
572
+ [48] T. Murmu and S. Adhikari, “Nonlocal transverse vibration of double-nanobeam-systems,” Journal of Applied Physics, vol. 108, no. 8, p. 083514, 2010.
573
+ [49] B. Chen, B. Lin, X. Zhao, W. Zhu, Y. Yang, and Y. Li, "Closed-form solutions for forced vibrations of a cracked double-beam system interconnected by a viscoelastic layer resting on Winkler-Pasternak elastic foundation," Thin-Walled Structures, vol. 163, p. 107688, 2021.
574
+ [50] S. Liu and B. Yang, "A closed-form analytical solution method for vibration analysis of elastically connected double-beam systems," Composite Structures, vol. 212, pp. 598-608, 2019.
575
+ [51] X. Zhao and P. Chang, “Free and forced vibration of double beam with arbitrary end conditions connected with a viscoelastic layer and discrete points,” International Journal of Mechanical Sciences, vol. 209, p. 106707, 2021.
576
+ [52] Y. Li, F. Xiong, L. Xie, and L. Sun, "State-space approach for transverse vibration of double-beam systems," International Journal of Mechanical Sciences, vol. 189, p. 105974, 2021.
577
+ [53] O. Z. S. Ong, M. H. Ghayesh, D. Losic, and M. Amabili, "Coupled dynamics of double beams reinforced with bidirectional functionally graded carbon nanotubes," Engineering Analysis with Boundary Elements, vol. 143, pp. 263-282, 2022.
578
+ [54] S. T. Radev, U. K. Mertens, A. Voss, L. Ardizzone, and U. Kothe, "BayesFlow: Learning complex stochastic models with invertible neural networks," IEEE Transactions on Neural Networks and Learning Systems, vol. 33, no. 4, pp. 1452-1466, 2022.
+ [55] H. Ning, G. Qing, and X. Jing, "Identification of nonlinear spatiotemporal dynamical systems with nonuniform observations using reproducing-kernel-based integral least square regulation," IEEE Transactions on Neural Networks and Learning Systems, vol. 27, no. 11, pp. 2399-2412, 2016.
+ [56] P. Jin, Z. Zhang, I. G. Kevrekidis, and G. E. Karniadakis, "Learning Poisson systems and trajectories of autonomous systems via Poisson neural networks," IEEE Transactions on Neural Networks and Learning Systems, pp. 1-13, 2022.
581
+ [57] S. Mishra and R. Molinaro, "Estimates on the generalization error of physics-informed neural networks for approximating PDEs," IMA Journal of Numerical Analysis, 2022.
582
+ [58] M. Bazmara, M. Silani, M. Mianroodi et al., "Physics-informed neural networks for nonlinear bending of 3D functionally graded beam," in Structures, vol. 49. Elsevier, 2023, pp. 152-162.
583
+ [59] U. Goerguelue, “Beam theories the difference between Euler-Bernoulli and Timoschenko,” Lecture Handouts, 2009.
584
+ [60] P. Akella, E. G. Hemingway, and O. M. O'Reilly, "A visualization tool for the vibration of Euler-Bernoulli and Timoshenko beams."
585
+ [61] B. Semper, "Semi-discrete and fully discrete Galerkin methods for the vibrating Timoshenko beam," Computer methods in applied mechanics and engineering, vol. 117, no. 3-4, pp. 353-360, 1994.
586
+ [62] Y. Yu, H. Yao, and Y. Liu, "Structural dynamics simulation using a novel physics-guided machine learning method," Engineering Applications of Artificial Intelligence, vol. 96, p. 103947, 2020.
587
+ [63] Z. Zhang and C. Sun, "Structural damage identification via physics-guided machine learning: a methodology integrating pattern recognition with finite element model updating," Structural Health Monitoring, vol. 20, no. 4, pp. 1675-1688, 2021.
588
+ [64] A. Karpatne, W. Watkins, J. Read, and V. Kumar, "Physics-guided neural networks (PGNN): An application in lake temperature modeling," arXiv preprint arXiv:1710.11431, vol. 2, 2017.
589
+ [65] A. Khandelwal, S. Xu, X. Li, X. Jia, M. Stienbach, C. Duffy, J. Nieber, and V. Kumar, "Physics guided machine learning methods for hydrology," arXiv preprint arXiv:2012.02854, 2020.
590
+ [66] J. Yu, L. Lu, X. Meng, and G. E. Karniadakis, "Gradient-enhanced physics-informed neural networks for forward and inverse PDE problems," Computer Methods in Applied Mechanics and Engineering, vol. 393, p. 114823, 2022.
591
+ [67] Y. Miao and H. Li, "GPINN: Physics-informed neural network with graph embedding," arXiv preprint arXiv:2306.09792, 2023.
592
+ [68] J. Bettencourt, M. J. Johnson, and D. Duvenaud, "Taylor-mode automatic differentiation for higher-order derivatives in JAX," in Program Transformations for ML Workshop at NeurIPS 2019, 2019.
593
+
594
+ ![](images/733b2f452d52af23ebe444bf90f61c31fd599eb447cf397a2af48762ea35dcd6.jpg)
595
+
596
+ Taniya Kapoor is currently pursuing the Ph.D. degree in the Department of Engineering Structures at Delft University of Technology, The Netherlands. Before this, she was an intern in the Seminar for Applied Mathematics at ETH Zürich, Switzerland. She received M.Sc. degrees in applied mathematics from South Asian University, India, and in scientific computing from Université de Lille, France. She currently works on scientific machine learning with applications in engineering structures.
597
+
598
+ ![](images/ecbef31eedb77695c685487bba51c2a494f757b015690d94f45546bc73b570da.jpg)
599
+
600
+ Hongrui Wang (Member, IEEE) received the Ph.D. degree from the Section of Railway Engineering, Delft University of Technology, Delft, The Netherlands, in 2019. He was a Post-Doctoral Researcher with the Delft University of Technology until November 2020, where he is currently an Assistant Professor with the Department of Engineering Structures. His research interests include signal processing, artificial intelligence, and their applications in the structural health monitoring, digital modeling, and design of railway infrastructures.
603
+
604
+ Dr. Wang is an Associate Editor of the IEEE TRANSACTIONS ON INSTRUMENTATION AND MEASUREMENT.
605
+
606
+ ![](images/cb007c072f0dd0cdbfb108ec5a49a6015755a54498ff1d23883914fc4f875e7b.jpg)
607
+
608
+ Alfredo Nunez (Senior Member, IEEE) received the Ph.D. degree in electrical engineering from the University of Chile, Santiago, Chile, in 2010. He is currently an Associate Professor in the field of data-based maintenance for railway infrastructure with the Section of Railway Engineering, Department of Engineering Structures, Delft University of Technology, Delft, The Netherlands. He was a Postdoctoral Researcher with the Delft Center for Systems and Control, Delft University of Technology. He has authored or coauthored more than a hundred international journal and international conference papers. His current research interests include railway infrastructures, intelligent condition monitoring and maintenance of engineering structures, computational intelligence, big data, risk analysis, and optimization.
611
+
612
+ Dr. Núñez is on the Editorial Board of IEEE TRANSACTIONS ON INTELLIGENT TRANSPORTATION SYSTEMS and Applied Soft Computing.
613
+
614
+ ![](images/30d96a7f8882433ecb979db2016f9d1a3e331d08e2f7ac7f833d9d747e4bf649.jpg)
615
+
616
+ Rolf Dollevoet received the M.Sc. degree in mechanical engineering from the Eindhoven University of Technology, Eindhoven, The Netherlands, in 2003, and the Ph.D. degree in rail research on rolling contact fatigue from the University of Twente, Enschede, The Netherlands, in 2010. Since 2003, he has been with the Railway Sector, ProRail, Utrecht, The Netherlands. Since 2012, he has been a part-time Professor with the Section of Railway Engineering, Delft University of Technology, Delft, The Netherlands. He was also a Railway System Expert with ProRail, where he was responsible for all the scientific research and innovation within the Civil Engineering Division, ProRail Asset Management. Dr. Dollevoet was a recipient of the Jan van Stappen Spoorprijs 2010 Award (a yearly prize for contributions to the travel quality and service for passengers in The Netherlands) from the railway sector for his Ph.D. research and its huge potential to reduce track maintenance costs.
2303.01xxx/2303.01055/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b5bdc501f4bfee581712786fdccb1a58d519c48b5ea549ce9933659f065feb87
3
+ size 1307997
2303.01xxx/2303.01055/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.01xxx/2303.01067/4a89950d-6ff6-43c5-9ea0-d219ee099751_content_list.json ADDED
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "AI and the FCI: Can ChatGPT project an understanding of introductory physics?",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 101,
8
+ 61,
9
+ 903,
10
+ 82
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Colin G. West*",
17
+ "bbox": [
18
+ 437,
19
+ 95,
20
+ 565,
21
+ 109
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "$^{1}$ Department of Physics, University of Colorado, Boulder, Colorado 80309, USA",
28
+ "bbox": [
29
+ 228,
30
+ 109,
31
+ 774,
32
+ 125
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "(Dated: March 28, 2023)",
39
+ "bbox": [
40
+ 418,
41
+ 123,
42
+ 584,
43
+ 137
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "ChatGPT is a groundbreaking \"chatbot\"-an AI interface built on a large language model that was trained on an enormous corpus of human text to emulate human conversation. Beyond its ability to converse in a plausible way, it has attracted attention for its ability to competently answer questions from the bar exam and from MBA coursework, and to provide useful assistance in writing computer code. These apparent abilities have prompted discussion of ChatGPT as both a threat to the integrity of higher education and conversely as a powerful teaching tool. In this work we present a preliminary analysis of how two versions of ChatGPT (ChatGPT3.5 and ChatGPT4) fare in the field of first-semester university physics, using a modified version of the Force Concept Inventory (FCI) to assess whether it can give correct responses to conceptual physics questions about kinematics and Newtonian dynamics. We demonstrate that, by some measures, ChatGPT3.5 can match or exceed the median performance of a university student who has completed one semester of college physics, though its performance is notably uneven and the results are nuanced. By these same measures, we find that ChatGPT4's performance is approaching the point of being indistinguishable from that of an expert physicist when it comes to introductory mechanics topics. After the completion of our work we became aware of Ref [1], which preceded us to publication and which completes an extensive analysis of the abilities of ChatGPT3.5 in a physics class, including a different modified version of the FCI. We view this work as confirming that portion of their results, and extending the analysis to ChatGPT4, which shows rapid and notable improvement in most, but not all respects.",
50
+ "bbox": [
51
+ 169,
52
+ 143,
53
+ 831,
54
+ 383
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "I. INTRODUCTION",
61
+ "text_level": 1,
62
+ "bbox": [
63
+ 197,
64
+ 407,
65
+ 375,
66
+ 422
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "\"ChatGPT,\" in simplest terms, is a software application designed to mimic human conversation by producing and responding to text, a skill called \"natural language processing.\" [2] Technically, it is based on a \"large language model\" (LLM) which makes use of two recent advances in the LLM field: the \"Transformer\" model [3] and \"pretraining.\" [4] from whence arises \"GPT\" (it is a [G]enerative, [P]retrained [T]ransformer model). While a great deal has been written about the methods used to produce the ChatGPT system [5], it suffices here to note that it is one of a new generation of artificial language processing systems—sometimes colloquially called \"chatbots\"—which has garnered substantial attention in both academic [6] and popular press [7] for its ability to seemingly carry on a coherent conversation and complete other tasks. There are two versions of the model available to researchers at the time of this writing: ChatGPT3.5, which is available to the public, and ChatGPT4, which is available only under paid subscription and with substantial limitations on usage rates. The latter model is intended to deliver significantly superior performance than its predecessor [8]. In this paper, we will write \"ChatGPT\" when making statements that apply equally to both versions, and add the version numbers in other cases.",
73
+ "bbox": [
74
+ 81,
75
+ 439,
76
+ 488,
77
+ 801
78
+ ],
79
+ "page_idx": 0
80
+ },
81
+ {
82
+ "type": "text",
83
+ "text": "Recent papers have shown that, by at least some measures, ChatGPT's ability to converse like a human also allows it to seemingly display competence in fields like business and law. For example, its responses to questions",
84
+ "bbox": [
85
+ 81,
86
+ 801,
87
+ 488,
88
+ 861
89
+ ],
90
+ "page_idx": 0
91
+ },
92
+ {
93
+ "type": "text",
94
+ "text": "about Operations Management, a core topic in many MBA programs, were assessed in one study as being at the \"B or B-\" level. [9] Another work concluded that, \"although ChatGPT[3.5] would have been a mediocre law student, its performance was sufficient to successfully earn a JD degree from a highly selective law school\" [10]. A similar work projected that, given the surprisingly strong performance of ChatGPT3.5 on sample questions it could not have seen before, a similar LLM might be able to pass an actual bar exam \"within the next 0-18 months.\" [11]. ChatGPT4 appears to have reached that benchmark approximately three months later [12]. The improved model also scores at very high levels—routinely at the 80th or 90th percentile or above—on many standardized tests, including notably the AP Physics C exam [8].",
95
+ "bbox": [
96
+ 509,
97
+ 407,
98
+ 921,
99
+ 640
100
+ ],
101
+ "page_idx": 0
102
+ },
103
+ {
104
+ "type": "text",
105
+ "text": "A central premise of much physics education work is that there is a difference between the ability to solve classically formulated \"physics problems\" and an ability to demonstrate true conceptual \"understanding.\" The field of Physics Education Research (PER) has developed tools specifically to probe this distinction, and here, we deploy one of the fields most venerable instruments, the Force Concept Inventory (FCI) [13] to compare its apparent conceptual performance with typical students from an introductory physics course at the college level.",
106
+ "bbox": [
107
+ 509,
108
+ 640,
109
+ 921,
110
+ 784
111
+ ],
112
+ "page_idx": 0
113
+ },
114
+ {
115
+ "type": "text",
116
+ "text": "We note that the work of Ref [1], which was made available several weeks before this work, also includes an analysis of ChatGPT3.5 using the FCI. Since our works were completed without knowledge of one another and since ChatGPT's responses are inherently probabilistic, we view this work as serving as a confirmation of some of their results: namely, that ChatGPT can correctly answer roughly 60 to $65\\%$ of the questions on the FCI, and that it's answers are usually but not always relatively",
117
+ "bbox": [
118
+ 509,
119
+ 785,
120
+ 921,
121
+ 917
122
+ ],
123
+ "page_idx": 0
124
+ },
125
+ {
126
+ "type": "aside_text",
127
+ "text": "arXiv:2303.01067v2 [physics.ed-ph] 26 Mar 2023",
128
+ "bbox": [
129
+ 22,
130
+ 232,
131
+ 60,
132
+ 757
133
+ ],
134
+ "page_idx": 0
135
+ },
136
+ {
137
+ "type": "page_footnote",
138
+ "text": "* colin.west@colorado.edu",
139
+ "bbox": [
140
+ 84,
141
+ 898,
142
+ 250,
143
+ 911
144
+ ],
145
+ "page_idx": 0
146
+ },
147
+ {
148
+ "type": "text",
149
+ "text": "stable. These results are discussed in more detail below, including some alternative analysis, and we then extend the work to reconsider the results with ChatGPT4. Certainly the authors of Ref [1] are likely pursuing similar work which we hope will also serve as useful confirmation or contrast with ours.",
150
+ "bbox": [
151
+ 81,
152
+ 65,
153
+ 488,
154
+ 152
155
+ ],
156
+ "page_idx": 1
157
+ },
158
+ {
159
+ "type": "text",
160
+ "text": "II. BACKGROUND",
161
+ "text_level": 1,
162
+ "bbox": [
163
+ 200,
164
+ 184,
165
+ 372,
166
+ 198
167
+ ],
168
+ "page_idx": 1
169
+ },
170
+ {
171
+ "type": "text",
172
+ "text": "ChatGPT was designed and optimized specifically for the art of conversing in a manner that would seem plausibly human [5]. To a loose approximation, it has analyzed and internalized the patterns of words in an enormous sample of human text (principally the \"CommonCrawl\" dataset) [5]), which consists of things like books, news articles, wikipedia pages, reddit threads, and content from more specialized and technical fora like StackExchange and StackOverflow [5, 14]. From this data, it has an immense probabilistic model of how words tend to be fit together by a human being in various contexts. For example, it \"knows\" that a sentence which begins \"after the fight, William hurt...\" is likely to end with something like \"...his hand.\" But it also recognizes that the sentence \"after the fight scene, William Hurt...\" might instead end with \"...appeared to be injured and had to be helped off set.\" Crucially, this means that ChatGPT is a tool which manipulates and responds to language, and is not designed or trained to implement any model of an underlying concept [15] like business, law, or physics. But it is designed to talk like a person, and people who talk about business, law, and physics generally discuss these topics with some intelligence. Hence, even without any specific training in physics, its familiarity with the way physicists talk about physics may be enough for it to project at least an appearance of understanding.",
173
+ "bbox": [
174
+ 81,
175
+ 217,
176
+ 488,
177
+ 592
178
+ ],
179
+ "page_idx": 1
180
+ },
181
+ {
182
+ "type": "text",
183
+ "text": "The concept of \"Understanding\" can be deep and difficult to define, particularly in the context of learning a new topic [16]. We will generally leave that longstanding question [17] to the likes of AI researchers, cognitive scientists and philosophers, and for this reason we have been very careful with the wording in the title of this paper and elsewhere. We are not about to ask, because we are not equipped to know, whether ChatGPT understands introductory physics. We ask whether its behavior creates the appearance of understanding to the outside world- which might either be because it has succeeded in understanding, or merely because it has figured out how to display all the usual indicia of understanding. Hence, in the same manner that some poker player might \"project an aura of confidence\" either as a proud display of internal fortitude or as an act desperate bluffing, we consider here whether ChatGPT \"projects\" an understanding of introductory physics to the outside world.",
184
+ "bbox": [
185
+ 81,
186
+ 594,
187
+ 488,
188
+ 852
189
+ ],
190
+ "page_idx": 1
191
+ },
192
+ {
193
+ "type": "text",
194
+ "text": "There is a deep body of work in the literature of physics education about how to assess whether a student is demonstrating \"understanding,\" [16, 18]. To start with the basics, we choose as our primary assessment tool",
195
+ "bbox": [
196
+ 81,
197
+ 854,
198
+ 488,
199
+ 914
200
+ ],
201
+ "page_idx": 1
202
+ },
203
+ {
204
+ "type": "text",
205
+ "text": "a classic instrument which has been used almost as a gold standard for decades: the Force Concept Inventory (FCI) [13]. This influential and heavily-studied assessment is a set of 30 questions designed to try to isolate and allow students to demonstrate a conceptual understanding of introductory kinematics and dynamics, as they might be covered in the first semester of introductory physics at the high school or university level. Although it has plenty of limitations, it was designed as a tool with an eye to distinguishing true conceptual mastery from the kinds of rote memorization, pattern-matching, and algorithmic calculation[19-21] which students sometimes use in order to pass conventional physics tests without ever truly knowing what they are doing, or why. Its status as a classic test for understanding in introductory physics makes it a logical starting point for our inquiry, though we encourage future work to extend beyond this starting point as well.",
206
+ "bbox": [
207
+ 511,
208
+ 65,
209
+ 919,
210
+ 325
211
+ ],
212
+ "page_idx": 1
213
+ },
214
+ {
215
+ "type": "text",
216
+ "text": "In short, we will offer ChatGPT a modified version of the questions from the FCI, and assess on multiple levels how successfully it can project understanding of these topics in intro physics. Section III of this paper we describe in more detail how the assessment was modified and administered, and in section IV we analyze ChatGPT's responses through various lenses and compare its performance to a large sample of real human students. Section V gives a summary of our resulting inferences and offers speculations about a number of open questions and directions for future work.",
217
+ "bbox": [
218
+ 511,
219
+ 325,
220
+ 919,
221
+ 484
222
+ ],
223
+ "page_idx": 1
224
+ },
225
+ {
226
+ "type": "text",
227
+ "text": "III. METHODS",
228
+ "text_level": 1,
229
+ "bbox": [
230
+ 647,
231
+ 515,
232
+ 785,
233
+ 529
234
+ ],
235
+ "page_idx": 1
236
+ },
237
+ {
238
+ "type": "text",
239
+ "text": "Because all of our reference points for what it looks like when someone projects an understanding of physics are based on the performance of other human beings, Hence, it was important when administering the FCI that we hew as closely as possible to the conditions as our human students would encounter it. This was not entirely possible, and some modifications were necessary both to the questions themselves as well as to the process of giving them to ChatGPT. Such changes were kept to a minimum. In this section we detail those changes and the resulting procedure for assessing ChatGPT with the FCI.",
240
+ "bbox": [
241
+ 511,
242
+ 547,
243
+ 919,
244
+ 708
245
+ ],
246
+ "page_idx": 1
247
+ },
248
+ {
249
+ "type": "text",
250
+ "text": "A. Suitability of the FCI",
251
+ "text_level": 1,
252
+ "bbox": [
253
+ 614,
254
+ 736,
255
+ 820,
256
+ 750
257
+ ],
258
+ "page_idx": 1
259
+ },
260
+ {
261
+ "type": "text",
262
+ "text": "The FCI is a natural fit as a first step in assessing the capabilities of ChatGPT for several reasons mentioned above: it is focused on conceptual understanding rather than computation and memorization; it has been widely-studied and validated as an assessment tool, and because it has been given frequently to many students in introductory physics, providing natural benchmarks for comparison. But it is also potentially valuable for another reason. To preserve its integrity as an assessment tool, the providers of the FCI have taken steps to encourage practi",
263
+ "bbox": [
264
+ 511,
265
+ 768,
266
+ 919,
267
+ 914
268
+ ],
269
+ "page_idx": 1
270
+ },
271
+ {
272
+ "type": "page_number",
273
+ "text": "2",
274
+ "bbox": [
275
+ 906,
276
+ 34,
277
+ 919,
278
+ 45
279
+ ],
280
+ "page_idx": 1
281
+ },
282
+ {
283
+ "type": "text",
284
+ "text": "tioners who use it to keep its contents (and even more importantly, its solutions) from becoming widely available. These efforts have certainly not been flawlessly successful but it remains the case that FCI text and solutions are difficult to find on the internet. Where they do exist, they are typically either password-protected on websites used by physics educators or at least \"paywalled\" on websites used by students. The relative scarcity of FCI solutions on the internet is consistent with the findings of prior work showing that access to the internet does not undermine the validity of tools like the FCI as conceptual assessment, even when students are observed to be copying question text for the presumable purpose of searching for its answers [22]. And in our case, since we are administering the FCI to a program that can only parse text, we can go even further: a significant portion of the FCI problems and/or solutions that can be found online exist as scanned images and/or had-annotated PDFs, which means that they would not be parsed by the kinds of automated tools that scrape the web for text.",
285
+ "bbox": [
286
+ 81,
287
+ 65,
288
+ 491,
289
+ 354
290
+ ],
291
+ "page_idx": 2
292
+ },
293
+ {
294
+ "type": "text",
295
+ "text": "While the exact details of the text which was used to \"train\" ChatGPT are a proprietary secret, it is known that its reading material was largely drawn from the \"Commoncrawl\" corpus [14], an open repository of data scraped from text found on the public internet. Common Crawl allows users to query which domains it has indexed; we used this feature to verify that it has not indexed the handful of websites which we are most familiar with which might contain solutions to the FCI, either as a PER tool or as a repository of solved problems for students. Beyond the CommonCrawl corpus, it is believed that most of ChatGPT's training data came from specifically generated human with human feedback [23], which of course are highly unlikely to contain references to the FCI. And finally, in the rare locations that we were able to find FCI solutions online, they were typically stored apart from the questions themselves, meaning that there was no obvious reason an LLM would know to pair particular solution texts with particular problems even if it had access to them. For all these reasons, we believe that the FCI is likely not in the training text of ChatGPT and hence that its responses have to represent more than regurgitation of something it \"remembers.\" In this respect our testing with the FCI is what researchers in AI and machine learning might term \"zero shot task\": a challenge in which the model is used to classify (and in this case, respond to) prompting text it has never seen before.",
296
+ "bbox": [
297
+ 81,
298
+ 356,
299
+ 491,
300
+ 760
301
+ ],
302
+ "page_idx": 2
303
+ },
304
+ {
305
+ "type": "text",
306
+ "text": "B. Modifying the FCI",
307
+ "text_level": 1,
308
+ "bbox": [
309
+ 194,
310
+ 792,
311
+ 380,
312
+ 808
313
+ ],
314
+ "page_idx": 2
315
+ },
316
+ {
317
+ "type": "text",
318
+ "text": "The FCI is a 30-item sequential multiple-choice assessment, with each item containing five choices (four distractors and one unique correct answer or \"key\"). [13] It's items cover a range of topics from approximately the first third of a semester of college-level introductory physics: kinematics, projectile motion, free-fall, circular motion,",
319
+ "bbox": [
320
+ 81,
321
+ 825,
322
+ 490,
323
+ 914
324
+ ],
325
+ "page_idx": 2
326
+ },
327
+ {
328
+ "type": "text",
329
+ "text": "and Newton's laws. This means that generally, it is well-suited to our task. Its one significant drawback is that 18 of its 30 items contain some kind of reference to a figure. ChatGPT3.5 is designed only to accept text input, and despite some clever attempts to feed it images in some sort of indirect or transformed state [24], it does not seem capable currently of extracting any meaningful information from a picture. ChatGPT4 advertises multimodal capability which might make it possible to prompt the model with a figure, but this feature is not publically available as of this writing.",
330
+ "bbox": [
331
+ 509,
332
+ 65,
333
+ 919,
334
+ 224
335
+ ],
336
+ "page_idx": 2
337
+ },
338
+ {
339
+ "type": "text",
340
+ "text": "Of the 18 items with figures, we find that 11 of them can be modified by adding text that described what was shown in the figures without fundamentally altering the task at hand. In doing so, we take care to make sure that we did not provide additional clues or context that would make the problem simpler for ChatGPT than it would be for a typical physics student. For example, item seven involves a steel ball on a rope being swung in a circular path and then suddenly cut free. The question asks about the path of the ball after it is released, and the figure supplies several different possible trajectories. One way to describe these trajectories in words would be to say \"tangential to the circle,\" \"normal to the circle,\" etc. But we suspect this modification would substantially alter the difficulty of the problem. Instead, we translate this figure into words with reference to cardinal directions:",
341
+ "bbox": [
342
+ 509,
343
+ 224,
344
+ 919,
345
+ 457
346
+ ],
347
+ "page_idx": 2
348
+ },
349
+ {
350
+ "type": "text",
351
+ "text": "Consider a moment in the ball's motion when the ball is moving north. At that moment, the string breaks near the ball. Which of the following paths would the ball most closely follow after the string breaks?",
352
+ "bbox": [
353
+ 552,
354
+ 465,
355
+ 879,
356
+ 539
357
+ ],
358
+ "page_idx": 2
359
+ },
360
+ {
361
+ "type": "list",
362
+ "sub_type": "text",
363
+ "list_items": [
364
+ "(a) It will initially travel north, but will quickly begin to curve to the west",
365
+ "(b) It will travel north in a straight line",
366
+ "(c) It will travel northeast in a straight line",
367
+ "(d) It will initially travel east, but will quickly begin to curve north",
368
+ "(e) It will travel East in a straight line"
369
+ ],
370
+ "bbox": [
371
+ 562,
372
+ 549,
373
+ 916,
374
+ 670
375
+ ],
376
+ "page_idx": 2
377
+ },
378
+ {
379
+ "type": "text",
380
+ "text": "We feel that this wording captures exactly and unambiguously all of the different paths indicated in the original figure, but without providing any additional hints. If anything, it may make the item slightly harder for ChatGPT than the original version.",
381
+ "bbox": [
382
+ 509,
383
+ 681,
384
+ 919,
385
+ 753
386
+ ],
387
+ "page_idx": 2
388
+ },
389
+ {
390
+ "type": "text",
391
+ "text": "Thirteen other items from the FCI were modified in a similar way. Six items without figures received minor text modifications that should not have affected the nature of the physics being tested. For example, in clusters of questions where some items referenced \"the previous problem,\" we removed these references and simply restated the set-up from the prior problem, so that items could be asked about independently if needed. We also rephrased any questions that were left with an open-ended statement for the student to complete, since initial experiments showed that ChatGPT3.5 occasionally",
392
+ "bbox": [
393
+ 509,
394
+ 753,
395
+ 919,
396
+ 914
397
+ ],
398
+ "page_idx": 2
399
+ },
400
+ {
401
+ "type": "page_number",
402
+ "text": "3",
403
+ "bbox": [
404
+ 906,
405
+ 34,
406
+ 919,
407
+ 46
408
+ ],
409
+ "page_idx": 2
410
+ },
411
+ {
412
+ "type": "text",
413
+ "text": "appeared \"confused\" when it was not explicitly asked a question. Hence, a question like item one, which originally read:",
414
+ "bbox": [
415
+ 81,
416
+ 65,
417
+ 488,
418
+ 109
419
+ ],
420
+ "page_idx": 3
421
+ },
422
+ {
423
+ "type": "text",
424
+ "text": "Two metal balls are the same size but one weighs twice as much as the other. The balls are dropped from the roof of a single story building at the same instant of time. The time it takes the balls to reach the ground below will be:",
425
+ "bbox": [
426
+ 122,
427
+ 125,
428
+ 450,
429
+ 210
430
+ ],
431
+ "page_idx": 3
432
+ },
433
+ {
434
+ "type": "list",
435
+ "sub_type": "text",
436
+ "list_items": [
437
+ "(a) About half as long for the heavier ball as for the lighter one",
438
+ "(b) About half as long for the lighter ball as for the heavier one",
439
+ "(c) About the same for both balls",
440
+ "(d) Considerably less for the heavier ball, but not necessarily half as long",
441
+ "(e) Considerably less for the lighter ball, but not necessarily half as long"
442
+ ],
443
+ "bbox": [
444
+ 132,
445
+ 224,
446
+ 486,
447
+ 380
448
+ ],
449
+ "page_idx": 3
450
+ },
451
+ {
452
+ "type": "text",
453
+ "text": "Was rephrased to end with a direct question:",
454
+ "bbox": [
455
+ 99,
456
+ 395,
457
+ 426,
458
+ 411
459
+ ],
460
+ "page_idx": 3
461
+ },
462
+ {
463
+ "type": "text",
464
+ "text": "Two metal balls are the same size but one weighs twice as much as the other. The balls are dropped from the roof of a single story building at the same instant of time. Which of the following describes the time it takes the balls to reach the ground below?",
465
+ "bbox": [
466
+ 122,
467
+ 424,
468
+ 450,
469
+ 512
470
+ ],
471
+ "page_idx": 3
472
+ },
473
+ {
474
+ "type": "list",
475
+ "sub_type": "text",
476
+ "list_items": [
477
+ "(a) About half as long for the heavier ball as for the lighter one",
478
+ "(b) About half as long for the lighter ball as for the heavier one",
479
+ "(c) About the same for both balls",
480
+ "(d) Considerably less for the heavier ball, but not necessarily half as long",
481
+ "(e) Considerably less for the lighter ball, but not necessarily half as long"
482
+ ],
483
+ "bbox": [
484
+ 132,
485
+ 526,
486
+ 486,
487
+ 681
488
+ ],
489
+ "page_idx": 3
490
+ },
491
+ {
492
+ "type": "text",
493
+ "text": "It seems unlikely that such changes impacted either the physics content or the difficulty of the items. Finally, four of the items were left entirely unchanged.",
494
+ "bbox": [
495
+ 81,
496
+ 696,
497
+ 488,
498
+ 738
499
+ ],
500
+ "page_idx": 3
501
+ },
502
+ {
503
+ "type": "text",
504
+ "text": "This meant that we were able to ask ChatGPT 23 of the FCI's 30 items. Although others have shown that it is possible to get a representative sample of a student's performance using only a subset of the FCI questions [25], it happens that the \"unusable\" questions are not uniformly distributed across all question categories. Removing problems 19 and 20, for example, meant removing the only questions on linear kinematics from the instrument. Although this affects our ability to make comparisons with results from the \"full\" FCI, we believe this difficulty can be overcome, as we shall discuss in Sec IV below.",
505
+ "bbox": [
506
+ 81,
507
+ 739,
508
+ 488,
509
+ 912
510
+ ],
511
+ "page_idx": 3
512
+ },
513
+ {
514
+ "type": "table",
515
+ "img_path": "images/0ce19f77c6180b7014f498828a5ca29360706a2da71c560c8579a5655e0cbf2e.jpg",
516
+ "table_caption": [],
517
+ "table_footnote": [],
518
+ "table_body": "<table><tr><td>Type of change</td><td>Items</td></tr><tr><td>None</td><td>1, 4, 29, 30</td></tr><tr><td>Minor text</td><td>2, 3, 13, 25, 26, 27</td></tr><tr><td>Figure description</td><td>5, 7, 9, 10, 11, 15, 16, 17, 18, 22, 23, 24, 28</td></tr><tr><td>Unusable</td><td>6, 8, 12, 14, 19, 20, 21</td></tr></table>",
519
+ "bbox": [
520
+ 516,
521
+ 64,
522
+ 939,
523
+ 150
524
+ ],
525
+ "page_idx": 3
526
+ },
527
+ {
528
+ "type": "text",
529
+ "text": "TABLE I: Table of items from the FCI and the ways that they were (or were not) modified for use in this work. Seven items were not used.",
530
+ "bbox": [
531
+ 524,
532
+ 171,
533
+ 906,
534
+ 215
535
+ ],
536
+ "page_idx": 3
537
+ },
538
+ {
539
+ "type": "text",
540
+ "text": "C. Administering the FCI to ChatGPT",
541
+ "text_level": 1,
542
+ "bbox": [
543
+ 557,
544
+ 244,
545
+ 872,
546
+ 260
547
+ ],
548
+ "page_idx": 3
549
+ },
550
+ {
551
+ "type": "text",
552
+ "text": "We began interacting with the version of ChatGPT which existed during the month of January, 2023, and used our initial explorations there to develop a the guidelines we used for how to pose questions. Instances of conversations with ChatGPT are completely separate, in the sense that ChatGPT does not \"remember\" content from one chat in a separate chat, so variations of a question can be asked in parallel to identify the best practices for posing the questions. On the other hand, within a conversation ChatGPT can remember content back to a depth of about 3000 words. But in practice, this is not enough to remember a full administration of the FCI. This is part of the reason that we chose to rephrase each question so that it could stand alone, rather than referencing things from \"the previous question,\" etc.",
553
+ "bbox": [
554
+ 511,
555
+ 277,
556
+ 919,
557
+ 494
558
+ ],
559
+ "page_idx": 3
560
+ },
561
+ {
562
+ "type": "text",
563
+ "text": "Our questions to ChatGPT were each posed in the following format:",
564
+ "bbox": [
565
+ 511,
566
+ 494,
567
+ 918,
568
+ 523
569
+ ],
570
+ "page_idx": 3
571
+ },
572
+ {
573
+ "type": "text",
574
+ "text": "Two metal balls are the same size but one weighs twice as much as the other. The balls are dropped from the roof of a single story building at the same instant of time. Which of the following describes the time it takes the balls to reach the ground below?",
575
+ "bbox": [
576
+ 552,
577
+ 535,
578
+ 879,
579
+ 623
580
+ ],
581
+ "page_idx": 3
582
+ },
583
+ {
584
+ "type": "list",
585
+ "sub_type": "text",
586
+ "list_items": [
587
+ "(a) About half as long for the heavier ball as for the lighter one",
588
+ "(b) About half as long for the lighter ball as for the heavier one",
589
+ "(c) About the same for both balls",
590
+ "(d) Considerably less for the heavier ball, but not necessarily half as long",
591
+ "(e) Considerably less for the lighter ball, but not necessarily half as long Please answer with a letter (A,B,C,D,or E) and a brief explanation of your reasoning."
592
+ ],
593
+ "bbox": [
594
+ 562,
595
+ 633,
596
+ 916,
597
+ 814
598
+ ],
599
+ "page_idx": 3
600
+ },
601
+ {
602
+ "type": "text",
603
+ "text": "A prior work, in which bar exam questions were administered to ChatGPT, found various tricks that caused it to perform better with multiple-choice questions [11] (the art of tweaking the input to an LLM to optimize its response in this fashion is called \"prompt engineering.\") In particular, they found that that, rather than asking",
604
+ "bbox": [
605
+ 511,
606
+ 825,
607
+ 919,
608
+ 914
609
+ ],
610
+ "page_idx": 3
611
+ },
612
+ {
613
+ "type": "page_number",
614
+ "text": "4",
615
+ "bbox": [
616
+ 906,
617
+ 34,
618
+ 919,
619
+ 46
620
+ ],
621
+ "page_idx": 3
622
+ },
623
+ {
624
+ "type": "text",
625
+ "text": "ChatGPT for a single answer, it performed better when it was asked to rank its top three choices (though in actuality only it's top choice was scored). However, other similar work [10] fails to note an effect of this kind, and our own experiments with this alternative prompt structure similarly showed no improvement in the model's overall performance.",
626
+ "bbox": [
627
+ 81,
628
+ 65,
629
+ 486,
630
+ 167
631
+ ],
632
+ "page_idx": 4
633
+ },
634
+ {
635
+ "type": "text",
636
+ "text": "We also experimented with two additional procedures. First, we took advantage of feature offered by ChatGPT which allows the user to request that it \"regenerate response\" after it finishes its output. At a gross level, this feature is similar to asking an algorithm for numerically solving some equation to start again but with a different random initial guess. One expects that the results will generally converge to two similar outputs, but perhaps not arrive at exactly the same point. We used this feature as a rudimentary way to explore the \"stability\" of ChatGPT's responses, which might in turn be thought of as a proxy for its \"confidence\" in its answers. Our very preliminary results based on this experimentation are discussed in Sec. IV A 3.",
637
+ "bbox": [
638
+ 81,
639
+ 167,
640
+ 486,
641
+ 369
642
+ ],
643
+ "page_idx": 4
644
+ },
645
+ {
646
+ "type": "text",
647
+ "text": "Finally, we tested a very different prompt with a very different objective, which we call the \"NOVICE\" prompt. In this prompt each question was posed to ChatGPT in the following format:",
648
+ "bbox": [
649
+ 81,
650
+ 369,
651
+ 486,
652
+ 428
653
+ ],
654
+ "page_idx": 4
655
+ },
656
+ {
657
+ "type": "text",
658
+ "text": "Please answer the following question as though you were a novice high-school student who has not studied physics and does not yet understand Newton's laws:",
659
+ "bbox": [
660
+ 122,
661
+ 440,
662
+ 449,
663
+ 497
664
+ ],
665
+ "page_idx": 4
666
+ },
667
+ {
668
+ "type": "text",
669
+ "text": "A large truck collides head-on with a small compact car. During the collision, which of the following is true:",
670
+ "bbox": [
671
+ 122,
672
+ 503,
673
+ 450,
674
+ 547
675
+ ],
676
+ "page_idx": 4
677
+ },
678
+ {
679
+ "type": "list",
680
+ "sub_type": "text",
681
+ "list_items": [
682
+ "(a) the truck exerts a greater amount of force on the car than the car exerts on the truck.",
683
+ "(b) the car exerts a greater amount of force on the truck than the truck exerts on the car.",
684
+ "(c) neither exerts a force on the other, the car gets smashed simply because it gets in the way of the truck.",
685
+ "(d) the truck exerts a force on the car but the car does not exert a force on the truck.",
686
+ "(e) the truck exerts the same amount of force on the car as the car exerts on the truck. Just give us your best guess. We know you may not know the correct answer, but we'd like to know which answer makes the most sense to you without any formal physics training."
687
+ ],
688
+ "bbox": [
689
+ 132,
690
+ 559,
691
+ 488,
692
+ 813
693
+ ],
694
+ "page_idx": 4
695
+ },
696
+ {
697
+ "type": "text",
698
+ "text": "We landed on this particular prompt after some trial and error—see Sec IV B. Using this framing, we investigate whether ChatGPT could potentially be useful as a tool for instructors to be able to preview and probe the thinking of a sample \"novice\" student while they prepare their teaching materials.",
699
+ "bbox": [
700
+ 81,
701
+ 825,
702
+ 486,
703
+ 912
704
+ ],
705
+ "page_idx": 4
706
+ },
707
+ {
708
+ "type": "text",
709
+ "text": "Initially, we administered the FCI with both the \"BASIC\" and \"NOVIVCE\" prompts, including stability analysis, To ChatGPT3.5 during the weeks of Feb 13 and Feb 20, 2023. Notably, between our initial experimentation in January and the final administrations of the FCI which generated the results below, there was a significant update to the model which focused on improving its mathematical capabilities, following a series of relatively high-profile examples where users were able to get ChatGPT to espouse manifestly untrue statements about elementary mathematics. Since none of the items in the FCI involve calculations, we think it is unlikely that the update had much impact on our main results. After the release of ChatGPT4, we repeated the same administrations to the new model during the week of March 13th. Results from both models are presented in the remainder of this paper.",
710
+ "bbox": [
711
+ 509,
712
+ 65,
713
+ 919,
714
+ 313
715
+ ],
716
+ "page_idx": 4
717
+ },
718
+ {
719
+ "type": "text",
720
+ "text": "IV. RESULTS",
721
+ "text_level": 1,
722
+ "bbox": [
723
+ 653,
724
+ 340,
725
+ 779,
726
+ 353
727
+ ],
728
+ "page_idx": 4
729
+ },
730
+ {
731
+ "type": "text",
732
+ "text": "We begin with an analysis of ChatGPT's responses to the 23 usable FCI questions in the modified form described above. For the first, \"BASIC\" administration of these problems, we analyze the responses on both a quantitative level, focused purely on its multiple-choice response, and on a qualitative one, by analyzing its stated reason for the answer it chose.",
733
+ "bbox": [
734
+ 511,
735
+ 372,
736
+ 919,
737
+ 473
738
+ ],
739
+ "page_idx": 4
740
+ },
741
+ {
742
+ "type": "text",
743
+ "text": "A. “BASIC” responses",
744
+ "text_level": 1,
745
+ "bbox": [
746
+ 620,
747
+ 502,
748
+ 810,
749
+ 516
750
+ ],
751
+ "page_idx": 4
752
+ },
753
+ {
754
+ "type": "text",
755
+ "text": "1. Answer Choices",
756
+ "text_level": 1,
757
+ "bbox": [
758
+ 648,
759
+ 534,
760
+ 785,
761
+ 546
762
+ ],
763
+ "page_idx": 4
764
+ },
765
+ {
766
+ "type": "text",
767
+ "text": "When prompted with the 23 usable FCI questions using the \"BASIC\" prompt format, ChatGPT3.5 gave a correct answer for fifteen of them. It's correct answers are roughly evenly distributed among the unaltered problems and the modified problems which originally contained figures, though it struggles with a certain subset of the modified problems which involve \"spatial reasoning,\" discussed in more detail in Sec IV A 2.",
768
+ "bbox": [
769
+ 509,
770
+ 565,
771
+ 918,
772
+ 681
773
+ ],
774
+ "page_idx": 4
775
+ },
776
+ {
777
+ "type": "text",
778
+ "text": "It is not obvious how we should interpret this number in order to make comparisons to human students taking the FCI. It is tempting to assign ChatGPT a \"score\" of $15/23 \\approx 65\\%$ , but it is not entirely clear that this is fair. An argument could instead be made that its \"score\" is $15/30 = 50\\%$ , because of course one important aspect of \"understanding\" a physics problem is the ability to synthesize the data being presented across multiple representations [26]. Since ChatGPT simply cannot comprehend a question that requires reference to a figure, it could be said that it manifestly displays no understanding of that particular item.",
779
+ "bbox": [
780
+ 509,
781
+ 681,
782
+ 919,
783
+ 854
784
+ ],
785
+ "page_idx": 4
786
+ },
787
+ {
788
+ "type": "text",
789
+ "text": "In either case, however, ChatGPT3.5's quantitative performance compares quite adequately with the posttest results that are typical for students taking the FCI at the end of their first semester of college-level physics.",
790
+ "bbox": [
791
+ 509,
792
+ 854,
793
+ 918,
794
+ 912
795
+ ],
796
+ "page_idx": 4
797
+ },
798
+ {
799
+ "type": "page_number",
800
+ "text": "5",
801
+ "bbox": [
802
+ 906,
803
+ 34,
804
+ 918,
805
+ 45
806
+ ],
807
+ "page_idx": 4
808
+ },
809
+ {
810
+ "type": "text",
811
+ "text": "We can make a direct comparison to one of the author's previous classes in 2018, in which 415 students took the FCI at the end of the term, and produced the distribution of scores found in figure 1. In that distribution, the median student score was a $56\\%$ , meaning that depending on how charitably one treats its partial results, ChatGPT was either just below the performance of a typical student or else perhaps a nontrivial cut above average. As an alternative perspective, the median letter grades of the students scoring around a $50\\%$ or a $65\\%$ were a B- and a B+, respectively.",
812
+ "bbox": [
813
+ 81,
814
+ 65,
815
+ 491,
816
+ 227
817
+ ],
818
+ "page_idx": 5
819
+ },
820
+ {
821
+ "type": "image",
822
+ "img_path": "images/d531a3288bd5ef80b007e3abbb4e213aa72eb3c35c3fd8222142c47e1fd38949.jpg",
823
+ "image_caption": [
824
+ "FIG. 1: The distribution of FCI scores at the end of a semester of college physics at a large public R1 university. ChatGPT3.5's performance using \"BASIC\" prompting would put it either in the bin containing the median (50%) or in the bin just above (65%)"
825
+ ],
826
+ "image_footnote": [],
827
+ "bbox": [
828
+ 142,
829
+ 260,
830
+ 444,
831
+ 435
832
+ ],
833
+ "page_idx": 5
834
+ },
835
+ {
836
+ "type": "text",
837
+ "text": "When we repeated the analysis using ChatGPT 4, we found that the model now responded \"correctly\" to 22 of the 23 questions. Even more strikingly, the question which it \"missed\" was item 26, which asks about the behavior of a box being pushed against friction. The answer which it chooses, \"D,\" would in fact be correct if air resistance were to be considered non-negligible, and the problem text does not specify this condition either way. Students are presumably expected to make the standard (and physically reasonable assumption) that the air resistance on a box being pushed by a human is a small effect compared to other forces in the problem. ChatGPT perhaps betrays some \"inhuman\" behavior by not making a similar assumption. Nevertheless, when we prompted the model in a new instance with the same question and added the text \"assume air resistance is negligible,\" it gives the correct answer here as well.",
838
+ "bbox": [
839
+ 81,
840
+ 571,
841
+ 490,
842
+ 819
843
+ ],
844
+ "page_idx": 5
845
+ },
846
+ {
847
+ "type": "text",
848
+ "text": "Needless to say, the performance of ChatGPT4 compares to the very uppermost decile of performance by physics students in our post-test data, and of course its near-perfect responses no longer show and weakness for problems that originally relied on figured and/or spatial reasoning.",
849
+ "bbox": [
850
+ 81,
851
+ 825,
852
+ 491,
853
+ 914
854
+ ],
855
+ "page_idx": 5
856
+ },
857
+ {
858
+ "type": "text",
859
+ "text": "2. Free Responses",
860
+ "text_level": 1,
861
+ "bbox": [
862
+ 651,
863
+ 66,
864
+ 782,
865
+ 80
866
+ ],
867
+ "page_idx": 5
868
+ },
869
+ {
870
+ "type": "text",
871
+ "text": "As part of the \"BASIC\" prompt, we asked ChatGPT to provide \"a brief explanation of [its] reasoning.\" This gives us an additional dimension along which to assess its performance, albeit a qualitative one.",
872
+ "bbox": [
873
+ 511,
874
+ 98,
875
+ 919,
876
+ 155
877
+ ],
878
+ "page_idx": 5
879
+ },
880
+ {
881
+ "type": "text",
882
+ "text": "As a baseline, we read through each response and made a decision about whether the answer was completely accurate, which is the level of performance we would expect from an expert physicist considering such introductory concepts. In this determination we disregarded tone and focused only on content. So, ChatGPT was not penalized if its correct explanation sounded stilted, in a way that suggested machine learning rather than a human intelligence behind the response. On the other hand, we marked \"wrong\" any response that contained an error which a trained physicist would not reasonably have made, no matter how ancillary the error was to ChatGPT's analysis. Certainly, these determinations were simply a judgement call on the part of the author and of course were not blind to the fact that the responses were generated by a language model. However, we feel that in most cases the determinations were relatively black-and-white, since they centered on a decision about whether the statements made in the explanation were factually true.",
883
+ "bbox": [
884
+ 509,
885
+ 155,
886
+ 919,
887
+ 444
888
+ ],
889
+ "page_idx": 5
890
+ },
891
+ {
892
+ "type": "text",
893
+ "text": "In our judgement, 10 of ChatGPT3.5's 23 free responses (43%) fully passed this \"expert test.\" These responses were typically relatively terse, invoking the correct constellation of concepts, and often making explicit note of facts or concepts which ruled out wrong answers as well as the facts which supported the correct one. For example, item 13 of the FCI says",
894
+ "bbox": [
895
+ 511,
896
+ 445,
897
+ 919,
898
+ 547
899
+ ],
900
+ "page_idx": 5
901
+ },
902
+ {
903
+ "type": "text",
904
+ "text": "\"A boy throws a steel ball straight up. Consider the motion of the ball only after it has left the boy's hand but before it touches the ground, and assume that forces exerted by the air are negligible. For these conditions, what are the force(s) acting on the ball?",
905
+ "bbox": [
906
+ 552,
907
+ 556,
908
+ 879,
909
+ 643
910
+ ],
911
+ "page_idx": 5
912
+ },
913
+ {
914
+ "type": "list",
915
+ "sub_type": "text",
916
+ "list_items": [
917
+ "(a) A downward force of gravity along with a steadily decreasing upward force.",
918
+ "(b) A steadily decreasing upward force from the moment it leaves the boy's hand until it reaches its highest point; on the way down there is a steadily increasing downward force of gravity as the object gets closer to the earth.",
919
+ "(c) An almost constant downward force of gravity along with an upward force that steadily decreases until the ball reaches its highest point; on the way down there is only a constant downward force of gravity.",
920
+ "(d) An almost constant downward force of gravity only.",
921
+ "(e) None of the above. The ball falls back to ground because of its natural tendency to rest on the surface of the earth."
922
+ ],
923
+ "bbox": [
924
+ 562,
925
+ 652,
926
+ 919,
927
+ 911
928
+ ],
929
+ "page_idx": 5
930
+ },
931
+ {
932
+ "type": "page_number",
933
+ "text": "6",
934
+ "bbox": [
935
+ 906,
936
+ 34,
937
+ 919,
938
+ 46
939
+ ],
940
+ "page_idx": 5
941
+ },
942
+ {
943
+ "type": "text",
944
+ "text": "Note the absence of terms like \"projectile motion,\" which might cause a language model (or a student!) to mechanically regurgitate the fact that only the force of gravity is acting. Despite this challenge, ChatGPT'3.5s response, which was representative of its performance on these ten \"entirely correct\" free responses, was as follows:",
945
+ "bbox": [
946
+ 81,
947
+ 65,
948
+ 491,
949
+ 154
950
+ ],
951
+ "page_idx": 6
952
+ },
953
+ {
954
+ "type": "text",
955
+ "text": "(D) An almost constant downward force of gravity only.",
956
+ "text_level": 1,
957
+ "bbox": [
958
+ 122,
959
+ 161,
960
+ 450,
961
+ 191
962
+ ],
963
+ "page_idx": 6
964
+ },
965
+ {
966
+ "type": "text",
967
+ "text": "After the steel ball leaves the boy's hand, the only force acting on it is the force of gravity, which is directed downwards. This force is almost constant throughout the motion of the ball, with only slight variations due to changes in the distance between the ball and the center of the Earth. There is no upward force acting on the ball, as the ball is not being propelled upwards once it leaves the boy's hand. As a result, the ball will continue to rise until the force of gravity slows it down and eventually reverses its direction, causing it to fall back to the ground. Therefore, the correct statement is (D) An almost constant downward force of gravity only.",
968
+ "bbox": [
969
+ 122,
970
+ 195,
971
+ 450,
972
+ 412
973
+ ],
974
+ "page_idx": 6
975
+ },
976
+ {
977
+ "type": "text",
978
+ "text": "In addition to identifying the correct force, it makes specific note of the absence of any ongoing propulsive force (For comparison, in the Fall '18 post-test student data, $38\\%$ of students chose one of the responses which includes an upward force during the motion). We note also that ChatGPT3.5 goes out of its way to comment on and correctly explain why the correct answer choice has the qualifier \"almost-constant\" on the force of gravity. This level of commentary is in fact deeper than we would typically expect of a first-semester physics student, at least unprompted. In fact, the inclusion of this detail in the question strikes us as the kind of thing an instructor does to make sure that their questions are airtight so that the strongest students have no room to complain or overthink.",
979
+ "bbox": [
980
+ 81,
981
+ 421,
982
+ 488,
983
+ 637
984
+ ],
985
+ "page_idx": 6
986
+ },
987
+ {
988
+ "type": "text",
989
+ "text": "Of course, not all of ChatGPT3.5's responses reach this level of accuracy. There were five free-responses which we judged to be more correct than not (in fact, typically almost entirely correct), but containing at least one patently untrue statement which simply could not have been made by an expert physicist. These minor errors were often \"surprising\" in the sense that they did not necessarily represent the kinds of classic misapprehensions we have come to expect from new students on similar questions. And the errors were typically \"unforced\" in the sense that they did not appear in the portion of the explanation that was strictly necessary to identify the correct answer. For example, consider item 22 of the FCI, which asks about a rocket in deep space that was moving horizontally from left to right at a constant speed until its engine produced a period of constant upward force. The question asks about the speed of the rocket while the engine is on (whether it is increasing, decreasing, constant, etc).",
990
+ "bbox": [
991
+ 81,
992
+ 638,
993
+ 490,
994
+ 914
995
+ ],
996
+ "page_idx": 6
997
+ },
998
+ {
999
+ "type": "text",
1000
+ "text": "ChatGPT3.5 correctly identifies that the speed is increasing while the engine is on, but says the following:",
1001
+ "bbox": [
1002
+ 511,
1003
+ 65,
1004
+ 919,
1005
+ 95
1006
+ ],
1007
+ "page_idx": 6
1008
+ },
1009
+ {
1010
+ "type": "text",
1011
+ "text": "Since the rocket is subject to no outside forces, its motion is determined solely by the forces it experiences from its engine. When the engine is turned on, the rocket is subject to a constant thrust that is perpendicular to its initial motion. This thrust produces an acceleration that is also perpendicular to the rocket's initial motion. As a result, the rocket moves along a circular path, with its speed increasing as it moves from point \"b\" to point \"c\". Therefore, the answer is (B) its speed is continuously increasing.",
1012
+ "bbox": [
1013
+ 552,
1014
+ 106,
1015
+ 880,
1016
+ 281
1017
+ ],
1018
+ "page_idx": 6
1019
+ },
1020
+ {
1021
+ "type": "text",
1022
+ "text": "All of the elements of the correct chain of reasoning are present: (1) the net force in this case is equal to the force of the engine (2) when the engine is on, the net force is nonzero (3) a nonzero net force causes an acceleration and (4) the acceleration is not directed against the motion and hence it will cause the velocity to increase. But ChatGPT3.5 make an unforced error by trying to describe the nature of the rocket's motion during this time, even though this was not part of the question. It identifies the rocket's motion as \"circular,\" perhaps because in its corpus of physics there is a strong association between discussions of circular motion and \"forces which are perpendicular to the direction of motion.\" Missing the obvious detail that this particular force is only initially perpendicular to the motion, it draws an incorrect conclusion. And the mistake is as surprising as it is unnecessary: we do not imagine that many freshman physics students are picturing a rocket which becomes locked into something like cyclotron motion when they read this problem, even though many of the found it difficult (57% chose incorrectly on the Fall '18 post-test).",
1023
+ "bbox": [
1024
+ 509,
1025
+ 291,
1026
+ 919,
1027
+ 595
1028
+ ],
1029
+ "page_idx": 6
1030
+ },
1031
+ {
1032
+ "type": "text",
1033
+ "text": "These five responses correspond with the other five problems for which ChatGPT3.5 chose the correct multiple-choice answer, although we graded these responses separately and it was not a foregone conclusion that it would coincide in this way.",
1034
+ "bbox": [
1035
+ 511,
1036
+ 595,
1037
+ 919,
1038
+ 667
1039
+ ],
1040
+ "page_idx": 6
1041
+ },
1042
+ {
1043
+ "type": "text",
1044
+ "text": "After 10 fully-correct responses and five nearly-correct responses that nevertheless contain unambiguous errors, ChatGPT3.5 gives eight responses which are nowhere near the mark. These responses typically contain an assortment of words and phrases which are genuinely relevant to answering the problem. Often, entire sentences form accurate statements about the underlying physics. But the ideas are linked up in ways which are grammatically correct but often logically unreasonable or inconsistent, and the responses typically also contain discussions of details that are wholly irrelevant, intermixed on equal footing with the key concepts. The analyses may swerve unpredictably onto different tracks, or arrive at conclusions entirely at odds with the preceding arguments, as if the model had no idea what it had just said, and yet it continues confidently to claim an inescapable conclusion. They certainly do not project anything like",
1045
+ "bbox": [
1046
+ 509,
1047
+ 667,
1048
+ 919,
1049
+ 914
1050
+ ],
1051
+ "page_idx": 6
1052
+ },
1053
+ {
1054
+ "type": "page_number",
1055
+ "text": "7",
1056
+ "bbox": [
1057
+ 906,
1058
+ 34,
1059
+ 919,
1060
+ 46
1061
+ ],
1062
+ "page_idx": 6
1063
+ },
1064
+ {
1065
+ "type": "text",
1066
+ "text": "understanding-quite to the contrary, if given by a human they would be taken as specific evidence that they are regurgitating memorized words and do not know what they are talking about.",
1067
+ "bbox": [
1068
+ 81,
1069
+ 65,
1070
+ 486,
1071
+ 125
1072
+ ],
1073
+ "page_idx": 7
1074
+ },
1075
+ {
1076
+ "type": "text",
1077
+ "text": "In defense of ChatGPT3.5, half of the eight problems in which the free-response is wide of the mark are problems that originally contained figures. In particular, these four problems are the ones in which the answers rely most on spatial reasoning and on establishing some sense of direction, and ChatGPT3.5 seems simply unable to recognize the directional system being used.",
1078
+ "bbox": [
1079
+ 81,
1080
+ 125,
1081
+ 486,
1082
+ 224
1083
+ ],
1084
+ "page_idx": 7
1085
+ },
1086
+ {
1087
+ "type": "text",
1088
+ "text": "Consider for example its response to item seven: the problem regarding a ball released during circular motion which was described above. In our wording of the question, the ball is moving north at the time its string is cut and it is allowed to move off tangent to the circle, so the correct answer about its subsequent motion should be that it travels in a straight line to the north. ChatGPT3.5 comes close to understanding this, but appears confused about two points relating to the directional system, explaining:",
1089
+ "bbox": [
1090
+ 81,
1091
+ 224,
1092
+ 486,
1093
+ 369
1094
+ ],
1095
+ "page_idx": 7
1096
+ },
1097
+ {
1098
+ "type": "text",
1099
+ "text": "When the string breaks, the ball will continue to move in a straight line tangential to the circular path it was following at that moment. Since the ball was moving north, the tangential velocity of the ball will be directed to the east.",
1100
+ "bbox": [
1101
+ 122,
1102
+ 377,
1103
+ 447,
1104
+ 463
1105
+ ],
1106
+ "page_idx": 7
1107
+ },
1108
+ {
1109
+ "type": "text",
1110
+ "text": "However, the force of gravity will also be acting on the ball, pulling it downward towards the Earth. As a result, the ball will follow a curved path towards the west as it falls.",
1111
+ "bbox": [
1112
+ 122,
1113
+ 468,
1114
+ 447,
1115
+ 526
1116
+ ],
1117
+ "page_idx": 7
1118
+ },
1119
+ {
1120
+ "type": "text",
1121
+ "text": "Therefore, the correct statement is (A) It will initially travel north, but will quickly begin to curve to the west.",
1122
+ "bbox": [
1123
+ 122,
1124
+ 529,
1125
+ 447,
1126
+ 571
1127
+ ],
1128
+ "page_idx": 7
1129
+ },
1130
+ {
1131
+ "type": "text",
1132
+ "text": "Aspects of this response are perfectly reasonable. The first sentence is entirely true, and so is the third- though most students answering this problem would ignore this vertical motion and answer only with respect to its horizontal motion, as the figure suggests. But from these two true statements, ChatGPT3.5 is unable to draw any correct conclusions about the real world direction, making the bizarre leaps from \"tangential\" to \"east\" and \"downward\" to \"west.\" One possible reason for this is that while there are good reasons to expect terms like \"tangential\" and \"gravity\" to be preferentially associated in the training corpus with discussion of balls released from circular motion, there is no reason that a particular direction like \"north\" would be similarly associated. Hence, in trying to model how a human would talk about this problem, ChatGPT3.5 gravitates correctly to details that all expert humans would mention, but then has to pick randomly to fill in a direction, because presumably all the cardinal directions are represented roughly equally in the training text. ChatGPT3.5 has succeeded at saying something that conveys general understanding based on general patterns, but failed to show specific understanding of the details in the instance at hand.",
1133
+ "bbox": [
1134
+ 81,
1135
+ 580,
1136
+ 486,
1137
+ 912
1138
+ ],
1139
+ "page_idx": 7
1140
+ },
1141
+ {
1142
+ "type": "text",
1143
+ "text": "ChatGPT4, by contrast, gave no answers in this \"highly confused\" category. We assessed 20 of its 23 free responses as being \"exactly correct,\" as one would expect from an expert physicist. The GPT4 model is notably more concise in general than its predecessor, and this brevity may have helped it avoid some of the \"unforced errors\" seen previously. But it also succeeded on nearly all of the problems for which ChatGPT3.5 was completely confused. Consider item seven, for example: the ball released from circular motion which ChatGPT3.5 assessed correctly in general terms but failed to answer correctly because it could not identify which direction was \"tangential\" in the particular case at hand. ChatGPT4 instead gives a correct answer with perfectly correct reasoning:",
1144
+ "bbox": [
1145
+ 511,
1146
+ 65,
1147
+ 918,
1148
+ 282
1149
+ ],
1150
+ "page_idx": 7
1151
+ },
1152
+ {
1153
+ "type": "text",
1154
+ "text": "When the string breaks, the ball will continue in the direction it was moving at the exact moment the string broke, due to its inertia. Since the ball was moving north when the string broke, it will continue to travel north in a straight line.",
1155
+ "bbox": [
1156
+ 552,
1157
+ 294,
1158
+ 879,
1159
+ 381
1160
+ ],
1161
+ "page_idx": 7
1162
+ },
1163
+ {
1164
+ "type": "text",
1165
+ "text": "Like ChatGPT3.5, ChatGPT4 has identified a correct general concept to invoke (inertia), but has also succeeded at drawing a specific inference in this case (note that this answer was \"stable\" and not a lucky guess; see below). Without more public information about the improvements in ChatGPT4 and the workings of its head layers in this case, we cannot know how it arrives here. But it seems plausible that it has been an able to extract a deeper level of patterns from its training text. The base pattern is that \"inertia\" and \"motion in the tangential direction\" commonly discussed in this context. But with in that pattern is a deeper one: in examples where humans discuss such questions, the solution typically refers to the exact same direction- left, north, $+\\mathrm{y}$ etc- which was referenced in the problem next to words referring to its motion at the motion the ball is released. Whether or not this is exactly why ChatGPT4 performs better on this problem, it is a good example of how increasing the models inferential depth can allow it to project an apparently deeper understanding physics without having been \"taught\" any more explicit physics concepts.",
1166
+ "bbox": [
1167
+ 511,
1168
+ 391,
1169
+ 918,
1170
+ 696
1171
+ ],
1172
+ "page_idx": 7
1173
+ },
1174
+ {
1175
+ "type": "text",
1176
+ "text": "ChatGPT4's three remaining response were all \"nearly perfect,\" with solid reasoning and incidental minor errors. For example, in response to item 16, which asks about the forces on an elevator moving at constant velocity, it says that there must be no net force on the elevator and hence that \"any forces acting on the object must come as equal and opposite pairs.\" This leads it to a correct conclusion that in this particular case that the upward tension from the elevator cable has the same magnitude as the downward weight force. But an expert analyzing the problem would not quite have made the statement that \"any forces\" acting MUST come as pairs—an elevator with two cables, for example, could still move at constant velocity without any two forces existing as an equal and opposite set. But this level of \"error,\" which",
1177
+ "bbox": [
1178
+ 511,
1179
+ 696,
1180
+ 918,
1181
+ 912
1182
+ ],
1183
+ "page_idx": 7
1184
+ },
1185
+ {
1186
+ "type": "page_number",
1187
+ "text": "8",
1188
+ "bbox": [
1189
+ 906,
1190
+ 34,
1191
+ 918,
1192
+ 45
1193
+ ],
1194
+ "page_idx": 7
1195
+ },
1196
+ {
1197
+ "type": "text",
1198
+ "text": "represents ChatGPT4's worst performance, is far more minor than the worst mistakes of ChatGPPT3.5.",
1199
+ "bbox": [
1200
+ 83,
1201
+ 65,
1202
+ 488,
1203
+ 95
1204
+ ],
1205
+ "page_idx": 8
1206
+ },
1207
+ {
1208
+ "type": "text",
1209
+ "text": "3. Stability",
1210
+ "text_level": 1,
1211
+ "bbox": [
1212
+ 243,
1213
+ 126,
1214
+ 331,
1215
+ 138
1216
+ ],
1217
+ "page_idx": 8
1218
+ },
1219
+ {
1220
+ "type": "text",
1221
+ "text": "Because there is a probabilistic element inherent in ChatGPT's operation, we make a brief study of the \"stability\" of the models' responses by experimenting with two types of perturbations. This investigation is suggestive but very preliminary; deeper work could certainly be done on this topic.",
1222
+ "bbox": [
1223
+ 81,
1224
+ 157,
1225
+ 488,
1226
+ 244
1227
+ ],
1228
+ "page_idx": 8
1229
+ },
1230
+ {
1231
+ "type": "text",
1232
+ "text": "First, we make use of ChatGPT's \"regenerate response\" button, which allows us to simply request that the model start over its process with the exact same prompt, albeit with some variation in whatever internal parameters it uses to traverse its vast network of possible responses. This is analogous to asking a numerical equation solver to start over with a different random initial guess, and just as in that case, one would hope that a stable method would consistently produce comparable outputs.",
1233
+ "bbox": [
1234
+ 81,
1235
+ 246,
1236
+ 488,
1237
+ 390
1238
+ ],
1239
+ "page_idx": 8
1240
+ },
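The stability probe described in the text block above lends itself to a simple harness: pose the identical prompt several times and tally the answer letters that come back. The sketch below is illustrative only; `ask_model` is a hypothetical stand-in for whatever chat interface is used (the study itself relied on the web interface's regenerate button), and the regular-expression answer extraction is our own assumption.

```python
# Minimal stability-probe sketch (assumes an `ask_model` callable is supplied).
import re
from collections import Counter
from typing import Callable, Optional

def answer_stability(prompt: str, ask_model: Callable[[str], str], n_trials: int = 3) -> dict:
    """Pose the same prompt n_trials times and tally the answer letters chosen."""
    letters: list[Optional[str]] = []
    for _ in range(n_trials):
        reply = ask_model(prompt)                 # hypothetical chat call
        match = re.search(r"\b([A-E])\b", reply)  # crude extraction of the chosen option
        letters.append(match.group(1) if match else None)
    modal_letter, modal_count = Counter(letters).most_common(1)[0]
    return {"answers": letters,
            "modal_answer": modal_letter,
            "stability": modal_count / n_trials}
```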
1241
+ {
1242
+ "type": "text",
1243
+ "text": "For this first, initial probe of the Chatbot's stability, we regenerated each response just three times, but already some patterns seemed clear. The only cases where ChatGPT3.5 consistently changed its answers between regenerations were a subset of the eight questions for which we judged that its written responses were completely confused. There were two other cases of correct answers where the model occasionally changed its mind when the responses were regenerated, but in those cases it stuck with the correct answers a clear majority of the time. This pattern is perhaps somehow comforting or impressive for the question of ChatGPT's ability to project understanding: when it knows the answer, it knows the answer with some stability. When it is flying by the seat of its pants, it also does not care what destination it flies to.",
1244
+ "bbox": [
1245
+ 81,
1246
+ 391,
1247
+ 488,
1248
+ 621
1249
+ ],
1250
+ "page_idx": 8
1251
+ },
1252
+ {
1253
+ "type": "text",
1254
+ "text": "The other form of perturbation which should be explored is perturbations to the input, rather than to the initial starting conditions of the model themselves. Of course, because we cannot control the inner workings of the model and put it back in the exact same internal state every time, it is not possible to disentangle these two. Nevertheless, we attempted for a subset of the problems to feed ChatGPT3.5 variations on the same questions but with irrelevant words and sentence structures switched around (e.g. \"A boy throws a steel ball straight up\" becomes \"a rock is tossed straight upward by a girl). Once again, we found that this generally did not affect the response if the model got the initial problem right, but it could change the responses for the problems it was already getting completely wrong.",
1255
+ "bbox": [
1256
+ 81,
1257
+ 623,
1258
+ 488,
1259
+ 840
1260
+ ],
1261
+ "page_idx": 8
1262
+ },
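Surface-level rewordings of the kind described above can be generated mechanically. The snippet below is a toy illustration with made-up substitution pairs; the rewordings actually used also changed sentence structure (for example, switching to the passive voice), which simple substitution does not capture.

```python
# Toy sketch: swap physics-irrelevant surface details while leaving the physics unchanged.
SWAPS = {
    "boy": "girl",         # who performs the action
    "steel ball": "rock",  # which object is used
    "throws": "tosses",    # incidental verb choice
}

def perturb(question: str) -> str:
    out = question
    for old, new in SWAPS.items():
        out = out.replace(old, new)
    return out

print(perturb("A boy throws a steel ball straight up."))
# -> A girl tosses a rock straight up.
```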
1263
+ {
1264
+ "type": "text",
1265
+ "text": "Our discussion here has focused on ChatGPT3.5 because we found that ChatGPT4's responses were completely insensitive to either of the perturbation types described above. Perhaps this is not surprising: ChatGPT3.5's responses were only unstable on problems",
1266
+ "bbox": [
1267
+ 81,
1268
+ 840,
1269
+ 488,
1270
+ 914
1271
+ ],
1272
+ "page_idx": 8
1273
+ },
1274
+ {
1275
+ "type": "text",
1276
+ "text": "where its responses were confused and incoherent, and ChatGPT4 never reached that level of confusion.",
1277
+ "bbox": [
1278
+ 511,
1279
+ 65,
1280
+ 919,
1281
+ 95
1282
+ ],
1283
+ "page_idx": 8
1284
+ },
1285
+ {
1286
+ "type": "text",
1287
+ "text": "B. \"NOVICE\" Responses",
1288
+ "text_level": 1,
1289
+ "bbox": [
1290
+ 611,
1291
+ 127,
1292
+ 821,
1293
+ 141
1294
+ ],
1295
+ "page_idx": 8
1296
+ },
1297
+ {
1298
+ "type": "text",
1299
+ "text": "This paper is focused on the question of whether ChatGPTs behavior is consistent with \"understanding\" introductory physics, but part of our motivation for asking that question is to understand how it may affect physics classrooms and physics pedagogy more broadly. One way it might be of use to an instructor would be as a way to gain insight into the unfamiliar mind of a novice physics student. If ChatGPT were able to successfully answer questions in a manner that plausibly mimicked typical student mental models [27] it might be of great value in testing and preparing lesson plans, refining exam questions, etc.",
1300
+ "bbox": [
1301
+ 509,
1302
+ 159,
1303
+ 919,
1304
+ 332
1305
+ ],
1306
+ "page_idx": 8
1307
+ },
1308
+ {
1309
+ "type": "text",
1310
+ "text": "The question of whether ChatGPT can do this is the worthy subject of its own project, but we make an initial stab at it here to establish some baseline results and investigate whether such deeper work is likely to bear fruit. To do this, we gave the modified FCI questions to ChatGPT again, but this time using a prompt which asks it to answer as though it had not yet studied any introductory physics. This prompt was developed through trial and error using a small subset of FCI problems for which we had strong evidence from our own students' prior pretests about how they might answer. These were items 4 and 26 from the FCI. Item 4 tests Newton's 3rd law in the unintuitive context of an asymmetric collision between a truck and a car. Prior to studying physics [28], many students (nearly $75\\%$ in our fall '18 pretest) believe that the truck will exert a greater force on the car than vice versa. We found that initially, even when we prompted ChatGPT to answer as though it had not studied Newton's Laws, it continued to give the expert answer that the forces were equal. It was only when we modified the prompt to include a reminder of our desired reference frame both before and after the question text itself that we saw it give the infamous novice answer described above. We saw similar behavior in question 26, which tests concepts about the balance of forces in the context of kinetic friction.",
1311
+ "bbox": [
1312
+ 509,
1313
+ 333,
1314
+ 919,
1315
+ 709
1316
+ ],
1317
+ "page_idx": 8
1318
+ },
1319
+ {
1320
+ "type": "text",
1321
+ "text": "The results from ChatGPT3.5 were mixed. To quantitatively evaluate the performance, we scored its answers on a \"key\" comprising all of the most common responses given by students in our Fall '18 pretest data, 10 of which were already correct answers. ChatGPT's answers to questions with the \"NOVICE\" prompt matched these most-common student answers on 11 occasions out of 23. In particular, seven of these 11 matches came in situations where the most common pretest answer was itself a correct answer. While it is not inherently a bad to match in these cases, because an educator using ChatGPT to test out possible student responses would want to know about cases where even an untrained student is likely to get the question right, it is discouraging to",
1322
+ "bbox": [
1323
+ 509,
1324
+ 710,
1325
+ 919,
1326
+ 914
1327
+ ],
1328
+ "page_idx": 8
1329
+ },
1330
+ {
1331
+ "type": "page_number",
1332
+ "text": "9",
1333
+ "bbox": [
1334
+ 906,
1335
+ 34,
1336
+ 918,
1337
+ 45
1338
+ ],
1339
+ "page_idx": 8
1340
+ },
1341
+ {
1342
+ "type": "text",
1343
+ "text": "see comparatively few cases where the plurality student pretest opinion was wrong and ChatGPT was also able to identify the distractor they would find most compelling.",
1344
+ "bbox": [
1345
+ 83,
1346
+ 65,
1347
+ 486,
1348
+ 109
1349
+ ],
1350
+ "page_idx": 9
1351
+ },
1352
+ {
1353
+ "type": "text",
1354
+ "text": "Curiously, the problem was not that ChatGPT did \"too well\" on the problem set. In fact, ChatGPT's \"NOVICE\" responses to the 23 modified FCI questions were right only nine times. There were only three such occasions where ChatGPT got a question right which the pretest plurality got wrong. But clearly, in the remaining cases, it did not agree with the plurality about which wrong answer seemed most intuitive.",
1355
+ "bbox": [
1356
+ 83,
1357
+ 109,
1358
+ 486,
1359
+ 227
1360
+ ],
1361
+ "page_idx": 9
1362
+ },
1363
+ {
1364
+ "type": "text",
1365
+ "text": "This binary, agree-or-disagree framework is a little unfair to ChatGPT. Imagine a scenario where $50\\%$ of students choose wrong answer \"A,\" $49\\%$ choose wrong answer \"B,\" and only $1\\%$ choose the correct answer \"C.\" If ChatGPT were to offer answer \"B\" when roleplaying as a student, it would hardly represent a glaring failure. To capture some of this nuance, we propose a simple measure where ChatGPT is given a point for each answer, weighed by the fraction of students from our Fall '18 pretest who chose that answer (so, half a point if $50\\%$ of students chose the same answer). We exclude problems which the plurality of students got right, since in such cases we cannot tell anything about whether ChatGPT is understanding novice thinking or simply agreeing with their conclusion as an expert would. Summing these scores and then normalizing by the equivalent score of the student key itself (which represents the maximum possible number of points ChatGPT could score) gives ChatGPT3.5 a score of 63. Note that if we run this calculation for the average performance of someone guessing randomly, for this particular distribution of student responses, the minimum score would be 58. Hence, ChatGPT3.5's performance is only a little better than random here.",
1366
+ "bbox": [
1367
+ 83,
1368
+ 228,
1369
+ 486,
1370
+ 575
1371
+ ],
1372
+ "page_idx": 9
1373
+ },
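The weighted agreement measure described in the block above can be written down compactly. The sketch below reflects our reading of that description; the data layout and the response fractions in the toy example are our own assumptions, not the Fall '18 pretest values.

```python
# Sketch of the weighted "novice agreement" score: credit equal to the fraction of
# pretest students who chose the same answer as ChatGPT, skipping items where the
# plurality of students answered correctly, normalized by the score the
# plurality-answer "student key" itself would earn.

def novice_agreement_score(items) -> float:
    score, max_score = 0.0, 0.0
    for item in items:
        fractions = item["fractions"]            # answer letter -> fraction of students
        plurality = max(fractions, key=fractions.get)
        if plurality == item["correct"]:
            continue  # plurality was right: expert agreement and novice mimicry look alike
        score += fractions.get(item["chatgpt_answer"], 0.0)
        max_score += fractions[plurality]        # best possible credit on this item
    return 100.0 * score / max_score

# Toy example with invented fractions for a single item:
toy = [{"fractions": {"A": 0.60, "B": 0.25, "C": 0.15}, "correct": "C", "chatgpt_answer": "B"}]
print(round(novice_agreement_score(toy)))  # -> 42
```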
1374
+ {
1375
+ "type": "text",
1376
+ "text": "To our surprise, the performance of the otherwise-superior ChatGPT4 is substantially worse on this task. In fact, the GPT-4 model proved completely incapable of straying from the \"correct\" answers in response to this new prompt, and a variety of others which were tested. Its score using the same metric above was a 36- i.e., a person randomly trying to guess what a novice would choose would outperform it, because the model insisted on choosing correct answers, and these were rarely chosen by novice students in practice.",
1377
+ "bbox": [
1378
+ 81,
1379
+ 579,
1380
+ 486,
1381
+ 723
1382
+ ],
1383
+ "page_idx": 9
1384
+ },
1385
+ {
1386
+ "type": "text",
1387
+ "text": "Examining the free-responses of both ChatGPT3.5 and ChatGPT4 under the \"Novice\" prompt gives some insight into the reason it wasn't better able to mimic the behavior of an untrained student. For ChatGPT3.5, a significant majority of its explanations referenced concepts like forces and accelerations that would likely not be the basis for a true novice's analysis of the problem. In fact, it routinely mentioned specific cases Newton's laws by name, despite the explicit instruction that it should answer as a student who \"does not yet know or understand\" them. It seemed instead to be answering from the perspective of a student who knew about Newton's laws, but who was having trouble applying them cor",
1388
+ "bbox": [
1389
+ 81,
1390
+ 724,
1391
+ 486,
1392
+ 912
1393
+ ],
1394
+ "page_idx": 9
1395
+ },
1396
+ {
1397
+ "type": "text",
1398
+ "text": "directly. ChatGPT4's responses differed just slightly: it was better about avoiding explicit mention of Newton's laws, but would simply claim that its intuition coincided with the correct Newtonian framework using nontechnical terms. For example, analyzing an object moving at a constant velocity, it would say, \"the upward pull must be just as big as the downward pull, because it makes sense that the forces must be balanced in a case like this.\"",
1399
+ "bbox": [
1400
+ 511,
1401
+ 65,
1402
+ 916,
1403
+ 180
1404
+ ],
1405
+ "page_idx": 9
1406
+ },
1407
+ {
1408
+ "type": "text",
1409
+ "text": "The picture that emerges from both models is that of a traditional professor, who understands the physics well but struggles to recognize their students' difficulties because they can scarcely remember what it was like not to know the concepts by heart. We leave it for future work to see whether the \"novice\" performance could improve if we further refine the prompt, or perhaps engage in more back-and-forth with the model. However, we conjecture that it may be in the very nature of a LLM trained on a broad corpus of human text that it struggles to answer questions with a self-imposed blind spot. After all, cases of experts discussing physics correctly presumably represent the vast majority of cases in which physics was discussed in ChatGPT's training text. And there were likely some instances in which students mentioned Newton's laws, identified that they did not know how to use them, and proceeded to use them incorrectly, which is closer to what we saw in the responses from ChatGPT3.5. But seems much less likely to have seen cases where students mentioned Newton's laws by name, but only for the purpose of saying that they had never heard of Newton's laws and to describe their reasoning in the absence of them. As such, ChatGPT might inevitably struggle to identify cases where students were reasoning with a pre-Newtonian mindset based on an instruction like \"you do not know or understand Newton's laws\" absent a training set specifically designed for this purpose.",
1410
+ "bbox": [
1411
+ 511,
1412
+ 181,
1413
+ 916,
1414
+ 573
1415
+ ],
1416
+ "page_idx": 9
1417
+ },
1418
+ {
1419
+ "type": "text",
1420
+ "text": "V. CONCLUSIONS",
1421
+ "text_level": 1,
1422
+ "bbox": [
1423
+ 633,
1424
+ 604,
1425
+ 799,
1426
+ 618
1427
+ ],
1428
+ "page_idx": 9
1429
+ },
1430
+ {
1431
+ "type": "text",
1432
+ "text": "Our paper asks whether ChatGPT \"can project an understanding of introductory physics,\" meaning, \"can it display behaviors consistent with having an underlying understanding of kinematics and Newtonian dynamics, whether or not such underlying understanding actually exists?\" For Chat GPT3.5, the answer appears to be \"yes locally, not globally.\" In other words: in some isolated cases it responded to items from the FCI designed to test for conceptual mastery of introductory physics exactly the way an expert physicist might, despite (as far as we can tell) having never seen the question before and not having any specific programming dedicated to \"doing physics.\" ChatGPT3.5 displays this ability about the same percentage of the time as a B- or B-level student taking a college physics course. This is considerably more proficiency than many physicists would have predicted was imminent just a few years ago. On the other hand: when the GPT3.5 mask slips, what it reveals is so clearly devoid of understanding that it spoils the charade. If a",
1433
+ "bbox": [
1434
+ 511,
1435
+ 637,
1436
+ 916,
1437
+ 912
1438
+ ],
1439
+ "page_idx": 9
1440
+ },
1441
+ {
1442
+ "type": "page_number",
1443
+ "text": "10",
1444
+ "bbox": [
1445
+ 901,
1446
+ 34,
1447
+ 918,
1448
+ 45
1449
+ ],
1450
+ "page_idx": 9
1451
+ },
1452
+ {
1453
+ "type": "text",
1454
+ "text": "student submitted work which showed perfect mastery in one place and complete incoherence on the same set of topics immediately after, we would suspect cheating. ChatGPT may not be \"cheating\" per se, but it is not on the whole performing in a manner that one projects full-fledged expertise.",
1455
+ "bbox": [
1456
+ 81,
1457
+ 65,
1458
+ 488,
1459
+ 152
1460
+ ],
1461
+ "page_idx": 10
1462
+ },
1463
+ {
1464
+ "type": "text",
1465
+ "text": "The story of ChatGPT4 is quite different. There are many aspects of true \"understanding\" - such as the ability to engage in metaphor [29] or make use of multiple representations [30]-which differentiate experts and novices and which we do not attempt to test here. As such, perhaps we still cannot claim that ChatGPT4 \"globally\" projects understanding of introductory physics. But for whatever subset of conceptual understanding is captured by the FCI (a tool built precisely to capture some aspect of deeper comprehension) ChatGPT4 is completely capable of responding like an expert, making only one mistake in its responses (and even then, making a mistake that a nitpicking expert might be inclined to defend).",
1466
+ "bbox": [
1467
+ 81,
1468
+ 155,
1469
+ 488,
1470
+ 358
1471
+ ],
1472
+ "page_idx": 10
1473
+ },
1474
+ {
1475
+ "type": "text",
1476
+ "text": "There are at least four valuable questions to be asked in light of this performance, and we provide speculative discussion here in anticipation of further work. First, one could ask what ChatGPT's performance can tell us about the nature of some of the standardized assessment tools used in PER. One answer appears to be that it is important to treat these instruments as a whole, and not place too much emphasis on any single item. Assessments like the FCI were designed intentionally to probe central concepts along various different dimensions and with slight variations in the presentations of the concepts. ChatGPT3.5's oscillation between expertise and vacuousness underscores the necessity of this. The results also remind us that there is yet another distinction to be drawn between \"understanding\" a concept, in the sense of \"being able to apply it like an expert,\" and believing a concept, in the sense of having an internal satisfaction that the concept is true because it should necessarily be true. This distinction is significant also among our students: compare how relatively easy it is for a student to learn that \"both forces in the pair are always equal\" in the context of Newton's Third law with how few students actually feel that this fact is reasonable and intuitive when they first encounter it [31].",
1477
+ "bbox": [
1478
+ 81,
1479
+ 361,
1480
+ 488,
1481
+ 709
1482
+ ],
1483
+ "page_idx": 10
1484
+ },
1485
+ {
1486
+ "type": "text",
1487
+ "text": "Second, given its level of apparent understanding, one could investigate ChatGPT's potential use by students, because it (or tools like it) will soon be used by students in our classroom, which is already happening in other fields of study [32]. At the moment, a student relying on ChatGPT3.5 might be easy enough to spot, given its inconsistency. But because its faulty responses stand out so much, an enterprising student could conceivably ask it for help with every question and then learn the signs of when to disregard its answers. And of course, while ChatGPT4 is currently available only on a subscription basis and with substantial rate limitations that will prevent it from being used broadly by every student in an intro physics class, these restrictions are surely likely to recede",
1488
+ "bbox": [
1489
+ 81,
1490
+ 710,
1491
+ 488,
1492
+ 912
1493
+ ],
1494
+ "page_idx": 10
1495
+ },
1496
+ {
1497
+ "type": "text",
1498
+ "text": "with time. All this is to say, even at its current ability level, ChatGPT3.5 threatens the integrity of things like take-home tests, exam corrections, lab reports, written homework assignments, and online problem sets, at least in their current form. ChatGPT4's performance is significantly stronger still.",
1499
+ "bbox": [
1500
+ 511,
1501
+ 65,
1502
+ 919,
1503
+ 152
1504
+ ],
1505
+ "page_idx": 10
1506
+ },
1507
+ {
1508
+ "type": "text",
1509
+ "text": "Third, we considered the possible role that ChatGPT could have as an in-class teaching tool. In some fields where its responses have a higher success rate, like computer science, some faculty are encouraging their students to come to class with a ChatGPT tab open, and to use it to ask brief clarifying questions, or find the bugs in their sample code, so that many more of these minor questions can be handled than a professor could hope to field on their own [33]. At this juncture, we clearly do not recommend using ChatGPT3.5 this way in a physics classroom, as it's hit rate for correct answers and correct explanations is simply too low, and the danger of confusing a student (or undermining their confidence) by giving them information that is only statistically trustworthy is too great. However, ChatGPT4 seems likely to have crossed the crucial thresholds here, and as it becomes widely available it may indeed be possible for a student to use it as a \"copilot\" to aid their understanding in the same way that students currently might use tools like calculators and computer algebra systems to help them track the mathematics in their physics courses. We do not opine at this juncture on whether this kind of use would be desirable, except to say that the strength and ubiquity of ChatGPT and its successors might make it inevitable, in which case it may be preferable for educators now to be identifying the best ways to do so.",
1510
+ "bbox": [
1511
+ 511,
1512
+ 155,
1513
+ 919,
1514
+ 532
1515
+ ],
1516
+ "page_idx": 10
1517
+ },
1518
+ {
1519
+ "type": "text",
1520
+ "text": "Finally, we asked about using ChatGPT as a tool to support physics instructors outside the classroom, by assisting with preparation. Here, it is clear that ChatGPT is not able to play at least one of the roles we had initially conjectured: it cannot conjure a useful simulacrum of a novice mindset in a way that would allow an educator to test out their various examples and analogies to see how they might land with their students. But there are still some other ways it could be useful. It's partial understanding and relatively literal mode of interpretation, for example, make it an attractive tool to playtest possible exam questions to root out glitches, loopholes, or overdetermined facts. ChatGPT might also function as a tool to help with the challenging process [34] of drafting of new exam or homework questions in the first place.",
1521
+ "bbox": [
1522
+ 511,
1523
+ 534,
1524
+ 919,
1525
+ 751
1526
+ ],
1527
+ "page_idx": 10
1528
+ },
1529
+ {
1530
+ "type": "text",
1531
+ "text": "All of these motivating questions immediately suggest areas for follow-up projects. We intend to pursue some ourselves, and encourage others to do so as well. And we must note that, while history contains faulty predictions about the timeline of AI development in both directions [35], the current pace and nature of the field suggests it will continue to advance rapidly, as evidenced by the dramatic leap in performance from ChatGPT3.5 to ChatGPT4 which came about even within the time it has taken to prepare this work for publication. Indeed, the self-reinforcing nature of the field as AI models",
1532
+ "bbox": [
1533
+ 511,
1534
+ 753,
1535
+ 918,
1536
+ 912
1537
+ ],
1538
+ "page_idx": 10
1539
+ },
1540
+ {
1541
+ "type": "page_number",
1542
+ "text": "11",
1543
+ "bbox": [
1544
+ 898,
1545
+ 34,
1546
+ 918,
1547
+ 45
1548
+ ],
1549
+ "page_idx": 10
1550
+ },
1551
+ {
1552
+ "type": "text",
1553
+ "text": "learn to train themselves, coupled with the current exponential growth of computing power, suggests that the field as a whole could advance at an exponential or even superexponential rate [36]. Concepts which at the time of this writing can be dismissed as being beyond even ChatGPT4's capabilities may be old news by the time the reader finds this. This is all the more reason why continued work in this area is urgently needed.",
1554
+ "bbox": [
1555
+ 81,
1556
+ 65,
1557
+ 491,
1558
+ 183
1559
+ ],
1560
+ "page_idx": 11
1561
+ },
1562
+ {
1563
+ "type": "text",
1564
+ "text": "ACKNOWLEDGEMENT",
1565
+ "text_level": 1,
1566
+ "bbox": [
1567
+ 186,
1568
+ 209,
1569
+ 388,
1570
+ 223
1571
+ ],
1572
+ "page_idx": 11
1573
+ },
1574
+ {
1575
+ "type": "text",
1576
+ "text": "The author thanks Noah Finkelstein for useful discussions regarding the scope and framing of this work, and",
1577
+ "bbox": [
1578
+ 81,
1579
+ 241,
1580
+ 488,
1581
+ 271
1582
+ ],
1583
+ "page_idx": 11
1584
+ },
1585
+ {
1586
+ "type": "text",
1587
+ "text": "Mark Kissler, M.D., of the University of Colorado Hospital for suggesting the phrase \"project understanding\" as a way to clarify the central research question explored here.",
1588
+ "bbox": [
1589
+ 511,
1590
+ 65,
1591
+ 919,
1592
+ 125
1593
+ ],
1594
+ "page_idx": 11
1595
+ },
1596
+ {
1597
+ "type": "list",
1598
+ "sub_type": "ref_text",
1599
+ "list_items": [
1600
+ "[1] Gerd Kortemeyer. Could an artificial-intelligence agent pass an introductory physics course? arXiv preprint arXiv:2301.12127, 2023.",
1601
+ "[2] OpenAI. Chatgpt: Optimizing language models for dialogue, 2022.",
1602
+ "[3] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017.",
1603
+ "[4] Long Ouyang, Jeff Wu, Xu Jiang, Diogo Almeida, Carroll L Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. arXiv preprint arXiv:2203.02155, 2022.",
1604
+ "[5] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020.",
1605
+ "[6] Eva AM van Dis, Johan Bollen, Willem Zuidema, Robert van Rooij, and Claudi L Bockting. Chatgpt: five priorities for research. Nature, 614(7947):224-226, 2023.",
1606
+ "[7] The PyCoach. Chatgpt: The end of programming (as we know it), Dec 2022.",
1607
+ "[8] OpenAI. GPT-4 technical report. arXiv preprint arXiv:2303.08774, 2023.",
1608
+ "[9] Christian Terwiesch. Would chat gpt3 get a wharton mba? a prediction based on its performance in the operations management course. Mack Institute for Innovation Management at the Wharton School, University of Pennsylvania. Retrieved from: https://mackinstitute.wharton.upenn.edu/wpcontent/uploads/2023/01/ChristianTerwiesch-Chat-GTP-1.24.pdf [Date accessed: February 6th, 2023], 2023.",
1609
+ "[10] Jonathan H Choi, Kristin E Hickman, Amy Monahan, and Daniel Schwarcz. Chatgpt goes to law school. Available at SSRN, 2023.",
1610
+ "[11] Michael Bommarito II and Daniel Martin Katz. Gpt takes the bar exam. arXiv preprint arXiv:2212.14402, 2022.",
1611
+ "[12] Daniel Martin Katz, Michael James Bommarito, Shang Gao, and Pablo Arredondo. Gpt-4 passes the bar exam. Available at SSRN 4389233, 2023."
1612
+ ],
1613
+ "bbox": [
1614
+ 86,
1615
+ 325,
1616
+ 488,
1617
+ 909
1618
+ ],
1619
+ "page_idx": 11
1620
+ },
1621
+ {
1622
+ "type": "list",
1623
+ "sub_type": "ref_text",
1624
+ "list_items": [
1625
+ "[13] David Hestenes, Malcolm Wells, and Gregg Swackhamer. Force concept inventory. The physics teacher, 30(3):141-158, 1992.",
1626
+ "[14] Common crawl. Faq.",
1627
+ "[15] There may be an exception for raw mathematics—see Methods section.",
1628
+ "[16] Grant P Wiggins and Jay McTighe. Understanding by design. Ascd, 2005.",
1629
+ "[17] Marvin L Minsky. Why people think computers can't. AI magazine, 3(4):3-3, 1982.",
1630
+ "[18] Julian D Gifford and Noah D Finkelstein. Categorical framework for mathematical sense making in physics. Physical Review Physics Education Research, 16(2):020121, 2020.",
1631
+ "[19] Carl Wieman and Katherine Perkins. Transforming physics education. Physics today, 58(11):36, 2005.",
1632
+ "[20] Andrew Elby. Helping physics students learn how to learn. American Journal of Physics, 69(S1):S54-S64, 2001.",
1633
+ "[21] Jonathan Tuminaro and Edward F Redish. Understanding students' poor performance on mathematical problem solving in physics. In AIP Conference Proceedings, volume 720, pages 113-116. American Institute of Physics, 2004.",
1634
+ "[22] Bethany R Wilcox and Steven J Pollock. Investigating students' behavior and performance in online conceptual assessment. Physical Review Physics Education Research, 15(2):020145, 2019.",
1635
+ "[23] Paul F Christiano, Jan Leike, Tom Brown, Miljan Martic, Shane Legg, and Dario Amodei. Deep reinforcement learning from human preferences. Advances in neural information processing systems, 30, 2017.",
1636
+ "[24] Alexa Steinbrueck. Can chatgpt do image recognition?, Jan 2023.",
1637
+ "[25] Jing Han, Lei Bao, Li Chen, Tianfang Cai, Yuan Pi, Shaona Zhou, Yan Tu, and Kathleen Koenig. Dividing the force concept inventory into two equivalent half-length tests. Physical Review Special Topics-Physics Education Research, 11(1):010112, 2015.",
1638
+ "[26] Alan Van Heuvelen. Learning to think like a physicist: A review of research-based instructional strategies. American Journal of physics, 59(10):891-897, 1991.",
1639
+ "[27] Donald A Norman. Some observations on mental models. Mental models, 7(112):7-14, 1983."
1640
+ ],
1641
+ "bbox": [
1642
+ 514,
1643
+ 325,
1644
+ 919,
1645
+ 909
1646
+ ],
1647
+ "page_idx": 11
1648
+ },
1649
+ {
1650
+ "type": "page_number",
1651
+ "text": "12",
1652
+ "bbox": [
1653
+ 898,
1654
+ 34,
1655
+ 919,
1656
+ 46
1657
+ ],
1658
+ "page_idx": 11
1659
+ },
1660
+ {
1661
+ "type": "list",
1662
+ "sub_type": "ref_text",
1663
+ "list_items": [
1664
+ "[28] and, sadly, in a good portion of the time after studying physics.",
1665
+ "[29] Fredrik Jeppsson, Jesper Haglund, and Tamer G Amin. Varying use of conceptual metaphors across levels of expertise in thermodynamics. International Journal of Science Education, 37(5-6):780-805, 2015.",
1666
+ "[30] Patrick B Kohl and Noah D Finkelstein. Patterns of multiple representation use by experts and novices during physics problem solving. Physical review special topics—Physics education research, 4(1):010111, 2008.",
1667
+ "[31] CH Poon. Teaching newton's third law of motion in the presence of student preconception. Physics Education, 41(3):223, 2006."
1668
+ ],
1669
+ "bbox": [
1670
+ 84,
1671
+ 66,
1672
+ 488,
1673
+ 239
1674
+ ],
1675
+ "page_idx": 12
1676
+ },
1677
+ {
1678
+ "type": "list",
1679
+ "sub_type": "ref_text",
1680
+ "list_items": [
1681
+ "[32] Jürgen Rudolph, Samson Tan, and Shannon Tan. Chatgpt: Bullshit spewer or the end of traditional assessments in higher education? Journal of Applied Learning and Teaching, 6(1), 2023.",
1682
+ "[33] Thomas Rid. Five days in class with chatgpt, Jan 2023.",
1683
+ "[34] James H Smith and Alfred G Costantine. Writing better physics exams. The Physics Teacher, 26(3):138-144, 1988.",
1684
+ "[35] Stuart Armstrong, Kaj Sotala, and Sean S O hEigeartaigh. The errors, insights and lessons of famous AI predictions—and what they mean for the future. Journal of Experimental & Theoretical Artificial Intelligence, 26(3):317-342, 2014.",
1685
+ "[36] Nick Bostrom. Superintelligence: Paths, Dangers, Strategies. Oxford University Press, Oxford, 2014."
1686
+ ],
1687
+ "bbox": [
1688
+ 514,
1689
+ 66,
1690
+ 919,
1691
+ 267
1692
+ ],
1693
+ "page_idx": 12
1694
+ },
1695
+ {
1696
+ "type": "page_number",
1697
+ "text": "13",
1698
+ "bbox": [
1699
+ 898,
1700
+ 34,
1701
+ 919,
1702
+ 46
1703
+ ],
1704
+ "page_idx": 12
1705
+ }
1706
+ ]
2303.01xxx/2303.01067/4a89950d-6ff6-43c5-9ea0-d219ee099751_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.01xxx/2303.01067/4a89950d-6ff6-43c5-9ea0-d219ee099751_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a39a4b1dc637a303914a48ad72da8ffa7f64357719e94d2056d318c6730c94b2
3
+ size 257276
2303.01xxx/2303.01067/full.md ADDED
@@ -0,0 +1,318 @@
1
+ # AI and the FCI: Can ChatGPT project an understanding of introductory physics?
2
+
3
+ Colin G. West*
4
+
5
+ $^{1}$ Department of Physics, University of Colorado, Boulder, Colorado 80309, USA
6
+
7
+ (Dated: March 28, 2023)
8
+
9
+ ChatGPT is a groundbreaking "chatbot"-an AI interface built on a large language model that was trained on an enormous corpus of human text to emulate human conversation. Beyond its ability to converse in a plausible way, it has attracted attention for its ability to competently answer questions from the bar exam and from MBA coursework, and to provide useful assistance in writing computer code. These apparent abilities have prompted discussion of ChatGPT as both a threat to the integrity of higher education and conversely as a powerful teaching tool. In this work we present a preliminary analysis of how two versions of ChatGPT (ChatGPT3.5 and ChatGPT4) fare in the field of first-semester university physics, using a modified version of the Force Concept Inventory (FCI) to assess whether it can give correct responses to conceptual physics questions about kinematics and Newtonian dynamics. We demonstrate that, by some measures, ChatGPT3.5 can match or exceed the median performance of a university student who has completed one semester of college physics, though its performance is notably uneven and the results are nuanced. By these same measures, we find that ChatGPT4's performance is approaching the point of being indistinguishable from that of an expert physicist when it comes to introductory mechanics topics. After the completion of our work we became aware of Ref [1], which preceded us to publication and which completes an extensive analysis of the abilities of ChatGPT3.5 in a physics class, including a different modified version of the FCI. We view this work as confirming that portion of their results, and extending the analysis to ChatGPT4, which shows rapid and notable improvement in most, but not all respects.
10
+
11
+ # I. INTRODUCTION
12
+
13
+ "ChatGPT," in simplest terms, is a software application designed to mimic human conversation by producing and responding to text, a skill called "natural language processing." [2] Technically, it is based on a "large language model" (LLM) which makes use of two recent advances in the LLM field: the "Transformer" model [3] and "pretraining." [4] from whence arises "GPT" (it is a [G]enerative, [P]retrained [T]ransformer model). While a great deal has been written about the methods used to produce the ChatGPT system [5], it suffices here to note that it is one of a new generation of artificial language processing systems—sometimes colloquially called "chatbots"—which has garnered substantial attention in both academic [6] and popular press [7] for its ability to seemingly carry on a coherent conversation and complete other tasks. There are two versions of the model available to researchers at the time of this writing: ChatGPT3.5, which is available to the public, and ChatGPT4, which is available only under paid subscription and with substantial limitations on usage rates. The latter model is intended to deliver significantly superior performance than its predecessor [8]. In this paper, we will write "ChatGPT" when making statements that apply equally to both versions, and add the version numbers in other cases.
14
+
15
+ Recent papers have shown that, by at least some measures, ChatGPT's ability to converse like a human also allows it to seemingly display competence in fields like business and law. For example, its responses to questions
16
+
17
+ about Operations Management, a core topic in many MBA programs, were assessed in one study as being at the "B or B-" level. [9] Another work concluded that, "although ChatGPT[3.5] would have been a mediocre law student, its performance was sufficient to successfully earn a JD degree from a highly selective law school" [10]. A similar work projected that, given the surprisingly strong performance of ChatGPT3.5 on sample questions it could not have seen before, a similar LLM might be able to pass an actual bar exam "within the next 0-18 months." [11]. ChatGPT4 appears to have reached that benchmark approximately three months later [12]. The improved model also scores at very high levels—routinely at the 80th or 90th percentile or above—on many standardized tests, including notably the AP Physics C exam [8].
18
+
19
+ A central premise of much physics education work is that there is a difference between the ability to solve classically formulated "physics problems" and an ability to demonstrate true conceptual "understanding." The field of Physics Education Research (PER) has developed tools specifically to probe this distinction, and here, we deploy one of the field's most venerable instruments, the Force Concept Inventory (FCI) [13], to compare its apparent conceptual performance with that of typical students from an introductory physics course at the college level.
20
+
21
+ We note that the work of Ref [1], which was made available several weeks before this work, also includes an analysis of ChatGPT3.5 using the FCI. Since our works were completed without knowledge of one another and since ChatGPT's responses are inherently probabilistic, we view this work as serving as a confirmation of some of their results: namely, that ChatGPT can correctly answer roughly 60 to $65\%$ of the questions on the FCI, and that its answers are usually but not always relatively
22
+
23
+ stable. These results are discussed in more detail below, including some alternative analysis, and we then extend the work to reconsider the results with ChatGPT4. Certainly the authors of Ref [1] are likely pursuing similar work which we hope will also serve as useful confirmation or contrast with ours.
24
+
25
+ # II. BACKGROUND
26
+
27
+ ChatGPT was designed and optimized specifically for the art of conversing in a manner that would seem plausibly human [5]. To a loose approximation, it has analyzed and internalized the patterns of words in an enormous sample of human text (principally the "CommonCrawl" dataset [5]), which consists of things like books, news articles, Wikipedia pages, Reddit threads, and content from more specialized and technical fora like StackExchange and StackOverflow [5, 14]. From this data, it has an immense probabilistic model of how words tend to be fit together by a human being in various contexts. For example, it "knows" that a sentence which begins "after the fight, William hurt..." is likely to end with something like "...his hand." But it also recognizes that the sentence "after the fight scene, William Hurt..." might instead end with "...appeared to be injured and had to be helped off set." Crucially, this means that ChatGPT is a tool which manipulates and responds to language, and is not designed or trained to implement any model of an underlying concept [15] like business, law, or physics. But it is designed to talk like a person, and people who talk about business, law, and physics generally discuss these topics with some intelligence. Hence, even without any specific training in physics, its familiarity with the way physicists talk about physics may be enough for it to project at least an appearance of understanding.
28
+
29
+ The concept of "Understanding" can be deep and difficult to define, particularly in the context of learning a new topic [16]. We will generally leave that longstanding question [17] to the likes of AI researchers, cognitive scientists and philosophers, and for this reason we have been very careful with the wording in the title of this paper and elsewhere. We are not about to ask, because we are not equipped to know, whether ChatGPT understands introductory physics. We ask whether its behavior creates the appearance of understanding to the outside world- which might either be because it has succeeded in understanding, or merely because it has figured out how to display all the usual indicia of understanding. Hence, in the same manner that some poker player might "project an aura of confidence" either as a proud display of internal fortitude or as an act desperate bluffing, we consider here whether ChatGPT "projects" an understanding of introductory physics to the outside world.
30
+
31
+ There is a deep body of work in the literature of physics education about how to assess whether a student is demonstrating "understanding," [16, 18]. To start with the basics, we choose as our primary assessment tool
32
+
33
+ a classic instrument which has been used almost as a gold standard for decades: the Force Concept Inventory (FCI) [13]. This influential and heavily-studied assessment is a set of 30 questions designed to try to isolate and allow students to demonstrate a conceptual understanding of introductory kinematics and dynamics, as they might be covered in the first semester of introductory physics at the high school or university level. Although it has plenty of limitations, it was designed as a tool with an eye to distinguishing true conceptual mastery from the kinds of rote memorization, pattern-matching, and algorithmic calculation[19-21] which students sometimes use in order to pass conventional physics tests without ever truly knowing what they are doing, or why. Its status as a classic test for understanding in introductory physics makes it a logical starting point for our inquiry, though we encourage future work to extend beyond this starting point as well.
34
+
35
+ In short, we will offer ChatGPT a modified version of the questions from the FCI, and assess on multiple levels how successfully it can project understanding of these topics in intro physics. In Section III of this paper we describe in more detail how the assessment was modified and administered, and in Section IV we analyze ChatGPT's responses through various lenses and compare its performance to a large sample of real human students. Section V gives a summary of our resulting inferences and offers speculations about a number of open questions and directions for future work.
36
+
37
+ # III. METHODS
38
+
39
+ Because all of our reference points for what it looks like when someone projects an understanding of physics are based on the performance of other human beings, it was important when administering the FCI that we hew as closely as possible to the conditions under which our human students would encounter it. This was not entirely possible, and some modifications were necessary both to the questions themselves as well as to the process of giving them to ChatGPT. Such changes were kept to a minimum. In this section we detail those changes and the resulting procedure for assessing ChatGPT with the FCI.
40
+
41
+ # A. Suitability of the FCI
42
+
43
+ The FCI is a natural fit as a first step in assessing the capabilities of ChatGPT for several reasons mentioned above: it is focused on conceptual understanding rather than computation and memorization; it has been widely studied and validated as an assessment tool; and it has been given frequently to many students in introductory physics, providing natural benchmarks for comparison. But it is also potentially valuable for another reason. To preserve its integrity as an assessment tool, the providers of the FCI have taken steps to encourage practitioners who use it to keep its contents (and even more importantly, its solutions) from becoming widely available. These efforts have certainly not been flawlessly successful, but it remains the case that FCI text and solutions are difficult to find on the internet. Where they do exist, they are typically either password-protected on websites used by physics educators or at least "paywalled" on websites used by students. The relative scarcity of FCI solutions on the internet is consistent with the findings of prior work showing that access to the internet does not undermine the validity of tools like the FCI as conceptual assessments, even when students are observed to be copying question text for the presumable purpose of searching for its answers [22]. And in our case, since we are administering the FCI to a program that can only parse text, we can go even further: a significant portion of the FCI problems and/or solutions that can be found online exist as scanned images and/or hand-annotated PDFs, which means that they would not be parsed by the kinds of automated tools that scrape the web for text.
46
+
47
+ While the exact details of the text which was used to "train" ChatGPT are a proprietary secret, it is known that its reading material was largely drawn from the "CommonCrawl" corpus [14], an open repository of data scraped from text found on the public internet. Common Crawl allows users to query which domains it has indexed; we used this feature to verify that it has not indexed the handful of websites we are most familiar with that might contain solutions to the FCI, either as a PER tool or as a repository of solved problems for students. Beyond the CommonCrawl corpus, it is believed that most of ChatGPT's training data came from specifically generated human conversations with human feedback [23], which of course are highly unlikely to contain references to the FCI. And finally, in the rare locations that we were able to find FCI solutions online, they were typically stored apart from the questions themselves, meaning that there was no obvious reason an LLM would know to pair particular solution texts with particular problems even if it had access to them. For all these reasons, we believe that the FCI is likely not in the training text of ChatGPT and hence that its responses have to represent more than regurgitation of something it "remembers." In this respect our testing with the FCI is what researchers in AI and machine learning might term a "zero-shot task": a challenge in which the model is used to classify (and in this case, respond to) prompting text it has never seen before.
48
+
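The Common Crawl lookup mentioned in the paragraph above can be performed through the public CDX index API. The snippet below is a rough sketch: the crawl label and the example domain are placeholders of our own choosing, not the specific sites checked for this work.

```python
# Rough sketch: ask a Common Crawl CDX index whether any capture of a domain exists.
import requests

def in_common_crawl(domain: str, crawl: str = "CC-MAIN-2023-06") -> bool:
    url = f"https://index.commoncrawl.org/{crawl}-index"
    params = {"url": f"{domain}/*", "output": "json", "limit": 1}
    resp = requests.get(url, params=params, timeout=30)
    # One JSON record per matching capture; a 404 or an empty body means the
    # domain was not captured in this particular crawl.
    return resp.status_code == 200 and bool(resp.text.strip())

print(in_common_crawl("example.org"))  # placeholder domain
```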
49
+ # B. Modifying the FCI
50
+
51
+ The FCI is a 30-item sequential multiple-choice assessment, with each item containing five choices (four distractors and one unique correct answer or "key") [13]. Its items cover a range of topics from approximately the first third of a semester of college-level introductory physics: kinematics, projectile motion, free-fall, circular motion, and Newton's laws. This means that generally, it is well-suited to our task. Its one significant drawback is that 18 of its 30 items contain some kind of reference to a figure. ChatGPT3.5 is designed only to accept text input, and despite some clever attempts to feed it images in some sort of indirect or transformed state [24], it does not seem capable currently of extracting any meaningful information from a picture. ChatGPT4 advertises multimodal capability which might make it possible to prompt the model with a figure, but this feature is not publicly available as of this writing.
54
+
55
+ Of the 18 items with figures, we find that 11 of them can be modified by adding text that described what was shown in the figures without fundamentally altering the task at hand. In doing so, we take care to make sure that we did not provide additional clues or context that would make the problem simpler for ChatGPT than it would be for a typical physics student. For example, item seven involves a steel ball on a rope being swung in a circular path and then suddenly cut free. The question asks about the path of the ball after it is released, and the figure supplies several different possible trajectories. One way to describe these trajectories in words would be to say "tangential to the circle," "normal to the circle," etc. But we suspect this modification would substantially alter the difficulty of the problem. Instead, we translate this figure into words with reference to cardinal directions:
56
+
57
+ Consider a moment in the ball's motion when the ball is moving north. At that moment, the string breaks near the ball. Which of the following paths would the ball most closely follow after the string breaks?
58
+
59
+ (a) It will initially travel north, but will quickly begin to curve to the west
60
+ (b) It will travel north in a straight line
61
+ (c) It will travel northeast in a straight line
62
+ (d) It will initially travel east, but will quickly begin to curve north
63
+ (e) It will travel East in a straight line
64
+
65
+ We feel that this wording captures exactly and unambiguously all of the different paths indicated in the original figure, but without providing any additional hints. If anything, it may make the item slightly harder for ChatGPT than the original version.
66
+
67
+ Thirteen other items from the FCI were modified in a similar way. Six items without figures received minor text modifications that should not have affected the nature of the physics being tested. For example, in clusters of questions where some items referenced "the previous problem," we removed these references and simply restated the set-up from the prior problem, so that items could be asked about independently if needed. We also rephrased any questions that were left with an open-ended statement for the student to complete, since initial experiments showed that ChatGPT3.5 occasionally appeared "confused" when it was not explicitly asked a question. Hence, a question like item one, which originally read:
70
+
71
+ Two metal balls are the same size but one weighs twice as much as the other. The balls are dropped from the roof of a single story building at the same instant of time. The time it takes the balls to reach the ground below will be:
72
+
73
+ (a) About half as long for the heavier ball as for the lighter one
74
+ (b) About half as long for the lighter ball as for the heavier one
75
+ (c) About the same for both balls
76
+ (d) Considerably less for the heavier ball, but not necessarily half as long
77
+ (e) Considerably less for the lighter ball, but not necessarily half as long
78
+
79
+ Was rephrased to end with a direct question:
80
+
81
+ Two metal balls are the same size but one weighs twice as much as the other. The balls are dropped from the roof of a single story building at the same instant of time. Which of the following describes the time it takes the balls to reach the ground below?
82
+
83
+ (a) About half as long for the heavier ball as for the lighter one
84
+ (b) About half as long for the lighter ball as for the heavier one
85
+ (c) About the same for both balls
86
+ (d) Considerably less for the heavier ball, but not necessarily half as long
87
+ (e) Considerably less for the lighter ball, but not necessarily half as long
88
+
89
+ It seems unlikely that such changes impacted either the physics content or the difficulty of the items. Finally, four of the items were left entirely unchanged.
90
+
91
+ This meant that we were able to ask ChatGPT 23 of the FCI's 30 items. Although others have shown that it is possible to get a representative sample of a student's performance using only a subset of the FCI questions [25], it happens that the "unusable" questions are not uniformly distributed across all question categories. Removing problems 19 and 20, for example, meant removing the only questions on linear kinematics from the instrument. Although this affects our ability to make comparisons with results from the "full" FCI, we believe this difficulty can be overcome, as we shall discuss in Sec IV below.
92
+
93
+ <table><tr><td>Type of change</td><td>Items</td></tr><tr><td>None</td><td>1, 4, 29, 30</td></tr><tr><td>Minor text</td><td>2, 3, 13, 25, 26, 27</td></tr><tr><td>Figure description</td><td>5, 7, 9, 10, 11, 15, 16, 17, 18, 22, 23, 24, 28</td></tr><tr><td>Unusable</td><td>6, 8, 12, 14, 19, 20, 21</td></tr></table>
94
+
95
+ TABLE I: Table of items from the FCI and the ways that they were (or were not) modified for use in this work. Seven items were not used.
96
+
97
+ # C. Administering the FCI to ChatGPT
98
+
99
+ We began interacting with the version of ChatGPT which existed during the month of January, 2023, and used our initial explorations there to develop the guidelines we used for how to pose questions. Instances of conversations with ChatGPT are completely separate, in the sense that ChatGPT does not "remember" content from one chat in a separate chat, so variations of a question can be asked in parallel to identify the best practices for posing the questions. On the other hand, within a conversation ChatGPT can remember content back to a depth of about 3000 words. But in practice, this is not enough to remember a full administration of the FCI. This is part of the reason that we chose to rephrase each question so that it could stand alone, rather than referencing things from "the previous question," etc.
100
+
101
+ Our questions to ChatGPT were each posed in the following format:
102
+
103
+ Two metal balls are the same size but one weighs twice as much as the other. The balls are dropped from the roof of a single story building at the same instant of time. Which of the following describes the time it takes the balls to reach the ground below?
104
+
105
+ (a) About half as long for the heavier ball as for the lighter one
106
+ (b) About half as long for the lighter ball as for the heavier one
107
+ (c) About the same for both balls
108
+ (d) Considerably less for the heavier ball, but not necessarily half as long
109
+ (e) Considerably less for the lighter ball, but not necessarily half as long
+
+ Please answer with a letter (A,B,C,D,or E) and a brief explanation of your reasoning.
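+
+ For context, all of the administrations in this study were carried out by hand through the ChatGPT web interface. Purely as an illustrative sketch, a "BASIC"-style item could also be sent programmatically through OpenAI's chat API roughly as follows; the model name, the helper function, and the way the prompt is assembled are our own assumptions and were not part of the protocol used here.
+
+ ```python
+ # Hypothetical sketch only: the study itself pasted each item into the ChatGPT web UI.
+ # Assumes the openai Python package (v1.x) and an API key in the OPENAI_API_KEY variable.
+ from openai import OpenAI
+
+ client = OpenAI()
+
+ BASIC_SUFFIX = ("Please answer with a letter (A,B,C,D,or E) "
+                 "and a brief explanation of your reasoning.")
+
+ def ask_item(question_text: str, model: str = "gpt-3.5-turbo") -> str:
+     """Send one stand-alone FCI-style item in the 'BASIC' format and return the reply."""
+     prompt = f"{question_text}\n\n{BASIC_SUFFIX}"
+     response = client.chat.completions.create(
+         model=model,
+         messages=[{"role": "user", "content": prompt}],
+     )
+     return response.choices[0].message.content
+ ```
+
+ Because each call above opens a fresh exchange with no shared history, it is consistent with the stand-alone phrasing of the items described earlier, where no question relies on "the previous problem."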
110
+
111
+ A prior work, in which bar exam questions were administered to ChatGPT, found various tricks that caused it to perform better with multiple-choice questions [11] (the art of tweaking the input to an LLM to optimize its response in this fashion is called "prompt engineering"). In particular, they found that, rather than asking
112
+
113
+ ChatGPT for a single answer, it performed better when it was asked to rank its top three choices (though in actuality only its top choice was scored). However, other similar work [10] fails to note an effect of this kind, and our own experiments with this alternative prompt structure similarly showed no improvement in the model's overall performance.
114
+
115
+ We also experimented with two additional procedures. First, we took advantage of a feature offered by ChatGPT which allows the user to request that it "regenerate response" after it finishes its output. At a gross level, this feature is similar to asking an algorithm for numerically solving some equation to start again with a different random initial guess. One expects that the results will generally converge to similar outputs, but perhaps not arrive at exactly the same point. We used this feature as a rudimentary way to explore the "stability" of ChatGPT's responses, which might in turn be thought of as a proxy for its "confidence" in its answers. Our very preliminary results based on this experimentation are discussed in Sec. IV A 3.
116
+
117
+ Finally, we tested a very different prompt with a very different objective, which we call the "NOVICE" prompt. In this prompt each question was posed to ChatGPT in the following format:
118
+
119
+ Please answer the following question as though you were a novice high-school student who has not studied physics and does not yet understand Newton's laws:
120
+
121
+ A large truck collides head-on with a small compact car. During the collision, which of the following is true:
122
+
123
+ (a) the truck exerts a greater amount of force on the car than the car exerts on the truck.
124
+ (b) the car exerts a greater amount of force on the truck than the truck exerts on the car.
125
+ (c) neither exerts a force on the other, the car gets smashed simply because it gets in the way of the truck.
126
+ (d) the truck exerts a force on the car but the car does not exert a force on the truck.
127
+ (e) the truck exerts the same amount of force on the car as the car exerts on the truck.
+
+ Just give us your best guess. We know you may not know the correct answer, but we'd like to know which answer makes the most sense to you without any formal physics training.
128
+
129
+ We landed on this particular prompt after some trial and error—see Sec IV B. Using this framing, we investigate whether ChatGPT could potentially be useful as a tool for instructors to be able to preview and probe the thinking of a sample "novice" student while they prepare their teaching materials.
130
+
131
+ Initially, we administered the FCI with both the "BASIC" and "NOVICE" prompts, including stability analysis, to ChatGPT3.5 during the weeks of Feb 13 and Feb 20, 2023. Notably, between our initial experimentation in January and the final administrations of the FCI which generated the results below, there was a significant update to the model which focused on improving its mathematical capabilities, following a series of relatively high-profile examples where users were able to get ChatGPT to espouse manifestly untrue statements about elementary mathematics. Since none of the items in the FCI involve calculations, we think it is unlikely that the update had much impact on our main results. After the release of ChatGPT4, we repeated the same administrations to the new model during the week of March 13th. Results from both models are presented in the remainder of this paper.
132
+
133
+ # IV. RESULTS
134
+
135
+ We begin with an analysis of ChatGPT's responses to the 23 usable FCI questions in the modified form described above. For the first, "BASIC" administration of these problems, we analyze the responses on both a quantitative level, focused purely on its multiple-choice response, and on a qualitative one, by analyzing its stated reason for the answer it chose.
136
+
137
+ # A. “BASIC” responses
138
+
139
+ # 1. Answer Choices
140
+
141
+ When prompted with the 23 usable FCI questions using the "BASIC" prompt format, ChatGPT3.5 gave a correct answer for fifteen of them. Its correct answers are roughly evenly distributed among the unaltered problems and the modified problems which originally contained figures, though it struggles with a certain subset of the modified problems which involve "spatial reasoning," discussed in more detail in Sec IV A 2.
142
+
143
+ It is not obvious how we should interpret this number in order to make comparisons to human students taking the FCI. It is tempting to assign ChatGPT a "score" of $15/23 \approx 65\%$ , but it is not entirely clear that this is fair. An argument could instead be made that its "score" is $15/30 = 50\%$ , because of course one important aspect of "understanding" a physics problem is the ability to synthesize the data being presented across multiple representations [26]. Since ChatGPT simply cannot comprehend a question that requires reference to a figure, it could be said that it manifestly displays no understanding of that particular item.
144
+
145
+ In either case, however, ChatGPT3.5's quantitative performance compares quite adequately with the posttest results that are typical for students taking the FCI at the end of their first semester of college-level physics.
146
+
147
+ We can make a direct comparison to one of the author's previous classes in 2018, in which 415 students took the FCI at the end of the term, and produced the distribution of scores found in figure 1. In that distribution, the median student score was a $56\%$ , meaning that depending on how charitably one treats its partial results, ChatGPT was either just below the performance of a typical student or else perhaps a nontrivial cut above average. As an alternative perspective, the median letter grades of the students scoring around a $50\%$ or a $65\%$ were a B- and a B+, respectively.
148
+
149
+ ![](images/d531a3288bd5ef80b007e3abbb4e213aa72eb3c35c3fd8222142c47e1fd38949.jpg)
150
+ FIG. 1: The distribution of FCI scores at the end of a semester of college physics at a large public R1 university. ChatGPT3.5's performance using "BASIC" prompting would put it either in the bin containing the median (50%) or in the bin just above (65%)
151
+
152
+ When we repeated the analysis using ChatGPT4, we found that the model now responded "correctly" to 22 of the 23 questions. Even more strikingly, the question which it "missed" was item 26, which asks about the behavior of a box being pushed against friction. The answer which it chooses, "D," would in fact be correct if air resistance were to be considered non-negligible, and the problem text does not specify this condition either way. Students are presumably expected to make the standard (and physically reasonable) assumption that the air resistance on a box being pushed by a human is a small effect compared to other forces in the problem. ChatGPT perhaps betrays some "inhuman" behavior by not making a similar assumption. Nevertheless, when we prompted the model in a new instance with the same question and added the text "assume air resistance is negligible," it gives the correct answer here as well.
153
+
154
+ Needless to say, the performance of ChatGPT4 compares to the very uppermost decile of performance by physics students in our post-test data, and of course its near-perfect responses no longer show any weakness for problems that originally relied on figures and/or spatial reasoning.
155
+
156
+ # 2. Free Responses
157
+
158
+ As part of the "BASIC" prompt, we asked ChatGPT to provide "a brief explanation of [its] reasoning." This gives us an additional dimension along which to assess its performance, albeit a qualitative one.
159
+
160
+ As a baseline, we read through each response and made a decision about whether the answer was completely accurate, which is the level of performance we would expect from an expert physicist considering such introductory concepts. In this determination we disregarded tone and focused only on content. So, ChatGPT was not penalized if its correct explanation sounded stilted, in a way that suggested machine learning rather than a human intelligence behind the response. On the other hand, we marked "wrong" any response that contained an error which a trained physicist would not reasonably have made, no matter how ancillary the error was to ChatGPT's analysis. Certainly, these determinations were simply a judgement call on the part of the author and of course were not blind to the fact that the responses were generated by a language model. However, we feel that in most cases the determinations were relatively black-and-white, since they centered on a decision about whether the statements made in the explanation were factually true.
161
+
162
+ In our judgement, 10 of ChatGPT3.5's 23 free responses (43%) fully passed this "expert test." These responses were typically relatively terse, invoking the correct constellation of concepts, and often making explicit note of facts or concepts which ruled out wrong answers as well as the facts which supported the correct one. For example, item 13 of the FCI says
163
+
164
+ "A boy throws a steel ball straight up. Consider the motion of the ball only after it has left the boy's hand but before it touches the ground, and assume that forces exerted by the air are negligible. For these conditions, what are the force(s) acting on the ball?
165
+
166
+ (a) A downward force of gravity along with a steadily decreasing upward force.
167
+ (b) A steadily decreasing upward force from the moment it leaves the boy's hand until it reaches its highest point; on the way down there is a steadily increasing downward force of gravity as the object gets closer to the earth.
168
+ (c) An almost constant downward force of gravity along with an upward force that steadily decreases until the ball reaches its highest point; on the way down there is only a constant downward force of gravity.
169
+ (d) An almost constant downward force of gravity only.
170
+ (e) None of the above. The ball falls back to ground because of its natural tendency to rest on the surface of the earth.
171
+
172
+ Note the absence of terms like "projectile motion," which might cause a language model (or a student!) to mechanically regurgitate the fact that only the force of gravity is acting. Despite this challenge, ChatGPT3.5's response, which was representative of its performance on these ten "entirely correct" free responses, was as follows:
173
+
174
+ # (D) An almost constant downward force of gravity only.
175
+
176
+ After the steel ball leaves the boy's hand, the only force acting on it is the force of gravity, which is directed downwards. This force is almost constant throughout the motion of the ball, with only slight variations due to changes in the distance between the ball and the center of the Earth. There is no upward force acting on the ball, as the ball is not being propelled upwards once it leaves the boy's hand. As a result, the ball will continue to rise until the force of gravity slows it down and eventually reverses its direction, causing it to fall back to the ground. Therefore, the correct statement is (D) An almost constant downward force of gravity only.
177
+
178
+ In addition to identifying the correct force, it makes specific note of the absence of any ongoing propulsive force (For comparison, in the Fall '18 post-test student data, $38\%$ of students chose one of the responses which includes an upward force during the motion). We note also that ChatGPT3.5 goes out of its way to comment on and correctly explain why the correct answer choice has the qualifier "almost-constant" on the force of gravity. This level of commentary is in fact deeper than we would typically expect of a first-semester physics student, at least unprompted. In fact, the inclusion of this detail in the question strikes us as the kind of thing an instructor does to make sure that their questions are airtight so that the strongest students have no room to complain or overthink.
179
+
180
+ Of course, not all of ChatGPT3.5's responses reach this level of accuracy. There were five free-responses which we judged to be more correct than not (in fact, typically almost entirely correct), but containing at least one patently untrue statement which simply could not have been made by an expert physicist. These minor errors were often "surprising" in the sense that they did not necessarily represent the kinds of classic misapprehensions we have come to expect from new students on similar questions. And the errors were typically "unforced" in the sense that they did not appear in the portion of the explanation that was strictly necessary to identify the correct answer. For example, consider item 22 of the FCI, which asks about a rocket in deep space that was moving horizontally from left to right at a constant speed until its engine produced a period of constant upward force. The question asks about the speed of the rocket while the engine is on (whether it is increasing, decreasing, constant, etc).
181
+
182
+ ChatGPT3.5 correctly identifies that the speed is increasing while the engine is on, but says the following:
183
+
184
+ Since the rocket is subject to no outside forces, its motion is determined solely by the forces it experiences from its engine. When the engine is turned on, the rocket is subject to a constant thrust that is perpendicular to its initial motion. This thrust produces an acceleration that is also perpendicular to the rocket's initial motion. As a result, the rocket moves along a circular path, with its speed increasing as it moves from point "b" to point "c". Therefore, the answer is (B) its speed is continuously increasing.
185
+
186
+ All of the elements of the correct chain of reasoning are present: (1) the net force in this case is equal to the force of the engine, (2) when the engine is on, the net force is nonzero, (3) a nonzero net force causes an acceleration, and (4) the acceleration is not directed against the motion and hence it will cause the velocity to increase. But ChatGPT3.5 makes an unforced error by trying to describe the nature of the rocket's motion during this time, even though this was not part of the question. It identifies the rocket's motion as "circular," perhaps because in its corpus of physics there is a strong association between discussions of circular motion and "forces which are perpendicular to the direction of motion." Missing the obvious detail that this particular force is only initially perpendicular to the motion, it draws an incorrect conclusion. And the mistake is as surprising as it is unnecessary: we do not imagine that many freshman physics students are picturing a rocket which becomes locked into something like cyclotron motion when they read this problem, even though many of them found it difficult (57% chose incorrectly on the Fall '18 post-test).
187
+
188
+ These five responses correspond with the other five problems for which ChatGPT3.5 chose the correct multiple-choice answer, although we graded these responses separately and it was not a foregone conclusion that it would coincide in this way.
189
+
190
+ After 10 fully-correct responses and five nearly-correct responses that nevertheless contain unambiguous errors, ChatGPT3.5 gives eight responses which are nowhere near the mark. These responses typically contain an assortment of words and phrases which are genuinely relevant to answering the problem. Often, entire sentences form accurate statements about the underlying physics. But the ideas are linked up in ways which are grammatically correct but often logically unreasonable or inconsistent, and the responses typically also contain discussions of details that are wholly irrelevant, intermixed on equal footing with the key concepts. The analyses may swerve unpredictably onto different tracks, or arrive at conclusions entirely at odds with the preceding arguments, as if the model had no idea what it had just said, and yet it continues confidently to claim an inescapable conclusion. They certainly do not project anything like
191
+
192
+ understanding; quite to the contrary, if given by a human they would be taken as specific evidence that the writer is regurgitating memorized words and does not know what they are talking about.
193
+
194
+ In defense of ChatGPT3.5, half of the eight problems in which the free-response is wide of the mark are problems that originally contained figures. In particular, these four problems are the ones in which the answers rely most on spatial reasoning and on establishing some sense of direction, and ChatGPT3.5 seems simply unable to recognize the directional system being used.
195
+
196
+ Consider for example its response to item seven: the problem regarding a ball released during circular motion which was described above. In our wording of the question, the ball is moving north at the time its string is cut and it is allowed to move off tangent to the circle, so the correct answer about its subsequent motion should be that it travels in a straight line to the north. ChatGPT3.5 comes close to understanding this, but appears confused about two points relating to the directional system, explaining:
197
+
198
+ When the string breaks, the ball will continue to move in a straight line tangential to the circular path it was following at that moment. Since the ball was moving north, the tangential velocity of the ball will be directed to the east.
199
+
200
+ However, the force of gravity will also be acting on the ball, pulling it downward towards the Earth. As a result, the ball will follow a curved path towards the west as it falls.
201
+
202
+ Therefore, the correct statement is (A) It will initially travel north, but will quickly begin to curve to the west.
203
+
204
+ Aspects of this response are perfectly reasonable. The first sentence is entirely true, and so is the third, though most students answering this problem would ignore this vertical motion and answer only with respect to its horizontal motion, as the figure suggests. But from these two true statements, ChatGPT3.5 is unable to draw any correct conclusions about the real-world direction, making the bizarre leaps from "tangential" to "east" and "downward" to "west." One possible reason for this is that while there are good reasons to expect terms like "tangential" and "gravity" to be preferentially associated in the training corpus with discussion of balls released from circular motion, there is no reason that a particular direction like "north" would be similarly associated. Hence, in trying to model how a human would talk about this problem, ChatGPT3.5 gravitates correctly to details that all expert humans would mention, but then has to pick randomly to fill in a direction, because presumably all the cardinal directions are represented roughly equally in the training text. ChatGPT3.5 has succeeded at saying something that conveys general understanding based on general patterns, but failed to show specific understanding of the details in the instance at hand.
205
+
206
+ ChatGPT4, by contrast, gave no answers in this "highly confused" category. We assessed 20 of its 23 free responses as being "exactly correct," as one would expect from an expert physicist. The GPT4 model is notably more concise in general than its predecessor, and this brevity may have helped it avoid some of the "unforced errors" seen previously. But it also succeeded on nearly all of the problems for which ChatGPT3.5 was completely confused. Consider item seven, for example: the ball released from circular motion which ChatGPT3.5 assessed correctly in general terms but failed to answer correctly because it could not identify which direction was "tangential" in the particular case at hand. ChatGPT4 instead gives a correct answer with perfectly correct reasoning:
207
+
208
+ When the string breaks, the ball will continue in the direction it was moving at the exact moment the string broke, due to its inertia. Since the ball was moving north when the string broke, it will continue to travel north in a straight line.
209
+
210
+ Like ChatGPT3.5, ChatGPT4 has identified a correct general concept to invoke (inertia), but has also succeeded at drawing a specific inference in this case (note that this answer was "stable" and not a lucky guess; see below). Without more public information about the improvements in ChatGPT4 and the workings of its head layers in this case, we cannot know how it arrives here. But it seems plausible that it has been able to extract a deeper level of patterns from its training text. The base pattern is that "inertia" and "motion in the tangential direction" are commonly discussed in this context. But within that pattern is a deeper one: in examples where humans discuss such questions, the solution typically refers to the exact same direction (left, north, $+\mathrm{y}$, etc.) which was referenced in the problem next to words referring to its motion at the moment the ball is released. Whether or not this is exactly why ChatGPT4 performs better on this problem, it is a good example of how increasing the model's inferential depth can allow it to project an apparently deeper understanding of physics without having been "taught" any more explicit physics concepts.
211
+
212
+ ChatGPT4's three remaining responses were all "nearly perfect," with solid reasoning and incidental minor errors. For example, in response to item 16, which asks about the forces on an elevator moving at constant velocity, it says that there must be no net force on the elevator and hence that "any forces acting on the object must come as equal and opposite pairs." This leads it to the correct conclusion that, in this particular case, the upward tension from the elevator cable has the same magnitude as the downward weight force. But an expert analyzing the problem would not quite have made the statement that "any forces" acting MUST come as pairs—an elevator with two cables, for example, could still move at constant velocity without any two forces existing as an equal and opposite set. But this level of "error," which
213
+
214
+ represents ChatGPT4's worst performance, is far more minor than the worst mistakes of ChatGPT3.5.
215
+
216
+ # 3. Stability
217
+
218
+ Because there is a probabilistic element inherent in ChatGPT's operation, we make a brief study of the "stability" of the models' responses by experimenting with two types of perturbations. This investigation is suggestive but very preliminary; deeper work could certainly be done on this topic.
219
+
220
+ First, we make use of ChatGPT's "regenerate response" button, which allows us to simply request that the model start over its process with the exact same prompt, albeit with some variation in whatever internal parameters it uses to traverse its vast network of possible responses. This is analogous to asking a numerical equation solver to start over with a different random initial guess, and just as in that case, one would hope that a stable method would consistently produce comparable outputs.
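+
+ As a rough illustration of this kind of stability probe (ours, not the procedure used in the study, which simply clicked "regenerate response" in the web interface), one could re-sample the same prompt several times and report how often the regenerations agree on an answer letter. The letter-extraction heuristic below is a simplifying assumption.
+
+ ```python
+ # Hypothetical sketch: treat the share of regenerations agreeing with the modal
+ # answer letter as a crude proxy for the "stability" of a response.
+ import re
+ from collections import Counter
+
+ def extract_letter(reply: str) -> str | None:
+     """Pull the first stand-alone answer letter (A-E) out of a reply; crude by design."""
+     match = re.search(r"\b([A-E])\b", reply.upper())
+     return match.group(1) if match else None
+
+ def stability(replies: list[str]) -> tuple[str | None, float]:
+     """Return the modal answer letter and the fraction of regenerations that chose it."""
+     letters = [l for l in (extract_letter(r) for r in replies) if l is not None]
+     if not letters:
+         return None, 0.0
+     letter, count = Counter(letters).most_common(1)[0]
+     return letter, count / len(letters)
+
+ # e.g. three "regenerations" of item 13 that all land on the same choice:
+ print(stability(["(D) An almost constant downward force...", "D", "Answer: D"]))  # ('D', 1.0)
+ ```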
221
+
222
+ For this first, initial probe of the Chatbot's stability, we regenerated each response just three times, but already some patterns seemed clear. The only cases where ChatGPT3.5 consistently changed its answers between regenerations were a subset of the eight questions for which we judged that its written responses were completely confused. There were two other cases of correct answers where the model occasionally changed its mind when the responses were regenerated, but in those cases it stuck with the correct answers a clear majority of the time. This pattern is perhaps somehow comforting or impressive for the question of ChatGPT's ability to project understanding: when it knows the answer, it knows the answer with some stability. When it is flying by the seat of its pants, it also does not care what destination it flies to.
223
+
224
+ The other form of perturbation which should be explored is perturbations to the input, rather than to the initial starting conditions of the model themselves. Of course, because we cannot control the inner workings of the model and put it back in the exact same internal state every time, it is not possible to disentangle these two. Nevertheless, we attempted for a subset of the problems to feed ChatGPT3.5 variations on the same questions but with irrelevant words and sentence structures switched around (e.g. "A boy throws a steel ball straight up" becomes "a rock is tossed straight upward by a girl"). Once again, we found that this generally did not affect the response if the model got the initial problem right, but it could change the responses for the problems it was already getting completely wrong.
225
+
226
+ Our discussion here has focused on ChatGPT3.5 because we found that ChatGPT4's responses were completely insensitive to either of the perturbation types described above. Perhaps this is not surprising: ChatGPT3.5's responses were only unstable on problems
227
+
228
+ where its responses were confused and incoherent, and ChatGPT4 never reached that level of confusion.
229
+
230
+ # B. "NOVICE" Responses
231
+
232
+ This paper is focused on the question of whether ChatGPT's behavior is consistent with "understanding" introductory physics, but part of our motivation for asking that question is to understand how it may affect physics classrooms and physics pedagogy more broadly. One way it might be of use to an instructor would be as a way to gain insight into the unfamiliar mind of a novice physics student. If ChatGPT were able to successfully answer questions in a manner that plausibly mimicked typical student mental models [27], it might be of great value in testing and preparing lesson plans, refining exam questions, etc.
233
+
234
+ The question of whether ChatGPT can do this is the worthy subject of its own project, but we make an initial stab at it here to establish some baseline results and investigate whether such deeper work is likely to bear fruit. To do this, we gave the modified FCI questions to ChatGPT again, but this time using a prompt which asks it to answer as though it had not yet studied any introductory physics. This prompt was developed through trial and error using a small subset of FCI problems for which we had strong evidence from our own students' prior pretests about how they might answer. These were items 4 and 26 from the FCI. Item 4 tests Newton's 3rd law in the unintuitive context of an asymmetric collision between a truck and a car. Prior to studying physics [28], many students (nearly $75\%$ in our Fall '18 pretest) believe that the truck will exert a greater force on the car than vice versa. We found that initially, even when we prompted ChatGPT to answer as though it had not studied Newton's Laws, it continued to give the expert answer that the forces were equal. It was only when we modified the prompt to include a reminder of our desired framing both before and after the question text itself that we saw it give the infamous novice answer described above. We saw similar behavior in question 26, which tests concepts about the balance of forces in the context of kinetic friction.
235
+
236
+ The results from ChatGPT3.5 were mixed. To quantitatively evaluate the performance, we scored its answers on a "key" comprising all of the most common responses given by students in our Fall '18 pretest data, 10 of which were already correct answers. ChatGPT's answers to questions with the "NOVICE" prompt matched these most-common student answers on 11 occasions out of 23. In particular, seven of these 11 matches came in situations where the most common pretest answer was itself a correct answer. While it is not inherently bad to match in these cases, because an educator using ChatGPT to test out possible student responses would want to know about cases where even an untrained student is likely to get the question right, it is discouraging to
237
+
238
+ see comparatively few cases where the plurality student pretest opinion was wrong and ChatGPT was also able to identify the distractor they would find most compelling.
239
+
240
+ Curiously, the problem was not that ChatGPT did "too well" on the problem set. In fact, ChatGPT's "NOVICE" responses to the 23 modified FCI questions were right only nine times. There were only three such occasions where ChatGPT got a question right which the pretest plurality got wrong. But clearly, in the remaining cases, it did not agree with the plurality about which wrong answer seemed most intuitive.
241
+
242
+ This binary, agree-or-disagree framework is a little unfair to ChatGPT. Imagine a scenario where $50\%$ of students choose wrong answer "A," $49\%$ choose wrong answer "B," and only $1\%$ choose the correct answer "C." If ChatGPT were to offer answer "B" when roleplaying as a student, it would hardly represent a glaring failure. To capture some of this nuance, we propose a simple measure where ChatGPT is given a point for each answer, weighted by the fraction of students from our Fall '18 pretest who chose that answer (so, half a point if $50\%$ of students chose the same answer). We exclude problems which the plurality of students got right, since in such cases we cannot tell anything about whether ChatGPT is understanding novice thinking or simply agreeing with their conclusion as an expert would. Summing these scores and then normalizing by the equivalent score of the student key itself (which represents the maximum possible number of points ChatGPT could score) gives ChatGPT3.5 a score of 63. Note that if we run this calculation for the average performance of someone guessing randomly, this particular distribution of student responses yields a baseline score of 58. Hence, ChatGPT3.5's performance is only a little better than random here.
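+
+ A minimal sketch of this weighting scheme is given below, using made-up response fractions rather than the actual Fall '18 distributions, which are not reproduced in this paper.
+
+ ```python
+ # Hypothetical sketch of the weighted "novice agreement" score described above.
+ def novice_score(chatgpt_answers, student_fracs):
+     """
+     chatgpt_answers: {item: letter ChatGPT chose under the NOVICE prompt}
+     student_fracs:   {item: {letter: fraction of pretest students choosing that letter}}
+     Items whose plurality pretest answer was correct are assumed to be excluded already.
+     """
+     raw = sum(student_fracs[item].get(ans, 0.0) for item, ans in chatgpt_answers.items())
+     # Normalize by the score of the student "key" itself (always the plurality answer).
+     best = sum(max(fracs.values()) for fracs in student_fracs.values())
+     return 100 * raw / best
+
+ # Toy example with two items; the fractions are illustrative, not the paper's data.
+ fracs = {
+     4: {"A": 0.75, "E": 0.20, "C": 0.05},
+     26: {"B": 0.55, "D": 0.30, "A": 0.15},
+ }
+ print(novice_score({4: "A", 26: "D"}, fracs))  # (0.75 + 0.30) / (0.75 + 0.55) * 100 ≈ 80.8
+ # A uniform random guesser averages one fifth of the probability mass per item,
+ # i.e. (0.2 + 0.2) / 1.30 * 100 ≈ 30.8 on this toy data.
+ ```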
243
+
244
+ To our surprise, the performance of the otherwise-superior ChatGPT4 is substantially worse on this task. In fact, the GPT-4 model proved completely incapable of straying from the "correct" answers in response to this new prompt, and a variety of others which were tested. Its score using the same metric above was a 36; i.e., a person randomly trying to guess what a novice would choose would outperform it, because the model insisted on choosing correct answers, and these were rarely chosen by novice students in practice.
245
+
246
+ Examining the free-responses of both ChatGPT3.5 and ChatGPT4 under the "Novice" prompt gives some insight into the reason they weren't better able to mimic the behavior of an untrained student. For ChatGPT3.5, a significant majority of its explanations referenced concepts like forces and accelerations that would likely not be the basis for a true novice's analysis of the problem. In fact, it routinely mentioned specific cases of Newton's laws by name, despite the explicit instruction that it should answer as a student who "does not yet know or understand" them. It seemed instead to be answering from the perspective of a student who knew about Newton's laws, but who was having trouble applying them correctly.
247
+
248
+ ChatGPT4's responses differed just slightly: it was better about avoiding explicit mention of Newton's laws, but would simply claim that its intuition coincided with the correct Newtonian framework using nontechnical terms. For example, analyzing an object moving at a constant velocity, it would say, "the upward pull must be just as big as the downward pull, because it makes sense that the forces must be balanced in a case like this."
249
+
250
+ The picture that emerges from both models is that of a traditional professor, who understands the physics well but struggles to recognize their students' difficulties because they can scarcely remember what it was like not to know the concepts by heart. We leave it for future work to see whether the "novice" performance could improve if we further refine the prompt, or perhaps engage in more back-and-forth with the model. However, we conjecture that it may be in the very nature of an LLM trained on a broad corpus of human text that it struggles to answer questions with a self-imposed blind spot. After all, cases of experts discussing physics correctly presumably represent the vast majority of cases in which physics was discussed in ChatGPT's training text. And there were likely some instances in which students mentioned Newton's laws, identified that they did not know how to use them, and proceeded to use them incorrectly, which is closer to what we saw in the responses from ChatGPT3.5. But the model seems much less likely to have seen cases where students mentioned Newton's laws by name, but only for the purpose of saying that they had never heard of Newton's laws and to describe their reasoning in the absence of them. As such, ChatGPT might inevitably struggle to identify cases where students were reasoning with a pre-Newtonian mindset based on an instruction like "you do not know or understand Newton's laws" absent a training set specifically designed for this purpose.
251
+
252
+ # V. CONCLUSIONS
253
+
254
+ Our paper asks whether ChatGPT "can project an understanding of introductory physics," meaning, "can it display behaviors consistent with having an underlying understanding of kinematics and Newtonian dynamics, whether or not such underlying understanding actually exists?" For ChatGPT3.5, the answer appears to be "yes locally, not globally." In other words: in some isolated cases it responded to items from the FCI designed to test for conceptual mastery of introductory physics exactly the way an expert physicist might, despite (as far as we can tell) having never seen the question before and not having any specific programming dedicated to "doing physics." ChatGPT3.5 displays this ability about the same percentage of the time as a B- or B+ level student taking a college physics course. This is considerably more proficiency than many physicists would have predicted was imminent just a few years ago. On the other hand: when the GPT3.5 mask slips, what it reveals is so clearly devoid of understanding that it spoils the charade. If a
255
+
256
+ student submitted work which showed perfect mastery in one place and complete incoherence on the same set of topics immediately after, we would suspect cheating. ChatGPT may not be "cheating" per se, but it is not on the whole performing in a manner that projects full-fledged expertise.
257
+
258
+ The story of ChatGPT4 is quite different. There are many aspects of true "understanding", such as the ability to engage in metaphor [29] or make use of multiple representations [30], which differentiate experts and novices and which we do not attempt to test here. As such, perhaps we still cannot claim that ChatGPT4 "globally" projects understanding of introductory physics. But for whatever subset of conceptual understanding is captured by the FCI (a tool built precisely to capture some aspect of deeper comprehension), ChatGPT4 is completely capable of responding like an expert, making only one mistake in its responses (and even then, making a mistake that a nitpicking expert might be inclined to defend).
259
+
260
+ There are at least four valuable questions to be asked in light of this performance, and we provide speculative discussion here in anticipation of further work. First, one could ask what ChatGPT's performance can tell us about the nature of some of the standardized assessment tools used in PER. One answer appears to be that it is important to treat these instruments as a whole, and not place too much emphasis on any single item. Assessments like the FCI were designed intentionally to probe central concepts along various different dimensions and with slight variations in the presentations of the concepts. ChatGPT3.5's oscillation between expertise and vacuousness underscores the necessity of this. The results also remind us that there is yet another distinction to be drawn between "understanding" a concept, in the sense of "being able to apply it like an expert," and believing a concept, in the sense of having an internal satisfaction that the concept is true because it should necessarily be true. This distinction is significant also among our students: compare how relatively easy it is for a student to learn that "both forces in the pair are always equal" in the context of Newton's Third law with how few students actually feel that this fact is reasonable and intuitive when they first encounter it [31].
261
+
262
+ Second, given its level of apparent understanding, one could investigate ChatGPT's potential use by students, because it (or tools like it) will soon be used by students in our classrooms, as is already happening in other fields of study [32]. At the moment, a student relying on ChatGPT3.5 might be easy enough to spot, given its inconsistency. But because its faulty responses stand out so much, an enterprising student could conceivably ask it for help with every question and then learn the signs of when to disregard its answers. And of course, while ChatGPT4 is currently available only on a subscription basis and with substantial rate limitations that will prevent it from being used broadly by every student in an intro physics class, these restrictions will surely recede
263
+
264
+ with time. All this is to say, even at its current ability level, ChatGPT3.5 threatens the integrity of things like take-home tests, exam corrections, lab reports, written homework assignments, and online problem sets, at least in their current form. ChatGPT4's performance is significantly stronger still.
265
+
266
+ Third, we considered the possible role that ChatGPT could have as an in-class teaching tool. In some fields where its responses have a higher success rate, like computer science, some faculty are encouraging their students to come to class with a ChatGPT tab open, and to use it to ask brief clarifying questions, or find the bugs in their sample code, so that many more of these minor questions can be handled than a professor could hope to field on their own [33]. At this juncture, we clearly do not recommend using ChatGPT3.5 this way in a physics classroom, as its hit rate for correct answers and correct explanations is simply too low, and the danger of confusing a student (or undermining their confidence) by giving them information that is only statistically trustworthy is too great. However, ChatGPT4 seems likely to have crossed the crucial thresholds here, and as it becomes widely available it may indeed be possible for a student to use it as a "copilot" to aid their understanding in the same way that students currently might use tools like calculators and computer algebra systems to help them track the mathematics in their physics courses. We do not opine at this juncture on whether this kind of use would be desirable, except to say that the strength and ubiquity of ChatGPT and its successors might make it inevitable, in which case it may be preferable for educators now to be identifying the best ways to do so.
267
+
268
+ Finally, we asked about using ChatGPT as a tool to support physics instructors outside the classroom, by assisting with preparation. Here, it is clear that ChatGPT is not able to play at least one of the roles we had initially conjectured: it cannot conjure a useful simulacrum of a novice mindset in a way that would allow an educator to test out their various examples and analogies to see how they might land with their students. But there are still some other ways it could be useful. Its partial understanding and relatively literal mode of interpretation, for example, make it an attractive tool to playtest possible exam questions to root out glitches, loopholes, or overdetermined facts. ChatGPT might also function as a tool to help with the challenging process [34] of drafting new exam or homework questions in the first place.
269
+
270
+ All of these motivating questions immediately suggest areas for follow-up projects. We intend to pursue some ourselves, and encourage others to do so as well. And we must note that, while history contains faulty predictions about the timeline of AI development in both directions [35], the current pace and nature of the field suggests it will continue to advance rapidly, as evidenced by the dramatic leap in performance from ChatGPT3.5 to ChatGPT4 which came about even within the time it has taken to prepare this work for publication. Indeed, the self-reinforcing nature of the field as AI models
271
+
272
+ learn to train themselves, coupled with the current exponential growth of computing power, suggests that the field as a whole could advance at an exponential or even superexponential rate [36]. Concepts which at the time of this writing can be dismissed as being beyond even ChatGPT4's capabilities may be old news by the time the reader finds this. This is all the more reason why continued work in this area is urgently needed.
273
+
274
+ # ACKNOWLEDGEMENT
275
+
276
+ The author thanks Noah Finkelstein for useful discussions regarding the scope and framing of this work, and
277
+
278
+ Mark Kissler, M.D., of the University of Colorado Hospital for suggesting the phrase "project understanding" as a way to clarify the central research question explored here.
279
+
280
+ [1] Gerd Kortemeyer. Could an artificial-intelligence agent pass an introductory physics course? arXiv preprint arXiv:2301.12127, 2023.
281
+ [2] OpenAI. Chatgpt: Optimizing language models for dialogue, 2022.
282
+ [3] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017.
283
+ [4] Long Ouyang, Jeff Wu, Xu Jiang, Diogo Almeida, Carroll L Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. arXiv preprint arXiv:2203.02155, 2022.
284
+ [5] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020.
285
+ [6] Eva AM van Dis, Johan Bollen, Willem Zuidema, Robert van Rooij, and Claudi L Bockting. Chatgpt: five priorities for research. Nature, 614(7947):224-226, 2023.
286
+ [7] The PyCoach. Chatgpt: The end of programming (as we know it), Dec 2022.
287
+ [8] OpenAI. GPT-4 technical report. arXiv preprint arXiv:2303.08774, 2023.
288
+ [9] Christian Terwiesch. Would chat gpt3 get a wharton mba? a prediction based on its performance in the operations management course. Mack Institute for Innovation Management at the Wharton School, University of Pennsylvania. Retrieved from: https://mackinstitute.wharton.upenn.edu/wpcontent/uploads/2023/01/ChristianTerwiesch-Chat-GTP-1.24.pdf [Date accessed: February 6th, 2023], 2023.
289
+ [10] Jonathan H Choi, Kristin E Hickman, Amy Monahan, and Daniel Schwarcz. Chatgpt goes to law school. Available at SSRN, 2023.
290
+ [11] Michael Bommarito II and Daniel Martin Katz. Gpt takes the bar exam. arXiv preprint arXiv:2212.14402, 2022.
291
+ [12] Daniel Martin Katz, Michael James Bommarito, Shang Gao, and Pablo Arredondo. Gpt-4 passes the bar exam. Available at SSRN 4389233, 2023.
292
+
293
+ [13] David Hestenes, Malcolm Wells, and Gregg Swackhamer. Force concept inventory. The physics teacher, 30(3):141-158, 1992.
294
+ [14] Common crawl. Faq.
295
+ [15] There may be an exception for raw mathematics—see Methods section.
296
+ [16] Grant P Wiggins and Jay McTighe. Understanding by design. Ascd, 2005.
297
+ [17] Marvin L Minsky. Why people think computers can't. AI magazine, 3(4):3-3, 1982.
298
+ [18] Julian D Gifford and Noah D Finkelstein. Categorical framework for mathematical sense making in physics. Physical Review Physics Education Research, 16(2):020121, 2020.
299
+ [19] Carl Wieman and Katherine Perkins. Transforming physics education. Physics today, 58(11):36, 2005.
300
+ [20] Andrew Elby. Helping physics students learn how to learn. American Journal of Physics, 69(S1):S54-S64, 2001.
301
+ [21] Jonathan Tuminaro and Edward F Redish. Understanding students' poor performance on mathematical problem solving in physics. In AIP Conference Proceedings, volume 720, pages 113-116. American Institute of Physics, 2004.
302
+ [22] Bethany R Wilcox and Steven J Pollock. Investigating students' behavior and performance in online conceptual assessment. Physical Review Physics Education Research, 15(2):020145, 2019.
303
+ [23] Paul F Christiano, Jan Leike, Tom Brown, Miljan Martic, Shane Legg, and Dario Amodei. Deep reinforcement learning from human preferences. Advances in neural information processing systems, 30, 2017.
304
+ [24] Alexa Steinbrueck. Can chatgpt do image recognition?, Jan 2023.
305
+ [25] Jing Han, Lei Bao, Li Chen, Tianfang Cai, Yuan Pi, Shaona Zhou, Yan Tu, and Kathleen Koenig. Dividing the force concept inventory into two equivalent half-length tests. Physical Review Special Topics-Physics Education Research, 11(1):010112, 2015.
306
+ [26] Alan Van Heuvelen. Learning to think like a physicist: A review of research-based instructional strategies. American Journal of physics, 59(10):891-897, 1991.
307
+ [27] Donald A Norman. Some observations on mental models. Mental models, 7(112):7-14, 1983.
308
+
309
+ [28] and, sadly, in a good portion of the time after studying physics.
310
+ [29] Fredrik Jeppsson, Jesper Haglund, and Tamer G Amin. Varying use of conceptual metaphors across levels of expertise in thermodynamics. International Journal of Science Education, 37(5-6):780-805, 2015.
311
+ [30] Patrick B Kohl and Noah D Finkelstein. Patterns of multiple representation use by experts and novices during physics problem solving. Physical review special topics—Physics education research, 4(1):010111, 2008.
312
+ [31] CH Poon. Teaching newton's third law of motion in the presence of student preconception. Physics Education, 41(3):223, 2006.
313
+
314
+ [32] Jürgen Rudolph, Samson Tan, and Shannon Tan. Chatgpt: Bullshit spewer or the end of traditional assessments in higher education? Journal of Applied Learning and Teaching, 6(1), 2023.
315
+ [33] Thomas Rid. Five days in class with chatgpt, Jan 2023.
316
+ [34] James H Smith and Alfred G Costantine. Writing better physics exams. The Physics Teacher, 26(3):138-144, 1988.
317
+ [35] Stuart Armstrong, Kaj Sotala, and Sean S O hEigeartaigh. The errors, insights and lessons of famous AI predictions—and what they mean for the future. Journal of Experimental & Theoretical Artificial Intelligence, 26(3):317-342, 2014.
318
+ [36] Nick Bostrom. Superintelligence: Paths, Dangers, Strategies. Oxford University Press, Oxford, 2014.
2303.01xxx/2303.01067/images.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2fe5a2f3f904c444676d24ab1d1319a94ef9b87c61c0f811f74f87932393ad2d
3
+ size 33527
2303.01xxx/2303.01067/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.01xxx/2303.01177/820e6330-cce6-4c34-a45a-1398d2625e6f_content_list.json ADDED
@@ -0,0 +1,1798 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "© 2021 IEEE",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 416,
8
+ 424,
9
+ 580,
10
+ 446
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting or republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.",
17
+ "bbox": [
18
+ 73,
19
+ 474,
20
+ 924,
21
+ 593
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "aside_text",
27
+ "text": "arXiv:2303.01177v1 [cs.RO] 2 Mar 2023",
28
+ "bbox": [
29
+ 22,
30
+ 265,
31
+ 57,
32
+ 700
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "Autonomous Aerial Filming with Distributed Lighting by a Team of Unmanned Aerial Vehicles",
39
+ "text_level": 1,
40
+ "bbox": [
41
+ 135,
42
+ 70,
43
+ 862,
44
+ 170
45
+ ],
46
+ "page_idx": 1
47
+ },
48
+ {
49
+ "type": "text",
50
+ "text": "Vít Krátký<sup>1</sup>, Alfonso Alcántara<sup>2</sup>, Jesús Capitán<sup>2</sup>, Petr Štepan<sup>1</sup>, Martin Saska<sup>1</sup> and Aníbal Ollero<sup>2</sup>",
51
+ "bbox": [
52
+ 78,
53
+ 179,
54
+ 915,
55
+ 200
56
+ ],
57
+ "page_idx": 1
58
+ },
59
+ {
60
+ "type": "text",
61
+ "text": "Abstract—This paper describes a method for autonomous aerial cinematography with distributed lighting by a team of unmanned aerial vehicles (UAVs). Although camera-carrying multirotor helicopters have become commonplace in cinematography, their usage is limited to scenarios with sufficient natural light or of lighting provided by static artificial lights. We propose to use a formation of unmanned aerial vehicles as a tool for filming a target under illumination from various directions, which is one of the fundamental techniques of traditional cinematography. We decompose the multi-UAV trajectory optimization problem to tackle non-linear cinematographic aspects and obstacle avoidance at separate stages, which allows us to re-plan in real time and react to changes in dynamic environments. The performance of our method has been evaluated in realistic simulation scenarios and field experiments, where we show how it increases the quality of the shots and that it is capable of planning safe trajectories even in cluttered environments.",
62
+ "bbox": [
63
+ 73,
64
+ 266,
65
+ 491,
66
+ 479
67
+ ],
68
+ "page_idx": 1
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "Index Terms—Multi-Robot Systems, Aerial Systems: Applications, Motion and Path Planning",
73
+ "bbox": [
74
+ 73,
75
+ 487,
76
+ 491,
77
+ 513
78
+ ],
79
+ "page_idx": 1
80
+ },
81
+ {
82
+ "type": "text",
83
+ "text": "I. INTRODUCTION",
84
+ "text_level": 1,
85
+ "bbox": [
86
+ 204,
87
+ 534,
88
+ 361,
89
+ 547
90
+ ],
91
+ "page_idx": 1
92
+ },
93
+ {
94
+ "type": "text",
95
+ "text": "T HE interest in Unmanned Aerial Vehicles (UAVs) for aerial photography and filming is growing fast [1]–[5]. This is mainly due to their manoeuvrability and the capacity to create unique shots when compared to standard cameras. The use of UAVs as flying cameras presents not only a remarkable potential for recreational cinematography, but also for the monitoring of inspection operations in outdoor infrastructures with complex access. For instance, the EU-funded project, AERIAL-CORE, proposes UAVs to surveil the safety of human workers during maintenance operations of electrical power lines (see Fig. 1). In this industrial setup, a high-quality video is key, as it is used by supervising ground",
96
+ "bbox": [
97
+ 73,
98
+ 553,
99
+ 491,
100
+ 734
101
+ ],
102
+ "page_idx": 1
103
+ },
104
+ {
105
+ "type": "text",
106
+ "text": "Manuscript received: February 24, 2021; Revised May 26, 2021; Accepted July 3, 2021.",
107
+ "bbox": [
108
+ 73,
109
+ 746,
110
+ 491,
111
+ 770
112
+ ],
113
+ "page_idx": 1
114
+ },
115
+ {
116
+ "type": "text",
117
+ "text": "This paper was recommended for publication by Editor M. Ani Hsieh upon evaluation of the Associate Editor and Reviewers' comments. This work was supported by EU project AERIAL-CORE (H2020-2019-871479), by MULTICOP (US-1265072) in FEDER-Junta de Andalucia Programme, by project no. DG18P02OVV069 in program NAKI II, by CTU grant no SGS20/174/OHK3/3T/13, and by OP VVV funded project CZ.02.1.01/0.0/0.0/16 019/0000765 \"Research Center for Informatics\". (Vit Kratky and Alfonso Alcantara are co-first authors.)",
118
+ "bbox": [
119
+ 73,
120
+ 770,
121
+ 491,
122
+ 861
123
+ ],
124
+ "page_idx": 1
125
+ },
126
+ {
127
+ "type": "text",
128
+ "text": "$^{1}$ Vit Krátý, Petr Štepan, and Martin Sasha are with Faculty of Electrical Engineering, Czech Technical University in Prague, Czech Republic, {kratkvit|stepan|martin.saska}@fel.cyvut.cz.",
129
+ "bbox": [
130
+ 73,
131
+ 861,
132
+ 491,
133
+ 895
134
+ ],
135
+ "page_idx": 1
136
+ },
137
+ {
138
+ "type": "text",
139
+ "text": "$^{2}$ Alfonso Alcantara, Jesús Capitan, and Aníbal Ollero are with GRVC Robotics Laboratory, University of Seville, Spain {aamarin|jcapitan|aollero}@us.es.",
140
+ "bbox": [
141
+ 73,
142
+ 895,
143
+ 491,
144
+ 930
145
+ ],
146
+ "page_idx": 1
147
+ },
148
+ {
149
+ "type": "text",
150
+ "text": "Digital Object Identifier (DOI): see top of this page.",
151
+ "bbox": [
152
+ 73,
153
+ 931,
154
+ 362,
155
+ 944
156
+ ],
157
+ "page_idx": 1
158
+ },
159
+ {
160
+ "type": "image",
161
+ "img_path": "images/59e03ea8e09d4ef2373265a82b4ff5d5a8a32bc81e6586ad87f5b27816edee13.jpg",
162
+ "image_caption": [
163
+ "Fig. 1: UAV filming applications to provide external lighting; to capture smooth shots outdoors; and to monitor dangerous maintenance operations at electrical lines<sup>1</sup>. Pictures were obtained within AERIAL-CORE and DRONUMENT projects, for which the proposed technology is being developed. Videos of the work in this paper can be seen on the multimedia material page (http://mrs.felk.cvut.cz/papers/aerial-filming)."
164
+ ],
165
+ "image_footnote": [],
166
+ "bbox": [
167
+ 506,
168
+ 263,
169
+ 715,
170
+ 407
171
+ ],
172
+ "page_idx": 1
173
+ },
174
+ {
175
+ "type": "image",
176
+ "img_path": "images/3a490b74b87370ceeb2e27c4ee3cd561de41c464aa2d983ea2afe1a008aab634.jpg",
177
+ "image_caption": [],
178
+ "image_footnote": [],
179
+ "bbox": [
180
+ 715,
181
+ 266,
182
+ 919,
183
+ 407
184
+ ],
185
+ "page_idx": 1
186
+ },
187
+ {
188
+ "type": "text",
189
+ "text": "operators to monitor safety during the maintenance work. Multi-UAV teams expand upon these possibilities as they could provide alternative points of view or even supplementary illumination. Similarly in our DRONUMENT project of NAKI II program, efficient variable illumination plays a key role for documentation of historical buildings interiors.",
190
+ "bbox": [
191
+ 501,
192
+ 512,
193
+ 921,
194
+ 602
195
+ ],
196
+ "page_idx": 1
197
+ },
198
+ {
199
+ "type": "text",
200
+ "text": "Proper lighting techniques are fundamental in bringing out details in an image and in creating more natural-looking film scenes. Thus, cinematography sets are packed with different lighting sources, as digital sensors are not as reactive to light as the human eye. This can also be relevant in monitoring maintenance operations scheduled at times of the day with poor illumination. Although aerial cinematography has been attractive to the scientific community as of late, lighting techniques have yet to be applied to improve the performance of filming. Filmmakers apply many types of lighting techniques making use of various equipment. In this work, we only consider direct lighting techniques that do not require additional equipment apart from light sources. Although static lights could produce more pleasant footage in some situations, we believe that UAVs are not optimal for this purpose. Therefore, we only use UAVs as dynamic sources of light to provide lighting to a dynamic scene.",
201
+ "bbox": [
202
+ 501,
203
+ 603,
204
+ 921,
205
+ 859
206
+ ],
207
+ "page_idx": 1
208
+ },
209
+ {
210
+ "type": "text",
211
+ "text": "In this context, navigating a team of UAVs for filming tasks with distributed lighting is complex. Smooth and safe trajectories are required to achieve pleasing shots that do not compromise safety in dynamic scenarios. We propose",
212
+ "bbox": [
213
+ 501,
214
+ 859,
215
+ 921,
216
+ 920
217
+ ],
218
+ "page_idx": 1
219
+ },
220
+ {
221
+ "type": "header",
222
+ "text": "© IEEE, 2021. ACCEPTED TO IEEE RA-L. DOI: 10.1109/LRA.2021.3098811",
223
+ "bbox": [
224
+ 75,
225
+ 29,
226
+ 457,
227
+ 41
228
+ ],
229
+ "page_idx": 1
230
+ },
231
+ {
232
+ "type": "page_number",
233
+ "text": "1",
234
+ "bbox": [
235
+ 911,
236
+ 30,
237
+ 919,
238
+ 40
239
+ ],
240
+ "page_idx": 1
241
+ },
242
+ {
243
+ "type": "page_footnote",
244
+ "text": "1 https://aerial-core.eu",
245
+ "bbox": [
246
+ 504,
247
+ 930,
248
+ 630,
249
+ 944
250
+ ],
251
+ "page_idx": 1
252
+ },
253
+ {
254
+ "type": "text",
255
+ "text": "a method for online trajectory planning and execution with multiple UAVs. Our team obeys a leader-follower scheme where the formation leader carries an onboard camera to film a moving target and the followers generate trajectories that enable distributed lighting of the target, while maintaining desired lighting angles. We formulate a non-linear, optimization-based method that plans visually pleasant trajectories for the filming UAV and distributes the others in a specified formation. Moreover, we tackle safety by including a systematic framework for obstacle avoidance. Safe flight corridors for the UAVs are generated by forming sets of convex polyhedrons that model free space. Optimal and safe trajectories are thereafter computed within these convex sets.",
256
+ "bbox": [
257
+ 78,
258
+ 69,
259
+ 488,
260
+ 263
261
+ ],
262
+ "page_idx": 2
263
+ },
264
+ {
265
+ "type": "text",
266
+ "text": "A. Related works",
267
+ "text_level": 1,
268
+ "bbox": [
269
+ 78,
270
+ 290,
271
+ 194,
272
+ 303
273
+ ],
274
+ "page_idx": 2
275
+ },
276
+ {
277
+ "type": "text",
278
+ "text": "There have been several works focusing on filming dynamic scenes with a single UAV. Commercial products exist (e.g., DJI Mavic [6] or Skydio [7]) that implement semi-autonomous functionalities, such as auto-follow features for tracking an actor with collision avoidance. However, they do not address cinematographic principles. An integrated system for outdoor cinematography combining vision-based target localization with trajectory planning and collision avoidance has been proposed [8], [9]. Smoothness is achieved by minimizing trajectory jerk; shot quality by defining objective curves that fulfill relative angles with respect to the actor. Optimal trajectories for cinematography have also been computed in real-time through receding horizon optimization with non-linear constraints [10]. A user inputs framing objectives for the targets on the image to minimize errors on the image target projections, sizes, and relative viewing angles. Some authors have approached UAV cinematography by applying machine learning [11], [12]. Particularly, such techniques have been applied to demonstrations imitating professional cameraman's behaviors [13] or for reinforcement learning to achieve visually pleasant shots [14]. These works have presented valuable results for online trajectory planning, although they have not addressed the specific complexities for multi-UAV systems.",
279
+ "bbox": [
280
+ 78,
281
+ 310,
282
+ 488,
283
+ 655
284
+ ],
285
+ "page_idx": 2
286
+ },
287
+ {
288
+ "type": "text",
289
+ "text": "Regarding the methodology for multiple UAVs, a non-linear optimization problem was solved in receding horizon in [15], where collision avoidance to filmed actors and mutual collisions of UAVs were considered. Aesthetic objectives are introduced by the user as virtual reference trails. A specific camera parameter space is proposed in [16] to ensure cinematographic properties and to fulfill dynamic constraints along the trajectories. The motion of multiple UAVs around dynamic targets is coordinated through a centralized master-slave approach. A greedy framework for multi-UAV camera coordination is proposed in [17]. A decentralized planner computes UAV trajectories considering smoothness, shot diversity, collision avoidance, and mutual visibility. We have also addressed the trajectory planning for multi-UAV cinematography in previous work. We presented an architecture to execute cinematographic shots (with different types of camera motion) using multiple UAVs [18] and developed a distributed method to plan optimal trajectories reducing jerky camera movements [19]. In this paper, our focus is on the specifics of outdoor and dynamic",
290
+ "bbox": [
291
+ 78,
292
+ 657,
293
+ 488,
294
+ 943
295
+ ],
296
+ "page_idx": 2
297
+ },
298
+ {
299
+ "type": "text",
300
+ "text": "settings when compared to indoor scenarios [15]. Therefore, we have integrated obstacle avoidance in a more fundamental manner using local maps. Moreover, a novel problem with respect to previous work has been introduced, as we perform scene illumination with multiple UAVs to increase the quality of image shots.",
301
+ "bbox": [
302
+ 508,
303
+ 69,
304
+ 918,
305
+ 157
306
+ ],
307
+ "page_idx": 2
308
+ },
309
+ {
310
+ "type": "text",
311
+ "text": "The modification of lighting angles to improve images is fundamental in cinematography [20]. A camera with an onboard light on a UAV can compensate for insufficient lighting, but positioning lights at different angles with respect to the camera axis would require several UAVs. Despite the unquestionable importance of lighting for shot quality, its usage for aerial cinematography is not well-studied. Utilizing UAVs to provide supplementary illumination has been proposed for building documentation tasks [21] or tunnel inspection [22]. A formation with a filming UAV and others carrying lights was deployed to document the overshadowed parts of historical buildings [23]. A similar system has been used to carry out specialized documentation techniques [24]. However, these works have proposed lighting for tasks in static scenes, whereas the present paper deals with filming of moving targets in dynamic and potentially cluttered environments, e.g., to monitor inspection operations in large outdoor infrastructures.",
312
+ "bbox": [
313
+ 508,
314
+ 160,
315
+ 918,
316
+ 415
317
+ ],
318
+ "page_idx": 2
319
+ },
320
+ {
321
+ "type": "text",
322
+ "text": "In order to guarantee safe trajectories in multi-UAV cinematography, most works [15], [16], [19] only consider collision avoidance with actors, other UAVs, or static objects that can be modelled with previously known no-fly zones. The work in [9] integrates local mapping with onboard sensors to penalize proximity to obstacles and solves an unconstrained optimization problem. Another approach to obstacle avoidance applied for standard UAV trajectory planning is to create a convex representation of free space via a set of linear inequality constraints [25]-[28], to obtain a QP formulation for real-time motion planning. We have been inspired by these single-UAV works to develop a fundamental framework for the representation of obstacles in our non-linear optimization problem for multi-UAV cinematography.",
323
+ "bbox": [
324
+ 508,
325
+ 417,
326
+ 918,
327
+ 627
328
+ ],
329
+ "page_idx": 2
330
+ },
331
+ {
332
+ "type": "text",
333
+ "text": "B. Contributions",
334
+ "text_level": 1,
335
+ "bbox": [
336
+ 508,
337
+ 650,
338
+ 620,
339
+ 664
340
+ ],
341
+ "page_idx": 2
342
+ },
343
+ {
344
+ "type": "text",
345
+ "text": "Our main contributions are summarized as the following:",
346
+ "bbox": [
347
+ 524,
348
+ 670,
349
+ 908,
350
+ 684
351
+ ],
352
+ "page_idx": 2
353
+ },
354
+ {
355
+ "type": "list",
356
+ "sub_type": "text",
357
+ "list_items": [
358
+ "- We formulate a novel optimization problem for aerial filming with distributed lighting. Using a leader-follower scheme, we plan and execute trajectories in a distributed manner. Optimization is run in receding horizon to compute smooth trajectories with pleasing footage for the UAV filming (the leader), which takes shots of a dynamic target indicated by an external user. The followers compute their trajectories to maintain a formation with specified lighting angles on the target.",
359
+ "- We propose a new method to tackle non-convex trajectory optimization with obstacle avoidance in real time. We decompose the problem in two parts. Non-linear kinematical aspects are formulated in a problem without obstacle avoidance to generate reference trajectories. These are used to generate collision-free regions which are convex and to transform the problem into a final QP optimization task."
360
+ ],
361
+ "bbox": [
362
+ 524,
363
+ 688,
364
+ 918,
365
+ 943
366
+ ],
367
+ "page_idx": 2
368
+ },
369
+ {
370
+ "type": "page_number",
371
+ "text": "2",
372
+ "bbox": [
373
+ 76,
374
+ 31,
375
+ 84,
376
+ 39
377
+ ],
378
+ "page_idx": 2
379
+ },
380
+ {
381
+ "type": "header",
382
+ "text": "© IEEE, 2021. ACCEPTED TO IEEE RA-L. DOI: 10.1109/LRA.2021.3098811",
383
+ "bbox": [
384
+ 540,
385
+ 30,
386
+ 918,
387
+ 40
388
+ ],
389
+ "page_idx": 2
390
+ },
391
+ {
392
+ "type": "text",
393
+ "text": "- We present experimental results for different types of cinematographic shots. We prove that our method is capable of computing smooth trajectories for reducing jerky movements and show that the distributed formation improves the illumination of footage. The system is evaluated with field experiments and also in various realistic simulated scenarios, including the filming of a moving target in a cluttered environment.",
394
+ "bbox": [
395
+ 91,
396
+ 69,
397
+ 491,
398
+ 189
399
+ ],
400
+ "page_idx": 3
401
+ },
402
+ {
403
+ "type": "text",
404
+ "text": "II. SYSTEM OVERVIEW",
405
+ "text_level": 1,
406
+ "bbox": [
407
+ 184,
408
+ 205,
409
+ 380,
410
+ 219
411
+ ],
412
+ "page_idx": 3
413
+ },
414
+ {
415
+ "type": "text",
416
+ "text": "Fig. 2 depicts the architecture of the entire system. The leader UAV carries a camera for filming while several others carry light sources to provide proper illumination. A human director specifies the cinematographic parameters for the scene. These parameters include the shot type (i.e., the camera motion relative to the target), the camera shooting angle for the leader, and the desired lighting angles for the followers. This information, together with an estimation of the target trajectory, is used to generate reference trajectories for the UAVs (Section III-B). These initial trajectories do not consider obstacle avoidance, but only cinematographic aspects. The leader attempts to execute the commanded shot smoothly, whereas the followers maintain a surrounding formation with the desired lighting angles.",
417
+ "bbox": [
418
+ 73,
419
+ 223,
420
+ 491,
421
+ 435
422
+ ],
423
+ "page_idx": 3
424
+ },
425
+ {
426
+ "type": "text",
427
+ "text": "Safety is ensured by integrating information from a local map for collision avoidance (Section III-C). Firstly, a collision-free path is generated for each UAV using the map and the initial cinematographic trajectories as guidelines. Then, a safe corridor along each of these paths is computed, consisting of a set of obstacle-free polyhedrons generated by the convex decomposition of free space (see Fig. 5). Finally, the UAV trajectories are obtained as a result of a trajectory optimization process that computes dynamically feasible trajectories inside each safe corridor (Section III-D). Inter-UAV collision avoidance is achieved by including the team-mates planned trajectories as obstacles in the map.",
428
+ "bbox": [
429
+ 73,
430
+ 435,
431
+ 490,
432
+ 614
433
+ ],
434
+ "page_idx": 3
435
+ },
436
+ {
437
+ "type": "text",
438
+ "text": "The entire pipeline shown in Fig. 2 (except for the Human director component) runs on board each UAV in a receding horizon manner. This enables the online planning to react properly to changes in the behavior of the target being filmed, as well as to malfunctioning team-members or previously unseen obstacles. Note that either the Cinematographic trajectory generator or the Lighting trajectory generator is activated on each UAV, depending on whether it carries a camera or light. The component for trajectory tracking on each UAV is the low-level control pipeline described in [29].",
439
+ "bbox": [
440
+ 73,
441
+ 616,
442
+ 491,
443
+ 767
444
+ ],
445
+ "page_idx": 3
446
+ },
447
+ {
448
+ "type": "text",
449
+ "text": "III. AUTONOMOUS AERIAL CINEMATOGRAPHY",
450
+ "text_level": 1,
451
+ "bbox": [
452
+ 84,
453
+ 782,
454
+ 480,
455
+ 796
456
+ ],
457
+ "page_idx": 3
458
+ },
459
+ {
460
+ "type": "text",
461
+ "text": "In this section, we begin by detailing the UAV dynamic model (Section III-A). Then, we describe our procedure to generate optimal and safe trajectories for each UAV (Sections III-B, III-C, and III-D). Lastly, we explain how the orientation of a UAV is controlled (Section III-E).",
462
+ "bbox": [
463
+ 73,
464
+ 801,
465
+ 491,
466
+ 878
467
+ ],
468
+ "page_idx": 3
469
+ },
470
+ {
471
+ "type": "text",
472
+ "text": "A. Multi-rotor aerial vehicle dynamic model",
473
+ "text_level": 1,
474
+ "bbox": [
475
+ 73,
476
+ 895,
477
+ 380,
478
+ 910
479
+ ],
480
+ "page_idx": 3
481
+ },
482
+ {
483
+ "type": "text",
484
+ "text": "An independent trajectory tracker [29] for UAV attitude control is used, which allows for planning with a simplified",
485
+ "bbox": [
486
+ 73,
487
+ 914,
488
+ 491,
489
+ 945
490
+ ],
491
+ "page_idx": 3
492
+ },
493
+ {
494
+ "type": "text",
495
+ "text": "positional dynamic UAV model. In addition, the orientation of the camera or light source onboard (depending on the UAV) needs to be modelled. We assume the existence of a gimbal mechanism to compensate angle deviations due to changes in UAV attitude. Therefore, it is assumed that camera roll is negligible and we only control pitch and heading. Since the heading of a multi-rotor vehicle can be controlled independently of its position, we fix the relative position between the camera/light and the UAV to always point forward and control its heading through the UAV heading. The positional part of the dynamic model is defined as a linear double integrator:",
496
+ "bbox": [
497
+ 501,
498
+ 68,
499
+ 921,
500
+ 234
501
+ ],
502
+ "page_idx": 3
503
+ },
504
+ {
505
+ "type": "equation",
506
+ "text": "\n$$\n\\begin{array}{l} \\dot {\\mathbf {p}} = \\mathbf {v}, \\\\ \\cdot \\end{array} \\tag {1}\n$$\n",
507
+ "text_format": "latex",
508
+ "bbox": [
509
+ 684,
510
+ 239,
511
+ 919,
512
+ 262
513
+ ],
514
+ "page_idx": 3
515
+ },
516
+ {
517
+ "type": "equation",
518
+ "text": "\n$$\n\\dot {\\mathbf {v}} = \\mathbf {a},\n$$\n",
519
+ "text_format": "latex",
520
+ "bbox": [
521
+ 687,
522
+ 260,
523
+ 738,
524
+ 271
525
+ ],
526
+ "page_idx": 3
527
+ },
528
+ {
529
+ "type": "text",
530
+ "text": "where $\\mathbf{p} = [p_x p_y p_z]^T \\in \\mathbb{R}^3$ is the UAV position, $\\mathbf{v} = [v_x v_y v_z]^T \\in \\mathbb{R}^3$ the linear velocity, and $\\mathbf{a} = [a_x a_y a_z]^T \\in \\mathbb{R}^3$ the linear acceleration. The orientation of the camera/light may be modelled similarly:",
531
+ "bbox": [
532
+ 503,
533
+ 276,
534
+ 921,
535
+ 337
536
+ ],
537
+ "page_idx": 3
538
+ },
539
+ {
540
+ "type": "equation",
541
+ "text": "\n$$\n\\begin{array}{l} \\dot {\\mathbf {o}} = \\boldsymbol {\\omega}, \\\\ \\therefore \\quad \\mathbf {o} \\end{array} \\tag {2}\n$$\n",
542
+ "text_format": "latex",
543
+ "bbox": [
544
+ 686,
545
+ 340,
546
+ 919,
547
+ 364
548
+ ],
549
+ "page_idx": 3
550
+ },
551
+ {
552
+ "type": "equation",
553
+ "text": "\n$$\n\\dot {\\boldsymbol {\\omega}} = \\boldsymbol {\\theta},\n$$\n",
554
+ "text_format": "latex",
555
+ "bbox": [
556
+ 686,
557
+ 361,
558
+ 738,
559
+ 375
560
+ ],
561
+ "page_idx": 3
562
+ },
563
+ {
564
+ "type": "text",
565
+ "text": "where $\\mathbf{o} = [\\varphi \\xi]^T$ represents an orientation with respect to a global frame given by its heading and pitch angles, $\\omega \\in \\mathbb{R}^2$ are the corresponding angular rates, and $\\theta \\in \\mathbb{R}^2$ the angular accelerations. For the description of the proposed method, we define a full positional state of the UAV $\\mathbf{x}_p = [\\mathbf{p}^T\\mathbf{v}^T]^T \\in \\mathbb{R}^6$ , a vector of positional control inputs $\\mathbf{u}_p = \\mathbf{a}$ , an orientation state $\\mathbf{x}_o = [\\mathbf{o}^T\\boldsymbol{\\omega}^T]^T \\in \\mathbb{R}^4$ , and a vector of orientation control inputs $\\mathbf{u}_o = \\boldsymbol{\\theta}$ .",
566
+ "bbox": [
567
+ 501,
568
+ 378,
569
+ 921,
570
+ 501
571
+ ],
572
+ "page_idx": 3
573
+ },
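To make the simplified model concrete, the following sketch (ours, not from the paper's C++ implementation) steps the positional double integrator of (1) forward in time; the 0.2 s step mirrors the discretization reported later in Section IV-A, and a constant-acceleration (zero-order-hold) update is used here instead of the Runge-Kutta integration mentioned in the footnote.

```python
import numpy as np

def f_p(x, u, dt=0.2):
    """Discrete positional dynamics of the double integrator (1).

    x = [p, v] in R^6, u = a in R^3. A constant-acceleration (zero-order-hold)
    step is used for brevity; the paper's footnote mentions Runge-Kutta instead.
    """
    p, v = x[:3], x[3:]
    p_next = p + v * dt + 0.5 * u * dt**2   # exact under piecewise-constant a
    v_next = v + u * dt
    return np.concatenate([p_next, v_next])

# Example: hover state accelerated forward for one step.
x0 = np.zeros(6)
u0 = np.array([1.0, 0.0, 0.0])              # 1 m/s^2 along x
print(f_p(x0, u0))                          # [0.02 0.   0.   0.2  0.   0. ]
```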
574
+ {
575
+ "type": "text",
576
+ "text": "B. Generation of reference trajectories",
577
+ "text_level": 1,
578
+ "bbox": [
579
+ 504,
580
+ 520,
581
+ 772,
582
+ 536
583
+ ],
584
+ "page_idx": 3
585
+ },
586
+ {
587
+ "type": "text",
588
+ "text": "The first step of our method for trajectory planning is to generate a reference trajectory $D_{j}$ for each UAV $j$ . The problem complexity is alleviated by removing collision avoidance constraints and focusing only on kinematographic aspects. For the filming UAV, the objective is to reach a position relative to the target as provided by the shot type $C_s$ , while minimizing changes in the camera angle to produce pleasant images. A specific camera shooting angle $\\psi_d$ over the target needs to be maintained. The following non-linear optimization problem is formulated for the filming UAV:",
589
+ "bbox": [
590
+ 501,
591
+ 539,
592
+ 921,
593
+ 690
594
+ ],
595
+ "page_idx": 3
596
+ },
597
+ {
598
+ "type": "equation",
599
+ "text": "\n$$\n\\underset {\\mathbf {u} _ {0}, \\dots , \\mathbf {u} _ {\\mathrm {N} - 1}} {\\text {m i n i m i z e}} \\sum_ {k = 1} ^ {N} \\left(\\left\\| \\mathbf {u} _ {k - 1} \\right\\| ^ {2} + \\alpha_ {1} J _ {\\psi , k}\\right) + \\alpha_ {2} J _ {N}, \\tag {3}\n$$\n",
600
+ "text_format": "latex",
601
+ "bbox": [
602
+ 532,
603
+ 693,
604
+ 919,
605
+ 734
606
+ ],
607
+ "page_idx": 3
608
+ },
609
+ {
610
+ "type": "text",
611
+ "text": "subject to $\\mathbf{x}_0 = \\mathbf{x}'$ (3.a)",
612
+ "bbox": [
613
+ 519,
614
+ 736,
615
+ 919,
616
+ 753
617
+ ],
618
+ "page_idx": 3
619
+ },
620
+ {
621
+ "type": "equation",
622
+ "text": "\n$$\n\\mathbf {x} _ {k + 1} = \\mathrm {f} _ {p} \\left(\\mathbf {x} _ {k}, \\mathbf {u} _ {k}\\right) \\quad \\forall k \\in \\{0, \\dots , N - 1 \\}, \\tag {3.b}\n$$\n",
623
+ "text_format": "latex",
624
+ "bbox": [
625
+ 571,
626
+ 755,
627
+ 919,
628
+ 772
629
+ ],
630
+ "page_idx": 3
631
+ },
632
+ {
633
+ "type": "equation",
634
+ "text": "\n$$\n\\mathbf {v} _ {\\min } \\leq \\mathbf {v} _ {k} \\leq \\mathbf {v} _ {\\max } \\quad \\forall k \\in \\{1, \\dots , N \\}, \\tag {3.c}\n$$\n",
635
+ "text_format": "latex",
636
+ "bbox": [
637
+ 573,
638
+ 773,
639
+ 919,
640
+ 790
641
+ ],
642
+ "page_idx": 3
643
+ },
644
+ {
645
+ "type": "equation",
646
+ "text": "\n$$\n\\mathbf {u} _ {\\min } \\leq \\mathbf {u} _ {k} \\leq \\mathbf {u} _ {\\max } \\quad \\forall k \\in \\{0, \\dots , N - 1 \\}, \\tag {3.d}\n$$\n",
647
+ "text_format": "latex",
648
+ "bbox": [
649
+ 571,
650
+ 792,
651
+ 919,
652
+ 809
653
+ ],
654
+ "page_idx": 3
655
+ },
656
+ {
657
+ "type": "equation",
658
+ "text": "\n$$\nq _ {z, \\min } \\leq q _ {z, k} \\quad \\forall k \\in \\{1, \\dots , N \\}, \\tag {3.e}\n$$\n",
659
+ "text_format": "latex",
660
+ "bbox": [
661
+ 563,
662
+ 811,
663
+ 919,
664
+ 828
665
+ ],
666
+ "page_idx": 3
667
+ },
668
+ {
669
+ "type": "text",
670
+ "text": "where $\\mathrm{f}_p(\\cdot)$ represents the positional part of the dynamic model defined in Section III-A; $\\mathbf{v}_{min}$ , $\\mathbf{v}_{max}$ are velocity limitations; and $\\mathbf{u}_{min}$ , $\\mathbf{u}_{max}$ control inputs limitations.",
671
+ "bbox": [
672
+ 501,
673
+ 832,
674
+ 919,
675
+ 877
676
+ ],
677
+ "page_idx": 3
678
+ },
679
+ {
680
+ "type": "text",
681
+ "text": "The first two terms in the cost function pursue smooth trajectories by penalizing UAV accelerations and reducing",
682
+ "bbox": [
683
+ 503,
684
+ 877,
685
+ 921,
686
+ 909
687
+ ],
688
+ "page_idx": 3
689
+ },
690
+ {
691
+ "type": "header",
692
+ "text": "© IEEE, 2021. ACCEPTED TO IEEE RA-L. DOI: 10.1109/LRA.2021.3098811",
693
+ "bbox": [
694
+ 76,
695
+ 29,
696
+ 457,
697
+ 41
698
+ ],
699
+ "page_idx": 3
700
+ },
701
+ {
702
+ "type": "page_number",
703
+ "text": "3",
704
+ "bbox": [
705
+ 911,
706
+ 30,
707
+ 919,
708
+ 40
709
+ ],
710
+ "page_idx": 3
711
+ },
712
+ {
713
+ "type": "page_footnote",
714
+ "text": "2 For simplicity of description, $\\mathbf{x} := \\mathbf{x}_p$ , and $\\mathbf{u} := \\mathbf{u}_p$ . We use the Runge-Kutta method for numerical integration.",
715
+ "bbox": [
716
+ 503,
717
+ 917,
718
+ 921,
719
+ 945
720
+ ],
721
+ "page_idx": 3
722
+ },
723
+ {
724
+ "type": "image",
725
+ "img_path": "images/cbd1334574cfacbdd9e07dbd5d0af4104256d95f335973143e0ac6ff6fcf572b.jpg",
726
+ "image_caption": [
727
+ "Fig. 2: The architecture of the proposed system. $C_s$ and $C_l$ represent the desired type of cinematographic shot and lighting configuration specified by a human director; $T_T$ is the target estimated trajectory; $D_L$ , $D_F$ are reference trajectories for the leader UAV and the follower UAVs, respectively; $P_L$ , $P_F$ are collision-free paths generated along the desired trajectories; $S_L$ , $S_F$ are safe corridors along the collision-free paths; and $T_L$ , $T_F$ are optimized trajectories for the camera and lighting UAVs, respectively. The modules enclosed in the blue rectangle run on both types of UAVs."
728
+ ],
729
+ "image_footnote": [],
730
+ "bbox": [
731
+ 78,
732
+ 70,
733
+ 903,
734
+ 191
735
+ ],
736
+ "page_idx": 4
737
+ },
738
+ {
739
+ "type": "text",
740
+ "text": "gimbal movements. The director specifies an aesthetic objective through the desired camera shooting angle $\\psi_d$ to film the target (see Fig. 3). Emphasis is given on positioning the UAV to keep this angle constant without moving the gimbal. In doing so, the angular changes in the gimbal are reduced to favor less jerky camera motion and therefore, pleasant footage. In order to define $J_{\\psi}$ , the relative position between the UAV camera and the target is introduced as:",
741
+ "bbox": [
742
+ 73,
743
+ 275,
744
+ 491,
745
+ 393
746
+ ],
747
+ "page_idx": 4
748
+ },
749
+ {
750
+ "type": "equation",
751
+ "text": "\n$$\n\\mathbf {q} = \\left[ \\begin{array}{l l l} q _ {x} & q _ {y} & q _ {z} \\end{array} \\right] ^ {T} = \\mathbf {p} _ {L} - \\mathbf {p} _ {T}. \\tag {4}\n$$\n",
752
+ "text_format": "latex",
753
+ "bbox": [
754
+ 169,
755
+ 398,
756
+ 490,
757
+ 419
758
+ ],
759
+ "page_idx": 4
760
+ },
761
+ {
762
+ "type": "text",
763
+ "text": "Then, we define $J_{\\psi}$ as:",
764
+ "bbox": [
765
+ 73,
766
+ 422,
767
+ 235,
768
+ 436
769
+ ],
770
+ "page_idx": 4
771
+ },
772
+ {
773
+ "type": "equation",
774
+ "text": "\n$$\nJ _ {\\psi , k} = \\left(\\tan \\left(\\psi_ {d}\\right) - \\frac {q _ {z , k}}{\\sqrt {q _ {x , k} ^ {2} + q _ {y , k} ^ {2}}}\\right) ^ {2}. \\tag {5}\n$$\n",
775
+ "text_format": "latex",
776
+ "bbox": [
777
+ 151,
778
+ 440,
779
+ 490,
780
+ 492
781
+ ],
782
+ "page_idx": 4
783
+ },
784
+ {
785
+ "type": "text",
786
+ "text": "The target position is predicted within the time horizon with a motion model (a constant speed model in our experiments). Prediction errors are tackled by recomputing UAV trajectories with a receding horizon. By minimizing the previous cost, we implicitly minimize variations in camera pitch angle as the relative pitch with respect to the target is kept constant. Moreover, the camera heading corresponds with the UAV heading, whose variations are also smoothed as explained in Section III-D. Therefore, the idea is to generate UAV trajectories where the gimbal only needs to move slightly to compensate for small disturbances.",
787
+ "bbox": [
788
+ 73,
789
+ 494,
790
+ 490,
791
+ 660
792
+ ],
793
+ "page_idx": 4
794
+ },
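A small numerical illustration of the shooting-angle term (4)-(5) follows; the positions are made-up example values and the 6-degree angle matches the setting reported later in Section IV-A. The function is an illustrative reading of the cost, not the authors' code.

```python
import numpy as np

def shooting_angle_cost(p_L, p_T, psi_d):
    """Cost J_psi from (5): squared mismatch between tan(psi_d) and the
    tangent of the camera's actual elevation angle above the target."""
    q = p_L - p_T                                  # relative position, eq. (4)
    horiz = np.hypot(q[0], q[1])
    return (np.tan(psi_d) - q[2] / horiz) ** 2

# Hypothetical configuration: camera 8 m behind and 1 m above the target,
# desired shooting angle of 6 degrees.
p_L = np.array([8.0, 0.0, 1.0])
p_T = np.array([0.0, 0.0, 0.0])
print(shooting_angle_cost(p_L, p_T, np.deg2rad(6.0)))   # ~4e-4, near-optimal
```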
795
+ {
796
+ "type": "text",
797
+ "text": "The terminal cost $J_{N} = ||\\mathbf{x}_{xy,d} - \\mathbf{x}_{xy,N}||^{2}$ guides the UAV to a desired state imposed by the shot type, e.g., at a certain distance beside the target's final position in a lateral shot. Note that a final UAV height is not imposed, as we want the planner to compute the optimal $p_{z}$ to maintain the camera shooting angle commanded by the director. Lastly, the constraint (3.e) establishes a minimum distance above the target for safety purposes.",
798
+ "bbox": [
799
+ 73,
800
+ 688,
801
+ 491,
802
+ 811
803
+ ],
804
+ "page_idx": 4
805
+ },
806
+ {
807
+ "type": "text",
808
+ "text": "The reference trajectories for the lighting UAVs are computed to achieve a desired leader-follower formation around the target. The desired position of the followers is influenced by the corresponding leader position $\\mathbf{p}_L$ and camera orientation $\\mathbf{o}_L$ , the target position $\\mathbf{p}_T$ , the desired lighting angles of $j$ -th light $\\chi_j$ and $\\varrho_j$ , and the desired distance of the light to the target $d_j$ . The desired position of $j$ -th follower $\\mathbf{p}_j$ is then",
809
+ "bbox": [
810
+ 73,
811
+ 838,
812
+ 491,
813
+ 946
814
+ ],
815
+ "page_idx": 4
816
+ },
817
+ {
818
+ "type": "image",
819
+ "img_path": "images/57efac4238ee7e8ae880d17c3c8e57cbf7e2d8b5e027e3119624bc9d2ff5c9cd.jpg",
820
+ "image_caption": [
821
+ "Fig. 3: Reference frames and camera shooting angle. The origins of the camera and UAV frames coincide."
822
+ ],
823
+ "image_footnote": [],
824
+ "bbox": [
825
+ 526,
826
+ 277,
827
+ 816,
828
+ 424
829
+ ],
830
+ "page_idx": 4
831
+ },
832
+ {
833
+ "type": "text",
834
+ "text": "given by the equation:",
835
+ "bbox": [
836
+ 504,
837
+ 484,
838
+ 660,
839
+ 500
840
+ ],
841
+ "page_idx": 4
842
+ },
843
+ {
844
+ "type": "equation",
845
+ "text": "\n$$\n\\mathbf {p} _ {j} = \\mathbf {p} _ {T} + d _ {j} \\left[ \\begin{array}{c} - \\cos (\\varphi_ {j}) \\cos (\\xi_ {j}) \\\\ - \\sin (\\varphi_ {j}) \\cos (\\xi_ {j}) \\\\ \\sin (\\xi_ {j}) \\end{array} \\right], \\tag {6}\n$$\n",
846
+ "text_format": "latex",
847
+ "bbox": [
848
+ 586,
849
+ 500,
850
+ 919,
851
+ 549
852
+ ],
853
+ "page_idx": 4
854
+ },
855
+ {
856
+ "type": "text",
857
+ "text": "where $\\varphi_{j} = \\varphi_{L} + \\chi_{j}$ and $\\xi_{j} = \\xi_{L} + \\varrho_{j}$ are desired lighting angles relative to the camera's optical axis (see Fig. 4). To avoid jumps in the desired followers' positions caused by quick changes in the target position (e.g., due to a transition to a new target), a virtual target, located in front of the camera at a certain distance along its optical axis, is used. The position of this virtual target is given by:",
858
+ "bbox": [
859
+ 503,
860
+ 550,
861
+ 921,
862
+ 656
863
+ ],
864
+ "page_idx": 4
865
+ },
866
+ {
867
+ "type": "equation",
868
+ "text": "\n$$\n\\mathbf {p} _ {v} = \\mathbf {p} _ {L} + d _ {v} \\left[ \\begin{array}{c} \\cos \\left(\\varphi_ {L}\\right) \\cos \\left(\\xi_ {L}\\right) \\\\ \\sin \\left(\\varphi_ {L}\\right) \\cos \\left(\\xi_ {L}\\right) \\\\ \\sin \\left(\\xi_ {L}\\right) \\end{array} \\right], \\tag {7}\n$$\n",
869
+ "text_format": "latex",
870
+ "bbox": [
871
+ 589,
872
+ 657,
873
+ 919,
874
+ 705
875
+ ],
876
+ "page_idx": 4
877
+ },
878
+ {
879
+ "type": "text",
880
+ "text": "where $d_v$ is the desired distance between the virtual target and the camera center and $\\mathbf{p}_v$ denotes the virtual target position. Substituting position $\\mathbf{p}_v$ for $\\mathbf{p}_T$ in (6), a more consistent formation scheme is acquired, where less aggressive maneuvers are required; and the lighting always focuses on the scene in front of the camera, which is relevant in obtaining pleasant videos.",
881
+ "bbox": [
882
+ 501,
883
+ 708,
884
+ 919,
885
+ 813
886
+ ],
887
+ "page_idx": 4
888
+ },
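The follower placement of (6) together with the virtual target of (7) can be sketched as below; the particular camera pose, lighting angles, and distances are made-up example values, not parameters from the paper.

```python
import numpy as np

def virtual_target(p_L, phi_L, xi_L, d_v):
    """Virtual target placed in front of the camera along its optical axis, eq. (7)."""
    axis = np.array([np.cos(phi_L) * np.cos(xi_L),
                     np.sin(phi_L) * np.cos(xi_L),
                     np.sin(xi_L)])
    return p_L + d_v * axis

def follower_position(p_ref, phi_L, xi_L, chi_j, rho_j, d_j):
    """Desired position of the j-th lighting UAV, eq. (6), with lighting
    angles chi_j, rho_j measured relative to the camera's optical axis."""
    phi_j, xi_j = phi_L + chi_j, xi_L + rho_j
    offset = np.array([-np.cos(phi_j) * np.cos(xi_j),
                       -np.sin(phi_j) * np.cos(xi_j),
                        np.sin(xi_j)])
    return p_ref + d_j * offset

# Hypothetical example: camera at 3 m height looking along +x, follower lighting
# from 45 deg to the side at 5 m, anchored on a virtual target 6 m ahead.
p_L, phi_L, xi_L = np.array([0.0, 0.0, 3.0]), 0.0, 0.0
p_v = virtual_target(p_L, phi_L, xi_L, d_v=6.0)
print(follower_position(p_v, phi_L, xi_L,
                        chi_j=np.deg2rad(45), rho_j=0.0, d_j=5.0))
# -> approximately [2.46 -3.54  3.]
```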
889
+ {
890
+ "type": "text",
891
+ "text": "C. Generation of safe corridors",
892
+ "text_level": 1,
893
+ "bbox": [
894
+ 504,
895
+ 834,
896
+ 725,
897
+ 849
898
+ ],
899
+ "page_idx": 4
900
+ },
901
+ {
902
+ "type": "text",
903
+ "text": "The initial reference trajectories are computed without considering obstacles. They are, therefore, used as seed to generate a safe corridor $S_{j}$ for each UAV $j$ where collision-free trajectories can then be computed. Firstly, we convert each trajectory $D_{j}$ into a collision-free path $P_{j}$ . We iterate over each of the $N$ waypoints in $D_{j}$ and add it directly to $P_{j}$ if",
904
+ "bbox": [
905
+ 501,
906
+ 853,
907
+ 921,
908
+ 946
909
+ ],
910
+ "page_idx": 4
911
+ },
912
+ {
913
+ "type": "page_number",
914
+ "text": "4",
915
+ "bbox": [
916
+ 76,
917
+ 31,
918
+ 86,
919
+ 40
920
+ ],
921
+ "page_idx": 4
922
+ },
923
+ {
924
+ "type": "header",
925
+ "text": "© IEEE, 2021. ACCEPTED TO IEEE RA-L. DOI: 10.1109/LRA.2021.3098811",
926
+ "bbox": [
927
+ 540,
928
+ 29,
929
+ 919,
930
+ 41
931
+ ],
932
+ "page_idx": 4
933
+ },
934
+ {
935
+ "type": "image",
936
+ "img_path": "images/049f04c05e12fa4532254bd25aff057223c079a0cdeb6e7841e1331a01a912db.jpg",
937
+ "image_caption": [
938
+ "(a) top view"
939
+ ],
940
+ "image_footnote": [],
941
+ "bbox": [
942
+ 81,
943
+ 75,
944
+ 292,
945
+ 186
946
+ ],
947
+ "page_idx": 5
948
+ },
949
+ {
950
+ "type": "image",
951
+ "img_path": "images/ee678417bea15d2bde0cc46f260fb234fcdac8e6f2b6c49b56acbd2788a35d0d.jpg",
952
+ "image_caption": [
953
+ "(b) side view",
954
+ "Fig. 4: The leader-follower scheme defined by (6)."
955
+ ],
956
+ "image_footnote": [],
957
+ "bbox": [
958
+ 323,
959
+ 74,
960
+ 467,
961
+ 184
962
+ ],
963
+ "page_idx": 5
964
+ },
965
+ {
966
+ "type": "text",
967
+ "text": "it is collision-free. Otherwise, we label the previous collision-free waypoint as $A$ and keep moving along $D_{j}$ until we find the next collision-free waypoint $B$ . Then, we try to find an alternative collision-free path from $A$ to $B$ , to be appended to $P_{j}$ and continue iterating. For that alternative path, we use the Jump Point Search (JPS) algorithm introduced in [30], [31] and extended to 3D in [27]. A real-time performance is ensured by introducing a timeout for the JPS path search.",
968
+ "bbox": [
969
+ 73,
970
+ 244,
971
+ 491,
972
+ 364
973
+ ],
974
+ "page_idx": 5
975
+ },
976
+ {
977
+ "type": "text",
978
+ "text": "If the JPS algorithm fails to find a path within the given timeout from $A$ to $B$ , we run it again to connect $A$ directly to the last waypoint in $D_{j}$ (let this waypoint be $C$ ). If this is not found either, we append to $P_{j}$ the path to the node closest to $C$ from all those expanded during the JPS search. Once completed, $P_{j}$ consists of an arbitrary number of points equal to or greater than $N$ . Since $P_{j}$ is used for the generation of the safety corridors for particular points in $D_{j}$ , it is postprocessed so that $|P_{j}| = |D_{j}| = N$ . $P_{j}$ is sampled so that the waypoint distribution is close to the initial points in $D_{j}$ . Since these collision-free paths are used as a guide for trajectory optimization in subsequent steps, the distance sampling step $d_{s}$ is limited to help avoid the dynamic infeasibility of the final trajectories. If the sampled $P_{j}$ consists of more than $N$ waypoints, the overflowing points are discarded for the subsequent steps of the trajectory optimization process. The process to create a collision-free path $P_{j}$ and its corresponding safe corridor $S_{j}$ is illustrated in Fig. 5.",
979
+ "bbox": [
980
+ 73,
981
+ 364,
982
+ 490,
983
+ 636
984
+ ],
985
+ "page_idx": 5
986
+ },
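The control flow of this waypoint-repair step can be outlined as follows. The collision check against a small obstacle list and the straight-line connect() below are simplified stand-ins for the point-cloud map and the 3D Jump Point Search with timeout used in the paper, so this is only a sketch of the logic, not the actual planner.

```python
import numpy as np

def collision_free(p, obstacles, radius=1.0):
    """True if waypoint p keeps a safety radius from every obstacle point."""
    return all(np.linalg.norm(p - o) > radius for o in obstacles)

def connect(a, b, obstacles, step=0.5):
    """Stand-in for the 3D Jump Point Search of [27], [30], [31]: it only
    samples the straight segment a-b and accepts it if every sample is free."""
    n = max(2, int(np.ceil(np.linalg.norm(b - a) / step)) + 1)
    pts = [a + t * (b - a) for t in np.linspace(0.0, 1.0, n)]
    if all(collision_free(p, obstacles) for p in pts):
        return pts[1:]                  # exclude a, include b
    return None

def repair_path(D, obstacles):
    """Turn a reference trajectory D (list of waypoints whose first point is
    collision-free) into a collision-free path P."""
    P = [D[0]]
    i = 1
    while i < len(D):
        if collision_free(D[i], obstacles):
            P.append(D[i])
            i += 1
            continue
        A = P[-1]                       # last collision-free waypoint kept so far
        j = i + 1                       # look for the next collision-free waypoint B
        while j < len(D) and not collision_free(D[j], obstacles):
            j += 1
        B = D[j] if j < len(D) else D[-1]
        detour = connect(A, B, obstacles)
        if detour is not None:          # alternative path A -> B found
            P.extend(detour)
            i = j + 1
        else:                           # straight-line stand-in cannot route around,
            i = j                       # so the blocked span is skipped (JPS would detour)
    return P

# Minimal example: one obstacle sitting on the straight reference trajectory.
obstacles = [np.array([2.0, 0.0, 2.0])]
D = [np.array([x, 0.0, 2.0]) for x in np.linspace(0.0, 4.0, 9)]
print(len(repair_path(D, obstacles)))   # 4 waypoints remain with this stand-in
```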
987
+ {
988
+ "type": "text",
989
+ "text": "Safe corridors are generated around the collision-free paths with a prefixed initial position of the UAV (i.e., $N + 1$ waypoints), using a map of the environment represented by a point cloud $O_{pcl}$ and the convex decomposition method proposed in [27]. This method is based on an iterative procedure for the generation of polyhedrons. It begins by inflating an ellipsoid aligned with each path segment. In the next step, tangent planes are constructed at the contact points between the ellipsoid and any obstacles. Afterwards, all points lying behind this plane are removed from $O_{pcl}$ . Yet again, the next iteration starts by inflating the ellipsoid up to the nearest point in $O_{pcl}$ . This procedure is terminated if there are no remaining points in $O_{pcl}$ . The generated tangent planes define an obstacle-free polyhedron $\\mathcal{P}$ enclosing the corresponding path segment and the set of all polyhedrons along the path constitutes the safe corridor.",
990
+ "bbox": [
991
+ 73,
992
+ 637,
993
+ 491,
994
+ 878
995
+ ],
996
+ "page_idx": 5
997
+ },
998
+ {
999
+ "type": "text",
1000
+ "text": "D. Trajectory optimization",
1001
+ "text_level": 1,
1002
+ "bbox": [
1003
+ 73,
1004
+ 895,
1005
+ 261,
1006
+ 910
1007
+ ],
1008
+ "page_idx": 5
1009
+ },
1010
+ {
1011
+ "type": "text",
1012
+ "text": "Given a collision-free path $P$ and its corresponding safe corridor $S$ , a final optimal trajectory is computed through a",
1013
+ "bbox": [
1014
+ 73,
1015
+ 914,
1016
+ 491,
1017
+ 946
1018
+ ],
1019
+ "page_idx": 5
1020
+ },
1021
+ {
1022
+ "type": "image",
1023
+ "img_path": "images/1132cd546db0b8c0309a482d08a51eab569d6cd0eceaf38ac5482ade95914580.jpg",
1024
+ "image_caption": [
1025
+ "Fig. 5: The safe corridor generation process. The initial reference trajectory (green) is converted into a collision-free path (purple), and the obstacle-free polyhedrons are generated along this path. The final optimized trajectory within the safe corridor is also shown (blue). We inflate the obstacles for safety purposes (light red)."
1026
+ ],
1027
+ "image_footnote": [],
1028
+ "bbox": [
1029
+ 504,
1030
+ 70,
1031
+ 923,
1032
+ 250
1033
+ ],
1034
+ "page_idx": 5
1035
+ },
1036
+ {
1037
+ "type": "text",
1038
+ "text": "QP problem in receding horizon. The particular optimization task ${}^{3}$ attempts to track a desired trajectory $\\mathbf{p}_d$ corresponding to the reference trajectory $D_{j}$ :",
1039
+ "bbox": [
1040
+ 501,
1041
+ 323,
1042
+ 921,
1043
+ 369
1044
+ ],
1045
+ "page_idx": 5
1046
+ },
1047
+ {
1048
+ "type": "equation",
1049
+ "text": "\n$$\n\\underset {\\mathbf {u} _ {0}, \\dots , \\mathbf {u} _ {N - 1}} {\\text {m i n i m i z e}} \\sum_ {k = 1} ^ {N} \\left(\\left\\| \\mathbf {p} _ {d, k} - \\mathbf {p} _ {k} \\right\\| ^ {2} + \\beta \\left\\| \\mathbf {u} _ {k - 1} \\right\\| ^ {2}\\right), \\tag {8}\n$$\n",
1050
+ "text_format": "latex",
1051
+ "bbox": [
1052
+ 539,
1053
+ 373,
1054
+ 919,
1055
+ 414
1056
+ ],
1057
+ "page_idx": 5
1058
+ },
1059
+ {
1060
+ "type": "text",
1061
+ "text": "subject to $\\mathbf{x}_0 = \\mathbf{x}'$ (8.a)",
1062
+ "bbox": [
1063
+ 524,
1064
+ 416,
1065
+ 919,
1066
+ 431
1067
+ ],
1068
+ "page_idx": 5
1069
+ },
1070
+ {
1071
+ "type": "equation",
1072
+ "text": "\n$$\n\\mathbf {x} _ {k + 1} = \\mathrm {f} _ {p} \\left(\\mathbf {x} _ {k}, \\mathbf {u} _ {k}\\right) \\quad \\forall k \\in \\{0, \\dots , N - 1 \\}, \\tag {8.b}\n$$\n",
1073
+ "text_format": "latex",
1074
+ "bbox": [
1075
+ 576,
1076
+ 435,
1077
+ 919,
1078
+ 452
1079
+ ],
1080
+ "page_idx": 5
1081
+ },
1082
+ {
1083
+ "type": "equation",
1084
+ "text": "\n$$\n\\mathbf {v} _ {\\min } \\leq \\mathbf {v} _ {k} \\leq \\mathbf {v} _ {\\max } \\forall k \\in \\{1, \\dots , N \\}, \\tag {8.c}\n$$\n",
1085
+ "text_format": "latex",
1086
+ "bbox": [
1087
+ 576,
1088
+ 454,
1089
+ 919,
1090
+ 470
1091
+ ],
1092
+ "page_idx": 5
1093
+ },
1094
+ {
1095
+ "type": "equation",
1096
+ "text": "\n$$\n\\mathbf {u} _ {\\min } \\leq \\mathbf {u} _ {k} \\leq \\mathbf {u} _ {\\max } \\forall k \\in \\{0, \\dots , N - 1 \\}, \\tag {8.d}\n$$\n",
1097
+ "text_format": "latex",
1098
+ "bbox": [
1099
+ 576,
1100
+ 473,
1101
+ 919,
1102
+ 489
1103
+ ],
1104
+ "page_idx": 5
1105
+ },
1106
+ {
1107
+ "type": "equation",
1108
+ "text": "\n$$\n\\mathbf {p} _ {k} \\in \\mathcal {P} _ {k} \\quad \\forall k \\in \\{1, \\dots , N \\}, \\tag {8.e}\n$$\n",
1109
+ "text_format": "latex",
1110
+ "bbox": [
1111
+ 593,
1112
+ 491,
1113
+ 919,
1114
+ 508
1115
+ ],
1116
+ "page_idx": 5
1117
+ },
1118
+ {
1119
+ "type": "text",
1120
+ "text": "where $\\mathrm{f}_p(\\cdot)$ represents the positional part of a dynamic model defined in Section III-A; $\\mathbf{v}_{min}$ , $\\mathbf{v}_{max}$ are velocity limitations; $\\mathbf{u}_{min}$ , $\\mathbf{u}_{max}$ control inputs limitations; and $\\mathcal{P}_k$ is a convex polyhedron representing a free space associated with $k$ -th transition point. The last constraint ensures a safe resulting trajectory without collisions. Given that the constraint (8.e) can be decoupled in a set of linear constraints, the problem becomes a quadratic convex program.",
1121
+ "bbox": [
1122
+ 501,
1123
+ 512,
1124
+ 919,
1125
+ 632
1126
+ ],
1127
+ "page_idx": 5
1128
+ },
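To illustrate how constraint (8.e) turns into linear inequalities, each polyhedron can be stored in half-space form $A_k \mathbf{p} \leq b_k$, which a QP solver accepts directly. The axis-aligned box below is only a stand-in for a polyhedron produced by the convex decomposition of [27].

```python
import numpy as np

def box_polyhedron(lo, hi):
    """Half-space form (A, b) of the axis-aligned box lo <= p <= hi, used here
    as a stand-in for a polyhedron from the convex decomposition."""
    A = np.vstack([np.eye(3), -np.eye(3)])
    b = np.concatenate([hi, -lo])
    return A, b

def inside(A, b, p, tol=1e-9):
    """Membership test p in P_k, i.e. all rows of A p <= b hold."""
    return bool(np.all(A @ p <= b + tol))

A, b = box_polyhedron(np.array([0.0, -2.0, 1.0]), np.array([10.0, 2.0, 5.0]))
print(inside(A, b, np.array([3.0, 0.0, 2.0])))   # True  -> waypoint stays in corridor
print(inside(A, b, np.array([3.0, 4.0, 2.0])))   # False -> violates one half-space
```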
1129
+ {
1130
+ "type": "text",
1131
+ "text": "The optimization formulation is the same for both the leader and follower UAVs. However, there are a couple of relevant differences. First, the desired reference trajectories are computed in a different manner, following either filming or lighting criteria (see Section III-B). Second, the followers encode mutual-collision avoidance through constraint (8.e). To prevent negative effects on the cinematographic quality of the performed shot, the entirety of mutual collision avoidance is left to the followers. A fixed priority scheme is defined for the UAVs, and the occupied space $O_{pcl}$ of each follower is updated with the current planned trajectories from the leader and other followers of a higher priority. $O_{pcl}$ is updated with spherical objects of the desired collision avoidance radius at each waypoint of the UAV trajectories to be avoided. A similar procedure is followed to incorporate the target's predicted trajectory (also for the leader in this case). To hold with real-time performance, the occupied space $O_{pcl}$ is assumed static for a given horizon time, but it is updated at each planning step, accommodating all static and dynamic obstacles.",
1132
+ "bbox": [
1133
+ 501,
1134
+ 633,
1135
+ 921,
1136
+ 920
1137
+ ],
1138
+ "page_idx": 5
1139
+ },
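One possible reading of the priority-based update of $O_{pcl}$ is sketched below: planned waypoints of higher-priority team members (and of the target) are expanded into spheres of sample points and appended to the local point cloud before planning. The sampling density and the 1.5 m radius are illustrative assumptions, not the authors' parameters.

```python
import numpy as np

def sphere_samples(center, radius, n=32, seed=0):
    """Roughly uniform sample points on a sphere around one planned waypoint."""
    rng = np.random.default_rng(seed)
    d = rng.normal(size=(n, 3))
    d /= np.linalg.norm(d, axis=1, keepdims=True)
    return center + radius * d

def add_teammate_trajectories(o_pcl, teammate_trajs, avoid_radius=1.5):
    """Append spherical obstacles along higher-priority teammates' planned
    trajectories (the target's predicted trajectory is handled analogously)."""
    clouds = [o_pcl]
    for traj in teammate_trajs:              # only teammates of higher priority
        for waypoint in traj:
            clouds.append(sphere_samples(np.asarray(waypoint), avoid_radius))
    return np.vstack(clouds)

# Hypothetical usage: an (empty) static map plus the leader's planned waypoints.
static_map = np.zeros((0, 3))
leader_plan = [np.array([0.0, 0.0, 3.0]), np.array([1.0, 0.0, 3.0])]
o_pcl = add_teammate_trajectories(static_map, [leader_plan])
print(o_pcl.shape)                            # (64, 3)
```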
1140
+ {
1141
+ "type": "header",
1142
+ "text": "© IEEE, 2021. ACCEPTED TO IEEE RA-L. DOI: 10.1109/LRA.2021.3098811",
1143
+ "bbox": [
1144
+ 76,
1145
+ 29,
1146
+ 457,
1147
+ 41
1148
+ ],
1149
+ "page_idx": 5
1150
+ },
1151
+ {
1152
+ "type": "page_number",
1153
+ "text": "5",
1154
+ "bbox": [
1155
+ 911,
1156
+ 30,
1157
+ 919,
1158
+ 40
1159
+ ],
1160
+ "page_idx": 5
1161
+ },
1162
+ {
1163
+ "type": "page_footnote",
1164
+ "text": "3 For simplicity of description, $\\mathbf{x} \\coloneqq \\mathbf{x}_p$ , and $\\mathbf{u} \\coloneqq \\mathbf{u}_p$ .",
1165
+ "bbox": [
1166
+ 504,
1167
+ 929,
1168
+ 810,
1169
+ 945
1170
+ ],
1171
+ "page_idx": 5
1172
+ },
1173
+ {
1174
+ "type": "text",
1175
+ "text": "Another crucial issue for the applications of multi-UAV cinematography is how to prevent other UAVs from appearing in the Field of View (FoV) of the filming UAV. However, including this in the optimization task as either a constraint or a cost term can remarkably increase the complexity of the problem. We considered including the FoV of the leader camera as an obstacle in the local maps of the followers, so that they may avoid it. Even so, relatively small changes in camera orientation could result in significant changes in the map representation and lead to unstable planned trajectories. Therefore, the camera's FoV is avoided by the lighting UAVs only through penalizing deviations from the desired trajectories $\\mathbf{p}_d$ . Thus, FoV avoidance is mostly determined by the choice of lighting parameters that describe the desired formation.",
1176
+ "bbox": [
1177
+ 73,
1178
+ 69,
1179
+ 491,
1180
+ 294
1181
+ ],
1182
+ "page_idx": 6
1183
+ },
1184
+ {
1185
+ "type": "text",
1186
+ "text": "Finally, occlusions caused by obstacles in the FoV of the camera or the lights are also a relevant aspect when filming. Occlusion throughout a significant part of the shot renders the shot useless, and in the case of onboard detection of the target, it also disables target following. However in most cases, occlusions are temporary and avoiding them is always a trade-off between significant deviation from the desired cinematographic trajectory and having part of the video occluded. In this work, the trajectories are generated so that they are close to the desired cinematographic shots specified by a director. The possible occlusions have to be resolved by redefining the shot to be performed.",
1187
+ "bbox": [
1188
+ 73,
1189
+ 296,
1190
+ 491,
1191
+ 478
1192
+ ],
1193
+ "page_idx": 6
1194
+ },
1195
+ {
1196
+ "type": "text",
1197
+ "text": "E. Orientation control",
1198
+ "text_level": 1,
1199
+ "bbox": [
1200
+ 73,
1201
+ 508,
1202
+ 232,
1203
+ 522
1204
+ ],
1205
+ "page_idx": 6
1206
+ },
1207
+ {
1208
+ "type": "text",
1209
+ "text": "In this application, both the camera and the light sources need to always be pointing at the filmed target. Hence, their desired orientation is given by:",
1210
+ "bbox": [
1211
+ 73,
1212
+ 530,
1213
+ 490,
1214
+ 575
1215
+ ],
1216
+ "page_idx": 6
1217
+ },
1218
+ {
1219
+ "type": "equation",
1220
+ "text": "\n$$\n\\mathbf {o} _ {d} = \\left[ \\varphi_ {d} \\xi_ {d} \\right] ^ {T} = \\left[ \\arctan \\left(q _ {y}, q _ {x}\\right) \\sin \\left(\\frac {q _ {z}}{| | q | |}\\right) \\right] ^ {T}. \\tag {9}\n$$\n",
1221
+ "text_format": "latex",
1222
+ "bbox": [
1223
+ 117,
1224
+ 578,
1225
+ 488,
1226
+ 607
1227
+ ],
1228
+ "page_idx": 6
1229
+ },
1230
+ {
1231
+ "type": "text",
1232
+ "text": "Orientation control is also formulated as a constrained quadratic optimization problem in receding horizon in order to achieve smoother orientation changes. For simplicity of description, $\\mathbf{x} \\coloneqq \\mathbf{x}_o$ and $\\mathbf{u} \\coloneqq \\mathbf{u}_o$ in the following problem formulation:",
1233
+ "bbox": [
1234
+ 73,
1235
+ 609,
1236
+ 491,
1237
+ 681
1238
+ ],
1239
+ "page_idx": 6
1240
+ },
1241
+ {
1242
+ "type": "equation",
1243
+ "text": "\n$$\n\\underset {\\mathbf {u} _ {0}, \\dots , \\mathbf {u} _ {N - 1}} {\\text {m i n i m i z e}} \\sum_ {k = 1} ^ {N} \\left(\\left| \\left| \\mathbf {o} _ {d, k} - \\mathbf {o} _ {k} \\right| \\right| ^ {2} + \\gamma \\left| \\left| \\mathbf {u} _ {k - 1} \\right| \\right| ^ {2}\\right), \\tag {10}\n$$\n",
1244
+ "text_format": "latex",
1245
+ "bbox": [
1246
+ 102,
1247
+ 684,
1248
+ 488,
1249
+ 724
1250
+ ],
1251
+ "page_idx": 6
1252
+ },
1253
+ {
1254
+ "type": "text",
1255
+ "text": "subject to $\\mathbf{x}_0 = \\mathbf{x}'$ (10.a)",
1256
+ "bbox": [
1257
+ 89,
1258
+ 727,
1259
+ 488,
1260
+ 743
1261
+ ],
1262
+ "page_idx": 6
1263
+ },
1264
+ {
1265
+ "type": "equation",
1266
+ "text": "\n$$\n\\mathbf {x} _ {k + 1} = \\mathrm {f} _ {o} \\left(\\mathbf {x} _ {k}, \\mathbf {u} _ {k}\\right) \\quad \\forall k \\in \\{0, \\dots , N - 1 \\}, \\tag {10.b}\n$$\n",
1267
+ "text_format": "latex",
1268
+ "bbox": [
1269
+ 138,
1270
+ 746,
1271
+ 488,
1272
+ 762
1273
+ ],
1274
+ "page_idx": 6
1275
+ },
1276
+ {
1277
+ "type": "equation",
1278
+ "text": "\n$$\n\\omega_ {\\min } \\leq \\omega_ {k} \\leq \\omega_ {\\max } \\forall k \\in \\{1, \\dots , N \\}, \\tag {10.c}\n$$\n",
1279
+ "text_format": "latex",
1280
+ "bbox": [
1281
+ 138,
1282
+ 765,
1283
+ 488,
1284
+ 781
1285
+ ],
1286
+ "page_idx": 6
1287
+ },
1288
+ {
1289
+ "type": "equation",
1290
+ "text": "\n$$\n\\left. \\xi_ {\\min } \\leq \\xi_ {k} \\leq \\xi_ {\\max } \\quad \\forall k \\in \\{1, \\dots , N \\}, \\right. \\tag {10.d}\n$$\n",
1291
+ "text_format": "latex",
1292
+ "bbox": [
1293
+ 143,
1294
+ 784,
1295
+ 488,
1296
+ 800
1297
+ ],
1298
+ "page_idx": 6
1299
+ },
1300
+ {
1301
+ "type": "equation",
1302
+ "text": "\n$$\n\\mathbf {u} _ {\\min } \\leq \\mathbf {u} _ {k} \\leq \\mathbf {u} _ {\\max } \\quad \\forall k \\in \\{0, \\dots , N - 1 \\}, \\tag {10.e}\n$$\n",
1303
+ "text_format": "latex",
1304
+ "bbox": [
1305
+ 142,
1306
+ 803,
1307
+ 488,
1308
+ 819
1309
+ ],
1310
+ "page_idx": 6
1311
+ },
1312
+ {
1313
+ "type": "text",
1314
+ "text": "where $\\mathrm{f}_o(\\cdot)$ represents the orientation aspect of the dynamic model defined in Section III-A; $\\omega_{min}$ , $\\omega_{max}$ are limitations on the angular velocities; $\\mathbf{u}_{min}$ , $\\mathbf{u}_{max}$ control inputs limitations; and $\\xi_{min}$ , $\\xi_{max}$ represent hardware limitations of the gimbal to adjusting pitch angles. The heading and pitch angles of the camera or light can be controlled independently. Thus, Problem (10) was decoupled into two simpler problems. The optimal solution for each problem can be found analytically",
1315
+ "bbox": [
1316
+ 73,
1317
+ 823,
1318
+ 491,
1319
+ 946
1320
+ ],
1321
+ "page_idx": 6
1322
+ },
1323
+ {
1324
+ "type": "text",
1325
+ "text": "with a standard framework for linear MPC (Model Predictive Control).",
1326
+ "bbox": [
1327
+ 503,
1328
+ 68,
1329
+ 921,
1330
+ 98
1331
+ ],
1332
+ "page_idx": 6
1333
+ },
1334
+ {
1335
+ "type": "text",
1336
+ "text": "IV. EXPERIMENTAL EVALUATION",
1337
+ "text_level": 1,
1338
+ "bbox": [
1339
+ 571,
1340
+ 117,
1341
+ 854,
1342
+ 131
1343
+ ],
1344
+ "page_idx": 6
1345
+ },
1346
+ {
1347
+ "type": "text",
1348
+ "text": "In this section, experimental results are presented to demonstrate the performance of our method for multi-UAV trajectory planning. We have assessed that the proposed method is capable of computing smooth cinematographic trajectories in real-time. Additionally, we have evaluated that the trajectories of the follower UAVs which provide lighting for the target are capable of complying with formation constraints to improve the quality of the shot. The safety of our method has also been proved through experiments in the presence of multiple obstacles.",
1349
+ "bbox": [
1350
+ 501,
1351
+ 137,
1352
+ 921,
1353
+ 287
1354
+ ],
1355
+ "page_idx": 6
1356
+ },
1357
+ {
1358
+ "type": "text",
1359
+ "text": "A. Experimental setup",
1360
+ "text_level": 1,
1361
+ "bbox": [
1362
+ 503,
1363
+ 310,
1364
+ 661,
1365
+ 325
1366
+ ],
1367
+ "page_idx": 6
1368
+ },
1369
+ {
1370
+ "type": "text",
1371
+ "text": "We implemented our architecture described in Section II in $\\mathrm{C + + }$ using the ROS framework. The ACADO Toolkit [32] was used to solve the optimization problems. We conducted software-in-the-loop simulations using Gazebo to simulate physics and to equip the UAVs with a camera and lights. To solve the optimization problems, a horizon length of 8s and a time step of $0.2\\mathrm{s}$ were chosen. The cinematographic parameters were set to $\\psi_{d} = 6^{\\circ}$ and $q_{z,min} = 0.5\\mathrm{m}$ . The maximum distance sampling step was set to $d_{s,max} = 0.5\\mathrm{m}$ .",
1372
+ "bbox": [
1373
+ 501,
1374
+ 329,
1375
+ 921,
1376
+ 467
1377
+ ],
1378
+ "page_idx": 6
1379
+ },
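Collecting the stated settings in one place, the 8 s horizon with a 0.2 s step corresponds to N = 8 / 0.2 = 40 stages per optimization. The snippet below is just a convenience container; the class and field names are ours, not from the implementation.

```python
from dataclasses import dataclass
import math

@dataclass
class PlannerConfig:
    horizon_s: float = 8.0      # receding-horizon length
    dt_s: float = 0.2           # discretization step
    psi_d_deg: float = 6.0      # desired camera shooting angle
    q_z_min_m: float = 0.5      # minimum height above the target, constraint (3.e)
    d_s_max_m: float = 0.5      # maximum distance sampling step for P_j

    @property
    def n_steps(self) -> int:
        return round(self.horizon_s / self.dt_s)

cfg = PlannerConfig()
print(cfg.n_steps, math.radians(cfg.psi_d_deg))   # 40 stages, psi_d ~ 0.105 rad
```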
1380
+ {
1381
+ "type": "text",
1382
+ "text": "B. Simulation - Cinematography trajectories",
1383
+ "text_level": 1,
1384
+ "bbox": [
1385
+ 504,
1386
+ 487,
1387
+ 808,
1388
+ 502
1389
+ ],
1390
+ "page_idx": 6
1391
+ },
1392
+ {
1393
+ "type": "text",
1394
+ "text": "The objective of this simulation was twofold: to demonstrate how the method computes smoother camera trajectories for the leader UAV while complying with cinematographic aspects, and how the trajectories of the followers keep with formation constraints to light the target properly. We simulated a human worker performing a maintenance operation on a transmission tower while monitored by a team of three UAVs (one filming and two lighting the worker). While the worker approached and climbed the tower, the system was commanded to perform a lateral shot followed by a sequence of fly-over shots.",
1395
+ "bbox": [
1396
+ 501,
1397
+ 506,
1398
+ 921,
1399
+ 656
1400
+ ],
1401
+ "page_idx": 6
1402
+ },
1403
+ {
1404
+ "type": "text",
1405
+ "text": "The fly-over shots were selected to film the operation as they impose relative motion between the camera and the target. This feature is regarded as richer from a cinematographic point of view. We further demonstrate how our method is able to execute these relative movements more aesthetically than a baseline approach where the specific term to smooth variations in camera angles has been removed (i.e., $\\alpha_{1} = 0$ in Problem 3). Fig. 6 compares the trajectories for the camera carrying UAV generated with both our method and the baseline approach. The baseline approach generates straight trajectories, whereas our method results in orbital trajectories, which have been used in the cinematography literature to produce more pleasant videos. For instance, [16], [17], [33] apply the arcball principle [34] to create a spherical surface around the target for aesthetic camera motion. We can also see in Fig. 6 that our method reduces the jerk of the camera angles. Note that in aerial cinematography literature, the jerk of the camera motion (third derivative of the angles) has been identified as a key aspect for shot quality [9], [35]. We measured the root mean",
1406
+ "bbox": [
1407
+ 501,
1408
+ 657,
1409
+ 921,
1410
+ 946
1411
+ ],
1412
+ "page_idx": 6
1413
+ },
1414
+ {
1415
+ "type": "page_number",
1416
+ "text": "6",
1417
+ "bbox": [
1418
+ 76,
1419
+ 31,
1420
+ 86,
1421
+ 39
1422
+ ],
1423
+ "page_idx": 6
1424
+ },
1425
+ {
1426
+ "type": "header",
1427
+ "text": "© IEEE, 2021. ACCEPTED TO IEEE RA-L. DOI: 10.1109/LRA.2021.3098811",
1428
+ "bbox": [
1429
+ 540,
1430
+ 29,
1431
+ 919,
1432
+ 41
1433
+ ],
1434
+ "page_idx": 6
1435
+ },
1436
+ {
1437
+ "type": "image",
1438
+ "img_path": "images/b6fb194e06b73a2d5ae24c1662e10947d15be9c6d9978dcc6b2a84c3942752b1.jpg",
1439
+ "image_caption": [],
1440
+ "image_footnote": [],
1441
+ "bbox": [
1442
+ 80,
1443
+ 75,
1444
+ 488,
1445
+ 244
1446
+ ],
1447
+ "page_idx": 7
1448
+ },
1449
+ {
1450
+ "type": "image",
1451
+ "img_path": "images/d09cf5cf34df4264f5c390e92936f42db5955c3c588dbb89a8c16e51113684eb.jpg",
1452
+ "image_caption": [
1453
+ "Fig. 6: Trajectories for the camera carrying UAV while monitoring a worker on a transmission tower. For simplicity, only the lateral shot and the first fly-over shot are shown. We compare the trajectories generated by our method (blue) with those from a baseline approach without cinematographic costs (green). The upper image displays a top view of the UAV's and target's trajectories. The small white dots on the trajectories depict transition points sampled every $5\\mathrm{s}$ to give a notion of the speed. The bottom image depicts the temporal evolution of the jerk of the camera angles"
1454
+ ],
1455
+ "image_footnote": [],
1456
+ "bbox": [
1457
+ 75,
1458
+ 247,
1459
+ 488,
1460
+ 345
1461
+ ],
1462
+ "page_idx": 7
1463
+ },
1464
+ {
1465
+ "type": "text",
1466
+ "text": "square of the jerk of $\\varphi$ and $\\xi$ along the full trajectories and obtained $0.0197\\mathrm{rad}\\mathrm{s}^{-3}$ and $0.0048\\mathrm{rad}\\mathrm{s}^{-3}$ , respectively, for our method; and $0.0265\\mathrm{rad}\\mathrm{s}^{-3}$ and $0.0075\\mathrm{rad}\\mathrm{s}^{-3}$ , respectively, for the baseline without the cinematographic cost term.",
1467
+ "bbox": [
1468
+ 73,
1469
+ 482,
1470
+ 491,
1471
+ 542
1472
+ ],
1473
+ "page_idx": 7
1474
+ },
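The reported jerk metric can be reproduced on one's own logs with third-order finite differences of the sampled camera angles, as sketched below; the synthetic angle trace only stands in for recorded data.

```python
import numpy as np

def rms_jerk(angles, dt):
    """Root mean square of the third derivative of a sampled angle signal,
    approximated with finite differences (units: rad s^-3)."""
    jerk = np.diff(angles, n=3) / dt**3
    return float(np.sqrt(np.mean(jerk**2)))

# Synthetic stand-in for a logged camera heading: a slow sweep plus ripple.
dt = 0.2
t = np.arange(0.0, 60.0, dt)
phi = 0.05 * t + 0.01 * np.sin(0.5 * t)
print(rms_jerk(phi, dt))   # small value; only orders of magnitude are meaningful
```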
1475
+ {
1476
+ "type": "text",
1477
+ "text": "Fig. 7 shows the trajectories followed by the whole UAV formation throughout the experiment to film the maintenance operation. It can be seen that the formation is properly maintained to avoid collisions between the UAVs and the tower, and to provide required lighting of the filmed object. Moreover, none of the UAVs appear in the camera's field of view. The temporal evolution of the deviations from the desired orientation of each light and their distance from entering the camera FoV during this simulation are shown in Fig. 8. A video of the complete simulation can be found at the site with multimedia materials.",
1478
+ "bbox": [
1479
+ 73,
1480
+ 542,
1481
+ 491,
1482
+ 708
1483
+ ],
1484
+ "page_idx": 7
1485
+ },
1486
+ {
1487
+ "type": "text",
1488
+ "text": "C. Simulation - Cluttered environment",
1489
+ "text_level": 1,
1490
+ "bbox": [
1491
+ 75,
1492
+ 729,
1493
+ 339,
1494
+ 743
1495
+ ],
1496
+ "page_idx": 7
1497
+ },
1498
+ {
1499
+ "type": "text",
1500
+ "text": "The aim of this simulation was to demonstrate the performance of our method for trajectory planning in a cluttered environment while assessing its scalability with numerous obstacles. We simulated a forest-like scenario with multiple trees as obstacles. As a human target walks through the forest, the filming UAV executes a chase shot from behind while the lighting UAVs follow the leader side by side. Fig. 9 depicts the distribution of the obstacles around the forest and the generated trajectories for the UAVs. In this figure, it is visible that the UAVs were able to follow the human in formation and to simultaneously avoid obstacles.",
1501
+ "bbox": [
1502
+ 73,
1503
+ 748,
1504
+ 490,
1505
+ 912
1506
+ ],
1507
+ "page_idx": 7
1508
+ },
1509
+ {
1510
+ "type": "text",
1511
+ "text": "Finally, we analyze the scalability of our method in terms of computational demand. Simulations were run with a 4-",
1512
+ "bbox": [
1513
+ 73,
1514
+ 914,
1515
+ 491,
1516
+ 945
1517
+ ],
1518
+ "page_idx": 7
1519
+ },
1520
+ {
1521
+ "type": "image",
1522
+ "img_path": "images/8d1e9cc4caef6ea641aeb8ec088b3f43043a48d9172d29edf00e7dccfa995599.jpg",
1523
+ "image_caption": [
1524
+ "Fig. 7: An illustration of the experiment where an operator is filmed working on a transmission tower. The trajectories of the camera carrying leader (orange), both followers carrying lights (blue and green), and the human worker (red) are shown. The obstacle map is represented by a point cloud, including the power lines and tower. The worker is tracked with a lateral shot as he walks to the tower and then with a sequence of fly-over shots while he climbs up. Several onboard images taken during the experiment are also shown."
1525
+ ],
1526
+ "image_footnote": [],
1527
+ "bbox": [
1528
+ 506,
1529
+ 79,
1530
+ 919,
1531
+ 220
1532
+ ],
1533
+ "page_idx": 7
1534
+ },
1535
+ {
1536
+ "type": "image",
1537
+ "img_path": "images/3bc01194f0cb3a741948dbe2cde4c3f80842fc8c38d13a91c491946e1ebff249.jpg",
1538
+ "image_caption": [
1539
+ "Fig. 8: Temporal evolution of the distance $d_{F}$ of UAVs carrying lights from entering the camera FoV, deviation from desired heading $\\varphi_{d}$ and deviation from desired pitch angle of light $\\xi_{d}$ ."
1540
+ ],
1541
+ "image_footnote": [],
1542
+ "bbox": [
1543
+ 509,
1544
+ 348,
1545
+ 916,
1546
+ 484
1547
+ ],
1548
+ "page_idx": 7
1549
+ },
1550
+ {
1551
+ "type": "text",
1552
+ "text": "core Intel(R) Core(TM) i7-10510U CPU @ 1.80 GHz. Table I shows the results of our method that correspond to the total planning time for each iteration that was run on the leader UAV. As expected, most time was spent during the non-convex optimization step described in Section III-B. The results for the followers are not included because they skip this non-convex optimization and thus, consume less time. The results are similar for the two simulations, although the second scenario was significantly more cluttered.",
1553
+ "bbox": [
1554
+ 501,
1555
+ 540,
1556
+ 921,
1557
+ 676
1558
+ ],
1559
+ "page_idx": 7
1560
+ },
1561
+ {
1562
+ "type": "text",
1563
+ "text": "Since the map of the environment is transformed into safe corridors made of convex polyhedrons, cluttered environments do not represent an increase in the computational demands of the trajectory optimization method. Therefore, we are able to plan the leader's trajectories at a rate of $1\\mathrm{Hz}$ with horizon lengths of 8s. This rate is adequate for real-time performance in the dynamic scenarios that we target. The lower computational complexity required to generate the initial trajectories of the followers allows us to plan follower's trajectories at a higher rate of $2\\mathrm{Hz}$ , enabling faster reactions to changes of the leader's behaviour and thus a more efficient mutual collision avoidance.",
1564
+ "bbox": [
1565
+ 501,
1566
+ 676,
1567
+ 921,
1568
+ 857
1569
+ ],
1570
+ "page_idx": 7
1571
+ },
1572
+ {
1573
+ "type": "text",
1574
+ "text": "D. Real world experiment",
1575
+ "text_level": 1,
1576
+ "bbox": [
1577
+ 504,
1578
+ 878,
1579
+ 687,
1580
+ 893
1581
+ ],
1582
+ "page_idx": 7
1583
+ },
1584
+ {
1585
+ "type": "text",
1586
+ "text": "In order to demonstrate our method, we performed field experiments generating trajectories for a real team of UAVs (see Fig. 10). Thus, we proved the real-time performance of",
1587
+ "bbox": [
1588
+ 503,
1589
+ 898,
1590
+ 921,
1591
+ 945
1592
+ ],
1593
+ "page_idx": 7
1594
+ },
1595
+ {
1596
+ "type": "header",
1597
+ "text": "© IEEE, 2021. ACCEPTED TO IEEE RA-L. DOI: 10.1109/LRA.2021.3098811",
1598
+ "bbox": [
1599
+ 76,
1600
+ 29,
1601
+ 457,
1602
+ 41
1603
+ ],
1604
+ "page_idx": 7
1605
+ },
1606
+ {
1607
+ "type": "page_number",
1608
+ "text": "7",
1609
+ "bbox": [
1610
+ 911,
1611
+ 30,
1612
+ 919,
1613
+ 40
1614
+ ],
1615
+ "page_idx": 7
1616
+ },
1617
+ {
1618
+ "type": "text",
1619
+ "text": "TABLE I: The planning times of our method per iteration. The total average values are shown for the two experiments. The percentage of time consumed at each step is shown thereafter. ITG stands for the procedure indicated in Section III-B, SCG for procedure described in Section III-C and FTO for trajectory optimization described in Section III-D.",
1620
+ "bbox": [
1621
+ 73,
1622
+ 70,
1623
+ 491,
1624
+ 147
1625
+ ],
1626
+ "page_idx": 8
1627
+ },
1628
+ {
1629
+ "type": "table",
1630
+ "img_path": "images/28f9f7bbf1adc1a632f23745974ef4552df75264a1d83951db6a5f73d75baf65.jpg",
1631
+ "table_caption": [],
1632
+ "table_footnote": [],
1633
+ "table_body": "<table><tr><td rowspan=\"2\"></td><td colspan=\"4\">Time (s)</td></tr><tr><td>Total (Avg ± std)</td><td>ITG (%)</td><td>SCG (%)</td><td>FTO (%)</td></tr><tr><td>Tower</td><td>0.70923 ± 0.10557</td><td>70.9982</td><td>11.81564</td><td>17.18615</td></tr><tr><td>Forest</td><td>0.71274 ± 0.05792</td><td>72.41338</td><td>8.77989</td><td>18.80673</td></tr></table>",
1634
+ "bbox": [
1635
+ 78,
1636
+ 154,
1637
+ 491,
1638
+ 210
1639
+ ],
1640
+ "page_idx": 8
1641
+ },
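As a quick sanity check of how the per-iteration budget in Table I splits across the three stages, the snippet below converts the reported percentages for the tower scenario into absolute times (values rounded from the table):

```python
# Average planning time per iteration for the tower scenario (Table I, rounded values).
total_s = 0.70923
shares = {"ITG": 70.998, "SCG": 11.816, "FTO": 17.186}   # percentages from Table I
for stage, pct in shares.items():
    print(f"{stage}: {total_s * pct / 100.0:.3f} s")      # ITG ~0.504 s, SCG ~0.084 s, FTO ~0.122 s
```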
1642
+ {
1643
+ "type": "image",
1644
+ "img_path": "images/50d0fb859a238de77951221f2f947340ac9365db2284742f1c14b93f141b2ad2.jpg",
1645
+ "image_caption": [
1646
+ "Fig. 9: A top view of the trajectories generated in the cluttered forest scenario. The trajectories of the target (red), the leader (orange), and both followers (blue and green) are shown. The black dots represent trees."
1647
+ ],
1648
+ "image_footnote": [],
1649
+ "bbox": [
1650
+ 81,
1651
+ 223,
1652
+ 491,
1653
+ 325
1654
+ ],
1655
+ "page_idx": 8
1656
+ },
1657
+ {
1658
+ "type": "text",
1659
+ "text": "the proposed approach onboard. A sequence of shots was commanded to film a dynamic target in an outdoor scenario. A video of the experiment can be found at the multimedia material site.",
1660
+ "bbox": [
1661
+ 73,
1662
+ 387,
1663
+ 491,
1664
+ 446
1665
+ ],
1666
+ "page_idx": 8
1667
+ },
1668
+ {
1669
+ "type": "image",
1670
+ "img_path": "images/8f55c5ae58ba9e6cd6ae5be7eb9bc160e6b122fb9bb4756cb2df3d614f23d238.jpg",
1671
+ "image_caption": [
1672
+ "Fig. 10: A snapshot of a real experiment in an outdoor scenario. UAVs were localized using RTK GPS. The target location was generated from a noisy ground truth, without onboard visual detection."
1673
+ ],
1674
+ "image_footnote": [],
1675
+ "bbox": [
1676
+ 78,
1677
+ 457,
1678
+ 490,
1679
+ 563
1680
+ ],
1681
+ "page_idx": 8
1682
+ },
1683
+ {
1684
+ "type": "text",
1685
+ "text": "V. CONCLUSION",
1686
+ "text_level": 1,
1687
+ "bbox": [
1688
+ 212,
1689
+ 617,
1690
+ 354,
1691
+ 631
1692
+ ],
1693
+ "page_idx": 8
1694
+ },
1695
+ {
1696
+ "type": "text",
1697
+ "text": "This paper has presented a method for autonomous aerial cinematography with distributed lighting by a team of UAVs. We have proposed a novel methodology for multi-UAV trajectory planning, addressing non-linear cinematographic aspects and obstacle avoidance in separate optimization steps. We have demonstrated that the method is capable of generating smooth trajectories complying with aesthetic objectives for the filming UAV; and trajectories for the follower UAVs that allow them to keep a formation lighting the target properly and staying out of the camera FoV. Besides, our results indicate that we can plan trajectories in a distributed and online manner, and that the method is suitable for obstacle avoidance even in cluttered environments. As future work, we plan to address occlusions caused by obstacles within the camera FoV. Our idea is to compute the regions where these occlusions would take place and include them in the representation of the occupied space.",
1698
+ "bbox": [
1699
+ 73,
1700
+ 636,
1701
+ 491,
1702
+ 878
1703
+ ],
1704
+ "page_idx": 8
1705
+ },
1706
+ {
1707
+ "type": "text",
1708
+ "text": "REFERENCES",
1709
+ "text_level": 1,
1710
+ "bbox": [
1711
+ 235,
1712
+ 888,
1713
+ 331,
1714
+ 902
1715
+ ],
1716
+ "page_idx": 8
1717
+ },
1718
+ {
1719
+ "type": "ref_text",
1720
+ "text": "[1] I. Mademlis et al., \"High-level multiple-UAV cinematography tools for covering outdoor events,\" IEEE Trans. on Broadcasting, vol. 65, no. 3, pp. 627-635, 2019.",
1721
+ "bbox": [
1722
+ 81,
1723
+ 907,
1724
+ 491,
1725
+ 944
1726
+ ],
1727
+ "page_idx": 8
1728
+ },
1729
+ {
1730
+ "type": "list",
1731
+ "sub_type": "ref_text",
1732
+ "list_items": [
1733
+ "[2] A. Sabirova et al., \"Drone cinematography system design and new guideline model for scene objects interaction,\" in 2020 Int. Conf. Nonlinearity, Information and Robotics, 2020.",
1734
+ "[3] B. F. Jeon et al., \"Detection-Aware Trajectory Generation for a Drone Cinematographer,\" ArXiv e-prints.",
1735
+ "[4] L.-E. Caraballo et al., \"Autonomous planning for multiple aerial cinematographers,\" in IEEE/RSJ IROS, 2020.",
1736
+ "[5] P. Moreno et al., \"Aerial multi-camera robotic jib crane,\" IEEE RA-L, vol. 6, no. 2, pp. 4103-4108, 2021.",
1737
+ "[6] DJI, \"Mavic pro 2,\" 2018. [Online]. Available: www.dji.com/es/mavic",
1738
+ "[7] Skydio, \"Skydio 2,\" 2019. [Online]. Available: www.skydio.com",
1739
+ "[8] R. Bonatti et al., \"Towards a robust aerial cinematography platform: Localizing and tracking moving targets in unstructured environments,\" in IEEE/RSJ IROS, 2019.",
1740
+ "[9] R. Bonatti et al., \"Autonomous aerial cinematography in unstructured environments with learned artistic decision-making,\" JFR, vol. 37, no. 4, pp. 606-641, 2020.",
1741
+ "[10] T. Nageli et al., \"Real-time motion planning for aerial videography with dynamic obstacle avoidance and viewpoint optimization,\" IEEE RA-L, vol. 2, no. 3, pp. 1696-1703, 2017.",
1742
+ "[11] N. Passalis et al., \"Deep reinforcement learning for frontal view person shooting using drones,\" in IEEE EAIS, 2018.",
1743
+ "[12] Y. Dang et al., \"Imitation learning-based algorithm for drone cinematography system,\" IEEE Trans. Cogn. Devel. Syst., pp. 1-1, 2020.",
1744
+ "[13] C. Huang et al., \"Learning to capture a film-look video with a camera drone,\" in IEEE ICRA, 2019, pp. 1871-1877.",
1745
+ "[14] M. Gschwindt et al., \"Can a robot become a movie director? Learning artistic principles for aerial cinematography,\" in IEEE/RSJ IROS, 2019.",
1746
+ "[15] T. Nageli et al., \"Real-time planning for automated multi-view drone cinematography,\" ACM Trans. Graph., vol. 36, no. 4, pp. 1-10, 2017.",
1747
+ "[16] Q. Galvane et al., “Directing cinematographic drones,” ACM Trans. Graph., vol. 37, no. 3, pp. 1-18, 2018.",
1748
+ "[17] A. Bucker et al., “Do You See What I See? Coordinating multiple aerial cameras for robot cinematography,” arXiv, 2020.",
1749
+ "[18] A. Alcantara et al., \"Autonomous execution of cinematographic shots with multiple drones,\" IEEE Access, pp. 201300-201316, 2020.",
1750
+ "[19] ——, \"Optimal trajectory planning for cinematography with multiple unmanned aerial vehicles,\" RAS, vol. 140, p. 103778, 2021.",
1751
+ "[20] B. Hall, Understanding cinematography. Crowood, 2015.",
1752
+ "[21] P. Petracek et al., \"Dronument: System for Reliable Deployment of Micro Aerial Vehicles in Dark Areas of Large Historical Monuments,\" IEEE RA-L, vol. 5, no. 2, pp. 2078-2085, 2020.",
1753
+ "[22] M. Petrlik et al., \"A robust UAV system for operations in a constrained environment,\" IEEE RA-L, vol. 5, no. 2, pp. 2169-2176, 2020.",
1754
+ "[23] M.aska et al., \"Documentation of dark areas of large historical buildings by a formation of unmanned aerial vehicles using model predictive control,\" in IEEE ETFA, 2017.",
1755
+ "[24] V. Krátý et al., \"Autonomous reflectance transformation imaging by a team of unmanned aerial vehicles,\" IEEE RA-L, vol. 5, no. 2, pp. 2302-2309, 2020.",
1756
+ "[25] J. Chen et al., \"Online generation of collision-free trajectories for quadrotor flight in unknown cluttered environments,\" in IEEE ICRA, 2016.",
1757
+ "[26] K. Mohta et al., \"Fast, autonomous flight in GPS-denied and cluttered environments,\" JFR, vol. 35, no. 1, pp. 101-120, 2018.",
1758
+ "[27] S. Liu et al., \"Planning dynamically feasible trajectories for quadrotors using safe flight corridors in 3-D complex environments,\" IEEE RA-L, vol. 2, no. 3, pp. 1688-1695, 2017.",
1759
+ "[28] J. Tordesillas et al., “FASTER: Fast and Safe Trajectory Planner for Flights in Unknown Environments,” IEEE/RSJ IROS, 2019.",
1760
+ "[29] T. Báča et al., “The MRS UAV system: Pushing the frontiers of reproducible research, real-world deployment, and education with autonomous unmanned aerial vehicles,” JINT, vol. 26, 2021.",
1761
+ "[30] D. Harabor et al., \"Online graph pruning for pathfinding on grid maps.\" in AAAI Conf. on Artificial Intelligence, vol. 25, no. 1, 2011.",
1762
+ "[31] ——, “Improving jump point search,” in Int. Conf. on Automated Planning and Scheduling, vol. 24, no. 1, 2014.",
1763
+ "[32] B. Houska et al., \"ACADO Toolkit - An Open Source Framework for Automatic Control and Dynamic Optimization,\" Optimal Control Applications and Methods, vol. 32, no. 3, pp. 298-312, 2011.",
1764
+ "[33] C. Lino et al., \"Intuitive and Efficient Camera Control with the Toric Space,\" ACM Trans. Graph., 2015.",
1765
+ "[34] K. Shoemake, \"Arcball: a user interface for specifying three-dimensional orientation using a mouse,\" in Proceedings of Graphics Interface, 1992.",
1766
+ "[35] C. Gebhardt et al., \"Airways: Optimization-Based Planning of Quadrotor Trajectories according to High-Level User Goals,\" in Proceedings of the Conf. on Human Factors in Computing Systems, 2016."
1767
+ ],
1768
+ "bbox": [
1769
+ 506,
1770
+ 70,
1771
+ 921,
1772
+ 944
1773
+ ],
1774
+ "page_idx": 8
1775
+ },
1776
+ {
1777
+ "type": "page_number",
1778
+ "text": "8",
1779
+ "bbox": [
1780
+ 76,
1781
+ 31,
1782
+ 86,
1783
+ 39
1784
+ ],
1785
+ "page_idx": 8
1786
+ },
1787
+ {
1788
+ "type": "header",
1789
+ "text": "© IEEE, 2021. ACCEPTED TO IEEE RA-L. DOI: 10.1109/LRA.2021.3098811",
1790
+ "bbox": [
1791
+ 540,
1792
+ 29,
1793
+ 919,
1794
+ 41
1795
+ ],
1796
+ "page_idx": 8
1797
+ }
1798
+ ]
2303.01xxx/2303.01177/820e6330-cce6-4c34-a45a-1398d2625e6f_model.json ADDED
@@ -0,0 +1,2209 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "aside_text",
5
+ "bbox": [
6
+ 0.023,
7
+ 0.266,
8
+ 0.058,
9
+ 0.701
10
+ ],
11
+ "angle": 270,
12
+ "content": "arXiv:2303.01177v1 [cs.RO] 2 Mar 2023"
13
+ },
14
+ {
15
+ "type": "title",
16
+ "bbox": [
17
+ 0.418,
18
+ 0.425,
19
+ 0.581,
20
+ 0.447
21
+ ],
22
+ "angle": 0,
23
+ "content": "© 2021 IEEE"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.074,
29
+ 0.476,
30
+ 0.925,
31
+ 0.594
32
+ ],
33
+ "angle": 0,
34
+ "content": "Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting or republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works."
35
+ }
36
+ ],
37
+ [
38
+ {
39
+ "type": "header",
40
+ "bbox": [
41
+ 0.076,
42
+ 0.03,
43
+ 0.458,
44
+ 0.042
45
+ ],
46
+ "angle": 0,
47
+ "content": "© IEEE, 2021. ACCEPTED TO IEEE RA-L. DOI: 10.1109/LRA.2021.3098811"
48
+ },
49
+ {
50
+ "type": "page_number",
51
+ "bbox": [
52
+ 0.912,
53
+ 0.031,
54
+ 0.921,
55
+ 0.041
56
+ ],
57
+ "angle": 0,
58
+ "content": "1"
59
+ },
60
+ {
61
+ "type": "title",
62
+ "bbox": [
63
+ 0.137,
64
+ 0.071,
65
+ 0.864,
66
+ 0.171
67
+ ],
68
+ "angle": 0,
69
+ "content": "Autonomous Aerial Filming with Distributed Lighting by a Team of Unmanned Aerial Vehicles"
70
+ },
71
+ {
72
+ "type": "text",
73
+ "bbox": [
74
+ 0.08,
75
+ 0.18,
76
+ 0.916,
77
+ 0.201
78
+ ],
79
+ "angle": 0,
80
+ "content": "Vít Krátký<sup>1</sup>, Alfonso Alcántara<sup>2</sup>, Jesús Capitán<sup>2</sup>, Petr Štepan<sup>1</sup>, Martin Saska<sup>1</sup> and Aníbal Ollero<sup>2</sup>"
81
+ },
82
+ {
83
+ "type": "text",
84
+ "bbox": [
85
+ 0.074,
86
+ 0.267,
87
+ 0.493,
88
+ 0.481
89
+ ],
90
+ "angle": 0,
91
+ "content": "Abstract—This paper describes a method for autonomous aerial cinematography with distributed lighting by a team of unmanned aerial vehicles (UAVs). Although camera-carrying multirotor helicopters have become commonplace in cinematography, their usage is limited to scenarios with sufficient natural light or of lighting provided by static artificial lights. We propose to use a formation of unmanned aerial vehicles as a tool for filming a target under illumination from various directions, which is one of the fundamental techniques of traditional cinematography. We decompose the multi-UAV trajectory optimization problem to tackle non-linear cinematographic aspects and obstacle avoidance at separate stages, which allows us to re-plan in real time and react to changes in dynamic environments. The performance of our method has been evaluated in realistic simulation scenarios and field experiments, where we show how it increases the quality of the shots and that it is capable of planning safe trajectories even in cluttered environments."
92
+ },
93
+ {
94
+ "type": "text",
95
+ "bbox": [
96
+ 0.075,
97
+ 0.488,
98
+ 0.492,
99
+ 0.515
100
+ ],
101
+ "angle": 0,
102
+ "content": "Index Terms—Multi-Robot Systems, Aerial Systems: Applications, Motion and Path Planning"
103
+ },
104
+ {
105
+ "type": "title",
106
+ "bbox": [
107
+ 0.205,
108
+ 0.535,
109
+ 0.362,
110
+ 0.548
111
+ ],
112
+ "angle": 0,
113
+ "content": "I. INTRODUCTION"
114
+ },
115
+ {
116
+ "type": "text",
117
+ "bbox": [
118
+ 0.074,
119
+ 0.554,
120
+ 0.493,
121
+ 0.736
122
+ ],
123
+ "angle": 0,
124
+ "content": "T HE interest in Unmanned Aerial Vehicles (UAVs) for aerial photography and filming is growing fast [1]–[5]. This is mainly due to their manoeuvrability and the capacity to create unique shots when compared to standard cameras. The use of UAVs as flying cameras presents not only a remarkable potential for recreational cinematography, but also for the monitoring of inspection operations in outdoor infrastructures with complex access. For instance, the EU-funded project, AERIAL-CORE, proposes UAVs to surveil the safety of human workers during maintenance operations of electrical power lines (see Fig. 1). In this industrial setup, a high-quality video is key, as it is used by supervising ground"
125
+ },
126
+ {
127
+ "type": "text",
128
+ "bbox": [
129
+ 0.075,
130
+ 0.747,
131
+ 0.493,
132
+ 0.771
133
+ ],
134
+ "angle": 0,
135
+ "content": "Manuscript received: February 24, 2021; Revised May 26, 2021; Accepted July 3, 2021."
136
+ },
137
+ {
138
+ "type": "text",
139
+ "bbox": [
140
+ 0.075,
141
+ 0.771,
142
+ 0.493,
143
+ 0.862
144
+ ],
145
+ "angle": 0,
146
+ "content": "This paper was recommended for publication by Editor M. Ani Hsieh upon evaluation of the Associate Editor and Reviewers' comments. This work was supported by EU project AERIAL-CORE (H2020-2019-871479), by MULTICOP (US-1265072) in FEDER-Junta de Andalucia Programme, by project no. DG18P02OVV069 in program NAKI II, by CTU grant no SGS20/174/OHK3/3T/13, and by OP VVV funded project CZ.02.1.01/0.0/0.0/16 019/0000765 \"Research Center for Informatics\". (Vit Kratky and Alfonso Alcantara are co-first authors.)"
147
+ },
148
+ {
149
+ "type": "text",
150
+ "bbox": [
151
+ 0.075,
152
+ 0.862,
153
+ 0.493,
154
+ 0.896
155
+ ],
156
+ "angle": 0,
157
+ "content": "\\(^{1}\\)Vit Krátý, Petr Štepan, and Martin Sasha are with Faculty of Electrical Engineering, Czech Technical University in Prague, Czech Republic, {kratkvit|stepan|martin.saska}@fel.cyvut.cz."
158
+ },
159
+ {
160
+ "type": "text",
161
+ "bbox": [
162
+ 0.075,
163
+ 0.896,
164
+ 0.493,
165
+ 0.931
166
+ ],
167
+ "angle": 0,
168
+ "content": "\\(^{2}\\)Alfonso Alcantara, Jesús Capitan, and Aníbal Ollero are with GRVC Robotics Laboratory, University of Seville, Spain {aamarin|jcapitan|aollero}@us.es."
169
+ },
170
+ {
171
+ "type": "text",
172
+ "bbox": [
173
+ 0.075,
174
+ 0.932,
175
+ 0.363,
176
+ 0.945
177
+ ],
178
+ "angle": 0,
179
+ "content": "Digital Object Identifier (DOI): see top of this page."
180
+ },
181
+ {
182
+ "type": "image",
183
+ "bbox": [
184
+ 0.508,
185
+ 0.265,
186
+ 0.716,
187
+ 0.409
188
+ ],
189
+ "angle": 0,
190
+ "content": null
191
+ },
192
+ {
193
+ "type": "image",
194
+ "bbox": [
195
+ 0.716,
196
+ 0.267,
197
+ 0.92,
198
+ 0.409
199
+ ],
200
+ "angle": 0,
201
+ "content": null
202
+ },
203
+ {
204
+ "type": "image_caption",
205
+ "bbox": [
206
+ 0.503,
207
+ 0.414,
208
+ 0.923,
209
+ 0.505
210
+ ],
211
+ "angle": 0,
212
+ "content": "Fig. 1: UAV filming applications to provide external lighting; to capture smooth shots outdoors; and to monitor dangerous maintenance operations at electrical lines<sup>1</sup>. Pictures were obtained within AERIAL-CORE and DRONUMENT projects, for which the proposed technology is being developed. Videos of the work in this paper can be seen on the multimedia material page (http://mrs.felk.cvut.cz/papers/aerial-filming)."
213
+ },
214
+ {
215
+ "type": "text",
216
+ "bbox": [
217
+ 0.503,
218
+ 0.513,
219
+ 0.923,
220
+ 0.603
221
+ ],
222
+ "angle": 0,
223
+ "content": "operators to monitor safety during the maintenance work. Multi-UAV teams expand upon these possibilities as they could provide alternative points of view or even supplementary illumination. Similarly in our DRONUMENT project of NAKI II program, efficient variable illumination plays a key role for documentation of historical buildings interiors."
224
+ },
225
+ {
226
+ "type": "text",
227
+ "bbox": [
228
+ 0.503,
229
+ 0.604,
230
+ 0.923,
231
+ 0.86
232
+ ],
233
+ "angle": 0,
234
+ "content": "Proper lighting techniques are fundamental in bringing out details in an image and in creating more natural-looking film scenes. Thus, cinematography sets are packed with different lighting sources, as digital sensors are not as reactive to light as the human eye. This can also be relevant in monitoring maintenance operations scheduled at times of the day with poor illumination. Although aerial cinematography has been attractive to the scientific community as of late, lighting techniques have yet to be applied to improve the performance of filming. Filmmakers apply many types of lighting techniques making use of various equipment. In this work, we only consider direct lighting techniques that do not require additional equipment apart from light sources. Although static lights could produce more pleasant footage in some situations, we believe that UAVs are not optimal for this purpose. Therefore, we only use UAVs as dynamic sources of light to provide lighting to a dynamic scene."
235
+ },
236
+ {
237
+ "type": "text",
238
+ "bbox": [
239
+ 0.503,
240
+ 0.86,
241
+ 0.922,
242
+ 0.921
243
+ ],
244
+ "angle": 0,
245
+ "content": "In this context, navigating a team of UAVs for filming tasks with distributed lighting is complex. Smooth and safe trajectories are required to achieve pleasing shots that do not compromise safety in dynamic scenarios. We propose"
246
+ },
247
+ {
248
+ "type": "page_footnote",
249
+ "bbox": [
250
+ 0.506,
251
+ 0.931,
252
+ 0.631,
253
+ 0.945
254
+ ],
255
+ "angle": 0,
256
+ "content": "1 https://aerial-core.eu"
257
+ }
258
+ ],
259
+ [
260
+ {
261
+ "type": "page_number",
262
+ "bbox": [
263
+ 0.078,
264
+ 0.032,
265
+ 0.086,
266
+ 0.04
267
+ ],
268
+ "angle": 0,
269
+ "content": "2"
270
+ },
271
+ {
272
+ "type": "header",
273
+ "bbox": [
274
+ 0.542,
275
+ 0.031,
276
+ 0.919,
277
+ 0.041
278
+ ],
279
+ "angle": 0,
280
+ "content": "© IEEE, 2021. ACCEPTED TO IEEE RA-L. DOI: 10.1109/LRA.2021.3098811"
281
+ },
282
+ {
283
+ "type": "text",
284
+ "bbox": [
285
+ 0.079,
286
+ 0.07,
287
+ 0.49,
288
+ 0.264
289
+ ],
290
+ "angle": 0,
291
+ "content": "a method for online trajectory planning and execution with multiple UAVs. Our team obeys a leader-follower scheme where the formation leader carries an onboard camera to film a moving target and the followers generate trajectories that enable distributed lighting of the target, while maintaining desired lighting angles. We formulate a non-linear, optimization-based method that plans visually pleasant trajectories for the filming UAV and distributes the others in a specified formation. Moreover, we tackle safety by including a systematic framework for obstacle avoidance. Safe flight corridors for the UAVs are generated by forming sets of convex polyhedrons that model free space. Optimal and safe trajectories are thereafter computed within these convex sets."
292
+ },
293
+ {
294
+ "type": "title",
295
+ "bbox": [
296
+ 0.079,
297
+ 0.291,
298
+ 0.196,
299
+ 0.304
300
+ ],
301
+ "angle": 0,
302
+ "content": "A. Related works"
303
+ },
304
+ {
305
+ "type": "text",
306
+ "bbox": [
307
+ 0.079,
308
+ 0.311,
309
+ 0.49,
310
+ 0.656
311
+ ],
312
+ "angle": 0,
313
+ "content": "There have been several works focusing on filming dynamic scenes with a single UAV. Commercial products exist (e.g., DJI Mavic [6] or Skydio [7]) that implement semi-autonomous functionalities, such as auto-follow features for tracking an actor with collision avoidance. However, they do not address cinematographic principles. An integrated system for outdoor cinematography combining vision-based target localization with trajectory planning and collision avoidance has been proposed [8], [9]. Smoothness is achieved by minimizing trajectory jerk; shot quality by defining objective curves that fulfill relative angles with respect to the actor. Optimal trajectories for cinematography have also been computed in real-time through receding horizon optimization with non-linear constraints [10]. A user inputs framing objectives for the targets on the image to minimize errors on the image target projections, sizes, and relative viewing angles. Some authors have approached UAV cinematography by applying machine learning [11], [12]. Particularly, such techniques have been applied to demonstrations imitating professional cameraman's behaviors [13] or for reinforcement learning to achieve visually pleasant shots [14]. These works have presented valuable results for online trajectory planning, although they have not addressed the specific complexities for multi-UAV systems."
314
+ },
315
+ {
316
+ "type": "text",
317
+ "bbox": [
318
+ 0.079,
319
+ 0.659,
320
+ 0.49,
321
+ 0.944
322
+ ],
323
+ "angle": 0,
324
+ "content": "Regarding the methodology for multiple UAVs, a non-linear optimization problem was solved in receding horizon in [15], where collision avoidance to filmed actors and mutual collisions of UAVs were considered. Aesthetic objectives are introduced by the user as virtual reference trails. A specific camera parameter space is proposed in [16] to ensure cinematographic properties and to fulfill dynamic constraints along the trajectories. The motion of multiple UAVs around dynamic targets is coordinated through a centralized master-slave approach. A greedy framework for multi-UAV camera coordination is proposed in [17]. A decentralized planner computes UAV trajectories considering smoothness, shot diversity, collision avoidance, and mutual visibility. We have also addressed the trajectory planning for multi-UAV cinematography in previous work. We presented an architecture to execute cinematographic shots (with different types of camera motion) using multiple UAVs [18] and developed a distributed method to plan optimal trajectories reducing jerky camera movements [19]. In this paper, our focus is on the specifics of outdoor and dynamic"
325
+ },
326
+ {
327
+ "type": "text",
328
+ "bbox": [
329
+ 0.509,
330
+ 0.07,
331
+ 0.919,
332
+ 0.159
333
+ ],
334
+ "angle": 0,
335
+ "content": "settings when compared to indoor scenarios [15]. Therefore, we have integrated obstacle avoidance in a more fundamental manner using local maps. Moreover, a novel problem with respect to previous work has been introduced, as we perform scene illumination with multiple UAVs to increase the quality of image shots."
336
+ },
337
+ {
338
+ "type": "text",
339
+ "bbox": [
340
+ 0.509,
341
+ 0.161,
342
+ 0.919,
343
+ 0.416
344
+ ],
345
+ "angle": 0,
346
+ "content": "The modification of lighting angles to improve images is fundamental in cinematography [20]. A camera with an onboard light on a UAV can compensate for insufficient lighting, but positioning lights at different angles with respect to the camera axis would require several UAVs. Despite the unquestionable importance of lighting for shot quality, its usage for aerial cinematography is not well-studied. Utilizing UAVs to provide supplementary illumination has been proposed for building documentation tasks [21] or tunnel inspection [22]. A formation with a filming UAV and others carrying lights was deployed to document the overshadowed parts of historical buildings [23]. A similar system has been used to carry out specialized documentation techniques [24]. However, these works have proposed lighting for tasks in static scenes, whereas the present paper deals with filming of moving targets in dynamic and potentially cluttered environments, e.g., to monitor inspection operations in large outdoor infrastructures."
347
+ },
348
+ {
349
+ "type": "text",
350
+ "bbox": [
351
+ 0.509,
352
+ 0.418,
353
+ 0.919,
354
+ 0.628
355
+ ],
356
+ "angle": 0,
357
+ "content": "In order to guarantee safe trajectories in multi-UAV cinematography, most works [15], [16], [19] only consider collision avoidance with actors, other UAVs, or static objects that can be modelled with previously known no-fly zones. The work in [9] integrates local mapping with onboard sensors to penalize proximity to obstacles and solves an unconstrained optimization problem. Another approach to obstacle avoidance applied for standard UAV trajectory planning is to create a convex representation of free space via a set of linear inequality constraints [25]-[28], to obtain a QP formulation for real-time motion planning. We have been inspired by these single-UAV works to develop a fundamental framework for the representation of obstacles in our non-linear optimization problem for multi-UAV cinematography."
358
+ },
359
+ {
360
+ "type": "title",
361
+ "bbox": [
362
+ 0.509,
363
+ 0.651,
364
+ 0.621,
365
+ 0.665
366
+ ],
367
+ "angle": 0,
368
+ "content": "B. Contributions"
369
+ },
370
+ {
371
+ "type": "text",
372
+ "bbox": [
373
+ 0.525,
374
+ 0.671,
375
+ 0.91,
376
+ 0.685
377
+ ],
378
+ "angle": 0,
379
+ "content": "Our main contributions are summarized as the following:"
380
+ },
381
+ {
382
+ "type": "text",
383
+ "bbox": [
384
+ 0.525,
385
+ 0.689,
386
+ 0.919,
387
+ 0.824
388
+ ],
389
+ "angle": 0,
390
+ "content": "- We formulate a novel optimization problem for aerial filming with distributed lighting. Using a leader-follower scheme, we plan and execute trajectories in a distributed manner. Optimization is run in receding horizon to compute smooth trajectories with pleasing footage for the UAV filming (the leader), which takes shots of a dynamic target indicated by an external user. The followers compute their trajectories to maintain a formation with specified lighting angles on the target."
391
+ },
392
+ {
393
+ "type": "text",
394
+ "bbox": [
395
+ 0.525,
396
+ 0.825,
397
+ 0.919,
398
+ 0.944
399
+ ],
400
+ "angle": 0,
401
+ "content": "- We propose a new method to tackle non-convex trajectory optimization with obstacle avoidance in real time. We decompose the problem in two parts. Non-linear kinematical aspects are formulated in a problem without obstacle avoidance to generate reference trajectories. These are used to generate collision-free regions which are convex and to transform the problem into a final QP optimization task."
402
+ },
403
+ {
404
+ "type": "list",
405
+ "bbox": [
406
+ 0.525,
407
+ 0.689,
408
+ 0.919,
409
+ 0.944
410
+ ],
411
+ "angle": 0,
412
+ "content": null
413
+ }
414
+ ],
415
+ [
416
+ {
417
+ "type": "header",
418
+ "bbox": [
419
+ 0.077,
420
+ 0.03,
421
+ 0.458,
422
+ 0.042
423
+ ],
424
+ "angle": 0,
425
+ "content": "© IEEE, 2021. ACCEPTED TO IEEE RA-L. DOI: 10.1109/LRA.2021.3098811"
426
+ },
427
+ {
428
+ "type": "page_number",
429
+ "bbox": [
430
+ 0.912,
431
+ 0.031,
432
+ 0.921,
433
+ 0.041
434
+ ],
435
+ "angle": 0,
436
+ "content": "3"
437
+ },
438
+ {
439
+ "type": "text",
440
+ "bbox": [
441
+ 0.092,
442
+ 0.07,
443
+ 0.493,
444
+ 0.19
445
+ ],
446
+ "angle": 0,
447
+ "content": "- We present experimental results for different types of cinematographic shots. We prove that our method is capable of computing smooth trajectories for reducing jerky movements and show that the distributed formation improves the illumination of footage. The system is evaluated with field experiments and also in various realistic simulated scenarios, including the filming of a moving target in a cluttered environment."
448
+ },
449
+ {
450
+ "type": "title",
451
+ "bbox": [
452
+ 0.186,
453
+ 0.206,
454
+ 0.381,
455
+ 0.22
456
+ ],
457
+ "angle": 0,
458
+ "content": "II. SYSTEM OVERVIEW"
459
+ },
460
+ {
461
+ "type": "text",
462
+ "bbox": [
463
+ 0.074,
464
+ 0.224,
465
+ 0.492,
466
+ 0.436
467
+ ],
468
+ "angle": 0,
469
+ "content": "Fig. 2 depicts the architecture of the entire system. The leader UAV carries a camera for filming while several others carry light sources to provide proper illumination. A human director specifies the cinematographic parameters for the scene. These parameters include the shot type (i.e., the camera motion relative to the target), the camera shooting angle for the leader, and the desired lighting angles for the followers. This information, together with an estimation of the target trajectory, is used to generate reference trajectories for the UAVs (Section III-B). These initial trajectories do not consider obstacle avoidance, but only cinematographic aspects. The leader attempts to execute the commanded shot smoothly, whereas the followers maintain a surrounding formation with the desired lighting angles."
470
+ },
471
+ {
472
+ "type": "text",
473
+ "bbox": [
474
+ 0.074,
475
+ 0.436,
476
+ 0.491,
477
+ 0.616
478
+ ],
479
+ "angle": 0,
480
+ "content": "Safety is ensured by integrating information from a local map for collision avoidance (Section III-C). Firstly, a collision-free path is generated for each UAV using the map and the initial cinematographic trajectories as guidelines. Then, a safe corridor along each of these paths is computed, consisting of a set of obstacle-free polyhedrons generated by the convex decomposition of free space (see Fig. 5). Finally, the UAV trajectories are obtained as a result of a trajectory optimization process that computes dynamically feasible trajectories inside each safe corridor (Section III-D). Inter-UAV collision avoidance is achieved by including the team-mates planned trajectories as obstacles in the map."
481
+ },
482
+ {
483
+ "type": "text",
484
+ "bbox": [
485
+ 0.074,
486
+ 0.617,
487
+ 0.493,
488
+ 0.768
489
+ ],
490
+ "angle": 0,
491
+ "content": "The entire pipeline shown in Fig. 2 (except for the Human director component) runs on board each UAV in a receding horizon manner. This enables the online planning to react properly to changes in the behavior of the target being filmed, as well as to malfunctioning team-members or previously unseen obstacles. Note that either the Cinematographic trajectory generator or the Lighting trajectory generator is activated on each UAV, depending on whether it carries a camera or light. The component for trajectory tracking on each UAV is the low-level control pipeline described in [29]."
492
+ },
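A highly schematic sketch of the receding-horizon loop described above is given below; every stage function is a hypothetical placeholder standing in for a module of Fig. 2 (reference generation, collision-free path, safe corridor, trajectory optimization, tracking), not the authors' implementation.

```python
import time

def onboard_planning_loop(rate_hz, shot_params,
                          get_target_estimate, get_local_map,
                          generate_reference, build_collision_free_path,
                          build_safe_corridor, optimize_trajectory, track):
    """Schematic receding-horizon loop run on board each UAV.

    All callables are hypothetical placeholders for the modules of Fig. 2;
    only the control flow (reference -> collision-free path -> safe corridor
    -> optimized trajectory -> low-level tracker) mirrors the description.
    """
    period = 1.0 / rate_hz                       # e.g. 1 Hz for the leader, 2 Hz for followers
    while True:
        t_start = time.monotonic()
        target_traj = get_target_estimate()      # T_T
        local_map = get_local_map()              # obstacles plus team-mates' planned trajectories
        reference = generate_reference(shot_params, target_traj)   # D_L / D_F
        path = build_collision_free_path(reference, local_map)     # P_L / P_F
        corridor = build_safe_corridor(path, local_map)            # S_L / S_F
        trajectory = optimize_trajectory(reference, corridor)      # T_L / T_F
        track(trajectory)                        # handed over to the trajectory tracker [29]
        time.sleep(max(0.0, period - (time.monotonic() - t_start)))
```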
493
+ {
494
+ "type": "title",
495
+ "bbox": [
496
+ 0.086,
497
+ 0.783,
498
+ 0.482,
499
+ 0.797
500
+ ],
501
+ "angle": 0,
502
+ "content": "III. AUTONOMOUS AERIAL CINEMATOGRAPHY"
503
+ },
504
+ {
505
+ "type": "text",
506
+ "bbox": [
507
+ 0.075,
508
+ 0.802,
509
+ 0.492,
510
+ 0.879
511
+ ],
512
+ "angle": 0,
513
+ "content": "In this section, we begin by detailing the UAV dynamic model (Section III-A). Then, we describe our procedure to generate optimal and safe trajectories for each UAV (Sections III-B, III-C, and III-D). Lastly, we explain how the orientation of a UAV is controlled (Section III-E)."
514
+ },
515
+ {
516
+ "type": "title",
517
+ "bbox": [
518
+ 0.075,
519
+ 0.896,
520
+ 0.382,
521
+ 0.911
522
+ ],
523
+ "angle": 0,
524
+ "content": "A. Multi-rotor aerial vehicle dynamic model"
525
+ },
526
+ {
527
+ "type": "text",
528
+ "bbox": [
529
+ 0.075,
530
+ 0.915,
531
+ 0.492,
532
+ 0.946
533
+ ],
534
+ "angle": 0,
535
+ "content": "An independent trajectory tracker [29] for UAV attitude control is used, which allows for planning with a simplified"
536
+ },
537
+ {
538
+ "type": "text",
539
+ "bbox": [
540
+ 0.503,
541
+ 0.069,
542
+ 0.923,
543
+ 0.236
544
+ ],
545
+ "angle": 0,
546
+ "content": "positional dynamic UAV model. In addition, the orientation of the camera or light source onboard (depending on the UAV) needs to be modelled. We assume the existence of a gimbal mechanism to compensate angle deviations due to changes in UAV attitude. Therefore, it is assumed that camera roll is negligible and we only control pitch and heading. Since the heading of a multi-rotor vehicle can be controlled independently of its position, we fix the relative position between the camera/light and the UAV to always point forward and control its heading through the UAV heading. The positional part of the dynamic model is defined as a linear double integrator:"
547
+ },
548
+ {
549
+ "type": "equation",
550
+ "bbox": [
551
+ 0.686,
552
+ 0.24,
553
+ 0.921,
554
+ 0.263
555
+ ],
556
+ "angle": 0,
557
+ "content": "\\[\n\\begin{array}{l} \\dot {\\mathbf {p}} = \\mathbf {v}, \\\\ \\cdot \\end{array} \\tag {1}\n\\]"
558
+ },
559
+ {
560
+ "type": "equation",
561
+ "bbox": [
562
+ 0.688,
563
+ 0.261,
564
+ 0.74,
565
+ 0.272
566
+ ],
567
+ "angle": 0,
568
+ "content": "\\[\n\\dot {\\mathbf {v}} = \\mathbf {a},\n\\]"
569
+ },
570
+ {
571
+ "type": "text",
572
+ "bbox": [
573
+ 0.504,
574
+ 0.277,
575
+ 0.922,
576
+ 0.338
577
+ ],
578
+ "angle": 0,
579
+ "content": "where \\(\\mathbf{p} = [p_x p_y p_z]^T \\in \\mathbb{R}^3\\) is the UAV position, \\(\\mathbf{v} = [v_x v_y v_z]^T \\in \\mathbb{R}^3\\) the linear velocity, and \\(\\mathbf{a} = [a_x a_y a_z]^T \\in \\mathbb{R}^3\\) the linear acceleration. The orientation of the camera/light may be modelled similarly:"
580
+ },
581
+ {
582
+ "type": "equation",
583
+ "bbox": [
584
+ 0.687,
585
+ 0.342,
586
+ 0.921,
587
+ 0.365
588
+ ],
589
+ "angle": 0,
590
+ "content": "\\[\n\\begin{array}{l} \\dot {\\mathbf {o}} = \\boldsymbol {\\omega}, \\\\ \\therefore \\quad \\mathbf {o} \\end{array} \\tag {2}\n\\]"
591
+ },
592
+ {
593
+ "type": "equation",
594
+ "bbox": [
595
+ 0.687,
596
+ 0.362,
597
+ 0.739,
598
+ 0.375
599
+ ],
600
+ "angle": 0,
601
+ "content": "\\[\n\\dot {\\boldsymbol {\\omega}} = \\boldsymbol {\\theta},\n\\]"
602
+ },
603
+ {
604
+ "type": "text",
605
+ "bbox": [
606
+ 0.503,
607
+ 0.38,
608
+ 0.922,
609
+ 0.502
610
+ ],
611
+ "angle": 0,
612
+ "content": "where \\(\\mathbf{o} = [\\varphi \\xi]^T\\) represents an orientation with respect to a global frame given by its heading and pitch angles, \\(\\omega \\in \\mathbb{R}^2\\) are the corresponding angular rates, and \\(\\theta \\in \\mathbb{R}^2\\) the angular accelerations. For the description of the proposed method, we define a full positional state of the UAV \\(\\mathbf{x}_p = [\\mathbf{p}^T\\mathbf{v}^T]^T \\in \\mathbb{R}^6\\), a vector of positional control inputs \\(\\mathbf{u}_p = \\mathbf{a}\\), an orientation state \\(\\mathbf{x}_o = [\\mathbf{o}^T\\boldsymbol{\\omega}^T]^T \\in \\mathbb{R}^4\\), and a vector of orientation control inputs \\(\\mathbf{u}_o = \\boldsymbol{\\theta}\\)."
613
+ },
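A minimal discrete-time sketch of these double-integrator models is shown below, assuming a simple constant-acceleration step per sampling interval; the paper's footnote mentions Runge-Kutta integration, so this is only an illustrative simplification.

```python
import numpy as np

def step_position(x_p, u_p, dt):
    """One discrete step of the positional double integrator (1).

    x_p : stacked [p, v] with shape (6,), u_p : acceleration a with shape (3,).
    A constant-acceleration step replaces the Runge-Kutta scheme of the paper.
    """
    p, v = x_p[:3], x_p[3:]
    return np.concatenate([p + v * dt + 0.5 * u_p * dt**2, v + u_p * dt])

def step_orientation(x_o, u_o, dt):
    """One discrete step of the camera/light orientation model (2).

    x_o : [heading, pitch, heading_rate, pitch_rate], u_o : angular accelerations (2,).
    """
    o, w = x_o[:2], x_o[2:]
    return np.concatenate([o + w * dt + 0.5 * u_o * dt**2, w + u_o * dt])

# Example: start at rest and apply 1 m/s^2 forward acceleration for one 0.2 s step.
print(step_position(np.zeros(6), np.array([1.0, 0.0, 0.0]), 0.2))
```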
614
+ {
615
+ "type": "title",
616
+ "bbox": [
617
+ 0.505,
618
+ 0.521,
619
+ 0.774,
620
+ 0.537
621
+ ],
622
+ "angle": 0,
623
+ "content": "B. Generation of reference trajectories"
624
+ },
625
+ {
626
+ "type": "text",
627
+ "bbox": [
628
+ 0.503,
629
+ 0.54,
630
+ 0.922,
631
+ 0.691
632
+ ],
633
+ "angle": 0,
634
+ "content": "The first step of our method for trajectory planning is to generate a reference trajectory \\( D_{j} \\) for each UAV \\( j \\). The problem complexity is alleviated by removing collision avoidance constraints and focusing only on kinematographic aspects. For the filming UAV, the objective is to reach a position relative to the target as provided by the shot type \\( C_s \\), while minimizing changes in the camera angle to produce pleasant images. A specific camera shooting angle \\( \\psi_d \\) over the target needs to be maintained. The following non-linear optimization problem is formulated for the filming UAV:"
635
+ },
636
+ {
637
+ "type": "equation",
638
+ "bbox": [
639
+ 0.534,
640
+ 0.694,
641
+ 0.921,
642
+ 0.735
643
+ ],
644
+ "angle": 0,
645
+ "content": "\\[\n\\underset {\\mathbf {u} _ {0}, \\dots , \\mathbf {u} _ {\\mathrm {N} - 1}} {\\text {m i n i m i z e}} \\sum_ {k = 1} ^ {N} \\left(\\left\\| \\mathbf {u} _ {k - 1} \\right\\| ^ {2} + \\alpha_ {1} J _ {\\psi , k}\\right) + \\alpha_ {2} J _ {N}, \\tag {3}\n\\]"
646
+ },
647
+ {
648
+ "type": "text",
649
+ "bbox": [
650
+ 0.52,
651
+ 0.737,
652
+ 0.921,
653
+ 0.754
654
+ ],
655
+ "angle": 0,
656
+ "content": "subject to \\(\\mathbf{x}_0 = \\mathbf{x}'\\) (3.a)"
657
+ },
658
+ {
659
+ "type": "equation",
660
+ "bbox": [
661
+ 0.573,
662
+ 0.756,
663
+ 0.921,
664
+ 0.773
665
+ ],
666
+ "angle": 0,
667
+ "content": "\\[\n\\mathbf {x} _ {k + 1} = \\mathrm {f} _ {p} \\left(\\mathbf {x} _ {k}, \\mathbf {u} _ {k}\\right) \\quad \\forall k \\in \\{0, \\dots , N - 1 \\}, \\tag {3.b}\n\\]"
668
+ },
669
+ {
670
+ "type": "equation",
671
+ "bbox": [
672
+ 0.574,
673
+ 0.775,
674
+ 0.921,
675
+ 0.791
676
+ ],
677
+ "angle": 0,
678
+ "content": "\\[\n\\mathbf {v} _ {\\min } \\leq \\mathbf {v} _ {k} \\leq \\mathbf {v} _ {\\max } \\quad \\forall k \\in \\{1, \\dots , N \\}, \\tag {3.c}\n\\]"
679
+ },
680
+ {
681
+ "type": "equation",
682
+ "bbox": [
683
+ 0.573,
684
+ 0.794,
685
+ 0.921,
686
+ 0.81
687
+ ],
688
+ "angle": 0,
689
+ "content": "\\[\n\\mathbf {u} _ {\\min } \\leq \\mathbf {u} _ {k} \\leq \\mathbf {u} _ {\\max } \\quad \\forall k \\in \\{0, \\dots , N - 1 \\}, \\tag {3.d}\n\\]"
690
+ },
691
+ {
692
+ "type": "equation",
693
+ "bbox": [
694
+ 0.565,
695
+ 0.813,
696
+ 0.921,
697
+ 0.829
698
+ ],
699
+ "angle": 0,
700
+ "content": "\\[\nq _ {z, \\min } \\leq q _ {z, k} \\quad \\forall k \\in \\{1, \\dots , N \\}, \\tag {3.e}\n\\]"
701
+ },
702
+ {
703
+ "type": "text",
704
+ "bbox": [
705
+ 0.503,
706
+ 0.833,
707
+ 0.921,
708
+ 0.878
709
+ ],
710
+ "angle": 0,
711
+ "content": "where \\( \\mathrm{f}_p(\\cdot) \\) represents the positional part of the dynamic model defined in Section III-A; \\( \\mathbf{v}_{min} \\), \\( \\mathbf{v}_{max} \\) are velocity limitations; and \\( \\mathbf{u}_{min} \\), \\( \\mathbf{u}_{max} \\) control inputs limitations."
712
+ },
713
+ {
714
+ "type": "text",
715
+ "bbox": [
716
+ 0.504,
717
+ 0.878,
718
+ 0.922,
719
+ 0.91
720
+ ],
721
+ "angle": 0,
722
+ "content": "The first two terms in the cost function pursue smooth trajectories by penalizing UAV accelerations and reducing"
723
+ },
724
+ {
725
+ "type": "page_footnote",
726
+ "bbox": [
727
+ 0.504,
728
+ 0.919,
729
+ 0.922,
730
+ 0.946
731
+ ],
732
+ "angle": 0,
733
+ "content": "2 For simplicity of description, \\(\\mathbf{x} := \\mathbf{x}_p\\), and \\(\\mathbf{u} := \\mathbf{u}_p\\). We use the Runge-Kutta method for numerical integration."
734
+ }
735
+ ],
736
+ [
737
+ {
738
+ "type": "page_number",
739
+ "bbox": [
740
+ 0.077,
741
+ 0.032,
742
+ 0.088,
743
+ 0.041
744
+ ],
745
+ "angle": 0,
746
+ "content": "4"
747
+ },
748
+ {
749
+ "type": "header",
750
+ "bbox": [
751
+ 0.541,
752
+ 0.03,
753
+ 0.92,
754
+ 0.042
755
+ ],
756
+ "angle": 0,
757
+ "content": "© IEEE, 2021. ACCEPTED TO IEEE RA-L. DOI: 10.1109/LRA.2021.3098811"
758
+ },
759
+ {
760
+ "type": "image",
761
+ "bbox": [
762
+ 0.08,
763
+ 0.071,
764
+ 0.905,
765
+ 0.192
766
+ ],
767
+ "angle": 0,
768
+ "content": null
769
+ },
770
+ {
771
+ "type": "image_caption",
772
+ "bbox": [
773
+ 0.074,
774
+ 0.198,
775
+ 0.925,
776
+ 0.263
777
+ ],
778
+ "angle": 0,
779
+ "content": "Fig. 2: The architecture of the proposed system. \\( C_s \\) and \\( C_l \\) represent the desired type of cinematographic shot and lighting configuration specified by a human director; \\( T_T \\) is the target estimated trajectory; \\( D_L \\), \\( D_F \\) are reference trajectories for the leader UAV and the follower UAVs, respectively; \\( P_L \\), \\( P_F \\) are collision-free paths generated along the desired trajectories; \\( S_L \\), \\( S_F \\) are safe corridors along the collision-free paths; and \\( T_L \\), \\( T_F \\) are optimized trajectories for the camera and lighting UAVs, respectively. The modules enclosed in the blue rectangle run on both types of UAVs."
780
+ },
781
+ {
782
+ "type": "text",
783
+ "bbox": [
784
+ 0.074,
785
+ 0.276,
786
+ 0.492,
787
+ 0.395
788
+ ],
789
+ "angle": 0,
790
+ "content": "gimbal movements. The director specifies an aesthetic objective through the desired camera shooting angle \\(\\psi_d\\) to film the target (see Fig. 3). Emphasis is given on positioning the UAV to keep this angle constant without moving the gimbal. In doing so, the angular changes in the gimbal are reduced to favor less jerky camera motion and therefore, pleasant footage. In order to define \\(J_{\\psi}\\), the relative position between the UAV camera and the target is introduced as:"
791
+ },
792
+ {
793
+ "type": "equation",
794
+ "bbox": [
795
+ 0.17,
796
+ 0.399,
797
+ 0.491,
798
+ 0.42
799
+ ],
800
+ "angle": 0,
801
+ "content": "\\[\n\\mathbf {q} = \\left[ \\begin{array}{l l l} q _ {x} & q _ {y} & q _ {z} \\end{array} \\right] ^ {T} = \\mathbf {p} _ {L} - \\mathbf {p} _ {T}. \\tag {4}\n\\]"
802
+ },
803
+ {
804
+ "type": "text",
805
+ "bbox": [
806
+ 0.075,
807
+ 0.423,
808
+ 0.236,
809
+ 0.437
810
+ ],
811
+ "angle": 0,
812
+ "content": "Then, we define \\(J_{\\psi}\\) as:"
813
+ },
814
+ {
815
+ "type": "equation",
816
+ "bbox": [
817
+ 0.152,
818
+ 0.441,
819
+ 0.491,
820
+ 0.493
821
+ ],
822
+ "angle": 0,
823
+ "content": "\\[\nJ _ {\\psi , k} = \\left(\\tan \\left(\\psi_ {d}\\right) - \\frac {q _ {z , k}}{\\sqrt {q _ {x , k} ^ {2} + q _ {y , k} ^ {2}}}\\right) ^ {2}. \\tag {5}\n\\]"
824
+ },
825
+ {
826
+ "type": "text",
827
+ "bbox": [
828
+ 0.074,
829
+ 0.495,
830
+ 0.491,
831
+ 0.661
832
+ ],
833
+ "angle": 0,
834
+ "content": "The target position is predicted within the time horizon with a motion model (a constant speed model in our experiments). Prediction errors are tackled by recomputing UAV trajectories with a receding horizon. By minimizing the previous cost, we implicitly minimize variations in camera pitch angle as the relative pitch with respect to the target is kept constant. Moreover, the camera heading corresponds with the UAV heading, whose variations are also smoothed as explained in Section III-D. Therefore, the idea is to generate UAV trajectories where the gimbal only needs to move slightly to compensate for small disturbances."
835
+ },
836
+ {
837
+ "type": "text",
838
+ "bbox": [
839
+ 0.074,
840
+ 0.689,
841
+ 0.492,
842
+ 0.812
843
+ ],
844
+ "angle": 0,
845
+ "content": "The terminal cost \\( J_{N} = ||\\mathbf{x}_{xy,d} - \\mathbf{x}_{xy,N}||^{2} \\) guides the UAV to a desired state imposed by the shot type, e.g., at a certain distance beside the target's final position in a lateral shot. Note that a final UAV height is not imposed, as we want the planner to compute the optimal \\( p_{z} \\) to maintain the camera shooting angle commanded by the director. Lastly, the constraint (3.e) establishes a minimum distance above the target for safety purposes."
846
+ },
847
+ {
848
+ "type": "text",
849
+ "bbox": [
850
+ 0.074,
851
+ 0.839,
852
+ 0.492,
853
+ 0.947
854
+ ],
855
+ "angle": 0,
856
+ "content": "The reference trajectories for the lighting UAVs are computed to achieve a desired leader-follower formation around the target. The desired position of the followers is influenced by the corresponding leader position \\(\\mathbf{p}_L\\) and camera orientation \\(\\mathbf{o}_L\\), the target position \\(\\mathbf{p}_T\\), the desired lighting angles of \\(j\\)-th light \\(\\chi_j\\) and \\(\\varrho_j\\), and the desired distance of the light to the target \\(d_j\\). The desired position of \\(j\\)-th follower \\(\\mathbf{p}_j\\) is then"
857
+ },
858
+ {
859
+ "type": "image",
860
+ "bbox": [
861
+ 0.527,
862
+ 0.279,
863
+ 0.818,
864
+ 0.425
865
+ ],
866
+ "angle": 0,
867
+ "content": null
868
+ },
869
+ {
870
+ "type": "image_caption",
871
+ "bbox": [
872
+ 0.504,
873
+ 0.43,
874
+ 0.924,
875
+ 0.457
876
+ ],
877
+ "angle": 0,
878
+ "content": "Fig. 3: Reference frames and camera shooting angle. The origins of the camera and UAV frames coincide."
879
+ },
880
+ {
881
+ "type": "text",
882
+ "bbox": [
883
+ 0.505,
884
+ 0.485,
885
+ 0.661,
886
+ 0.5
887
+ ],
888
+ "angle": 0,
889
+ "content": "given by the equation:"
890
+ },
891
+ {
892
+ "type": "equation",
893
+ "bbox": [
894
+ 0.587,
895
+ 0.501,
896
+ 0.921,
897
+ 0.55
898
+ ],
899
+ "angle": 0,
900
+ "content": "\\[\n\\mathbf {p} _ {j} = \\mathbf {p} _ {T} + d _ {j} \\left[ \\begin{array}{c} - \\cos (\\varphi_ {j}) \\cos (\\xi_ {j}) \\\\ - \\sin (\\varphi_ {j}) \\cos (\\xi_ {j}) \\\\ \\sin (\\xi_ {j}) \\end{array} \\right], \\tag {6}\n\\]"
901
+ },
902
+ {
903
+ "type": "text",
904
+ "bbox": [
905
+ 0.504,
906
+ 0.551,
907
+ 0.922,
908
+ 0.657
909
+ ],
910
+ "angle": 0,
911
+ "content": "where \\(\\varphi_{j} = \\varphi_{L} + \\chi_{j}\\) and \\(\\xi_{j} = \\xi_{L} + \\varrho_{j}\\) are desired lighting angles relative to the camera's optical axis (see Fig. 4). To avoid jumps in the desired followers' positions caused by quick changes in the target position (e.g., due to a transition to a new target), a virtual target, located in front of the camera at a certain distance along its optical axis, is used. The position of this virtual target is given by:"
912
+ },
913
+ {
914
+ "type": "equation",
915
+ "bbox": [
916
+ 0.591,
917
+ 0.659,
918
+ 0.921,
919
+ 0.707
920
+ ],
921
+ "angle": 0,
922
+ "content": "\\[\n\\mathbf {p} _ {v} = \\mathbf {p} _ {L} + d _ {v} \\left[ \\begin{array}{c} \\cos \\left(\\varphi_ {L}\\right) \\cos \\left(\\xi_ {L}\\right) \\\\ \\sin \\left(\\varphi_ {L}\\right) \\cos \\left(\\xi_ {L}\\right) \\\\ \\sin \\left(\\xi_ {L}\\right) \\end{array} \\right], \\tag {7}\n\\]"
923
+ },
924
+ {
925
+ "type": "text",
926
+ "bbox": [
927
+ 0.503,
928
+ 0.709,
929
+ 0.921,
930
+ 0.814
931
+ ],
932
+ "angle": 0,
933
+ "content": "where \\( d_v \\) is the desired distance between the virtual target and the camera center and \\( \\mathbf{p}_v \\) denotes the virtual target position. Substituting position \\( \\mathbf{p}_v \\) for \\( \\mathbf{p}_T \\) in (6), a more consistent formation scheme is acquired, where less aggressive maneuvers are required; and the lighting always focuses on the scene in front of the camera, which is relevant in obtaining pleasant videos."
934
+ },
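A minimal numeric sketch of the leader-follower geometry of (6) and (7), with a virtual target placed d_v metres along the camera optical axis and a follower placed at lighting angles chi_j, rho_j relative to that axis at distance d_j, could be:

```python
import numpy as np

def virtual_target(p_L, heading_L, pitch_L, d_v):
    """Virtual target position (7): d_v metres in front of the camera along its optical axis."""
    axis = np.array([np.cos(heading_L) * np.cos(pitch_L),
                     np.sin(heading_L) * np.cos(pitch_L),
                     np.sin(pitch_L)])
    return np.asarray(p_L, dtype=float) + d_v * axis

def follower_desired_position(p_ref, heading_L, pitch_L, chi_j, rho_j, d_j):
    """Desired position of follower j from (6), with p_ref the (virtual) target position
    and (chi_j, rho_j) the lighting angles relative to the camera optical axis."""
    heading_j = heading_L + chi_j
    pitch_j = pitch_L + rho_j
    offset = np.array([-np.cos(heading_j) * np.cos(pitch_j),
                       -np.sin(heading_j) * np.cos(pitch_j),
                        np.sin(pitch_j)])
    return np.asarray(p_ref, dtype=float) + d_j * offset

# Example: camera heading north, pitched 10 deg down, one light 30 deg to the side and 25 deg up.
p_v = virtual_target([0.0, 0.0, 3.0], np.deg2rad(90.0), np.deg2rad(-10.0), d_v=5.0)
print(follower_desired_position(p_v, np.deg2rad(90.0), np.deg2rad(-10.0),
                                np.deg2rad(30.0), np.deg2rad(25.0), d_j=6.0))
```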
935
+ {
936
+ "type": "title",
937
+ "bbox": [
938
+ 0.505,
939
+ 0.835,
940
+ 0.726,
941
+ 0.85
942
+ ],
943
+ "angle": 0,
944
+ "content": "C. Generation of safe corridors"
945
+ },
946
+ {
947
+ "type": "text",
948
+ "bbox": [
949
+ 0.503,
950
+ 0.854,
951
+ 0.922,
952
+ 0.947
953
+ ],
954
+ "angle": 0,
955
+ "content": "The initial reference trajectories are computed without considering obstacles. They are, therefore, used as seed to generate a safe corridor \\( S_{j} \\) for each UAV \\( j \\) where collision-free trajectories can then be computed. Firstly, we convert each trajectory \\( D_{j} \\) into a collision-free path \\( P_{j} \\). We iterate over each of the \\( N \\) waypoints in \\( D_{j} \\) and add it directly to \\( P_{j} \\) if"
956
+ }
957
+ ],
958
+ [
959
+ {
960
+ "type": "header",
961
+ "bbox": [
962
+ 0.077,
963
+ 0.03,
964
+ 0.458,
965
+ 0.042
966
+ ],
967
+ "angle": 0,
968
+ "content": "© IEEE, 2021. ACCEPTED TO IEEE RA-L. DOI: 10.1109/LRA.2021.3098811"
969
+ },
970
+ {
971
+ "type": "page_number",
972
+ "bbox": [
973
+ 0.912,
974
+ 0.031,
975
+ 0.921,
976
+ 0.041
977
+ ],
978
+ "angle": 0,
979
+ "content": "5"
980
+ },
981
+ {
982
+ "type": "image",
983
+ "bbox": [
984
+ 0.082,
985
+ 0.077,
986
+ 0.294,
987
+ 0.187
988
+ ],
989
+ "angle": 0,
990
+ "content": null
991
+ },
992
+ {
993
+ "type": "image_caption",
994
+ "bbox": [
995
+ 0.148,
996
+ 0.189,
997
+ 0.232,
998
+ 0.203
999
+ ],
1000
+ "angle": 0,
1001
+ "content": "(a) top view"
1002
+ },
1003
+ {
1004
+ "type": "image",
1005
+ "bbox": [
1006
+ 0.324,
1007
+ 0.075,
1008
+ 0.468,
1009
+ 0.185
1010
+ ],
1011
+ "angle": 0,
1012
+ "content": null
1013
+ },
1014
+ {
1015
+ "type": "image_caption",
1016
+ "bbox": [
1017
+ 0.348,
1018
+ 0.189,
1019
+ 0.439,
1020
+ 0.203
1021
+ ],
1022
+ "angle": 0,
1023
+ "content": "(b) side view"
1024
+ },
1025
+ {
1026
+ "type": "image_caption",
1027
+ "bbox": [
1028
+ 0.128,
1029
+ 0.205,
1030
+ 0.439,
1031
+ 0.22
1032
+ ],
1033
+ "angle": 0,
1034
+ "content": "Fig. 4: The leader-follower scheme defined by (6)."
1035
+ },
1036
+ {
1037
+ "type": "text",
1038
+ "bbox": [
1039
+ 0.074,
1040
+ 0.245,
1041
+ 0.492,
1042
+ 0.365
1043
+ ],
1044
+ "angle": 0,
1045
+ "content": "it is collision-free. Otherwise, we label the previous collision-free waypoint as \\( A \\) and keep moving along \\( D_{j} \\) until we find the next collision-free waypoint \\( B \\). Then, we try to find an alternative collision-free path from \\( A \\) to \\( B \\), append it to \\( P_{j} \\), and continue iterating. For that alternative path, we use the Jump Point Search (JPS) algorithm introduced in [30], [31] and extended to 3D in [27]. Real-time performance is ensured by introducing a timeout for the JPS path search."
1046
+ },
1047
+ {
1048
+ "type": "text",
1049
+ "bbox": [
1050
+ 0.074,
1051
+ 0.366,
1052
+ 0.491,
1053
+ 0.637
1054
+ ],
1055
+ "angle": 0,
1056
+ "content": "If the JPS algorithm fails to find a path within the given timeout from \\( A \\) to \\( B \\), we run it again to connect \\( A \\) directly to the last waypoint in \\( D_{j} \\) (let this waypoint be \\( C \\)). If this is not found either, we append to \\( P_{j} \\) the path to the node closest to \\( C \\) from all those expanded during the JPS search. Once completed, \\( P_{j} \\) consists of an arbitrary number of points equal to or greater than \\( N \\). Since \\( P_{j} \\) is used for the generation of the safety corridors for particular points in \\( D_{j} \\), it is postprocessed so that \\( |P_{j}| = |D_{j}| = N \\). \\( P_{j} \\) is sampled so that the waypoint distribution is close to the initial points in \\( D_{j} \\). Since these collision-free paths are used as a guide for trajectory optimization in subsequent steps, the distance sampling step \\( d_{s} \\) is limited to help avoid the dynamic infeasibility of the final trajectories. If the sampled \\( P_{j} \\) consists of more than \\( N \\) waypoints, the overflowing points are discarded for the subsequent steps of the trajectory optimization process. The process to create a collision-free path \\( P_{j} \\) and its corresponding safe corridor \\( S_{j} \\) is illustrated in Fig. 5."
1057
+ },
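A rough Python sketch of this path-seeding step, assuming hypothetical `is_free` (occupancy check) and `plan_path` (timeout-limited JPS or any other planner) callables; the fallback towards the final waypoint and the handling of overflowing points are omitted for brevity.

```python
import numpy as np

def build_collision_free_path(D, is_free, plan_path):
    """Convert a reference trajectory D (list of 3D waypoints) into a collision-free
    path P, bridging occupied segments with a path planner (JPS in the paper;
    `plan_path` is a hypothetical stand-in here)."""
    P, i = [], 0
    while i < len(D):
        if is_free(D[i]):
            P.append(D[i]); i += 1
            continue
        A = P[-1] if P else D[0]          # last collision-free waypoint
        j = i
        while j < len(D) and not is_free(D[j]):
            j += 1                        # find the next collision-free waypoint B
        B = D[j] if j < len(D) else D[-1]
        P.extend(plan_path(A, B))         # alternative collision-free segment A -> B
        i = j
    return P

def resample(P, d_s, n_max):
    """Resample P so consecutive waypoints are at most d_s apart; keep n_max points."""
    out = [P[0]]
    for p in P[1:]:
        while np.linalg.norm(p - out[-1]) > d_s:
            out.append(out[-1] + d_s * (p - out[-1]) / np.linalg.norm(p - out[-1]))
        out.append(p)
    return out[:n_max]
```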
1058
+ {
1059
+ "type": "text",
1060
+ "bbox": [
1061
+ 0.074,
1062
+ 0.638,
1063
+ 0.493,
1064
+ 0.879
1065
+ ],
1066
+ "angle": 0,
1067
+ "content": "Safe corridors are generated around the collision-free paths with the UAV's initial position prepended (i.e., \\( N + 1 \\) waypoints), using a map of the environment represented by a point cloud \\( O_{pcl} \\) and the convex decomposition method proposed in [27]. This method is based on an iterative procedure for the generation of polyhedrons. It begins by inflating an ellipsoid aligned with each path segment. In the next step, tangent planes are constructed at the contact points between the ellipsoid and any obstacles. Afterwards, all points lying behind these planes are removed from \\( O_{pcl} \\). The next iteration then starts by inflating the ellipsoid up to the nearest remaining point in \\( O_{pcl} \\). This procedure terminates when there are no remaining points in \\( O_{pcl} \\). The generated tangent planes define an obstacle-free polyhedron \\( \\mathcal{P} \\) enclosing the corresponding path segment, and the set of all polyhedrons along the path constitutes the safe corridor."
1068
+ },
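The resulting corridor is just a sequence of convex polyhedra, each stored as linear inequalities A·x ≤ b. A small illustrative sketch of how such a corridor could be represented and queried (the convex decomposition of [27] that actually produces A and b is not reproduced here; all values are assumed):

```python
import numpy as np
from dataclasses import dataclass

@dataclass
class Polyhedron:
    A: np.ndarray  # (m, 3) outward plane normals
    b: np.ndarray  # (m,)   plane offsets; free space is A @ x <= b

    def contains(self, x, tol=1e-9):
        return bool(np.all(self.A @ x <= self.b + tol))

# A safe corridor is one polyhedron per path segment; waypoint k of the optimized
# trajectory must stay inside the polyhedron of its segment.
def corridor_ok(trajectory, corridor):
    return all(poly.contains(p) for p, poly in zip(trajectory, corridor))

# Illustrative axis-aligned box as a trivial polyhedron (assumed bounds).
box = Polyhedron(A=np.vstack([np.eye(3), -np.eye(3)]),
                 b=np.array([5.0, 5.0, 3.0, 0.0, 0.0, 0.0]))
print(box.contains(np.array([1.0, 2.0, 1.5])))  # True
```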
1069
+ {
1070
+ "type": "title",
1071
+ "bbox": [
1072
+ 0.075,
1073
+ 0.896,
1074
+ 0.262,
1075
+ 0.911
1076
+ ],
1077
+ "angle": 0,
1078
+ "content": "D. Trajectory optimization"
1079
+ },
1080
+ {
1081
+ "type": "text",
1082
+ "bbox": [
1083
+ 0.075,
1084
+ 0.915,
1085
+ 0.492,
1086
+ 0.947
1087
+ ],
1088
+ "angle": 0,
1089
+ "content": "Given a collision-free path \\( P \\) and its corresponding safe corridor \\( S \\), a final optimal trajectory is computed through a"
1090
+ },
1091
+ {
1092
+ "type": "image",
1093
+ "bbox": [
1094
+ 0.505,
1095
+ 0.071,
1096
+ 0.924,
1097
+ 0.25
1098
+ ],
1099
+ "angle": 0,
1100
+ "content": null
1101
+ },
1102
+ {
1103
+ "type": "image_caption",
1104
+ "bbox": [
1105
+ 0.503,
1106
+ 0.254,
1107
+ 0.922,
1108
+ 0.319
1109
+ ],
1110
+ "angle": 0,
1111
+ "content": "Fig. 5: The safe corridor generation process. The initial reference trajectory (green) is converted into a collision-free path (purple), and the obstacle-free polyhedrons are generated along this path. The final optimized trajectory within the safe corridor is also shown (blue). We inflate the obstacles for safety purposes (light red)."
1112
+ },
1113
+ {
1114
+ "type": "text",
1115
+ "bbox": [
1116
+ 0.503,
1117
+ 0.324,
1118
+ 0.922,
1119
+ 0.371
1120
+ ],
1121
+ "angle": 0,
1122
+ "content": "QP problem in receding horizon. The particular optimization task \\({}^{3}\\) attempts to track a desired trajectory \\(\\mathbf{p}_d\\) corresponding to the reference trajectory \\(D_{j}\\):"
1123
+ },
1124
+ {
1125
+ "type": "equation",
1126
+ "bbox": [
1127
+ 0.54,
1128
+ 0.374,
1129
+ 0.921,
1130
+ 0.415
1131
+ ],
1132
+ "angle": 0,
1133
+ "content": "\\[\n\\underset {\\mathbf {u} _ {0}, \\dots , \\mathbf {u} _ {N - 1}} {\\text {minimize}} \\sum_ {k = 1} ^ {N} \\left(\\left\\| \\mathbf {p} _ {d, k} - \\mathbf {p} _ {k} \\right\\| ^ {2} + \\beta \\left\\| \\mathbf {u} _ {k - 1} \\right\\| ^ {2}\\right), \\tag {8}\n\\]"
1134
+ },
1135
+ {
1136
+ "type": "text",
1137
+ "bbox": [
1138
+ 0.525,
1139
+ 0.417,
1140
+ 0.921,
1141
+ 0.433
1142
+ ],
1143
+ "angle": 0,
1144
+ "content": "subject to \\(\\mathbf{x}_0 = \\mathbf{x}'\\) (8.a)"
1145
+ },
1146
+ {
1147
+ "type": "equation",
1148
+ "bbox": [
1149
+ 0.577,
1150
+ 0.436,
1151
+ 0.92,
1152
+ 0.453
1153
+ ],
1154
+ "angle": 0,
1155
+ "content": "\\[\n\\mathbf {x} _ {k + 1} = \\mathrm {f} _ {p} \\left(\\mathbf {x} _ {k}, \\mathbf {u} _ {k}\\right) \\quad \\forall k \\in \\{0, \\dots , N - 1 \\}, \\tag {8.b}\n\\]"
1156
+ },
1157
+ {
1158
+ "type": "equation",
1159
+ "bbox": [
1160
+ 0.577,
1161
+ 0.455,
1162
+ 0.921,
1163
+ 0.471
1164
+ ],
1165
+ "angle": 0,
1166
+ "content": "\\[\n\\mathbf {v} _ {\\min } \\leq \\mathbf {v} _ {k} \\leq \\mathbf {v} _ {\\max } \\forall k \\in \\{1, \\dots , N \\}, \\tag {8.c}\n\\]"
1167
+ },
1168
+ {
1169
+ "type": "equation",
1170
+ "bbox": [
1171
+ 0.577,
1172
+ 0.474,
1173
+ 0.921,
1174
+ 0.49
1175
+ ],
1176
+ "angle": 0,
1177
+ "content": "\\[\n\\mathbf {u} _ {\\min } \\leq \\mathbf {u} _ {k} \\leq \\mathbf {u} _ {\\max } \\forall k \\in \\{0, \\dots , N - 1 \\}, \\tag {8.d}\n\\]"
1178
+ },
1179
+ {
1180
+ "type": "equation",
1181
+ "bbox": [
1182
+ 0.594,
1183
+ 0.492,
1184
+ 0.921,
1185
+ 0.509
1186
+ ],
1187
+ "angle": 0,
1188
+ "content": "\\[\n\\mathbf {p} _ {k} \\in \\mathcal {P} _ {k} \\quad \\forall k \\in \\{1, \\dots , N \\}, \\tag {8.e}\n\\]"
1189
+ },
1190
+ {
1191
+ "type": "text",
1192
+ "bbox": [
1193
+ 0.503,
1194
+ 0.513,
1195
+ 0.92,
1196
+ 0.633
1197
+ ],
1198
+ "angle": 0,
1199
+ "content": "where \\( \\mathrm{f}_p(\\cdot) \\) represents the positional part of the dynamic model defined in Section III-A; \\( \\mathbf{v}_{min} \\), \\( \\mathbf{v}_{max} \\) are velocity limits; \\( \\mathbf{u}_{min} \\), \\( \\mathbf{u}_{max} \\) are control input limits; and \\( \\mathcal{P}_k \\) is a convex polyhedron representing the free space associated with the \\( k \\)-th transition point. The last constraint ensures that the resulting trajectory is collision-free. Since constraint (8.e) can be decoupled into a set of linear constraints, the problem becomes a convex quadratic program."
1200
+ },
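For intuition, a QP of the form (8) can be prototyped in a few lines with CVXPY, using a discretized double integrator as f_p and a single axis-aligned box as a stand-in for the per-step polyhedra; the horizon, weights, and limits below are assumed values, and the paper's actual implementation relies on the ACADO Toolkit [32] in C++ rather than this sketch.

```python
import numpy as np
import cvxpy as cp

N, dt, beta = 20, 0.2, 0.1                       # horizon, step, smoothness weight (assumed)
p = cp.Variable((N + 1, 3)); v = cp.Variable((N + 1, 3)); u = cp.Variable((N, 3))
p_d = np.tile(np.array([5.0, 0.0, 2.0]), (N + 1, 1))   # desired (reference) positions

x0_p, x0_v = np.zeros(3), np.zeros(3)
cons = [p[0] == x0_p, v[0] == x0_v]              # (8.a) initial state
for k in range(N):                               # (8.b) double-integrator dynamics
    cons += [p[k + 1] == p[k] + dt * v[k] + 0.5 * dt**2 * u[k],
             v[k + 1] == v[k] + dt * u[k]]
cons += [cp.abs(v[1:]) <= 3.0, cp.abs(u) <= 2.0] # (8.c), (8.d) box limits
# (8.e): per-step polyhedron A_k p_k <= b_k; here one shared box for brevity
A = np.vstack([np.eye(3), -np.eye(3)]); b = np.array([10, 10, 5, 10, 10, 0.5])
cons += [A @ p[k] <= b for k in range(1, N + 1)]

cost = cp.sum_squares(p[1:] - p_d[1:]) + beta * cp.sum_squares(u)
cp.Problem(cp.Minimize(cost), cons).solve()
print(np.round(p.value[:3], 2))
```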
1201
+ {
1202
+ "type": "text",
1203
+ "bbox": [
1204
+ 0.503,
1205
+ 0.634,
1206
+ 0.922,
1207
+ 0.921
1208
+ ],
1209
+ "angle": 0,
1210
+ "content": "The optimization formulation is the same for both the leader and follower UAVs. However, there are a couple of relevant differences. First, the desired reference trajectories are computed in a different manner, following either filming or lighting criteria (see Section III-B). Second, the followers encode mutual collision avoidance through constraint (8.e). To prevent negative effects on the cinematographic quality of the performed shot, the entirety of mutual collision avoidance is left to the followers. A fixed priority scheme is defined for the UAVs, and the occupied space \\( O_{pcl} \\) of each follower is updated with the current planned trajectories from the leader and other followers of a higher priority. \\( O_{pcl} \\) is updated with spherical objects of the desired collision avoidance radius at each waypoint of the UAV trajectories to be avoided. A similar procedure is followed to incorporate the target's predicted trajectory (also for the leader in this case). To maintain real-time performance, the occupied space \\( O_{pcl} \\) is assumed to be static over a given horizon, but it is updated at each planning step, accommodating all static and dynamic obstacles."
1211
+ },
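One possible way to realize this priority-based avoidance is to stamp spheres of the avoidance radius into the obstacle point cloud at every waypoint of the higher-priority plans. The sketch below is only illustrative: the sample count, radius, and function names are assumptions, not the system's actual interface.

```python
import numpy as np

def add_trajectory_as_obstacles(o_pcl, planned_waypoints, radius, n_pts=40, rng=None):
    """Append points sampled on a sphere of the avoidance radius around every
    waypoint of a higher-priority teammate's (or the target's) planned trajectory."""
    rng = rng or np.random.default_rng(0)
    spheres = []
    for w in planned_waypoints:
        dirs = rng.normal(size=(n_pts, 3))
        dirs /= np.linalg.norm(dirs, axis=1, keepdims=True)
        spheres.append(w + radius * dirs)
    return np.vstack([o_pcl] + spheres) if spheres else o_pcl

# Illustrative usage: a follower adds the leader's planned waypoints before planning.
o_pcl = np.zeros((0, 3))
leader_plan = [np.array([k * 0.5, 0.0, 2.0]) for k in range(5)]
o_pcl = add_trajectory_as_obstacles(o_pcl, leader_plan, radius=1.5)
print(o_pcl.shape)
```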
1212
+ {
1213
+ "type": "page_footnote",
1214
+ "bbox": [
1215
+ 0.505,
1216
+ 0.93,
1217
+ 0.812,
1218
+ 0.946
1219
+ ],
1220
+ "angle": 0,
1221
+ "content": "3 For simplicity of description, \\(\\mathbf{x} \\coloneqq \\mathbf{x}_p\\), and \\(\\mathbf{u} \\coloneqq \\mathbf{u}_p\\)."
1222
+ }
1223
+ ],
1224
+ [
1225
+ {
1226
+ "type": "page_number",
1227
+ "bbox": [
1228
+ 0.077,
1229
+ 0.032,
1230
+ 0.087,
1231
+ 0.04
1232
+ ],
1233
+ "angle": 0,
1234
+ "content": "6"
1235
+ },
1236
+ {
1237
+ "type": "header",
1238
+ "bbox": [
1239
+ 0.541,
1240
+ 0.03,
1241
+ 0.92,
1242
+ 0.042
1243
+ ],
1244
+ "angle": 0,
1245
+ "content": "© IEEE, 2021. ACCEPTED TO IEEE RA-L. DOI: 10.1109/LRA.2021.3098811"
1246
+ },
1247
+ {
1248
+ "type": "text",
1249
+ "bbox": [
1250
+ 0.074,
1251
+ 0.07,
1252
+ 0.493,
1253
+ 0.295
1254
+ ],
1255
+ "angle": 0,
1256
+ "content": "Another crucial issue for the applications of multi-UAV cinematography is how to prevent other UAVs from appearing in the Field of View (FoV) of the filming UAV. However, including this in the optimization task as either a constraint or a cost term can considerably increase the complexity of the problem. We considered including the FoV of the leader camera as an obstacle in the local maps of the followers, so that they may avoid it. Even so, relatively small changes in camera orientation could result in significant changes in the map representation and lead to unstable planned trajectories. Therefore, the camera's FoV is avoided by the lighting UAVs only through penalizing deviations from the desired trajectories \\(\\mathbf{p}_d\\). Thus, FoV avoidance is mostly determined by the choice of lighting parameters that describe the desired formation."
1257
+ },
1258
+ {
1259
+ "type": "text",
1260
+ "bbox": [
1261
+ 0.074,
1262
+ 0.297,
1263
+ 0.493,
1264
+ 0.479
1265
+ ],
1266
+ "angle": 0,
1267
+ "content": "Finally, occlusions caused by obstacles in the FoV of the camera or the lights are also a relevant aspect when filming. Occlusion throughout a significant part of the shot renders the shot useless, and in the case of onboard detection of the target, it also disables target following. However in most cases, occlusions are temporary and avoiding them is always a trade-off between significant deviation from the desired cinematographic trajectory and having part of the video occluded. In this work, the trajectories are generated so that they are close to the desired cinematographic shots specified by a director. The possible occlusions have to be resolved by redefining the shot to be performed."
1268
+ },
1269
+ {
1270
+ "type": "title",
1271
+ "bbox": [
1272
+ 0.075,
1273
+ 0.509,
1274
+ 0.233,
1275
+ 0.523
1276
+ ],
1277
+ "angle": 0,
1278
+ "content": "E. Orientation control"
1279
+ },
1280
+ {
1281
+ "type": "text",
1282
+ "bbox": [
1283
+ 0.074,
1284
+ 0.531,
1285
+ 0.491,
1286
+ 0.577
1287
+ ],
1288
+ "angle": 0,
1289
+ "content": "In this application, both the camera and the light sources need to always be pointing at the filmed target. Hence, their desired orientation is given by:"
1290
+ },
1291
+ {
1292
+ "type": "equation",
1293
+ "bbox": [
1294
+ 0.118,
1295
+ 0.579,
1296
+ 0.49,
1297
+ 0.608
1298
+ ],
1299
+ "angle": 0,
1300
+ "content": "\\[\n\\mathbf {o} _ {d} = \\left[ \\varphi_ {d} \\;\\; \\xi_ {d} \\right] ^ {T} = \\left[ \\arctan \\left(q _ {y}, q _ {x}\\right) \\;\\; \\arcsin \\left(\\frac {q _ {z}}{\\left\\| q \\right\\|}\\right) \\right] ^ {T}. \\tag {9}\n\\]"
1301
+ },
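Equation (9) is simply the heading and pitch of the line of sight towards the target; a tiny sketch (not from the paper's code, variable names assumed):

```python
import numpy as np

def desired_orientation(p_uav, p_target):
    """Desired heading and pitch (eq. 9) so the camera/light points at the target."""
    q = p_target - p_uav
    phi_d = np.arctan2(q[1], q[0])               # heading toward the target
    xi_d = np.arcsin(q[2] / np.linalg.norm(q))   # pitch toward the target
    return phi_d, xi_d

print(desired_orientation(np.array([0.0, 0.0, 3.0]), np.array([4.0, 2.0, 1.0])))
```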
1302
+ {
1303
+ "type": "text",
1304
+ "bbox": [
1305
+ 0.074,
1306
+ 0.61,
1307
+ 0.493,
1308
+ 0.683
1309
+ ],
1310
+ "angle": 0,
1311
+ "content": "Orientation control is also formulated as a constrained quadratic optimization problem in receding horizon in order to achieve smoother orientation changes. For simplicity of description, \\(\\mathbf{x} \\coloneqq \\mathbf{x}_o\\) and \\(\\mathbf{u} \\coloneqq \\mathbf{u}_o\\) in the following problem formulation:"
1312
+ },
1313
+ {
1314
+ "type": "equation",
1315
+ "bbox": [
1316
+ 0.103,
1317
+ 0.685,
1318
+ 0.49,
1319
+ 0.726
1320
+ ],
1321
+ "angle": 0,
1322
+ "content": "\\[\n\\underset {\\mathbf {u} _ {0}, \\dots , \\mathbf {u} _ {N - 1}} {\\text {minimize}} \\sum_ {k = 1} ^ {N} \\left(\\left\\| \\mathbf {o} _ {d, k} - \\mathbf {o} _ {k} \\right\\| ^ {2} + \\gamma \\left\\| \\mathbf {u} _ {k - 1} \\right\\| ^ {2}\\right), \\tag {10}\n\\]"
1323
+ },
1324
+ {
1325
+ "type": "text",
1326
+ "bbox": [
1327
+ 0.09,
1328
+ 0.728,
1329
+ 0.49,
1330
+ 0.744
1331
+ ],
1332
+ "angle": 0,
1333
+ "content": "subject to \\(\\mathbf{x}_0 = \\mathbf{x}'\\) (10.a)"
1334
+ },
1335
+ {
1336
+ "type": "equation",
1337
+ "bbox": [
1338
+ 0.14,
1339
+ 0.747,
1340
+ 0.49,
1341
+ 0.763
1342
+ ],
1343
+ "angle": 0,
1344
+ "content": "\\[\n\\mathbf {x} _ {k + 1} = \\mathrm {f} _ {o} \\left(\\mathbf {x} _ {k}, \\mathbf {u} _ {k}\\right) \\quad \\forall k \\in \\{0, \\dots , N - 1 \\}, \\tag {10.b}\n\\]"
1345
+ },
1346
+ {
1347
+ "type": "equation",
1348
+ "bbox": [
1349
+ 0.14,
1350
+ 0.766,
1351
+ 0.49,
1352
+ 0.782
1353
+ ],
1354
+ "angle": 0,
1355
+ "content": "\\[\n\\omega_ {\\min } \\leq \\omega_ {k} \\leq \\omega_ {\\max } \\forall k \\in \\{1, \\dots , N \\}, \\tag {10.c}\n\\]"
1356
+ },
1357
+ {
1358
+ "type": "equation",
1359
+ "bbox": [
1360
+ 0.145,
1361
+ 0.785,
1362
+ 0.49,
1363
+ 0.801
1364
+ ],
1365
+ "angle": 0,
1366
+ "content": "\\[\n\\xi_ {\\min } \\leq \\xi_ {k} \\leq \\xi_ {\\max } \\quad \\forall k \\in \\{1, \\dots , N \\}, \\tag {10.d}\n\\]"
1367
+ },
1368
+ {
1369
+ "type": "equation",
1370
+ "bbox": [
1371
+ 0.143,
1372
+ 0.804,
1373
+ 0.49,
1374
+ 0.82
1375
+ ],
1376
+ "angle": 0,
1377
+ "content": "\\[\n\\mathbf {u} _ {\\min } \\leq \\mathbf {u} _ {k} \\leq \\mathbf {u} _ {\\max } \\quad \\forall k \\in \\{0, \\dots , N - 1 \\}, \\tag {10.e}\n\\]"
1378
+ },
1379
+ {
1380
+ "type": "text",
1381
+ "bbox": [
1382
+ 0.074,
1383
+ 0.824,
1384
+ 0.493,
1385
+ 0.947
1386
+ ],
1387
+ "angle": 0,
1388
+ "content": "where \\( \\mathrm{f}_o(\\cdot) \\) represents the orientation part of the dynamic model defined in Section III-A; \\( \\omega_{min} \\), \\( \\omega_{max} \\) are limits on the angular velocities; \\( \\mathbf{u}_{min} \\), \\( \\mathbf{u}_{max} \\) are control input limits; and \\( \\xi_{min} \\), \\( \\xi_{max} \\) represent the hardware limitations of the gimbal in adjusting pitch angles. The heading and pitch angles of the camera or light can be controlled independently. Thus, Problem (10) is decoupled into two simpler problems. The optimal solution for each problem can be found analytically"
1389
+ },
1390
+ {
1391
+ "type": "text",
1392
+ "bbox": [
1393
+ 0.504,
1394
+ 0.069,
1395
+ 0.922,
1396
+ 0.099
1397
+ ],
1398
+ "angle": 0,
1399
+ "content": "with a standard framework for linear MPC (Model Predictive Control)."
1400
+ },
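Because heading and pitch decouple, each axis of Problem (10) reduces to a small 1-D double-integrator tracking problem. Below is an illustrative CVXPY sketch of one such axis; the paper solves the decoupled problems analytically within a standard linear MPC framework, so this numeric QP is only meant to convey the structure (all parameter values are assumptions).

```python
import numpy as np
import cvxpy as cp

def angle_mpc(theta0, omega0, theta_ref, N=20, dt=0.2, gamma=0.05,
              omega_max=1.0, u_max=2.0, bounds=None):
    """One decoupled axis (heading or pitch) of problem (10) as a tiny QP."""
    th = cp.Variable(N + 1); om = cp.Variable(N + 1); u = cp.Variable(N)
    cons = [th[0] == theta0, om[0] == omega0]
    for k in range(N):                            # (10.b) double-integrator dynamics
        cons += [th[k + 1] == th[k] + dt * om[k] + 0.5 * dt**2 * u[k],
                 om[k + 1] == om[k] + dt * u[k]]
    cons += [cp.abs(om[1:]) <= omega_max, cp.abs(u) <= u_max]   # (10.c), (10.e)
    if bounds is not None:                        # gimbal pitch limits (10.d)
        cons += [th[1:] >= bounds[0], th[1:] <= bounds[1]]
    cost = cp.sum_squares(th[1:] - theta_ref) + gamma * cp.sum_squares(u)
    cp.Problem(cp.Minimize(cost), cons).solve()
    return u.value[0]

print(angle_mpc(0.0, 0.0, np.deg2rad(45.0)))      # first control input toward 45 deg
```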
1401
+ {
1402
+ "type": "title",
1403
+ "bbox": [
1404
+ 0.572,
1405
+ 0.118,
1406
+ 0.855,
1407
+ 0.132
1408
+ ],
1409
+ "angle": 0,
1410
+ "content": "IV. EXPERIMENTAL EVALUATION"
1411
+ },
1412
+ {
1413
+ "type": "text",
1414
+ "bbox": [
1415
+ 0.503,
1416
+ 0.138,
1417
+ 0.923,
1418
+ 0.289
1419
+ ],
1420
+ "angle": 0,
1421
+ "content": "In this section, experimental results are presented to demonstrate the performance of our method for multi-UAV trajectory planning. We show that the proposed method is capable of computing smooth cinematographic trajectories in real time. Additionally, we evaluate how the trajectories of the follower UAVs, which provide lighting for the target, comply with formation constraints to improve the quality of the shot. The safety of our method is also demonstrated through experiments in the presence of multiple obstacles."
1422
+ },
1423
+ {
1424
+ "type": "title",
1425
+ "bbox": [
1426
+ 0.504,
1427
+ 0.311,
1428
+ 0.663,
1429
+ 0.326
1430
+ ],
1431
+ "angle": 0,
1432
+ "content": "A. Experimental setup"
1433
+ },
1434
+ {
1435
+ "type": "text",
1436
+ "bbox": [
1437
+ 0.503,
1438
+ 0.33,
1439
+ 0.922,
1440
+ 0.468
1441
+ ],
1442
+ "angle": 0,
1443
+ "content": "We implemented our architecture described in Section II in \\(\\mathrm{C + + }\\) using the ROS framework. The ACADO Toolkit [32] was used to solve the optimization problems. We conducted software-in-the-loop simulations using Gazebo to simulate physics and to equip the UAVs with a camera and lights. To solve the optimization problems, a horizon length of 8s and a time step of \\(0.2\\mathrm{s}\\) were chosen. The cinematographic parameters were set to \\(\\psi_{d} = 6^{\\circ}\\) and \\(q_{z,min} = 0.5\\mathrm{m}\\). The maximum distance sampling step was set to \\(d_{s,max} = 0.5\\mathrm{m}\\)."
1444
+ },
1445
+ {
1446
+ "type": "title",
1447
+ "bbox": [
1448
+ 0.505,
1449
+ 0.488,
1450
+ 0.81,
1451
+ 0.503
1452
+ ],
1453
+ "angle": 0,
1454
+ "content": "B. Simulation - Cinematography trajectories"
1455
+ },
1456
+ {
1457
+ "type": "text",
1458
+ "bbox": [
1459
+ 0.503,
1460
+ 0.507,
1461
+ 0.922,
1462
+ 0.657
1463
+ ],
1464
+ "angle": 0,
1465
+ "content": "The objective of this simulation was twofold: to demonstrate how the method computes smoother camera trajectories for the leader UAV while complying with cinematographic aspects, and how the trajectories of the followers comply with formation constraints to light the target properly. We simulated a human worker performing a maintenance operation on a transmission tower while monitored by a team of three UAVs (one filming and two lighting the worker). While the worker approached and climbed the tower, the system was commanded to perform a lateral shot followed by a sequence of fly-over shots."
1466
+ },
1467
+ {
1468
+ "type": "text",
1469
+ "bbox": [
1470
+ 0.503,
1471
+ 0.658,
1472
+ 0.922,
1473
+ 0.947
1474
+ ],
1475
+ "angle": 0,
1476
+ "content": "The fly-over shots were selected to film the operation as they impose relative motion between the camera and the target. This feature is regarded as richer from a cinematographic point of view. We further demonstrate how our method is able to execute these relative movements more aesthetically than a baseline approach where the specific term to smooth variations in camera angles has been removed (i.e., \\(\\alpha_{1} = 0\\) in Problem 3). Fig. 6 compares the trajectories for the camera carrying UAV generated with both our method and the baseline approach. The baseline approach generates straight trajectories, whereas our method results in orbital trajectories, which have been used in the cinematography literature to produce more pleasant videos. For instance, [16], [17], [33] apply the arcball principle [34] to create a spherical surface around the target for aesthetic camera motion. We can also see in Fig. 6 that our method reduces the jerk of the camera angles. Note that in aerial cinematography literature, the jerk of the camera motion (third derivative of the angles) has been identified as a key aspect for shot quality [9], [35]. We measured the root mean"
1477
+ }
1478
+ ],
1479
+ [
1480
+ {
1481
+ "type": "header",
1482
+ "bbox": [
1483
+ 0.077,
1484
+ 0.03,
1485
+ 0.458,
1486
+ 0.042
1487
+ ],
1488
+ "angle": 0,
1489
+ "content": "© IEEE, 2021. ACCEPTED TO IEEE RA-L. DOI: 10.1109/LRA.2021.3098811"
1490
+ },
1491
+ {
1492
+ "type": "page_number",
1493
+ "bbox": [
1494
+ 0.912,
1495
+ 0.031,
1496
+ 0.921,
1497
+ 0.041
1498
+ ],
1499
+ "angle": 0,
1500
+ "content": "7"
1501
+ },
1502
+ {
1503
+ "type": "image",
1504
+ "bbox": [
1505
+ 0.081,
1506
+ 0.077,
1507
+ 0.49,
1508
+ 0.246
1509
+ ],
1510
+ "angle": 0,
1511
+ "content": null
1512
+ },
1513
+ {
1514
+ "type": "image",
1515
+ "bbox": [
1516
+ 0.076,
1517
+ 0.248,
1518
+ 0.49,
1519
+ 0.346
1520
+ ],
1521
+ "angle": 0,
1522
+ "content": null
1523
+ },
1524
+ {
1525
+ "type": "image_caption",
1526
+ "bbox": [
1527
+ 0.074,
1528
+ 0.35,
1529
+ 0.493,
1530
+ 0.465
1531
+ ],
1532
+ "angle": 0,
1533
+ "content": "Fig. 6: Trajectories for the camera carrying UAV while monitoring a worker on a transmission tower. For simplicity, only the lateral shot and the first fly-over shot are shown. We compare the trajectories generated by our method (blue) with those from a baseline approach without cinematographic costs (green). The upper image displays a top view of the UAV's and target's trajectories. The small white dots on the trajectories depict transition points sampled every \\(5\\mathrm{s}\\) to give a notion of the speed. The bottom image depicts the temporal evolution of the jerk of the camera angles"
1534
+ },
1535
+ {
1536
+ "type": "text",
1537
+ "bbox": [
1538
+ 0.074,
1539
+ 0.483,
1540
+ 0.492,
1541
+ 0.543
1542
+ ],
1543
+ "angle": 0,
1544
+ "content": "square of the jerk of \\(\\varphi\\) and \\(\\xi\\) along the full trajectories and obtained \\(0.0197\\mathrm{rad}\\mathrm{s}^{-3}\\) and \\(0.0048\\mathrm{rad}\\mathrm{s}^{-3}\\), respectively, for our method; and \\(0.0265\\mathrm{rad}\\mathrm{s}^{-3}\\) and \\(0.0075\\mathrm{rad}\\mathrm{s}^{-3}\\), respectively, for the baseline without the cinematographic cost term."
1545
+ },
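The reported metric can be reproduced conceptually as the root mean square of the third finite difference of the sampled camera angles; the sketch below uses an assumed 0.2 s sampling period and a synthetic heading profile.

```python
import numpy as np

def rms_jerk(angles, dt):
    """Root mean square of the third derivative of a sampled angle signal [rad/s^3]."""
    jerk = np.diff(angles, n=3) / dt**3
    return float(np.sqrt(np.mean(jerk**2)))

# Illustrative check on a synthetic heading profile sampled at 0.2 s (assumed).
t = np.arange(0.0, 60.0, 0.2)
phi = 0.3 * np.sin(0.1 * t)
print(rms_jerk(phi, dt=0.2))
```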
1546
+ {
1547
+ "type": "text",
1548
+ "bbox": [
1549
+ 0.074,
1550
+ 0.544,
1551
+ 0.493,
1552
+ 0.709
1553
+ ],
1554
+ "angle": 0,
1555
+ "content": "Fig. 7 shows the trajectories followed by the whole UAV formation throughout the experiment to film the maintenance operation. It can be seen that the formation is properly maintained to avoid collisions between the UAVs and the tower, and to provide required lighting of the filmed object. Moreover, none of the UAVs appear in the camera's field of view. The temporal evolution of the deviations from the desired orientation of each light and their distance from entering the camera FoV during this simulation are shown in Fig. 8. A video of the complete simulation can be found at the site with multimedia materials."
1556
+ },
1557
+ {
1558
+ "type": "title",
1559
+ "bbox": [
1560
+ 0.076,
1561
+ 0.73,
1562
+ 0.34,
1563
+ 0.744
1564
+ ],
1565
+ "angle": 0,
1566
+ "content": "C. Simulation - Cluttered environment"
1567
+ },
1568
+ {
1569
+ "type": "text",
1570
+ "bbox": [
1571
+ 0.074,
1572
+ 0.749,
1573
+ 0.491,
1574
+ 0.914
1575
+ ],
1576
+ "angle": 0,
1577
+ "content": "The aim of this simulation was to demonstrate the performance of our method for trajectory planning in a cluttered environment while assessing its scalability with numerous obstacles. We simulated a forest-like scenario with multiple trees as obstacles. As a human target walks through the forest, the filming UAV executes a chase shot from behind while the lighting UAVs follow the leader side by side. Fig. 9 depicts the distribution of the obstacles around the forest and the generated trajectories for the UAVs. In this figure, it is visible that the UAVs were able to follow the human in formation and to simultaneously avoid obstacles."
1578
+ },
1579
+ {
1580
+ "type": "text",
1581
+ "bbox": [
1582
+ 0.075,
1583
+ 0.915,
1584
+ 0.492,
1585
+ 0.946
1586
+ ],
1587
+ "angle": 0,
1588
+ "content": "Finally, we analyze the scalability of our method in terms of computational demand. Simulations were run with a 4-"
1589
+ },
1590
+ {
1591
+ "type": "image",
1592
+ "bbox": [
1593
+ 0.508,
1594
+ 0.08,
1595
+ 0.92,
1596
+ 0.221
1597
+ ],
1598
+ "angle": 0,
1599
+ "content": null
1600
+ },
1601
+ {
1602
+ "type": "image_caption",
1603
+ "bbox": [
1604
+ 0.504,
1605
+ 0.223,
1606
+ 0.922,
1607
+ 0.326
1608
+ ],
1609
+ "angle": 0,
1610
+ "content": "Fig. 7: An illustration of the experiment where an operator is filmed working on a transmission tower. The trajectories of the camera carrying leader (orange), both followers carrying lights (blue and green), and the human worker (red) are shown. The obstacle map is represented by a point cloud, including the power lines and tower. The worker is tracked with a lateral shot as he walks to the tower and then with a sequence of fly-over shots while he climbs up. Several onboard images taken during the experiment are also shown."
1611
+ },
1612
+ {
1613
+ "type": "image",
1614
+ "bbox": [
1615
+ 0.511,
1616
+ 0.349,
1617
+ 0.918,
1618
+ 0.485
1619
+ ],
1620
+ "angle": 0,
1621
+ "content": null
1622
+ },
1623
+ {
1624
+ "type": "image_caption",
1625
+ "bbox": [
1626
+ 0.504,
1627
+ 0.494,
1628
+ 0.921,
1629
+ 0.534
1630
+ ],
1631
+ "angle": 0,
1632
+ "content": "Fig. 8: Temporal evolution of the distance \\( d_{F} \\) of UAVs carrying lights from entering the camera FoV, deviation from desired heading \\( \\varphi_{d} \\) and deviation from desired pitch angle of light \\( \\xi_{d} \\)."
1633
+ },
1634
+ {
1635
+ "type": "text",
1636
+ "bbox": [
1637
+ 0.503,
1638
+ 0.541,
1639
+ 0.922,
1640
+ 0.677
1641
+ ],
1642
+ "angle": 0,
1643
+ "content": "core Intel(R) Core(TM) i7-10510U CPU @ 1.80 GHz. Table I shows the total planning time per iteration of our method running on the leader UAV. As expected, most of the time was spent in the non-convex optimization step described in Section III-B. The results for the followers are not included because they skip this non-convex optimization and thus consume less time. The results are similar for the two simulations, although the second scenario was significantly more cluttered."
1644
+ },
1645
+ {
1646
+ "type": "text",
1647
+ "bbox": [
1648
+ 0.503,
1649
+ 0.678,
1650
+ 0.922,
1651
+ 0.858
1652
+ ],
1653
+ "angle": 0,
1654
+ "content": "Since the map of the environment is transformed into safe corridors made of convex polyhedrons, cluttered environments do not increase the computational demands of the trajectory optimization method. Therefore, we are able to plan the leader's trajectories at a rate of \\(1\\mathrm{Hz}\\) with horizon lengths of 8s. This rate is adequate for real-time performance in the dynamic scenarios that we target. The lower computational complexity required to generate the initial trajectories of the followers allows us to plan the followers' trajectories at a higher rate of \\(2\\mathrm{Hz}\\), enabling faster reactions to changes in the leader's behaviour and thus more efficient mutual collision avoidance."
1655
+ },
1656
+ {
1657
+ "type": "title",
1658
+ "bbox": [
1659
+ 0.505,
1660
+ 0.88,
1661
+ 0.688,
1662
+ 0.895
1663
+ ],
1664
+ "angle": 0,
1665
+ "content": "D. Real world experiment"
1666
+ },
1667
+ {
1668
+ "type": "text",
1669
+ "bbox": [
1670
+ 0.504,
1671
+ 0.899,
1672
+ 0.922,
1673
+ 0.946
1674
+ ],
1675
+ "content": "In order to demonstrate our method, we performed field experiments generating trajectories for a real team of UAVs (see Fig. 10). Thus, we verified the real-time performance of"
1676
+ "content": "In order to demonstrate our method, we performed field experiments generating trajectories for a real team of UAVs (see Fig. 10). Thus, we proved the real-time performance of"
1677
+ }
1678
+ ],
1679
+ [
1680
+ {
1681
+ "type": "page_number",
1682
+ "bbox": [
1683
+ 0.078,
1684
+ 0.032,
1685
+ 0.087,
1686
+ 0.04
1687
+ ],
1688
+ "angle": 0,
1689
+ "content": "8"
1690
+ },
1691
+ {
1692
+ "type": "header",
1693
+ "bbox": [
1694
+ 0.541,
1695
+ 0.03,
1696
+ 0.92,
1697
+ 0.042
1698
+ ],
1699
+ "angle": 0,
1700
+ "content": "© IEEE, 2021. ACCEPTED TO IEEE RA-L. DOI: 10.1109/LRA.2021.3098811"
1701
+ },
1702
+ {
1703
+ "type": "text",
1704
+ "bbox": [
1705
+ 0.074,
1706
+ 0.071,
1707
+ 0.493,
1708
+ 0.148
1709
+ ],
1710
+ "content": "TABLE I: The planning times of our method per iteration. The total average values are shown for the two experiments, followed by the percentage of time consumed at each step. ITG stands for the initial trajectory generation described in Section III-B, SCG for the safe corridor generation described in Section III-C, and FTO for the final trajectory optimization described in Section III-D."
1711
+ "content": "TABLE I: The planning times of our method per iteration. The total average values are shown for the two experiments. The percentage of time consumed at each step is shown thereafter. ITG stands for the procedure indicated in Section III-B, SCG for procedure described in Section III-C and FTO for trajectory optimization described in Section III-D."
1712
+ },
1713
+ {
1714
+ "type": "table",
1715
+ "bbox": [
1716
+ 0.079,
1717
+ 0.155,
1718
+ 0.493,
1719
+ 0.212
1720
+ ],
1721
+ "angle": 0,
1722
+ "content": "<table><tr><td rowspan=\"2\"></td><td colspan=\"4\">Time (s)</td></tr><tr><td>Total (Avg ± std)</td><td>ITG (%)</td><td>SCG (%)</td><td>FTO (%)</td></tr><tr><td>Tower</td><td>0.70923 ± 0.10557</td><td>70.9982</td><td>11.81564</td><td>17.18615</td></tr><tr><td>Forest</td><td>0.71274 ± 0.05792</td><td>72.41338</td><td>8.77989</td><td>18.80673</td></tr></table>"
1723
+ },
1724
+ {
1725
+ "type": "image",
1726
+ "bbox": [
1727
+ 0.082,
1728
+ 0.224,
1729
+ 0.492,
1730
+ 0.327
1731
+ ],
1732
+ "angle": 0,
1733
+ "content": null
1734
+ },
1735
+ {
1736
+ "type": "image_caption",
1737
+ "bbox": [
1738
+ 0.075,
1739
+ 0.333,
1740
+ 0.493,
1741
+ 0.384
1742
+ ],
1743
+ "angle": 0,
1744
+ "content": "Fig. 9: A top view of the trajectories generated in the cluttered forest scenario. The trajectories of the target (red), the leader (orange), and both followers (blue and green) are shown. The black dots represent trees."
1745
+ },
1746
+ {
1747
+ "type": "text",
1748
+ "bbox": [
1749
+ 0.075,
1750
+ 0.388,
1751
+ 0.492,
1752
+ 0.448
1753
+ ],
1754
+ "angle": 0,
1755
+ "content": "the proposed approach onboard. A sequence of shots was commanded to film a dynamic target in an outdoor scenario. A video of the experiment can be found at the multimedia material site."
1756
+ },
1757
+ {
1758
+ "type": "image",
1759
+ "bbox": [
1760
+ 0.079,
1761
+ 0.458,
1762
+ 0.491,
1763
+ 0.564
1764
+ ],
1765
+ "angle": 0,
1766
+ "content": null
1767
+ },
1768
+ {
1769
+ "type": "image_caption",
1770
+ "bbox": [
1771
+ 0.075,
1772
+ 0.565,
1773
+ 0.493,
1774
+ 0.604
1775
+ ],
1776
+ "angle": 0,
1777
+ "content": "Fig. 10: A snapshot of a real experiment in an outdoor scenario. UAVs were localized using RTK GPS. The target location was generated from a noisy ground truth, without onboard visual detection."
1778
+ },
1779
+ {
1780
+ "type": "title",
1781
+ "bbox": [
1782
+ 0.214,
1783
+ 0.618,
1784
+ 0.355,
1785
+ 0.632
1786
+ ],
1787
+ "angle": 0,
1788
+ "content": "V. CONCLUSION"
1789
+ },
1790
+ {
1791
+ "type": "text",
1792
+ "bbox": [
1793
+ 0.074,
1794
+ 0.637,
1795
+ 0.493,
1796
+ 0.879
1797
+ ],
1798
+ "angle": 0,
1799
+ "content": "This paper has presented a method for autonomous aerial cinematography with distributed lighting by a team of UAVs. We have proposed a novel methodology for multi-UAV trajectory planning, addressing non-linear cinematographic aspects and obstacle avoidance in separate optimization steps. We have demonstrated that the method is capable of generating smooth trajectories that comply with aesthetic objectives for the filming UAV, and trajectories for the follower UAVs that allow them to keep a formation that lights the target properly while staying out of the camera FoV. Moreover, our results indicate that we can plan trajectories in a distributed and online manner, and that the method is suitable for obstacle avoidance even in cluttered environments. As future work, we plan to address occlusions caused by obstacles within the camera FoV. Our idea is to compute the regions where these occlusions would take place and include them in the representation of the occupied space."
1800
+ },
1801
+ {
1802
+ "type": "title",
1803
+ "bbox": [
1804
+ 0.236,
1805
+ 0.89,
1806
+ 0.333,
1807
+ 0.903
1808
+ ],
1809
+ "angle": 0,
1810
+ "content": "REFERENCES"
1811
+ },
1812
+ {
1813
+ "type": "ref_text",
1814
+ "bbox": [
1815
+ 0.082,
1816
+ 0.909,
1817
+ 0.493,
1818
+ 0.945
1819
+ ],
1820
+ "angle": 0,
1821
+ "content": "[1] I. Mademlis et al., \"High-level multiple-UAV cinematography tools for covering outdoor events,\" IEEE Trans. on Broadcasting, vol. 65, no. 3, pp. 627-635, 2019."
1822
+ },
1823
+ {
1824
+ "type": "ref_text",
1825
+ "bbox": [
1826
+ 0.513,
1827
+ 0.071,
1828
+ 0.922,
1829
+ 0.106
1830
+ ],
1831
+ "angle": 0,
1832
+ "content": "[2] A. Sabirova et al., \"Drone cinematography system design and new guideline model for scene objects interaction,\" in 2020 Int. Conf. Nonlinearity, Information and Robotics, 2020."
1833
+ },
1834
+ {
1835
+ "type": "ref_text",
1836
+ "bbox": [
1837
+ 0.513,
1838
+ 0.107,
1839
+ 0.922,
1840
+ 0.129
1841
+ ],
1842
+ "angle": 0,
1843
+ "content": "[3] B. F. Jeon et al., \"Detection-Aware Trajectory Generation for a Drone Cinematographer,\" ArXiv e-prints."
1844
+ },
1845
+ {
1846
+ "type": "ref_text",
1847
+ "bbox": [
1848
+ 0.514,
1849
+ 0.13,
1850
+ 0.921,
1851
+ 0.151
1852
+ ],
1853
+ "angle": 0,
1854
+ "content": "[4] L.-E. Caraballo et al., \"Autonomous planning for multiple aerial cinematographers,\" in IEEE/RSJ IROS, 2020."
1855
+ },
1856
+ {
1857
+ "type": "ref_text",
1858
+ "bbox": [
1859
+ 0.514,
1860
+ 0.152,
1861
+ 0.921,
1862
+ 0.174
1863
+ ],
1864
+ "angle": 0,
1865
+ "content": "[5] P. Moreno et al., \"Aerial multi-camera robotic jib crane,\" IEEE RA-L, vol. 6, no. 2, pp. 4103-4108, 2021."
1866
+ },
1867
+ {
1868
+ "type": "ref_text",
1869
+ "bbox": [
1870
+ 0.514,
1871
+ 0.175,
1872
+ 0.915,
1873
+ 0.186
1874
+ ],
1875
+ "angle": 0,
1876
+ "content": "[6] DJI, \"Mavic pro 2,\" 2018. [Online]. Available: www.dji.com/es/mavic"
1877
+ },
1878
+ {
1879
+ "type": "ref_text",
1880
+ "bbox": [
1881
+ 0.514,
1882
+ 0.187,
1883
+ 0.885,
1884
+ 0.197
1885
+ ],
1886
+ "angle": 0,
1887
+ "content": "[7] Skydio, \"Skydio 2,\" 2019. [Online]. Available: www.skydio.com"
1888
+ },
1889
+ {
1890
+ "type": "ref_text",
1891
+ "bbox": [
1892
+ 0.514,
1893
+ 0.198,
1894
+ 0.921,
1895
+ 0.23
1896
+ ],
1897
+ "angle": 0,
1898
+ "content": "[8] R. Bonatti et al., \"Towards a robust aerial cinematography platform: Localizing and tracking moving targets in unstructured environments,\" in IEEE/RSJ IROS, 2019."
1899
+ },
1900
+ {
1901
+ "type": "ref_text",
1902
+ "bbox": [
1903
+ 0.514,
1904
+ 0.231,
1905
+ 0.921,
1906
+ 0.265
1907
+ ],
1908
+ "angle": 0,
1909
+ "content": "[9] R. Bonatti et al., \"Autonomous aerial cinematography in unstructured environments with learned artistic decision-making,\" JFR, vol. 37, no. 4, pp. 606-641, 2020."
1910
+ },
1911
+ {
1912
+ "type": "ref_text",
1913
+ "bbox": [
1914
+ 0.508,
1915
+ 0.265,
1916
+ 0.921,
1917
+ 0.299
1918
+ ],
1919
+ "angle": 0,
1920
+ "content": "[10] T. Nageli et al., \"Real-time motion planning for aerial videography with dynamic obstacle avoidance and viewpoint optimization,\" IEEE RA-L, vol. 2, no. 3, pp. 1696-1703, 2017."
1921
+ },
1922
+ {
1923
+ "type": "ref_text",
1924
+ "bbox": [
1925
+ 0.508,
1926
+ 0.299,
1927
+ 0.921,
1928
+ 0.321
1929
+ ],
1930
+ "angle": 0,
1931
+ "content": "[11] N. Passalis et al., \"Deep reinforcement learning for frontal view person shooting using drones,\" in IEEE EAIS, 2018."
1932
+ },
1933
+ {
1934
+ "type": "ref_text",
1935
+ "bbox": [
1936
+ 0.508,
1937
+ 0.322,
1938
+ 0.92,
1939
+ 0.344
1940
+ ],
1941
+ "angle": 0,
1942
+ "content": "[12] Y. Dang et al., \"Imitation learning-based algorithm for drone cinematography system,\" IEEE Trans. Cogn. Devel. Syst., pp. 1-1, 2020."
1943
+ },
1944
+ {
1945
+ "type": "ref_text",
1946
+ "bbox": [
1947
+ 0.508,
1948
+ 0.345,
1949
+ 0.921,
1950
+ 0.367
1951
+ ],
1952
+ "angle": 0,
1953
+ "content": "[13] C. Huang et al., \"Learning to capture a film-look video with a camera drone,\" in IEEE ICRA, 2019, pp. 1871-1877."
1954
+ },
1955
+ {
1956
+ "type": "ref_text",
1957
+ "bbox": [
1958
+ 0.508,
1959
+ 0.367,
1960
+ 0.921,
1961
+ 0.389
1962
+ ],
1963
+ "angle": 0,
1964
+ "content": "[14] M. Gschwindt et al., \"Can a robot become a movie director? Learning artistic principles for aerial cinematography,\" in IEEE/RSJ IROS, 2019."
1965
+ },
1966
+ {
1967
+ "type": "ref_text",
1968
+ "bbox": [
1969
+ 0.508,
1970
+ 0.39,
1971
+ 0.921,
1972
+ 0.412
1973
+ ],
1974
+ "angle": 0,
1975
+ "content": "[15] T. Nageli et al., \"Real-time planning for automated multi-view drone cinematography,\" ACM Trans. Graph., vol. 36, no. 4, pp. 1-10, 2017."
1976
+ },
1977
+ {
1978
+ "type": "ref_text",
1979
+ "bbox": [
1980
+ 0.508,
1981
+ 0.413,
1982
+ 0.92,
1983
+ 0.435
1984
+ ],
1985
+ "angle": 0,
1986
+ "content": "[16] Q. Galvane et al., “Directing cinematographic drones,” ACM Trans. Graph., vol. 37, no. 3, pp. 1-18, 2018."
1987
+ },
1988
+ {
1989
+ "type": "ref_text",
1990
+ "bbox": [
1991
+ 0.508,
1992
+ 0.436,
1993
+ 0.92,
1994
+ 0.457
1995
+ ],
1996
+ "angle": 0,
1997
+ "content": "[17] A. Bucker et al., “Do You See What I See? Coordinating multiple aerial cameras for robot cinematography,” arXiv, 2020."
1998
+ },
1999
+ {
2000
+ "type": "ref_text",
2001
+ "bbox": [
2002
+ 0.507,
2003
+ 0.458,
2004
+ 0.921,
2005
+ 0.48
2006
+ ],
2007
+ "angle": 0,
2008
+ "content": "[18] A. Alcantara et al., \"Autonomous execution of cinematographic shots with multiple drones,\" IEEE Access, pp. 201300-201316, 2020."
2009
+ },
2010
+ {
2011
+ "type": "ref_text",
2012
+ "bbox": [
2013
+ 0.507,
2014
+ 0.481,
2015
+ 0.921,
2016
+ 0.502
2017
+ ],
2018
+ "angle": 0,
2019
+ "content": "[19] ——, \"Optimal trajectory planning for cinematography with multiple unmanned aerial vehicles,\" RAS, vol. 140, p. 103778, 2021."
2020
+ },
2021
+ {
2022
+ "type": "ref_text",
2023
+ "bbox": [
2024
+ 0.508,
2025
+ 0.503,
2026
+ 0.856,
2027
+ 0.514
2028
+ ],
2029
+ "angle": 0,
2030
+ "content": "[20] B. Hall, Understanding cinematography. Crowood, 2015."
2031
+ },
2032
+ {
2033
+ "type": "ref_text",
2034
+ "bbox": [
2035
+ 0.508,
2036
+ 0.515,
2037
+ 0.921,
2038
+ 0.548
2039
+ ],
2040
+ "angle": 0,
2041
+ "content": "[21] P. Petracek et al., \"Dronument: System for Reliable Deployment of Micro Aerial Vehicles in Dark Areas of Large Historical Monuments,\" IEEE RA-L, vol. 5, no. 2, pp. 2078-2085, 2020."
2042
+ },
2043
+ {
2044
+ "type": "ref_text",
2045
+ "bbox": [
2046
+ 0.507,
2047
+ 0.549,
2048
+ 0.92,
2049
+ 0.571
2050
+ ],
2051
+ "angle": 0,
2052
+ "content": "[22] M. Petrlik et al., \"A robust UAV system for operations in a constrained environment,\" IEEE RA-L, vol. 5, no. 2, pp. 2169-2176, 2020."
2053
+ },
2054
+ {
2055
+ "type": "ref_text",
2056
+ "bbox": [
2057
+ 0.507,
2058
+ 0.571,
2059
+ 0.921,
2060
+ 0.604
2061
+ ],
2062
+ "angle": 0,
2063
+ "content": "[23] M. Saska et al., \"Documentation of dark areas of large historical buildings by a formation of unmanned aerial vehicles using model predictive control,\" in IEEE ETFA, 2017."
2064
+ },
2065
+ {
2066
+ "type": "ref_text",
2067
+ "bbox": [
2068
+ 0.508,
2069
+ 0.605,
2070
+ 0.921,
2071
+ 0.637
2072
+ ],
2073
+ "angle": 0,
2074
+ "content": "[24] V. Krátký et al., \"Autonomous reflectance transformation imaging by a team of unmanned aerial vehicles,\" IEEE RA-L, vol. 5, no. 2, pp. 2302-2309, 2020."
2075
+ },
2076
+ {
2077
+ "type": "ref_text",
2078
+ "bbox": [
2079
+ 0.508,
2080
+ 0.638,
2081
+ 0.921,
2082
+ 0.671
2083
+ ],
2084
+ "angle": 0,
2085
+ "content": "[25] J. Chen et al., \"Online generation of collision-free trajectories for quadrotor flight in unknown cluttered environments,\" in IEEE ICRA, 2016."
2086
+ },
2087
+ {
2088
+ "type": "ref_text",
2089
+ "bbox": [
2090
+ 0.507,
2091
+ 0.672,
2092
+ 0.921,
2093
+ 0.695
2094
+ ],
2095
+ "angle": 0,
2096
+ "content": "[26] K. Mohta et al., \"Fast, autonomous flight in GPS-denied and cluttered environments,\" JFR, vol. 35, no. 1, pp. 101-120, 2018."
2097
+ },
2098
+ {
2099
+ "type": "ref_text",
2100
+ "bbox": [
2101
+ 0.508,
2102
+ 0.695,
2103
+ 0.921,
2104
+ 0.729
2105
+ ],
2106
+ "angle": 0,
2107
+ "content": "[27] S. Liu et al., \"Planning dynamically feasible trajectories for quadrotors using safe flight corridors in 3-D complex environments,\" IEEE RA-L, vol. 2, no. 3, pp. 1688-1695, 2017."
2108
+ },
2109
+ {
2110
+ "type": "ref_text",
2111
+ "bbox": [
2112
+ 0.507,
2113
+ 0.729,
2114
+ 0.921,
2115
+ 0.751
2116
+ ],
2117
+ "angle": 0,
2118
+ "content": "[28] J. Tordesillas et al., “FASTER: Fast and Safe Trajectory Planner for Flights in Unknown Environments,” IEEE/RSJ IROS, 2019."
2119
+ },
2120
+ {
2121
+ "type": "ref_text",
2122
+ "bbox": [
2123
+ 0.507,
2124
+ 0.752,
2125
+ 0.921,
2126
+ 0.785
2127
+ ],
2128
+ "angle": 0,
2129
+ "content": "[29] T. Báča et al., “The MRS UAV system: Pushing the frontiers of reproducible research, real-world deployment, and education with autonomous unmanned aerial vehicles,” JINT, vol. 26, 2021."
2130
+ },
2131
+ {
2132
+ "type": "ref_text",
2133
+ "bbox": [
2134
+ 0.507,
2135
+ 0.786,
2136
+ 0.921,
2137
+ 0.808
2138
+ ],
2139
+ "angle": 0,
2140
+ "content": "[30] D. Harabor et al., \"Online graph pruning for pathfinding on grid maps.\" in AAAI Conf. on Artificial Intelligence, vol. 25, no. 1, 2011."
2141
+ },
2142
+ {
2143
+ "type": "ref_text",
2144
+ "bbox": [
2145
+ 0.507,
2146
+ 0.809,
2147
+ 0.921,
2148
+ 0.83
2149
+ ],
2150
+ "angle": 0,
2151
+ "content": "[31] ——, “Improving jump point search,” in Int. Conf. on Automated Planning and Scheduling, vol. 24, no. 1, 2014."
2152
+ },
2153
+ {
2154
+ "type": "ref_text",
2155
+ "bbox": [
2156
+ 0.507,
2157
+ 0.831,
2158
+ 0.921,
2159
+ 0.865
2160
+ ],
2161
+ "angle": 0,
2162
+ "content": "[32] B. Houska et al., \"ACADO Toolkit - An Open Source Framework for Automatic Control and Dynamic Optimization,\" Optimal Control Applications and Methods, vol. 32, no. 3, pp. 298-312, 2011."
2163
+ },
2164
+ {
2165
+ "type": "ref_text",
2166
+ "bbox": [
2167
+ 0.507,
2168
+ 0.866,
2169
+ 0.921,
2170
+ 0.887
2171
+ ],
2172
+ "angle": 0,
2173
+ "content": "[33] C. Lino et al., \"Intuitive and Efficient Camera Control with the Toric Space,\" ACM Trans. Graph., 2015."
2174
+ },
2175
+ {
2176
+ "type": "ref_text",
2177
+ "bbox": [
2178
+ 0.507,
2179
+ 0.888,
2180
+ 0.921,
2181
+ 0.91
2182
+ ],
2183
+ "angle": 0,
2184
+ "content": "[34] K. Shoemake, \"Arcball: a user interface for specifying three-dimensional orientation using a mouse,\" in Proceedings of Graphics Interface, 1992."
2185
+ },
2186
+ {
2187
+ "type": "ref_text",
2188
+ "bbox": [
2189
+ 0.508,
2190
+ 0.91,
2191
+ 0.921,
2192
+ 0.945
2193
+ ],
2194
+ "angle": 0,
2195
+ "content": "[35] C. Gebhardt et al., \"Airways: Optimization-Based Planning of Quadrotor Trajectories according to High-Level User Goals,\" in Proceedings of the Conf. on Human Factors in Computing Systems, 2016."
2196
+ },
2197
+ {
2198
+ "type": "list",
2199
+ "bbox": [
2200
+ 0.507,
2201
+ 0.071,
2202
+ 0.922,
2203
+ 0.945
2204
+ ],
2205
+ "angle": 0,
2206
+ "content": null
2207
+ }
2208
+ ]
2209
+ ]
2303.01xxx/2303.01177/820e6330-cce6-4c34-a45a-1398d2625e6f_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:01e3957916733d312cacd5675fc151ed49b5f23bb7d7589d36598e07324c7d55
3
+ size 4971979
2303.01xxx/2303.01177/full.md ADDED
@@ -0,0 +1,355 @@
 
1
+ # © 2021 IEEE
2
+
3
+ Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting or republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.
4
+
5
+ # Autonomous Aerial Filming with Distributed Lighting by a Team of Unmanned Aerial Vehicles
6
+
7
+ Vít Krátký<sup>1</sup>, Alfonso Alcántara<sup>2</sup>, Jesús Capitán<sup>2</sup>, Petr Štěpán<sup>1</sup>, Martin Saska<sup>1</sup> and Aníbal Ollero<sup>2</sup>
8
+
9
+ Abstract—This paper describes a method for autonomous aerial cinematography with distributed lighting by a team of unmanned aerial vehicles (UAVs). Although camera-carrying multirotor helicopters have become commonplace in cinematography, their usage is limited to scenarios with sufficient natural light or with lighting provided by static artificial lights. We propose to use a formation of unmanned aerial vehicles as a tool for filming a target under illumination from various directions, which is one of the fundamental techniques of traditional cinematography. We decompose the multi-UAV trajectory optimization problem to tackle non-linear cinematographic aspects and obstacle avoidance at separate stages, which allows us to re-plan in real time and react to changes in dynamic environments. The performance of our method has been evaluated in realistic simulation scenarios and field experiments, where we show how it increases the quality of the shots and that it is capable of planning safe trajectories even in cluttered environments.
10
+
11
+ Index Terms—Multi-Robot Systems, Aerial Systems: Applications, Motion and Path Planning
12
+
13
+ # I. INTRODUCTION
14
+
15
+ The interest in Unmanned Aerial Vehicles (UAVs) for aerial photography and filming is growing fast [1]–[5]. This is mainly due to their manoeuvrability and the capacity to create unique shots when compared to standard cameras. The use of UAVs as flying cameras presents not only a remarkable potential for recreational cinematography, but also for the monitoring of inspection operations in outdoor infrastructures with complex access. For instance, the EU-funded project, AERIAL-CORE, proposes UAVs to surveil the safety of human workers during maintenance operations of electrical power lines (see Fig. 1). In this industrial setup, a high-quality video is key, as it is used by supervising ground
16
+
17
+ Manuscript received: February 24, 2021; Revised May 26, 2021; Accepted July 3, 2021.
18
+
19
+ This paper was recommended for publication by Editor M. Ani Hsieh upon evaluation of the Associate Editor and Reviewers' comments. This work was supported by EU project AERIAL-CORE (H2020-2019-871479), by MULTICOP (US-1265072) in FEDER-Junta de Andalucia Programme, by project no. DG18P02OVV069 in program NAKI II, by CTU grant no SGS20/174/OHK3/3T/13, and by OP VVV funded project CZ.02.1.01/0.0/0.0/16 019/0000765 "Research Center for Informatics". (Vit Kratky and Alfonso Alcantara are co-first authors.)
20
+
21
+ $^{1}$ Vít Krátký, Petr Štěpán, and Martin Saska are with the Faculty of Electrical Engineering, Czech Technical University in Prague, Czech Republic, {kratkvit|stepan|martin.saska}@fel.cvut.cz.
22
+
23
+ $^{2}$ Alfonso Alcántara, Jesús Capitán, and Aníbal Ollero are with the GRVC Robotics Laboratory, University of Seville, Spain, {aamarin|jcapitan|aollero}@us.es.
24
+
25
+ Digital Object Identifier (DOI): see top of this page.
26
+
27
+ ![](images/59e03ea8e09d4ef2373265a82b4ff5d5a8a32bc81e6586ad87f5b27816edee13.jpg)
28
+ Fig. 1: UAV filming applications to provide external lighting; to capture smooth shots outdoors; and to monitor dangerous maintenance operations at electrical lines<sup>1</sup>. Pictures were obtained within AERIAL-CORE and DRONUMENT projects, for which the proposed technology is being developed. Videos of the work in this paper can be seen on the multimedia material page (http://mrs.felk.cvut.cz/papers/aerial-filming).
29
+
30
+ ![](images/3a490b74b87370ceeb2e27c4ee3cd561de41c464aa2d983ea2afe1a008aab634.jpg)
31
+
32
+ operators to monitor safety during the maintenance work. Multi-UAV teams expand upon these possibilities as they could provide alternative points of view or even supplementary illumination. Similarly, in our DRONUMENT project within the NAKI II program, efficient variable illumination plays a key role in the documentation of historical building interiors.
33
+
34
+ Proper lighting techniques are fundamental in bringing out details in an image and in creating more natural-looking film scenes. Thus, cinematography sets are packed with different lighting sources, as digital sensors are not as reactive to light as the human eye. This can also be relevant in monitoring maintenance operations scheduled at times of the day with poor illumination. Although aerial cinematography has been attractive to the scientific community as of late, lighting techniques have yet to be applied to improve the performance of filming. Filmmakers apply many types of lighting techniques making use of various equipment. In this work, we only consider direct lighting techniques that do not require additional equipment apart from light sources. Although static lights could produce more pleasant footage in some situations, we believe that UAVs are not optimal for this purpose. Therefore, we only use UAVs as dynamic sources of light to provide lighting to a dynamic scene.
35
+
36
+ In this context, navigating a team of UAVs for filming tasks with distributed lighting is complex. Smooth and safe trajectories are required to achieve pleasing shots that do not compromise safety in dynamic scenarios. We propose
37
+
38
+ a method for online trajectory planning and execution with multiple UAVs. Our team obeys a leader-follower scheme where the formation leader carries an onboard camera to film a moving target and the followers generate trajectories that enable distributed lighting of the target, while maintaining desired lighting angles. We formulate a non-linear, optimization-based method that plans visually pleasant trajectories for the filming UAV and distributes the others in a specified formation. Moreover, we tackle safety by including a systematic framework for obstacle avoidance. Safe flight corridors for the UAVs are generated by forming sets of convex polyhedrons that model free space. Optimal and safe trajectories are thereafter computed within these convex sets.
39
+
40
+ # A. Related works
41
+
42
+ There have been several works focusing on filming dynamic scenes with a single UAV. Commercial products exist (e.g., DJI Mavic [6] or Skydio [7]) that implement semi-autonomous functionalities, such as auto-follow features for tracking an actor with collision avoidance. However, they do not address cinematographic principles. An integrated system for outdoor cinematography combining vision-based target localization with trajectory planning and collision avoidance has been proposed [8], [9]. Smoothness is achieved by minimizing trajectory jerk; shot quality by defining objective curves that fulfill relative angles with respect to the actor. Optimal trajectories for cinematography have also been computed in real-time through receding horizon optimization with non-linear constraints [10]. A user inputs framing objectives for the targets on the image to minimize errors on the image target projections, sizes, and relative viewing angles. Some authors have approached UAV cinematography by applying machine learning [11], [12]. Particularly, such techniques have been applied to demonstrations imitating professional cameraman's behaviors [13] or for reinforcement learning to achieve visually pleasant shots [14]. These works have presented valuable results for online trajectory planning, although they have not addressed the specific complexities for multi-UAV systems.
43
+
44
+ Regarding the methodology for multiple UAVs, a non-linear optimization problem was solved in receding horizon in [15], where collision avoidance to filmed actors and mutual collisions of UAVs were considered. Aesthetic objectives are introduced by the user as virtual reference trails. A specific camera parameter space is proposed in [16] to ensure cinematographic properties and to fulfill dynamic constraints along the trajectories. The motion of multiple UAVs around dynamic targets is coordinated through a centralized master-slave approach. A greedy framework for multi-UAV camera coordination is proposed in [17]. A decentralized planner computes UAV trajectories considering smoothness, shot diversity, collision avoidance, and mutual visibility. We have also addressed the trajectory planning for multi-UAV cinematography in previous work. We presented an architecture to execute cinematographic shots (with different types of camera motion) using multiple UAVs [18] and developed a distributed method to plan optimal trajectories reducing jerky camera movements [19]. In this paper, our focus is on the specifics of outdoor and dynamic
45
+
46
+ settings when compared to indoor scenarios [15]. Therefore, we have integrated obstacle avoidance in a more fundamental manner using local maps. Moreover, a novel problem with respect to previous work has been introduced, as we perform scene illumination with multiple UAVs to increase the quality of image shots.
47
+
48
+ The modification of lighting angles to improve images is fundamental in cinematography [20]. A camera with an onboard light on a UAV can compensate for insufficient lighting, but positioning lights at different angles with respect to the camera axis would require several UAVs. Despite the unquestionable importance of lighting for shot quality, its usage for aerial cinematography is not well-studied. Utilizing UAVs to provide supplementary illumination has been proposed for building documentation tasks [21] or tunnel inspection [22]. A formation with a filming UAV and others carrying lights was deployed to document the overshadowed parts of historical buildings [23]. A similar system has been used to carry out specialized documentation techniques [24]. However, these works have proposed lighting for tasks in static scenes, whereas the present paper deals with filming of moving targets in dynamic and potentially cluttered environments, e.g., to monitor inspection operations in large outdoor infrastructures.
49
+
50
+ In order to guarantee safe trajectories in multi-UAV cinematography, most works [15], [16], [19] only consider collision avoidance with actors, other UAVs, or static objects that can be modelled with previously known no-fly zones. The work in [9] integrates local mapping with onboard sensors to penalize proximity to obstacles and solves an unconstrained optimization problem. Another approach to obstacle avoidance applied for standard UAV trajectory planning is to create a convex representation of free space via a set of linear inequality constraints [25]-[28], to obtain a QP formulation for real-time motion planning. We have been inspired by these single-UAV works to develop a fundamental framework for the representation of obstacles in our non-linear optimization problem for multi-UAV cinematography.
51
+
52
+ # B. Contributions
53
+
54
+ Our main contributions are summarized as follows:
55
+
56
+ - We formulate a novel optimization problem for aerial filming with distributed lighting. Using a leader-follower scheme, we plan and execute trajectories in a distributed manner. Optimization is run in receding horizon to compute smooth trajectories with pleasing footage for the UAV filming (the leader), which takes shots of a dynamic target indicated by an external user. The followers compute their trajectories to maintain a formation with specified lighting angles on the target.
57
+ - We propose a new method to tackle non-convex trajectory optimization with obstacle avoidance in real time. We decompose the problem in two parts. Non-linear kinematical aspects are formulated in a problem without obstacle avoidance to generate reference trajectories. These are used to generate collision-free regions which are convex and to transform the problem into a final QP optimization task.
58
+
59
+ - We present experimental results for different types of cinematographic shots. We prove that our method is capable of computing smooth trajectories for reducing jerky movements and show that the distributed formation improves the illumination of footage. The system is evaluated with field experiments and also in various realistic simulated scenarios, including the filming of a moving target in a cluttered environment.
60
+
61
+ # II. SYSTEM OVERVIEW
62
+
63
+ Fig. 2 depicts the architecture of the entire system. The leader UAV carries a camera for filming while several others carry light sources to provide proper illumination. A human director specifies the cinematographic parameters for the scene. These parameters include the shot type (i.e., the camera motion relative to the target), the camera shooting angle for the leader, and the desired lighting angles for the followers. This information, together with an estimation of the target trajectory, is used to generate reference trajectories for the UAVs (Section III-B). These initial trajectories do not consider obstacle avoidance, but only cinematographic aspects. The leader attempts to execute the commanded shot smoothly, whereas the followers maintain a surrounding formation with the desired lighting angles.
64
+
65
+ Safety is ensured by integrating information from a local map for collision avoidance (Section III-C). Firstly, a collision-free path is generated for each UAV using the map and the initial cinematographic trajectories as guidelines. Then, a safe corridor along each of these paths is computed, consisting of a set of obstacle-free polyhedrons generated by the convex decomposition of free space (see Fig. 5). Finally, the UAV trajectories are obtained as a result of a trajectory optimization process that computes dynamically feasible trajectories inside each safe corridor (Section III-D). Inter-UAV collision avoidance is achieved by including the team-mates' planned trajectories as obstacles in the map.
66
+
67
+ The entire pipeline shown in Fig. 2 (except for the Human director component) runs on board each UAV in a receding horizon manner. This enables the online planning to react properly to changes in the behavior of the target being filmed, as well as to malfunctioning team-members or previously unseen obstacles. Note that either the Cinematographic trajectory generator or the Lighting trajectory generator is activated on each UAV, depending on whether it carries a camera or light. The component for trajectory tracking on each UAV is the low-level control pipeline described in [29].
68
+
69
+ # III. AUTONOMOUS AERIAL CINEMATOGRAPHY
70
+
71
+ In this section, we begin by detailing the UAV dynamic model (Section III-A). Then, we describe our procedure to generate optimal and safe trajectories for each UAV (Sections III-B, III-C, and III-D). Lastly, we explain how the orientation of a UAV is controlled (Section III-E).
72
+
73
+ # A. Multi-rotor aerial vehicle dynamic model
74
+
75
+ An independent trajectory tracker [29] for UAV attitude control is used, which allows for planning with a simplified positional dynamic UAV model. In addition, the orientation of the camera or light source onboard (depending on the UAV) needs to be modelled. We assume the existence of a gimbal mechanism to compensate for angle deviations due to changes in UAV attitude. Therefore, it is assumed that camera roll is negligible and we only control pitch and heading. Since the heading of a multi-rotor vehicle can be controlled independently of its position, we fix the relative position between the camera/light and the UAV to always point forward and control its heading through the UAV heading. The positional part of the dynamic model is defined as a linear double integrator:
78
+
79
+ $$
+ \begin{aligned} \dot {\mathbf {p}} &= \mathbf {v}, \\ \dot {\mathbf {v}} &= \mathbf {a}, \end{aligned} \tag {1}
+ $$
86
+
87
+ where $\mathbf{p} = [p_x p_y p_z]^T \in \mathbb{R}^3$ is the UAV position, $\mathbf{v} = [v_x v_y v_z]^T \in \mathbb{R}^3$ the linear velocity, and $\mathbf{a} = [a_x a_y a_z]^T \in \mathbb{R}^3$ the linear acceleration. The orientation of the camera/light may be modelled similarly:
88
+
89
+ $$
+ \begin{aligned} \dot {\mathbf {o}} &= \boldsymbol {\omega}, \\ \dot {\boldsymbol {\omega}} &= \boldsymbol {\theta}, \end{aligned} \tag {2}
+ $$
96
+
97
+ where $\mathbf{o} = [\varphi \xi]^T$ represents an orientation with respect to a global frame given by its heading and pitch angles, $\omega \in \mathbb{R}^2$ are the corresponding angular rates, and $\theta \in \mathbb{R}^2$ the angular accelerations. For the description of the proposed method, we define a full positional state of the UAV $\mathbf{x}_p = [\mathbf{p}^T\mathbf{v}^T]^T \in \mathbb{R}^6$ , a vector of positional control inputs $\mathbf{u}_p = \mathbf{a}$ , an orientation state $\mathbf{x}_o = [\mathbf{o}^T\boldsymbol{\omega}^T]^T \in \mathbb{R}^4$ , and a vector of orientation control inputs $\mathbf{u}_o = \boldsymbol{\theta}$ .
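
For illustration, the following is a minimal sketch (in Python, not the paper's C++/ROS implementation) of how the double-integrator model (1) can be propagated in discrete time, as required by the dynamics constraints (3.b) and (8.b); the sampling time and horizon values are only illustrative.

```python
import numpy as np

def propagate_positional_state(x0, u_seq, dt=0.2):
    """Propagate the double-integrator state x_p = [p, v] of (1) under accelerations u_seq.

    x0     : (6,) initial state [px, py, pz, vx, vy, vz]
    u_seq  : (N, 3) acceleration inputs a_0 ... a_{N-1}
    returns: (N + 1, 6) state trajectory x_0 ... x_N
    """
    xs = [np.asarray(x0, dtype=float)]
    for a in np.asarray(u_seq, dtype=float):
        p, v = xs[-1][:3], xs[-1][3:]
        # zero-order-hold discretization: p' = p + v dt + a dt^2 / 2, v' = v + a dt
        xs.append(np.concatenate([p + v * dt + 0.5 * a * dt ** 2, v + a * dt]))
    return np.vstack(xs)

# Example: start at rest and accelerate forward over an 8 s horizon (N = 40, dt = 0.2 s)
trajectory = propagate_positional_state(np.zeros(6), np.tile([0.5, 0.0, 0.0], (40, 1)))
print(trajectory[-1][:3])  # final position after 8 s
```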
98
+
99
+ # B. Generation of reference trajectories
100
+
101
+ The first step of our method for trajectory planning is to generate a reference trajectory $D_{j}$ for each UAV $j$ . The problem complexity is alleviated by removing collision avoidance constraints and focusing only on cinematographic aspects. For the filming UAV, the objective is to reach a position relative to the target as provided by the shot type $C_s$ , while minimizing changes in the camera angle to produce pleasant images. A specific camera shooting angle $\psi_d$ over the target needs to be maintained. The following non-linear optimization problem is formulated for the filming UAV:
102
+
103
+ $$
104
+ \underset {\mathbf {u} _ {0}, \dots , \mathbf {u} _ {N - 1}} {\text {minimize}} \sum_ {k = 1} ^ {N} \left(\left\| \mathbf {u} _ {k - 1} \right\| ^ {2} + \alpha_ {1} J _ {\psi , k}\right) + \alpha_ {2} J _ {N}, \tag {3}
105
+ $$
106
+
107
+ subject to $\mathbf{x}_0 = \mathbf{x}'$ (3.a)
108
+
109
+ $$
110
+ \mathbf {x} _ {k + 1} = \mathrm {f} _ {p} \left(\mathbf {x} _ {k}, \mathbf {u} _ {k}\right) \quad \forall k \in \{0, \dots , N - 1 \}, \tag {3.b}
111
+ $$
112
+
113
+ $$
114
+ \mathbf {v} _ {\min } \leq \mathbf {v} _ {k} \leq \mathbf {v} _ {\max } \quad \forall k \in \{1, \dots , N \}, \tag {3.c}
115
+ $$
116
+
117
+ $$
118
+ \mathbf {u} _ {\min } \leq \mathbf {u} _ {k} \leq \mathbf {u} _ {\max } \quad \forall k \in \{0, \dots , N - 1 \}, \tag {3.d}
119
+ $$
120
+
121
+ $$
122
+ q _ {z, \min } \leq q _ {z, k} \quad \forall k \in \{1, \dots , N \}, \tag {3.e}
123
+ $$
124
+
125
+ where $\mathrm{f}_p(\cdot)$ represents the positional part of the dynamic model defined in Section III-A; $\mathbf{v}_{min}$ , $\mathbf{v}_{max}$ are velocity limits; and $\mathbf{u}_{min}$ , $\mathbf{u}_{max}$ are control input limits.
126
+
127
128
+
129
+ ![](images/cbd1334574cfacbdd9e07dbd5d0af4104256d95f335973143e0ac6ff6fcf572b.jpg)
130
+ Fig. 2: The architecture of the proposed system. $C_s$ and $C_l$ represent the desired type of cinematographic shot and lighting configuration specified by a human director; $T_T$ is the target estimated trajectory; $D_L$ , $D_F$ are reference trajectories for the leader UAV and the follower UAVs, respectively; $P_L$ , $P_F$ are collision-free paths generated along the desired trajectories; $S_L$ , $S_F$ are safe corridors along the collision-free paths; and $T_L$ , $T_F$ are optimized trajectories for the camera and lighting UAVs, respectively. The modules enclosed in the blue rectangle run on both types of UAVs.
131
+
132
+ The first two terms in the cost function pursue smooth trajectories by penalizing UAV accelerations and reducing gimbal movements. The director specifies an aesthetic objective through the desired camera shooting angle $\psi_d$ to film the target (see Fig. 3). Emphasis is placed on positioning the UAV to keep this angle constant without moving the gimbal. In doing so, the angular changes in the gimbal are reduced to favor less jerky camera motion and, therefore, pleasant footage. In order to define $J_{\psi}$ , the relative position between the UAV camera and the target is introduced as:
133
+
134
+ $$
135
+ \mathbf {q} = \left[ \begin{array}{l l l} q _ {x} & q _ {y} & q _ {z} \end{array} \right] ^ {T} = \mathbf {p} _ {L} - \mathbf {p} _ {T}. \tag {4}
136
+ $$
137
+
138
+ Then, we define $J_{\psi}$ as:
139
+
140
+ $$
141
+ J _ {\psi , k} = \left(\tan \left(\psi_ {d}\right) - \frac {q _ {z , k}}{\sqrt {q _ {x , k} ^ {2} + q _ {y , k} ^ {2}}}\right) ^ {2}. \tag {5}
142
+ $$
143
+
144
+ The target position is predicted within the time horizon with a motion model (a constant speed model in our experiments). Prediction errors are tackled by recomputing UAV trajectories with a receding horizon. By minimizing the previous cost, we implicitly minimize variations in camera pitch angle as the relative pitch with respect to the target is kept constant. Moreover, the camera heading corresponds with the UAV heading, whose variations are also smoothed as explained in Section III-D. Therefore, the idea is to generate UAV trajectories where the gimbal only needs to move slightly to compensate for small disturbances.
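
As an illustration of how the shooting-angle cost (5) can be evaluated along the horizon, the sketch below predicts the target with a constant-velocity model and computes $J_{\psi,k}$ for a candidate leader trajectory; the function and variable names are ours and the values are illustrative, not taken from the paper's implementation.

```python
import numpy as np

def predict_target(p_T0, v_T, N, dt=0.2):
    """Constant-velocity prediction of the target position over N steps."""
    return np.array([p_T0 + v_T * dt * (k + 1) for k in range(N)])

def shooting_angle_cost(p_L, p_T, psi_d):
    """Per-step cost J_psi,k of (5) for paired leader/target positions of shape (N, 3)."""
    q = p_L - p_T                                   # relative position, eq. (4)
    horiz = np.linalg.norm(q[:, :2], axis=1)        # sqrt(q_x^2 + q_y^2)
    return (np.tan(psi_d) - q[:, 2] / horiz) ** 2

# Example with psi_d = 6 deg, as in the experiments: a leader offset that keeps the angle constant
p_T = predict_target(np.zeros(3), np.array([1.0, 0.0, 0.0]), N=40)
p_L = p_T + np.array([-8.0, 0.0, 8.0 * np.tan(np.radians(6.0))])
print(shooting_angle_cost(p_L, p_T, np.radians(6.0)).sum())  # ~0, the angle is already maintained
```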
145
+
146
+ The terminal cost $J_{N} = ||\mathbf{x}_{xy,d} - \mathbf{x}_{xy,N}||^{2}$ guides the UAV to a desired state imposed by the shot type, e.g., at a certain distance beside the target's final position in a lateral shot. Note that a final UAV height is not imposed, as we want the planner to compute the optimal $p_{z}$ to maintain the camera shooting angle commanded by the director. Lastly, the constraint (3.e) establishes a minimum distance above the target for safety purposes.
147
+
148
+ The reference trajectories for the lighting UAVs are computed to achieve a desired leader-follower formation around the target. The desired position of the followers is influenced by the corresponding leader position $\mathbf{p}_L$ and camera orientation $\mathbf{o}_L$ , the target position $\mathbf{p}_T$ , the desired lighting angles of the $j$ -th light $\chi_j$ and $\varrho_j$ , and the desired distance of the light to the target $d_j$ .
149
+
150
+ ![](images/57efac4238ee7e8ae880d17c3c8e57cbf7e2d8b5e027e3119624bc9d2ff5c9cd.jpg)
151
+ Fig. 3: Reference frames and camera shooting angle. The origins of the camera and UAV frames coincide.
152
+
153
+ The desired position of the $j$ -th follower $\mathbf{p}_j$ is then given by the equation:
154
+
155
+ $$
156
+ \mathbf {p} _ {j} = \mathbf {p} _ {T} + d _ {j} \left[ \begin{array}{c} - \cos (\varphi_ {j}) \cos (\xi_ {j}) \\ - \sin (\varphi_ {j}) \cos (\xi_ {j}) \\ \sin (\xi_ {j}) \end{array} \right], \tag {6}
157
+ $$
158
+
159
+ where $\varphi_{j} = \varphi_{L} + \chi_{j}$ and $\xi_{j} = \xi_{L} + \varrho_{j}$ are desired lighting angles relative to the camera's optical axis (see Fig. 4). To avoid jumps in the desired followers' positions caused by quick changes in the target position (e.g., due to a transition to a new target), a virtual target, located in front of the camera at a certain distance along its optical axis, is used. The position of this virtual target is given by:
160
+
161
+ $$
162
+ \mathbf {p} _ {v} = \mathbf {p} _ {L} + d _ {v} \left[ \begin{array}{c} \cos \left(\varphi_ {L}\right) \cos \left(\xi_ {L}\right) \\ \sin \left(\varphi_ {L}\right) \cos \left(\xi_ {L}\right) \\ \sin \left(\xi_ {L}\right) \end{array} \right], \tag {7}
163
+ $$
164
+
165
+ where $d_v$ is the desired distance between the virtual target and the camera center and $\mathbf{p}_v$ denotes the virtual target position. Substituting $\mathbf{p}_v$ for $\mathbf{p}_T$ in (6) yields a more consistent formation scheme, in which less aggressive maneuvers are required and the lighting always focuses on the scene in front of the camera, which is relevant for obtaining pleasant videos.
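
The following sketch (our own illustrative code, with hypothetical parameter values) combines (7) and (6): it first places the virtual target in front of the camera and then derives the desired position of the $j$-th lighting UAV from the commanded lighting angles.

```python
import numpy as np

def virtual_target(p_L, heading_L, pitch_L, d_v):
    """Virtual target at distance d_v along the camera optical axis, cf. (7)."""
    axis = np.array([np.cos(heading_L) * np.cos(pitch_L),
                     np.sin(heading_L) * np.cos(pitch_L),
                     np.sin(pitch_L)])
    return p_L + d_v * axis

def follower_position(p_target, heading_L, pitch_L, chi_j, rho_j, d_j):
    """Desired position of the j-th lighting UAV, cf. (6), with relative angles chi_j, rho_j."""
    phi_j, xi_j = heading_L + chi_j, pitch_L + rho_j
    offset = np.array([-np.cos(phi_j) * np.cos(xi_j),
                       -np.sin(phi_j) * np.cos(xi_j),
                        np.sin(xi_j)])
    return p_target + d_j * offset

# Example: leader at the origin looking along +x, light 45 deg to the side and 20 deg above
p_v = virtual_target(np.zeros(3), 0.0, 0.0, d_v=6.0)
print(follower_position(p_v, 0.0, 0.0, np.radians(45.0), np.radians(20.0), d_j=5.0))
```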
166
+
167
+ # C. Generation of safe corridors
168
+
169
+ The initial reference trajectories are computed without considering obstacles. They are, therefore, used as seeds to generate a safe corridor $S_{j}$ for each UAV $j$ in which collision-free trajectories can then be computed. Firstly, we convert each trajectory $D_{j}$ into a collision-free path $P_{j}$ .
170
+
171
+ ![](images/049f04c05e12fa4532254bd25aff057223c079a0cdeb6e7841e1331a01a912db.jpg)
172
+ (a) top view
173
+
174
+ ![](images/ee678417bea15d2bde0cc46f260fb234fcdac8e6f2b6c49b56acbd2788a35d0d.jpg)
175
+ (b) side view
176
+ Fig. 4: The leader-follower scheme defined by (6).
177
+
178
+ We iterate over each of the $N$ waypoints in $D_{j}$ and add it directly to $P_{j}$ if it is collision-free. Otherwise, we label the previous collision-free waypoint as $A$ and keep moving along $D_{j}$ until we find the next collision-free waypoint $B$ . Then, we try to find an alternative collision-free path from $A$ to $B$ , to be appended to $P_{j}$ , and continue iterating. For that alternative path, we use the Jump Point Search (JPS) algorithm introduced in [30], [31] and extended to 3D in [27]. Real-time performance is ensured by introducing a timeout for the JPS path search.
179
+
180
+ If the JPS algorithm fails to find a path within the given timeout from $A$ to $B$ , we run it again to connect $A$ directly to the last waypoint in $D_{j}$ (let this waypoint be $C$ ). If this is not found either, we append to $P_{j}$ the path to the node closest to $C$ from all those expanded during the JPS search. Once completed, $P_{j}$ consists of an arbitrary number of points equal to or greater than $N$ . Since $P_{j}$ is used for the generation of the safety corridors for particular points in $D_{j}$ , it is postprocessed so that $|P_{j}| = |D_{j}| = N$ . $P_{j}$ is sampled so that the waypoint distribution is close to the initial points in $D_{j}$ . Since these collision-free paths are used as a guide for trajectory optimization in subsequent steps, the distance sampling step $d_{s}$ is limited to help avoid the dynamic infeasibility of the final trajectories. If the sampled $P_{j}$ consists of more than $N$ waypoints, the overflowing points are discarded for the subsequent steps of the trajectory optimization process. The process to create a collision-free path $P_{j}$ and its corresponding safe corridor $S_{j}$ is illustrated in Fig. 5.
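
A simplified sketch of this path-repair step is shown below; it uses placeholder callbacks for the collision check and for the (timed-out) JPS detour search, so it only illustrates the bookkeeping around waypoints $A$ and $B$ and the final resampling with a bounded spacing $d_s$.

```python
import numpy as np

def repair_path(D, is_free, find_detour, N, d_s_max=0.5):
    """Convert a reference trajectory D (N, 3) into a collision-free path P with N waypoints.

    is_free(w)        -> bool: placeholder collision check against the local map
    find_detour(a, b) -> list of waypoints or None: placeholder for the JPS search with timeout
    """
    P, i = [], 0
    while i < len(D):
        if is_free(D[i]):
            P.append(D[i])
            i += 1
            continue
        A = P[-1] if P else D[0]
        j = i
        while j < len(D) and not is_free(D[j]):   # skip blocked waypoints until B
            j += 1
        B = D[j] if j < len(D) else D[-1]
        detour = find_detour(A, B)
        if detour is not None:
            P.extend(detour)
        i = j
    # Resample to N waypoints along the path, limiting the sampling step to d_s_max
    P = np.asarray(P, dtype=float)
    seg = np.linalg.norm(np.diff(P, axis=0), axis=1)
    s = np.concatenate([[0.0], np.cumsum(seg)])
    targets = np.minimum(np.arange(N) * d_s_max, s[-1])
    return np.vstack([[np.interp(t, s, P[:, k]) for k in range(3)] for t in targets])
```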
181
+
182
+ Safe corridors are generated around the collision-free paths with a prefixed initial position of the UAV (i.e., $N + 1$ waypoints), using a map of the environment represented by a point cloud $O_{pcl}$ and the convex decomposition method proposed in [27]. This method is based on an iterative procedure for the generation of polyhedrons. It begins by inflating an ellipsoid aligned with each path segment. In the next step, tangent planes are constructed at the contact points between the ellipsoid and any obstacles. Afterwards, all points lying behind these planes are removed from $O_{pcl}$ . The next iteration then starts by inflating the ellipsoid up to the nearest remaining point in $O_{pcl}$ . This procedure terminates when there are no remaining points in $O_{pcl}$ . The generated tangent planes define an obstacle-free polyhedron $\mathcal{P}$ enclosing the corresponding path segment, and the set of all polyhedrons along the path constitutes the safe corridor.
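
A heavily simplified sketch of the decomposition idea is given below: instead of the path-aligned ellipsoid of [27], it grows a sphere around the segment midpoint, cuts a halfspace at the closest obstacle point, and repeats until no points remain, returning the halfspaces $A\mathbf{x} \le \mathbf{b}$ that define one polyhedron $\mathcal{P}$ of the corridor.

```python
import numpy as np

def segment_polyhedron(seg_mid, obstacle_points):
    """Halfspaces A x <= b that contain seg_mid and exclude all obstacle points.

    Simplified stand-in for the convex decomposition of [27]: a sphere centred at the
    segment midpoint replaces the inflated, path-aligned ellipsoid.
    """
    pts = np.asarray(obstacle_points, dtype=float)
    A, b = [], []
    while len(pts) > 0:
        closest = pts[np.argmin(np.linalg.norm(pts - seg_mid, axis=1))]
        n = (closest - seg_mid) / np.linalg.norm(closest - seg_mid)  # outward normal
        A.append(n)
        b.append(float(n @ closest))              # plane through the closest obstacle point
        pts = pts[pts @ n < b[-1] - 1e-9]         # discard points on or behind this plane
    return np.array(A), np.array(b)

# Example: three obstacle points around a path-segment midpoint at the origin
A, b = segment_polyhedron(np.zeros(3), [[2.0, 0.0, 0.0], [0.0, 3.0, 0.0], [-1.5, -1.5, 0.0]])
print(A.shape, b.shape)  # each row of A (and entry of b) is one face of the polyhedron
```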
183
+
184
+ # D. Trajectory optimization
185
+
186
187
+
188
+ ![](images/1132cd546db0b8c0309a482d08a51eab569d6cd0eceaf38ac5482ade95914580.jpg)
189
+ Fig. 5: The safe corridor generation process. The initial reference trajectory (green) is converted into a collision-free path (purple), and the obstacle-free polyhedrons are generated along this path. The final optimized trajectory within the safe corridor is also shown (blue). We inflate the obstacles for safety purposes (light red).
190
+
191
+ Given a collision-free path $P$ and its corresponding safe corridor $S$ , a final optimal trajectory is computed through a QP problem in receding horizon. The particular optimization task attempts to track a desired trajectory $\mathbf{p}_d$ corresponding to the reference trajectory $D_{j}$ :
192
+
193
+ $$
194
+ \underset {\mathbf {u} _ {0}, \dots , \mathbf {u} _ {N - 1}} {\text {minimize}} \sum_ {k = 1} ^ {N} \left(\left\| \mathbf {p} _ {d, k} - \mathbf {p} _ {k} \right\| ^ {2} + \beta \left\| \mathbf {u} _ {k - 1} \right\| ^ {2}\right), \tag {8}
195
+ $$
196
+
197
+ subject to $\mathbf{x}_0 = \mathbf{x}'$ (8.a)
198
+
199
+ $$
200
+ \mathbf {x} _ {k + 1} = \mathrm {f} _ {p} \left(\mathbf {x} _ {k}, \mathbf {u} _ {k}\right) \quad \forall k \in \{0, \dots , N - 1 \}, \tag {8.b}
201
+ $$
202
+
203
+ $$
204
+ \mathbf {v} _ {\min } \leq \mathbf {v} _ {k} \leq \mathbf {v} _ {\max } \quad \forall k \in \{1, \dots , N \}, \tag {8.c}
205
+ $$
206
+
207
+ $$
208
+ \mathbf {u} _ {\min } \leq \mathbf {u} _ {k} \leq \mathbf {u} _ {\max } \quad \forall k \in \{0, \dots , N - 1 \}, \tag {8.d}
209
+ $$
210
+
211
+ $$
212
+ \mathbf {p} _ {k} \in \mathcal {P} _ {k} \quad \forall k \in \{1, \dots , N \}, \tag {8.e}
213
+ $$
214
+
215
+ where $\mathrm{f}_p(\cdot)$ represents the positional part of the dynamic model defined in Section III-A; $\mathbf{v}_{min}$ , $\mathbf{v}_{max}$ are velocity limits; $\mathbf{u}_{min}$ , $\mathbf{u}_{max}$ are control input limits; and $\mathcal{P}_k$ is a convex polyhedron representing the free space associated with the $k$ -th transition point. The last constraint ensures a safe resulting trajectory without collisions. Since constraint (8.e) can be decomposed into a set of linear constraints, the problem becomes a convex QP.
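
As a minimal illustration of how (8) becomes a convex QP once each polyhedron $\mathcal{P}_k$ is written as linear inequalities $A_k \mathbf{p}_k \le \mathbf{b}_k$, the sketch below formulates the problem with CVXPY and symmetric, illustrative bounds; the paper's implementation instead relies on the ACADO Toolkit.

```python
import cvxpy as cp
import numpy as np

def corridor_qp(p_d, x0, corridor, dt=0.2, beta=0.1, v_max=5.0, a_max=3.0):
    """Solve a problem of the form (8): track p_d inside the safe corridor.

    p_d      : (N, 3) desired positions from the reference trajectory
    x0       : (6,)  current state [p, v]
    corridor : list of N pairs (A_k, b_k), each polyhedron P_k given as A_k p <= b_k
    """
    N = len(p_d)
    p, v, a = cp.Variable((N + 1, 3)), cp.Variable((N + 1, 3)), cp.Variable((N, 3))
    cons = [p[0] == x0[:3], v[0] == x0[3:]]                              # (8.a)
    for k in range(N):
        cons += [p[k + 1] == p[k] + v[k] * dt + 0.5 * a[k] * dt ** 2,    # (8.b)
                 v[k + 1] == v[k] + a[k] * dt,
                 cp.abs(v[k + 1]) <= v_max,                              # (8.c), symmetric bounds
                 cp.abs(a[k]) <= a_max]                                  # (8.d)
        A_k, b_k = corridor[k]
        cons += [A_k @ p[k + 1] <= b_k]                                  # (8.e) as linear constraints
    cost = cp.sum_squares(p[1:] - p_d) + beta * cp.sum_squares(a)
    cp.Problem(cp.Minimize(cost), cons).solve()
    return p.value, v.value, a.value
```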
216
+
217
+ The optimization formulation is the same for both the leader and the follower UAVs. However, there are two relevant differences. First, the desired reference trajectories are computed in a different manner, following either filming or lighting criteria (see Section III-B). Second, the followers encode mutual collision avoidance through constraint (8.e). To prevent negative effects on the cinematographic quality of the performed shot, mutual collision avoidance is left entirely to the followers. A fixed priority scheme is defined for the UAVs, and the occupied space $O_{pcl}$ of each follower is updated with the current planned trajectories from the leader and the other followers of higher priority. $O_{pcl}$ is updated with spherical objects of the desired collision avoidance radius at each waypoint of the UAV trajectories to be avoided. A similar procedure is followed to incorporate the target's predicted trajectory (also for the leader in this case). To maintain real-time performance, the occupied space $O_{pcl}$ is assumed static for a given horizon time, but it is updated at each planning step, accommodating all static and dynamic obstacles.
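
The way team-mates' planned trajectories enter the map can be sketched as follows (illustrative code, with an arbitrary sampling of the sphere surface): each waypoint of a higher-priority trajectory is expanded into a small sphere of points that is appended to $O_{pcl}$ before the corridor generation step.

```python
import numpy as np

def add_trajectory_as_obstacles(o_pcl, trajectory, radius=1.0, samples=20, seed=0):
    """Append sphere-sampled points of the given radius around every waypoint to O_pcl."""
    rng = np.random.default_rng(seed)
    spheres = []
    for w in np.asarray(trajectory, dtype=float):
        dirs = rng.normal(size=(samples, 3))
        dirs /= np.linalg.norm(dirs, axis=1, keepdims=True)  # random unit directions
        spheres.append(w + radius * dirs)                    # points on the sphere around w
    return np.vstack([np.asarray(o_pcl, dtype=float)] + spheres)
```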
218
+
219
+ Another crucial issue for multi-UAV cinematography applications is how to prevent other UAVs from appearing in the Field of View (FoV) of the filming UAV. However, including this in the optimization task as either a constraint or a cost term can increase the complexity of the problem considerably. We considered including the FoV of the leader camera as an obstacle in the local maps of the followers, so that they may avoid it. Even so, relatively small changes in camera orientation could result in significant changes in the map representation and lead to unstable planned trajectories. Therefore, the camera's FoV is avoided by the lighting UAVs only through penalizing deviations from the desired trajectories $\mathbf{p}_d$ . Thus, FoV avoidance is mostly determined by the choice of lighting parameters that describe the desired formation.
220
+
221
+ Finally, occlusions caused by obstacles in the FoV of the camera or the lights are also a relevant aspect when filming. Occlusion throughout a significant part of the shot renders the shot useless, and in the case of onboard detection of the target, it also disables target following. However, in most cases, occlusions are temporary, and avoiding them is always a trade-off between deviating significantly from the desired cinematographic trajectory and having part of the video occluded. In this work, the trajectories are generated so that they are close to the desired cinematographic shots specified by a director. Possible occlusions have to be resolved by redefining the shot to be performed.
222
+
223
+ # E. Orientation control
224
+
225
+ In this application, both the camera and the light sources need to always be pointing at the filmed target. Hence, their desired orientation is given by:
226
+
227
+ $$
+ \mathbf {o} _ {d} = \left[ \varphi_ {d} \ \xi_ {d} \right] ^ {T} = \left[ \arctan \left(q _ {y}, q _ {x}\right) \ \ \arcsin \left(\frac {q _ {z}}{\| \mathbf {q} \|}\right) \right] ^ {T}. \tag {9}
+ $$
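
A small sketch of (9) is given below; signs may need to be adapted to the heading and pitch conventions of Fig. 3.

```python
import numpy as np

def desired_orientation(p_uav, p_target):
    """Desired heading and pitch of the camera/light from the relative position q, cf. (4) and (9)."""
    q = np.asarray(p_uav, dtype=float) - np.asarray(p_target, dtype=float)
    heading = np.arctan2(q[1], q[0])
    pitch = np.arcsin(q[2] / np.linalg.norm(q))
    return heading, pitch
```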
230
+
231
+ Orientation control is also formulated as a constrained quadratic optimization problem in receding horizon in order to achieve smoother orientation changes. For simplicity of description, $\mathbf{x} \coloneqq \mathbf{x}_o$ and $\mathbf{u} \coloneqq \mathbf{u}_o$ in the following problem formulation:
232
+
233
+ $$
234
+ \underset {\mathbf {u} _ {0}, \dots , \mathbf {u} _ {N - 1}} {\text {minimize}} \sum_ {k = 1} ^ {N} \left(\left\| \mathbf {o} _ {d, k} - \mathbf {o} _ {k} \right\| ^ {2} + \gamma \left\| \mathbf {u} _ {k - 1} \right\| ^ {2}\right), \tag {10}
235
+ $$
236
+
237
+ subject to $\mathbf{x}_0 = \mathbf{x}'$ (10.a)
238
+
239
+ $$
240
+ \mathbf {x} _ {k + 1} = \mathrm {f} _ {o} \left(\mathbf {x} _ {k}, \mathbf {u} _ {k}\right) \quad \forall k \in \{0, \dots , N - 1 \}, \tag {10.b}
241
+ $$
242
+
243
+ $$
244
+ \omega_ {\min } \leq \omega_ {k} \leq \omega_ {\max } \quad \forall k \in \{1, \dots , N \}, \tag {10.c}
245
+ $$
246
+
247
+ $$
248
+ \left. \xi_ {\min } \leq \xi_ {k} \leq \xi_ {\max } \quad \forall k \in \{1, \dots , N \}, \right. \tag {10.d}
249
+ $$
250
+
251
+ $$
252
+ \mathbf {u} _ {\min } \leq \mathbf {u} _ {k} \leq \mathbf {u} _ {\max } \quad \forall k \in \{0, \dots , N - 1 \}, \tag {10.e}
253
+ $$
254
+
255
+ where $\mathrm{f}_o(\cdot)$ represents the orientation part of the dynamic model defined in Section III-A; $\omega_{min}$ , $\omega_{max}$ are limits on the angular velocities; $\mathbf{u}_{min}$ , $\mathbf{u}_{max}$ are control input limits; and $\xi_{min}$ , $\xi_{max}$ represent the hardware limits of the gimbal for adjusting the pitch angle. The heading and pitch angles of the camera or light can be controlled independently. Thus, Problem (10) is decoupled into two simpler problems. The optimal solution for each problem can be found analytically with a standard framework for linear MPC (Model Predictive Control).
258
+
259
+ # IV. EXPERIMENTAL EVALUATION
260
+
261
+ In this section, experimental results are presented to demonstrate the performance of our method for multi-UAV trajectory planning. We show that the proposed method is capable of computing smooth cinematographic trajectories in real time, that the trajectories of the follower UAVs providing lighting comply with the formation constraints and improve the quality of the shot, and that the method remains safe in the presence of multiple obstacles.
262
+
263
+ # A. Experimental setup
264
+
265
+ We implemented our architecture described in Section II in C++ using the ROS framework. The ACADO Toolkit [32] was used to solve the optimization problems. We conducted software-in-the-loop simulations using Gazebo to simulate physics and to equip the UAVs with a camera and lights. To solve the optimization problems, a horizon length of 8s and a time step of $0.2\mathrm{s}$ were chosen. The cinematographic parameters were set to $\psi_{d} = 6^{\circ}$ and $q_{z,min} = 0.5\mathrm{m}$ . The maximum distance sampling step was set to $d_{s,max} = 0.5\mathrm{m}$ .
266
+
267
+ # B. Simulation - Cinematography trajectories
268
+
269
+ The objective of this simulation was twofold: to demonstrate how the method computes smoother camera trajectories for the leader UAV while complying with cinematographic aspects, and how the trajectories of the followers comply with formation constraints to light the target properly. We simulated a human worker performing a maintenance operation on a transmission tower while being monitored by a team of three UAVs (one filming and two lighting the worker). While the worker approached and climbed the tower, the system was commanded to perform a lateral shot followed by a sequence of fly-over shots.
270
+
271
+ The fly-over shots were selected to film the operation as they impose relative motion between the camera and the target. This feature is regarded as richer from a cinematographic point of view. We further demonstrate how our method is able to execute these relative movements more aesthetically than a baseline approach where the specific term to smooth variations in camera angles has been removed (i.e., $\alpha_{1} = 0$ in Problem 3). Fig. 6 compares the trajectories for the camera carrying UAV generated with both our method and the baseline approach. The baseline approach generates straight trajectories, whereas our method results in orbital trajectories, which have been used in the cinematography literature to produce more pleasant videos. For instance, [16], [17], [33] apply the arcball principle [34] to create a spherical surface around the target for aesthetic camera motion. We can also see in Fig. 6 that our method reduces the jerk of the camera angles. Note that in the aerial cinematography literature, the jerk of the camera motion (third derivative of the angles) has been identified as a key aspect for shot quality [9], [35].
272
+
273
+ ![](images/b6fb194e06b73a2d5ae24c1662e10947d15be9c6d9978dcc6b2a84c3942752b1.jpg)
274
+
275
+ ![](images/d09cf5cf34df4264f5c390e92936f42db5955c3c588dbb89a8c16e51113684eb.jpg)
276
+ Fig. 6: Trajectories for the camera carrying UAV while monitoring a worker on a transmission tower. For simplicity, only the lateral shot and the first fly-over shot are shown. We compare the trajectories generated by our method (blue) with those from a baseline approach without cinematographic costs (green). The upper image displays a top view of the UAV's and target's trajectories. The small white dots on the trajectories depict transition points sampled every $5\mathrm{s}$ to give a notion of the speed. The bottom image depicts the temporal evolution of the jerk of the camera angles
277
+
278
+ We measured the root mean square of the jerk of $\varphi$ and $\xi$ along the full trajectories and obtained $0.0197\,\mathrm{rad}\,\mathrm{s}^{-3}$ and $0.0048\,\mathrm{rad}\,\mathrm{s}^{-3}$ , respectively, for our method; and $0.0265\,\mathrm{rad}\,\mathrm{s}^{-3}$ and $0.0075\,\mathrm{rad}\,\mathrm{s}^{-3}$ , respectively, for the baseline without the cinematographic cost term.
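
For reference, the RMS-jerk metric used above can be computed from sampled camera angles with finite differences, as in this short sketch (the sampling period and the test signal are illustrative):

```python
import numpy as np

def rms_jerk(angles, dt):
    """Root mean square of the third derivative of a sampled angle signal, in rad s^-3."""
    jerk = np.diff(angles, n=3) / dt ** 3   # finite-difference third derivative
    return float(np.sqrt(np.mean(jerk ** 2)))

# Example: heading samples over 8 s at dt = 0.2 s
t = np.arange(0.0, 8.0, 0.2)
print(rms_jerk(0.1 * np.sin(0.5 * t), dt=0.2))
```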
279
+
280
+ Fig. 7 shows the trajectories followed by the whole UAV formation throughout the experiment to film the maintenance operation. It can be seen that the formation is properly maintained to avoid collisions between the UAVs and the tower, and to provide the required lighting of the filmed object. Moreover, none of the UAVs appear in the camera's field of view. The temporal evolution of the deviations from the desired orientation of each light and their distance from entering the camera FoV during this simulation are shown in Fig. 8. A video of the complete simulation can be found at the site with multimedia materials.
281
+
282
+ # C. Simulation - Cluttered environment
283
+
284
+ The aim of this simulation was to demonstrate the performance of our method for trajectory planning in a cluttered environment while assessing its scalability with numerous obstacles. We simulated a forest-like scenario with multiple trees as obstacles. As a human target walks through the forest, the filming UAV executes a chase shot from behind while the lighting UAVs follow the leader side by side. Fig. 9 depicts the distribution of the obstacles around the forest and the generated trajectories for the UAVs. In this figure, it is visible that the UAVs were able to follow the human in formation and to simultaneously avoid obstacles.
285
+
286
+ Finally, we analyze the scalability of our method in terms of computational demand.
287
+
288
+ ![](images/8d1e9cc4caef6ea641aeb8ec088b3f43043a48d9172d29edf00e7dccfa995599.jpg)
289
+ Fig. 7: An illustration of the experiment where an operator is filmed working on a transmission tower. The trajectories of the camera carrying leader (orange), both followers carrying lights (blue and green), and the human worker (red) are shown. The obstacle map is represented by a point cloud, including the power lines and tower. The worker is tracked with a lateral shot as he walks to the tower and then with a sequence of fly-over shots while he climbs up. Several onboard images taken during the experiment are also shown.
290
+
291
+ ![](images/3bc01194f0cb3a741948dbe2cde4c3f80842fc8c38d13a91c491946e1ebff249.jpg)
292
+ Fig. 8: Temporal evolution of the distance $d_{F}$ of UAVs carrying lights from entering the camera FoV, deviation from desired heading $\varphi_{d}$ and deviation from desired pitch angle of light $\xi_{d}$ .
293
+
294
+ Simulations were run on a 4-core Intel(R) Core(TM) i7-10510U CPU @ 1.80 GHz. Table I shows the total planning time of our method for each iteration run on the leader UAV. As expected, most of the time was spent in the non-convex optimization step described in Section III-B. The results for the followers are not included because they skip this non-convex optimization and thus consume less time. The results are similar for the two simulations, although the second scenario was significantly more cluttered.
295
+
296
+ Since the map of the environment is transformed into safe corridors made of convex polyhedrons, cluttered environments do not increase the computational demands of the trajectory optimization method. Therefore, we are able to plan the leader's trajectories at a rate of $1\mathrm{Hz}$ with horizon lengths of 8s. This rate is adequate for real-time performance in the dynamic scenarios that we target. The lower computational complexity required to generate the initial trajectories of the followers allows us to plan the followers' trajectories at a higher rate of $2\mathrm{Hz}$ , enabling faster reactions to changes in the leader's behaviour and thus more efficient mutual collision avoidance.
297
+
298
+ # D. Real world experiment
299
+
300
+ In order to demonstrate our method, we performed field experiments generating trajectories for a real team of UAVs (see Fig. 10).
301
+
302
+ TABLE I: The planning times of our method per iteration. The total average values are shown for the two experiments. The percentage of time consumed at each step is shown thereafter. ITG stands for the procedure indicated in Section III-B, SCG for the procedure described in Section III-C, and FTO for the trajectory optimization described in Section III-D.
303
+
304
+ <table><tr><td rowspan="2"></td><td colspan="4">Time (s)</td></tr><tr><td>Total (Avg ± std)</td><td>ITG (%)</td><td>SCG (%)</td><td>FTO (%)</td></tr><tr><td>Tower</td><td>0.70923 ± 0.10557</td><td>70.9982</td><td>11.81564</td><td>17.18615</td></tr><tr><td>Forest</td><td>0.71274 ± 0.05792</td><td>72.41338</td><td>8.77989</td><td>18.80673</td></tr></table>
305
+
306
+ ![](images/50d0fb859a238de77951221f2f947340ac9365db2284742f1c14b93f141b2ad2.jpg)
307
+ Fig. 9: A top view of the trajectories generated in the cluttered forest scenario. The trajectories of the target (red), the leader (orange), and both followers (blue and green) are shown. The black dots represent trees.
308
+
309
+ Thus, we proved the real-time performance of the proposed approach onboard. A sequence of shots was commanded to film a dynamic target in an outdoor scenario. A video of the experiment can be found at the multimedia material site.
310
+
311
+ ![](images/8f55c5ae58ba9e6cd6ae5be7eb9bc160e6b122fb9bb4756cb2df3d614f23d238.jpg)
312
+ Fig. 10: A snapshot of a real experiment in an outdoor scenario. UAVs were localized using RTK GPS. The target location was generated from a noisy ground truth, without onboard visual detection.
313
+
314
+ # V. CONCLUSION
315
+
316
+ This paper has presented a method for autonomous aerial cinematography with distributed lighting by a team of UAVs. We have proposed a novel methodology for multi-UAV trajectory planning, addressing non-linear cinematographic aspects and obstacle avoidance in separate optimization steps. We have demonstrated that the method is capable of generating smooth trajectories that comply with aesthetic objectives for the filming UAV, as well as trajectories for the follower UAVs that allow them to keep a formation that lights the target properly while staying out of the camera FoV. Moreover, our results indicate that we can plan trajectories in a distributed and online manner, and that the method is suitable for obstacle avoidance even in cluttered environments. As future work, we plan to address occlusions caused by obstacles within the camera FoV. Our idea is to compute the regions where these occlusions would take place and include them in the representation of the occupied space.
317
+
318
+ # REFERENCES
319
+
320
+ [1] I. Mademlis et al., "High-level multiple-UAV cinematography tools for covering outdoor events," IEEE Trans. on Broadcasting, vol. 65, no. 3, pp. 627-635, 2019.
321
+
322
+ [2] A. Sabirova et al., "Drone cinematography system design and new guideline model for scene objects interaction," in 2020 Int. Conf. Nonlinearity, Information and Robotics, 2020.
323
+ [3] B. F. Jeon et al., "Detection-Aware Trajectory Generation for a Drone Cinematographer," ArXiv e-prints.
324
+ [4] L.-E. Caraballo et al., "Autonomous planning for multiple aerial cinematographers," in IEEE/RSJ IROS, 2020.
325
+ [5] P. Moreno et al., "Aerial multi-camera robotic jib crane," IEEE RA-L, vol. 6, no. 2, pp. 4103-4108, 2021.
326
+ [6] DJI, "Mavic pro 2," 2018. [Online]. Available: www.dji.com/es/mavic
327
+ [7] Skydio, "Skydio 2," 2019. [Online]. Available: www.skydio.com
328
+ [8] R. Bonatti et al., "Towards a robust aerial cinematography platform: Localizing and tracking moving targets in unstructured environments," in IEEE/RSJ IROS, 2019.
329
+ [9] R. Bonatti et al., "Autonomous aerial cinematography in unstructured environments with learned artistic decision-making," JFR, vol. 37, no. 4, pp. 606-641, 2020.
330
+ [10] T. Nageli et al., "Real-time motion planning for aerial videography with dynamic obstacle avoidance and viewpoint optimization," IEEE RA-L, vol. 2, no. 3, pp. 1696-1703, 2017.
331
+ [11] N. Passalis et al., "Deep reinforcement learning for frontal view person shooting using drones," in IEEE EAIS, 2018.
332
+ [12] Y. Dang et al., "Imitation learning-based algorithm for drone cinematography system," IEEE Trans. Cogn. Devel. Syst., pp. 1-1, 2020.
333
+ [13] C. Huang et al., "Learning to capture a film-look video with a camera drone," in IEEE ICRA, 2019, pp. 1871-1877.
334
+ [14] M. Gschwindt et al., "Can a robot become a movie director? Learning artistic principles for aerial cinematography," in IEEE/RSJ IROS, 2019.
335
+ [15] T. Nageli et al., "Real-time planning for automated multi-view drone cinematography," ACM Trans. Graph., vol. 36, no. 4, pp. 1-10, 2017.
336
+ [16] Q. Galvane et al., “Directing cinematographic drones,” ACM Trans. Graph., vol. 37, no. 3, pp. 1-18, 2018.
337
+ [17] A. Bucker et al., “Do You See What I See? Coordinating multiple aerial cameras for robot cinematography,” arXiv, 2020.
338
+ [18] A. Alcantara et al., "Autonomous execution of cinematographic shots with multiple drones," IEEE Access, pp. 201300-201316, 2020.
339
+ [19] ——, "Optimal trajectory planning for cinematography with multiple unmanned aerial vehicles," RAS, vol. 140, p. 103778, 2021.
340
+ [20] B. Hall, Understanding cinematography. Crowood, 2015.
341
+ [21] P. Petracek et al., "Dronument: System for Reliable Deployment of Micro Aerial Vehicles in Dark Areas of Large Historical Monuments," IEEE RA-L, vol. 5, no. 2, pp. 2078-2085, 2020.
342
+ [22] M. Petrlik et al., "A robust UAV system for operations in a constrained environment," IEEE RA-L, vol. 5, no. 2, pp. 2169-2176, 2020.
343
+ [23] M. Saska et al., "Documentation of dark areas of large historical buildings by a formation of unmanned aerial vehicles using model predictive control," in IEEE ETFA, 2017.
344
+ [24] V. Krátký et al., "Autonomous reflectance transformation imaging by a team of unmanned aerial vehicles," IEEE RA-L, vol. 5, no. 2, pp. 2302-2309, 2020.
345
+ [25] J. Chen et al., "Online generation of collision-free trajectories for quadrotor flight in unknown cluttered environments," in IEEE ICRA, 2016.
346
+ [26] K. Mohta et al., "Fast, autonomous flight in GPS-denied and cluttered environments," JFR, vol. 35, no. 1, pp. 101-120, 2018.
347
+ [27] S. Liu et al., "Planning dynamically feasible trajectories for quadrotors using safe flight corridors in 3-D complex environments," IEEE RA-L, vol. 2, no. 3, pp. 1688-1695, 2017.
348
+ [28] J. Tordesillas et al., “FASTER: Fast and Safe Trajectory Planner for Flights in Unknown Environments,” IEEE/RSJ IROS, 2019.
349
+ [29] T. Báča et al., “The MRS UAV system: Pushing the frontiers of reproducible research, real-world deployment, and education with autonomous unmanned aerial vehicles,” JINT, vol. 26, 2021.
350
+ [30] D. Harabor et al., "Online graph pruning for pathfinding on grid maps." in AAAI Conf. on Artificial Intelligence, vol. 25, no. 1, 2011.
351
+ [31] ——, “Improving jump point search,” in Int. Conf. on Automated Planning and Scheduling, vol. 24, no. 1, 2014.
352
+ [32] B. Houska et al., "ACADO Toolkit - An Open Source Framework for Automatic Control and Dynamic Optimization," Optimal Control Applications and Methods, vol. 32, no. 3, pp. 298-312, 2011.
353
+ [33] C. Lino et al., "Intuitive and Efficient Camera Control with the Toric Space," ACM Trans. Graph., 2015.
354
+ [34] K. Shoemake, "Arcball: a user interface for specifying three-dimensional orientation using a mouse," in Proceedings of Graphics Interface, 1992.
355
+ [35] C. Gebhardt et al., "Airways: Optimization-Based Planning of Quadrotor Trajectories according to High-Level User Goals," in Proceedings of the Conf. on Human Factors in Computing Systems, 2016.
2303.01xxx/2303.01177/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:79fbdf292596e5ed76bcc798a70af7220f07061e652167237db5a0a7eaaa9048
3
+ size 488760
2303.01xxx/2303.01177/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.01xxx/2303.01179/3e5857a0-f202-447f-a88c-4abb51daea13_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.01xxx/2303.01179/3e5857a0-f202-447f-a88c-4abb51daea13_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.01xxx/2303.01179/3e5857a0-f202-447f-a88c-4abb51daea13_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c81301c15b129e04f7acf56434fcf85c1d1b6b62b7d7c3f57f55976e52040d1
3
+ size 1722834
2303.01xxx/2303.01179/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2303.01xxx/2303.01179/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ee8bfd10aeb2aea3896cc9368891c4cbf41b5b6ae028aad133461178d599ec45
3
+ size 1836028
2303.01xxx/2303.01179/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.01xxx/2303.01211/8f863c6c-6773-4847-97f6-bf8177a76d73_content_list.json ADDED
@@ -0,0 +1,728 @@
 
 
 
 
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "LEARNING FROM YOURSELF: A SELF-DISTILLATION METHOD FOR FAKE SPEECH DETECTION",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 102,
8
+ 117,
9
+ 893,
10
+ 154
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Jun Xue $^{1}$ , Cunhang Fan $^{1,*}$ , Jiangyan Yi $^{2}$ , Chenglong Wang $^{2}$ , Zhengqi Wen $^{3}$ , Dan Zhang $^{4}$ , Zhao Lv $^{1,*}$",
17
+ "bbox": [
18
+ 102,
19
+ 172,
20
+ 890,
21
+ 191
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "<sup>1</sup>Anhui Province Key Laboratory of Multimodal Cognitive Computation, School of Computer Science and Technology, Anhui University, Hefei 230601, China \n<sup>2</sup>NLPR, Institute of Automation, Chinese Academy of Sciences, Beijing 100190, China \n<sup>3</sup>Qiyuan Laboratory, Beijing 100190, China",
28
+ "bbox": [
29
+ 153,
30
+ 208,
31
+ 844,
32
+ 279
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "$^{4}$ Department of Psychology, Tsinghua University, Beijing 100190, China",
39
+ "bbox": [
40
+ 178,
41
+ 279,
42
+ 787,
43
+ 297
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "e21201068@stu.ahu.edu.cn, {cunhang.fan, kjlz} @ahu.edu.cn, {jiangyan.yi,chenglong.wang} @nlpr.i.ac.cn,",
50
+ "bbox": [
51
+ 179,
52
+ 299,
53
+ 821,
54
+ 314
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "wenzhengqi@qiyuanlab.com, dzhang@tsinghua.edu.cn",
61
+ "bbox": [
62
+ 334,
63
+ 316,
64
+ 661,
65
+ 330
66
+ ],
67
+ "page_idx": 0
68
+ },
69
+ {
70
+ "type": "text",
71
+ "text": "ABSTRACT",
72
+ "text_level": 1,
73
+ "bbox": [
74
+ 240,
75
+ 366,
76
+ 331,
77
+ 378
78
+ ],
79
+ "page_idx": 0
80
+ },
81
+ {
82
+ "type": "text",
83
+ "text": "In this paper, we propose a novel self-distillation method for fake speech detection (FSD), which can significantly improve the performance of FSD without increasing the model complexity. For FSD, some fine-grained information is very important, such as spectrogram defects, mute segments, and so on, which are often perceived by shallow networks. However, shallow networks have much noise, which can not capture this very well. To address this problem, we propose using the deepest network instruct shallow network for enhancing shallow networks. Specifically, the networks of FSD are divided into several segments, the deepest network being used as the teacher model, and all shallow networks become multiple student models by adding classifiers. Meanwhile, the distillation path between the deepest network feature and shallow network features is used to reduce the feature difference. A series of experimental results on the ASVspoof 2019 LA and PA datasets show the effectiveness of the proposed method, with significant improvements compared to the baseline.",
84
+ "bbox": [
85
+ 86,
86
+ 383,
87
+ 486,
88
+ 656
89
+ ],
90
+ "page_idx": 0
91
+ },
92
+ {
93
+ "type": "text",
94
+ "text": "Index Terms— Fake speech detection, self-distillation, automatic speaker verification, ASVspoof",
95
+ "bbox": [
96
+ 83,
97
+ 662,
98
+ 486,
99
+ 693
100
+ ],
101
+ "page_idx": 0
102
+ },
103
+ {
104
+ "type": "text",
105
+ "text": "1. INTRODUCTION",
106
+ "text_level": 1,
107
+ "bbox": [
108
+ 207,
109
+ 712,
110
+ 362,
111
+ 726
112
+ ],
113
+ "page_idx": 0
114
+ },
115
+ {
116
+ "type": "text",
117
+ "text": "With the rise of biometrics, automatic speaker verification (ASV) has also started to be widely used. However, the development of synthetic speech technology seriously threatens the security of ASV systems. The main attack types for ASV systems are audio replay, text-to-speech (TTS), and voice conversion (VC). Therefore, a series of fake speech detection (FSD) challenges are used to improve the security of ASV systems.",
118
+ "bbox": [
119
+ 81,
120
+ 739,
121
+ 486,
122
+ 845
123
+ ],
124
+ "page_idx": 0
125
+ },
126
+ {
127
+ "type": "text",
128
+ "text": "The research of FSD revolves around finding discriminately features and designing robust networks. The front-end features mainly are raw waveforms [1, 2], linear frequency",
129
+ "bbox": [
130
+ 81,
131
+ 845,
132
+ 486,
133
+ 892
134
+ ],
135
+ "page_idx": 0
136
+ },
137
+ {
138
+ "type": "text",
139
+ "text": "cepstral coefficients (LFCC), log power spectrogram (LPS), and so on. For different front-end features, researchers have proposed a series of convolutional neural network modeling methods. Most of the networks are based on ResNet, LCNN [3], and graph networks. To further improve the model performance, researchers introduced attention mechanisms into different networks. Ling et al. [4] proposed an attention-based convolutional neural network for the FSD task, authors designed a frequency attention block and a channel attention block to focus the discriminative information of the features. In [5], they first time applied graph attention (GAT) networks to FSD tasks to further analyze the relationship between adjacent subbands in spectral features. Jung et al. [6] proposed a GAT-based architecture using a heterogeneous attention mechanism and stack nodes to integrate temporal and spectral domains. Zhang et al. [7] used the squeeze and excitation (SE) network for FSD, which is the SE part for computing global channel attention weights. In addition, efficient channel attention (ECA) [8] is proposed to solve the problem of information loss due to SE block dimensionality reduction, which is widely used in other fields.",
140
+ "bbox": [
141
+ 508,
142
+ 364,
143
+ 913,
144
+ 681
145
+ ],
146
+ "page_idx": 0
147
+ },
148
+ {
149
+ "type": "text",
150
+ "text": "In addition, self-distillation as popular methods has gained good performance in many fields. In [9], the authors proposed an attention-based feature self-distillation method, which can utilize more high-level information to improve the performance of automatic speech recognition. Liu et al. [10] designed a feature-enhanced self-distillation method for speaker verification, which achieved good performance. Ge et al. [11] proposed an integrated propagation of knowledge form to design a self-distillation framework, which is very effective for image classification tasks.",
151
+ "bbox": [
152
+ 508,
153
+ 684,
154
+ 913,
155
+ 835
156
+ ],
157
+ "page_idx": 0
158
+ },
159
+ {
160
+ "type": "text",
161
+ "text": "In this paper, we propose a novel self-distillation approach for FSD. Many studies show that the shallow feature of voice is important for FSD. In [7], the authors find that mute segments of voices affect the performance of FSD seriously. Further, Deng et al. [12] utilize shallow feature information by",
162
+ "bbox": [
163
+ 506,
164
+ 837,
165
+ 913,
166
+ 914
167
+ ],
168
+ "page_idx": 0
169
+ },
170
+ {
171
+ "type": "aside_text",
172
+ "text": "arXiv:2303.01211v1 [cs.SD] 2 Mar 2023",
173
+ "bbox": [
174
+ 22,
175
+ 268,
176
+ 57,
177
+ 700
178
+ ],
179
+ "page_idx": 0
180
+ },
181
+ {
182
+ "type": "page_footnote",
183
+ "text": "* Corresponding authors.",
184
+ "bbox": [
185
+ 107,
186
+ 898,
187
+ 243,
188
+ 912
189
+ ],
190
+ "page_idx": 0
191
+ },
192
+ {
193
+ "type": "image",
194
+ "img_path": "images/d9f0dabf7b90694e4c149a71870e15b561a064270b7b2367a6f6a958596c50dd.jpg",
195
+ "image_caption": [
196
+ "Fig. 1. The proposed self-distillation framework is based on SENet and ECANet networks. The framework is divided into four blocks and each block is set with a classifier (AngleLinear, AL). Note: The additional classifiers can all be removed during evaluation and thus do not increase the model load."
197
+ ],
198
+ "image_footnote": [],
199
+ "bbox": [
200
+ 84,
201
+ 85,
202
+ 916,
203
+ 305
204
+ ],
205
+ "page_idx": 1
206
+ },
207
+ {
208
+ "type": "text",
209
+ "text": "designing simple classifiers, such as spectrum defects, mute segments, and so on. While shallow networks are sensitive to this kind of information, the capture ability is not well as deep networks. To solve this problem, we propose a self-distillation framework that the deepest network instructs shallow networks, which can further strengthen shallow networks. Specifically, during training, we add classifiers as multiple student models for all shallow networks and set the deepest network as the teacher model. Based on the prediction results, the teacher model instructs the student model to transform the deepest knowledge into all shallow networks. Meanwhile, to reduce the feature difference between the shallow and deepest networks, we also build self-distillation models in the feature dimension. It is worth noting that the classifiers in all shallow layers can be removed during inference, thus causing no additional computational and model complexity. To validate the effectiveness of the proposed approach, we used the ECANet and SENet networks as the baseline and the LA and PA from the ASVspoof 2019 challenge as the dataset. Compared to the baseline, the performance of the system is significantly improved after using self-distillation.",
210
+ "bbox": [
211
+ 86,
212
+ 388,
213
+ 488,
214
+ 705
215
+ ],
216
+ "page_idx": 1
217
+ },
218
+ {
219
+ "type": "text",
220
+ "text": "2. THE PROPOSED SELF-DISTILLATION METHOD",
221
+ "text_level": 1,
222
+ "bbox": [
223
+ 84,
224
+ 724,
225
+ 485,
226
+ 739
227
+ ],
228
+ "page_idx": 1
229
+ },
230
+ {
231
+ "type": "text",
232
+ "text": "2.1. Self-distillation",
233
+ "text_level": 1,
234
+ "bbox": [
235
+ 84,
236
+ 753,
237
+ 230,
238
+ 767
239
+ ],
240
+ "page_idx": 1
241
+ },
242
+ {
243
+ "type": "text",
244
+ "text": "In this section, we propose a self-distillation method as shown in Fig.1. Specifically, Inspired by [13], we divide it into four segments based on the model architecture. In this paper, we divide the model into four blocks, and the block is SE block or ECA block. Then, we set a classifier after each block, and we use AngleLinear to obtain the prediction results of each layer. Note that these extra classifiers are only used in training and do not increase the load during inference. In the training phase, we use the fourth layer of the network as the teacher",
245
+ "bbox": [
246
+ 81,
247
+ 777,
248
+ 488,
249
+ 912
250
+ ],
251
+ "page_idx": 1
252
+ },
253
+ {
254
+ "type": "text",
255
+ "text": "model and the first three layers as the student model. The deep network knowledge is transformed to the shallow layer in both feature and prediction dimensions.",
256
+ "bbox": [
257
+ 508,
258
+ 388,
259
+ 913,
260
+ 431
261
+ ],
262
+ "page_idx": 1
263
+ },
264
+ {
265
+ "type": "text",
266
+ "text": "To make full use of the network information, three losses during the training process are introduced:",
267
+ "bbox": [
268
+ 508,
269
+ 433,
270
+ 913,
271
+ 463
272
+ ],
273
+ "page_idx": 1
274
+ },
275
+ {
276
+ "type": "list",
277
+ "sub_type": "text",
278
+ "list_items": [
279
+ "- Hard loss: The A-softmax function is used to calculate the loss of the labels and the fourth layer classifier. This is calculated using the labels of the training dataset and the output from the Anglelinear classifier, which is used to fully extract the hidden knowledge in the training dataset.",
280
+ "- Feature loss: The L2 function is used to compute the feature mapping between the fourth layer network and each shallow layer network. This can introduce nonexplicit knowledge of the deepest features into the shallow features so that their shallow networks can match better with the deeper networks when predicting.",
281
+ "- Soft loss: The KL divergence function is used to calculate the soft loss in the teacher-student model. The deepest network output is used as the teacher model and several shallow network outputs are used as the student model. The difference between the distributions of the two outputs is calculated, which can guide the shallow network to learn more."
282
+ ],
283
+ "bbox": [
284
+ 535,
285
+ 472,
286
+ 913,
287
+ 771
288
+ ],
289
+ "page_idx": 1
290
+ },
291
+ {
292
+ "type": "text",
293
+ "text": "2.2. Training Methods",
294
+ "text_level": 1,
295
+ "bbox": [
296
+ 509,
297
+ 784,
298
+ 674,
299
+ 799
300
+ ],
301
+ "page_idx": 1
302
+ },
303
+ {
304
+ "type": "text",
305
+ "text": "When training with the self-distillation method, the loss has three components. First, we compute the hard loss of the deepest network and labels:",
306
+ "bbox": [
307
+ 506,
308
+ 808,
309
+ 913,
310
+ 852
311
+ ],
312
+ "page_idx": 1
313
+ },
314
+ {
315
+ "type": "equation",
316
+ "text": "\n$$\n\\mathcal {L} _ {\\text {h a r d}} = A _ {-} \\operatorname {s o f t m a x} \\left(p ^ {n}, L\\right) \\tag {1}\n$$\n",
317
+ "text_format": "latex",
318
+ "bbox": [
319
+ 609,
320
+ 857,
321
+ 913,
322
+ 873
323
+ ],
324
+ "page_idx": 1
325
+ },
326
+ {
327
+ "type": "text",
328
+ "text": "where $p^n$ is the deepest output of the network, in this paper, we set the $n$ is 4. $L$ is the label of the training data set,",
329
+ "bbox": [
330
+ 508,
331
+ 883,
332
+ 913,
333
+ 914
334
+ ],
335
+ "page_idx": 1
336
+ },
337
+ {
338
+ "type": "table",
339
+ "img_path": "images/549f92da6d7e33fb67163bc995b07c27dc031b9b9f4b932f02e12482584c5da7.jpg",
340
+ "table_caption": [
341
+ "Table 1. EER and t-DCF results for different architectures based on the ASVspoof 2019 LA and PA dataset. SD indicates that the self-distillation method is used."
342
+ ],
343
+ "table_footnote": [],
344
+ "table_body": "<table><tr><td rowspan=\"5\">LA dataset</td><td colspan=\"2\">Model</td><td>ECANet9</td><td>ECANet18</td><td>ECANet34</td><td>ECANet50</td><td>SENet9</td><td>SENet18</td><td>SENet34</td><td>SENet50</td></tr><tr><td rowspan=\"2\">EER</td><td>baseline</td><td>1.48</td><td>1.18</td><td>1.44</td><td>1.87</td><td>1.97</td><td>1.52</td><td>1.23</td><td>1.83</td></tr><tr><td>SD</td><td>1.22</td><td>0.88</td><td>1.14</td><td>1.09</td><td>1.23</td><td>1.37</td><td>1.08</td><td>1.00</td></tr><tr><td rowspan=\"2\">t-DCF</td><td>baseline</td><td>0.0493</td><td>0.0378</td><td>0.0460</td><td>0.0605</td><td>0.0610</td><td>0.0497</td><td>0.0358</td><td>0.0536</td></tr><tr><td>SD</td><td>0.0376</td><td>0.0295</td><td>0.0334</td><td>0.0318</td><td>0.0388</td><td>0.0417</td><td>0.0347</td><td>0.0309</td></tr><tr><td rowspan=\"5\">PA dataset</td><td colspan=\"2\">Model</td><td>ECANet9</td><td>ECANet18</td><td>ECANet34</td><td>ECANet50</td><td>SENet9</td><td>SENet18</td><td>SENet34</td><td>SENet50</td></tr><tr><td rowspan=\"2\">EER</td><td>baseline</td><td>0.83</td><td>0.95</td><td>0.88</td><td>0.90</td><td>0.97</td><td>0.93</td><td>1.14</td><td>0.82</td></tr><tr><td>SD</td><td>0.74</td><td>0.83</td><td>0.70</td><td>0.82</td><td>0.85</td><td>0.87</td><td>0.65</td><td>0.79</td></tr><tr><td rowspan=\"2\">t-DCF</td><td>baseline</td><td>0.0221</td><td>0.0284</td><td>0.0255</td><td>0.0262</td><td>0.0269</td><td>0.0266</td><td>0.0334</td><td>0.0228</td></tr><tr><td>SD</td><td>0.0199</td><td>0.0219</td><td>0.0208</td><td>0.0222</td><td>0.0232</td><td>0.0239</td><td>0.0174</td><td>0.0219</td></tr></table>",
345
+ "bbox": [
346
+ 88,
347
+ 126,
348
+ 911,
349
+ 268
350
+ ],
351
+ "page_idx": 2
352
+ },
353
+ {
354
+ "type": "table",
355
+ "img_path": "images/325ab45b469fbcf3c3d2e4e847d156f51221a995a651c1a03bde4e4bbcbfe50e.jpg",
356
+ "table_caption": [
357
+ "Table 2. The ECANet and SENet model architecture and configuration. Dimensions refer to (channels, frequency, and time). Batch normalization (BN) and Rectified Linear Unit (ReLU). ECA and SE are the efficient channel attention block and the squeeze and excitation block, respectively."
358
+ ],
359
+ "table_footnote": [],
360
+ "table_body": "<table><tr><td>Layer</td><td colspan=\"2\">Input: 27000 samples</td><td>Output shape</td></tr><tr><td>Front-end</td><td colspan=\"2\">F0 subband</td><td>(45,600)(F,T)</td></tr><tr><td>Post-processing</td><td colspan=\"2\">Add channelConv2D_1BN &amp; ReLU</td><td>(1,45,600)(16,45,600)</td></tr><tr><td>Block1</td><td colspan=\"2\">c1×{Conv2D_3Conv2D_3ECA or SE}</td><td>(32,45,600)</td></tr><tr><td>Block2</td><td colspan=\"2\">c2×{Conv2D_3Conv2D_3ECA or SE}</td><td>(64,23,300)</td></tr><tr><td>Block3</td><td colspan=\"2\">c3×{Conv2D_3Conv2D_3ECA or SE}</td><td>(128,12,150)</td></tr><tr><td>Block4</td><td colspan=\"2\">c4×{Conv2D_3Conv2D_3ECA or SE}</td><td>(256,6,75)</td></tr><tr><td>Output</td><td colspan=\"2\">Avgpool2D(1,1)AngleLinear</td><td>(256,1,1)2</td></tr></table>",
361
+ "bbox": [
362
+ 88,
363
+ 377,
364
+ 486,
365
+ 660
366
+ ],
367
+ "page_idx": 2
368
+ },
369
+ {
370
+ "type": "text",
371
+ "text": "and $A_{-}$ softmax denotes the A-softmax function. $\\mathcal{L}_{\\text{hard}}$ is the hard loss.",
372
+ "bbox": [
373
+ 83,
374
+ 681,
375
+ 488,
376
+ 710
377
+ ],
378
+ "page_idx": 2
379
+ },
380
+ {
381
+ "type": "text",
382
+ "text": "Soft loss is used for knowledge distillation of the shallow and deep networks, and we calculate KL divergence using each shallow and deepest network.",
383
+ "bbox": [
384
+ 83,
385
+ 712,
386
+ 488,
387
+ 757
388
+ ],
389
+ "page_idx": 2
390
+ },
391
+ {
392
+ "type": "equation",
393
+ "text": "\n$$\n\\mathcal {L} _ {\\text {s o f t}} = \\sum_ {i} ^ {n - 1} K L \\left(p ^ {i}, p ^ {n}\\right) \\tag {2}\n$$\n",
394
+ "text_format": "latex",
395
+ "bbox": [
396
+ 199,
397
+ 758,
398
+ 486,
399
+ 799
400
+ ],
401
+ "page_idx": 2
402
+ },
403
+ {
404
+ "type": "text",
405
+ "text": "$KL$ denotes the KL divergence function and $p^i$ is the output of each layer of the network after the Angellinear classifier. $\\mathcal{L}_{soft}$ is the final soft loss.",
406
+ "bbox": [
407
+ 83,
408
+ 806,
409
+ 488,
410
+ 853
411
+ ],
412
+ "page_idx": 2
413
+ },
414
+ {
415
+ "type": "text",
416
+ "text": "The feature loss is used to balance the difference between the shallow and deepest networks, which can be fed back to the classification output of the shallow network to facilitate the soft loss fit.",
417
+ "bbox": [
418
+ 83,
419
+ 853,
420
+ 488,
421
+ 912
422
+ ],
423
+ "page_idx": 2
424
+ },
425
+ {
426
+ "type": "equation",
427
+ "text": "\n$$\n\\mathcal {L} _ {\\text {f e a t u r e}} = \\sum_ {i} ^ {n - 1} L 2 \\left(\\mathcal {F} ^ {i}, \\mathcal {F} ^ {n}\\right) \\tag {3}\n$$\n",
428
+ "text_format": "latex",
429
+ "bbox": [
430
+ 612,
431
+ 291,
432
+ 911,
433
+ 330
434
+ ],
435
+ "page_idx": 2
436
+ },
437
+ {
438
+ "type": "text",
439
+ "text": "$\\mathcal{F}^i$ is the output feature of each layer, and $\\mathcal{F}^n$ is the output feature of the deepest layer. $L2$ is the L2 loss function, and $\\mathcal{L}_{\\text{feature}}$ is the final feature loss.",
440
+ "bbox": [
441
+ 509,
442
+ 340,
443
+ 915,
444
+ 387
445
+ ],
446
+ "page_idx": 2
447
+ },
448
+ {
449
+ "type": "equation",
450
+ "text": "\n$$\n\\mathcal {L} = \\alpha * \\mathcal {L} _ {\\text {h a r d}} + (1 - \\alpha) * \\mathcal {L} _ {\\text {s o f t}} + \\beta * \\mathcal {L} _ {\\text {f e a t u r e}} \\tag {4}\n$$\n",
451
+ "text_format": "latex",
452
+ "bbox": [
453
+ 535,
454
+ 398,
455
+ 913,
456
+ 416
457
+ ],
458
+ "page_idx": 2
459
+ },
460
+ {
461
+ "type": "text",
462
+ "text": "The loss at training consists of the following three components, $\\alpha$ and $\\beta$ are hyperparameters to balance the three sources of loss. $\\mathcal{L}$ is the final loss.",
463
+ "bbox": [
464
+ 508,
465
+ 422,
466
+ 913,
467
+ 468
468
+ ],
469
+ "page_idx": 2
470
+ },
471
+ {
472
+ "type": "text",
473
+ "text": "3. EXPERIMENTS AND RESULTS",
474
+ "text_level": 1,
475
+ "bbox": [
476
+ 580,
477
+ 487,
478
+ 843,
479
+ 502
480
+ ],
481
+ "page_idx": 2
482
+ },
483
+ {
484
+ "type": "text",
485
+ "text": "3.1. Datasets",
486
+ "text_level": 1,
487
+ "bbox": [
488
+ 509,
489
+ 516,
490
+ 609,
491
+ 530
492
+ ],
493
+ "page_idx": 2
494
+ },
495
+ {
496
+ "type": "text",
497
+ "text": "We trained and evaluated models on the ASVspoof 2019 LA and PA datasets. The LA set includes three types of spoofing attacks (TTS, VS, and audio replay), which are divided into 19 attack algorithms (A01-A19). The PA set includes only replay attacks, and there are 27 replay attacks in different acoustic environments. In this paper, EER and the minimum normalized tandem detection cost function (min t-DCF) are used as evaluation metrics for assessing the performance of different systems.",
498
+ "bbox": [
499
+ 506,
500
+ 539,
501
+ 915,
502
+ 675
503
+ ],
504
+ "page_idx": 2
505
+ },
506
+ {
507
+ "type": "text",
508
+ "text": "3.2. Training setup and baseline",
509
+ "text_level": 1,
510
+ "bbox": [
511
+ 509,
512
+ 693,
513
+ 741,
514
+ 709
515
+ ],
516
+ "page_idx": 2
517
+ },
518
+ {
519
+ "type": "text",
520
+ "text": "Front-end features: Inspired by [20], we use the F0 subband as our input features. Firstly, we extract the full frequency band of LPS and use the window function as Blackman's Short Time Fourier Transform (STFT), setting the window length and hop length as 1728 and 130 respectively. we fix the frame number as 600 and get the LPS to feature $865\\times 600$ . Finally, we take the first 45 dimensions of the frequency and finally get the front-end F0 subband feature size of $45\\times 600$ .",
521
+ "bbox": [
522
+ 506,
523
+ 715,
524
+ 913,
525
+ 837
526
+ ],
527
+ "page_idx": 2
528
+ },
529
+ {
530
+ "type": "text",
531
+ "text": "Back-end classifier: As shown in Table 2, we use SENet and ECANet as deep neural network classifiers. where the $c_{1} - c_{4}$ vectors corresponding to the 9, 18, 34 layers are $(1,1,1,1)$ , $(2,2,2,2)$ , and $(3,4,6,3)$ , respectively. The 50-layer network is set to three convolutions in the block, with",
532
+ "bbox": [
533
+ 508,
534
+ 837,
535
+ 913,
536
+ 912
537
+ ],
538
+ "page_idx": 2
539
+ },
540
+ {
541
+ "type": "table",
542
+ "img_path": "images/802fce2fb7f70c5e9e39516d56440722a0ccf698db87b60151a2a17f5b9920dd.jpg",
543
+ "table_caption": [
544
+ "Table 3. Comparison of our self-distillation system with other known single systems."
545
+ ],
546
+ "table_footnote": [],
547
+ "table_body": "<table><tr><td colspan=\"4\">LA dataset</td><td colspan=\"4\">PA dataset</td></tr><tr><td>Systems</td><td>Front-end</td><td>EER(%)</td><td>t-DCF</td><td>Systems</td><td>Front-end</td><td>EER(%)</td><td>t-DCF</td></tr><tr><td>AASIST [6]</td><td>Raw waveform</td><td>0.83</td><td>0.0275</td><td>T28 [14]</td><td>-</td><td>0.52</td><td>0.1470</td></tr><tr><td>ECANet18(SD) Ours</td><td>F0 subband</td><td>0.88</td><td>0.0295</td><td>SENet34(SD) Ours</td><td>F0 subband</td><td>0.65</td><td>0.0174</td></tr><tr><td>SENet50(SD) Ours</td><td>F0 subband</td><td>1.00</td><td>0.0309</td><td>ECANet34(SD) Ours</td><td>F0 subband</td><td>0.70</td><td>0.0208</td></tr><tr><td>RawGAT-ST [15]</td><td>Raw waveform</td><td>1.06</td><td>0.0340</td><td>SE-Res2Net50 [16]</td><td>Spec</td><td>0.74</td><td>0.0207</td></tr><tr><td>SENet34(SD) Ours</td><td>F0 subband</td><td>1.08</td><td>0.0347</td><td>T10 [14]</td><td>-</td><td>1.08</td><td>0.1598</td></tr><tr><td>FFT-L-SENet [7]</td><td>LPS</td><td>1.14</td><td>0.0368</td><td>T45 [14]</td><td>-</td><td>1.23</td><td>0.1610</td></tr><tr><td>MCG-Res2Net50 [17]</td><td>CQT</td><td>1.78</td><td>0.0520</td><td>T44 [14]</td><td>-</td><td>1.29</td><td>0.1666</td></tr><tr><td>Resnet18-OC-softmax [18]</td><td>LFCC</td><td>2.19</td><td>0.0590</td><td>T53 [14]</td><td>-</td><td>1.66</td><td>0.1729</td></tr><tr><td>ResNet18-GAT-T [5]</td><td>LFB</td><td>4.71</td><td>0.0894</td><td>Capsule [19]</td><td>LFCC</td><td>2.76</td><td>0.0730</td></tr></table>",
548
+ "bbox": [
549
+ 88,
550
+ 114,
551
+ 911,
552
+ 280
553
+ ],
554
+ "page_idx": 3
555
+ },
556
+ {
557
+ "type": "text",
558
+ "text": "convolution kernels of 1,3,1. The rest is the same as the 34-layer network. For training, we use Adam as the optimizer with parameters $\\beta_{1} = 0.9$ , $\\beta_{2} = 0.98$ , $\\epsilon = 10^{-9}$ and weight decay $10^{-4}$ . The number of the epoch is 32. The two hyperparameters $\\alpha$ and $\\beta$ are set to 0.7 and 0.3, respectively.",
559
+ "bbox": [
560
+ 81,
561
+ 305,
562
+ 486,
563
+ 381
564
+ ],
565
+ "page_idx": 3
566
+ },
567
+ {
568
+ "type": "text",
569
+ "text": "3.3. Experiment results on LA dataset",
570
+ "text_level": 1,
571
+ "bbox": [
572
+ 83,
573
+ 402,
574
+ 357,
575
+ 417
576
+ ],
577
+ "page_idx": 3
578
+ },
579
+ {
580
+ "type": "text",
581
+ "text": "Table 1 shows the EER and t-DCF of the baseline and self-distillation systems for the ASVspoof 2019 LA dataset. The \"SD\" denotes self-distillation. According to Table 1, it can be seen that our self-distillation method significantly outperforms the baseline system. In addition, we can observe several interesting phenomena. First, the baseline system \"ECANet18\" has the best performance with an EER of $1.18\\%$ . Even so, the self-distillation method can improve it by $25\\%$ . Second, the performance of the different network architectures decreases significantly as the network gets deeper. For example, the EER of the baseline \"SENet50\" is $1.83\\%$ , and the EER of its self-distillation is $1.00\\%$ , which is a $45\\%$ improvement. Self-distillation effectively reduces the performance degradation of the FSD due to depth and makes its performance smoother for different network depths. Table 3 shows the performance of the most recently known SOTA single system, and our best system is ranked second. In general, the self-distillation method has the effect of fully exploiting the information of different levels of the network. Further, this method has strong generality and improves for different architectures at different depths.",
582
+ "bbox": [
583
+ 81,
584
+ 428,
585
+ 488,
586
+ 744
587
+ ],
588
+ "page_idx": 3
589
+ },
590
+ {
591
+ "type": "text",
592
+ "text": "3.4. Experiment results on PA dataset",
593
+ "text_level": 1,
594
+ "bbox": [
595
+ 83,
596
+ 767,
597
+ 356,
598
+ 782
599
+ ],
600
+ "page_idx": 3
601
+ },
602
+ {
603
+ "type": "text",
604
+ "text": "Table 1 shows the EER and t-DCF of the baseline and the self-distillation system for the ASVspoof 2019 PA dataset. according to Table 1, it can be seen that the self-distillation system outperforms the baseline. The EER of the baseline \"SENet34\" is $1.14\\%$ , which is the worst performance in the overall baseline. However, the \"SENet34(SD)\" system could obtain an EER of $0.65\\%$ , making it the best-performing system. This may be because the shallow network contains more",
605
+ "bbox": [
606
+ 81,
607
+ 792,
608
+ 488,
609
+ 912
610
+ ],
611
+ "page_idx": 3
612
+ },
613
+ {
614
+ "type": "text",
615
+ "text": "unexplored information, and thus the self-distillation system has a teacher for guidance, which allows shallow networks to mine more fine-grained information as well. In addition, Table 3 compares the Top systems on the PA dataset, and our method can also get the second one. This indicates that the self-distillation method is very effective. Further, our method can also be adapted to different datasets.",
616
+ "bbox": [
617
+ 506,
618
+ 305,
619
+ 915,
620
+ 411
621
+ ],
622
+ "page_idx": 3
623
+ },
624
+ {
625
+ "type": "text",
626
+ "text": "4. CONCLUSIONS",
627
+ "text_level": 1,
628
+ "bbox": [
629
+ 638,
630
+ 430,
631
+ 785,
632
+ 444
633
+ ],
634
+ "page_idx": 3
635
+ },
636
+ {
637
+ "type": "text",
638
+ "text": "In this paper, we propose a novel self-distillation method for FSD tasks. This can further improve the performance of FSD without increasing the load and has generality for networks of different architectures. Specifically, we add classifiers behind the shallow network to build interaction with the deepest network in both feature and prediction dimensions, which enhances shallow networks' ability that captures detailed discriminately information. The feature distillation aims to reduce the difference between deep and shallow features, and the distillation of the prediction dimension is to fully exploit the information in each layer of the network to further optimize the network. We use different architectures of ECANet and SENet, and our experimental results on ASVspoof 2019 LA and PA datasets validate the effectiveness and generality of our approach, significantly improving the performance of the baseline. In the future, we will work on building more low-parameter and highly robust FSD systems.",
639
+ "bbox": [
640
+ 506,
641
+ 458,
642
+ 915,
643
+ 715
644
+ ],
645
+ "page_idx": 3
646
+ },
647
+ {
648
+ "type": "text",
649
+ "text": "5. ACKNOWLEDGEMENTS",
650
+ "text_level": 1,
651
+ "bbox": [
652
+ 602,
653
+ 734,
654
+ 821,
655
+ 748
656
+ ],
657
+ "page_idx": 3
658
+ },
659
+ {
660
+ "type": "text",
661
+ "text": "This work is supported by the National Key Research and Development Plan of China (No.2020AAA0140003), the National Natural Science Foundation of China (NSFC) (No.62201002, No.61972437), Excellent Youth Foundation of Anhui Scientific Committee (No. 2208085J05), Special Fund for Key Program of Science and Technology of Anhui Province (No. 202203a07020008), the Open Research Projects of Zhejiang Lab (NO. 2021KH0AB06) and the Open Projects Program of National Laboratory of Pattern Recognition (NO. 202200014).",
662
+ "bbox": [
663
+ 506,
664
+ 762,
665
+ 915,
666
+ 912
667
+ ],
668
+ "page_idx": 3
669
+ },
670
+ {
671
+ "type": "text",
672
+ "text": "6. REFERENCES",
673
+ "text_level": 1,
674
+ "bbox": [
675
+ 217,
676
+ 90,
677
+ 354,
678
+ 104
679
+ ],
680
+ "page_idx": 4
681
+ },
682
+ {
683
+ "type": "list",
684
+ "sub_type": "ref_text",
685
+ "list_items": [
686
+ "[1] Jee-weon Jung, Hee-Soo Heo, Ju-ho Kim, Hye-jin Shim, and Ha-Jin Yu, “Rawnet: Advanced end-to-end deep neural network using raw waveforms for text-independent speaker verification,” Proc. Interspeech 2019, pp. 1268-1272, 2019.",
687
+ "[2] Hemlata Tak, Jose Patino, Massimiliano Todisco, Andreas Nautsch, Nicholas Evans, and Anthony Larcher, \"End-to-end anti-spoofing with rawnet2,\" in ICASSP 2021. IEEE, 2021, pp. 6369-6373.",
688
+ "[3] Chenglong Wang, Jiangyan Yi, Jianhua Tao, Haiyang Sun, Xun Chen, Zhengkun Tian, Haoxin Ma, Cunhang Fan, and Ruibo Fu, “Fully automated end-to-end fake audio detection,” in DDAM 2022, 2022, pp. 27-33.",
689
+ "[4] Hefei Ling, Leichao Huang, Junrui Huang, Baiyan Zhang, and Ping Li, “Attention-Based Convolutional Neural Network for ASV Spoofing Detection,” in Proc. Interspeech, 2021, pp. 4289–4293.",
690
+ "[5] Hemlata Tak, Jee-weon Jung, Jose Patino, Madhu Kamble, Massimiliano Todisco, and Nicholas Evans, “End-to-end spectro-temporal graph attention networks for speaker verification anti-spoofing and speech deepfake detection,” in Proc. ASVSproof Challenge, 2021, pp. 1–8.",
691
+ "[6] Jee-weon Jung, Hee-Soo Heo, Hemlata Tak, Hye-jin Shim, Joon Son Chung, Bong-Jin Lee, Ha-Jin Yu, and Nicholas Evans, \"Aassist: Audio anti-spoofing using integrated spectro-temporal graph attention networks,\" in ICASSP 2022. IEEE, 2022, pp. 6367-6371.",
692
+ "[7] Yuxiang Zhang, Wenchao Wang, and Pengyuan Zhang, “The Effect of Silence and Dual-Band Fusion in Anti-Spoofing System,” in Proc. Interspeech, 2021, pp. 4279–4283.",
693
+ "[8] Qilong Wang, Banggu Wu, Pengfei Zhu, P. Li, Wangmeng Zuo, and Qinghua Hu, \"Eca-net: Efficient channel attention for deep convolutional neural networks,\" CVPR 2020, pp. 11531-11539, 2020.",
694
+ "[9] Qiang Xu, Tongtong Song, Longbiao Wang, Hao Shi, Yuqin Lin, Yongjie Lv, Meng Ge, Qiang Yu, and Jianwu Dang, \"Self-distillation based on high-level information supervision for compressing end-to-end asr model,\" Proc. Interspeech 2022, pp. 1716-1720, 2022.",
695
+ "[10] Bei Liu, Haoyu Wang, Zhengyang Chen, Shuai Wang, and Yanmin Qian, \"Self-knowledge distillation via feature enhancement for speaker verification,\" in ICASSP 2022. IEEE, 2022, pp. 7542-7546."
696
+ ],
697
+ "bbox": [
698
+ 86,
699
+ 119,
700
+ 486,
701
+ 912
702
+ ],
703
+ "page_idx": 4
704
+ },
705
+ {
706
+ "type": "list",
707
+ "sub_type": "ref_text",
708
+ "list_items": [
709
+ "[11] Yixiao Ge, Xiao Zhang, Ching Lam Choi, Ka Chun Cheung, Peipei Zhao, Feng Zhu, Xiaogang Wang, Rui Zhao, and Hongsheng Li, \"Self-distillation with batch knowledge ensembling improves imagenet classification,\" arXiv, 2021.",
710
+ "[12] JiaCheng Deng, Terui Mao, Diqun Yan, Li Dong, and Mingyu Dong, “Detection of synthetic speech based on spectrum defects,” in DDAM 2022, 2022, pp. 3-8.",
711
+ "[13] Linfeng Zhang, Jiebo Song, Anni Gao, Jingwei Chen, Chenglong Bao, and Kaisheng Ma, “Be your own teacher: Improve the performance of convolutional neural net via self distillation,” in ICCV 2019. IEEE Computer Society, 2019, pp. 3712–3721.",
712
+ "[14] Andreas Nautsch, Xin Wang, Nicholas Evans, Tomi H Kinnunen, Ville Vestman, Massimiliano Todisco, Héctor Delgado, Md Sahidullah, Junichi Yamagishi, and Kong Aik Lee, \"Asvspoof 2019: spoofing countermeasures for the detection of synthesized, converted and replayed speech,\" IEEE Transactions on Biometrics, Behavior, and Identity Science, vol. 3, no. 2, pp. 252-265, 2021.",
713
+ "[15] Hemlata Tak, Jee-Weon Jung, Jose Patino, Madhu Kamble, Massimiliano Todisco, and Nicholas Evans, “End-to-end spectro-temporal graph attention networks for speaker verification anti-spoofing and speech deepfake detection,” in ASVSPoOF 2021, Automatic Speaker Verification and Spoofing Countermeasures Challenge. ISCA, 2021, pp. 1–8.",
714
+ "[16] Xu Li, Na Li, Chao Weng, Xunying Liu, Dan Su, Dong Yu, and Helen Meng, \"Replay and synthetic speech detection with res2net architecture,\" in ICASSP 2021. IEEE, 2021, pp. 6354-6358.",
715
+ "[17] Xu Li, Xixin Wu, Hui Lu, Xunying Liu, and Helen Meng, “Channel-Wise Gated Res2Net: Towards Robust Detection of Synthetic Speech Attacks,” in Proc. Interspeech 2021, 2021, pp. 4314–4318.",
716
+ "[18] You Zhang, Fei Jiang, and Zhiyao Duan, \"One-class learning towards synthetic voice spoofing detection,\" IEEE Signal Processing Letters, vol. 28, pp. 937-941, 2021.",
717
+ "[19] Anwei Luo, Enlei Li, Yongliang Liu, Xiangui Kang, and Z Jane Wang, “A capsule network based approach for detection of audio spoofing attacks,” in ICASSP 2021. IEEE, 2021, pp. 6359–6363.",
718
+ "[20] Jun Xue, Cunhang Fan, Zhao Lv, Jianhua Tao, Jiangyan Yi, Chengshi Zheng, Zhengqi Wen, Minmin Yuan, and Shegang Shao, \"Audio deepfake detection based on a combination of f0 information and real plus imaginary spectrogram features,\" in DDAM 2022, 2022, pp. 19-26."
719
+ ],
720
+ "bbox": [
721
+ 511,
722
+ 90,
723
+ 913,
724
+ 905
725
+ ],
726
+ "page_idx": 4
727
+ }
728
+ ]
2303.01xxx/2303.01211/8f863c6c-6773-4847-97f6-bf8177a76d73_model.json ADDED
@@ -0,0 +1,969 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "aside_text",
5
+ "bbox": [
6
+ 0.023,
7
+ 0.269,
8
+ 0.058,
9
+ 0.702
10
+ ],
11
+ "angle": 270,
12
+ "content": "arXiv:2303.01211v1 [cs.SD] 2 Mar 2023"
13
+ },
14
+ {
15
+ "type": "title",
16
+ "bbox": [
17
+ 0.104,
18
+ 0.118,
19
+ 0.895,
20
+ 0.155
21
+ ],
22
+ "angle": 0,
23
+ "content": "LEARNING FROM YOURSELF: A SELF-DISTILLATION METHOD FOR FAKE SPEECH DETECTION"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.104,
29
+ 0.173,
30
+ 0.892,
31
+ 0.192
32
+ ],
33
+ "angle": 0,
34
+ "content": "Jun Xue\\(^{1}\\), Cunhang Fan\\(^{1,*}\\), Jiangyan Yi\\(^{2}\\), Chenglong Wang\\(^{2}\\), Zhengqi Wen\\(^{3}\\), Dan Zhang\\(^{4}\\), Zhao Lv\\(^{1,*}\\)"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.154,
40
+ 0.209,
41
+ 0.845,
42
+ 0.28
43
+ ],
44
+ "angle": 0,
45
+ "content": "<sup>1</sup>Anhui Province Key Laboratory of Multimodal Cognitive Computation, School of Computer Science and Technology, Anhui University, Hefei 230601, China \n<sup>2</sup>NLPR, Institute of Automation, Chinese Academy of Sciences, Beijing 100190, China \n<sup>3</sup>Qiyuan Laboratory, Beijing 100190, China"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.179,
51
+ 0.28,
52
+ 0.788,
53
+ 0.298
54
+ ],
55
+ "angle": 0,
56
+ "content": "\\(^{4}\\)Department of Psychology, Tsinghua University, Beijing 100190, China"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.18,
62
+ 0.3,
63
+ 0.823,
64
+ 0.315
65
+ ],
66
+ "angle": 0,
67
+ "content": "e21201068@stu.ahu.edu.cn, {cunhang.fan, kjlz} @ahu.edu.cn, {jiangyan.yi,chenglong.wang} @nlpr.i.ac.cn,"
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.336,
73
+ 0.318,
74
+ 0.663,
75
+ 0.332
76
+ ],
77
+ "angle": 0,
78
+ "content": "wenzhengqi@qiyuanlab.com, dzhang@tsinghua.edu.cn"
79
+ },
80
+ {
81
+ "type": "title",
82
+ "bbox": [
83
+ 0.241,
84
+ 0.367,
85
+ 0.333,
86
+ 0.38
87
+ ],
88
+ "angle": 0,
89
+ "content": "ABSTRACT"
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.087,
95
+ 0.385,
96
+ 0.487,
97
+ 0.657
98
+ ],
99
+ "angle": 0,
100
+ "content": "In this paper, we propose a novel self-distillation method for fake speech detection (FSD), which can significantly improve the performance of FSD without increasing the model complexity. For FSD, some fine-grained information is very important, such as spectrogram defects, mute segments, and so on, which are often perceived by shallow networks. However, shallow networks have much noise, which can not capture this very well. To address this problem, we propose using the deepest network instruct shallow network for enhancing shallow networks. Specifically, the networks of FSD are divided into several segments, the deepest network being used as the teacher model, and all shallow networks become multiple student models by adding classifiers. Meanwhile, the distillation path between the deepest network feature and shallow network features is used to reduce the feature difference. A series of experimental results on the ASVspoof 2019 LA and PA datasets show the effectiveness of the proposed method, with significant improvements compared to the baseline."
101
+ },
102
+ {
103
+ "type": "text",
104
+ "bbox": [
105
+ 0.084,
106
+ 0.664,
107
+ 0.487,
108
+ 0.694
109
+ ],
110
+ "angle": 0,
111
+ "content": "Index Terms— Fake speech detection, self-distillation, automatic speaker verification, ASVspoof"
112
+ },
113
+ {
114
+ "type": "title",
115
+ "bbox": [
116
+ 0.209,
117
+ 0.713,
118
+ 0.364,
119
+ 0.727
120
+ ],
121
+ "angle": 0,
122
+ "content": "1. INTRODUCTION"
123
+ },
124
+ {
125
+ "type": "text",
126
+ "bbox": [
127
+ 0.083,
128
+ 0.741,
129
+ 0.488,
130
+ 0.846
131
+ ],
132
+ "angle": 0,
133
+ "content": "With the rise of biometrics, automatic speaker verification (ASV) has also started to be widely used. However, the development of synthetic speech technology seriously threatens the security of ASV systems. The main attack types for ASV systems are audio replay, text-to-speech (TTS), and voice conversion (VC). Therefore, a series of fake speech detection (FSD) challenges are used to improve the security of ASV systems."
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.083,
139
+ 0.847,
140
+ 0.488,
141
+ 0.893
142
+ ],
143
+ "angle": 0,
144
+ "content": "The research of FSD revolves around finding discriminately features and designing robust networks. The front-end features mainly are raw waveforms [1, 2], linear frequency"
145
+ },
146
+ {
147
+ "type": "text",
148
+ "bbox": [
149
+ 0.509,
150
+ 0.366,
151
+ 0.915,
152
+ 0.682
153
+ ],
154
+ "angle": 0,
155
+ "content": "cepstral coefficients (LFCC), log power spectrogram (LPS), and so on. For different front-end features, researchers have proposed a series of convolutional neural network modeling methods. Most of the networks are based on ResNet, LCNN [3], and graph networks. To further improve the model performance, researchers introduced attention mechanisms into different networks. Ling et al. [4] proposed an attention-based convolutional neural network for the FSD task, authors designed a frequency attention block and a channel attention block to focus the discriminative information of the features. In [5], they first time applied graph attention (GAT) networks to FSD tasks to further analyze the relationship between adjacent subbands in spectral features. Jung et al. [6] proposed a GAT-based architecture using a heterogeneous attention mechanism and stack nodes to integrate temporal and spectral domains. Zhang et al. [7] used the squeeze and excitation (SE) network for FSD, which is the SE part for computing global channel attention weights. In addition, efficient channel attention (ECA) [8] is proposed to solve the problem of information loss due to SE block dimensionality reduction, which is widely used in other fields."
156
+ },
157
+ {
158
+ "type": "text",
159
+ "bbox": [
160
+ 0.509,
161
+ 0.685,
162
+ 0.915,
163
+ 0.836
164
+ ],
165
+ "angle": 0,
166
+ "content": "In addition, self-distillation as popular methods has gained good performance in many fields. In [9], the authors proposed an attention-based feature self-distillation method, which can utilize more high-level information to improve the performance of automatic speech recognition. Liu et al. [10] designed a feature-enhanced self-distillation method for speaker verification, which achieved good performance. Ge et al. [11] proposed an integrated propagation of knowledge form to design a self-distillation framework, which is very effective for image classification tasks."
167
+ },
168
+ {
169
+ "type": "text",
170
+ "bbox": [
171
+ 0.508,
172
+ 0.838,
173
+ 0.915,
174
+ 0.915
175
+ ],
176
+ "angle": 0,
177
+ "content": "In this paper, we propose a novel self-distillation approach for FSD. Many studies show that the shallow feature of voice is important for FSD. In [7], the authors find that mute segments of voices affect the performance of FSD seriously. Further, Deng et al. [12] utilize shallow feature information by"
178
+ },
179
+ {
180
+ "type": "page_footnote",
181
+ "bbox": [
182
+ 0.109,
183
+ 0.9,
184
+ 0.245,
185
+ 0.913
186
+ ],
187
+ "angle": 0,
188
+ "content": "* Corresponding authors."
189
+ }
190
+ ],
191
+ [
192
+ {
193
+ "type": "image",
194
+ "bbox": [
195
+ 0.086,
196
+ 0.086,
197
+ 0.918,
198
+ 0.306
199
+ ],
200
+ "angle": 0,
201
+ "content": null
202
+ },
203
+ {
204
+ "type": "image_caption",
205
+ "bbox": [
206
+ 0.082,
207
+ 0.321,
208
+ 0.916,
209
+ 0.366
210
+ ],
211
+ "angle": 0,
212
+ "content": "Fig. 1. The proposed self-distillation framework is based on SENet and ECANet networks. The framework is divided into four blocks and each block is set with a classifier (AngleLinear, AL). Note: The additional classifiers can all be removed during evaluation and thus do not increase the model load."
213
+ },
214
+ {
215
+ "type": "text",
216
+ "bbox": [
217
+ 0.087,
218
+ 0.389,
219
+ 0.49,
220
+ 0.706
221
+ ],
222
+ "angle": 0,
223
+ "content": "designing simple classifiers, such as spectrum defects, mute segments, and so on. While shallow networks are sensitive to this kind of information, the capture ability is not well as deep networks. To solve this problem, we propose a self-distillation framework that the deepest network instructs shallow networks, which can further strengthen shallow networks. Specifically, during training, we add classifiers as multiple student models for all shallow networks and set the deepest network as the teacher model. Based on the prediction results, the teacher model instructs the student model to transform the deepest knowledge into all shallow networks. Meanwhile, to reduce the feature difference between the shallow and deepest networks, we also build self-distillation models in the feature dimension. It is worth noting that the classifiers in all shallow layers can be removed during inference, thus causing no additional computational and model complexity. To validate the effectiveness of the proposed approach, we used the ECANet and SENet networks as the baseline and the LA and PA from the ASVspoof 2019 challenge as the dataset. Compared to the baseline, the performance of the system is significantly improved after using self-distillation."
224
+ },
225
+ {
226
+ "type": "title",
227
+ "bbox": [
228
+ 0.085,
229
+ 0.726,
230
+ 0.486,
231
+ 0.74
232
+ ],
233
+ "angle": 0,
234
+ "content": "2. THE PROPOSED SELF-DISTILLATION METHOD"
235
+ },
236
+ {
237
+ "type": "title",
238
+ "bbox": [
239
+ 0.085,
240
+ 0.754,
241
+ 0.231,
242
+ 0.768
243
+ ],
244
+ "angle": 0,
245
+ "content": "2.1. Self-distillation"
246
+ },
247
+ {
248
+ "type": "text",
249
+ "bbox": [
250
+ 0.083,
251
+ 0.778,
252
+ 0.489,
253
+ 0.913
254
+ ],
255
+ "angle": 0,
256
+ "content": "In this section, we propose a self-distillation method as shown in Fig.1. Specifically, Inspired by [13], we divide it into four segments based on the model architecture. In this paper, we divide the model into four blocks, and the block is SE block or ECA block. Then, we set a classifier after each block, and we use AngleLinear to obtain the prediction results of each layer. Note that these extra classifiers are only used in training and do not increase the load during inference. In the training phase, we use the fourth layer of the network as the teacher"
257
+ },
258
+ {
259
+ "type": "text",
260
+ "bbox": [
261
+ 0.509,
262
+ 0.389,
263
+ 0.915,
264
+ 0.433
265
+ ],
266
+ "angle": 0,
267
+ "content": "model and the first three layers as the student model. The deep network knowledge is transformed to the shallow layer in both feature and prediction dimensions."
268
+ },
269
+ {
270
+ "type": "text",
271
+ "bbox": [
272
+ 0.509,
273
+ 0.434,
274
+ 0.915,
275
+ 0.464
276
+ ],
277
+ "angle": 0,
278
+ "content": "To make full use of the network information, three losses during the training process are introduced:"
279
+ },
280
+ {
281
+ "type": "text",
282
+ "bbox": [
283
+ 0.537,
284
+ 0.473,
285
+ 0.913,
286
+ 0.561
287
+ ],
288
+ "angle": 0,
289
+ "content": "- Hard loss: The A-softmax function is used to calculate the loss of the labels and the fourth layer classifier. This is calculated using the labels of the training dataset and the output from the Anglelinear classifier, which is used to fully extract the hidden knowledge in the training dataset."
290
+ },
291
+ {
292
+ "type": "text",
293
+ "bbox": [
294
+ 0.537,
295
+ 0.57,
296
+ 0.913,
297
+ 0.661
298
+ ],
299
+ "angle": 0,
300
+ "content": "- Feature loss: The L2 function is used to compute the feature mapping between the fourth layer network and each shallow layer network. This can introduce nonexplicit knowledge of the deepest features into the shallow features so that their shallow networks can match better with the deeper networks when predicting."
301
+ },
302
+ {
303
+ "type": "text",
304
+ "bbox": [
305
+ 0.537,
306
+ 0.668,
307
+ 0.914,
308
+ 0.772
309
+ ],
310
+ "angle": 0,
311
+ "content": "- Soft loss: The KL divergence function is used to calculate the soft loss in the teacher-student model. The deepest network output is used as the teacher model and several shallow network outputs are used as the student model. The difference between the distributions of the two outputs is calculated, which can guide the shallow network to learn more."
312
+ },
313
+ {
314
+ "type": "list",
315
+ "bbox": [
316
+ 0.537,
317
+ 0.473,
318
+ 0.914,
319
+ 0.772
320
+ ],
321
+ "angle": 0,
322
+ "content": null
323
+ },
324
+ {
325
+ "type": "title",
326
+ "bbox": [
327
+ 0.51,
328
+ 0.785,
329
+ 0.676,
330
+ 0.8
331
+ ],
332
+ "angle": 0,
333
+ "content": "2.2. Training Methods"
334
+ },
335
+ {
336
+ "type": "text",
337
+ "bbox": [
338
+ 0.508,
339
+ 0.809,
340
+ 0.914,
341
+ 0.853
342
+ ],
343
+ "angle": 0,
344
+ "content": "When training with the self-distillation method, the loss has three components. First, we compute the hard loss of the deepest network and labels:"
345
+ },
346
+ {
347
+ "type": "equation",
348
+ "bbox": [
349
+ 0.61,
350
+ 0.858,
351
+ 0.914,
352
+ 0.874
353
+ ],
354
+ "angle": 0,
355
+ "content": "\\[\n\\mathcal {L} _ {\\text {h a r d}} = A _ {-} \\operatorname {s o f t m a x} \\left(p ^ {n}, L\\right) \\tag {1}\n\\]"
356
+ },
357
+ {
358
+ "type": "text",
359
+ "bbox": [
360
+ 0.509,
361
+ 0.884,
362
+ 0.915,
363
+ 0.915
364
+ ],
365
+ "angle": 0,
366
+ "content": "where \\( p^n \\) is the deepest output of the network, in this paper, we set the \\( n \\) is 4. \\( L \\) is the label of the training data set,"
367
+ }
368
+ ],
369
+ [
370
+ {
371
+ "type": "table_caption",
372
+ "bbox": [
373
+ 0.084,
374
+ 0.1,
375
+ 0.916,
376
+ 0.128
377
+ ],
378
+ "angle": 0,
379
+ "content": "Table 1. EER and t-DCF results for different architectures based on the ASVspoof 2019 LA and PA dataset. SD indicates that the self-distillation method is used."
380
+ },
381
+ {
382
+ "type": "table",
383
+ "bbox": [
384
+ 0.089,
385
+ 0.127,
386
+ 0.913,
387
+ 0.27
388
+ ],
389
+ "angle": 0,
390
+ "content": "<table><tr><td rowspan=\"5\">LA dataset</td><td colspan=\"2\">Model</td><td>ECANet9</td><td>ECANet18</td><td>ECANet34</td><td>ECANet50</td><td>SENet9</td><td>SENet18</td><td>SENet34</td><td>SENet50</td></tr><tr><td rowspan=\"2\">EER</td><td>baseline</td><td>1.48</td><td>1.18</td><td>1.44</td><td>1.87</td><td>1.97</td><td>1.52</td><td>1.23</td><td>1.83</td></tr><tr><td>SD</td><td>1.22</td><td>0.88</td><td>1.14</td><td>1.09</td><td>1.23</td><td>1.37</td><td>1.08</td><td>1.00</td></tr><tr><td rowspan=\"2\">t-DCF</td><td>baseline</td><td>0.0493</td><td>0.0378</td><td>0.0460</td><td>0.0605</td><td>0.0610</td><td>0.0497</td><td>0.0358</td><td>0.0536</td></tr><tr><td>SD</td><td>0.0376</td><td>0.0295</td><td>0.0334</td><td>0.0318</td><td>0.0388</td><td>0.0417</td><td>0.0347</td><td>0.0309</td></tr><tr><td rowspan=\"5\">PA dataset</td><td colspan=\"2\">Model</td><td>ECANet9</td><td>ECANet18</td><td>ECANet34</td><td>ECANet50</td><td>SENet9</td><td>SENet18</td><td>SENet34</td><td>SENet50</td></tr><tr><td rowspan=\"2\">EER</td><td>baseline</td><td>0.83</td><td>0.95</td><td>0.88</td><td>0.90</td><td>0.97</td><td>0.93</td><td>1.14</td><td>0.82</td></tr><tr><td>SD</td><td>0.74</td><td>0.83</td><td>0.70</td><td>0.82</td><td>0.85</td><td>0.87</td><td>0.65</td><td>0.79</td></tr><tr><td rowspan=\"2\">t-DCF</td><td>baseline</td><td>0.0221</td><td>0.0284</td><td>0.0255</td><td>0.0262</td><td>0.0269</td><td>0.0266</td><td>0.0334</td><td>0.0228</td></tr><tr><td>SD</td><td>0.0199</td><td>0.0219</td><td>0.0208</td><td>0.0222</td><td>0.0232</td><td>0.0239</td><td>0.0174</td><td>0.0219</td></tr></table>"
391
+ },
392
+ {
393
+ "type": "table_caption",
394
+ "bbox": [
395
+ 0.082,
396
+ 0.303,
397
+ 0.489,
398
+ 0.378
399
+ ],
400
+ "angle": 0,
401
+ "content": "Table 2. The ECANet and SENet model architecture and configuration. Dimensions refer to (channels, frequency, and time). Batch normalization (BN) and Rectified Linear Unit (ReLU). ECA and SE are the efficient channel attention block and the squeeze and excitation block, respectively."
402
+ },
403
+ {
404
+ "type": "table",
405
+ "bbox": [
406
+ 0.089,
407
+ 0.378,
408
+ 0.487,
409
+ 0.661
410
+ ],
411
+ "angle": 0,
412
+ "content": "<table><tr><td>Layer</td><td colspan=\"2\">Input: 27000 samples</td><td>Output shape</td></tr><tr><td>Front-end</td><td colspan=\"2\">F0 subband</td><td>(45,600)(F,T)</td></tr><tr><td>Post-processing</td><td colspan=\"2\">Add channelConv2D_1BN &amp; ReLU</td><td>(1,45,600)(16,45,600)</td></tr><tr><td>Block1</td><td colspan=\"2\">c1×{Conv2D_3Conv2D_3ECA or SE}</td><td>(32,45,600)</td></tr><tr><td>Block2</td><td colspan=\"2\">c2×{Conv2D_3Conv2D_3ECA or SE}</td><td>(64,23,300)</td></tr><tr><td>Block3</td><td colspan=\"2\">c3×{Conv2D_3Conv2D_3ECA or SE}</td><td>(128,12,150)</td></tr><tr><td>Block4</td><td colspan=\"2\">c4×{Conv2D_3Conv2D_3ECA or SE}</td><td>(256,6,75)</td></tr><tr><td>Output</td><td colspan=\"2\">Avgpool2D(1,1)AngleLinear</td><td>(256,1,1)2</td></tr></table>"
413
+ },
414
+ {
415
+ "type": "text",
416
+ "bbox": [
417
+ 0.084,
418
+ 0.683,
419
+ 0.489,
420
+ 0.711
421
+ ],
422
+ "angle": 0,
423
+ "content": "and \\( A_{-} \\)softmax denotes the A-softmax function. \\( \\mathcal{L}_{\\text{hard}} \\) is the hard loss."
424
+ },
425
+ {
426
+ "type": "text",
427
+ "bbox": [
428
+ 0.084,
429
+ 0.713,
430
+ 0.489,
431
+ 0.758
432
+ ],
433
+ "angle": 0,
434
+ "content": "Soft loss is used for knowledge distillation of the shallow and deep networks, and we calculate KL divergence using each shallow and deepest network."
435
+ },
436
+ {
437
+ "type": "equation",
438
+ "bbox": [
439
+ 0.2,
440
+ 0.76,
441
+ 0.488,
442
+ 0.8
443
+ ],
444
+ "angle": 0,
445
+ "content": "\\[\n\\mathcal {L} _ {\\text {s o f t}} = \\sum_ {i} ^ {n - 1} K L \\left(p ^ {i}, p ^ {n}\\right) \\tag {2}\n\\]"
446
+ },
447
+ {
448
+ "type": "text",
449
+ "bbox": [
450
+ 0.084,
451
+ 0.808,
452
+ 0.489,
453
+ 0.854
454
+ ],
455
+ "angle": 0,
456
+ "content": "\\(KL\\) denotes the KL divergence function and \\(p^i\\) is the output of each layer of the network after the Angellinear classifier. \\(\\mathcal{L}_{soft}\\) is the final soft loss."
457
+ },
458
+ {
459
+ "type": "text",
460
+ "bbox": [
461
+ 0.084,
462
+ 0.854,
463
+ 0.489,
464
+ 0.913
465
+ ],
466
+ "angle": 0,
467
+ "content": "The feature loss is used to balance the difference between the shallow and deepest networks, which can be fed back to the classification output of the shallow network to facilitate the soft loss fit."
468
+ },
469
+ {
470
+ "type": "equation",
471
+ "bbox": [
472
+ 0.614,
473
+ 0.292,
474
+ 0.913,
475
+ 0.332
476
+ ],
477
+ "angle": 0,
478
+ "content": "\\[\n\\mathcal {L} _ {\\text {f e a t u r e}} = \\sum_ {i} ^ {n - 1} L 2 \\left(\\mathcal {F} ^ {i}, \\mathcal {F} ^ {n}\\right) \\tag {3}\n\\]"
479
+ },
480
+ {
481
+ "type": "text",
482
+ "bbox": [
483
+ 0.51,
484
+ 0.341,
485
+ 0.916,
486
+ 0.388
487
+ ],
488
+ "angle": 0,
489
+ "content": "\\(\\mathcal{F}^i\\) is the output feature of each layer, and \\(\\mathcal{F}^n\\) is the output feature of the deepest layer. \\(L2\\) is the L2 loss function, and \\(\\mathcal{L}_{\\text{feature}}\\) is the final feature loss."
490
+ },
491
+ {
492
+ "type": "equation",
493
+ "bbox": [
494
+ 0.537,
495
+ 0.4,
496
+ 0.915,
497
+ 0.417
498
+ ],
499
+ "angle": 0,
500
+ "content": "\\[\n\\mathcal {L} = \\alpha * \\mathcal {L} _ {\\text {h a r d}} + (1 - \\alpha) * \\mathcal {L} _ {\\text {s o f t}} + \\beta * \\mathcal {L} _ {\\text {f e a t u r e}} \\tag {4}\n\\]"
501
+ },
502
+ {
503
+ "type": "text",
504
+ "bbox": [
505
+ 0.509,
506
+ 0.424,
507
+ 0.915,
508
+ 0.469
509
+ ],
510
+ "angle": 0,
511
+ "content": "The loss at training consists of the following three components, \\(\\alpha\\) and \\(\\beta\\) are hyperparameters to balance the three sources of loss. \\(\\mathcal{L}\\) is the final loss."
512
+ },
513
+ {
514
+ "type": "title",
515
+ "bbox": [
516
+ 0.581,
517
+ 0.488,
518
+ 0.844,
519
+ 0.503
520
+ ],
521
+ "angle": 0,
522
+ "content": "3. EXPERIMENTS AND RESULTS"
523
+ },
524
+ {
525
+ "type": "title",
526
+ "bbox": [
527
+ 0.51,
528
+ 0.517,
529
+ 0.61,
530
+ 0.531
531
+ ],
532
+ "angle": 0,
533
+ "content": "3.1. Datasets"
534
+ },
535
+ {
536
+ "type": "text",
537
+ "bbox": [
538
+ 0.508,
539
+ 0.54,
540
+ 0.916,
541
+ 0.676
542
+ ],
543
+ "angle": 0,
544
+ "content": "We trained and evaluated models on the ASVspoof 2019 LA and PA datasets. The LA set includes three types of spoofing attacks (TTS, VS, and audio replay), which are divided into 19 attack algorithms (A01-A19). The PA set includes only replay attacks, and there are 27 replay attacks in different acoustic environments. In this paper, EER and the minimum normalized tandem detection cost function (min t-DCF) are used as evaluation metrics for assessing the performance of different systems."
545
+ },
546
+ {
547
+ "type": "title",
548
+ "bbox": [
549
+ 0.51,
550
+ 0.694,
551
+ 0.743,
552
+ 0.71
553
+ ],
554
+ "angle": 0,
555
+ "content": "3.2. Training setup and baseline"
556
+ },
557
+ {
558
+ "type": "text",
559
+ "bbox": [
560
+ 0.508,
561
+ 0.717,
562
+ 0.915,
563
+ 0.838
564
+ ],
565
+ "angle": 0,
566
+ "content": "Front-end features: Inspired by [20], we use the F0 subband as our input features. Firstly, we extract the full frequency band of LPS and use the window function as Blackman's Short Time Fourier Transform (STFT), setting the window length and hop length as 1728 and 130 respectively. we fix the frame number as 600 and get the LPS to feature \\(865\\times 600\\). Finally, we take the first 45 dimensions of the frequency and finally get the front-end F0 subband feature size of \\(45\\times 600\\)."
567
+ },
568
+ {
569
+ "type": "text",
570
+ "bbox": [
571
+ 0.509,
572
+ 0.838,
573
+ 0.915,
574
+ 0.914
575
+ ],
576
+ "angle": 0,
577
+ "content": "Back-end classifier: As shown in Table 2, we use SENet and ECANet as deep neural network classifiers. where the \\(c_{1} - c_{4}\\) vectors corresponding to the 9, 18, 34 layers are \\((1,1,1,1)\\), \\((2,2,2,2)\\), and \\((3,4,6,3)\\), respectively. The 50-layer network is set to three convolutions in the block, with"
578
+ }
579
+ ],
580
+ [
581
+ {
582
+ "type": "table_caption",
583
+ "bbox": [
584
+ 0.22,
585
+ 0.101,
586
+ 0.779,
587
+ 0.114
588
+ ],
589
+ "angle": 0,
590
+ "content": "Table 3. Comparison of our self-distillation system with other known single systems."
591
+ },
592
+ {
593
+ "type": "table",
594
+ "bbox": [
595
+ 0.089,
596
+ 0.115,
597
+ 0.913,
598
+ 0.281
599
+ ],
600
+ "angle": 0,
601
+ "content": "<table><tr><td colspan=\"4\">LA dataset</td><td colspan=\"4\">PA dataset</td></tr><tr><td>Systems</td><td>Front-end</td><td>EER(%)</td><td>t-DCF</td><td>Systems</td><td>Front-end</td><td>EER(%)</td><td>t-DCF</td></tr><tr><td>AASIST [6]</td><td>Raw waveform</td><td>0.83</td><td>0.0275</td><td>T28 [14]</td><td>-</td><td>0.52</td><td>0.1470</td></tr><tr><td>ECANet18(SD) Ours</td><td>F0 subband</td><td>0.88</td><td>0.0295</td><td>SENet34(SD) Ours</td><td>F0 subband</td><td>0.65</td><td>0.0174</td></tr><tr><td>SENet50(SD) Ours</td><td>F0 subband</td><td>1.00</td><td>0.0309</td><td>ECANet34(SD) Ours</td><td>F0 subband</td><td>0.70</td><td>0.0208</td></tr><tr><td>RawGAT-ST [15]</td><td>Raw waveform</td><td>1.06</td><td>0.0340</td><td>SE-Res2Net50 [16]</td><td>Spec</td><td>0.74</td><td>0.0207</td></tr><tr><td>SENet34(SD) Ours</td><td>F0 subband</td><td>1.08</td><td>0.0347</td><td>T10 [14]</td><td>-</td><td>1.08</td><td>0.1598</td></tr><tr><td>FFT-L-SENet [7]</td><td>LPS</td><td>1.14</td><td>0.0368</td><td>T45 [14]</td><td>-</td><td>1.23</td><td>0.1610</td></tr><tr><td>MCG-Res2Net50 [17]</td><td>CQT</td><td>1.78</td><td>0.0520</td><td>T44 [14]</td><td>-</td><td>1.29</td><td>0.1666</td></tr><tr><td>Resnet18-OC-softmax [18]</td><td>LFCC</td><td>2.19</td><td>0.0590</td><td>T53 [14]</td><td>-</td><td>1.66</td><td>0.1729</td></tr><tr><td>ResNet18-GAT-T [5]</td><td>LFB</td><td>4.71</td><td>0.0894</td><td>Capsule [19]</td><td>LFCC</td><td>2.76</td><td>0.0730</td></tr></table>"
602
+ },
603
+ {
604
+ "type": "text",
605
+ "bbox": [
606
+ 0.082,
607
+ 0.306,
608
+ 0.488,
609
+ 0.382
610
+ ],
611
+ "angle": 0,
612
+ "content": "convolution kernels of 1,3,1. The rest is the same as the 34-layer network. For training, we use Adam as the optimizer with parameters \\(\\beta_{1} = 0.9\\), \\(\\beta_{2} = 0.98\\), \\(\\epsilon = 10^{-9}\\) and weight decay \\(10^{-4}\\). The number of the epoch is 32. The two hyperparameters \\(\\alpha\\) and \\(\\beta\\) are set to 0.7 and 0.3, respectively."
613
+ },
614
+ {
615
+ "type": "title",
616
+ "bbox": [
617
+ 0.084,
618
+ 0.404,
619
+ 0.358,
620
+ 0.419
621
+ ],
622
+ "angle": 0,
623
+ "content": "3.3. Experiment results on LA dataset"
624
+ },
625
+ {
626
+ "type": "text",
627
+ "bbox": [
628
+ 0.082,
629
+ 0.429,
630
+ 0.49,
631
+ 0.746
632
+ ],
633
+ "angle": 0,
634
+ "content": "Table 1 shows the EER and t-DCF of the baseline and self-distillation systems for the ASVspoof 2019 LA dataset. The \"SD\" denotes self-distillation. According to Table 1, it can be seen that our self-distillation method significantly outperforms the baseline system. In addition, we can observe several interesting phenomena. First, the baseline system \"ECANet18\" has the best performance with an EER of \\(1.18\\%\\). Even so, the self-distillation method can improve it by \\(25\\%\\). Second, the performance of the different network architectures decreases significantly as the network gets deeper. For example, the EER of the baseline \"SENet50\" is \\(1.83\\%\\), and the EER of its self-distillation is \\(1.00\\%\\), which is a \\(45\\%\\) improvement. Self-distillation effectively reduces the performance degradation of the FSD due to depth and makes its performance smoother for different network depths. Table 3 shows the performance of the most recently known SOTA single system, and our best system is ranked second. In general, the self-distillation method has the effect of fully exploiting the information of different levels of the network. Further, this method has strong generality and improves for different architectures at different depths."
635
+ },
636
+ {
637
+ "type": "title",
638
+ "bbox": [
639
+ 0.084,
640
+ 0.768,
641
+ 0.357,
642
+ 0.784
643
+ ],
644
+ "angle": 0,
645
+ "content": "3.4. Experiment results on PA dataset"
646
+ },
647
+ {
648
+ "type": "text",
649
+ "bbox": [
650
+ 0.082,
651
+ 0.793,
652
+ 0.489,
653
+ 0.913
654
+ ],
655
+ "angle": 0,
656
+ "content": "Table 1 shows the EER and t-DCF of the baseline and the self-distillation system for the ASVspoof 2019 PA dataset. according to Table 1, it can be seen that the self-distillation system outperforms the baseline. The EER of the baseline \"SENet34\" is \\(1.14\\%\\), which is the worst performance in the overall baseline. However, the \"SENet34(SD)\" system could obtain an EER of \\(0.65\\%\\), making it the best-performing system. This may be because the shallow network contains more"
657
+ },
658
+ {
659
+ "type": "text",
660
+ "bbox": [
661
+ 0.508,
662
+ 0.306,
663
+ 0.916,
664
+ 0.412
665
+ ],
666
+ "angle": 0,
667
+ "content": "unexplored information, and thus the self-distillation system has a teacher for guidance, which allows shallow networks to mine more fine-grained information as well. In addition, Table 3 compares the Top systems on the PA dataset, and our method can also get the second one. This indicates that the self-distillation method is very effective. Further, our method can also be adapted to different datasets."
668
+ },
669
+ {
670
+ "type": "title",
671
+ "bbox": [
672
+ 0.639,
673
+ 0.431,
674
+ 0.786,
675
+ 0.445
676
+ ],
677
+ "angle": 0,
678
+ "content": "4. CONCLUSIONS"
679
+ },
680
+ {
681
+ "type": "text",
682
+ "bbox": [
683
+ 0.508,
684
+ 0.459,
685
+ 0.916,
686
+ 0.716
687
+ ],
688
+ "angle": 0,
689
+ "content": "In this paper, we propose a novel self-distillation method for FSD tasks. This can further improve the performance of FSD without increasing the load and has generality for networks of different architectures. Specifically, we add classifiers behind the shallow network to build interaction with the deepest network in both feature and prediction dimensions, which enhances shallow networks' ability that captures detailed discriminately information. The feature distillation aims to reduce the difference between deep and shallow features, and the distillation of the prediction dimension is to fully exploit the information in each layer of the network to further optimize the network. We use different architectures of ECANet and SENet, and our experimental results on ASVspoof 2019 LA and PA datasets validate the effectiveness and generality of our approach, significantly improving the performance of the baseline. In the future, we will work on building more low-parameter and highly robust FSD systems."
690
+ },
691
+ {
692
+ "type": "title",
693
+ "bbox": [
694
+ 0.604,
695
+ 0.735,
696
+ 0.822,
697
+ 0.749
698
+ ],
699
+ "angle": 0,
700
+ "content": "5. ACKNOWLEDGEMENTS"
701
+ },
702
+ {
703
+ "type": "text",
704
+ "bbox": [
705
+ 0.508,
706
+ 0.763,
707
+ 0.916,
708
+ 0.913
709
+ ],
710
+ "angle": 0,
711
+ "content": "This work is supported by the National Key Research and Development Plan of China (No.2020AAA0140003), the National Natural Science Foundation of China (NSFC) (No.62201002, No.61972437), Excellent Youth Foundation of Anhui Scientific Committee (No. 2208085J05), Special Fund for Key Program of Science and Technology of Anhui Province (No. 202203a07020008), the Open Research Projects of Zhejiang Lab (NO. 2021KH0AB06) and the Open Projects Program of National Laboratory of Pattern Recognition (NO. 202200014)."
712
+ }
713
+ ],
714
+ [
715
+ {
716
+ "type": "title",
717
+ "bbox": [
718
+ 0.218,
719
+ 0.092,
720
+ 0.356,
721
+ 0.106
722
+ ],
723
+ "angle": 0,
724
+ "content": "6. REFERENCES"
725
+ },
726
+ {
727
+ "type": "ref_text",
728
+ "bbox": [
729
+ 0.096,
730
+ 0.121,
731
+ 0.488,
732
+ 0.196
733
+ ],
734
+ "angle": 0,
735
+ "content": "[1] Jee-weon Jung, Hee-Soo Heo, Ju-ho Kim, Hye-jin Shim, and Ha-Jin Yu, “Rawnet: Advanced end-to-end deep neural network using raw waveforms for text-independent speaker verification,” Proc. Interspeech 2019, pp. 1268-1272, 2019."
736
+ },
737
+ {
738
+ "type": "ref_text",
739
+ "bbox": [
740
+ 0.096,
741
+ 0.21,
742
+ 0.488,
743
+ 0.27
744
+ ],
745
+ "angle": 0,
746
+ "content": "[2] Hemlata Tak, Jose Patino, Massimiliano Todisco, Andreas Nautsch, Nicholas Evans, and Anthony Larcher, \"End-to-end anti-spoofing with rawnet2,\" in ICASSP 2021. IEEE, 2021, pp. 6369-6373."
747
+ },
748
+ {
749
+ "type": "ref_text",
750
+ "bbox": [
751
+ 0.096,
752
+ 0.283,
753
+ 0.488,
754
+ 0.342
755
+ ],
756
+ "angle": 0,
757
+ "content": "[3] Chenglong Wang, Jiangyan Yi, Jianhua Tao, Haiyang Sun, Xun Chen, Zhengkun Tian, Haoxin Ma, Cunhang Fan, and Ruibo Fu, “Fully automated end-to-end fake audio detection,” in DDAM 2022, 2022, pp. 27-33."
758
+ },
759
+ {
760
+ "type": "ref_text",
761
+ "bbox": [
762
+ 0.096,
763
+ 0.356,
764
+ 0.488,
765
+ 0.415
766
+ ],
767
+ "angle": 0,
768
+ "content": "[4] Hefei Ling, Leichao Huang, Junrui Huang, Baiyan Zhang, and Ping Li, “Attention-Based Convolutional Neural Network for ASV Spoofing Detection,” in Proc. Interspeech, 2021, pp. 4289–4293."
769
+ },
770
+ {
771
+ "type": "ref_text",
772
+ "bbox": [
773
+ 0.096,
774
+ 0.429,
775
+ 0.488,
776
+ 0.517
777
+ ],
778
+ "angle": 0,
779
+ "content": "[5] Hemlata Tak, Jee-weon Jung, Jose Patino, Madhu Kamble, Massimiliano Todisco, and Nicholas Evans, “End-to-end spectro-temporal graph attention networks for speaker verification anti-spoofing and speech deepfake detection,” in Proc. ASVSproof Challenge, 2021, pp. 1–8."
780
+ },
781
+ {
782
+ "type": "ref_text",
783
+ "bbox": [
784
+ 0.096,
785
+ 0.532,
786
+ 0.488,
787
+ 0.607
788
+ ],
789
+ "angle": 0,
790
+ "content": "[6] Jee-weon Jung, Hee-Soo Heo, Hemlata Tak, Hye-jin Shim, Joon Son Chung, Bong-Jin Lee, Ha-Jin Yu, and Nicholas Evans, \"Aassist: Audio anti-spoofing using integrated spectro-temporal graph attention networks,\" in ICASSP 2022. IEEE, 2022, pp. 6367-6371."
791
+ },
792
+ {
793
+ "type": "ref_text",
794
+ "bbox": [
795
+ 0.096,
796
+ 0.62,
797
+ 0.488,
798
+ 0.678
799
+ ],
800
+ "angle": 0,
801
+ "content": "[7] Yuxiang Zhang, Wenchao Wang, and Pengyuan Zhang, “The Effect of Silence and Dual-Band Fusion in Anti-Spoofing System,” in Proc. Interspeech, 2021, pp. 4279–4283."
802
+ },
803
+ {
804
+ "type": "ref_text",
805
+ "bbox": [
806
+ 0.096,
807
+ 0.693,
808
+ 0.488,
809
+ 0.753
810
+ ],
811
+ "angle": 0,
812
+ "content": "[8] Qilong Wang, Banggu Wu, Pengfei Zhu, P. Li, Wangmeng Zuo, and Qinghua Hu, \"Eca-net: Efficient channel attention for deep convolutional neural networks,\" CVPR 2020, pp. 11531-11539, 2020."
813
+ },
814
+ {
815
+ "type": "ref_text",
816
+ "bbox": [
817
+ 0.096,
818
+ 0.766,
819
+ 0.488,
820
+ 0.841
821
+ ],
822
+ "angle": 0,
823
+ "content": "[9] Qiang Xu, Tongtong Song, Longbiao Wang, Hao Shi, Yuqin Lin, Yongjie Lv, Meng Ge, Qiang Yu, and Jianwu Dang, \"Self-distillation based on high-level information supervision for compressing end-to-end asr model,\" Proc. Interspeech 2022, pp. 1716-1720, 2022."
824
+ },
825
+ {
826
+ "type": "ref_text",
827
+ "bbox": [
828
+ 0.088,
829
+ 0.854,
830
+ 0.488,
831
+ 0.914
832
+ ],
833
+ "angle": 0,
834
+ "content": "[10] Bei Liu, Haoyu Wang, Zhengyang Chen, Shuai Wang, and Yanmin Qian, \"Self-knowledge distillation via feature enhancement for speaker verification,\" in ICASSP 2022. IEEE, 2022, pp. 7542-7546."
835
+ },
836
+ {
837
+ "type": "list",
838
+ "bbox": [
839
+ 0.088,
840
+ 0.121,
841
+ 0.488,
842
+ 0.914
843
+ ],
844
+ "angle": 0,
845
+ "content": null
846
+ },
847
+ {
848
+ "type": "ref_text",
849
+ "bbox": [
850
+ 0.513,
851
+ 0.092,
852
+ 0.914,
853
+ 0.166
854
+ ],
855
+ "angle": 0,
856
+ "content": "[11] Yixiao Ge, Xiao Zhang, Ching Lam Choi, Ka Chun Cheung, Peipei Zhao, Feng Zhu, Xiaogang Wang, Rui Zhao, and Hongsheng Li, \"Self-distillation with batch knowledge ensembling improves imagenet classification,\" arXiv, 2021."
857
+ },
858
+ {
859
+ "type": "ref_text",
860
+ "bbox": [
861
+ 0.513,
862
+ 0.176,
863
+ 0.914,
864
+ 0.221
865
+ ],
866
+ "angle": 0,
867
+ "content": "[12] JiaCheng Deng, Terui Mao, Diqun Yan, Li Dong, and Mingyu Dong, “Detection of synthetic speech based on spectrum defects,” in DDAM 2022, 2022, pp. 3-8."
868
+ },
869
+ {
870
+ "type": "ref_text",
871
+ "bbox": [
872
+ 0.513,
873
+ 0.229,
874
+ 0.914,
875
+ 0.305
876
+ ],
877
+ "angle": 0,
878
+ "content": "[13] Linfeng Zhang, Jiebo Song, Anni Gao, Jingwei Chen, Chenglong Bao, and Kaisheng Ma, “Be your own teacher: Improve the performance of convolutional neural net via self distillation,” in ICCV 2019. IEEE Computer Society, 2019, pp. 3712–3721."
879
+ },
880
+ {
881
+ "type": "ref_text",
882
+ "bbox": [
883
+ 0.513,
884
+ 0.313,
885
+ 0.914,
886
+ 0.432
887
+ ],
888
+ "angle": 0,
889
+ "content": "[14] Andreas Nautsch, Xin Wang, Nicholas Evans, Tomi H Kinnunen, Ville Vestman, Massimiliano Todisco, Héctor Delgado, Md Sahidullah, Junichi Yamagishi, and Kong Aik Lee, \"Asvspoof 2019: spoofing countermeasures for the detection of synthesized, converted and replayed speech,\" IEEE Transactions on Biometrics, Behavior, and Identity Science, vol. 3, no. 2, pp. 252-265, 2021."
890
+ },
891
+ {
892
+ "type": "ref_text",
893
+ "bbox": [
894
+ 0.513,
895
+ 0.442,
896
+ 0.914,
897
+ 0.547
898
+ ],
899
+ "angle": 0,
900
+ "content": "[15] Hemlata Tak, Jee-Weon Jung, Jose Patino, Madhu Kamble, Massimiliano Todisco, and Nicholas Evans, “End-to-end spectro-temporal graph attention networks for speaker verification anti-spoofing and speech deepfake detection,” in ASVSPoOF 2021, Automatic Speaker Verification and Spoofing Countermeasures Challenge. ISCA, 2021, pp. 1–8."
901
+ },
902
+ {
903
+ "type": "ref_text",
904
+ "bbox": [
905
+ 0.513,
906
+ 0.556,
907
+ 0.914,
908
+ 0.616
909
+ ],
910
+ "angle": 0,
911
+ "content": "[16] Xu Li, Na Li, Chao Weng, Xunying Liu, Dan Su, Dong Yu, and Helen Meng, \"Replay and synthetic speech detection with res2net architecture,\" in ICASSP 2021. IEEE, 2021, pp. 6354-6358."
912
+ },
913
+ {
914
+ "type": "ref_text",
915
+ "bbox": [
916
+ 0.513,
917
+ 0.624,
918
+ 0.914,
919
+ 0.684
920
+ ],
921
+ "angle": 0,
922
+ "content": "[17] Xu Li, Xixin Wu, Hui Lu, Xunying Liu, and Helen Meng, “Channel-Wise Gated Res2Net: Towards Robust Detection of Synthetic Speech Attacks,” in Proc. Interspeech 2021, 2021, pp. 4314–4318."
923
+ },
924
+ {
925
+ "type": "ref_text",
926
+ "bbox": [
927
+ 0.513,
928
+ 0.693,
929
+ 0.914,
930
+ 0.751
931
+ ],
932
+ "angle": 0,
933
+ "content": "[18] You Zhang, Fei Jiang, and Zhiyao Duan, \"One-class learning towards synthetic voice spoofing detection,\" IEEE Signal Processing Letters, vol. 28, pp. 937-941, 2021."
934
+ },
935
+ {
936
+ "type": "ref_text",
937
+ "bbox": [
938
+ 0.513,
939
+ 0.762,
940
+ 0.914,
941
+ 0.822
942
+ ],
943
+ "angle": 0,
944
+ "content": "[19] Anwei Luo, Enlei Li, Yongliang Liu, Xiangui Kang, and Z Jane Wang, “A capsule network based approach for detection of audio spoofing attacks,” in ICASSP 2021. IEEE, 2021, pp. 6359–6363."
945
+ },
946
+ {
947
+ "type": "ref_text",
948
+ "bbox": [
949
+ 0.513,
950
+ 0.831,
951
+ 0.914,
952
+ 0.906
953
+ ],
954
+ "angle": 0,
955
+ "content": "[20] Jun Xue, Cunhang Fan, Zhao Lv, Jianhua Tao, Jiangyan Yi, Chengshi Zheng, Zhengqi Wen, Minmin Yuan, and Shegang Shao, \"Audio deepfake detection based on a combination of f0 information and real plus imaginary spectrogram features,\" in DDAM 2022, 2022, pp. 19-26."
956
+ },
957
+ {
958
+ "type": "list",
959
+ "bbox": [
960
+ 0.513,
961
+ 0.092,
962
+ 0.914,
963
+ 0.906
964
+ ],
965
+ "angle": 0,
966
+ "content": null
967
+ }
968
+ ]
969
+ ]
2303.01xxx/2303.01211/8f863c6c-6773-4847-97f6-bf8177a76d73_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6b796a2e0785d30d0a823b46b34efdad5e6679acd170fd8a28b54fff266421ed
3
+ size 269829
2303.01xxx/2303.01211/full.md ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # LEARNING FROM YOURSELF: A SELF-DISTILLATION METHOD FOR FAKE SPEECH DETECTION
2
+
3
+ Jun Xue $^{1}$ , Cunhang Fan $^{1,*}$ , Jiangyan Yi $^{2}$ , Chenglong Wang $^{2}$ , Zhengqi Wen $^{3}$ , Dan Zhang $^{4}$ , Zhao Lv $^{1,*}$
4
+
5
+ <sup>1</sup>Anhui Province Key Laboratory of Multimodal Cognitive Computation, School of Computer Science and Technology, Anhui University, Hefei 230601, China
6
+ <sup>2</sup>NLPR, Institute of Automation, Chinese Academy of Sciences, Beijing 100190, China
7
+ <sup>3</sup>Qiyuan Laboratory, Beijing 100190, China
8
+
9
+ $^{4}$ Department of Psychology, Tsinghua University, Beijing 100190, China
10
+
11
+ e21201068@stu.ahu.edu.cn, {cunhang.fan, kjlz} @ahu.edu.cn, {jiangyan.yi,chenglong.wang} @nlpr.i.ac.cn,
12
+
13
+ wenzhengqi@qiyuanlab.com, dzhang@tsinghua.edu.cn
14
+
15
+ # ABSTRACT
16
+
17
+ In this paper, we propose a novel self-distillation method for fake speech detection (FSD), which can significantly improve the performance of FSD without increasing the model complexity. For FSD, some fine-grained information is very important, such as spectrogram defects, mute segments, and so on, which are often perceived by shallow networks. However, shallow networks have much noise, which can not capture this very well. To address this problem, we propose using the deepest network instruct shallow network for enhancing shallow networks. Specifically, the networks of FSD are divided into several segments, the deepest network being used as the teacher model, and all shallow networks become multiple student models by adding classifiers. Meanwhile, the distillation path between the deepest network feature and shallow network features is used to reduce the feature difference. A series of experimental results on the ASVspoof 2019 LA and PA datasets show the effectiveness of the proposed method, with significant improvements compared to the baseline.
18
+
19
+ Index Terms— Fake speech detection, self-distillation, automatic speaker verification, ASVspoof
20
+
21
+ # 1. INTRODUCTION
22
+
23
+ With the rise of biometrics, automatic speaker verification (ASV) has also started to be widely used. However, the development of synthetic speech technology seriously threatens the security of ASV systems. The main attack types for ASV systems are audio replay, text-to-speech (TTS), and voice conversion (VC). Therefore, a series of fake speech detection (FSD) challenges are used to improve the security of ASV systems.
24
+
25
+ The research of FSD revolves around finding discriminately features and designing robust networks. The front-end features mainly are raw waveforms [1, 2], linear frequency
26
+
27
+ cepstral coefficients (LFCC), log power spectrogram (LPS), and so on. For different front-end features, researchers have proposed a series of convolutional neural network modeling methods. Most of the networks are based on ResNet, LCNN [3], and graph networks. To further improve the model performance, researchers introduced attention mechanisms into different networks. Ling et al. [4] proposed an attention-based convolutional neural network for the FSD task, authors designed a frequency attention block and a channel attention block to focus the discriminative information of the features. In [5], they first time applied graph attention (GAT) networks to FSD tasks to further analyze the relationship between adjacent subbands in spectral features. Jung et al. [6] proposed a GAT-based architecture using a heterogeneous attention mechanism and stack nodes to integrate temporal and spectral domains. Zhang et al. [7] used the squeeze and excitation (SE) network for FSD, which is the SE part for computing global channel attention weights. In addition, efficient channel attention (ECA) [8] is proposed to solve the problem of information loss due to SE block dimensionality reduction, which is widely used in other fields.
28
+
29
+ In addition, self-distillation as popular methods has gained good performance in many fields. In [9], the authors proposed an attention-based feature self-distillation method, which can utilize more high-level information to improve the performance of automatic speech recognition. Liu et al. [10] designed a feature-enhanced self-distillation method for speaker verification, which achieved good performance. Ge et al. [11] proposed an integrated propagation of knowledge form to design a self-distillation framework, which is very effective for image classification tasks.
30
+
31
+ In this paper, we propose a novel self-distillation approach for FSD. Many studies show that the shallow feature of voice is important for FSD. In [7], the authors find that mute segments of voices affect the performance of FSD seriously. Further, Deng et al. [12] utilize shallow feature information by
32
+
33
+ ![](images/d9f0dabf7b90694e4c149a71870e15b561a064270b7b2367a6f6a958596c50dd.jpg)
34
+ Fig. 1. The proposed self-distillation framework is based on SENet and ECANet networks. The framework is divided into four blocks and each block is set with a classifier (AngleLinear, AL). Note: The additional classifiers can all be removed during evaluation and thus do not increase the model load.
35
+
36
+ designing simple classifiers, such as spectrum defects, mute segments, and so on. While shallow networks are sensitive to this kind of information, the capture ability is not well as deep networks. To solve this problem, we propose a self-distillation framework that the deepest network instructs shallow networks, which can further strengthen shallow networks. Specifically, during training, we add classifiers as multiple student models for all shallow networks and set the deepest network as the teacher model. Based on the prediction results, the teacher model instructs the student model to transform the deepest knowledge into all shallow networks. Meanwhile, to reduce the feature difference between the shallow and deepest networks, we also build self-distillation models in the feature dimension. It is worth noting that the classifiers in all shallow layers can be removed during inference, thus causing no additional computational and model complexity. To validate the effectiveness of the proposed approach, we used the ECANet and SENet networks as the baseline and the LA and PA from the ASVspoof 2019 challenge as the dataset. Compared to the baseline, the performance of the system is significantly improved after using self-distillation.
37
+
38
+ # 2. THE PROPOSED SELF-DISTILLATION METHOD
39
+
40
+ # 2.1. Self-distillation
41
+
42
+ In this section, we propose a self-distillation method as shown in Fig.1. Specifically, Inspired by [13], we divide it into four segments based on the model architecture. In this paper, we divide the model into four blocks, and the block is SE block or ECA block. Then, we set a classifier after each block, and we use AngleLinear to obtain the prediction results of each layer. Note that these extra classifiers are only used in training and do not increase the load during inference. In the training phase, we use the fourth layer of the network as the teacher
43
+
44
+ model and the first three layers as the student model. The deep network knowledge is transformed to the shallow layer in both feature and prediction dimensions.
45
+
46
+ To make full use of the network information, three losses during the training process are introduced:
47
+
48
+ - Hard loss: The A-softmax function is used to calculate the loss of the labels and the fourth layer classifier. This is calculated using the labels of the training dataset and the output from the Anglelinear classifier, which is used to fully extract the hidden knowledge in the training dataset.
49
+ - Feature loss: The L2 function is used to compute the feature mapping between the fourth layer network and each shallow layer network. This can introduce nonexplicit knowledge of the deepest features into the shallow features so that their shallow networks can match better with the deeper networks when predicting.
50
+ - Soft loss: The KL divergence function is used to calculate the soft loss in the teacher-student model. The deepest network output is used as the teacher model and several shallow network outputs are used as the student model. The difference between the distributions of the two outputs is calculated, which can guide the shallow network to learn more.
51
+
52
+ # 2.2. Training Methods
53
+
54
+ When training with the self-distillation method, the loss has three components. First, we compute the hard loss of the deepest network and labels:
55
+
56
+ $$
57
+ \mathcal {L} _ {\text {h a r d}} = A _ {-} \operatorname {s o f t m a x} \left(p ^ {n}, L\right) \tag {1}
58
+ $$
59
+
60
+ where $p^n$ is the deepest output of the network, in this paper, we set the $n$ is 4. $L$ is the label of the training data set,
61
+
62
+ Table 1. EER and t-DCF results for different architectures based on the ASVspoof 2019 LA and PA dataset. SD indicates that the self-distillation method is used.
63
+
64
+ <table><tr><td rowspan="5">LA dataset</td><td colspan="2">Model</td><td>ECANet9</td><td>ECANet18</td><td>ECANet34</td><td>ECANet50</td><td>SENet9</td><td>SENet18</td><td>SENet34</td><td>SENet50</td></tr><tr><td rowspan="2">EER</td><td>baseline</td><td>1.48</td><td>1.18</td><td>1.44</td><td>1.87</td><td>1.97</td><td>1.52</td><td>1.23</td><td>1.83</td></tr><tr><td>SD</td><td>1.22</td><td>0.88</td><td>1.14</td><td>1.09</td><td>1.23</td><td>1.37</td><td>1.08</td><td>1.00</td></tr><tr><td rowspan="2">t-DCF</td><td>baseline</td><td>0.0493</td><td>0.0378</td><td>0.0460</td><td>0.0605</td><td>0.0610</td><td>0.0497</td><td>0.0358</td><td>0.0536</td></tr><tr><td>SD</td><td>0.0376</td><td>0.0295</td><td>0.0334</td><td>0.0318</td><td>0.0388</td><td>0.0417</td><td>0.0347</td><td>0.0309</td></tr><tr><td rowspan="5">PA dataset</td><td colspan="2">Model</td><td>ECANet9</td><td>ECANet18</td><td>ECANet34</td><td>ECANet50</td><td>SENet9</td><td>SENet18</td><td>SENet34</td><td>SENet50</td></tr><tr><td rowspan="2">EER</td><td>baseline</td><td>0.83</td><td>0.95</td><td>0.88</td><td>0.90</td><td>0.97</td><td>0.93</td><td>1.14</td><td>0.82</td></tr><tr><td>SD</td><td>0.74</td><td>0.83</td><td>0.70</td><td>0.82</td><td>0.85</td><td>0.87</td><td>0.65</td><td>0.79</td></tr><tr><td rowspan="2">t-DCF</td><td>baseline</td><td>0.0221</td><td>0.0284</td><td>0.0255</td><td>0.0262</td><td>0.0269</td><td>0.0266</td><td>0.0334</td><td>0.0228</td></tr><tr><td>SD</td><td>0.0199</td><td>0.0219</td><td>0.0208</td><td>0.0222</td><td>0.0232</td><td>0.0239</td><td>0.0174</td><td>0.0219</td></tr></table>
65
+
66
+ Table 2. The ECANet and SENet model architecture and configuration. Dimensions refer to (channels, frequency, and time). Batch normalization (BN) and Rectified Linear Unit (ReLU). ECA and SE are the efficient channel attention block and the squeeze and excitation block, respectively.
67
+
68
+ <table><tr><td>Layer</td><td colspan="2">Input: 27000 samples</td><td>Output shape</td></tr><tr><td>Front-end</td><td colspan="2">F0 subband</td><td>(45,600)(F,T)</td></tr><tr><td>Post-processing</td><td colspan="2">Add channelConv2D_1BN &amp; ReLU</td><td>(1,45,600)(16,45,600)</td></tr><tr><td>Block1</td><td colspan="2">c1×{Conv2D_3Conv2D_3ECA or SE}</td><td>(32,45,600)</td></tr><tr><td>Block2</td><td colspan="2">c2×{Conv2D_3Conv2D_3ECA or SE}</td><td>(64,23,300)</td></tr><tr><td>Block3</td><td colspan="2">c3×{Conv2D_3Conv2D_3ECA or SE}</td><td>(128,12,150)</td></tr><tr><td>Block4</td><td colspan="2">c4×{Conv2D_3Conv2D_3ECA or SE}</td><td>(256,6,75)</td></tr><tr><td>Output</td><td colspan="2">Avgpool2D(1,1)AngleLinear</td><td>(256,1,1)2</td></tr></table>
69
+
70
+ and $A_{-}$ softmax denotes the A-softmax function. $\mathcal{L}_{\text{hard}}$ is the hard loss.
71
+
72
+ Soft loss is used for knowledge distillation of the shallow and deep networks, and we calculate KL divergence using each shallow and deepest network.
73
+
74
+ $$
75
+ \mathcal {L} _ {\text {s o f t}} = \sum_ {i} ^ {n - 1} K L \left(p ^ {i}, p ^ {n}\right) \tag {2}
76
+ $$
77
+
78
+ $KL$ denotes the KL divergence function and $p^i$ is the output of each layer of the network after the Angellinear classifier. $\mathcal{L}_{soft}$ is the final soft loss.
79
+
80
+ The feature loss is used to balance the difference between the shallow and deepest networks, which can be fed back to the classification output of the shallow network to facilitate the soft loss fit.
81
+
82
+ $$
83
+ \mathcal {L} _ {\text {f e a t u r e}} = \sum_ {i} ^ {n - 1} L 2 \left(\mathcal {F} ^ {i}, \mathcal {F} ^ {n}\right) \tag {3}
84
+ $$
85
+
86
+ $\mathcal{F}^i$ is the output feature of each layer, and $\mathcal{F}^n$ is the output feature of the deepest layer. $L2$ is the L2 loss function, and $\mathcal{L}_{\text{feature}}$ is the final feature loss.
87
+
88
+ $$
89
+ \mathcal {L} = \alpha * \mathcal {L} _ {\text {h a r d}} + (1 - \alpha) * \mathcal {L} _ {\text {s o f t}} + \beta * \mathcal {L} _ {\text {f e a t u r e}} \tag {4}
90
+ $$
91
+
92
+ The loss at training consists of the following three components, $\alpha$ and $\beta$ are hyperparameters to balance the three sources of loss. $\mathcal{L}$ is the final loss.
93
+
94
+ # 3. EXPERIMENTS AND RESULTS
95
+
96
+ # 3.1. Datasets
97
+
98
+ We trained and evaluated models on the ASVspoof 2019 LA and PA datasets. The LA set includes three types of spoofing attacks (TTS, VS, and audio replay), which are divided into 19 attack algorithms (A01-A19). The PA set includes only replay attacks, and there are 27 replay attacks in different acoustic environments. In this paper, EER and the minimum normalized tandem detection cost function (min t-DCF) are used as evaluation metrics for assessing the performance of different systems.
99
+
100
+ # 3.2. Training setup and baseline
101
+
102
+ Front-end features: Inspired by [20], we use the F0 subband as our input features. Firstly, we extract the full frequency band of LPS and use the window function as Blackman's Short Time Fourier Transform (STFT), setting the window length and hop length as 1728 and 130 respectively. we fix the frame number as 600 and get the LPS to feature $865\times 600$ . Finally, we take the first 45 dimensions of the frequency and finally get the front-end F0 subband feature size of $45\times 600$ .
103
+
104
+ Back-end classifier: As shown in Table 2, we use SENet and ECANet as deep neural network classifiers. where the $c_{1} - c_{4}$ vectors corresponding to the 9, 18, 34 layers are $(1,1,1,1)$ , $(2,2,2,2)$ , and $(3,4,6,3)$ , respectively. The 50-layer network is set to three convolutions in the block, with
105
+
106
+ Table 3. Comparison of our self-distillation system with other known single systems.
107
+
108
+ <table><tr><td colspan="4">LA dataset</td><td colspan="4">PA dataset</td></tr><tr><td>Systems</td><td>Front-end</td><td>EER(%)</td><td>t-DCF</td><td>Systems</td><td>Front-end</td><td>EER(%)</td><td>t-DCF</td></tr><tr><td>AASIST [6]</td><td>Raw waveform</td><td>0.83</td><td>0.0275</td><td>T28 [14]</td><td>-</td><td>0.52</td><td>0.1470</td></tr><tr><td>ECANet18(SD) Ours</td><td>F0 subband</td><td>0.88</td><td>0.0295</td><td>SENet34(SD) Ours</td><td>F0 subband</td><td>0.65</td><td>0.0174</td></tr><tr><td>SENet50(SD) Ours</td><td>F0 subband</td><td>1.00</td><td>0.0309</td><td>ECANet34(SD) Ours</td><td>F0 subband</td><td>0.70</td><td>0.0208</td></tr><tr><td>RawGAT-ST [15]</td><td>Raw waveform</td><td>1.06</td><td>0.0340</td><td>SE-Res2Net50 [16]</td><td>Spec</td><td>0.74</td><td>0.0207</td></tr><tr><td>SENet34(SD) Ours</td><td>F0 subband</td><td>1.08</td><td>0.0347</td><td>T10 [14]</td><td>-</td><td>1.08</td><td>0.1598</td></tr><tr><td>FFT-L-SENet [7]</td><td>LPS</td><td>1.14</td><td>0.0368</td><td>T45 [14]</td><td>-</td><td>1.23</td><td>0.1610</td></tr><tr><td>MCG-Res2Net50 [17]</td><td>CQT</td><td>1.78</td><td>0.0520</td><td>T44 [14]</td><td>-</td><td>1.29</td><td>0.1666</td></tr><tr><td>Resnet18-OC-softmax [18]</td><td>LFCC</td><td>2.19</td><td>0.0590</td><td>T53 [14]</td><td>-</td><td>1.66</td><td>0.1729</td></tr><tr><td>ResNet18-GAT-T [5]</td><td>LFB</td><td>4.71</td><td>0.0894</td><td>Capsule [19]</td><td>LFCC</td><td>2.76</td><td>0.0730</td></tr></table>
109
+
110
+ convolution kernels of 1,3,1. The rest is the same as the 34-layer network. For training, we use Adam as the optimizer with parameters $\beta_{1} = 0.9$ , $\beta_{2} = 0.98$ , $\epsilon = 10^{-9}$ and weight decay $10^{-4}$ . The number of the epoch is 32. The two hyperparameters $\alpha$ and $\beta$ are set to 0.7 and 0.3, respectively.
111
+
112
+ # 3.3. Experiment results on LA dataset
113
+
114
+ Table 1 shows the EER and t-DCF of the baseline and self-distillation systems for the ASVspoof 2019 LA dataset. The "SD" denotes self-distillation. According to Table 1, it can be seen that our self-distillation method significantly outperforms the baseline system. In addition, we can observe several interesting phenomena. First, the baseline system "ECANet18" has the best performance with an EER of $1.18\%$ . Even so, the self-distillation method can improve it by $25\%$ . Second, the performance of the different network architectures decreases significantly as the network gets deeper. For example, the EER of the baseline "SENet50" is $1.83\%$ , and the EER of its self-distillation is $1.00\%$ , which is a $45\%$ improvement. Self-distillation effectively reduces the performance degradation of the FSD due to depth and makes its performance smoother for different network depths. Table 3 shows the performance of the most recently known SOTA single system, and our best system is ranked second. In general, the self-distillation method has the effect of fully exploiting the information of different levels of the network. Further, this method has strong generality and improves for different architectures at different depths.
115
+
116
+ # 3.4. Experiment results on PA dataset
117
+
118
+ Table 1 shows the EER and t-DCF of the baseline and the self-distillation system for the ASVspoof 2019 PA dataset. according to Table 1, it can be seen that the self-distillation system outperforms the baseline. The EER of the baseline "SENet34" is $1.14\%$ , which is the worst performance in the overall baseline. However, the "SENet34(SD)" system could obtain an EER of $0.65\%$ , making it the best-performing system. This may be because the shallow network contains more
119
+
120
+ unexplored information, and thus the self-distillation system has a teacher for guidance, which allows shallow networks to mine more fine-grained information as well. In addition, Table 3 compares the Top systems on the PA dataset, and our method can also get the second one. This indicates that the self-distillation method is very effective. Further, our method can also be adapted to different datasets.
121
+
122
+ # 4. CONCLUSIONS
123
+
124
+ In this paper, we propose a novel self-distillation method for FSD tasks. This can further improve the performance of FSD without increasing the load and has generality for networks of different architectures. Specifically, we add classifiers behind the shallow network to build interaction with the deepest network in both feature and prediction dimensions, which enhances shallow networks' ability that captures detailed discriminately information. The feature distillation aims to reduce the difference between deep and shallow features, and the distillation of the prediction dimension is to fully exploit the information in each layer of the network to further optimize the network. We use different architectures of ECANet and SENet, and our experimental results on ASVspoof 2019 LA and PA datasets validate the effectiveness and generality of our approach, significantly improving the performance of the baseline. In the future, we will work on building more low-parameter and highly robust FSD systems.
125
+
126
+ # 5. ACKNOWLEDGEMENTS
127
+
128
+ This work is supported by the National Key Research and Development Plan of China (No.2020AAA0140003), the National Natural Science Foundation of China (NSFC) (No.62201002, No.61972437), Excellent Youth Foundation of Anhui Scientific Committee (No. 2208085J05), Special Fund for Key Program of Science and Technology of Anhui Province (No. 202203a07020008), the Open Research Projects of Zhejiang Lab (NO. 2021KH0AB06) and the Open Projects Program of National Laboratory of Pattern Recognition (NO. 202200014).
129
+
130
+ # 6. REFERENCES
131
+
132
+ [1] Jee-weon Jung, Hee-Soo Heo, Ju-ho Kim, Hye-jin Shim, and Ha-Jin Yu, “Rawnet: Advanced end-to-end deep neural network using raw waveforms for text-independent speaker verification,” Proc. Interspeech 2019, pp. 1268-1272, 2019.
133
+ [2] Hemlata Tak, Jose Patino, Massimiliano Todisco, Andreas Nautsch, Nicholas Evans, and Anthony Larcher, "End-to-end anti-spoofing with rawnet2," in ICASSP 2021. IEEE, 2021, pp. 6369-6373.
134
+ [3] Chenglong Wang, Jiangyan Yi, Jianhua Tao, Haiyang Sun, Xun Chen, Zhengkun Tian, Haoxin Ma, Cunhang Fan, and Ruibo Fu, “Fully automated end-to-end fake audio detection,” in DDAM 2022, 2022, pp. 27-33.
135
+ [4] Hefei Ling, Leichao Huang, Junrui Huang, Baiyan Zhang, and Ping Li, “Attention-Based Convolutional Neural Network for ASV Spoofing Detection,” in Proc. Interspeech, 2021, pp. 4289–4293.
136
+ [5] Hemlata Tak, Jee-weon Jung, Jose Patino, Madhu Kamble, Massimiliano Todisco, and Nicholas Evans, “End-to-end spectro-temporal graph attention networks for speaker verification anti-spoofing and speech deepfake detection,” in Proc. ASVSproof Challenge, 2021, pp. 1–8.
137
+ [6] Jee-weon Jung, Hee-Soo Heo, Hemlata Tak, Hye-jin Shim, Joon Son Chung, Bong-Jin Lee, Ha-Jin Yu, and Nicholas Evans, "Aassist: Audio anti-spoofing using integrated spectro-temporal graph attention networks," in ICASSP 2022. IEEE, 2022, pp. 6367-6371.
138
+ [7] Yuxiang Zhang, Wenchao Wang, and Pengyuan Zhang, “The Effect of Silence and Dual-Band Fusion in Anti-Spoofing System,” in Proc. Interspeech, 2021, pp. 4279–4283.
139
+ [8] Qilong Wang, Banggu Wu, Pengfei Zhu, P. Li, Wangmeng Zuo, and Qinghua Hu, "Eca-net: Efficient channel attention for deep convolutional neural networks," CVPR 2020, pp. 11531-11539, 2020.
140
+ [9] Qiang Xu, Tongtong Song, Longbiao Wang, Hao Shi, Yuqin Lin, Yongjie Lv, Meng Ge, Qiang Yu, and Jianwu Dang, "Self-distillation based on high-level information supervision for compressing end-to-end asr model," Proc. Interspeech 2022, pp. 1716-1720, 2022.
141
+ [10] Bei Liu, Haoyu Wang, Zhengyang Chen, Shuai Wang, and Yanmin Qian, "Self-knowledge distillation via feature enhancement for speaker verification," in ICASSP 2022. IEEE, 2022, pp. 7542-7546.
142
+
143
+ [11] Yixiao Ge, Xiao Zhang, Ching Lam Choi, Ka Chun Cheung, Peipei Zhao, Feng Zhu, Xiaogang Wang, Rui Zhao, and Hongsheng Li, "Self-distillation with batch knowledge ensembling improves imagenet classification," arXiv, 2021.
144
+ [12] JiaCheng Deng, Terui Mao, Diqun Yan, Li Dong, and Mingyu Dong, “Detection of synthetic speech based on spectrum defects,” in DDAM 2022, 2022, pp. 3-8.
145
+ [13] Linfeng Zhang, Jiebo Song, Anni Gao, Jingwei Chen, Chenglong Bao, and Kaisheng Ma, “Be your own teacher: Improve the performance of convolutional neural net via self distillation,” in ICCV 2019. IEEE Computer Society, 2019, pp. 3712–3721.
146
+ [14] Andreas Nautsch, Xin Wang, Nicholas Evans, Tomi H Kinnunen, Ville Vestman, Massimiliano Todisco, Héctor Delgado, Md Sahidullah, Junichi Yamagishi, and Kong Aik Lee, "Asvspoof 2019: spoofing countermeasures for the detection of synthesized, converted and replayed speech," IEEE Transactions on Biometrics, Behavior, and Identity Science, vol. 3, no. 2, pp. 252-265, 2021.
147
+ [15] Hemlata Tak, Jee-Weon Jung, Jose Patino, Madhu Kamble, Massimiliano Todisco, and Nicholas Evans, “End-to-end spectro-temporal graph attention networks for speaker verification anti-spoofing and speech deepfake detection,” in ASVSPoOF 2021, Automatic Speaker Verification and Spoofing Countermeasures Challenge. ISCA, 2021, pp. 1–8.
148
+ [16] Xu Li, Na Li, Chao Weng, Xunying Liu, Dan Su, Dong Yu, and Helen Meng, "Replay and synthetic speech detection with res2net architecture," in ICASSP 2021. IEEE, 2021, pp. 6354-6358.
149
+ [17] Xu Li, Xixin Wu, Hui Lu, Xunying Liu, and Helen Meng, “Channel-Wise Gated Res2Net: Towards Robust Detection of Synthetic Speech Attacks,” in Proc. Interspeech 2021, 2021, pp. 4314–4318.
150
+ [18] You Zhang, Fei Jiang, and Zhiyao Duan, "One-class learning towards synthetic voice spoofing detection," IEEE Signal Processing Letters, vol. 28, pp. 937-941, 2021.
151
+ [19] Anwei Luo, Enlei Li, Yongliang Liu, Xiangui Kang, and Z Jane Wang, “A capsule network based approach for detection of audio spoofing attacks,” in ICASSP 2021. IEEE, 2021, pp. 6359–6363.
152
+ [20] Jun Xue, Cunhang Fan, Zhao Lv, Jianhua Tao, Jiangyan Yi, Chengshi Zheng, Zhengqi Wen, Minmin Yuan, and Shegang Shao, "Audio deepfake detection based on a combination of f0 information and real plus imaginary spectrogram features," in DDAM 2022, 2022, pp. 19-26.
2303.01xxx/2303.01211/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6b1a6e2e39b39768c0df85117c54e40473d4a317651d057b7ae56a397b7d95ac
3
+ size 295585
2303.01xxx/2303.01211/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.01xxx/2303.01212/77689b23-f58b-481c-a4dd-45092d52ee6d_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.01xxx/2303.01212/77689b23-f58b-481c-a4dd-45092d52ee6d_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.01xxx/2303.01212/77689b23-f58b-481c-a4dd-45092d52ee6d_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:007f37dce302758c9260739a61630e4a0198ffccf0fc057bdd92bcb307c2a911
3
+ size 2226886
2303.01xxx/2303.01212/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2303.01xxx/2303.01212/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:96fe8beb2af95f8d9a37624436424644af820ea7a08e5b7e19457ccfce6b5426
3
+ size 1280406
2303.01xxx/2303.01212/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.01xxx/2303.01217/ff487c30-0260-4593-894c-b42a3537b180_content_list.json ADDED
@@ -0,0 +1,1284 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "SYNTHETIC MISINFORMERS: GENERATING AND COMBATING MULTIMODAL MISINFORMATION",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 130,
8
+ 119,
9
+ 867,
10
+ 165
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Stefanos-Iordanis Papadopoulos\\*1,2, Christos Koutlis', Symeon Papadopoulos', and Panagiotis C. Petrantonakis'",
17
+ "bbox": [
18
+ 161,
19
+ 224,
20
+ 831,
21
+ 239
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "$^{1}$ Information Technology Institute, Centre for Research & Technology, Hellas. $^{2}$ School of Electrical & Computer Engineering, Aristotle University of Thessaloniki. {stefpapad,ckoutlis,papadop}@iti.gr, ppetrant@ece.auth.gr",
28
+ "bbox": [
29
+ 248,
30
+ 251,
31
+ 750,
32
+ 292
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "ABSTRACT",
39
+ "text_level": 1,
40
+ "bbox": [
41
+ 449,
42
+ 345,
43
+ 545,
44
+ 359
45
+ ],
46
+ "page_idx": 0
47
+ },
48
+ {
49
+ "type": "text",
50
+ "text": "With the expansion of social media and the increasing dissemination of multimedia content, the spread of misinformation has become a major concern. This necessitates effective strategies for multimodal misinformation detection (MMD) that detect whether the combination of an image and its accompanying text could mislead or misinform. Due to the data-intensive nature of deep neural networks and the labor-intensive process of manual annotation, researchers have been exploring various methods for automatically generating synthetic multimodal misinformation - which we refer to as Synthetic Misinformers - in order to train MMD models. However, limited evaluation on real-world misinformation and a lack of comparisons with other Synthetic Misinformers makes difficult to assess progress in the field. To address this, we perform a comparative study on existing and new Synthetic Misinformers that involves (1) out-of-context (OOC) image-caption pairs, (2) cross-modal named entity inconsistency (NEI) as well as (3) hybrid approaches and we evaluate them against real-world misinformation; using the COSMOS benchmark. The comparative study showed that our proposed CLIP-based Named Entity Swapping can lead to MMD models that surpass other OOC and NEI Misinformers in terms of multimodal accuracy and that hybrid approaches can lead to even higher detection accuracy. Nevertheless, after alleviating information leakage from the COSMOS evaluation protocol, low Sensitivity scores indicate that the task is significantly more challenging than previous studies suggested. Finally, our findings showed that NEI-based Synthetic Misinformers tend to suffer from a unimodal bias, where text-only MMDs can outperform multimodal ones.",
51
+ "bbox": [
52
+ 169,
53
+ 367,
54
+ 826,
55
+ 617
56
+ ],
57
+ "page_idx": 0
58
+ },
59
+ {
60
+ "type": "text",
61
+ "text": "Keywords Misinformation detection, Multimodal learning, Synthetic datasets, Comparative study",
62
+ "bbox": [
63
+ 109,
64
+ 633,
65
+ 759,
66
+ 648
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "1 Introduction",
73
+ "text_level": 1,
74
+ "bbox": [
75
+ 112,
76
+ 672,
77
+ 253,
78
+ 688
79
+ ],
80
+ "page_idx": 0
81
+ },
82
+ {
83
+ "type": "text",
84
+ "text": "The proliferation of misinformation is a growing challenge in today's society, especially with the widespread use of social media and the Internet. Consequently, the automatic detection of misinformation has become an important challenge, with researchers exploring various methods for identifying false claims through natural language processing [1] and detecting manipulated images, such as DeepFakes, through computer vision techniques [2]. The aforementioned challenges primarily focus on individual modalities, necessitating the use of unimodal detection models. However, multimedia content has been shown to be more attention-grabbing and widely disseminated than plain text content [3]. Furthermore, the presence of an image can make a false statement more convincing to individuals [4], emphasizing the importance of multimodal misinformation detection (MMD).",
85
+ "bbox": [
86
+ 109,
87
+ 707,
88
+ 883,
89
+ 818
90
+ ],
91
+ "page_idx": 0
92
+ },
93
+ {
94
+ "type": "text",
95
+ "text": "MMD models, as seen in Fig. 1, are trained to identify whether an image and its accompanying caption in combination are accurate (truthful) or misleading (leading to misinformation). The image at the top of Fig. 1 shows a music festival with littered grounds, while the accompanying caption claims that the event occurred in June 2022, after a speech by environmentalist Greta Thunberg. In reality, even though the image depicts a Glastonbury festival, it was not taken in",
96
+ "bbox": [
97
+ 109,
98
+ 824,
99
+ 883,
100
+ 881
101
+ ],
102
+ "page_idx": 0
103
+ },
104
+ {
105
+ "type": "aside_text",
106
+ "text": "arXiv:2303.01217v1 [cs.MM] 2 Mar 2023",
107
+ "bbox": [
108
+ 22,
109
+ 255,
110
+ 57,
111
+ 700
112
+ ],
113
+ "page_idx": 0
114
+ },
115
+ {
116
+ "type": "page_footnote",
117
+ "text": "*Corresponding author",
118
+ "bbox": [
119
+ 135,
120
+ 897,
121
+ 272,
122
+ 911
123
+ ],
124
+ "page_idx": 0
125
+ },
126
+ {
127
+ "type": "image",
128
+ "img_path": "images/fc939ab35c2feac523f7b4ccb406df1a975f016ade786e76219515cf6e17743c.jpg",
129
+ "image_caption": [
130
+ "(Image, Caption) pairs"
131
+ ],
132
+ "image_footnote": [
133
+ "The aftermath of environmentalist Greta Thunberg's speech at the Glastonbury Music Festival in June 2022"
134
+ ],
135
+ "bbox": [
136
+ 189,
137
+ 130,
138
+ 362,
139
+ 220
140
+ ],
141
+ "page_idx": 1
142
+ },
143
+ {
144
+ "type": "image",
145
+ "img_path": "images/81b212ae31261d334192da6557726d767049e4f5009feb3480ceb461be315931.jpg",
146
+ "image_caption": [
147
+ "Figure 1: Multimodal misinformation detection (MMD) models attempt to identify whether an (Image, Caption) pair is truthful or misleading. The images and captions are taken from reuters.com."
148
+ ],
149
+ "image_footnote": [
150
+ "Image of a damaged railway bridge in Kursk, Russia was taken in May 2022 during the Russia-Ukraine war."
151
+ ],
152
+ "bbox": [
153
+ 189,
154
+ 244,
155
+ 362,
156
+ 325
157
+ ],
158
+ "page_idx": 1
159
+ },
160
+ {
161
+ "type": "text",
162
+ "text": "M M D",
163
+ "text_level": 1,
164
+ "bbox": [
165
+ 622,
166
+ 191,
167
+ 663,
168
+ 287
169
+ ],
170
+ "page_idx": 1
171
+ },
172
+ {
173
+ "type": "image",
174
+ "img_path": "images/5934106fa890c950c0615a9fd992829f8945d7c0b395bec9ec6c4a72924d92d5.jpg",
175
+ "image_caption": [],
176
+ "image_footnote": [],
177
+ "bbox": [
178
+ 694,
179
+ 210,
180
+ 751,
181
+ 267
182
+ ],
183
+ "page_idx": 1
184
+ },
185
+ {
186
+ "type": "image",
187
+ "img_path": "images/da5262032460fdf15d3b04b722f0003d9ef4788bfa40acd39058760aa8d0795b.jpg",
188
+ "image_caption": [],
189
+ "image_footnote": [],
190
+ "bbox": [
191
+ 756,
192
+ 172,
193
+ 792,
194
+ 200
195
+ ],
196
+ "page_idx": 1
197
+ },
198
+ {
199
+ "type": "image",
200
+ "img_path": "images/a099c2ddb3a2df986ab9d10eebff54e500412364ceaad588afc29792f660be23.jpg",
201
+ "image_caption": [],
202
+ "image_footnote": [],
203
+ "bbox": [
204
+ 756,
205
+ 215,
206
+ 794,
207
+ 257
208
+ ],
209
+ "page_idx": 1
210
+ },
211
+ {
212
+ "type": "image",
213
+ "img_path": "images/53d061f1481ff821c3f189d5db78fe9ee0793018cb6a56a7c26a88cf96e457ab.jpg",
214
+ "image_caption": [],
215
+ "image_footnote": [],
216
+ "bbox": [
217
+ 756,
218
+ 273,
219
+ 792,
220
+ 299
221
+ ],
222
+ "page_idx": 1
223
+ },
224
+ {
225
+ "type": "text",
226
+ "text": "June 2022 after Greta Thunberg's speech, but rather in 2015; years before Greta Thunberg became a public figure<sup>2</sup>. This case involves the manipulation of named entities (person and date) in order to frame a target public figure and her associated audience under a negative spotlight. The bottom image illustrates a railway bridge that collapsed into a body of water with the caption claiming that the event took place in Kursk, Russia during the 2022 Russia-Ukraine war. A bridge was indeed damaged that day in Kursk, however, this image was actually taken in 2020 in Murmansk, Russia<sup>3</sup>. This case illustrates the use of an out-of-context image, either by mistake or with the intention to exaggerate or downplay the severity of the event. From the above examples, it is evident that multimodal misinformation takes diverse forms, can be disseminated for various motives, and entails subtle cues that are challenging to discern.",
227
+ "bbox": [
228
+ 109,
229
+ 426,
230
+ 883,
231
+ 539
232
+ ],
233
+ "page_idx": 1
234
+ },
235
+ {
236
+ "type": "text",
237
+ "text": "Researchers have been exploring training deep neural networks for MMD [5]. Considering the data-intensive nature of training deep neural networks for MMD as well as the time-consuming and labor-intensive nature of manual annotation, researchers have been investigating methods for automatically generating synthetic multimodal misinformation, which we refer to as Synthetic Misinformers. These methods include the generation of out-of-context (OOC) image-text pairs or the creation of cross-modal entity inconsistencies (NEI). OOC involves pairing an image with an incongruous caption [6] while NEI involves manipulating the named entities in otherwise truthful captions [7]. Previous works have relied on random sampling [8, 6] or feature-informed sampling methods [9, 10] for generating OOC and in-cluster random sampling [11] or rule-based random sampling [7] for generating NEI. Examples of generated OOC and NEI misinformation can be seen in Fig. 2.",
238
+ "bbox": [
239
+ 109,
240
+ 544,
241
+ 883,
242
+ 669
243
+ ],
244
+ "page_idx": 1
245
+ },
246
+ {
247
+ "type": "text",
248
+ "text": "Nevertheless, despite the efforts of previous studies, those have been limited by evaluating their methods on test sets generated by their own Synthetic Misinformers instead of real-world multimodal misinformation, the only exception being the work presenting the COSMOS dataset [6]. However, the authors of the latter work made use of a problematic evaluation protocol that suffers from information leakage, as seen in Fig.3 (See section 3.1.4 for further details). Additionally, prior studies did not compare their methods with other Synthetic Misinformers. Lack of comparison and evaluation on real-world data hinders the ability of the research community to assess the progress made and determine the current state-of-the-art on MMD. To this end, we replicate multiple Synthetic Misinformers, fine-tune our Transformer-based MMD model (termed DT-Transformer) on the generated data and finally compare them on the COSMOS benchmark [6] that encompasses real-world multimodal misinformation. In our comparative study, we examine whether OOC or NEI are more representative of real-world misinformation. Our hypothesis is that - despite prior research treating them as separate tasks - both OOC and NEI are crucial components of effective MMD. For this reason, we also investigate a range of hybrid Synthetic Misinformers.",
249
+ "bbox": [
250
+ 109,
251
+ 675,
252
+ 883,
253
+ 843
254
+ ],
255
+ "page_idx": 1
256
+ },
257
+ {
258
+ "type": "text",
259
+ "text": "The main contributions of our work can be summarised as follows:",
260
+ "bbox": [
261
+ 112,
262
+ 847,
263
+ 553,
264
+ 861
265
+ ],
266
+ "page_idx": 1
267
+ },
268
+ {
269
+ "type": "header",
270
+ "text": "Papadopoulos S.I. et al.",
271
+ "bbox": [
272
+ 112,
273
+ 42,
274
+ 272,
275
+ 56
276
+ ],
277
+ "page_idx": 1
278
+ },
279
+ {
280
+ "type": "page_footnote",
281
+ "text": "<sup>2</sup>https://www.reuters.com/article/factcheck-glastonbury-greta-idUSL1N2YE1JD",
282
+ "bbox": [
283
+ 132,
284
+ 883,
285
+ 707,
286
+ 897
287
+ ],
288
+ "page_idx": 1
289
+ },
290
+ {
291
+ "type": "page_footnote",
292
+ "text": "<sup>3</sup>https://www.reuters.com/article/factcheck-destroyed-bridge-idUSL2N2WU1CM",
293
+ "bbox": [
294
+ 132,
295
+ 896,
296
+ 700,
297
+ 911
298
+ ],
299
+ "page_idx": 1
300
+ },
301
+ {
302
+ "type": "page_number",
303
+ "text": "2",
304
+ "bbox": [
305
+ 493,
306
+ 935,
307
+ 504,
308
+ 946
309
+ ],
310
+ "page_idx": 1
311
+ },
312
+ {
313
+ "type": "image",
314
+ "img_path": "images/05e9356557104d24d67631d5c5bfae6bc1f6f6ea8bf22c82dac9340b6af30d4c.jpg",
315
+ "image_caption": [
316
+ "Figure 2: Training data generated by two types of Synthetic Misinformers: one creates out-of-context (OOC) misinformation and the other produces cross-modal named entity inconsistencies (NEI). Given a truthful $(I_a, C_a)$ image-caption pair, OOC samples an image $I_x$ creating $(I_x, C_a)$ while NEI manipulates the named entities in $C_a$ to create the falsified $C_f$ . These examples were generated by CLIP-based sampling (CSt-alt) and CLIP-based named entity swapping (CLIP-NESt-alt) for OOC and NEI respectively (See Section 3.2 for more details on these methods). The images and captions are taken from the VisualNews dataset [12]."
317
+ ],
318
+ "image_footnote": [],
319
+ "bbox": [
320
+ 114,
321
+ 94,
322
+ 880,
323
+ 478
324
+ ],
325
+ "page_idx": 2
326
+ },
327
+ {
328
+ "type": "list",
329
+ "sub_type": "text",
330
+ "list_items": [
331
+ "- We perform the first comparative study on Synthetic Misinformers, which offers a comprehensive evaluation of the current state-of-the-art on MMD and can provide guidance for future research in the field.",
332
+ "- We introduce \"CLIP-based Named Entity Swapping\", which demonstrates the highest multimodal accuracy among other OOC and NEI approaches. Additionally, we demonstrate that hybrid Synthetic Misinformers can further enhance detection accuracy.",
333
+ "- Our findings highlight that although NEI-based may outperform OOC methods, they tend to suffer from a unimodal bias, where text-only models can outperform multimodal ones. Moreover, low Sensitivity scores - or Hit Rate for 'Falsified' pairs - indicate that the problem is significantly more challenging than previous works suggested. We offer recommendations on how future studies may address these challenges."
334
+ ],
335
+ "bbox": [
336
+ 156,
337
+ 599,
338
+ 880,
339
+ 734
340
+ ],
341
+ "page_idx": 2
342
+ },
343
+ {
344
+ "type": "text",
345
+ "text": "2 Related Work",
346
+ "text_level": 1,
347
+ "bbox": [
348
+ 112,
349
+ 755,
350
+ 264,
351
+ 771
352
+ ],
353
+ "page_idx": 2
354
+ },
355
+ {
356
+ "type": "text",
357
+ "text": "Studies on multimodal misinformation detection (MMD) have focused on out-of-context image-language pairs (OOCs) or cross-modal named entity inconsistencies (NEI). One common form of multimodal misinformation involves decontextualization; a legitimate image being paired with an out-of-context caption creating a deceptive impression. Consequently, researchers have used random-sampling ([8, 6]) and feature-informed sampling methods ([9, 10]) for generating OOCs. The MAIM dataset was created by randomly sampling image-text pairs collected among image_caption pairs collected from Flickr [8]. The authors developed a joint embedding with the use of deep representation learning and then calculated the image_caption consistency. Similarly, Aneja et al.[6] created the COSMOS training dataset by collecting truthful image-captions pairs from credible news websites and then matching captions with random images to create OOCs. The authors utilized self-supervised deep learning and evaluated their method on the COSMOS",
358
+ "bbox": [
359
+ 109,
360
+ 786,
361
+ 883,
362
+ 912
363
+ ],
364
+ "page_idx": 2
365
+ },
366
+ {
367
+ "type": "header",
368
+ "text": "Papadopoulos S.I. et al.",
369
+ "bbox": [
370
+ 114,
371
+ 42,
372
+ 272,
373
+ 58
374
+ ],
375
+ "page_idx": 2
376
+ },
377
+ {
378
+ "type": "page_number",
379
+ "text": "3",
380
+ "bbox": [
381
+ 493,
382
+ 935,
383
+ 503,
384
+ 946
385
+ ],
386
+ "page_idx": 2
387
+ },
388
+ {
389
+ "type": "text",
390
+ "text": "benchmark, consisting of real-world multimodal misinformation. However, random sampling can not ensure that the image-caption pair will bear any relation and tends to generate easy negative samples that do not resemble realistic multimodal misinformation; capable of deceiving humans. To this end, Luo et al.[9] created the NewsCLIPings datasets by utilizing the large cross-modal CLIP model [13] along with scene-learning and person matching models in order to generate hard negative samples. Similarly, the Twitter-COMMs dataset was created by combining and applying CLIP-based sampling (to generate hard negatives) and in-topic random sampling (to resolve class imbalance) on data collected from $\\text{Twitter}^4$ , related to three topics: climate, COVID, and military vehicles [10].",
391
+ "bbox": [
392
+ 109,
393
+ 90,
394
+ 883,
395
+ 189
396
+ ],
397
+ "page_idx": 3
398
+ },
399
+ {
400
+ "type": "text",
401
+ "text": "On the other hand, NEI involves legitimate images being accompanied by a manipulated caption whose named entities (person, location, event) do not match with the content or the context of the image. The \"Multimodal Entity Image Re-purposing\" (MEIR) dataset was created by clustering image-caption pairs based on 'relatedness' - location proximity, text and image similarity - by using GPS coordinates, word2vec and VGG19 pre-trained on ImageNet respectively. Then they randomly swap named entities of the same type between the current caption and another caption taken from the same cluster [11]. Similarly, the TamperedNews dataset was created by randomly replacing named entities with ones of the same type given that the replaced person is of the same gender and/or country, locations are within high geographical proximity and events belong to the same category (e.g sport competitions or natural disasters) [7].",
402
+ "bbox": [
403
+ 109,
404
+ 194,
405
+ 883,
406
+ 306
407
+ ],
408
+ "page_idx": 3
409
+ },
410
+ {
411
+ "type": "text",
412
+ "text": "The above works either provide internal ablation [11, 7, 9, 10] or comparison with simple baselines [11] and do not compare their methods with other Synthetic Misinformers. Moreover, studies that used some of the above datasets have mostly focused on incremental methodological improvements [14], such as the integration of sentiment [15] or evidence [16] in MMD models. With the exception of [6], prior works have not evaluated their methods on real-world misinformation but on data generated by their own Synthetic Misinformer. Therefore, there is no clear way for the research community to assess progress in the field including the current state-of-the-art on MMD, which s the best way to generate training data for MMD and whether OOC or NEI (or both) are better representative of real-world misinformation. To address this gap, we perform a comparative study on various Synthetic Misinformers - both OOC, NEI and hybrid methods - and evaluate them on real-world multimodal misinformation.",
413
+ "bbox": [
414
+ 109,
415
+ 311,
416
+ 883,
417
+ 436
418
+ ],
419
+ "page_idx": 3
420
+ },
421
+ {
422
+ "type": "text",
423
+ "text": "Finally, Luo et al.[9] argued that methods utilizing named entity manipulations may introduce linguistic biases. To investigate this, the authors trained a text-only BERT [17] model and achieved similar results to the multimodal models used in [7]. However, the latter extracted visual features from an ImageNet pre-trained ResNet and textual features from off-the-self fastText [18]. At the time of writing (2021 [9]), BERT-like models were considered among the state-of-the-art for text-based tasks while fastText was an older architecture, rendering the training protocols significantly different and thus not directly comparable. Therefore, it is not possible to conclude definitively about the existence of unimodal bias based solely on these results. To address this gap, we re-examine whether NEI methods suffer from unimodal bias, within a controlled training framework and an evaluation on real-world misinformation.",
424
+ "bbox": [
425
+ 109,
426
+ 441,
427
+ 883,
428
+ 554
429
+ ],
430
+ "page_idx": 3
431
+ },
432
+ {
433
+ "type": "image",
434
+ "img_path": "images/86ff2696e53b2e643bad216c5760f34b7d59df2f4c55bc683db47c0397f9c2a6.jpg",
435
+ "image_caption": [
436
+ "Figure 3: Examples of falsified $(I, C1, C2)$ triplets from the COSMOS benchmark [6]. Caption $C2$ either provides the correct description for the image (left) or an explanation of why $C1$ is false (right). We consider $C2$ to be information leakage and excluded them from the evaluation protocol."
437
+ ],
438
+ "image_footnote": [],
439
+ "bbox": [
440
+ 124,
441
+ 575,
442
+ 491,
443
+ 813
444
+ ],
445
+ "page_idx": 3
446
+ },
447
+ {
448
+ "type": "image",
449
+ "img_path": "images/5ed9706c9442561e9dba68b4efe36bb587809d3070dc6ed36913fad62182be1e.jpg",
450
+ "image_caption": [],
451
+ "image_footnote": [],
452
+ "bbox": [
453
+ 504,
454
+ 575,
455
+ 870,
456
+ 813
457
+ ],
458
+ "page_idx": 3
459
+ },
460
+ {
461
+ "type": "header",
462
+ "text": "Papadopoulos S.I. et al.",
463
+ "bbox": [
464
+ 112,
465
+ 42,
466
+ 272,
467
+ 56
468
+ ],
469
+ "page_idx": 3
470
+ },
471
+ {
472
+ "type": "page_footnote",
473
+ "text": "4https://twitter.com",
474
+ "bbox": [
475
+ 133,
476
+ 896,
477
+ 290,
478
+ 911
479
+ ],
480
+ "page_idx": 3
481
+ },
482
+ {
483
+ "type": "page_number",
484
+ "text": "4",
485
+ "bbox": [
486
+ 493,
487
+ 935,
488
+ 503,
489
+ 946
490
+ ],
491
+ "page_idx": 3
492
+ },
493
+ {
494
+ "type": "text",
495
+ "text": "3 Methodological Framework",
496
+ "text_level": 1,
497
+ "bbox": [
498
+ 112,
499
+ 89,
500
+ 380,
501
+ 107
502
+ ],
503
+ "page_idx": 4
504
+ },
505
+ {
506
+ "type": "text",
507
+ "text": "3.1 Problem Formulation",
508
+ "text_level": 1,
509
+ "bbox": [
510
+ 112,
511
+ 119,
512
+ 305,
513
+ 133
514
+ ],
515
+ "page_idx": 4
516
+ },
517
+ {
518
+ "type": "text",
519
+ "text": "In this study we compare numerous Synthetic Misinformers, methods for generating synthetic multimodal misinformation, and evaluate them on real-world multimodal misinformation. The problem is defined as a binary classification task, where an (I,C) image-caption pair is either truthful or falsified. Truthful captions are collected from credible news sources while falsified ones are produced by a Synthetic Misinformer. Each (I,C) is encoded by a visual encoder $\\mathrm{E}_{\\mathrm{V}}(\\cdot)$ and a textual encoder $\\mathrm{E}_{\\mathrm{T}}(\\cdot)$ that produce the corresponding vector representations $\\mathbf{v}_I \\in \\mathbb{R}^{e \\times 1}$ and $\\mathbf{t}_C \\in \\mathbb{R}^{e \\times 1}$ for the image $I$ and caption $C$ respectively, where $e$ is the encoder's embedding dimension. The extracted features are concatenated and passed through the multimodal detection deep learning neural network $\\mathrm{D}(\\cdot)$ - referred to as the Detector - whose parameters have to be optimized and its hyper-parameters tuned. Finally, the predictions of the trained Detector will be evaluated against a test set consisting of real-world multimodal misinformation. In order to accurately and fairly compare various Misinformers, they need to share a common training and evaluation framework. Therefore, the (1) Encoder, (2) Detector, (3) optimization and hyper-parameter tuning and the (4) evaluation process should remain constant during the comparative study while only the Misinformer method changes. This framework ensures that any change in performance will be the result of the Misinformer and not other factors. A high-level illustration of the proposed workflow can be seen in Fig. 4. In this section, we address each aspect individually.",
520
+ "bbox": [
521
+ 111,
522
+ 143,
523
+ 885,
524
+ 339
525
+ ],
526
+ "page_idx": 4
527
+ },
528
+ {
529
+ "type": "text",
530
+ "text": "3.1.1 Encoder",
531
+ "text_level": 1,
532
+ "bbox": [
533
+ 112,
534
+ 352,
535
+ 225,
536
+ 366
537
+ ],
538
+ "page_idx": 4
539
+ },
540
+ {
541
+ "type": "text",
542
+ "text": "After generating a training dataset with a Synthetic Misinformer, we use an Encoder for extracting visual and textual features from all images and captions that will be used to train the Detector. The first works on MMD mostly relied on convolutional neural networks pre-trained on ImageNet to extract features from images (namely VGG-19 [8, 11] and ResNet50 [7, 6]) and word embeddings to extract features from captions (namely word2vec [8, 11] and fastText [7]). More recent approaches, have utilized large-scale multimodal and cross-modal models, namely CLIP [9, 10, 19], VisualBERT [9] and VinVL [19] to extract both their visual and textual features. In the aforementioned works, CLIP [13] tended to outperform other cross-modal methods (VinVL and VisualBERT) for MMD [9, 10, 19].",
543
+ "bbox": [
544
+ 111,
545
+ 376,
546
+ 883,
547
+ 474
548
+ ],
549
+ "page_idx": 4
550
+ },
551
+ {
552
+ "type": "text",
553
+ "text": "Contrastive language-image pre-training, or CLIP in short, is a cross-modal model trained to match the most relevant text to an image. Developed by Radford et al.[13] and trained on a large-scale dataset - approximately $4 \\times 10^{8}$ image-text pairs - CLIP has proven to have powerful zero-shot capabilities; meaning that it performs well on tasks and domains that it was not explicitly trained for. In our study, we first perform an experiment using CLIP ViT-B/32 on the NewsCLIPings datasets, in order to compare our training pipeline with [9] but we also utilize the updated and improved<sup>5</sup> CLIP ViT-L/14 version in the comparative study. CLIP ViT-B/32 produces an embeddings vector of size $e = 512$ while ViT-L/14 produces $e = 768$ . We use CLIP off-the-shelf<sup>6</sup> and do not fine-tune it further due to computational resource constraints. Luo et al.[9] experimented with fine-tuning the whole or the top layers of CLIP-ResNet-50, but their results were mixed; fine-tuning could not consistently outperform the \"frozen\", off-the-shelf, CLIP in all cases. There have been proposed methods for robustly fine-tuning large-scale cross-modal neural networks [20] but they are outside the scope of this study; since we do not attempt to reach the highest possible performance but primarily focus on providing a fair comparative study of various Synthetic Misinformers.",
554
+ "bbox": [
555
+ 111,
556
+ 479,
557
+ 885,
558
+ 647
559
+ ],
560
+ "page_idx": 4
561
+ },
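As a reference, extracting "frozen" CLIP ViT-L/14 features with the openai/CLIP package (footnote 6) might look like the following sketch; the image path and caption are placeholders.

```python
import clip   # https://github.com/openai/CLIP
import torch
from PIL import Image

device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-L/14", device=device)   # off-the-shelf weights

# placeholder inputs: one image and its caption
image = preprocess(Image.open("example.jpg")).unsqueeze(0).to(device)
tokens = clip.tokenize(["An example news caption."], truncate=True).to(device)

with torch.no_grad():                     # CLIP stays frozen (no fine-tuning)
    v_I = model.encode_image(image)       # shape (1, 768) for ViT-L/14
    t_C = model.encode_text(tokens)       # shape (1, 768)
```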
562
+ {
563
+ "type": "text",
564
+ "text": "3.1.2 Detector",
565
+ "text_level": 1,
566
+ "bbox": [
567
+ 112,
568
+ 659,
569
+ 227,
570
+ 672
571
+ ],
572
+ "page_idx": 4
573
+ },
574
+ {
575
+ "type": "text",
576
+ "text": "Previous works either calculate a cross-modal similarity score [8, 7, 6, 14] or define binary classifier on top [9, 15, 11, 10]. Few works have also added fully-connected layers to analyse the extracted features before the final classification layer [15, 11]. Instead, we consider the Transformer architecture [21] is an even more appropriate choice for the Detector. Our Transformer-based Detector (DT-Transformer in short), first concatenates the encoded captions and images and pass them through a Transformer architecture consisting of $L$ layers that have $h$ attention heads of embedding dimension $d$ . Its output is then passed through a normalization layer, a dropout layer, a fully connected layer which is activated by the GELU function, a second dropout layer and a final binary classification layer.",
577
+ "bbox": [
578
+ 111,
579
+ 683,
580
+ 885,
581
+ 782
582
+ ],
583
+ "page_idx": 4
584
+ },
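A plausible PyTorch rendering of the DT-Transformer described above is sketched below. The exact projections and layer ordering of the authors' implementation may differ; in particular, projecting the concatenated features to the Transformer dimension $d$ and treating them as a single token is our assumption.

```python
import torch
import torch.nn as nn

class DTTransformer(nn.Module):
    def __init__(self, emb_dim: int = 768, d: int = 128, h: int = 2,
                 L: int = 1, p_drop: float = 0.1):
        super().__init__()
        self.proj = nn.Linear(2 * emb_dim, d)            # fuse [t_C ; v_I] -> d
        layer = nn.TransformerEncoderLayer(d_model=d, nhead=h,
                                           dropout=p_drop, batch_first=True)
        self.transformer = nn.TransformerEncoder(layer, num_layers=L)
        self.norm = nn.LayerNorm(d)
        self.drop1 = nn.Dropout(p_drop)
        self.fc = nn.Linear(d, d)
        self.act = nn.GELU()                              # GELU-activated FC layer
        self.drop2 = nn.Dropout(p_drop)
        self.head = nn.Linear(d, 1)                       # binary classification layer

    def forward(self, v_img: torch.Tensor, t_cap: torch.Tensor) -> torch.Tensor:
        x = self.proj(torch.cat([t_cap, v_img], dim=-1)).unsqueeze(1)  # (B, 1, d)
        x = self.transformer(x).squeeze(1)
        x = self.drop1(self.norm(x))
        x = self.drop2(self.act(self.fc(x)))
        return self.head(x).squeeze(-1)                   # one logit per pair
```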
585
+ {
586
+ "type": "text",
587
+ "text": "3.1.3 Optimization",
588
+ "text_level": 1,
589
+ "bbox": [
590
+ 112,
591
+ 794,
592
+ 261,
593
+ 810
594
+ ],
595
+ "page_idx": 4
596
+ },
597
+ {
598
+ "type": "text",
599
+ "text": "Given that we define MMD as a binary classification task, the Detector is optimized based on the binary cross entropy loss function. Due to differences in distribution, scale, complexity and other factors, we assume that the Detector may require different hyper-parameters to perform optimally with different training datasets. To this end, we tune the Detector's hyper-parameters based on the following grid search: $L \\in \\{1,4\\}$ transformer layers of $d \\in \\{128,1024\\}$",
600
+ "bbox": [
601
+ 111,
602
+ 818,
603
+ 883,
604
+ 876
605
+ ],
606
+ "page_idx": 4
607
+ },
608
+ {
609
+ "type": "header",
610
+ "text": "Papadopoulos S.I. et al.",
611
+ "bbox": [
612
+ 112,
613
+ 42,
614
+ 271,
615
+ 56
616
+ ],
617
+ "page_idx": 4
618
+ },
619
+ {
620
+ "type": "page_footnote",
621
+ "text": "<sup>5</sup>https://huggingface.co/sentence-transformersclip-ViT-L-14",
622
+ "bbox": [
623
+ 132,
624
+ 883,
625
+ 591,
626
+ 897
627
+ ],
628
+ "page_idx": 4
629
+ },
630
+ {
631
+ "type": "page_footnote",
632
+ "text": "$^{6}$ https://github.com/openai/CLIP",
633
+ "bbox": [
634
+ 132,
635
+ 896,
636
+ 375,
637
+ 911
638
+ ],
639
+ "page_idx": 4
640
+ },
641
+ {
642
+ "type": "page_number",
643
+ "text": "5",
644
+ "bbox": [
645
+ 493,
646
+ 935,
647
+ 504,
648
+ 946
649
+ ],
650
+ "page_idx": 4
651
+ },
652
+ {
653
+ "type": "text",
654
+ "text": "dimensions, $h \\in \\{2,8\\}$ attention heads, and a learning rate of $lr \\in \\{1e - 4,5e - 5\\}$ . The dropout rate is constant at 0.1 and the batch size at 512. The selected hyper-parameter grid amounts to 16 experiments for each Synthetic Misinformer dataset. This is clearly not exhaustive, but adding any more options would exponentially increase the required time and computational resources. Instead, our aim is to give the chance to each method to reach an adequate and representative performance, even if it is not the globally optimal that would be possible through exhaustive optimization. The Detector is optimized by the ADAM optimizer for a maximum of 30 epoch with early-stopping at 10 epochs. At the end, we retrieve the checkpoint with the highest validation accuracy and use it for the final evaluation on the test set.",
655
+ "bbox": [
656
+ 109,
657
+ 90,
658
+ 883,
659
+ 189
660
+ ],
661
+ "page_idx": 5
662
+ },
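The grid search above can be expressed compactly as follows. `train_fn` stands for a hypothetical training routine (Adam, binary cross-entropy, dropout 0.1, batch size 512, at most 30 epochs with early stopping at 10) that returns the best validation accuracy for a given configuration.

```python
from itertools import product

# 2 x 2 x 2 x 2 = 16 configurations per Synthetic Misinformer dataset
GRID = list(product(
    [1, 4],          # L: transformer layers
    [128, 1024],     # d: embedding dimension
    [2, 8],          # h: attention heads
    [1e-4, 5e-5],    # lr: learning rate
))

def run_grid(train_fn):
    """Run every configuration and keep the one with the best validation accuracy."""
    best_acc, best_cfg = 0.0, None
    for L, d, h, lr in GRID:
        val_acc = train_fn(L=L, d=d, h=h, lr=lr)
        if val_acc > best_acc:
            best_acc, best_cfg = val_acc, (L, d, h, lr)
    return best_cfg, best_acc
```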
663
+ {
664
+ "type": "text",
665
+ "text": "3.1.4 Evaluation protocol",
666
+ "text_level": 1,
667
+ "bbox": [
668
+ 112,
669
+ 207,
670
+ 305,
671
+ 222
672
+ ],
673
+ "page_idx": 5
674
+ },
675
+ {
676
+ "type": "text",
677
+ "text": "With the exception of Aneja et al.[6], previous works on Synthetic Misinformers have not evaluated their methods on real-world misinformation. Instead, they first propose a method for generating multimodal misinformation which they apply on a body of truthful image-captions pairs. After generating the dataset, the authors split it into training, validation and test sets and report the best performance on the test set. This only tells us how a model trained on a synthetic dataset will perform on a dataset generated by the same process, not how accurately it could potentially detect misinformation \"in the wild\". Moreover, prior works have not provided direct comparison with other Synthetic Misinformers and therefore, we can not assess progress or the current state-of-the-art in the field. To the best of our knowledge, COSMOS [6] is the only publicly available, manually annotated benchmark for MMD<sup>7</sup>. The COSMOS benchmark - also used in the MMSys'21 \"Grand Challenge on Detecting Cheapfakes\" [22] - consists of 850 image-caption pairs from the fact-checking website SNOPEs<sup>8</sup> and 850 truthful image-caption pairs from credible news sources. Therefore, we will use the COSMOS evaluation set for our study.",
678
+ "bbox": [
679
+ 109,
680
+ 232,
681
+ 883,
682
+ 383
683
+ ],
684
+ "page_idx": 5
685
+ },
686
+ {
687
+ "type": "text",
688
+ "text": "Nevertheless, it is important to highlight certain problematic aspects of the evaluation protocol used in [6]. During evaluation, the authors provide a triplet of an image and two captions $(I,C1,C2)$ and make a threshold-based decision by examining $C1 - C2$ similarity and their overlap with the objects in the image. First, this protocol does not reflect how we encounter real-world misinformation, with an image usually being accompanied by a single caption or a small paragraph (e.g. on Twitter). More importantly, $C2$ in falsified instances is either an explanation of why $C1$ is false or the truthful caption for the image. In two examples taken from the COSMOS benchmark and shown in Fig. 3, $C2$ reads \"Toronto Raptors just unveiled Black Lives Matter Buses and fans are impressed\" and \"Photograph showing a 'trampoline bridge' in Paris is a concept design for an architecture competition\". This is a clear case of information leakage and does not reflect how we encounter misinformation in the real world. Fact checkers are not usually presented with two separate bodies of texts and have to decide which is the correct one. Instead, they are usually presented with an image and a single body of text and they have to determine whether the text is truthful and whether the text accurately matches and describes the image. It is important to note that these are not outlier cases. We have manually examined hundreds from the COSMOS benchmark and Caption 2 suffers from the same problem. Therefore, we do not consider the $88\\%$ detection accuracy reported by the authors to be representative [6]. For that reason, in this study, we only use the (I,C1) tuples from the COSMOS benchmark.",
689
+ "bbox": [
690
+ 109,
691
+ 390,
692
+ 883,
693
+ 598
694
+ ],
695
+ "page_idx": 5
696
+ },
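Evaluation on the retained $(I, C1)$ tuples then reduces to computing the overall accuracy and the per-class hit rates (Specificity for Truthful, Sensitivity for Falsified) reported in the result tables. The sketch below assumes a detector producing one logit per pair and the label convention 0 = truthful, 1 = falsified; both are illustrative.

```python
import torch

@torch.no_grad()
def evaluate(detector, loader, device="cpu"):
    """Overall accuracy plus per-class hit rates on (image, caption, label) batches."""
    detector.eval()
    correct, total = {0: 0, 1: 0}, {0: 0, 1: 0}   # 0 = Truthful, 1 = Falsified
    for v_img, t_cap, labels in loader:
        logits = detector(v_img.to(device), t_cap.to(device))
        preds = (torch.sigmoid(logits) > 0.5).long().cpu()
        for cls in (0, 1):
            mask = labels == cls
            total[cls] += int(mask.sum())
            correct[cls] += int((preds[mask] == cls).sum())
    specificity = correct[0] / max(total[0], 1)    # Truthful hit rate
    sensitivity = correct[1] / max(total[1], 1)    # Falsified hit rate
    accuracy = (correct[0] + correct[1]) / max(total[0] + total[1], 1)
    return accuracy, specificity, sensitivity
```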
697
+ {
698
+ "type": "text",
699
+ "text": "3.2 Synthetic Misinformers",
700
+ "text_level": 1,
701
+ "bbox": [
702
+ 112,
703
+ 617,
704
+ 318,
705
+ 633
706
+ ],
707
+ "page_idx": 5
708
+ },
709
+ {
710
+ "type": "text",
711
+ "text": "We define three types of Synthetic Misinformer, namely methods that generate: (1) out-of-context (OOC) image-captions pairs, (2) cross-modal entity inconsistency (NEI) where certain named entities in the caption are tampered and do not correspond with the content of the image, and (3) hybrid approaches that combine both OOC and NEI misinformation. Table 1 displays the number of training samples produced by each Synthetic Misinformer (OOC or NEI) as well as the number of Truthful pairs.",
712
+ "bbox": [
713
+ 109,
714
+ 643,
715
+ 883,
716
+ 715
717
+ ],
718
+ "page_idx": 5
719
+ },
720
+ {
721
+ "type": "text",
722
+ "text": "3.2.1 Out-of-context misinformation",
723
+ "text_level": 1,
724
+ "bbox": [
725
+ 112,
726
+ 732,
727
+ 380,
728
+ 746
729
+ ],
730
+ "page_idx": 5
731
+ },
732
+ {
733
+ "type": "text",
734
+ "text": "In order to create out-of-context (OOC) image-caption pairs, we first need a dataset of truthful pairs $(I_a, C_a)$ and then a method for sampling an OOC image $I_x$ or an OOC caption $C_x$ . In this study we make use of the VisualNews dataset [12] that consists of 1,259,732 truthful $(I_a, C_a)$ pairs collected by four credible sources (The Washington Post, USA Today, The Guardian and the BBC) regarding 159 topics, namely: art and culture, world, law and crime, international relations, science and technology sports, environment, elections and others. We use the VisualNews training set to generate training data and the VisualNews validation set to generate the validation data in order to avoid overlapping samples and information leakage. We experiment with the following OOC Synthetic Misinformer methods:",
735
+ "bbox": [
736
+ 109,
737
+ 757,
738
+ 883,
739
+ 856
740
+ ],
741
+ "page_idx": 5
742
+ },
743
+ {
744
+ "type": "header",
745
+ "text": "Papadopoulos S.I. et al.",
746
+ "bbox": [
747
+ 112,
748
+ 42,
749
+ 271,
750
+ 56
751
+ ],
752
+ "page_idx": 5
753
+ },
754
+ {
755
+ "type": "page_footnote",
756
+ "text": "7The DARPA SemaFor is another benchmark consisting of real-world misinformation and was used in [10, 19] - consisting of 200 falsified instances - but we were unable to obtain access.",
757
+ "bbox": [
758
+ 112,
759
+ 869,
760
+ 885,
761
+ 897
762
+ ],
763
+ "page_idx": 5
764
+ },
765
+ {
766
+ "type": "page_footnote",
767
+ "text": "<sup>8</sup>https://www.snopes.com/",
768
+ "bbox": [
769
+ 135,
770
+ 896,
771
+ 320,
772
+ 911
773
+ ],
774
+ "page_idx": 5
775
+ },
776
+ {
777
+ "type": "page_number",
778
+ "text": "6",
779
+ "bbox": [
780
+ 493,
781
+ 936,
782
+ 504,
783
+ 946
784
+ ],
785
+ "page_idx": 5
786
+ },
787
+ {
788
+ "type": "table",
789
+ "img_path": "images/177624f11f41d356145657a7a2e462c8f9bd4467c670bbf3bc9344b1f26dcd77.jpg",
790
+ "table_caption": [
791
+ "Table 1: Number of instances per class for training datasets generated by different Synthetic Mismformers."
792
+ ],
793
+ "table_footnote": [],
794
+ "table_body": "<table><tr><td>Synthetic Misinformer</td><td>Truthful</td><td>OOC</td><td>NEI</td></tr><tr><td>VisualNews-Training</td><td>1,007,785</td><td>-</td><td>-</td></tr><tr><td>NC/Bal</td><td>35,536</td><td>35,536</td><td>-</td></tr><tr><td>NC/I-T</td><td>226,564</td><td>226,564</td><td>-</td></tr><tr><td>NC/T-T</td><td>258,036</td><td>258,036</td><td>-</td></tr><tr><td>Random sampling</td><td>1,007,744</td><td>1,007,744</td><td>-</td></tr><tr><td>CLIP-based sampling</td><td>1,007,744</td><td>1,007,744</td><td>-</td></tr><tr><td>MEIR</td><td>82,156</td><td>-</td><td>57,940</td></tr><tr><td>R-NESt</td><td>1,007,744</td><td>-</td><td>924,586</td></tr><tr><td>CLIP-NESt-C</td><td>1007744</td><td>-</td><td>835,537</td></tr><tr><td>CLIP-NESt-I</td><td>1007744</td><td>-</td><td>859,618</td></tr><tr><td>CLIP-NESt-alt</td><td>1007744</td><td>-</td><td>847,693</td></tr><tr><td>R-NESt + NC/I-T</td><td>226,564</td><td>226,564</td><td>226,564</td></tr><tr><td>R-NESt + CSt-alt</td><td>1,007,744</td><td>1,007,744</td><td>924,586</td></tr><tr><td>CLIP-NESt-alt + CSt-alt</td><td>1,007,744</td><td>982,645</td><td>847,693</td></tr></table>",
795
+ "bbox": [
796
+ 284,
797
+ 109,
798
+ 715,
799
+ 354
800
+ ],
801
+ "page_idx": 6
802
+ },
803
+ {
804
+ "type": "list",
805
+ "sub_type": "text",
806
+ "list_items": [
807
+ "- Random sampling by caption (RS-C): for every actual $(I_a, C_a)$ pair, sample a random caption $C_x$ from the whole corpus. This process was used to generate the COSMOS training set[6] but we apply it on the VisualNews dataset instead.",
808
+ "- In-topic random sampling by caption (RSt-C): for every $(I_a, C_a)$ , sample a random caption $C_x$ of the same topic as $C_a$ (e.g. international politics, elections, environment, etc). Using candidates from the same topic can increase the chance of relevance. A similar process was used in [10] but only as a means to mitigate class imbalance. We also define in-topic random sampling by image (RSt-I): sampling a random image $I_x$ for an actual $(I_a, C_a)$ and in-topic random sampling by alternating between image or caption (RSt-alt): choose whether to sample an $I_x$ or an $C_x$ at random (determined by a random selection function).",
809
+ "- Similarly, we implement in-topic CLIP-based sampling by caption to caption similarity (CSt-C), by image-image similarity (CSt-I) or by alternating between image-image and caption-caption similarity (CSt-alt): calculate the most similar item $(I_x$ or $C_x)$ based on features extracted from CLIP. As candidates we define $(I_a, C_a)$ pairs from the same topic and use the cosine similarity as the metric of similarity.",
810
+ "- Finally, we experiment with three versions of the NewsCLIPings (NC) datasets, namely (1) NewsCLIPings Semantics / CLIP Text-Image (NC/T-I), (2) NewsCLIPings Semantics / CLIP Text-Text (NC/T-T) and the (3) NewsCLIPings Merged / Balanced (NC/Bal) as provided by the authors[9]."
811
+ ],
812
+ "bbox": [
813
+ 156,
814
+ 378,
815
+ 883,
816
+ 617
817
+ ],
818
+ "page_idx": 6
819
+ },
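The sketch below illustrates the in-topic CLIP-based sampling by caption-caption similarity (CSt-C); the data layout (a feature matrix plus a topic label per pair) and the function name are illustrative assumptions. The random variants (RSt) follow the same structure, with the argmax replaced by a uniform random choice among the in-topic candidates.

```python
import numpy as np

def sample_ooc_caption(idx: int, caption_feats: np.ndarray, topics: np.ndarray):
    """Return the index of the most similar in-topic caption to use as C_x for pair idx."""
    candidates = np.where(topics == topics[idx])[0]
    candidates = candidates[candidates != idx]        # exclude the pair itself
    if len(candidates) == 0:
        return None                                   # no in-topic candidate available
    query = caption_feats[idx]
    cands = caption_feats[candidates]
    # cosine similarity between the query caption and all in-topic candidates
    sims = cands @ query / (np.linalg.norm(cands, axis=1) * np.linalg.norm(query) + 1e-8)
    return candidates[int(np.argmax(sims))]           # hardest negative caption
```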
820
+ {
821
+ "type": "text",
822
+ "text": "3.2.2 Cross-modal named entity inconsistency",
823
+ "text_level": 1,
824
+ "bbox": [
825
+ 112,
826
+ 631,
827
+ 447,
828
+ 647
829
+ ],
830
+ "page_idx": 6
831
+ },
832
+ {
833
+ "type": "text",
834
+ "text": "In order to generate image-caption pairs that suffer from cross-modal named entity inconsistency we need truthful $(I_a, C_a)$ pairs, a method for sampling and swapping the named entities in $C_a$ in order to create the falsified $C_f$ . We use the VisualNews and experiment with the following methods:",
835
+ "bbox": [
836
+ 111,
837
+ 656,
838
+ 883,
839
+ 699
840
+ ],
841
+ "page_idx": 6
842
+ },
843
+ {
844
+ "type": "list",
845
+ "sub_type": "text",
846
+ "list_items": [
847
+ "- In-topic random named entity swapping (R-NESt): for every $(I_a, C_a)$ , identify all entities in $C_a$ and replace them with randomly sampled entities of the same type (person, location, organization, date, event etc) that belong to the same topic as $C_a$ .",
848
+ "- The MEIR dataset as was provided by the authors $^{10}$ , but we extract features from CLIP ViT L/14 instead of using VGG19 as in the original paper [11].",
849
+ "- We propose in-topic CLIP-based named entity swapping (CLIP-NESt) by image-image similarity (CLIP-NESt-I), caption-caption similarity (CLIP-NESt-C) or alternating between image-image and caption-caption similarity (CLIP-NESt-alt): for every $(I_a, C_a)$ pair, identify the most similar $(I_x, C_x)$ pair based on features extracted from CLIP and swap the entities of the same type between $C_a$ and $C_x$ in order to create $C_f$ . $C_x$ should have at least one named entity of the same type as $C_a$ but be a different named entity (avoid swapping one named entity with itself), otherwise we select the next most similar pair. As candidates we define"
850
+ ],
851
+ "bbox": [
852
+ 156,
853
+ 709,
854
+ 883,
855
+ 873
856
+ ],
857
+ "page_idx": 6
858
+ },
859
+ {
860
+ "type": "header",
861
+ "text": "Papadopoulos S.I. et al.",
862
+ "bbox": [
863
+ 112,
864
+ 42,
865
+ 272,
866
+ 58
867
+ ],
868
+ "page_idx": 6
869
+ },
870
+ {
871
+ "type": "page_footnote",
872
+ "text": "$^{9}$ https://github.com/g-luo/news_clippings",
873
+ "bbox": [
874
+ 130,
875
+ 883,
876
+ 442,
877
+ 898
878
+ ],
879
+ "page_idx": 6
880
+ },
881
+ {
882
+ "type": "page_footnote",
883
+ "text": "$^{10}$ https://github.com/Ekraam/MEIR",
884
+ "bbox": [
885
+ 130,
886
+ 898,
887
+ 375,
888
+ 911
889
+ ],
890
+ "page_idx": 6
891
+ },
892
+ {
893
+ "type": "page_number",
894
+ "text": "7",
895
+ "bbox": [
896
+ 493,
897
+ 935,
898
+ 504,
899
+ 946
900
+ ],
901
+ "page_idx": 6
902
+ },
903
+ {
904
+ "type": "text",
905
+ "text": "image-caption pairs from the same topic and use the cosine similarity as the metric of similarity. We use the SpaCy Named Entity Recognizer (NER) and specifically the en_core_web_trf module, which exhibits 0.90 F1-score for $\\mathrm{NER^{11}}$ . Our rationale for proposing CLIP-NESt, is that CLIP-based similarity will retrieve a semantically or thematically similar caption and as a result, their entities are more likely to be related in some aspect. Therefore, swapping entities between similar captions will create more plausible misinformation than randomly sampled ones.",
906
+ "bbox": [
907
+ 169,
908
+ 90,
909
+ 885,
910
+ 175
911
+ ],
912
+ "page_idx": 7
913
+ },
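The entity-swapping step of CLIP-NESt can be sketched as follows, using spaCy's en_core_web_trf model for NER. Retrieval of the most similar in-topic pair works as in the CSt sketch above, and everything apart from the spaCy calls is an illustrative assumption rather than the authors' code.

```python
import spacy

nlp = spacy.load("en_core_web_trf")   # transformer-based NER (footnote 11)

def swap_entities(caption_a: str, caption_x: str):
    """Create a falsified caption C_f by swapping same-type entities of C_a with those of C_x."""
    ents_a = nlp(caption_a).ents
    ents_x = nlp(caption_x).ents
    falsified, swapped = caption_a, False
    for ent_a in ents_a:
        # replacement: an entity of the same type but with a different surface form
        repl = next((e.text for e in ents_x
                     if e.label_ == ent_a.label_ and e.text != ent_a.text), None)
        if repl is not None:
            falsified = falsified.replace(ent_a.text, repl)
            swapped = True
    return falsified if swapped else None   # caller falls back to the next most similar pair
```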
914
+ {
915
+ "type": "text",
916
+ "text": "3.2.3 Hybrid methods",
917
+ "text_level": 1,
918
+ "bbox": [
919
+ 112,
920
+ 186,
921
+ 282,
922
+ 203
923
+ ],
924
+ "page_idx": 7
925
+ },
926
+ {
927
+ "type": "text",
928
+ "text": "We also experiment with methods that combine both OOC and NEI misinformation, which we refer to as hybrid methods. We follow the same training process but instead of binary classification, we train the Detector for multi-class classification with the use of the cross-entropy loss function. The Detector is trained to classify $(I,C)$ pairs into three classes: Truthful, NEI or 00C. During evaluation on the COSMOS dataset, NEI and 00C predictions are set to Falsified pairs; since COSMOS is a binary dataset. In this study, we combine few of the best performing methods: (1) R-NESt + CSt-alt, (2) R-NESt + NC/I-T and (3) CLIP-NESt-alt + CSt-alt. Our motivation for exploring hybrid methods is that real-world multimodal misinformation may not be adequately represented by OOC or NEI alone, but may require a combination of both. OOC and MEI methods produce balanced datasets, since they create one falsified pair for every truthful pair. On the other hand, some hybrid methods showcase imbalanced classes, in this case, we apply random down-sampling.",
929
+ "bbox": [
930
+ 109,
931
+ 210,
932
+ 885,
933
+ 352
934
+ ],
935
+ "page_idx": 7
936
+ },
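For the hybrid setting, the only changes to the pipeline are a three-way output and the collapse of NEI/OOC predictions into a single Falsified class at evaluation time, as sketched below (the class indices are an assumption).

```python
import torch
import torch.nn.functional as F

TRUTHFUL, NEI, OOC = 0, 1, 2   # assumed class indices

def hybrid_loss(logits: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
    """Multi-class cross-entropy over {Truthful, NEI, OOC} during training."""
    return F.cross_entropy(logits, targets)

def to_binary(pred_classes: torch.Tensor) -> torch.Tensor:
    """Map 3-class predictions to COSMOS labels: 0 = Truthful, 1 = Falsified."""
    return (pred_classes != TRUTHFUL).long()   # NEI and OOC both count as Falsified
```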
937
+ {
938
+ "type": "image",
939
+ "img_path": "images/3b14b30979bac365bcb2db185620a236797752e243e0da8b49f4a27e7f61bfcc.jpg",
940
+ "image_caption": [
941
+ "Figure 4: High-level overview of the proposed workflow. Truthful $(I_a, C_a)$ image-caption pairs are manipulated by a Synthetic Misinformer which generates Falsified pairs. Truthful and Falsified pairs are encoded by CLIP ViT-L/14 and used to train the Detector who is optimized for binary classification when using one OOC or NEI dataset, or for multi-class classification when combining one OOC and one NEI method. Here, we showcase the hybrid Synthetic Misinformer $CLIP-NESt-alt + CST-alt$ ."
942
+ ],
943
+ "image_footnote": [],
944
+ "bbox": [
945
+ 117,
946
+ 369,
947
+ 880,
948
+ 465
949
+ ],
950
+ "page_idx": 7
951
+ },
952
+ {
953
+ "type": "text",
954
+ "text": "4 Experimental Results",
955
+ "text_level": 1,
956
+ "bbox": [
957
+ 112,
958
+ 569,
959
+ 328,
960
+ 585
961
+ ],
962
+ "page_idx": 7
963
+ },
964
+ {
965
+ "type": "text",
966
+ "text": "Before starting with the comparative study, we wanted to examine whether our training pipeline - the choice of Encoder and Detector - was valid; if it outperforms or at the very least competes with previous works. To this end, we compare our training pipeline with [9]. In Table 2 we observe that while using the same Encoder (CLIP ViT-B/32), our DT-Transformer can consistently outperform [9] on all three NewsCLIPings datasets. Moreover, using the CLIP ViT-L/14 Encoder can significantly surpass ViT-B/32. We therefore proceeded our comparative study with the DT-Transformer and the CLIP ViT-L/14 Encoder.",
967
+ "bbox": [
968
+ 109,
969
+ 599,
970
+ 887,
971
+ 681
972
+ ],
973
+ "page_idx": 7
974
+ },
975
+ {
976
+ "type": "text",
977
+ "text": "Table 3 shows the comparative study between various Synthetic Misinformers. Commencing with the OOC Misinformers, we observe that in-topic candidates can improve random sampling ( $+3\\%$ improvement). Since image-caption pairs are more likely to be related if they are taken from the same topic (e.g. election) than completely random topics (e.g. elections and sports). Secondly, we observe that alternating between sampling $(I_a, C_x)$ and $(I_x, C_a)$ pairs improves the performance of $RSt$ , and $CSt$ . Previous works on OOC Misinformers were only sampling $C_x$ captions. As a result, each image would appear twice in the dataset while the captions could appear once, twice or multiple times; depending on the sampling method. Even if minor, this process could lead to certain biases and imbalances. Similarly, we observe that alternating between image-image and caption-caption similarity can improve $CLIP-NESt$ ; presumably by generating more diverse image-caption pairs. Furthermore, our results show that feature-based negative sampling, including both $CSt$ and $NC$ , can surpass random negative sampling, due to their ability to generate hard negative OOC pairs, which more accurately reflect real-world misinformation. Finally, all multimodal OOC-based Synthetic Misinformers outperform their unimodal counterparts (image-only and text-only) and therefore do not suffer from a unimodal bias.",
978
+ "bbox": [
979
+ 109,
980
+ 689,
981
+ 885,
982
+ 854
983
+ ],
984
+ "page_idx": 7
985
+ },
986
+ {
987
+ "type": "text",
988
+ "text": "Shifting to NEI Misminformers, we see that both $R$ -NESt and $CLIP$ -NESt surpass MEIR [11]. The methodology could be a contributing factor but we should also consider the difference in data scale. MEIR consists of only 82,156 truthful and",
989
+ "bbox": [
990
+ 109,
991
+ 861,
992
+ 883,
993
+ 890
994
+ ],
995
+ "page_idx": 7
996
+ },
997
+ {
998
+ "type": "header",
999
+ "text": "Papadopoulos S.I. et al.",
1000
+ "bbox": [
1001
+ 112,
1002
+ 42,
1003
+ 272,
1004
+ 56
1005
+ ],
1006
+ "page_idx": 7
1007
+ },
1008
+ {
1009
+ "type": "page_footnote",
1010
+ "text": "<sup>11</sup>https://spacy.io/models/en#en_core_web_trf-accuracy",
1011
+ "bbox": [
1012
+ 129,
1013
+ 897,
1014
+ 540,
1015
+ 912
1016
+ ],
1017
+ "page_idx": 7
1018
+ },
1019
+ {
1020
+ "type": "page_number",
1021
+ "text": "8",
1022
+ "bbox": [
1023
+ 493,
1024
+ 935,
1025
+ 504,
1026
+ 946
1027
+ ],
1028
+ "page_idx": 7
1029
+ },
1030
+ {
1031
+ "type": "table",
1032
+ "img_path": "images/6333e225148c8ff845e63ab9a2097b936f65d560a01d0ae178c5fa41893ba2ae.jpg",
1033
+ "table_caption": [
1034
+ "Table 2: Comparison between different Detectors and Encoders trained and evaluated on three NewsCLIPings datasets. We report the binary accuracy and the per-class recall for truthful and falsified image-caption pairs. Our proposed Transformer-based Detector (DT-Transformer) consistently outperforms the original NewsCLIPings Detector and using features from CLIP ViT-L/14 further improves performance. We report the overall Accuracy and Hit Rate per class (Truthful and Falsified pairs) or Specificity and Sensitivity respectively."
1035
+ ],
1036
+ "table_footnote": [],
1037
+ "table_body": "<table><tr><td>Synthetic Misinformer</td><td>Detector</td><td>Encoder</td><td>Accuracy</td><td>Truthful</td><td>Falsified</td></tr><tr><td>NC/Bal</td><td>[9]</td><td>CLIP ViT-B/32</td><td>60.23</td><td>70.07</td><td>50.39</td></tr><tr><td>NC/Bal</td><td>DT-Transformer</td><td>CLIP ViT-B/32</td><td>65.67</td><td>73.73</td><td>57.60</td></tr><tr><td>NC/Bal</td><td>DT-Transformer</td><td>CLIP ViT-L/14</td><td>77.09</td><td>78.58</td><td>75.61</td></tr><tr><td>NC/T-T</td><td>[9]</td><td>CLIP ViT-B/32</td><td>69.39</td><td>74.09</td><td>64.69</td></tr><tr><td>NC/T-T</td><td>DT-Transformer</td><td>CLIP ViT-B/32</td><td>71.63</td><td>75.56</td><td>67.69</td></tr><tr><td>NC/T-T</td><td>DT-Transformer</td><td>CLIP ViT-L/14</td><td>84.12</td><td>87.45</td><td>80.79</td></tr><tr><td>NC/I-T</td><td>[9]</td><td>CLIP ViT-B/32</td><td>66.98</td><td>75.43</td><td>58.53</td></tr><tr><td>NC/I-T</td><td>DT-Transformer</td><td>CLIP ViT-B/32</td><td>69.12</td><td>67.51</td><td>70.73</td></tr><tr><td>NC/I-T</td><td>DT-Transformer</td><td>CLIP ViT-L/14</td><td>80.98</td><td>84.62</td><td>77.33</td></tr></table>",
1038
+ "bbox": [
1039
+ 163,
1040
+ 166,
1041
+ 833,
1042
+ 332
1043
+ ],
1044
+ "page_idx": 8
1045
+ },
1046
+ {
1047
+ "type": "text",
1048
+ "text": "57,940 falsified instances compared to the approximately 1,2M data falsified points produced by other Misinformers, which may be inadequate to correctly train the Detector. We also observe that the proposed $CLIP-NESt-alt$ surpasses all other multimodal NEI and OOC methods; achieving $56.9\\%$ accuracy. Furthermore, combining R-NESt with CST-alt achieved the same score but using NC/I-T in conjunction with R-NESt did not perform as well since NC/I-T consists of 226,564 samples and required heavily down-sampling truthful and R-NESt samples (1M each), resulting in a notably smaller dataset. Finally we observe that our hybrid $CLIP-NESt-alt + CSt-alt$ method proved capable of achieving the highest multimodal accuracy $(58.1\\%)$ .",
1049
+ "bbox": [
1050
+ 114,
1051
+ 359,
1052
+ 883,
1053
+ 458
1054
+ ],
1055
+ "page_idx": 8
1056
+ },
1057
+ {
1058
+ "type": "text",
1059
+ "text": "Nevertheless, we also recognize certain problems and limitations. First, even the best performing methods have trouble accurately identifying the falsified pairs, scoring lower than $50\\%$ in terms of Falsified Hit Rate (Sensitivity) while having high Truthful Hit Rate scores (Specificity). This indicates that the task of multimodal misinformation detection is significantly more challenging than previous studies suggested, e.g. showcasing scores higher than $88\\%$ on the COSMOS dataset [6, 14, 15] while using a problematic evaluation protocol (discussed in Section 3.1.4). Finally, we found that unimodal text-only methods, such as CLIP-NESt-C and R-NESt, R-NESt + CSt-alt, can outperform their multimodal counterparts - with the latter scoring $59.1\\%$ - on a supposedly multimodal task. This suggests the existence of a unimodal bias in the dataset, which needs to be addressed in future studies.",
1060
+ "bbox": [
1061
+ 114,
1062
+ 463,
1063
+ 883,
1064
+ 574
1065
+ ],
1066
+ "page_idx": 8
1067
+ },
1068
+ {
1069
+ "type": "text",
1070
+ "text": "5 Conclusions",
1071
+ "text_level": 1,
1072
+ "bbox": [
1073
+ 116,
1074
+ 595,
1075
+ 246,
1076
+ 611
1077
+ ],
1078
+ "page_idx": 8
1079
+ },
1080
+ {
1081
+ "type": "text",
1082
+ "text": "In this study we address the task of multimodal misinformation detection (MMD). More specifically, we examine and compare multiple methods that generate training data (Synthetic Misminformers) for MMD, either out-of-context image-text pairs (OOC) or cross-modal named entity inconsistencies (NEI). We perform a comparative study and evaluate all Synthetic Misminformers on the COSMOS benchmark; consisting of real-world multimodal misinformation. The comparative study illustrated that NEI methods tend to - on average - outperform OOC methods on the COSMOS benchmark. Moreover, our proposed CLIP-NESt-alt method reached the highest multimodal accuracy $(56.9\\%)$ among NEI and OOC methods; having a $2.15\\%$ and $2.7\\%$ advantage over the next best performing method from each respectively. Furthermore, we hypothesized that real-world misinformation is not solely captured by OOC or NEI instances separately but instead necessitate both. This is validated by the proposed hybrid approach (CLIP-NESt-alt + CSt-alt) achieving the highest multimodal accuracy $(58.1\\%)$ ; showing a $2.47\\%$ improvement over CLIP-NESt-alt.",
1083
+ "bbox": [
1084
+ 114,
1085
+ 628,
1086
+ 883,
1087
+ 767
1088
+ ],
1089
+ "page_idx": 8
1090
+ },
1091
+ {
1092
+ "type": "text",
1093
+ "text": "Nevertheless, low Sensitivity scores (Table 3) indicate that - under the corrected evaluation protocol - MMD is a significantly more challenging task than previous works suggested [6] and extensive further research is essential. Future studies could consider the integration of external evidence [16, 23] or knowledge graphs [24] not only to improve detection accuracy but also to develop new Synthetic Misinformers that generate more realistic synthetic training data, and as a result produce better Detectors. Moreover, experimentation with different modality fusion techniques can further improve performance [25, 26]. Furthermore, our empirical results showed that NEI Misinformers tend to introduce a unimodal bias, leading to unimodal Detectors competing or even outperforming multimodal ones. Named entity manipulations could create certain linguistic patterns, biases or shortcuts that render the visual information less important. Future studies could explore developing methods for generating de-biased NEI or learning strategies for reducing unimodal bias [27]. Moreover, task-specific modality fusion methods could potentially help mitigate",
1094
+ "bbox": [
1095
+ 114,
1096
+ 772,
1097
+ 883,
1098
+ 911
1099
+ ],
1100
+ "page_idx": 8
1101
+ },
1102
+ {
1103
+ "type": "header",
1104
+ "text": "Papadopoulos S.I. et al.",
1105
+ "bbox": [
1106
+ 114,
1107
+ 42,
1108
+ 269,
1109
+ 56
1110
+ ],
1111
+ "page_idx": 8
1112
+ },
1113
+ {
1114
+ "type": "page_number",
1115
+ "text": "9",
1116
+ "bbox": [
1117
+ 493,
1118
+ 936,
1119
+ 503,
1120
+ 946
1121
+ ],
1122
+ "page_idx": 8
1123
+ },
1124
+ {
1125
+ "type": "table",
1126
+ "img_path": "images/b5e185c0cefbffc0dbb24db9b4292e2416d91d4ff9fa7e5e8b01142cdd8eaa4a.jpg",
1127
+ "table_caption": [
1128
+ "Table 3: Comparative study between numerous Synthetic Misinformer methods evaluated on the COSMOS benchmark. We use the DT-Transformer and the CLIP ViT L/14 encoder. We report the Accuracy of unimodal (image-only, text-only) and multimodal Detectors as well as the multimodal Hit Rate per class (Truthful and Falsified pairs) or Specificity and Sensitivity respectively. Bold denotes the overall highest accuracy while underline denotes the highest multimodal accuracy."
1129
+ ],
1130
+ "table_footnote": [],
1131
+ "table_body": "<table><tr><td>Type</td><td>Synthetic Misinformer</td><td>Image-only Accuracy</td><td>Text-only Accuracy</td><td>Multimodal Truthful</td><td>Falsified</td></tr><tr><td rowspan=\"10\">OOC</td><td>RS-C</td><td>50.0</td><td>50.0</td><td>52.2</td><td>92.4</td></tr><tr><td>RSt-C</td><td>50.0</td><td>50.0</td><td>53.8</td><td>93.5</td></tr><tr><td>RSt-I</td><td>50.2</td><td>50.0</td><td>52.3</td><td>96.6</td></tr><tr><td>RSt-alt</td><td>50.0</td><td>50.0</td><td>53.9</td><td>92.0</td></tr><tr><td>CSt-C</td><td>50.0</td><td>48.5</td><td>53.1</td><td>70.7</td></tr><tr><td>CSt-I</td><td>49.5</td><td>50.0</td><td>54.7</td><td>83.4</td></tr><tr><td>CSt-alt</td><td>52.3</td><td>51.5</td><td>55.0</td><td>76.6</td></tr><tr><td>NC/Bal [9]</td><td>50.0</td><td>50.0</td><td>54.1</td><td>52.8</td></tr><tr><td>NC/T-T [9]</td><td>48.8</td><td>50.0</td><td>53.7</td><td>66.1</td></tr><tr><td>NC/I-T [9]</td><td>49.2</td><td>50.0</td><td>55.4</td><td>84.6</td></tr><tr><td rowspan=\"5\">NEI</td><td>MEIR [11]</td><td>49.4</td><td>50.5</td><td>53.9</td><td>90.5</td></tr><tr><td>R-NESt</td><td>50.1</td><td>58.5</td><td>55.7</td><td>90.5</td></tr><tr><td>CLIP-NESt-C</td><td>50.0</td><td>58.1</td><td>55.3</td><td>75.1</td></tr><tr><td>CLIP-NESt-I</td><td>50.0</td><td>54.1</td><td>55.2</td><td>81.2</td></tr><tr><td>CLIP-NESt-alt</td><td>50.0</td><td>56.7</td><td>56.9</td><td>81.7</td></tr><tr><td rowspan=\"3\">Hybrid</td><td>R-NESt + CSt-alt</td><td>51.8</td><td>59.1</td><td>56.9</td><td>89.3</td></tr><tr><td>R-NESt + NC/I-T</td><td>51.5</td><td>57.2</td><td>54.4</td><td>90.5</td></tr><tr><td>CLIP-NESt-alt + CSt-alt</td><td>51.3</td><td>52.8</td><td>58.1</td><td>74.4</td></tr></table>",
1132
+ "bbox": [
1133
+ 151,
1134
+ 166,
1135
+ 848,
1136
+ 470
1137
+ ],
1138
+ "page_idx": 9
1139
+ },
1140
+ {
1141
+ "type": "text",
1142
+ "text": "this challenge [28]. Finally, the unimodal bias may not lie with the training process but with the evaluation dataset. The COSMOS benchmark was not collected with certain criteria in place to explicitly make it difficult for unimodal architectures. Future studies could explore and define relevant rules and criteria for collecting a more robust real-world MMD benchmark.",
1143
+ "bbox": [
1144
+ 109,
1145
+ 497,
1146
+ 883,
1147
+ 554
1148
+ ],
1149
+ "page_idx": 9
1150
+ },
1151
+ {
1152
+ "type": "text",
1153
+ "text": "6 Acknowledgments",
1154
+ "text_level": 1,
1155
+ "bbox": [
1156
+ 112,
1157
+ 573,
1158
+ 302,
1159
+ 590
1160
+ ],
1161
+ "page_idx": 9
1162
+ },
1163
+ {
1164
+ "type": "text",
1165
+ "text": "This work is partially funded by the project \"vera.ai: VERIFICATION Assisted by Artificial Intelligence\" under grant agreement no. 101070093.",
1166
+ "bbox": [
1167
+ 109,
1168
+ 604,
1169
+ 883,
1170
+ 635
1171
+ ],
1172
+ "page_idx": 9
1173
+ },
1174
+ {
1175
+ "type": "text",
1176
+ "text": "References",
1177
+ "text_level": 1,
1178
+ "bbox": [
1179
+ 112,
1180
+ 654,
1181
+ 209,
1182
+ 671
1183
+ ],
1184
+ "page_idx": 9
1185
+ },
1186
+ {
1187
+ "type": "list",
1188
+ "sub_type": "ref_text",
1189
+ "list_items": [
1190
+ "[1] Muhammad F Mridha, Ashfia Jannat Keya, Md Abdul Hamid, Muhammad Mostafa Monowar, and Md Saifur Rahman. A comprehensive review on fake news detection with deep learning. IEEE Access, 9:156151-156170, 2021.",
1191
+ "[2] Md Shohel Rana, Mohammad Nur Nobi, Bedduh Murali, and Andrew H Sung. Deepfake detection: A systematic literature review. IEEE Access, 2022.",
1192
+ "[3] Yiyi Li and Ying Xie. Is a picture worth a thousand words? an empirical study of image content and social media engagement. Journal of Marketing Research, 57(1):1-19, 2020.",
1193
+ "[4] Eryn J Newman, Maryanne Garry, Daniel M Bernstein, Justin Kantner, and D Stephen Lindsay. Nonprobative photographs (or words) inflate truthiness. Psychonomic Bulletin & Review, 19:969-974, 2012.",
1194
+ "[5] Firoj Alam, Stefano Cresci, Tanmoy Chakraborty, Fabrizio Silvestri, Dimiter Dimitrov, Giovanni Da San Martino, Shaden Shaar, Hamed Firooz, and Preslav Nakov. A survey on multimodal disinformation detection. arXiv preprint arXiv:2103.12541, 2021.",
1195
+ "[6] Shivangi Aneja, Chris Bregler, and Matthias Nießner. Cosmos: Catching out-of-context misinformation with self-supervised learning. arXiv preprint arXiv:2101.06278, 2021."
1196
+ ],
1197
+ "bbox": [
1198
+ 120,
1199
+ 686,
1200
+ 885,
1201
+ 912
1202
+ ],
1203
+ "page_idx": 9
1204
+ },
1205
+ {
1206
+ "type": "header",
1207
+ "text": "Papadopoulos S.I. et al.",
1208
+ "bbox": [
1209
+ 112,
1210
+ 42,
1211
+ 272,
1212
+ 56
1213
+ ],
1214
+ "page_idx": 9
1215
+ },
1216
+ {
1217
+ "type": "page_number",
1218
+ "text": "10",
1219
+ "bbox": [
1220
+ 488,
1221
+ 935,
1222
+ 508,
1223
+ 946
1224
+ ],
1225
+ "page_idx": 9
1226
+ },
1227
+ {
1228
+ "type": "list",
1229
+ "sub_type": "ref_text",
1230
+ "list_items": [
1231
+ "[7] Eric Müller-Budack, Jonas Theiner, Sebastian Diering, Maximilian Idahl, and Ralph Ewerth. Multimodal analytics for real-world news using measures of cross-modal entity consistency. In Proceedings of the 2020 International Conference on Multimedia Retrieval, pages 16–25, 2020.",
1232
+ "[8] Ayush Jaiswal, Ekraam Sabir, Wael AbdAlmageed, and Premkumar Natarajan. Multimedia semantic integrity assessment using joint embedding of images and text. In Proceedings of the 25th ACM international conference on Multimedia, pages 1465-1471, 2017.",
1233
+ "[9] Grace Luo, Trevor Darrell, and Anna Rohrbach. Newsclippings: Automatic generation of out-of-context multimodal media. arXiv preprint arXiv:2104.05893, 2021.",
1234
+ "[10] Giscard Biamby, Grace Luo, Trevor Darrell, and Anna Rohrbach. Twitter-comms: Detecting climate, covid, and military multimodal misinformation. arXiv preprint arXiv:2112.08594, 2021.",
1235
+ "[11] Ekraam Sabir, Wael AbdAlmageed, Yue Wu, and Prem Natarajan. Deep multimodal image-repurposing detection. In Proceedings of the 26th ACM international conference on Multimedia, pages 1337-1345, 2018.",
1236
+ "[12] Fuxiao Liu, Yinghan Wang, Tianlu Wang, and Vicente Ordonez. Visual news: Benchmark and challenges in news image captioning. arXiv preprint arXiv:2010.03743, 2020.",
1237
+ "[13] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021.",
1238
+ "[14] Tankut Akgul, Tugce Erkilic Civelek, Deniz Ugur, and Ali C Begen. Cosmos on steroids: a cheap detector for cheapfakes. In Proceedings of the 12th ACM Multimedia Systems Conference, pages 327-331, 2021.",
1239
+ "[15] Muhannad Alkaddour, Abhinav Dhall, Usman Tariq, Hasan Al Nashash, and Fares Al-Shargie. Sentiment-aware classifier for out-of-context caption detection. In Proceedings of the 30th ACM International Conference on Multimedia, pages 7180-7184, 2022.",
1240
+ "[16] Sahar Abdelnabi, Rakibul Hasan, and Mario Fritz. Open-domain, content-based, multi-modal fact-checking of out-of-context images via online resources. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14940-14949, 2022.",
1241
+ "[17] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.",
1242
+ "[18] Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. Enriching word vectors with subword information. Transactions of the association for computational linguistics, 5:135-146, 2017.",
1243
+ "[19] Mingzhen Huang, Shan Jia, Ming-Ching Chang, and Siwei Lyu. Text-image de-contextualization detection using vision-language models. In ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 8967-8971. IEEE, 2022.",
1244
+ "[20] Mitchell Wortsman, Gabriel Ilharco, Jong Wook Kim, Mike Li, Simon Kornblith, Rebecca Roelofs, Raphael Gontijo Lopes, Hannaneh Hajishirzi, Ali Farhadi, Hongseok Namkoong, et al. Robust fine-tuning of zero-shot models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7959-7971, 2022.",
1245
+ "[21] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017.",
1246
+ "[22] Shivangi Aneja, Cise Midoglu, Duc-Tien Dang-Nguyen, Michael Alexander Riegler, Paal Halvorsen, Matthias Nießner, Balu Adsumilli, and Chris Bregler. Mmsys' 21 grand challenge on detecting cheapfakes. arXiv preprint arXiv:2107.05297, 2021.",
1247
+ "[23] Max Glockner, Yufang Hou, and Iryna Gurevych. Missing counter-evidence renders nlp fact-checking unrealistic for misinformation. arXiv preprint arXiv:2210.13865, 2022.",
1248
+ "[24] Lihui Liu, Houxiang Ji, Jiejun Xu, and Hanghang Tong. Comparative reasoning for knowledge graph fact checking. In 2022 IEEE International Conference on Big Data (Big Data), pages 2309-2312. IEEE, 2022.",
1249
+ "[25] Rina Kumari and Asif Ekbal. Amfb: Attention based multimodal factorized bilinear pooling for multimodal fake news detection. Expert Systems with Applications, 184:115412, 2021.",
1250
+ "[26] Chuanming Yu, Yinxue Ma, Lu An, and Gang Li. Bcmf: A bidirectional cross-modal fusion model for fake news detection. Information Processing & Management, 59(5):103063, 2022.",
1251
+ "[27] Remi Cadene, Corentin Dancette, Matthieu Cord, Devi Parikh, et al. Rubi: Reducing unimodal biases for visual question answering. Advances in neural information processing systems, 32, 2019.",
1252
+ "[28] Peiguang Li, Xian Sun, Hongfeng Yu, Yu Tian, Fanglong Yao, and Guangliu Xu. Entity-oriented multi-modal alignment and fusion network for fake news detection. IEEE Transactions on Multimedia, 24:3455-3468, 2021."
1253
+ ],
1254
+ "bbox": [
1255
+ 116,
1256
+ 90,
1257
+ 883,
1258
+ 907
1259
+ ],
1260
+ "page_idx": 10
1261
+ },
1262
+ {
1263
+ "type": "header",
1264
+ "text": "Papadopoulos S.I. et al.",
1265
+ "bbox": [
1266
+ 112,
1267
+ 42,
1268
+ 272,
1269
+ 58
1270
+ ],
1271
+ "page_idx": 10
1272
+ },
1273
+ {
1274
+ "type": "page_number",
1275
+ "text": "11",
1276
+ "bbox": [
1277
+ 488,
1278
+ 935,
1279
+ 506,
1280
+ 946
1281
+ ],
1282
+ "page_idx": 10
1283
+ }
1284
+ ]