SlowGuess committed on
Commit
14f8356
·
verified ·
1 Parent(s): 79412b2

Add Batch ccbaa207-cbbe-42a2-bec0-66a9e84d7345

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +64 -0
  2. 2401.13xxx/2401.13822/fcbfe3c4-95d8-4b9b-82ab-bab21d6d8561_content_list.json +0 -0
  3. 2401.13xxx/2401.13822/fcbfe3c4-95d8-4b9b-82ab-bab21d6d8561_model.json +0 -0
  4. 2401.13xxx/2401.13822/fcbfe3c4-95d8-4b9b-82ab-bab21d6d8561_origin.pdf +3 -0
  5. 2401.13xxx/2401.13822/full.md +462 -0
  6. 2401.13xxx/2401.13822/images.zip +3 -0
  7. 2401.13xxx/2401.13822/layout.json +0 -0
  8. 2401.13xxx/2401.13835/5f871f8e-55f5-463f-b5a5-14517ab789e0_content_list.json +0 -0
  9. 2401.13xxx/2401.13835/5f871f8e-55f5-463f-b5a5-14517ab789e0_model.json +0 -0
  10. 2401.13xxx/2401.13835/5f871f8e-55f5-463f-b5a5-14517ab789e0_origin.pdf +3 -0
  11. 2401.13xxx/2401.13835/full.md +477 -0
  12. 2401.13xxx/2401.13835/images.zip +3 -0
  13. 2401.13xxx/2401.13835/layout.json +0 -0
  14. 2401.13xxx/2401.13836/361caaf7-20df-4d50-8016-fed889c064b7_content_list.json +0 -0
  15. 2401.13xxx/2401.13836/361caaf7-20df-4d50-8016-fed889c064b7_model.json +0 -0
  16. 2401.13xxx/2401.13836/361caaf7-20df-4d50-8016-fed889c064b7_origin.pdf +3 -0
  17. 2401.13xxx/2401.13836/full.md +0 -0
  18. 2401.13xxx/2401.13836/images.zip +3 -0
  19. 2401.13xxx/2401.13836/layout.json +0 -0
  20. 2401.13xxx/2401.13856/d4393d1b-2373-4469-9ec8-bdde60cff1f4_content_list.json +1745 -0
  21. 2401.13xxx/2401.13856/d4393d1b-2373-4469-9ec8-bdde60cff1f4_model.json +0 -0
  22. 2401.13xxx/2401.13856/d4393d1b-2373-4469-9ec8-bdde60cff1f4_origin.pdf +3 -0
  23. 2401.13xxx/2401.13856/full.md +368 -0
  24. 2401.13xxx/2401.13856/images.zip +3 -0
  25. 2401.13xxx/2401.13856/layout.json +0 -0
  26. 2401.13xxx/2401.13858/63fe44b6-4320-4a36-9076-2fb83cd8d8db_content_list.json +0 -0
  27. 2401.13xxx/2401.13858/63fe44b6-4320-4a36-9076-2fb83cd8d8db_model.json +0 -0
  28. 2401.13xxx/2401.13858/63fe44b6-4320-4a36-9076-2fb83cd8d8db_origin.pdf +3 -0
  29. 2401.13xxx/2401.13858/full.md +568 -0
  30. 2401.13xxx/2401.13858/images.zip +3 -0
  31. 2401.13xxx/2401.13858/layout.json +0 -0
  32. 2401.13xxx/2401.13867/d452ce7d-f886-4a58-9d7e-59c57d5ab67f_content_list.json +883 -0
  33. 2401.13xxx/2401.13867/d452ce7d-f886-4a58-9d7e-59c57d5ab67f_model.json +1107 -0
  34. 2401.13xxx/2401.13867/d452ce7d-f886-4a58-9d7e-59c57d5ab67f_origin.pdf +3 -0
  35. 2401.13xxx/2401.13867/full.md +167 -0
  36. 2401.13xxx/2401.13867/images.zip +3 -0
  37. 2401.13xxx/2401.13867/layout.json +0 -0
  38. 2401.13xxx/2401.13870/8171f699-666b-4b9e-940c-6a641056e68b_content_list.json +0 -0
  39. 2401.13xxx/2401.13870/8171f699-666b-4b9e-940c-6a641056e68b_model.json +0 -0
  40. 2401.13xxx/2401.13870/8171f699-666b-4b9e-940c-6a641056e68b_origin.pdf +3 -0
  41. 2401.13xxx/2401.13870/full.md +386 -0
  42. 2401.13xxx/2401.13870/images.zip +3 -0
  43. 2401.13xxx/2401.13870/layout.json +0 -0
  44. 2401.13xxx/2401.13912/4c1003f2-ebdf-4212-910e-d5113767c624_content_list.json +0 -0
  45. 2401.13xxx/2401.13912/4c1003f2-ebdf-4212-910e-d5113767c624_model.json +0 -0
  46. 2401.13xxx/2401.13912/4c1003f2-ebdf-4212-910e-d5113767c624_origin.pdf +3 -0
  47. 2401.13xxx/2401.13912/full.md +0 -0
  48. 2401.13xxx/2401.13912/images.zip +3 -0
  49. 2401.13xxx/2401.13912/layout.json +0 -0
  50. 2401.13xxx/2401.13919/f82c75ef-bcfa-4523-9ab5-fea12b9952a1_content_list.json +0 -0
.gitattributes CHANGED
@@ -10198,3 +10198,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
10198
  2402.01xxx/2402.01735/b626276e-361c-458f-addf-44b0a63dcd0f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10199
  2402.03xxx/2402.03358/b9aa3ea1-fca6-43f5-ac06-51646e73c7ac_origin.pdf filter=lfs diff=lfs merge=lfs -text
10200
  2402.04xxx/2402.04268/bccace19-cc92-4ccb-b8d1-d2a72950939a_origin.pdf filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10198
  2402.01xxx/2402.01735/b626276e-361c-458f-addf-44b0a63dcd0f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10199
  2402.03xxx/2402.03358/b9aa3ea1-fca6-43f5-ac06-51646e73c7ac_origin.pdf filter=lfs diff=lfs merge=lfs -text
10200
  2402.04xxx/2402.04268/bccace19-cc92-4ccb-b8d1-d2a72950939a_origin.pdf filter=lfs diff=lfs merge=lfs -text
10201
+ 2401.13xxx/2401.13822/fcbfe3c4-95d8-4b9b-82ab-bab21d6d8561_origin.pdf filter=lfs diff=lfs merge=lfs -text
10202
+ 2401.13xxx/2401.13835/5f871f8e-55f5-463f-b5a5-14517ab789e0_origin.pdf filter=lfs diff=lfs merge=lfs -text
10203
+ 2401.13xxx/2401.13836/361caaf7-20df-4d50-8016-fed889c064b7_origin.pdf filter=lfs diff=lfs merge=lfs -text
10204
+ 2401.13xxx/2401.13856/d4393d1b-2373-4469-9ec8-bdde60cff1f4_origin.pdf filter=lfs diff=lfs merge=lfs -text
10205
+ 2401.13xxx/2401.13858/63fe44b6-4320-4a36-9076-2fb83cd8d8db_origin.pdf filter=lfs diff=lfs merge=lfs -text
10206
+ 2401.13xxx/2401.13867/d452ce7d-f886-4a58-9d7e-59c57d5ab67f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10207
+ 2401.13xxx/2401.13870/8171f699-666b-4b9e-940c-6a641056e68b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10208
+ 2401.13xxx/2401.13912/4c1003f2-ebdf-4212-910e-d5113767c624_origin.pdf filter=lfs diff=lfs merge=lfs -text
10209
+ 2401.13xxx/2401.13919/f82c75ef-bcfa-4523-9ab5-fea12b9952a1_origin.pdf filter=lfs diff=lfs merge=lfs -text
10210
+ 2401.13xxx/2401.13923/01fcd097-35c0-4db9-8359-67a19a407c6b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10211
+ 2401.13xxx/2401.13927/83e2930c-6668-4c51-bcc4-2f7048147b81_origin.pdf filter=lfs diff=lfs merge=lfs -text
10212
+ 2401.13xxx/2401.13964/13d46c7c-62fd-4cde-b273-fff445577230_origin.pdf filter=lfs diff=lfs merge=lfs -text
10213
+ 2401.13xxx/2401.13974/02c31775-7024-4e56-aa1e-f2eaadabbd74_origin.pdf filter=lfs diff=lfs merge=lfs -text
10214
+ 2401.13xxx/2401.13990/2576f0f6-53fe-48d0-af34-c46404be9fec_origin.pdf filter=lfs diff=lfs merge=lfs -text
10215
+ 2401.14xxx/2401.14009/668d4d56-bb0d-4d71-8af2-f5de1d759c59_origin.pdf filter=lfs diff=lfs merge=lfs -text
10216
+ 2401.14xxx/2401.14011/7b688030-bf92-4d0f-9aed-68e8e9f541b5_origin.pdf filter=lfs diff=lfs merge=lfs -text
10217
+ 2401.14xxx/2401.14055/0fd7dab5-0e55-4b78-91a6-7aa7e87845fb_origin.pdf filter=lfs diff=lfs merge=lfs -text
10218
+ 2401.14xxx/2401.14113/ed9fbc6f-b1c0-4f49-92ab-d6132a17cd1e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10219
+ 2401.14xxx/2401.14142/e221c091-a210-4f1a-b786-465b531c366b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10220
+ 2401.14xxx/2401.14151/cf095750-c37c-4d45-b05f-a92525e7547c_origin.pdf filter=lfs diff=lfs merge=lfs -text
10221
+ 2401.14xxx/2401.14155/09bb0c98-e055-4881-9ecb-0f4bfaccc728_origin.pdf filter=lfs diff=lfs merge=lfs -text
10222
+ 2401.14xxx/2401.14159/0ba32643-586b-4254-bc7a-704e4caf85a6_origin.pdf filter=lfs diff=lfs merge=lfs -text
10223
+ 2401.14xxx/2401.14160/889615cc-c11d-40bc-92c0-fc2a7d593e8c_origin.pdf filter=lfs diff=lfs merge=lfs -text
10224
+ 2401.14xxx/2401.14168/414d8bdd-fbe3-4714-8ee0-321f2de213b7_origin.pdf filter=lfs diff=lfs merge=lfs -text
10225
+ 2401.14xxx/2401.14192/c1b95e9f-304d-415c-9a1d-c1f880a0dd88_origin.pdf filter=lfs diff=lfs merge=lfs -text
10226
+ 2401.14xxx/2401.14196/759c87f3-b22e-4d4d-b22d-c8e68e0865b0_origin.pdf filter=lfs diff=lfs merge=lfs -text
10227
+ 2401.14xxx/2401.14270/30e94175-2c4a-4cd5-b206-79d3d16f601b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10228
+ 2401.14xxx/2401.14295/6e55df97-f14f-4ca1-8095-cc04a2a85023_origin.pdf filter=lfs diff=lfs merge=lfs -text
10229
+ 2401.14xxx/2401.14321/08692370-384c-486a-8d2e-0c10973f2da0_origin.pdf filter=lfs diff=lfs merge=lfs -text
10230
+ 2401.14xxx/2401.14351/59abd46a-3866-47b4-9533-228db161925f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10231
+ 2401.14xxx/2401.14362/39f9a2c4-6660-4ae0-8729-dd91c3f0508b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10232
+ 2401.14xxx/2401.14367/85593a32-830a-4775-a178-7946f969347c_origin.pdf filter=lfs diff=lfs merge=lfs -text
10233
+ 2401.14xxx/2401.14391/b7439e9b-b279-449f-ab78-50eec6358478_origin.pdf filter=lfs diff=lfs merge=lfs -text
10234
+ 2401.14xxx/2401.14398/db35f4dc-3fd2-46e8-a418-c12f08790144_origin.pdf filter=lfs diff=lfs merge=lfs -text
10235
+ 2401.14xxx/2401.14403/24c3f6e5-c29a-4c3c-8edb-ab70f2b3ba97_origin.pdf filter=lfs diff=lfs merge=lfs -text
10236
+ 2401.14xxx/2401.14404/f1fd927b-0622-440d-ba47-62cd4abab9b8_origin.pdf filter=lfs diff=lfs merge=lfs -text
10237
+ 2401.14xxx/2401.14444/ce9f3660-728a-4873-bfa1-dafde6aef197_origin.pdf filter=lfs diff=lfs merge=lfs -text
10238
+ 2401.14xxx/2401.14446/87c733fd-0e1c-4fc5-beca-587190d1cae4_origin.pdf filter=lfs diff=lfs merge=lfs -text
10239
+ 2401.14xxx/2401.14461/7fd4b8d8-770a-483b-88f3-4b8cd9748ed1_origin.pdf filter=lfs diff=lfs merge=lfs -text
10240
+ 2401.14xxx/2401.14462/022e1c0d-c157-4934-9e17-955e3b38f132_origin.pdf filter=lfs diff=lfs merge=lfs -text
10241
+ 2401.14xxx/2401.14484/90933ed0-ed71-422a-bbce-cefd51433223_origin.pdf filter=lfs diff=lfs merge=lfs -text
10242
+ 2401.14xxx/2401.14527/630cef6c-2d7b-4e33-b217-746ec06b7e85_origin.pdf filter=lfs diff=lfs merge=lfs -text
10243
+ 2401.14xxx/2401.14554/70ddb9d0-1dad-4de6-ae1c-0da14e95f477_origin.pdf filter=lfs diff=lfs merge=lfs -text
10244
+ 2401.14xxx/2401.14595/e3225015-1502-43fa-bd6b-21ddaf069a49_origin.pdf filter=lfs diff=lfs merge=lfs -text
10245
+ 2401.14xxx/2401.14606/15d29800-c157-4d6a-b981-b8c660fcafb3_origin.pdf filter=lfs diff=lfs merge=lfs -text
10246
+ 2401.14xxx/2401.14656/ea512135-120a-4f3d-9592-0cb9ab445082_origin.pdf filter=lfs diff=lfs merge=lfs -text
10247
+ 2401.14xxx/2401.14673/878c799d-053d-41c4-859b-a36c9aea97fd_origin.pdf filter=lfs diff=lfs merge=lfs -text
10248
+ 2401.14xxx/2401.14780/01363e85-4e74-4813-867f-420b6be7a7c7_origin.pdf filter=lfs diff=lfs merge=lfs -text
10249
+ 2401.14xxx/2401.14818/0de1c70f-c352-4493-aa47-a825ffa7b961_origin.pdf filter=lfs diff=lfs merge=lfs -text
10250
+ 2401.14xxx/2401.14828/3ad1e856-ab78-4dca-9251-b2ae645c7a9e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10251
+ 2401.14xxx/2401.14840/749dc458-a650-4566-b235-23e821eb93e0_origin.pdf filter=lfs diff=lfs merge=lfs -text
10252
+ 2401.14xxx/2401.14857/026a3d0b-3b31-43e4-a113-3df9ffdb006f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10253
+ 2401.14xxx/2401.14882/bcb7aa56-8442-45a9-b7d7-a7b46ebf4f56_origin.pdf filter=lfs diff=lfs merge=lfs -text
10254
+ 2401.14xxx/2401.14886/1b3d0a68-5c20-4c14-b2c3-74a52e40ac32_origin.pdf filter=lfs diff=lfs merge=lfs -text
10255
+ 2401.14xxx/2401.14887/4fee52fe-d8cb-4035-b19a-2d7032043bf9_origin.pdf filter=lfs diff=lfs merge=lfs -text
10256
+ 2401.15xxx/2401.15123/6eaf5dd9-2e61-4953-824a-4fc3775b42b1_origin.pdf filter=lfs diff=lfs merge=lfs -text
10257
+ 2401.15xxx/2401.15127/864fb2f4-3efe-4ed1-b775-6b6648c7eac5_origin.pdf filter=lfs diff=lfs merge=lfs -text
10258
+ 2401.17xxx/2401.17319/fbc47c2f-d075-4b7a-a803-d2010c785d46_origin.pdf filter=lfs diff=lfs merge=lfs -text
10259
+ 2402.01xxx/2402.01704/c28d2389-7b57-4194-a607-72fa5e541b4b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10260
+ 2402.01xxx/2402.01705/e197d6c4-092f-42b4-ab0d-f837f2e419f8_origin.pdf filter=lfs diff=lfs merge=lfs -text
10261
+ 2402.01xxx/2402.01708/3dc1efe2-1c51-4565-bdfa-d7f10afee771_origin.pdf filter=lfs diff=lfs merge=lfs -text
10262
+ 2402.01xxx/2402.01712/cd860e7e-cea9-4cfb-a584-55721b6c908e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10263
+ 2402.01xxx/2402.01715/574ddcbe-6659-413e-ac39-a0e219ba994e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10264
+ 2402.09xxx/2402.09432/5de9be86-d49c-4d86-93d0-30bcff928d14_origin.pdf filter=lfs diff=lfs merge=lfs -text
2401.13xxx/2401.13822/fcbfe3c4-95d8-4b9b-82ab-bab21d6d8561_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.13xxx/2401.13822/fcbfe3c4-95d8-4b9b-82ab-bab21d6d8561_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.13xxx/2401.13822/fcbfe3c4-95d8-4b9b-82ab-bab21d6d8561_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f5795897f593ede3d5bbedb39e1c05fa428e1c54fc4ba049d18b1f8f6826895a
3
+ size 3662826
2401.13xxx/2401.13822/full.md ADDED
@@ -0,0 +1,462 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # NAVIGATING DATASET DOCUMENTATIONS IN AI: A LARGE-SCALE ANALYSIS OF DATASET CARDS ON HUGGING FACE
2
+
3
+ Xinyu Yang *
4
+
5
+ Cornell University
6
+
7
+ xy468@cornell.edu
8
+
9
+ Weixin Liang*
10
+
11
+ Stanford University
12
+
13
+ wxliang@stanford.edu
14
+
15
+ James Zou
16
+
17
+ Stanford University
18
+
19
+ jamesz@stanford.edu
20
+
21
+ # ABSTRACT
22
+
23
+ Advances in machine learning are closely tied to the creation of datasets. While data documentation is widely recognized as essential to the reliability, reproducibility, and transparency of ML, we lack a systematic empirical understanding of current dataset documentation practices. To shed light on this question, here we take Hugging Face – one of the largest platforms for sharing and collaborating on ML models and datasets – as a prominent case study. By analyzing all 7,433 dataset documentation on Hugging Face, our investigation provides an overview of the Hugging Face dataset ecosystem and insights into dataset documentation practices, yielding 5 main findings: (1) The dataset card completion rate shows marked heterogeneity correlated with dataset popularity: While $86.0\%$ of the top 100 downloaded dataset cards fill out all sections suggested by Hugging Face community, only $7.9\%$ of dataset cards with no downloads complete all these sections. (2) A granular examination of each section within the dataset card reveals that the practitioners seem to prioritize Dataset Description and Dataset Structure sections, accounting for $36.2\%$ and $33.6\%$ of the total card length, respectively, for the most downloaded datasets. In contrast, the Considerations for Using the Data section receives the lowest proportion of content, accounting for just $2.1\%$ of the text. (3) By analyzing the subsections within each section and utilizing topic modeling to identify key topics, we uncover what is discussed in each section, and underscore significant themes encompassing both technical and social impacts, as well as limitations within the Considerations for Using the Data section. (4) Our findings also highlight the need for improved accessibility and reproducibility of datasets in the Usage sections. 
(5) In addition, our human annotation evaluation emphasizes the pivotal role of comprehensive dataset content in shaping individuals' perceptions of a dataset card's overall quality. Overall, our study offers a unique perspective on analyzing dataset documentation through large-scale data science analysis and underlines the need for more thorough dataset documentation in machine learning research.
24
+
25
+ # 1 INTRODUCTION
26
+
27
+ Datasets form the backbone of machine learning research (Koch et al., 2021). The proliferation of machine learning research has spurred rapid advancements in machine learning dataset development, validation, and real-world deployment across academia and industry. Such growing availability of ML datasets underscores the crucial role of proper documentation in ensuring transparency, reproducibility, and data quality in research (Haibe-Kains et al., 2020; Stodden et al., 2018; Hutson, 2018). Documentation provides details about the dataset, including sources of data, methods used to collect it, and preprocessing or cleaning that was performed. This information holds significant value for dataset users, as it facilitates a quick understanding of the dataset's motivation and its overall scope. These insights are also crucial for fostering responsible data sharing and promoting interdisciplinary collaborations.
28
+
29
+ Despite numerous studies exploring the structure and content of dataset cards across various research domains (Afzal et al., 2020; Gebru et al., 2021; Papakyriakopoulos et al., 2023; Barman et al., 2023; Costa-jussa et al., 2020), there remains a notable gap in empirical analyses of community norms and practices for dataset documentation. This knowledge gap is significant because adherence to community norms and the quality of dataset documentation directly impact the transparency, reliability, and reproducibility in the field of data-driven research. For instance, inadequate dataset descriptions, structural details, or limitations can hinder users from utilizing the dataset appropriately, potentially resulting in misuse or unintended consequences; the absence of information on data cleaning and readiness assessment practices in data documentation limits dataset reusability and productivity gains. Furthermore, without a systematic analysis of current dataset documentation practices, we risk perpetuating insufficient documentation standards, which can impede efforts to ensure fairness, accountability, and equitable use of AI technologies.
30
+
31
+ To address this question, we conducted a comprehensive empirical analysis of dataset cards hosted on Hugging Face, one of the largest platforms for sharing and collaborating on ML models and datasets, as a prominent case study. Dataset cards on the Hugging Face platform are Markdown files that serve as the README for a dataset repository. While several open-source platforms also facilitate the sharing of ML datasets, such as Kaggle, Papers with Code, and GitHub, we chose Hugging Face for two primary reasons. Firstly, it stands out as one of the most popular platforms for developers to publish, share, and reuse ML-based projects, offering a vast repository of ML datasets for study. Secondly, Hugging Face is one of the few open-source platforms that offer an official dataset card template. This feature not only enhances the accessibility and user-friendliness of the dataset card community but also makes the analysis process more efficient and informative.
32
+
33
+ By analyzing all 7,433 dataset documentation hosted on Hugging Face, our investigation provides an overview of the Hugging Face dataset ecosystem and insights into dataset documentation practices. Based on our research findings, we emphasize the importance of comprehensive dataset documentation and offer suggestions to practitioners on how to write documentation that promotes reproducibility, transparency, and accessibility of their datasets, which can help to improve the overall quality and usability of the dataset community. Our study aims to bridge the notable gap in the community concerning data documentation norms, taking the first step toward identifying deficiencies in current practices and offering guidelines for enhancing dataset documentation.
34
+
35
+ ![](images/d5724deab615fba29b5400f3396c49bd3db4d26adf121ce8b0b3389d80b50d0c.jpg)
36
+ Figure 1: Systematic Analysis of 24,065 Datasets Hosted on Hugging Face. (a) Exponential Growth of Datasets: The Hugging Face platform has seen a remarkable surge in the number of datasets, with the count doubling approximately every 18 weeks. (b) Power Law in Dataset Usage: Dataset downloads on Hugging Face follow a power-law distribution, as indicated by the linear relationship on the log-log plot. The top 82 datasets account for $80\%$ of the total downloads; datasets with documentation dominate the top downloaded datasets. (c) Documentation Associated with Usage: Despite only $30.9\%$ of dataset repositories (7,433 out of 24,065) featuring non-empty dataset cards, these datasets account for an overwhelming $95.0\%$ of total download traffic on the platform.
37
+
38
+ ![](images/28ca70c933af91bde50c669ec8bf3716ce9fcf8dd403b60f60772e71b8ab0672.jpg)
39
+
40
+ ![](images/f740121389607ad40c6ad835a8055fb1921ff123714e964885b3d963f3367d25.jpg)
41
+
42
+ # 2 OVERVIEW
43
+
44
+ # Finding
45
+
46
+ - Exponential Growth of Datasets: The number of datasets on Hugging Face doubles every 18 weeks.
47
+ - Documentation Associated with Usage: $95.0\%$ of download traffic comes from the $30.9\%$ of datasets with documentation.
48
+
49
+ Exponential Growth of Datasets Our analysis encompasses 24,065 dataset repositories on Hugging Face uploaded by 7,811 distinct user accounts as of March 16th, 2023 (see Table. S5 for varying documentation practices by creators). The number of datasets exhibits exponential growth, with a weekly growth rate of $3.97\%$ and a doubling time of 18 weeks (Fig. 1a). As a sanity check, the number of dataset repositories reached 35,973 by May 23rd, 2023, confirming the exponential trend.
50
+
51
+ Power Law in Dataset Usage Although Hugging Face has seen a significant increase in the number of dataset repositories, our analysis reveals a significant imbalance in dataset downloads, which follows a power law distribution. This means that a small proportion of the most popular datasets receive the majority of the downloads, while the vast majority of datasets receive very few downloads. In fact, our analysis shows that just the 82 datasets with the most downloads account for $80\%$ of total downloads (Fig. 1b). Fig. S4 further demonstrates that the power law distribution persists across various task domains, even with the varied number of datasets within each domain.
52
+
53
+ Documentation Associated with Usage Despite the importance of dataset cards, only $58.2\%$ (14,011 out of 24,065 dataset repositories contributed by 4,782 distinct user accounts) include dataset cards as Markdown README.md files within their dataset repositories. Among these, 6,578 dataset cards are empty, resulting in only $30.9\%$ (7,433 out of 24,065 dataset repositories contributed by 1,982 distinct user accounts) featuring non-empty dataset cards (Fig. 1c). As illustrated in Fig. 1b, dataset cards are prevalent among the most downloaded datasets. Notably, datasets with non-empty dataset cards account for $95.0\%$ of total download traffic, underscoring a potential positive correlation between dataset cards and dataset popularity. For the rest of the paper, we focus our analyses on these 7,433 non-empty dataset cards. We sort these non-empty dataset cards based on the number of downloads for the corresponding datasets. So top $k$ dataset cards (e.g. $k = 100$ ) refer to the dataset cards corresponding to the $k$ most downloaded datasets.
54
+
55
+ # 3 STRUCTURE OF DATASET DOCUMENTATIONS
56
+
57
+ # Finding
58
+
59
+ - The dataset card completion rate shows marked heterogeneity correlated with dataset popularity: While $86.0\%$ of the top 100 downloaded datasets fill out all sections suggested by the Hugging Face community, only $7.9\%$ of dataset cards with no downloads complete all these sections.
60
+
61
+ <table><tr><td>Section Title</td><td>Subsection Title</td><td>Description</td></tr><tr><td rowspan="3">Dataset Description</td><td>Dataset Summary</td><td>A brief summary of the dataset, including its intended use, supported tasks, an overview of how and why the dataset was created, etc.</td></tr><tr><td>Supported Tasks and Leaderboards</td><td>Brief description of the tag, metrics, and suggested models of the dataset.</td></tr><tr><td>Languages</td><td>The languages represented in the dataset.</td></tr><tr><td rowspan="3">Dataset Structure</td><td>Data Instances</td><td>JSON-formed example and description of a typical instance in the dataset.</td></tr><tr><td>Data Fields</td><td>List and describe the fields present in the dataset. Mention their data type, and whether they are used as input or output in any of the tasks the dataset currently supports.</td></tr><tr><td>Data Splits</td><td>Criteria for splitting the data, descriptive statistics for the features, such as size, average length, etc.</td></tr><tr><td rowspan="4">Dataset Creation</td><td>Curation Rationale</td><td>Motivation for the creation of the dataset.</td></tr><tr><td>Source Data</td><td>The source data (e.g. 
news text and headlines, social media posts, translated sentences, etc.), including the data collection process, and data producer.</td></tr><tr><td>Annotations</td><td>Annotation process, annotation tools, annotators, etc.</td></tr><tr><td>Personal and Sensitive Information</td><td>Statement of whether the dataset contains other data that might be considered sensitive (e.g., data that reveals racial or ethnic origins, financial or health data, etc.).</td></tr><tr><td rowspan="3">Considerations for Using the Data</td><td>Social Impact of Dataset</td><td>Discussion of the ways the use of the dataset will impact society.</td></tr><tr><td>Discussion of Biases</td><td>Descriptions of specific biases that are likely to be reflected in the data</td></tr><tr><td>Other Known Limitations</td><td>Other limitations of the dataset, like annotation artifacts.</td></tr><tr><td rowspan="4">Additional Information</td><td>Dataset Curators</td><td>The people involved in collecting the dataset and their affiliation(s)</td></tr><tr><td>Licensing Information</td><td>The license and link to the license webpage if available.</td></tr><tr><td>Citation Information</td><td>The BibTex-formatted reference for the dataset.</td></tr><tr><td>Contributions</td><td>&#x27;Thanks to @github-username for adding this dataset.&#x27;</td></tr></table>
62
+
63
+ Table 1: Community-Endorsed Dataset Card Structure. This table shows the sections and their suggested subsections provided by the Hugging Face community, along with their descriptions. For more information, please refer to https://github.com/huggingface/datasets/blob/main/templates/README_guide.md.
64
+
65
+ Community-Endorsed Dataset Card Structure Grounded in academic literature (Mitchell et al., 2019) and official guidelines from Hugging Face (HuggingFace, 2021), the Hugging Face community provides suggestions for what to write in each section. This community-endorsed dataset card provides a standardized structure for conveying key information about datasets. It generally contains 5 sections: Dataset Description, Dataset Structure, Dataset Creation, Considerations for Using the Data, and Additional Information (Table. 1). To examine the structure of dataset cards, we used a pipeline that detects exact word matches for each section title. We then identified the section titles and checked whether they had contents (Appendix B.1). If a dataset card had all five sections completed, we considered it to be following the community-endorsed dataset card.
66
+
67
+ Adherence to Community-Endorsed Guidelines Correlates with Popularity Our evaluation found that popular datasets have better adherence to the dataset card community-endorsed dataset card structure. As illustrated in Fig. 2, compliance with the template varies significantly among datasets with different download counts. Among the 7,433 dataset cards analyzed, $86.0\%$ of the top 100 downloaded dataset cards have completed all five sections of the community-endorsed dataset card, while
68
+
69
+ only $7.9\%$ of dataset cards with no downloads follow it. Fig. S5 further reveals that popular dataset cards achieve higher completion in all Hugging Face-recommended sections. This implies a potential correlation between adherence to community-endorsed guidelines and dataset popularity.
70
+
71
+ ![](images/c2a685790802b342d40580b25e718a109cf8b2424d7c0490a372417476ca4d90.jpg)
72
+ Figure 2: Highly downloaded datasets consistently show better compliance with the community-endorsed documentation structure.
73
+
74
+ # 4 PRACTITIONERS EMPHASIZE DESCRIPTION AND STRUCTURE OVER SOCIAL IMPACT AND LIMITATIONS
75
+
76
+ # Finding
77
+
78
+ - Practitioners seem to prioritize the Dataset Description and Dataset Structure sections, which account for $36.2\%$ and $33.6\%$ of the total card length, respectively, on the top 100 most downloaded datasets.
79
+ - In contrast, the Considerations for Using the Data section receives the lowest proportion of content, just $2.1\%$ . The Considerations for Using the Data section covers the social impact of datasets, discussions of biases, and limitations of datasets.
80
+
81
+ Social Impact, Dataset Limitations and Biases are Lacking in Most Documentations Following the community-endorsed dataset card, we conducted an analysis to determine the level of emphasis placed on each section. Fig. 3b shows the word count distribution among the top 100 downloaded dataset cards, revealing their high level of comprehensiveness: $91.0\%$ of them have a word count exceeding 200. We step further into these dataset cards to examine the emphasis placed on each section. We calculated the word count of each section and its proportion to the entire dataset card. As shown in Fig. 3c, the Dataset Description and Dataset Structure sections received the most attention, accounting for $36.2\%$ and $33.6\%$ of the dataset card length, respectively. On the other hand, the Considerations for Using the Data section received a notably low proportion of only $2.1\%$ .
82
+
83
+ Section Length Reflects Practitioner Attention The length of sections within dataset cards is reflective of practitioner attention, and it varies significantly based on the popularity of the dataset. Highly downloaded datasets tend to have more comprehensive and longer dataset cards (Fig. 3a), with an emphasis on the Dataset Description and Dataset Structure sections (Fig. 3d). Conversely, less popular datasets have shorter cards (Fig. 3a) with a greater emphasis on the Additional Information section (Fig. 3d). Despite this, sections such as Dataset Creation and Considerations for Using the Data consistently receive lower attention, regardless of download rates (Fig. 3d). This suggests a need to promote more comprehensive documentation, particularly in critical sections, to enhance dataset usage and facilitate ethical considerations.
84
+
85
+ ![](images/771733d5dad38b012a48e52bf864a60dd3fb9f12a6cd5e372f3420e93cbd370a.jpg)
86
+ a
87
+
88
+ ![](images/32300b90b79f1832c84641ad72db048c74d55daacbb7cf6c2534aec116ae1fdf.jpg)
89
+ b
90
+
91
+ ![](images/a5f9fb3d147dc9d9ec8987f3a17c9c9de0aee38438ae010c74d36ff30ff6d549.jpg)
92
c
93
+
94
+ ![](images/9585931c93cee00750ea65bee005c46434ca1f034a20cce744c3c277cfe9d6e1.jpg)
95
+ d
96
+ Figure 3: Section Length Reflects Practitioner Attention. (a) Popularity Correlates with Documentation Length: The top downloaded dataset cards are longer, indicating that they contain more comprehensive information. (b) Distribution of Word Count Among Top 100 Downloaded Dataset Cards (c) Section Length Proportions in Top 100 Downloaded Dataset Cards: The Dataset Description and Dataset Structure sections dominate in the top 100 downloaded dataset cards, with proportions of $36.2\%$ and $33.6\%$ , respectively. In contrast, the Considerations for Using the Data section receives the least attention, with a proportion of only $2.1\%$ . (d) Section Length Proportion Changes over Downloads: The section length proportion changes over downloads, with Dataset Description and Dataset Structure decreasing in length, and Additional Information and Other increasing. Notably, there is a consistently low emphasis placed on the Dataset Creation and Considerations for Using the Data sections across all dataset cards with different downloads.
97
+
98
+ # 5 UNDERSTANDING CONTENT DYNAMICS IN DATASET DOCUMENTATION
99
+
100
+ # Finding
101
+
102
+ - Strong Community Adherence to Subsection Guidelines: Practitioners contributing to the Hugging Face community exhibit high compliance with standards, filling out 14 of the 17 recommended subsections across five main sections at a rate exceeding $50\%$ .
103
+ - Emergence of the Usage Section Beyond the Community Template: Surprisingly, $33.2\%$ of dataset cards include a Usage section. The community template does not include such a Usage section in its current form and should include one in the future.
104
+
105
+ Section Content Detection Pipeline To gain a deeper understanding of the topics discussed in each section, we conducted a content analysis within each section of the community-endorsed dataset card structure, which includes suggested subsections within the five main sections. We used exact keyword matching to identify the corresponding subsections and calculate their filled-out rates. Fig. 4 shows that 14 out of 17 subsections have filled-out rates above $50\%$ , indicating adherence to the community-endorsed dataset cards.
106
+
107
+ Limitation Section is Rare, but Long if it Exists The Considerations for Using the Data section (i.e., limitation section), despite being frequently overlooked and often left empty by practitioners, holds particular significance. When this section is included, it tends to adhere well to community guidelines, with subsections having a completion rate exceeding $50\%$ and a reasonably substantial word count (98.2 words). This suggests that this section has the potential to provide valuable insights and guidance. This motivates our use of topic modeling to identify key discussion topics within this section, potentially aiding practitioners in crafting meaningful content.
108
+
109
+ ![](images/4eaf05077f9e7a87eaa644ada72af6834e94f283d684951aadefc6cf0325e3bc.jpg)
110
+
111
+ ![](images/f38e0cd6208e0151bb31a711e7899eb13c26173074c3f81cafb103dc19f20cad.jpg)
112
+ Figure 5: Key Topics in Considerations for Using the Data through Topic Modeling Analysis. This figure displays the outcomes of the topic modeling assessment on the contents of the (a) Social Impact of Dataset Subsection, (b) Discussion of Biases Subsection, and (c) Other Known Limitations Subsection. Each panel illustrates the human-assigned topic label and representative sentences for each section. Topics are generated by Latent Dirichlet Allocation (LDA).
113
+
114
+ ![](images/4762014e48f255e33431c60a15648386f3c5d0bb8b606647d8e4d78da9f780d0.jpg)
115
+
116
+ ![](images/73463b7e4a5ed615f2792d413d4dc0b4d793870bff175060d65f933d1b8f24a1.jpg)
117
+ Figure 4: Highlighting the Hugging Face Community's Compliance with Subsection Guidelines. This figure shows subsection filled-out rates within different sections, stratified by download counts. Each section has multiple subsections, with bars representing the filled-out rate of each subsection. Green texts indicate filled-out rates above $50\%$ , while red texts indicate rates below $50\%$ . Of the 17 subsections within the five sections of the community-endorsed dataset, 14 have filled-out rates above $50\%$ .
118
+
119
+ ![](images/1c7c883c676cae175ec3d82756b3cd7d9489c3fbe53c2a588efb99a29a9a7fe5.jpg)
120
+
121
+ ![](images/3d979d7d918758f6ad329c715bea5f94e6fb4d66282ad297209640e0184b2b6d.jpg)
122
+
123
+ <table><tr><td colspan="2">a Social Impact of Dataset</td></tr><tr><td>Topic</td><td>Representative Sentences</td></tr><tr><td>Technical or Research Scope</td><td>• Adding a Spanish resource may help others to improve their research and educational activities.
124
+ • The creation of the dataset contributes to expanding the scope of NLP research to under-explored languages across the world.</td></tr><tr><td>Social Scope or Background</td><td>• This dataset can be used to gain insights into the social, cultural, and political views of people in African countries.
125
+ • If this matter isn&#x27;t tackled with enough urgency, we might see the rise of a new dark era in Latin America politics, where many unscrupulous parties and people will manage to gain power and control the lives of many people.</td></tr></table>
126
+
127
+ <table><tr><td colspan="2">b Discussion of Biases</td></tr><tr><td>Topic</td><td>Representative Sentences</td></tr><tr><td>Subpopulation Biases</td><td>·Gender speakers distribution is imbalanced, percentage of female speakers is mostly lower than 50% across languages.
128
+ ·The social biases of the time in terms of race, sex, gender, etc. might be encountered in this dataset.</td></tr><tr><td>Biases from Collection Procedure</td><td>·With respect to the potential risks, we note that the subjectivity of human annotation would impact on the quality of the dataset.
129
+ ·In terms of data collection, by using keywords and user mentions, we are introducing some bias to the data, restricting our scope to the list of keywords and users we created.</td></tr></table>
130
+
131
+ <table><tr><td>c</td><td>Other Known Limitations</td></tr><tr><td>Topic</td><td>Representative Sentences</td></tr><tr><td>Data Quality</td><td>The nature of the task introduces a variability in the quality of the target translations.A number of errors, omissions and inconsistencies are expected to be found within the corpus.</td></tr><tr><td>ProcessingLimitation</td><td>Our augmentation process can sometimes create nonexistent versions of real people.Satellite annotation is not as accurate for pixel-level representation due to single-point annotations.</td></tr></table>
132
+
133
+ Limitation Section Covers Diverse and Crucial Topics The Considerations for Using the Data section (i.e., limitation section) encompasses diverse and crucial topics. The Hugging Face community emphasizes three major themes within this section: Social Impact of Dataset, Discussion of Biases, and Other Known Limitations.
134
+
135
+ The Social Impact of Dataset aspect explores not only societal implications but also the potential benefits to technology and research communities. In this section, practitioners discuss issues like
136
+
137
+ how the dataset can expand the scope of NLP research (Armstrong et al., 2022), and increase access to natural language technology across diverse regions and cultures (Tache et al., 2021). Additionally, the subsection covers sensitive topics related to politics, ethics, and culture within the social scope.
138
+
139
+ Discussion of Biases delves into subpopulation bias and data collection biases, highlighting the importance of addressing bias-related issues. Previous research has identified numerous technical and social biases such as subgroup bias (Buolamwini & Gebru, 2018), data collection bias (Wang et al., 2019), and label bias (Jiang & Nachum, 2020). Our topic modeling results reveal that two primary biases are discussed by practitioners in this subsection. The first is subpopulation bias, which includes biases related to gender, age, or race. For instance, an audio dataset (Nsoesie & Galea, 2022) notes that female speakers are underrepresented, comprising less than $50\%$ of the dataset. The second major bias arises from the data collection process, specifically the annotation process, which is often a significant bottleneck and source of errors.
140
+
141
+ Lastly, Other Known Limitations focuses on technical limitations, particularly data quality and processing limitations. This comprehensive coverage underscores the multifaceted nature of considerations related to dataset usage. Data quality is often a focus in other disciplines, such as the social sciences and biomedicine, and there are many insights to draw upon (Paullada et al., 2021; Fedorov, 2010; Fan & Geerts, 2012). Meanwhile, processing limitations encompass a broader range of issues beyond biases from the collection procedure, such as inaccuracies or the absence of some data points.
142
+
143
+ Emergence of the Usage Section Beyond the Community Template While Hugging Face's community-endorsed dataset card structure comprises five main sections, there are instances where practitioners encounter valuable information that doesn't neatly fit into these sections. These additional sections, referred to as Other sections, can contain important content. Notably, among these Other sections, discussions related to Usage emerge as a frequent (nearly one-third of the time, $33.2\%$ ) and significant theme. These Usage sections offer a diverse range of information, including details on downloading, version specifications, and general guidelines to maximize the dataset's utility. This highlights the importance of considering content that falls outside the predefined template and suggests a potential area for improvement in dataset card templates.
144
+
145
+ Quantifying the Impact of Usage Section on Dataset Downloads To assess the influence of a Usage section in dataset documentation, we conducted a counterfactual analysis experiment (Appendix C). We trained a BERT (Devlin et al., 2018) model using dataset card content and download counts, which were normalized to fall within the range of [0, 1] for meaningful comparisons. When a dataset card that initially included a Usage section had this section removed, there was a substantial decrease of $1.85\%$ in downloads, with statistical significance. This result underscores the significant impact of the Usage section in bolstering dataset accessibility and popularity, emphasizing its pivotal role in enhancing the documentation and usability of datasets.
146
+
147
+ # 6 ANALYZING HUMAN PERCEIVED DATASET DOCUMENTATION QUALITY
148
+
149
+ # Finding
150
+
151
+ - Our human annotation evaluation emphasizes the pivotal role of comprehensive dataset content in shaping individuals' perceptions of a dataset card's overall quality.
152
+
153
+ Human Annotations for Comprehensive Evaluation of Dataset Card Quality We utilized human annotations to evaluate the quality of dataset cards, considering seven distinct aspects, drawing from prior research in dataset documentation literature and the Hugging Face community-endorsed dataset card (Afzal et al., 2020; Gebru et al., 2021; Papakyriakopoulos et al., 2023; Barman et al., 2023; Costa-jussa et al., 2020): (1) Structural Organization, (2) Content Comprehensiveness, (3) Dataset Description, (4) Dataset Structure, (5) Dataset Preprocessing, (6) Usage Guidance, and (7) Additional Information. While Dataset Description, Dataset Structure, and Additional Information can be found in sections of community-endorsed dataset cards, we added evaluation aspects highlighted in the literature, like aspects that constitute the overall presentation (Structural Organization and Content Comprehensiveness), Data Preprocessing and Usage Guidance. To conduct this assessment, we randomly selected a subset containing 150 dataset cards and engaged five human annotators. These
154
+
155
+ annotators were tasked with evaluating each dataset card across these seven aspects and providing an overall quality score on a scale of 1 to 5 (Appendix B.2). The overall quality is assessed through the subjective perception of human annotators, taking into account the seven aspects as well as their overall impression. This evaluation approach aims to provide a comprehensive assessment of dataset card quality, reflecting the importance of these aspects in effective dataset documentation.
156
+
157
+ Human Perception of Documentation Quality Strongly Aligns with Quantitative Analysis Human annotation evaluation of dataset cards shows varying scores across different aspects. While Dataset Description (2.92/5), Structural Organization (2.82/5), Data Structure (2.7/5), and Content Comprehensiveness (2.48/5) received relatively higher scores, areas like Data Preprocessing (1.21/5) and Usage Guidance (1.14/5) scored lower. This aligns with the quantitative analysis that indicates a greater emphasis on the Dataset Description and Dataset Structure sections. Notably, even the highest-scoring aspect, Dataset Description, falls below $60\%$ of the highest possible score, indicating room for improvement in dataset documentation.
158
+
159
+ Content Comprehensiveness has the strongest positive correlation with the overall quality of a dataset card (Coefficient: 0.3935, p-value: 3.67E-07), emphasizing the pivotal role of comprehensive dataset content in shaping individuals' perceptions of a dataset card's overall quality. Additionally, aspects like Dataset Description (Coefficient: 0.2137, p-value: 3.04E-07), Structural Organization (Coefficient: 0.1111, p-value: 2.17E-03), Data Structure (Coefficient: 0.0880, p-value: 6.49E-03), and Data Preprocessing (Coefficient: 0.0855, p-value: 2.27E-03) also significantly contribute to people's evaluations of dataset documentation quality. Moreover, the length of a dataset card is positively related to Content Comprehensiveness (p-value: 1.89E-011), reinforcing the importance of detailed documentation in enhancing dataset quality and usability.
160
+
161
+ # 7 RELATED WORKS
162
+
163
+ Dataset has long been seen as a significant constraint in the realm of machine learning research (Halevy et al., 2009; Sun et al., 2017). The process of creating datasets remains arduous and time-intensive, primarily due to the costs of curation and annotation (IBM, 2020). Moreover, the quality of data assumes a pivotal role in shaping the outcomes of machine learning research (Liang et al., 2022). Consequently, a profound understanding of datasets is indispensable in the context of machine learning research, and this understanding is most effectively conveyed through comprehensive dataset documentation.
164
+
165
+ A long-standing problem in the literature is that there is no industry standard being formed about data documentation. Therefore, much existing work in the literature has been in exploring, conceptualizing and proposing different dataset documentation frameworks. Data-focused tools such as datasheets for datasets and data nutrition labels have been proposed to promote communication between dataset creators and users, and address the lack of industry-wide standards for documenting AI datasets (Bender & Friedman, 2018; Bender et al., 2021; Pushkarna et al., 2022; Gebru et al., 2021; Holland et al., 2018; Chmielinski et al., 2022; Papakyriakopoulos et al., 2023). Additionally, there are studies that concentrate on leveraging human-centered methods to scrutinize the design and evaluation aspects of dataset documentation (Fabris et al., 2022; Mahajan & Shaikh, 2021; Hanley et al., 2020; Hutiri et al., 2022). In the library domain, numerous works have proposed methods to tackle the absence of universally accepted guidelines for publishing library-linked data. These efforts are aimed at enhancing data quality, promoting interoperability, and facilitating the discoverability of data resources (Villazón-Terrazas et al., 2011; Hidalgo-Delgado et al., 2017; Abida et al., 2020). These tools and frameworks provide detailed information on the composition, collection process, recommended uses, and other contextual factors of datasets, promoting greater transparency, accountability, and reproducibility of AI results while mitigating unwanted biases in AI datasets. Additionally, they enable dataset creators to be more intentional throughout the dataset creation process. Consequently, datasheets and other forms of data documentation are now commonly included with datasets, helping researchers and practitioners to select the most appropriate dataset for their particular needs.
166
+
167
+ Despite the proliferation of dataset documentation tools and the growing emphasis on them, the current landscape of dataset documentation remains largely unexplored. In this paper, we present
168
+
169
+ a comprehensive analysis of AI dataset documentation on Hugging Face to provide insights into current dataset documentation practices.
170
+
171
+ # 8 DISCUSSION
172
+
173
+ In this paper, we present a comprehensive large-scale analysis of 7,433 AI dataset documentation on Hugging Face. The analysis offers insights into the current state of adoption of dataset cards by the community, evaluates the effectiveness of current documentation efforts, and provides guidelines for writing effective dataset cards. Overall, our main findings cover 5 aspects:
174
+
175
+ - Varied Adherence to Community-Endorsed Dataset Card: We observe that high-download datasets tend to adhere more closely to the community-endorsed dataset card structure.
176
+ - Varied Emphasis on Sections: Our analysis of individual sections within dataset cards reveals that practitioners place varying levels of emphasis on different sections. For instance, among the top 100 downloaded dataset cards, Dataset Description and Dataset Structure sections receive the most attention. In contrast, the Considerations for Using the Data section garners notably lower engagement across all downloads, with only approximately $2\%$ of dataset cards containing this section. This discrepancy can be attributed to the section's content, which involves detailing limitations, biases, and the societal impact of datasets – a more complex and nuanced endeavor. An internal user study conducted by Hugging Face (HuggingFace, 2022) also identified the Limitation section within this category as the most challenging to compose.
177
+ - Topics Discussed in Each Section: Our examination of subsections within each section of dataset cards reveals a high completion rate for those suggested by the Hugging Face community. This highlights the effectiveness of the community-endorsed dataset card structure. In particular, our study places a special focus on the Considerations for Using the Data section, employing topic modeling to identify key themes, including technical and social aspects of dataset limitations and impact.
178
+ - Importance of Including Usage Sections: We observe that many dataset card creators go beyond the recommended structure by incorporating Usage sections, which provide instructions on effectively using the dataset. Our empirical experiment showcases the potential positive impact of these Usage sections in promoting datasets, underscoring their significance.
179
+ - Human Evaluation of Dataset Card Quality: Our human evaluation of dataset card quality aligns well with our quantitative analysis. It underscores the pivotal role of Content Comprehensiveness in shaping people's assessments of dataset card quality. This finding offers clear guidance to practitioners, emphasizing the importance of creating comprehensive dataset cards. Moreover, we establish a quantitative relationship between Content Comprehensiveness and the word length of dataset cards, providing a measurable method for evaluation.
180
+
181
+ **Limitations and Future Work** Our analysis of ML dataset documentation relies on the distinctive community-curated resource, Hugging Face, which may introduce biases and limitations due to the platform's structure and coverage. For example, Hugging Face's NLP-oriented concentration could introduce biases into the dataset categories. However, our method is transferable and could easily be reproduced for another platform, facilitating future studies (Appendix E). Additionally, our analysis of completeness and informativeness is based on word count and topic modeling, which may not fully capture the nuances of the documentation. Furthermore, measuring dataset popularity based on downloads alone may not fully reflect the dataset's impact. Future research could consider additional factors, such as the creation time of the dataset and research area of the dataset (Appendix D). Lastly, our human evaluation serves as a preliminary evaluation. Future analyses could involve a more diverse group of annotators with varying backgrounds and perspectives.
182
+
183
+ Research Significance To summarize, our study uncovers the current community norms and practices in dataset documentation, and demonstrates the importance of comprehensive dataset documentation in promoting transparency, accessibility, and reproducibility in the AI community. We hope to offer a foundation step in the large-scale empirical analysis of dataset documentation practices and contribute to the responsible and ethical use of AI while highlighting the importance of ongoing efforts to improve dataset documentation practices.
184
+
185
+ # REPRODUCIBILITY STATEMENT
186
+
187
+ We have assembled a collection of dataset cards as a community resource, which includes extracted metadata such as the number of downloads and textual analyses. This resource along with our analysis code can be accessed at https://github.com/YoungXinyu1802/HuggingFace-Dataset-Card-Analysis. The Hugging Face datasets can be accessed through the Hugging Face Hub API, which is available at https://huggingface.co/docs/huggingface_hub/package_reference/hf_api.
188
+
189
+ # ACKNOWLEDGMENTS
190
+
191
+ We thank Yian Yin and Nazneen Rajani for their helpful comments and discussions. J.Z. is supported by the National Science Foundation (CCF 1763191 and CAREER 1942926), the US National Institutes of Health (P30AG059307 and U01MH098953) and grants from the Silicon Valley Foundation and the Chan-Zuckerberg Initiative.
192
+
193
+ # REFERENCES
194
+
195
+ Rabeb Abida, Emma Hachicha Belghith, and Anthony Cleve. An end-to-end framework for integrating and publishing linked open government data. In 2020 IEEE 29th International Conference on Enabling Technologies: Infrastructure for Collaborative Enterprises (WETICE), pp. 257-262, 2020. doi: 10.1109/WETICE49692.2020.00057.
196
+ Shazia Afzal, Rajmohan C, Manish Kesarwani, Sameep Mehta, and Hima Patel. Data readiness report, 2020.
197
+ Ruth-Ann Armstrong, John Hewitt, and Christopher Manning. Jampatoisnli: A jamaican patois natural language inference dataset. arXiv preprint arXiv:2212.03419, 2022.
198
+ Nabajeet Barman, Yuriy Reznik, and Maria Martini. Datasheet for subjective and objective quality assessment datasets, 2023.
199
+ Emily M Bender and Batya Friedman. Data statements for natural language processing: Toward mitigating system bias and enabling better science. Transactions of the Association for Computational Linguistics, 6:587-604, 2018.
200
+ Emily M Bender, Batya Friedman, and Angelina McMillan-Major. A guide for writing data statements for natural language processing, 2021.
201
+ Joy Buolamwini and Timnit Gebru. Gender shades: Intersectional accuracy disparities in commercial gender classification. In Conference on fairness, accountability and transparency, pp. 77-91. PMLR, 2018.
202
+ Kasia S Chmielinski, Sarah Newman, Matt Taylor, Josh Joseph, Kemi Thomas, Jessica Yurkofsky, and Yue Chelsea Qiu. The dataset nutrition label (2nd gen): Leveraging context to mitigate harms in artificial intelligence. arXiv preprint arXiv:2201.03954, 2022.
203
+ Marta R. Costa-jussà, Roger Creus, Oriol Domingo, Albert Dominguez, Miquel Escobar, Cayetana López, Marina Garcia, and Margarita Geleta. Mt-adapted datasheets for datasets: Template and repository, 2020.
204
+ Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: pre-training of deep bidirectional transformers for language understanding. CoRR, abs/1810.04805, 2018. URL http://arxiv.org/abs/1810.04805.
205
+ Alessandro Fabris, Stefano Messina, Gianmaria Silvello, and Gian Antonio Susto. Tackling documentation debt: A survey on algorithmic fairness datasets. In Proceedings of the 2nd ACM Conference on Equity and Access in Algorithms, Mechanisms, and Optimization, EAAMO '22, New York, NY, USA, 2022. Association for Computing Machinery. ISBN 9781450394772. doi: 10.1145/3551624.3555286. URL https://doi.org/10.1145/3551624.3555286.
206
+ Wenfei Fan and Floris Geerts. Foundations of data quality management. Synthesis Lectures on Data Management, 4(5):1-217, 2012.
207
+
208
+ Valerii Fedorov. Optimal experimental design. Wiley Interdisciplinary Reviews: Computational Statistics, 2(5):581-589, 2010.
209
+ Timnit Gebru, Jamie Morgenstern, Briana Vecchione, Jennifer Wortman Vaughan, Hanna Wallach, Hal Daumé III, and Kate Crawford. Datasheets for datasets. Commun. ACM, 64(12):86-92, nov 2021. ISSN 0001-0782. doi: 10.1145/3458723. URL https://doi.org/10.1145/3458723.
210
+ Benjamin Haibe-Kains, George Alexandru Adam, Ahmed Hosny, Farnoosh Khodakarami, Massive Analysis Quality Control (MAQC) Society Board of Directors Shraddha Thakkar 35 Kusko Rebecca 36 Sansone Susanna-Assunta 37 Tong Weida 35 Wolfinger Russ D. 38 Mason Christopher E. 39 Jones Wendell 40 Dopazo Joaquin 41 Furlanello Cesare 42, Levi Waldron, Bo Wang, Chris McIntosh, Anna Goldenberg, Anshul Kundaje, et al. Transparency and reproducibility in artificial intelligence. Nature, 586(7829):E14-E16, 2020.
211
+ Alon Halevy, Peter Norvig, and Fernando Pereira. The unreasonable effectiveness of data. IEEE intelligent systems, 24(2):8-12, 2009.
212
+ Margot Hanley, Apoorv Khandelwal, Hadar Averbuch-Elor, Noah Snavely, and Helen Nissenbaum. An ethical highlighter for people-centric dataset creation, 2020.
213
+ Yusniel Hidalgo-Delgado, Reina Estrada-Nelson, Bin Xu, Boris Villazon-Terrazas, Amed Leiva-Mederos, and Andres Tello. Methodological guidelines for publishing library data as linked data. In 2017 International Conference on Information Systems and Computer Science (INCISCOS), pp. 241-246, 2017. doi: 10.1109/INCISCOS.2017.17.
214
+ Sarah Holland, Ahmed Hosny, Sarah Newman, Joshua Joseph, and Kasia Chmielinski. The dataset nutrition label: A framework to drive higher data quality standards. arXiv preprint arXiv:1805.03677, 2018.
215
+ HuggingFace. Huggingface dataset card guidebook, 2021. URL https://huggingface.co/docs/datasets/dataset_card. Accessed: 2023-05-23.
216
+ HuggingFace. Model card user studies. https://huggingface.co/docs/hub/model-cards-user-studies, 2022.
217
+ Wiebke Toussaint Hutiri, Lauriane Gorce, and Aaron Yi Ding. Design guidelines for inclusive speaker verification evaluation datasets, 2022.
218
+ Matthew Hutson. Artificial intelligence faces reproducibility crisis, 2018.
219
+ IBM. Overcome obstacles to get to ai at scale. https://www.ibm.com/downloads/cas/VBMPEQLN, 2020.
220
+ Heinrich Jiang and Ofir Nachum. Identifying and correcting label bias in machine learning. In International Conference on Artificial Intelligence and Statistics, pp. 702-712. PMLR, 2020.
221
+ Bernard Koch, Emily Denton, Alex Hanna, and Jacob Gates Foster. Reduced, reused and recycled: The life of a dataset in machine learning research. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 2), 2021. URL https://openreview.net/forum?id=zNQBIBKJRkd.
222
+ TK Koo and MY Li. A guideline of selecting and reporting intraclass correlation coefficients for reliability research. J Chiropr Med, 15(2):155-163, 2016.
223
+ Weixin Liang, Girmaw Abebe Tadesse, Daniel Ho, L Fei-Fei, Matei Zaharia, Ce Zhang, and James Zou. Advances, challenges and opportunities in creating data for trustworthy ai. Nature Machine Intelligence, 4(8):669-677, 2022.
224
+ Khyati Mahajan and Samira Shaikh. On the need for thoughtful data collection for multi-party dialogue: A survey of available corpora and collection methods. In Haizhou Li, Gina-Anne Levow, Zhou Yu, Chitralekha Gupta, Terrak Sisman, Siqi Cai, David Vandyke, Nina Dethlefs, Yan Wu, and Junyi Jessy Li (eds.), Proceedings of the 22nd Annual Meeting of the Special Interest Group on Discourse and Dialogue, pp. 338-352, Singapore and Online, July 2021. Association for Computational Linguistics. URL https://aclanthology.org/2021.sigdial-1.36.
225
+
226
+ Margaret Mitchell, Simone Wu, Andrew Zaldivar, Parker Barnes, Lucy Vasserman, Ben Hutchinson, Elena Spitzer, Inioluwa Deborah Raji, and Timnit Gebru. Model cards for model reporting. In Proceedings of the conference on fairness, accountability, and transparency, pp. 220-229, 2019.
227
+ Elaine O Nsoesie and Sandro Galea. Towards better Data Science to address racial bias and health equity. *PNAS Nexus*, 1(3), 07 2022. ISSN 2752-6542. doi: 10.1093/pnasnexus/pgac120. URL https://doi.org/10.1093/pnasnexus/pgac120.
228
+ Orestis Papakyriakopoulos, Anna Seo Gyeong Choi, William Thong, Dora Zhao, Jerone Andrews, Rebecca Bourke, Alice Xiang, and Allison Koenecke. Augmented datasheets for speech datasets and ethical decision-making. In 2023 ACM Conference on Fairness, Accountability, and Transparency. ACM, jun 2023. doi: 10.1145/3593013.3594049. URL https://doi.org/10.1145%2F3593013.3594049.
229
+ Amandalynne Paullada, Inioluwa Deborah Raji, Emily M. Bender, Emily Denton, and Alex Hanna. Data and its (dis)contents: A survey of dataset development and use in machine learning research. Patterns, 2(11):100336, nov 2021. doi: 10.1016/j.patter.2021.100336. URL https://doi.org/10.1016%2Fj.patter.2021.100336.
230
+ Mahima Pushkarna, Andrew Zaldivar, and Oddur Kjartansson. Data cards: Purposeful and transparent dataset documentation for responsible ai. In 2022 ACM Conference on Fairness, Accountability, and Transparency, pp. 1776-1826, 2022.
231
+ Victoria Stodden, Jennifer Seiler, and Zhaokun Ma. An empirical analysis of journal policy effectiveness for computational reproducibility. Proceedings of the National Academy of Sciences, 115(11): 2584-2589, 2018.
232
+ Chen Sun, Abhinav Shrivastava, Saurabh Singh, and Abhinav Gupta. Revisiting unreasonable effectiveness of data in deep learning era. In Proceedings of the IEEE international conference on computer vision, pp. 843-852, 2017.
233
+ Anca Maria Tache, Mihaela Gaman, and Radu Tudor Ionescu. Clustering word embeddings with self-organizing maps. application on laroseda - a large romanian sentiment data set. ArXiv, 2021.
234
+ Boris Villazón-Terrazas, Luis M Vilches-Blázquez, Oscar Corcho, and Asunción Gómez-Pérez. Methodological guidelines for publishing government linked data. Linking government data, pp. 27-49, 2011.
235
+ Tianlu Wang, Jieyu Zhao, Mark Yatskar, Kai-Wei Chang, and Vicente Ordonez. Balanced datasets are not enough: Estimating and mitigating gender bias in deep image representations. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 5310-5319, 2019.
236
+
237
+ # A ILLUSTRATIONS FOR DATASET CARDS SUGGESTED BY HUGGING FACE COMMUNITY
238
+
239
+ a
240
+
241
+ # Table of Contents
242
+
243
+ - Dataset Card Creation Guide
244
+
245
+ Table of Contents
246
+
247
+ Dataset Description
248
+
249
+ Dataset Summary
250
+
251
+ Supported Tasks and Leaderboards
252
+
253
+ - Languages
254
+
255
+ Dataset Structure
256
+
257
+ Data Instances
258
+
259
+ Data Fields
260
+
261
+ Data Splits
262
+
263
+ Dataset Creation
264
+
265
+ Curation Rationale
266
+
267
+ Source Data
268
+
269
+ Initial Data Collection and Normalization
270
+
271
+ Who are the source language producers?
272
+
273
+ - Annotations
274
+
275
+ - Annotation process
276
+
277
+ Who are the annotators?
278
+
279
+ Personal and Sensitive Information
280
+
281
+ $\circ$ Considerations
282
+
283
+ Social Impact of Dataset
284
+
285
+ Discussion of Biases
286
+
287
+ Other Known Limitations
288
+
289
+ Additional Int
290
+
291
+ Dataset
292
+ - Licensing Information
293
+ Citation Information
294
+ Contributions
295
+
296
+ C
297
+
298
+ Datasets: argilla/news-summary
299
+
300
+ ![](images/a981781bc5a2724f2b69a3719b1a9e7f59f79058590d8036530e4d2fdb0ed348.jpg)
301
+
302
+ ![](images/b14f9a792fd1b7051e0c9534d552dea80dfaa495e2875a8f68d8320b3e222d71.jpg)
303
+
304
+ ![](images/8b304047ab0a3ca17d98211d1db90a16a5e49e31578f943e5be726451a511205.jpg)
305
+
306
+ ![](images/c5f35eed2e5ded76a2bee2423a78e437f254583185fb40e67746c7b3faf85c3b.jpg)
307
+
308
+ ![](images/6e91eebe75cc2dd9f72dcd5893c6129abb95f48db8ffb052774d36290c58061d.jpg)
309
+
310
+ ![](images/ba110c5cfc8beda2a4c3486c192057169ecfb22e4e609e3cf4a44d095971e74e.jpg)
311
+ Dataset Card for "news-summary"
312
+
313
+ Dataset Description
314
+
315
+ Homepage: Kaggle Challenge
316
+ Repository: https://www.kaggle.com/datasets/clmentbisailon/fake-and-real
317
+ Paper: N.A.
318
+ Leaderboard: N.A.
319
+ Point of Contact: N.A.
320
+
321
+ Dataset Summary
322
+
323
+ Officially it was supposed to be used for classification but, can you use this data set
324
+
325
+ Languages
326
+
327
+ english
328
+
329
+ Citation Information
330
+
331
+ Acknowledgements
332
+
333
+ Ahmed H, Traore I, Saad S. "Detecting opinion spams and fake news using text clas
334
+
335
+ "Detection of Online Fake News Using N-Gram Analysis and Machine Learning Tech
336
+
337
+ Environments. ISDDC 2017. Lecture Notes in Computer Science, vol 10618. Springer
338
+
339
+ Contributions
340
+
341
+ Thanks to @davidberenstein1957 for adding this dataset
342
+
343
+ b
344
+
345
+ ![](images/11c60a9284a7473ded5af4fc562baf8cef9ea9beb2c16c97af1bbb4ad4780bd3.jpg)
346
+
347
+ d
348
+
349
+ Datasets: HuggingFaceM4/cm4-synthetic-testing
350
+
351
+ License: bigscience-openrail-m
352
+
353
+ ![](images/2a0e768394a2a36f2a1c3c38ea19348caf81ec5af297afe40dac478d004a0e70.jpg)
354
+
355
+ ![](images/7eae68506e67bddd7fbe70e60a0357af008f1162ded34dd882b92ddad7f50bbe.jpg)
356
+
357
+ ![](images/ed3636e4f74e3c92e8796f816c70f9c4d0ff081f32b34c2548abdf3c3b7b3c60.jpg)
358
+
359
+ ![](images/a576662a486eb151e9dc3733bebf366626c9b3cb6727cacadaba51eb8d9765b1.jpg)
360
+ Figure S1: Illustration of Adherence to Community-Endorsed Dataset Card. (a) Community-Endorsed Dataset Card Structure: Hugging Face community provides a suggested dataset card structure, which contains five main sections: Dataset Description, Dataset Structure, Dataset Creation, Considerations for Using the Data, and Additional Information. (b) Example of a Dataset Card Conforming to the Community Guidelines: A dataset card is considered to conform to the community guidelines when it includes the five main sections outlined in the community guidelines, with the corresponding content provided for each section. (c) Example of Dataset Cards Not Following Community Guidelines (1): A dataset card is considered non-conforming if it omits any of the five main sections provided in the suggested dataset card structure. (d) Example of Dataset Cards Not Following Community Guidelines (2): This dataset card contains only a few words and does not follow the structure at all.
361
+
362
+ metadata
363
+
364
+ This dataset is designed to be used in testing multimodal text/image models.
365
+
366
+ The current splits are: ['100.unique', '100repeat', '300.unique',
367
+
368
+ The unique ones ensure uniqueness across text entries.
369
+
370
+ The repeat ones are repeating the same 10 unique records: - these are useful
371
+
372
+ The default split is 100.unique.
373
+
374
+ The full process of this dataset creation is documented inside cm4-synthetic
375
+
376
+ # B METHOD
377
+
378
+ # B.1 ACCESSING AND PARSING DATASET CARDS
379
+
380
+ In this work, we analyze datasets hosted on Hugging Face, a popular platform that provides a wealth of tools and resources for AI developers. One of its key features is the Hugging Face Hub API, which grants access to a large library of pre-trained models and datasets for various tasks. With this API, we obtained all 24,065 datasets hosted on the Hub as of March 16th, 2023.
381
+
382
+ Dataset cards are Markdown files that serve as the README for a dataset repository. They provide information about the dataset and are displayed on the dataset's homepage. We downloaded all dataset repositories hosted on Hugging Face and extracted their README files to get the dataset cards. For further analysis of the documentation content, we utilized the Python package mistune (https://mistune.readthedocs.io/en/latest/) to parse the README file and extract the intended content. The structure of dataset cards typically consists of five sections: Dataset Description, Dataset Structure, Dataset Creation, Additional Information, and Considerations for Using the Data, as recommended by the Hugging Face community. Examples of dataset cards, as shown in Fig. S1, illustrate the essential components and information provided by dataset cards. We identified and extracted different types of sections through parsing and word matching of the section heading. A significant $84\%$ of the section titles in the 7,433 dataset cards matched one of the 27 titles suggested by the HuggingFace community using the exact keyword matching. This strong alignment underscores the effectiveness of exact keyword matching as an analytical tool.
383
+
384
+ # B.2 HUMAN-ANNOTATED DATASET CARD EVALUATION METHODOLOGY AND CRITERIA
385
+
386
+ We conducted an evaluation on a sample of 150 dataset cards from a total of 7,433. The assessment involved five human annotators to evaluate the dataset cards, who are PhD students with a solid background in AI fields such as NLP, Computer Vision, Human-AI, ML, and Data Science. Their extensive experience with datasets ensured a deep understanding of dataset documentation. To confirm the reliability of our evaluation, we randomly sampled 30 dataset cards for the annotators to assess and achieved an Intraclass Correlation Coefficient (ICC) of 0.76, which is considered a good agreement (Koo & Li, 2000). This high level of agreement, combined with the annotators' deep expertise in AI research, substantially reinforces the trustworthiness of the annotation results. We focused on seven key aspects of the dataset cards drawing from prior research in dataset documentation and the Hugging Face community-endorsed dataset card:
387
+
388
+ <table><tr><td>Aspect</td><td>Description</td></tr><tr><td>Structural Organization</td><td>How well is the documentation structured with headings, sections, or subsections?</td></tr><tr><td>Content Comprehensiveness</td><td>How comprehensive is the information provided in the documentation?</td></tr><tr><td>Dataset Description</td><td>How effectively does the documentation describe the dataset?</td></tr><tr><td>Dataset Preprocessing</td><td>How well does the documentation describe any preprocessing steps applied to the data?</td></tr><tr><td>Usage Guidance</td><td>How well does the documentation offer guidance on using the dataset?</td></tr><tr><td>Additional Information</td><td>How well does the documentation provide extra details such as citations and references?</td></tr></table>
389
+
390
+ Table S1: Descriptions of Evaluation Aspects
391
+
392
+ Each aspect received a score on a scale from 0 to 5, with the following score metrics:
393
+
394
+ <table><tr><td>Score</td><td>Description</td><td>Comment</td></tr><tr><td>5</td><td>Exceptionally comprehensive and effective</td><td>Covers all subsections in detail</td></tr><tr><td>4</td><td>Very good and thorough</td><td>Includes many subsections comprehensively</td></tr><tr><td>3</td><td>Moderately satisfactory</td><td>Covers some subsections adequately</td></tr><tr><td>2</td><td>Insufficient</td><td>Provides a basic, general overview</td></tr><tr><td>1</td><td>Poor and inadequate</td><td>Offers minimal, vague content</td></tr><tr><td>0</td><td>Absent</td><td>Lacks relevant content</td></tr></table>
395
+
396
+ Table S2: Metrics of the Scores
397
+
398
+ # C ADDITIONAL ANALYSIS OF Usage SECTION
399
+
400
+ Among 7,433 dataset cards, there are 567 dataset cards uploaded by 52 distinct practitioners that contain a Usage section, instructing how to use the dataset through text and codes. A specific example of Usage section is from ai4bharat/naamapadam, which has 469 downloads and has a Usage section to instruct how to use the dataset (Fig. S2).
401
+
402
+ ![](images/c0d5b10dc8df5c17ce9b65dc0456b497cd392c1df0d9282a3cce0ec9ca898cdb.jpg)
403
+ Figure S2: Example of a Usage Section
404
+
405
+ Intuitively, a Usage section could give users quick instructions on how to use the dataset, which could make the dataset more accessible, transparent, and reproducible. To verify this intuition, we conduct an experiment to quantify how the Usage section will affect the dataset's popularity.
406
+
407
+ In our experiment, we trained a BERT (Devlin et al., 2018) Model using the content of dataset cards and their corresponding download counts. To ensure comparability, the download counts were normalized to a range of [0,1] and stratified monthly based on the dataset's creation time. This ranking system assigned a rank of 1 to the dataset with the highest downloads within a given month, and a rank of 0 to the dataset with the lowest downloads.
408
+
409
+ Using the dataset card content, the trained BERT Model predicted the download counts. Subsequently, we conducted a test using 567 dataset cards that included a Usage section. For this test, we deliberately removed the Usage section from the dataset cards and employed the BERT Model to predict the download counts for these modified cards. The resulting predictions are summarized in Table. S3. The average predicted score of downloads after removing the Usage section is 0.0185 lower compared to the original dataset card. This indicates a decrease in the number of downloads, highlighting the negative impact of not including a Usage section.
410
+
411
+ In future research, it would be valuable to further investigate the effect of adding a Usage section to the dataset cards that do not have one originally. A randomized controlled trial (RCT) experiment could be conducted to assess whether the inclusion of a Usage section leads to an increase in downloads.
412
+
413
+ <table><tr><td>Condition</td><td>Predicted Score of Downloads</td></tr><tr><td>With Usage Section</td><td>0.3917</td></tr><tr><td>Without Usage Section</td><td>0.3732</td></tr><tr><td>Change in Score</td><td>-0.0185</td></tr></table>
414
+
415
+ Table S3: Predicted Impact of Usage Section on Dataset Downloads. This table presents a comparative analysis of predicted download scores for dataset cards, distinguishing between those that include a Usage Section and those from which it has been removed. It indicates a potential decrease in download rates following the removal of the Usage Section.
416
+
417
+ # D OPTIONAL METRICS FOR DATASETS
418
+
419
+ In our analysis, we employ downloads as a metric to gauge the popularity of the dataset. Numerous factors can influence the download count, including the dataset's publication date and its associated research field. Moreover, aside from dataset downloads, we can incorporate other indicators of dataset popularity, such as the count of models utilizing the datasets and the corresponding download counts.
420
+
421
+ To address the concerns of factors that might affect downloads, we expanded our dataset analysis by extracting more metadata from the Hugging Face dataset information. We collected data such as the models utilizing the corresponding dataset, the total number of downloads for these models, and the dataset's task domain. The primary dataset tasks recognized by Hugging Face encompass Multimodal, Computer Vision, Natural Language Processing, Audio, Tabular and Reinforcement Learning. Among the total of 7,433 dataset cards, 1,988 are categorized as NLP dataset cards, 198 are related to computer vision, and 102 pertain to multimodal datasets. We proceeded with additional analysis by employing the following metrics:
422
+
423
+ 1. We integrated dataset downloads ("direct usage") with the downloads of models employing the dataset ("secondary usage").
424
+ 2. A time range (measured in months) was selected, encompassing dataset cards created within the designated time frame and specified task domain.
425
+ 3. Selected dataset cards were ranked within each domain for each time range and then normalized to a range of [0, 1].
426
+
427
+ By adopting this approach, we were able to compare dataset cards created in the same month and task domain, assessing them based on the metrics of direct and secondary usage metrics. We conducted a word count analysis using this new metric and attained results consistent with our prior analysis that datasets with higher rankings tend to have longer dataset cards, as shown in Fig. S3.
428
+
429
+ ![](images/f88b10872a7abaf0169b5a1058dc915a061602ae27b5a097c01c942290dbb060.jpg)
430
+ Figure S3: Word Count Variation Based on Direct and Secondary Usage Rankings. This figure demonstrates the relationship between the length of dataset cards and their rankings in terms of direct and secondary usage. It reveals a distinct pattern: dataset cards with higher rankings tend to have a greater word count, suggesting a correlation with more thorough and detailed content.
431
+
432
+ ![](images/9b0917cca908daeda53e20788345b1134386e72adacfbb1022efb523c14c02ef.jpg)
433
+
434
+ ![](images/23e7b8d0a62982a78bb7d5e50d1dac9609bc707029164afca561e3724c589d33.jpg)
435
+
436
+ The finding enables us to contemplate an alternative metric option, factoring in publication time, research area, and secondary dataset usage. However, the results remain aligned with our previous analysis, which solely considered download counts, highlighting the reasonableness of using download counts as metrics.
437
+
438
+ # E APPLICABILITY ACROSS PLATFORMS: ADAPTING TO GITHUB
439
+
440
+ Our study demonstrates strong potential for application across various platforms. The foundational format of Hugging Face's dataset cards, essentially README files, is a prevalent documentation standard shared by many platforms, notably GitHub. This commonality implies that our approach to parsing and analyzing dataset cards can be readily adapted for broader studies. To illustrate, we present an example of how our analysis methodology can be effectively applied to GitHub, a widely recognized open-source platform for data and code sharing.
441
+
442
+ Our expanded analysis involved sourcing datasets from a GitHub repository of Papers With Code<sup>1</sup>. We chose repositories linked to dataset-relevant papers and processed their README files using the pipeline proposed in our paper on Hugging Face dataset card analysis. This exploration revealed a more varied structure in GitHub's dataset cards. For example, $57\%$ of the section titles on GitHub are unique, compared to just $3\%$ on Hugging Face. Due to their specificity, we excluded these unique sections and created a categorization list based on Hugging Face's community-endorsed dataset card structure, mapping GitHub's titles through keyword matching. This method successfully categorized $74\%$ of GitHub's section titles.
443
+
444
+ As shown in Table. S4, our analysis reveals that both platforms excel in Dataset Description and Additional Information sections but underperform in Dataset Creation and Considerations for Using the Data, underscoring points raised in our paper. A notable difference is GitHub's lower emphasis on Dataset Structure, highlighting the potentially positive impact of Hugging Face's community-endorsed dataset structure. Furthermore, the prevalence of Usage and Experiment sections on GitHub, absent in Hugging Face, highlights the practical value of these sections in promoting the usability of datasets. Adopting these sections, as suggested in our paper, could enrich the structure of Hugging Face's dataset cards, making them more comprehensive and practically useful.
445
+
446
+ These results indicate our method's adaptability to other platforms and provide a benchmark for evaluating dataset documentation elsewhere. The insights from our Hugging Face study can guide the categorization and enhancement of dataset documentation across various platforms, especially in the current situation that most other platforms don't have a standardized dataset card structure.
447
+
448
+ <table><tr><td>Section Type</td><td>GitHub</td><td>Hugging Face</td><td>Description</td></tr><tr><td>Dataset Description</td><td>0.62</td><td>0.46</td><td>Summary, leaderboard, languages, etc.</td></tr><tr><td>Dataset Structure</td><td>0.09</td><td>0.34</td><td>Format, fields, splits, etc.</td></tr><tr><td>Dataset Creation</td><td>0.08</td><td>0.15</td><td>Motivation, collection procedures, etc.</td></tr><tr><td>Considerations for Using the Data</td><td>0.02</td><td>0.08</td><td>Limitations, biases, disclaimers, etc.</td></tr><tr><td>Additional Information</td><td>0.62</td><td>0.58</td><td>Citations, acknowledgements, licensing, etc.</td></tr><tr><td>Experiment</td><td>0.57</td><td>-</td><td>Model experiments, training, evaluation on the dataset, etc.</td></tr><tr><td>Usage</td><td>0.38</td><td>-</td><td>Instructions for setup, installation, requirements, etc.</td></tr></table>
449
+
450
+ Table S4: Comparison of Fill-out Rate of Dataset Documentation on GitHub and Hugging Face. Dataset cards from both GitHub and Hugging Face perform well in the Dataset Description and Additional Information sections, but fall short in the Dataset Creation and Considerations for Using the Data sections. While GitHub places less emphasis on Dataset Structure, it shows a higher occurrence of Usage and Experiment sections.
451
+
452
+ # F ADDITIONAL FIGURES AND TABLES
453
+
454
+ ![](images/cb4b880800e6359a79db59029abceba1bab471fd42419df94464989a6302df0a.jpg)
455
+ Figure S4: Power Law Distribution Patterns in Dataset Usage across Task Domains. This figure illustrates the dataset usage distribution within each task domain, demonstrating a consistent power law distribution, despite the variations in the number of datasets across different domains.
456
+
457
+ ![](images/2d09a2f7a7b7be78ffeb746ead5910f00ec6455eb2e8d727d619196b3dd83228.jpg)
458
+ Figure S5: Highly Downloaded Dataset Cards Exhibit Greater Completion across All Sections. This figure indicates that the top 100 downloaded dataset cards exhibit a higher completion rate compared to all dataset cards in the sections recommended by the Hugging Face community. However, there is a consistently low completion rate in the Dataset Creation and Considerations for Using the Data sections, regardless of the dataset cards' popularity.
459
+
460
+ <table><tr><td>Category</td><td>Description</td><td>Dataset Card Number</td><td>Adherence to Guidelines</td><td>Avg. Word Count</td></tr><tr><td>Industry organization</td><td>Companies (e.g. Hugging Face, Facebook)</td><td>2,527</td><td>0.34</td><td>219</td></tr><tr><td>Academic organization</td><td>Universities, Research Labs (e.g. Stanford CRFM, jhu-clsp)</td><td>985</td><td>0.31</td><td>427</td></tr><tr><td>Community</td><td>Non-profit Communities (e.g. allenai, bio-datasets)</td><td>1,387</td><td>0.27</td><td>190</td></tr><tr><td>Industry professional</td><td>Engineers, Industry Scientists</td><td>985</td><td>0.25</td><td>256</td></tr><tr><td>Academic professional</td><td>Students, Postdocs, Faculty</td><td>672</td><td>0.16</td><td>180</td></tr><tr><td>All dataset cards</td><td>7,433 dataset cards analyzed</td><td>7,433</td><td>0.29</td><td>234</td></tr></table>
461
+
462
+ Table S5: Differences in the Practices of Dataset Documentation across Creators from Different Backgrounds. This table highlights the diverse documentation practices across creators from different backgrounds. Industry organizations, with the most creators, adhere to the guidelines best. Academics, though fewer, offer the most comprehensive documentation, while academic professionals exhibit lower guideline adherence and shorter word counts. The information about these creators is gathered from their linked GitHub, Twitter, and personal websites on their Hugging Face profiles.
2401.13xxx/2401.13822/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6c3643ab28380f4cc7124e0bf8488a6887e377971d6504579210f004e64d0116
3
+ size 869381
2401.13xxx/2401.13822/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.13xxx/2401.13835/5f871f8e-55f5-463f-b5a5-14517ab789e0_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.13xxx/2401.13835/5f871f8e-55f5-463f-b5a5-14517ab789e0_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.13xxx/2401.13835/5f871f8e-55f5-463f-b5a5-14517ab789e0_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5f23d64efbc8d8c3a709487ac3931fc8742646917dcf2e31954f1b25541c7ff0
3
+ size 1445568
2401.13xxx/2401.13835/full.md ADDED
@@ -0,0 +1,477 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # What Large Language Models Know and What People Think They Know
2
+
3
+ Mark Steyvers
4
+
5
+ MARK.STEYVERS@UCI.EDU
6
+
7
+ Department of Cognitive Sciences, University of California, Irvine
8
+
9
+ Heliodoro Tejeda
10
+
11
+ HTEJEDA@UCI.EDU
12
+
13
+ Department of Cognitive Sciences, University of California, Irvine
14
+
15
+ Aakriti Kumar
16
+
17
+ AAKRITK@UCI.EDU
18
+
19
+ Department of Cognitive Sciences, University of California, Irvine
20
+
21
+ Catarina Belem
22
+
23
+ CBELEM@UCI.EDU
24
+
25
+ Department of Computer Science, University of California, Irvine
26
+
27
+ Sheer Karny
28
+
29
+ SKARNY@UCI.EDU
30
+
31
+ Department of Cognitive Sciences, University of California, Irvine
32
+
33
+ Xinyue Hu
34
+
35
+ XHU26@UCI.EDU
36
+
37
+ Department of Cognitive Sciences, University of California, Irvine
38
+
39
+ Lukas Mayer
40
+
41
+ LWMAYER@UCI.EDU
42
+
43
+ Department of Cognitive Sciences, University of California, Irvine
44
+
45
+ Padhraic Smyth
46
+
47
+ SMYTH@ICS.UCI.EDU
48
+
49
+ Department of Computer Science, University of California, Irvine
50
+
51
+ # Abstract
52
+
53
+ As artificial intelligence systems, particularly large language models (LLMs), become increasingly integrated into decision-making processes, the ability to trust their outputs is crucial. To earn human trust, LLMs must be well calibrated such that they can accurately assess and communicate the likelihood of their predictions being correct. Whereas recent work has focused on LLMs' internal confidence, less is understood about how effectively they convey uncertainty to users. Here we explore the calibration gap, which refers to the difference between human confidence in LLM-generated answers and the models' actual confidence, and the discrimination gap, which reflects how well humans and models can distinguish between correct and incorrect answers. Our experiments with multiple-choice and short-answer questions reveal that users tend to overestimate the accuracy of LLM responses when provided with default explanations. Moreover, longer explanations increased user confidence, even when the extra length did not improve answer accuracy. By adjusting LLM explanations to better reflect the models' internal confidence, both the calibration gap and the discrimination gap narrowed, significantly improving user perception of LLM accuracy. These findings underscore the importance of accurate uncertainty communication and highlight the effect of explanation length in influencing user trust in artificial-intelligence-assisted decision-making environments.
54
+
55
+ Keywords: Large Language Models, LLMs, Calibration, Trust, Explanations, User Confidence
56
+
57
+ # 1. Introduction
58
+
59
+ Uncertainty communication plays a critical role in decision-making and policy development. Uncertainties are often expressed verbally to help stakeholders understand risks and make informed choices across a wide range of domains, including climate policy, law, medicine, and intelligence forecasting. Psychology research has investigated perceptions of verbally expressed uncertainty (e.g., phrases such as "very unlikely", or "almost certain") in these domains Budescu et al. (2014); Ho et al. (2015); Karelitz et al. (2002); Wallsten et al. (2008); O'Brien (1989). Despite their lack of precision in communicating probabilities, verbal probability phrases provide a simple and effective way to communicate uncertainty in natural language contexts. The emergence of large language models (LLMs) introduces new complexities in the area of uncertainty communication. These models are increasingly integrated into areas such as public health Ali et al. (2023), coding Zambrano et al. (2023), and education Whalen et al. (2023). However, the question of how effectively LLMs communicate uncertainty is unexplored. As the primary mode of communication with LLMs is through natural language, it is critical to understand if LLMs are able to accurately convey through verbal means what they know or do not know.
60
+
61
+ Recent research raises doubts about the reliability of the information that LLMs generate. One notable issue is the possibility of generating responses that, while convincing, may be inaccurate or nonsensical Jo (2023); Huang et al. (2023). The unreliability of LLMs has led developers of LLMs to caution against uncritical acceptance of model outputs OpenAI (2022b), suggesting that it is not always clear when the models are or are not confident in the knowledge communicated to the user.
62
+
63
+ At the same time, recent research has also indicated that LLMs have the ability, to a certain degree, to accurately discern their own knowledge boundaries. LLMs in particular can exhibit a reasonable level of calibration for multiple-choice questions such that the probability the model assigns to a selected answer tracks with the probability that this answer is correct Achiam et al. (2023); Kadavath et al. (2022); Srivastava et al. (2023). In addition, recent studies show that LLMs can distinguish between answerable and unanswerable questions Yin et al. (2023); Kadavath et al. (2022) and the internal state of an LLM can distinguish between truthful statements and lies Azaria and Mitchell (2023) and truthful statements and confabulations Farquhar et al. (2024). These findings suggest that LLMs may possess an internal mechanism that is reflective of self-knowledge.
64
+
65
+ In the specific context of question-answering, an LLM's "model confidence" is typically equated to the probability assigned by the LLM to the selected answer relative to other possible answers (e.g., Jiang et al. (2021); Hendrycks et al. (2021)). However, from the perspective of a human interacting with the LLM, this internal model confidence is not usually displayed to human users as part of LLM output. Instead, in current practice, humans rely solely on the language produced by the LLM in order to assess LLM confidence. To contrast with model confidence, in this paper we use the term "human confidence" to refer to a human's assessment (expressed as a probability) of how likely it is that the LLM's answer is correct based only on the language produced by the LLM without any knowledge of the LLM's internal model confidence.
66
+
67
+ Surprisingly, studies focused on investigating human confidence in LLM outputs are lacking. In this paper, we take a step in addressing this issue and investigate what we term
68
+
69
+ “the calibration gap”, namely the difference in the reliability of LLM model confidence and human confidence. In effect, the calibration gap represents the gap between an LLM's own internal confidence of what it knows and human perception of this confidence. In addition, we investigate “the discrimination gap”, which relates to the difference in the ability to discriminate between likely correct and incorrect answers. Any discrimination gap shows that whatever internal LLM representation is used to tell the difference between likely correct and incorrect answers is not conveyed effectively to humans. We address two specific research questions in this context. First, how large are the calibration and discrimination gaps? i.e., is there a significant gap between LLM model confidence and human confidence in terms of how each assesses the true accuracy of the LLM? Second, can the calibration and discrimination gaps be reduced? Can the quality of human confidence in an LLM be improved by adapting the textual output of the LLM to internal model confidence? These questions have important implications for the design of reliable LLM assistants. By aligning the LLM's internal confidence with human perception of this confidence, we can bridge the gap between what LLMs know and what people think they know, which is crucial for the development of effective and trustworthy assistants
70
+
71
+ Our contributions in this context are twofold. First, we present a set of experimental studies and dataset that directly captures human assessment of LLM confidence in a question-answering context, providing insight into human perceptions of LLM textual responses. Second, we test and suggest ways of generating LLM responses that improve the calibration quality of human confidence relative to the LLM assistant's model confidence and the LLM's true accuracy.
72
+
73
+ # 1.1 Large Language Models
74
+
75
+ We use three publicly available LLMs in our studies: GPT-3.5 OpenAI (2022a), PaLM2 Anil et al. (2023), and GPT-4o. We apply the GPT-3.5 and PaLM2 models to a subset of multiple-choice questions from the Massive Multitask Language Understanding (MMLU) dataset, a comprehensive dataset that contains multiple-choice questions from various knowledge domains, such as STEM, humanities, social sciences, and more Hendrycks et al. (2021). We apply the GPT-4o model to a subset of short-answer questions from the Trivia QA data set Joshi et al. (2017). For each multiple-choice and short-answer question, we assess model confidence by computing the token likelihoods (see Methods for details). This method for reading out model confidence allows for a direct computation of the relative probabilities of different possible answers in multiple-choice questions Jiang et al. (2021); Kadavath et al. (2022); Xiao et al. (2022); Hendrycks et al. (2021); Achiam et al. (2023) and the probability that the answer to an open-ended question is correct Kadavath et al. (2022); Farquhar et al. (2024). We investigate the relationship between model confidence and accuracy to determine whether the LLM is reasonably well-calibrated, independent of the LLM's ability to elicit well-calibrated confidence from humans who use the LLM.
76
+
77
+ # 2. Methodology
78
+
79
+ We designed behavioral experiments to evaluate human perceptions of LLM confidence. In these experiments, participants estimate the probability that the LLM's answer to a multiple-choice or short-answer question is correct based on the explanation that the LLM
80
+
81
+ ![](images/79ce2aa03f1515b39f87e34b6edb7f6b25c1379ee20829bdcd1760cf2d61e0db.jpg)
82
+ Figure 1: Overview of the evaluation methodology for assessing the calibration gap between model confidence and human confidence in the model. For multiple choice questions (top), the approach works as follows: (1) prompt the LLM with a question to obtain the model's internal confidence for each answer choice; (2) select the most likely answer and prompt the model a second time to generate an explanation for the given answer; (3) obtain the human confidence by showing users the question and the LLM's explanation and asking users to indicate the probability that the model is correct. In this toy example the model confidence for the multiple choice question is 0.46 for answer C, whereas the human confidence is 0.95. For short-answer questions, the approach is similar except that internal model confidence is obtained by an additional step where the LLM is prompted to evaluate whether the previously provided answer to the question is true or false Kadavath et al. (2022). In the short-answer question example, the LLM model explanation was modified with uncertainty language to convey the low model confidence (0.18). For the two toy examples, the correct answers are "A" and "blue bird".
83
+
84
+ provided (see Figure 1). Participants are not provided any direct access to the LLM's numerical model confidence, allowing us to make inferences about participants' perceptions of the confidence of the LLM based on model explanations alone. In addition, for the multiple-choice questions part of the experiment only, with the assistance of the LLM, participants provided answers to the questions. Previous research has demonstrated that the MMLU multiple-choice questions are difficult for participants who lack domain expertise, resulting in near-chance accuracy Hendrycks et al. (2021). We anticipate that these questions will be difficult to answer without the assistance of the LLM because the majority of the partici
85
+
86
+ pants in our experiments lack domain expertise, and their perception of the explanation's content will influence their evaluation more than their own knowledge.
87
+
88
+ We conducted two experiments each involving the three types of LLMs and two types of questions (see Table 1 for an overview). Experiment 1 assesses human perceptions of LLM accuracy using the LLM's default explanations for either multiple-choice or short-answer questions. The results from this experiment allow us to address the first research question regarding the size of the calibration and discrimination gap between model and human confidence. Experiment 2 manipulates the prompts to produce three levels of uncertainty language (low, medium, and high confidence) and three levels of explanation length, resulting in nine different types of explanations presented to participants. The prompts are designed to include uncertainty language corresponding to model confidence at the start of the explanation. Table 6 illustrates explanations from a particular multiple-choice question used in the experiments (see Supplementary Table 2 for the full model explanations). The results from this experiment serve two purposes. First, we establish that human confidence varies with the uncertainty language and the length of the explanation. Next, we use the results from Experiment 2 to answer the second research question, which is to understand how the calibration and discrimination gap can be reduced by aligning the uncertainty language with model confidence—showing a low/medium/high confidence explanation when the model has low/medium/high confidence. The Supplementary Information ("Experiment 3") reports the results from an additional experiment with a different prompting approach that alters the default explanations from Experiment 1. We use the two metrics to assess the relationship between human and model confidence and model accuracy. See Section 2.4 Metrics for details.
89
+
90
+ # 2.1 Question data sets
91
+
92
+ MMLU dataset for multiple choice questions. The MMLU dataset is a comprehensive multitask dataset that contains multiple-choice questions from various knowledge domains, such as STEM, humanities, social sciences, and more Hendrycks et al. (2021). In total, there are 14042 test set questions from 57 categories curated by undergraduate and graduate students from freely available online resources such as the GRE and USMLE. These questions range in difficulty from high-school to the professional level. The MMLU dataset is widely employed to measure a text model's multitask accuracy, as it challenges models on their real-world text understanding beyond mere linguistic comprehension, thus making it a robust benchmark for model evaluation Hendrycks et al. (2021); Hoffmann et al. (2022); Rae et al. (2021). For this research, we sampled a subset of 350 questions from a range of model confidence levels in 10 select categories from the full dataset to comprehensively evaluate people's assessment of LLM model confidence.
93
+
94
+ Trivia QA dataset for short answer questions. Trivia QA is a data set of trivia questions that can be answered in short answers Joshi et al. (2017). Similar to methodology by Farquhar et al. (2024), contextual information was excluded to make the question answering more challenging for LLMs and more suitable for our behavioral experiments. For this research, we assessed model confidence for 5000 questions from the original 650K dataset before selecting a final sample of 336 questions from a range of model confidence
95
+
96
+ levels. The final set of questions was categorized into 7 different topics (culture & society, entertainment, geography, history, politics, science & technology and sports).
97
+
98
+ # 2.2 Assessing model confidence and creating question subsets
99
+
100
+ Several approaches have been developed to elicit confidence in LLMs and to assess the degree to which the elicited confidence scores are calibrated (see Geng et al. (2023) for an overview). In this research, we use an approach commonly used to access internal model information based on token likelihoods, allowing for direct computation of relative probabilities of different possible answers in multiple-choice questions Jiang et al. (2021); Kadavath et al. (2022); Xiao et al. (2022); Hendrycks et al. (2021); Achiam et al. (2023). In addition, the token-likelihood approach can be extended to short-answer questions such that the token-likelihood reflects the model confidence that the LLM answer is correct Kadavath et al. (2022).
101
+
102
+ Methods that do not require access to internal model representations have used prompting strategies designed to elicit verbal expressions of uncertainty Xiong et al. (2024); Zhou et al. (2023). Confidence is expressed in natural language as numeric strings (e.g., "80%") Lin et al. (2022); Xiong et al. (2024) or more qualitative expressions of confidence (e.g., "I am not confident the answer is X"). Prompts designed to emphasize step-by-step reasoning about the correctness of individual steps and to clarify the space of possible answers lead to better calibration than simple prompts that merely ask for a confidence rating Xiong et al. (2024). For short-form question answering, prompting strategies can lead to calibrated confidence levels Tian et al. (2023). However, prompting approaches have been found to be less accurate compared to methods that read out model confidence Xiong et al. (2024).
103
+
104
+ Multiple choice questions. For the multiple choice questions, we followed the procedures based on reading out the internal token likelihoods as described in the GPT-4 Technical Report Achiam et al. (2023). We used a zero-shot prompting approach, in which the model was only prompted with the target question and its associated answer options (Extended Data Figure 1). We first assessed the LLM model confidence of GPT-3.5 and PaLM2 language models to 14042 MMLU multiple-choice questions. This allowed us to then select questions with (somewhat) evenly distributed confidence levels. We read out the log-probabilities for the top 5 tokens completed by the model using the APIs for the GPT3.5 (gpt-3.5-turbo-instruct) and the PaLM2 (text-bison@002) models. The temperature parameter was set to 0. The answer was considered complete if the tokens included the single letters A, B, C, and D. The log scores were then converted and normalized to probabilities across the four answer options (so that the sum of the scores equaled one). In this research, internal uncertainties, referred to in this paper as "model confidence", were represented by these probabilities in all experiments, a common technique in calibration assessment with LLMs Jiang et al. (2021); Kadavath et al. (2022); Xiao et al. (2022); Hendrycks et al. (2021); Achiam et al. (2023).
105
+
106
+ Based on the model confidence levels of each LLM for all MMLU questions, we created a subset separately for each LLM. In total, 35 questions were sampled for each of 10 topics, for a total of 350 questions. For each topic, the 35 questions were sampled to approximately create a uniform distribution over model confidence using the confidence bins: 0.2-0.4, 0.4-0.6, 0.6-0.8, and 0.8-1.0. However, due to the small number of questions that lead to model
107
+
108
+ confidence in the lowest confidence bin, fewer questions were sampled for the 0.2-0.4 confidence range. Supplementary Figure 1 shows the distribution over model confidence levels for the entire MMLU dataset as well as the question subset sampled for our study. Model accuracy across the 350 questions is $55\%$ and $50\%$ for GPT-3.5 and PaLM2, respectively.
109
+
110
+ Short-answer questions. For the short-answer questions, we used a procedure based on the "pTrue" method Kadavath et al. (2022) to assess internal model confidence. All experiments with short-answer questions were performed with the API for the GPT-4o model (gpt-4o-mini) with the temperature parameter set to 0.7 (similar to Kadavath et al. (2022) and Farquhar et al. (2024)). The model was first prompted to generate the answer to each of the 5000 trivia questions in the sample. To ensure that the model response was restricted to short answers, we used a 10-shot prompting approach where the prompt contained the target question preceded by a random sample of 10 trivia questions with their reference answers. Median answer length was 2 words.
111
+
112
+ To assess model confidence for short-answer questions, as shown in Figure 1 (bottom panel), we prompted the model with the question, the proposed answer, and asked it to determine whether the proposed answer is true or false (see Extended Data Figure 1 for an example of the exact prompt). The log scores for the true and false answer options were then converted and normalized to probabilities across the two answer options. Model confidence in our experiments corresponds to the probability for the true answer option.
113
+
114
+ For the behavioral experiments, we created a subset of 336 questions to ensure a uniform distribution across four confidence bins: 0-0.25, 0.25-0.50, 0.50-0.75, and 0.75-1.0. Supplementary Figures 1 and 2 show the distribution of model confidence levels for the 5000-question sample and the 336-question subset used in our behavioral experiments. Model accuracy across the 336 questions is $63\%$ .
115
+
116
+ We used both automatic and human scoring methods to assess model accuracy. For the 5000 question sample, we prompted an LLM (GPT-4o) to determine whether the reference answer from the Trivia QA had the same meaning as the LLM answer within the context of the question. For the 336 question sample, we additionally applied human scoring. For $97\%$ of questions, automatic and human scoring agreed. Model accuracy for the 336 question subset was based on human evaluation.
117
+
118
+ # 2.3 Behavioral Experiments
119
+
120
+ This section describes the methodology we used for our behavioral experiments. Experiment 1 presented default explanations from LLMs to participants, whereas Experiment 2 presented explanations that were altered by different types of uncertainty language and overall length (see Table 1 for an overview of all experiments). Within each experiment, across different groups of participants, we varied the type of question as well as the type of LLM. Experiments 1a and 2a used explanations from GPT-3.5 for the MMLU multiple-choice questions. Experiments 1b and 2b used explanations from PaLM2 for the MMLU multiple-choice questions. Finally, Experiments 1c and 2c used explanations from GPT-4o for the Trivia QA short-answer questions. The Supplementary Information ("Experiment 3") describes the results from an additional Experiment 3, which was conducted to verify that our results generalize to different ways to vary the type of uncertainty language in the explanations.
121
+
122
+ Table 1: Overview of experiments.
123
+
124
+ <table><tr><td>Experiment</td><td>Question Type</td><td>LLM</td><td>Explanation Type</td><td>Number of Participants</td></tr><tr><td>1a</td><td>Multiple Choice</td><td>GPT-3.5</td><td>Default explanations</td><td>41</td></tr><tr><td>1b</td><td>Multiple Choice</td><td>PaLM2</td><td>Default explanations</td><td>39</td></tr><tr><td>1c</td><td>Short Answer</td><td>GPT-4o</td><td>Default explanations</td><td>42</td></tr><tr><td>2a</td><td>Multiple Choice</td><td>GPT-3.5</td><td>Modified explanations</td><td>60</td></tr><tr><td>2b</td><td>Multiple Choice</td><td>PaLM2</td><td>Modified explanations</td><td>60</td></tr><tr><td>2c</td><td>Short Answer</td><td>GPT-4o</td><td>Modified explanations</td><td>59</td></tr></table>
125
+
126
+ # 2.3.1 PARTICIPANTS
127
+
128
+ A total of 301 participants completed the study across Experiments 1 and 2 (Table 1 shows the breakdown by experiment). Participants were native English speakers residing in the United States, recruited through Prolific (www.prolific.com). Demographic data was obtained for 284 participants. There were 146 female and 138 male participants. The median age was 34 (age range from 18 to 79). The University of California, Irvine Institutional Review Board (IRB) approved the experimental protocol. Participants who completed Experiments 1a, 1b, 2a, or 2b were paid $8 USD for their participation. Participants in Experiments 1c and 2c required less time to complete the study and were paid $5 USD. The payments across experiments corresponded to a rate of approximately $12/hr. Prior to the experiment, participants were given detailed instructions outlining the experimental procedure as well as how to understand and interact with the user interface. Participants were asked to sign an integrity pledge after reading all of the instructions, stating that they would complete the experiment to the best of their abilities. After submitting their integrity pledge, participants were granted access to the experiment.
129
+
130
+ # 2.3.2 EXPERIMENTAL PROCEDURE
131
+
132
+ Across all experiments, participants were randomly assigned 40 questions (from the pool of 350 multiple-choice questions or the pool of 336 short-answer questions). The questions were sampled to balance across model confidence bins ensuring that all participants were exposed to questions at all levels of difficulty.
133
+
134
+ Furthermore, in Experiments 2a, 2b, and 2c, we balanced the types of explanation styles across questions so that each question was presented approximately the same number of times with each style. It should be noted that for each subject, each question was presented only once, and each question received only one explanation style. The counterbalancing, on the other hand, ensured that the same question had (roughly) an equal number of observations for each explanation style (across participants).
135
+
136
+ For the multiple choice questions, the participant's task was divided into two phases for each question (Extended Data Figure 2). In the first stage, participants had to provide a probability estimate that the LLM's answer was correct. In the second phase, participants had to answer the question with the assistance of the LLM. Participants were instructed to use their own knowledge as well as the LLM's response when making their own answer
137
+
138
+ selection for this phase. For the short-answer questions, participants only had to provide a probability estimate that the LLM's answer was correct. They were instructed not to look up the answer. For the short-answer questions, the question-answering phase was omitted to prevent participants from looking up the answers.
139
+
140
+ At the end of the experiments, we administered a brief survey in which participants self-assessed their knowledge of topics associated with the multiple choice and short answer questions. Participants were asked to estimate the expected accuracy for each topic if they were presented with questions similar to those they encountered during the experiment.
141
+
142
+ # 2.3.3 CREATING EXPLANATION STYLES WITH VARYING DEGREES OF UNCERTAINTY
143
+
144
+ This section describes how we constructed prompts to elicit explanations with varying levels of uncertainty language in the experiments. Table 6 contains examples of explanations from the two experiments and styles. For additional details on the exact prompts used, see PT 3.
145
+
146
+ Table 2: Example explanations for the answer by GPT-3.5 to a multiple-choice question used in Experiments 1a and 2a. The particular question is: A moving $1.5\mathrm{kg}$ cart collides with and sticks to a $0.5\mathrm{kg}$ cart which was initially at rest. Immediately after the collision, the carts each have the same ______ as each other. Choose from the following options: A) Velocity B) Kinetic energy C) Mass D) Linear momentum. The correct answer is A.
147
+
148
+ <table><tr><td>Prompt Style</td><td>Explanation</td></tr><tr><td>Experiment 1aBaseline</td><td>The answer is [A] Velocity. This is because in an inelastic collision, the two objects involved will stick together and move with a common velocity after the collision. [...] In this case, the initial momentum of the 1.5-kg cart is transferred to the combined mass of the two carts after the collision, resulting in the same velocity for both carts. Therefore, the correct answer is [A] Velocity.</td></tr><tr><td>Experiment 2aLow Confidence &amp; Long</td><td>I am not completely sure that the answer is [A] because velocity is a vector quantity that includes both magnitude and direction. In this scenario, the carts may have the same magnitude of velocity, but they may not necessarily have the same direction. Therefore, I am not completely convinced that the answer is [A]. [...] In this case, the carts would indeed have the same velocity as each other.</td></tr><tr><td>Medium Confidence &amp; Long</td><td>I am somewhat sure that the answer is [A] because velocity is a vector quantity that takes into account both the speed and direction of an object&#x27;s motion. In this scenario, the two carts are colliding and sticking together, [...] Therefore, I am somewhat sure that the answer is [A] because the velocity of the two carts will be the same after the collision due to the conservation of momentum principle.</td></tr><tr><td>High Confidence &amp; Long</td><td>I am sure that the answer is [A] because velocity is a vector quantity that describes the speed and direction of an object&#x27;s motion. In this scenario, the two carts are colliding and sticking together, meaning they will have the same final velocity after the collision. [...] This is also supported by the fact that the carts have different masses, so their kinetic energies and linear momenta will not be the same. 
Thus, the correct answer is [A] velocity.</td></tr><tr><td>Low Confidence &amp; Short</td><td>I am not sure that the answer is [A] because the carts may have different velocities before and after the collision due to the conservation of momentum.</td></tr><tr><td>Medium Confidence &amp; Short</td><td>I am somewhat sure that the answer is [A] because velocity is a vector quantity that takes into account both the speed and direction of an object&#x27;s motion. In this scenario, the carts will have the same velocity after the collision because they are stuck together and moving in the same direction.</td></tr><tr><td>High Confidence &amp; Short</td><td>I am sure that the answer is [A] because velocity is a vector quantity that is conserved in collisions, meaning that the final velocity of the two carts will be the same.</td></tr><tr><td>Low Confidence &amp; Uncertainty Only</td><td>I am not sure that the answer is [A].</td></tr><tr><td>Medium Confidence &amp; Uncertainty Only</td><td>I am somewhat sure that the answer is [A].</td></tr><tr><td>High Confidence &amp; Uncertainty Only</td><td>I am sure that the answer is [A].</td></tr></table>
149
+
150
+ Experiment 1: baseline explanations. To generate the baseline explanations for Experiment 1, we used a simple prompt that asked for an explanation, which would also repeat the answer.
151
+
152
+ Experiment 2: modified explanations. In Experiment 2, explanations were manipulated in terms of the level of confidence expressed in the answer as well as the length of the answer. In total, the experiment included nine types of explanations (three levels of uncertainty x three levels of length). The three levels of confidence (low, medium, and high) were generated by prompts that instructed the LLM to "mention you are not sure/somewhat sure/sure" in the explanations respectively. The prompts elicited responses in which the beginning of each explanation indicated the level of uncertainty (e.g., "I am not sure the answer is [B] because" for the low confidence prompt). Note that expressions of uncertainty were not limited to the start of the explanation. Answers often contained additional explanations for why the LLM lacked confidence (e.g., "further research may be required to confirm this," "it is not possible to definitively state that..."). Experiment 2 also varied the length of the explanation across three levels: long, short, and uncertainty only. The long explanations were generated by not including any instruction regarding the length of the answer. The short explanations were generated by adding an instruction to use as few words as possible in the explanation. The uncertainty-only explanations were generated by removing the rationale for the answer and included only the expression of uncertainty and the answer (e.g., "I am not sure the answer is [B]").
153
+
154
+ For Experiment 2, the median lengths of the long and short explanations were: 115 and 34 words (GPT-3.5, Multiple Choice), 64 and 24 words (PaLM2, Multiple Choice) and 95 and 24 words (GPT-4o, Short-Answer). In comparison, the uncertainty-only responses contained a median of 9 words across all variants of Experiment 2.
155
+
156
+ # 2.4 Metrics
157
+
158
+ To investigate the relationship between the accuracy of answers to the multiple-choice and short-answer questions and the confidence (either human confidence or model confidence) associated with them, we utilize a range of metrics to evaluate this association. The primary focus is on understanding how well confidence levels correlate with the correctness of answers. To achieve this, we use both Expected Calibration Error (ECE) and the Area under the Curve (AUC) metric. These metrics assess the extent of overconfidence in predictions as well as the diagnostic effectiveness of confidence scores in distinguishing between correct and incorrect answers Xiong et al. (2024); Tian et al. (2023); Jiang et al. (2021); Kadavath et al. (2022); Xiao et al. (2022). The use of AUC in this context parallels various metrics in psychology for metacognitive discrimination or sensitivity, which similarly aim to evaluate the effectiveness of confidence scores in distinguishing between correct and incorrect answers Fleming and Lau (2014). In addition, in the Supplementary Information ("Overconfidence Error"), we also show results for the additional metric of Overconfidence Error (OE).
159
+
160
+ # 2.4.1 EXPECTED CALIBRATION ERROR (ECE)
161
+
162
+ We evaluate miscalibration using the Expected Calibration Error (ECE), as detailed in Guo et al. (2017); Naeini et al. (2015). ECE is calculated by averaging the absolute differences between accuracy and confidence across $M$ equal-width probability bins:
163
+
164
+ $$
165
+ \mathrm{ECE} = \sum_{m=1}^{M} \frac{\left| B_{m} \right|}{N} \left| \operatorname{conf}\left(B_{m}\right) - \operatorname{acc}\left(B_{m}\right) \right| \tag{1}
166
+ $$
167
+
168
+ where $N$ represents the total sample count, $B_{m}$ the $m$ th confidence bin, and $\text{acc}(B_m)$ and $\text{conf}(B_m)$ denote the accuracy and average confidence for samples in the $m$ th bin. ECE does not account for the direction of deviations between accuracy and confidence per bin, so a nonzero ECE can indicate a mix of over- and underconfidence. While recent work Kumar et al. (2019); Gruber and Buettner (2022) has shown that ECE can under-estimate the true calibration error, the potential for under-estimation should not be a significant issue given that we are interested in analyzing differences in ECE rather than unbiased estimates of the error itself.
169
+
170
+ # 2.4.2 AREA UNDER THE CURVE (AUC)
171
+
172
+ The AUC metric is employed to assess the diagnostic ability of confidence scores in distinguishing between correct and incorrect answers. Utilizing the Mann-Whitney U statistic approach, the AUC represents the probability that a randomly chosen correct answer has a higher confidence score compared to a randomly chosen incorrect answer:
173
+
174
+ $$
175
+ \mathrm{AUC} = \frac{1}{N_{\text{pos}} \times N_{\text{neg}}} \sum_{i=1}^{N_{\text{pos}}} \sum_{j=1}^{N_{\text{neg}}} I\left(C_{i} > C_{j}\right) \tag{2}
176
+ $$
177
+
178
+ In this equation, $N_{pos}$ and $N_{neg}$ denote the counts of correct (positive) and incorrect (negative) answers, respectively. $C_i$ and $C_j$ represent the confidence scores of the $i$ th and $j$ th correct and incorrect answers, respectively. $I$ is an indicator function, which equals 1 if $C_i > C_j$ and 0 otherwise. This method evaluates each pair of correct and incorrect answers to determine if the confidence score for the correct answer surpasses that of the incorrect one. The AUC is then the fraction of these pairs satisfying this criterion, measuring the capability of confidence scores to differentiate between correct and incorrect responses, with AUC values ranging from 0.5 (indicating no better than chance discrimination) to 1 (signifying perfect discrimination).
179
+
180
+ # 2.5 Statistical Analysis
181
+
182
+ To assess statistical significance, we utilize Bayes factors ( $BF$ ) to determine the extent to which the observed data adjust our belief in the alternative and null hypotheses. Values of $3 < BF < 10$ and $BF > 10$ indicate moderate and strong evidence against the null hypothesis, respectively. Similarly, values of $1/10 < BF < 1/3$ and $BF < 1/10$ indicate moderate and strong evidence in favor of the null hypothesis, respectively. We report Bayes factors for Bayesian t-tests using the default priors as recommended by Rouder et al. (2012).
183
+
184
+ # 3. Results
185
+
186
+ We start by examining the results from Experiment 1 and compare human and model confidence in the case where LLMs generate default explanations for participants. We present the results for two different metrics: 1) Expected Calibration Error (ECE), which
187
+
188
+ assesses the degree to which confidence scores from the model or the human reflect the true accuracy of the LLM, and 2) Area Under the Curve (AUC), which assesses the degree to which confidence scores discriminate between correct and incorrect responses (see Methods for details). The findings indicate that there is a significant gap, as measured by calibration and discrimination, between what LLMs know and what humans believe they know based on default explanations.
189
+
190
+ ![](images/24cdf4c9c14007fc830e4d2eaafe146ca5d3fdaeca143d1534a2cce8575d62f5.jpg)
191
+ Figure 2: Calibration error and discrimination for model confidence and human confidence across the behavioral experiments and LLMs. Calibration error is assessed by ECE (lower is better) while discrimination is assessed by AUC (higher is better). Vertical dashed lines represent the calibration and discrimination gap between model confidence and human confidence for unmodified explanations (Experiments 1a, 1b, and 1c). For human confidence, data points represent the AUC values computed separately for each participant ( $n$ shown in figure), and error bars represent the $95\%$ confidence interval of the mean across participants. Because of data sparsity, the ECE values were computed at the group level.
192
+
193
+ ![](images/82d158ac6a75cbb1ca067e733a8cd22dd53064eb411b2ca0f31302846871b0b6.jpg)
194
+
195
+ ![](images/0153c67c7bc796519c8ffb74426b4e0f94fde817c9f1f9d7a23372e2683d1e78.jpg)
196
+
197
+ # 3.0.1 CALIBRATION GAP
198
+
199
+ Figure 2, left panel, shows the ECE for both model and human confidence. The results show a calibration gap; across the different types of LLMs and types of questions (multiple choice and short answer), the ECE metric is much lower for model confidence (in gray) than for human confidence with baseline explanations (in green). This gap demonstrates that standard explanations provided by the LLM do not enable participants to judge the likelihood of correctness of the LLM's answers, leading to a misalignment between perceived accuracy and actual LLM accuracy.
200
+
201
+ Figure 3 expands on the calibration results in Figure 2 to show detailed calibration results for each LLM and each experimental condition. The diagrams show how well model confidence (left column) and human confidence (right two columns) are calibrated. The ideal calibration (i.e., $\mathrm{ECE} = 0$ ) would yield results along the diagonal. For multiple-choice questions, both LLMs have a tendency to be overconfident, resulting in calibration lines below the diagonal. For the short-answer questions, the LLM is somewhat underconfident. Comparing the LLM to the human calibration in Experiment 1 (middle column), the re
202
+
203
+ sults show that for the multiple choice questions, human miscalibration is primarily due to overconfidence, indicating that people generally believe that LLMs are more accurate than they actually are. The histograms (inset panels) demonstrate that a significant portion of the calibration error is due to participants' propensity to produce high confidence scores, even though the model accuracy for the associated questions is much lower than expected based on confidence.
204
+
205
+ # 3.0.2 DISCRIMINATION GAP
206
+
207
+ Participants are not very good, relative to the LLM, at discriminating between which answers are likely correct or incorrect based on the default explanation. We assess discrimination using the AUC metric applied to the human confidence ratings. Figure 2, right panel, shows the AUC for both model and human confidence. The results show a gap between how well model and human confidence discriminate between correct and incorrect answers. The LLM model confidence discriminates between correct and incorrect answers well above chance (GPT-3.5 AUC=0.751, PaLM2 AUC=0.746 for the multiple choice questions and GPT-4o AUC=0.781 for the short-answer questions). In contrast, participants who viewed the default explanations in Experiment 1 were only slightly better than random guessing (AUC=0.589 and AUC=0.602 for the multiple choice explanations by GPT-3.5 and PaLM2 respectively and AUC=0.592 for the short-answer explanations by GPT-4o). Therefore, default explanations lead to a discrimination gap as well.
208
+
209
+ # 3.1 Explanation style and length affect human confidence
210
+
211
+ Experiment 2 evaluates how human confidence is affected by the degree of uncertainty expressed in LLM explanations (across three levels of confidence) as well as the overall length of the LLM explanation (across three levels of length).
212
+
213
+ Figure 4 shows that the type of uncertainty language used in the explanations has a strong influence on human confidence regardless of the type of LLM that produced the explanation or the type of question. Low confidence explanations ("I am not sure") produced significantly lower human confidence than medium confidence explanations ("I am somewhat sure"); $BF > 100$ across Experiments 2a, 2b and 2c. Similarly, medium confidence explanations produced lower human confidence than high confidence explanations; $BF > 100$ across Experiments 2a, 2b, and 2c. The Supplementary Information ("Human Confidence Agreement") shows an analysis of the reliability of the confidence ratings across participants.
214
+
215
+ In addition, the length of the explanations also affected the human confidence in the LLM answers. Long explanations led to significantly higher confidence than the short explanations ( $BF = 25$ with data combined across Experiments 2a, 2b and 2c) and short explanations led to significantly higher confidence than the responses that only contained the uncertainty expression ( $BF > 100$ with data combined across Experiments 2a, 2b, and 2c). The additional information presented in longer explanations did not enable participants to better discriminate between likely correct and incorrect answers for longer explanations. Across Experiments 2a, 2b, and 2c, the mean participant AUC is 0.54 and 0.57 for long and uncertainty-only explanations, respectively ( $BF = .23$ ). Therefore, the length of the answer
216
+
217
+ ![](images/fc96862325f04c9e7252aeae2fa019ba7592732d0a29b56de11215fe6d0100a2.jpg)
218
+ Figure 3: Calibration diagrams for model confidence and human confidence across Experiments 1 and 2. The top and middle rows show results for multiple-choice questions with the GPT-3.5 and PaLM2 models, respectively. The bottom row shows results for short-answer questions with the GPT-4o model. The histograms at the bottom of each plot show the proportion of observations in each confidence bin (values are scaled by $30\%$ for visual clarity). Shaded regions represent the $95\%$ confidence interval of the mean computed across participants and questions.
219
+
220
+ led to an increase in human confidence without any corresponding change in sensitivity to discriminating between correct and incorrect answers.
221
+
222
+ ![](images/df947423817b2b46d6a2041377a60fa0c9b07bedfb990d4938c95537d8ace6b7.jpg)
223
+ Figure 4: Mean human confidence across LLM explanation styles varying in uncertainty language and length. Data are presented as mean values of participant confidence in Experiments 2a $(n = 60)$ , 2b $(n = 60)$ , and 2c $(n = 59)$ . For reference, dashed lines show the average human confidence for the baseline explanations in Experiment 1a, 1b, and 1c. Error bars represent the $95\%$ confidence interval of the mean across participants.
224
+
225
+ ![](images/36705c54795b213a3cce59353139354a549601d0c43f0bfdf3abcfc28c18704e.jpg)
226
+
227
+ ![](images/f05b7c11bf269bac0590f495fc20fb6e768268b7e50be6f0be5c8b62419c465a.jpg)
228
+
229
+ The results confirm that people can appropriately interpret verbal cues about uncertainty and that manipulating the length of the explanation can directly impact human confidence.
230
+
231
+ # 3.2 Reducing the calibration and discrimination gap
232
+
233
+ Having established in Experiment 2 that the uncertainty language in the LLM explanation can modify human confidence, we now evaluate whether linking the type of uncertainty language to the LLM model confidence (i.e., showing a low/medium/high confidence explanation when model confidence is low/medium/high) can reduce the calibration and discrimination gap.
234
+
235
+ # 3.2.1 SELECTING EXPLANATIONS BASED ON MODEL CONFIDENCE
236
+
237
+ We simulated the effect of aligning the explanation style to model confidence by a simple decision rule. With this rule, we select the type of explanation $s \in \{ \text{low confidence}, \text{medium confidence}, \text{high confidence} \}$ based on the LLM model confidence score $p$ :
238
+
239
+ $$
240
+ s = \left\{ \begin{array}{l l} \text{low confidence} & \text{if } p \leq \theta_{1} \\ \text{medium confidence} & \text{if } \theta_{1} < p \leq \theta_{2} \\ \text{high confidence} & \text{if } \theta_{2} < p \end{array} \right. \tag{3}
241
+ $$
242
+
243
+ The parameters $\theta_{1}$ and $\theta_{2}$ determine the ranges where low, medium, and high confidence explanations are chosen. The application of this rule to a given parameter setting leads to a participant's estimates being filtered out if the explanation style used for a specific question does not match the selected style. This allowed us to simulate the effect of participants receiving different types of explanations based on model confidence (i.e., lower confidence
244
+
245
+ explanations for low model confidence and high confidence explanations for high model confidence). The Supplementary Information ("Optimization Procedure") provides details on the optimization procedure and also a demonstration that the results are not particularly sensitive to the parameter settings.
246
+
247
+ # 3.2.2 CALIBRATION AND DISCRIMINATION RESULTS
248
+
249
+ Figure 2 shows the calibration and discrimination results when the selection rule is applied to the results from Experiment 2. The results in Figure 2 (left panel, red bars) show that the calibration gap has narrowed substantially. While there is still generally a higher calibration error for human confidence relative to model confidence, the calibration gap has decreased for all three LLMs relative to the baseline explanations in Experiment 1. Furthermore, Figure 2 (right panel) shows that the discrimination gap (as measured by AUC) has also been narrowed relative to the baseline explanations across LLMs and question types ( $BF > 100$ , $BF = 6.48$ , and $BF > 100$ for Experiments 2a, 2b, and 2c, respectively). Therefore, the results show that selecting the type of explanation based on LLM model confidence improves both calibration and discrimination performance, as human confidence in the LLM becomes more closely aligned with the LLM's actual accuracy.
250
+
251
+ # 3.3 Accuracy
252
+
253
+ # 3.3.1 PARTICIPANTS LACK SPECIALIZED KNOWLEDGE
254
+
255
+ For the experiments with multiple choice questions (1a, 1b, 2a, and 2b), participants provided their own answer after seeing the answer from the LLM. This allowed us to analyze whether participants have any knowledge independent of the LLM that allowed them to improve on LLM accuracy. In Experiments 1a and 2a with GPT-3.5, participants' average answer accuracy was $51\%$ , closely aligning with the LLM's $52\%$ accuracy rate. Similarly, for the multiple choice Experiments 1b and 2b with PaLM2, average participant accuracy was $45\%$ , similar to the $47\%$ accuracy rate for the LLM. In the majority $(82\%)$ of responses across all multiple choice experiments, participants selected the response that agreed with the LLM's explanation.
256
+
257
+ When participants chose to alter the answer, the average accuracy was $33\%$ which is lower than the LLM's accuracy of $39\%$ for these particular questions. These findings suggest limited success in participants' ability to accurately answer the questions independent of the LLM's explanation. This is consistent with findings from Hendrycks et al. (2021), showing that Mechanical Turk workers without specialized knowledge (akin to our participant pool) scored $35\%$ accuracy on similar questions.
258
+
259
+ When we applied the selection rule and the explanation type was aligned with model confidence, human decision accuracy in Experiments 2a and 2b did not improve for the selected questions (even though discrimination and calibration improved). This shows that accurate uncertainty communication by the LLM allowed participants to recognize when the LLM was providing a likely correct or incorrect answer, but the lack of accurate human knowledge independent from the LLM prevented participants from improving on the LLM answer.
260
+
261
+ # 3.3.2 SELF-ASSESSED EXPERTISE DOES NOT AFFECT PERFORMANCE
262
+
263
+ At the end of the experiment, participants estimated the performance they would achieve on similar questions for each of the 10 topics in the sample of MMLU questions. The median of these self-assessed expertise estimates did not substantially vary between topics: from $30\%$ (e.g., high school physics) to $45\%$ (e.g., high school world history). Examining the impact of perceived expertise on accuracy estimation, we divided participants into two groups based on whether their self-rated expertise was above or below $50\%$ separately for each of the 10 topics. For the experiments with GPT-3.5, the higher expertise groups generally had better discrimination (AUC 0.600 vs. AUC 0.579), but there was no evidence that this difference was significant $(\mathrm{BF} < 1)$ . In addition, the calibration error was comparable between the two groups (ECE = .289 vs. .292). Similarly, no effect of expertise was found for the experiments with PaLM2. Therefore, participants who considered themselves more knowledgeable about a topic were not more adept at estimating the LLM's performance in that area.
264
+
265
+ # 4. Discussion
266
+
267
+ Our research focused on bridging the gap between what an LLM knows and what users perceive it knows. This gap is critical, especially as reliance on LLMs for decision-making across various domains is rapidly increasing.
268
+
269
+ Research on LLMs has begun to address these challenges, with a focus on improving uncertainty communication and the quality of explanations. Several studies have explored LLM confidence in answering multiple-choice questions, focusing on how well the models' self-reported confidence aligns with their actual accuracy Kadavath et al. (2022); Achiam et al. (2023); Hendrycks et al. (2021); Xiong et al. (2024) and whether users can accurately assess the reliability of the explanations provided Tanneru et al. (2024). The work by Zhou et al. (2024) investigates how users respond to verbal phrases of uncertainty in a simulated trivia task but does not employ actual LLM outputs. Overall, there has been little research examining user confidence in LLM output. Our work uses actual LLM outputs and its confidence in an attempt to quantify the calibration and discrimination gap. As a result, we directly address the issue of miscommunication of uncertainty from LLMs to humans.
270
+
271
+ Our results showed that users consistently overestimated how accurate LLM outputs were, especially when they relied on the models' default explanations. This was true for three different LLMs and two different types of questions (multiple choice and short answer). This tendency toward overconfidence in LLM capabilities is a significant concern, particularly in scenarios where critical decisions rely on LLM-generated information. The inability of users to discern the reliability of LLM responses not only undermines the utility of these models but also poses risks in situations where user understanding of model accuracy is critical.
272
+
273
+ In addition, the results also showed a length bias where longer explanations led to higher human confidence levels even though they did not contain any additional information to help users to better discriminate between likely correct and incorrect answers. This suggests that users were processing the explanations at a shallow level, relying on simple textual cues such as overall length to predict LLM accuracy. This result is consistent with studies in social psychology and communication research which suggest that longer answers or explanations may be perceived as more persuasive or credible, even when they do not contain more
274
+
275
+ meaningful information Petty and Cacioppo (1984); Oppenheimer (2006). This length bias has also been found in domains such as peer reviews, where longer reviews are perceived as more persuasive and informative even if the information content remains the same Goldberg et al. (2023).
276
+
277
+ Although default LLM explanations do not enable users to perceive what the models truly know, this research shows that a simple approach based on tailored explanations can bridge this perception gap. This was achieved by altering the prompts used to generate explanations based on model confidence, allowing for better control over how uncertainty was expressed in the responses. Specifically, we designed these prompts to induce varying degrees of certainty in the explanations, ranging from expressions of low confidence (e.g., "I am not sure the answer is [B] because") to high confidence (e.g., "I am confident the answer is [B] because"). By modifying the language of the LLM's responses to better reflect model confidence, users showed improved calibration in their assessment of the LLM's reliability and were better able to discriminate between correct and incorrect answers. This improvement underscores the importance of transparent communication from LLMs, suggesting a need for researchers to investigate how model explanations affect user perception.
278
+
279
+ One limitation of the current study is the focus on a specific type of question involving a small number of response alternatives (multiple choice) and short-answers to open-ended questions. The extent to which these results apply to longer open-ended questions remains an open question. Further research could investigate the applicability of our findings across a broader range of scenarios. Another limitation of this study is that our approach to modifying the prompt based on internal uncertainty required the LLM to be prompted twice: once to read out the answer and model confidence, and again to produce an explanation modified by the model confidence. Future research could investigate how to produce confidence-modified explanations in a single step.
280
+
281
+ Another important area for future research is to understand the fundamental causes of the miscommunication of uncertainty. Why do LLMs generate calibrated model confidences while also producing explanations that are not consistent with those confidences? One hypothesis is that current LLMs are aligned to human preferences using reinforcement learning from human feedback (RLHF; Ouyang et al. (2022)), which produces some built-in biases. In these RLHF procedures, various types of explanations are presented to human participants, who can then choose their preferred explanations. LLMs are then fine-tuned based on human preference data, making them more likely to produce explanations that people prefer. While RLHF encourages human-aligned output, it inevitably reproduces any human preference biases. For example, people prefer detailed and generally longer explanations Bower et al. (2024); Saito et al. (2023). As a result, LLMs trained on these human preferences may produce explanations that are overly convincing, potentially misleading users about the reliability of the information.
282
+
283
+ An alternative hypothesis to the production of overconfident explanations lies in the autoregressive nature of well-established LLMs. In particular, we conjecture that after committing to an answer (coded as a sequence of tokens), the model will generate a sequence of tokens (explanation) that maximizes the likelihood of the previous answer, effectively resulting in an assertive answer. A similar hypothesis was also presented in Azaria and Mitchell (2023). Interestingly, the possibility that the LLM's choice of a particular answer inflates the rationale for that answer resembles the phenomenon of choice-supportive
284
+
285
+ biases in psychology Mather et al. (2000). After making a decision, people tend to overestimate the desirability of the chosen option while underestimating the desirability of rejected alternatives. This can make them feel more confident in their decision than they were when they first made it.
286
+
287
+ Our work shares some parallels with prior studies on the human perception and evaluation of AI-generated explanations in the domain of machine learning classifiers (see Rong et al. (2023) for an overview). These studies frequently employ feature highlighting to explain what areas of the image Smith-Renner et al. (2020) or what fragments of documents Feng and Boyd-Graber (2019) can support the suggested classification. Studies have found mixed evidence for the effectiveness of these types of AI explanations in human decision-making Steyvers and Kumar (2023); Bansal et al. (2021); Buçinca et al. (2021); Wang and Yin (2022). These results highlight the challenge of ensuring that AI-generated explanations align with human expectations and allow humans to distinguish between correct and incorrect answers.
288
+
289
+ In conclusion, our research highlights the critical role of clear and accurate communication in the interaction between users and LLMs. Enhancing the alignment between model confidence and the user's perception of model confidence can lead to more responsible and trustworthy use of LLMs, particularly in areas where the accuracy of AI-generated information is critical.
290
+
291
+ # Data Availability
292
+
293
+ All behavioral data as well as data produced by the Large Language Models used in this study are publicly available from the following OSF repository: https://osf.io/y7pr6/
294
+
295
+ # Code Availability
296
+
297
+ The code used for data analysis and extracting LLM model confidence is available from the following OSF repository: https://osf.io/y7pr6/
298
+
299
+ # Acknowledgments
300
+
301
+ This research was supported by NSF under award 1900644 (P.S., and M.S.)
302
+
303
+ # References
304
+
305
+ Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023.
306
+ Stephen R Ali, Thomas D Dobbs, Hayley A Hutchings, and Iain S Whitaker. Using chatgpt to write patient clinic letters. The Lancet Digital Health, 5(4):e179-e181, 2023.
307
+ Rohan Anil, Andrew M. Dai, Orhan Firat, Melvin Johnson, Dmitry Lepikhin, Alexandre Passos, Siamak Shakeri, Emanuel Taropa, Paige Bailey, Zhifeng Chen, Eric Chu, Jonathan H. Clark, Laurent El Shafey, Yanping Huang, Kathy Meier-Hellstern, Gaurav Mishra, Erica Moreira, Mark Omernick, Kevin Robinson, et al. Palm 2 technical report, 2023.
308
+ Amos Azaria and Tom Mitchell. The internal state of an LLM knows when it's lying. In *Findings of the Association for Computational Linguistics: EMNLP* 2023, pages 967-976, 2023.
309
+ Gagan Bansal, Tongshuang Wu, Joyce Zhou, Raymond Fok, Besmira Nushi, Ece Kamar, Marco Tulio Ribeiro, and Daniel Weld. Does the whole exceed its parts? the effect of ai explanations on complementary team performance. In Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems, pages 1-16, 2021.
310
+ Alexander H Bower, Nicole Han, Ansh Soni, Miguel P Eckstein, and Mark Steyvers. How experts and novices judge other people's knowledgeability from language use. Psychonomic Bulletin & Review, pages 1-11, 2024.
311
+ Zana Buçinca, Maja Barbara Malaya, and Krzysztof Z Gajos. To trust or to think: Cognitive forcing functions can reduce overreliance on ai in ai-assisted decision-making. Proceedings of the ACM on Human-Computer Interaction, 5(CSCW1):1-21, 2021.
312
+ David V Budescu, Han-Hui Por, Stephen B Broomell, and Michael Smithson. The interpretation of IPCC probabilistic statements around the world. Nature Climate Change, 4(6):508-512, 2014.
313
+ Sebastian Farquhar, Jannik Kossen, Lorenz Kuhn, and Yarin Gal. Detecting hallucinations in large language models using semantic entropy. Nature, 630(8017):625-630, 2024.
314
+ Shi Feng and Jordan Boyd-Graber. What can ai do for me? evaluating machine learning interpretations in cooperative play. In Proceedings of the 24th International Conference on Intelligent User Interfaces, pages 229–239, 2019.
315
+ Stephen M Fleming and Hakwan C Lau. How to measure metacognition. Frontiers in Human Neuroscience, 8:443, 2014.
316
+ Jiahui Geng, Fengyu Cai, Yuxia Wang, Heinz Koeppl, Preslav Nakov, and Iryna Gurevych. A survey of language model confidence estimation and calibration. arXiv preprint arXiv:2311.08298, 2023.
317
+
318
+ Alexander Goldberg, Ivan Stelmakh, Kyunghyun Cho, Alice Oh, Alekh Agarwal, Danielle Belgrave, and Nihar B Shah. Peer reviews of peer reviews: A randomized controlled trial and other experiments. arXiv preprint arXiv:2311.09497, 2023.
319
+ Sebastian Gruber and Florian Buettner. Better uncertainty calibration via proper scores for classification and beyond. Advances in Neural Information Processing Systems, 35: 8618-8632, 2022.
320
+ Chuan Guo, Geoff Pleiss, Yu Sun, and Kilian Q. Weinberger. On calibration of modern neural networks. In Doina Precup and Yee Whye Teh, editors, Proceedings of the 34th International Conference on Machine Learning, volume 70 of Proceedings of Machine Learning Research, pages 1321-1330. PMLR, 06-11 Aug 2017.
321
+ Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. Measuring massive multitask language understanding. Proceedings of the International Conference on Learning Representations (ICLR), 2021.
322
+ Emily H Ho, David V Budescu, Mandeep K Dhami, and David R Mandel. Improving the communication of uncertainty in climate science and intelligence analysis. *Behavioral Science & Policy*, 1(2):43-55, 2015.
323
+ Jordan Hoffmann, Sebastian Borgeaud, Arthur Mensch, Elena Buchatskaya, Trevor Cai, Eliza Rutherford, Diego de Las Casas, Lisa Anne Hendricks, Johannes Welbl, Aidan Clark, et al. Training compute-optimal large language models. arXiv preprint arXiv:2203.15556, 2022.
324
+ Lei Huang, Weijiang Yu, Weitao Ma, Weihong Zhong, Zhangyin Feng, Haotian Wang, Qian-glong Chen, Weihua Peng, Xiaocheng Feng, Bing Qin, et al. A survey on hallucination in large language models: Principles, taxonomy, challenges, and open questions. arXiv preprint arXiv:2311.05232, 2023.
325
+ Zhengbao Jiang, Jun Araki, Haibo Ding, and Graham Neubig. How can we know when language models know? on the calibration of language models for question answering. Transactions of the Association for Computational Linguistics, 9:962-977, 2021.
326
+ A Jo. The promise and peril of generative ai. Nature, 614(1):214-216, 2023.
327
+ Mandar Joshi, Eunsol Choi, Daniel S Weld, and Luke Zettlemoyer. Triviaqa: A large scale distantly supervised challenge dataset for reading comprehension. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1601-1611, 2017.
328
+ Saurav Kadavath, Tom Conerly, Amanda Askell, Tom Henighan, Dawn Drain, Ethan Perez, Nicholas Schiefer, Zac Hatfield-Dodds, Nova DasSarma, Eli Tran-Johnson, et al. Language models (mostly) know what they know. arXiv preprint arXiv:2207.05221, 2022.
329
+ Tzur M. Karelitz, Mandeep K. Dhami, David V. Budescu, and Thomas S. Wallsten. Toward a universal translator of verbal probabilities. Proceedings of the Fifteenth International Florida Artificial Intelligence Research Society Conference, pages 498-502, 2002.
330
+
331
+ Ananya Kumar, Percy S Liang, and Tengyu Ma. Verified uncertainty calibration. Advances in Neural Information Processing Systems, 32, 2019.
332
+ Stephanie Lin, Jacob Hilton, and Owain Evans. Teaching models to express their uncertainty in words. Transactions on Machine Learning Research, 2022. ISSN 2835-8856. URL https://openreview.net/forum?id=8s8K2UZGTZ.
333
+ Mara Mather, Eldar Shafir, and Marcia K Johnson. Misremembrance of options past: Source monitoring and choice. *Psychological Science*, 11(2):132-138, 2000.
334
+ Mahdi Pakdaman Naeini, Gregory Cooper, and Milos Hauskrecht. Obtaining well calibrated probabilities using bayesian binning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 29, 2015.
335
+ Bernie J O'Brien. Words or numbers? the evaluation of probability expressions in general practice. The Journal of the Royal College of General Practitioners, 39(320):98-100, 1989.
336
+ OpenAI. Gpt-3.5, Nov 2022a. URL https://platform.openai.com/docs/models/gpt-3-5. Accessed: [Nov 24, 2023].
337
+ OpenAI. Introducing chatgpt, Nov 2022b. URL https://openai.com/blog/chatgpt. Accessed: [Nov 24, 2023].
338
+ Daniel M Oppenheimer. Consequences of erudite vernacular utilized irrespective of necessity: Problems with using long words needlessly. Applied Cognitive Psychology: The Official Journal of the Society for Applied Research in Memory and Cognition, 20(2): 139-156, 2006.
339
+ Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. Advances in Neural Information Processing Systems, 35:27730-27744, 2022.
340
+ Richard E Petty and John T Cacioppo. The effects of involvement on responses to argument quantity and quality: Central and peripheral routes to persuasion. Journal of personality and social psychology, 46(1):69, 1984.
341
+ Jack W Rae, Sebastian Borgeaud, Trevor Cai, Katie Millican, Jordan Hoffmann, Francis Song, John Aslanides, Sarah Henderson, Roman Ring, Susannah Young, et al. Scaling language models: Methods, analysis & insights from training gopher. arXiv preprint arXiv:2112.11446, 2021.
342
+ Yao Rong, Tobias Leemann, Thai-Trang Nguyen, Lisa Fiedler, Peizhu Qian, Vaibhav Unhelkar, Tina Seidel, Gjergji Kasneci, and Enkelejda Kasneci. Towards human-centered explainable ai: A survey of user studies for model explanations. IEEE Transactions on Pattern Analysis and Machine Intelligence, pages 1-20, 2023. doi: 10.1109/TPAMI.2023.3331846.
343
+
344
+ Jeffrey N Rouder, Richard D Morey, Paul L Speckman, and Jordan M Province. Default bayes factors for anova designs. Journal of Mathematical Psychology, 56(5):356-374, 2012.
345
+ Keita Saito, Akifumi Wachi, Koki Wataoka, and Youhei Akimoto. Verbosity bias in preference labeling by large language models. arXiv preprint arXiv:2310.10076, 2023.
346
+ Alison Smith-Renner, Ron Fan, Melissa Birchfield, Tongshuang Wu, Jordan Boyd-Graber, Daniel S. Weld, and Leah Findlater. No explainability without accountability: An empirical study of explanations and feedback in interactive ml. In Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems, CHI '20, page 1-13, New York, NY, USA, 2020. Association for Computing Machinery. ISBN 9781450367080. doi: 10.1145/3313831.3376624. URL https://doi.org/10.1145/3313831.3376624.
347
+ Aarohi Srivastava, Abhinav Rastogi, Abhishek Rao, Abu Awal Md Shoeb, Abubakar Abid, Adam Fisch, Adam R Brown, Adam Santoro, Aditya Gupta, Adrià Garriga-Alonso, et al. Beyond the imitation game: Quantifying and extrapolating the capabilities of language models. Transactions on Machine Learning Research, 2023. ISSN 2835-8856. URL https://openreview.net/forum?id=uyTL5Bvosj.
348
+ Mark Steyvers and Aakriti Kumar. Three challenges for ai-assisted decision-making. Perspectives on Psychological Science, 2023. ISSN 1745-6924. doi: 10.1177/17456916231181102.
349
+ Sree Harsha Tanneru, Chirag Agarwal, and Himabindu Lakkaraju. Quantifying uncertainty in natural language explanations of large language models. In International Conference on Artificial Intelligence and Statistics, pages 1072-1080. PMLR, 2024.
350
+ Sunil Thulasidasan, Gopinath Chennupati, Jeff A Bilmes, Tanmoy Bhattacharya, and Sarah Michalak. On mixup training: Improved calibration and predictive uncertainty for deep neural networks. Advances in Neural Information Processing Systems, 32, 2019.
351
+ Katherine Tian, Eric Mitchell, Allan Zhou, Archit Sharma, Rafael Rafailov, Huaxiu Yao, Chelsea Finn, and Christopher Manning. Just ask for calibration: Strategies for eliciting calibrated confidence scores from language models fine-tuned with human feedback. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 5433-5442, Singapore, 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.330. URL https://aclanthology.org/2023.emnlp-main.330.
352
+ Thomas S. Wallsten, Yaron Shlomi, and Hisuchi Ting. Exploring intelligence analysts' selection and interpretation of probability terms. *Final Report for Research Contract Expressing Probability in Intelligence Analysis*, 2008.
353
+ Xinru Wang and Ming Yin. Effects of explanations in ai-assisted decision making: Principles and comparisons. ACM Transactions on Interactive Intelligent Systems, 12(4):1-36, 2022.
354
+ Jeromie Whalen, Chrystalla Mouza, et al. Chatgpt: Challenges, opportunities, and implications for teacher education. Contemporary Issues in Technology and Teacher Education, 23(1):1-23, 2023.
355
+
356
+ Yuxin Xiao, Paul Pu Liang, Umang Bhatt, Willie Neiswanger, Ruslan Salakhutdinov, and Louis-Philippe Morency. Uncertainty quantification with pre-trained language models: A large-scale empirical analysis. In Findings of the Association for Computational Linguistics: EMNLP 2022, pages 7273-7284, 2022.
357
+ Miao Xiong, Zhiyuan Hu, Xinyang Lu, YIFEI LI, Jie Fu, Junxian He, and Bryan Hooi. Can LLMs express their uncertainty? an empirical evaluation of confidence elicitation in LLMs. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=gjeQKFxFpZ.
358
+ Zhangyue Yin, Qiushi Sun, Qipeng Guo, Jiawen Wu, Xipeng Qiu, and Xuanjing Huang. Do large language models know what they don't know? In *Findings of the Association for Computational Linguistics: ACL* 2023, pages 8653-8665. Association for Computational Linguistics, 2023.
359
+ Andres Felipe Zambrano, Xiner Liu, Amanda Barany, Ryan S Baker, Juhan Kim, and Nidhi Nasiar. From ncoder to chatgpt: From automated coding to refining human coding. In International Conference on Quantitative Ethnography, pages 470-485. Springer, 2023.
360
+ Kaitlyn Zhou, Dan Jurafsky, and Tatsunori Hashimoto. Navigating the grey area: How expressions of uncertainty and overconfidence affect language models. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 5506-5524, Singapore, 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.335. URL https://aclanthology.org/2023.emnlp-main.335.
361
+ Kaitlyn Zhou, Jena Hwang, Xiang Ren, and Maarten Sap. Relying on the unreliable: The impact of language models' reluctance to express uncertainty. pages 3623-3643. Association for Computational Linguistics, 2024. doi: 10.18653/v1/2024.acl-long.198. URL https://aclanthology.org/2024.acl-long.198.
362
+
363
+ # Appendix A. Additional Experiment Results
364
+
365
+ # A.1 Demographic Information by Experiment
366
+
367
+ Table 3: Breakdown of demographic information by experiment. Note that Sex was not reported by all participants.
368
+
369
+ <table><tr><td>Experiment</td><td>#Participants</td><td>#Male</td><td>#Female</td><td>Median Age</td><td>Min Age</td><td>Max Age</td></tr><tr><td>1a</td><td>41</td><td>18</td><td>20</td><td>35</td><td>21</td><td>71</td></tr><tr><td>1b</td><td>39</td><td>21</td><td>18</td><td>36</td><td>21</td><td>69</td></tr><tr><td>1c</td><td>42</td><td>18</td><td>19</td><td>32</td><td>18</td><td>69</td></tr><tr><td>2a</td><td>60</td><td>31</td><td>28</td><td>35</td><td>18</td><td>79</td></tr><tr><td>2b</td><td>60</td><td>28</td><td>32</td><td>36</td><td>19</td><td>62</td></tr><tr><td>2c</td><td>59</td><td>22</td><td>29</td><td>33</td><td>18</td><td>74</td></tr></table>
370
+
371
+ # A.2 Experiment 3
372
+
373
+ We conducted an additional experiment that used a different prompting method to modify the uncertainty language expressed in explanations for multiple choice questions. In this approach, we gave the LLM the baseline explanation from Experiments 1a and 1b in the prompt and instructed the LLM to "rewrite the explanation as if you are not sure/somewhat sure/sure" for each of the three confidence levels. In contrast to the prompts for Experiments 2, this prompt leads to less stereotyped expressions of uncertainty within each response. This experiment allowed us to investigate whether the results generalize across different approaches to express uncertainty in the explanations.
374
+
375
+ In Experiment 3a and 3b, we used GPT-3.5 and PaLM2 respectively to generate the explanations mirroring the experiments 2a and 2b. The methodology was the same as in Experiment 2a and 2b except that we did not include a length manipulation. The experiment was conducted with 81 participants (40 in Experiment 3a, 41 in Experiment 3b).
376
+
377
+ The results of Experiment 3 are shown in Supplementary Table 4 along with the results from Experiment 1 and 2. The pattern of results is the same as in Experiments 2a and 2b. Human confidence exhibited smaller calibration error (ECE) and larger discrimination (AUC) relative to the baseline results in Experiments 1a and 1b. These results show that the experimental results from 2a and 2b generalize to different prompts to elicit uncertainty.
378
+
379
+ # A.3 Overconfidence Error
380
+
381
+ Supplementary Table 4 also includes results for an additional performance metric, the Overconfidence Error (OE). The metric is an adaptation of the ECE formula, specifically focusing on cases of overconfidence Thulasidasan et al. (2019):
382
+
383
+ $$
384
+ \mathrm{OE} = \sum_{m=1}^{M} \frac{\left|B_{m}\right|}{N} \left[ \mathrm{conf}(B_{m}) \times \max\left(0, \mathrm{conf}(B_{m}) - \mathrm{acc}(B_{m})\right) \right] \tag{4}
385
+ $$
386
+
387
+ Table 4: Expected Calibration error (ECE), Overconfidence Error (OE), and Area under the Curve (AUC) of model and human confidence across experiments.
388
+
389
+ <table><tr><td>LLM</td><td>Confidence Score</td><td>ECE</td><td>OE</td><td>AUC</td></tr><tr><td rowspan="4">GPT-3.5</td><td>model confidence</td><td>.104</td><td>.064</td><td>.751</td></tr><tr><td>human confidence: experiment 1a, default explanations</td><td>.264</td><td>.220</td><td>.589</td></tr><tr><td>human confidence: experiment 2a, modified explanations</td><td>.150</td><td>.107</td><td>.694</td></tr><tr><td>human confidence: experiment 3a, modified explanations</td><td>.158</td><td>.127</td><td>.678</td></tr><tr><td rowspan="4">PaLM2</td><td>model confidence</td><td>.154</td><td>.098</td><td>.746</td></tr><tr><td>human confidence: experiment 1b, default explanations</td><td>.291</td><td>.229</td><td>.602</td></tr><tr><td>human confidence: experiment 2b, modified explanations</td><td>.225</td><td>.168</td><td>.652</td></tr><tr><td>human confidence: experiment 3b, modified explanations</td><td>.195</td><td>.155</td><td>.689</td></tr><tr><td rowspan="3">GPT-4o</td><td>model confidence</td><td>.141</td><td>.008</td><td>.781</td></tr><tr><td>human confidence: experiment 1c, default explanations</td><td>.165</td><td>.084</td><td>.593</td></tr><tr><td>human confidence: experiment 2c, modified explanations</td><td>.111</td><td>.008</td><td>.689</td></tr></table>
390
+
391
+ This penalizes predictions by the weight of the confidence but only when confidence exceeds accuracy. The results show that human confidence for default explanations leads to the largest overconfidence error, and that the explanations modified according to model confidence lower the overconfidence error.
392
+
393
+ ![](images/94a13f417384e83b57f66f288ff673aee17b7db0ee3165ffbf13e1a1b96f00e3.jpg)
394
+
395
+ ![](images/9c1dd9b721919e48491b6eb5ccf24e14c94eecf400f1a0df68116365db9c3d29.jpg)
396
+
397
+ ![](images/ff06371303bd25051ad127ad0b92bbe4670a579aed18a0cb4313901fa2b84a8d.jpg)
398
+
399
+ ![](images/09bd05d33caad30f9eb62c8d8569d5b7c3594988991a8e809365c94b774c52c9.jpg)
400
+ Figure 5: Calibration diagrams for the full set of MMLU questions for GPT-3.5 and PaLM2 (left and middle panel) and the 5000 question sample of the Trivia QA data set using the GPT-4o model (right panel).
401
+
402
+ ![](images/b656d020814497a36c35290c6910e23ddffee2dfe33a2c83947c42192fdf3289.jpg)
403
+
404
+ ![](images/9ec8c856282f963680172ff9ea59b4fd3edf08f6d199cb7492d4c0828e403698.jpg)
405
+
406
+ # A.4 Human Confidence Agreement
407
+
408
+ For Experiment 2, we analyzed the degree to which there is agreement in the confidence ratings across participants. We assessed agreement with the mean correlation between any pair of participants. Because there were few cases where any pair of participants were rating the LLM response for the same question at the same level of uncertainty language at the same level of length, we conducted the analysis at the level of the experimental manipulations. With this approach, each participant was characterized with 9 confidence ratings corresponding to the mean rating for each of 3 uncertainty levels x 3 length levels. At this aggregate level, mean participant-to-participant correlation was 0.550, 0.547, and 0.336 for Experiments 2a, 2b, and 2c respectively. This shows that there was moderate agreement in human confidence about the differences in levels of uncertainty and length.
409
+
410
+ ![](images/55777734f6f0a1ed6c8cd962de25c66da0ca338803f0449aa934990a57e03843.jpg)
411
+
412
+ ![](images/cfe5edc36e316aaaba513571f5e848d05d408f64626e67d5cafe6254e15fc69d.jpg)
413
+
414
+ ![](images/58dc3e097ba18d5ccbbeb47007d807a647d99fa80f0c3aac7cf41e42fcbf281e.jpg)
415
+
416
+ ![](images/95607671fc9e74292202740cfea8556094a6e9a7686b9b4c9cc950a80f4786f3.jpg)
417
+ Figure 6: Calibration diagrams for the subsets of questions used for the behavioral experiments. Left and middle panels show the calibration for the GPT-3.5 and PaLM2 models for the MMLU multiple choice questions. The right panel shows the calibration for the GPT-4o model for the Trivia QA questions. Note that the lower count of multiple choice questions in the lowest confidence bin is due to the sparsity of questions in that confidence bin.
418
+
419
+ ![](images/dbe765eef638be68db887de9aa1eaac899a42bffbc20f4ed57670c1af8523731.jpg)
420
+
421
+ ![](images/5670fe8fb76b2285caa061647a1d77b4fe1cbedf77302e270a3cb212d2723e3f.jpg)
422
+
423
+ # Appendix B. Additional Model Confidence Results
424
+
425
+ Supplementary Figure 5 shows the calibration diagrams for the full set of 14,042 test questions from the MMLU dataset and the 5000 questions from the Trivia QA dataset. For comparison, Supplementary Figure 6 shows the calibration diagrams for the subset of 350 multiple choice and 336 short-answer questions used for the behavioral experiments.
426
+
427
+ MMLU multiple choice questions. For GPT-3.5, the accuracy across all 14,042 questions is $63\%$ with an AUC of 0.78. When computing model confidence, $8.7\%$ of the answers were incomplete and were removed from consideration. For PaLM2, the accuracy is $51\%$ with an AUC of 0.73. Furthermore, we confirmed that we could replicate the five-shot results of the GPT-4 Technical Report (Achiam et al., 2023). Five-shot prompting with GPT-3.5 (detailed in Appendix 8 of the report) resulted in $71\%$ accuracy (compared to $70\%$ reported in Supplementary Table 2 of the report). The zero-shot approach is the focus of this paper, as it simplifies the construction of explanations, and our goal is not to maximize language model accuracy.
428
+
429
+ Trivia QA short answer questions. For GPT-4o, the accuracy across the 5000 question sample is $85\%$ with an AUC of 0.85. For the 336 question subset used for the behavioral experiments, accuracy is $63\%$ with an AUC of 0.78. The lower accuracy is caused by the uniform sampling across confidence bins that results in an over-representation of questions with lower confidence for which the model is also less accurate.
430
+
431
+ # Appendix C. Optimization Procedure
432
+
433
+ As discussed in the main text, we applied a selection rule to link the level of confidence, $s \in \{\text{low confidence, medium confidence, high confidence}\}$ , in the explanation to model confidence $p$ :
434
+
435
+ $$
436
+ s = \left\{ \begin{array}{ll} \text{low confidence} & \text{if } p \leq \theta_{1} \\ \text{medium confidence} & \text{if } \theta_{1} < p \leq \theta_{2} \\ \text{high confidence} & \text{if } \theta_{2} < p \end{array} \right. \tag{5}
437
+ $$
438
+
439
+ The parameters $\theta_{1}$ and $\theta_{2}$ determine the ranges where low, medium, and high confidence explanations are chosen. The application of this rule to a given parameter setting leads to any participant estimates being filtered out if the explanation style used for a specific question does not match the selected style. To apply the selection rule, we ignored the variations in length in Experiment 2. Therefore, for both Experiment 2 and 3, the selection rule considered a choice of one of three explanation styles for each question (i.e., low, medium, and high confidence explanations).
440
+
441
+ ![](images/b8c78377384d3c05ea6c8eda7ed69a411c7aa3df5dfc67729d9d4ff2c3dcd03b.jpg)
442
+
443
+ ![](images/11f080d169314831349cfcb3e0be05d438a686b647b8077d0d7e642d0ae5e1db.jpg)
444
+
445
+ ![](images/1ddec76a2082151dd0c0b121c52e2bb2dac1113ae8eceffa93f4ea40500b0af7.jpg)
446
+
447
+ ![](images/b7ed5ebfeb1b7adaa6bafaa73b3adbb9623c444e082d57cd0c21ca66214fcbff.jpg)
448
+ Figure 7: Sensitivity analysis for Experiments 2a, 2b and 2c. Results show the effect of different thresholds $(\theta_{1},\theta_{2})$ to select explanation styles. Top and bottom panels show the resulting AUC and ECE that relate the human confidence to the actual accuracy of the LLM.
449
+
450
+ ![](images/aaf9e7a567810aeae66a3e1f0a1f1c6ebcbc6707c6cc5c0df76e6b5c749431be.jpg)
451
+
452
+ ![](images/14fa08a9b9845edfd32c68719e37c43727389705989202e1f9bd7becfe31404f.jpg)
453
+
454
+ The parameters $\theta_{1}$ and $\theta_{2}$ were chosen to optimize a combination of the ECE and AUC score (weighting the ECE by $33\%$ relative to AUC). The optimization was performed separately for each experiment and LLM. For the multiple choice experiments 2a and 2b, the parameters were optimized using a basic grid search with values ranging from 0.25 to
455
+
456
+ 1, with the constraint that $\theta_{1} < \theta_{2}$ . For the short-answer experiment 2c, the parameters ranged from 0 to 1, with the constraint that $\theta_{1} < \theta_{2}$ . For Experiment 2a (GPT-3.5), the optimized parameters were $\theta_{1} = 0.65$ and $\theta_{2} = 0.75$ , while Experiment 2b's (PaLM2) optimized parameters were $\theta_{1} = 0.50$ and $\theta_{2} = 0.70$ . For Experiment 2c (GPT-4o), the optimized parameters were $\theta_{1} = 0.50$ and $\theta_{2} = 0.60$ .
457
+
458
+ # C.1 Sensitivity Analysis
459
+
460
+ The calibration and discrimination results are not overly sensitive to parameter changes. The resulting AUC and ECE outcomes for each parameter setting are shown in Supplementary Figures 7 and 8. For example, for GPT-3.5, Experiment 2a yields mean AUC and ECE values of 0.649 and 0.203 across all parameter settings, which are higher than the results of Experiment 1. Furthermore, there is moderate evidence $(\mathrm{BF} > 3)$ for an improved AUC across $52\%$ of parameter combinations.
461
+
462
+ ![](images/ff9b6cde5fbc5f4dde11f9c339f978c70e0f91984e552e524b59d30d129cab20.jpg)
463
+
464
+ ![](images/3c3aa0bb280942c515003e67f3a5e9fee4d56dba2d77e12163d634f7b220709e.jpg)
465
+ Figure 8: Sensitivity analysis for Experiment 3. Results show the effect of different thresholds $(\theta_{1},\theta_{2})$ to select explanation styles. Top and bottom panels show the resulting AUC and ECE that relate the human confidence to the actual accuracy of the LLM.
466
+
467
+ # Appendix D. Additional Information about Prompts and Explanations
468
+
469
+ Supplementary Table 5 shows examples of the prompt styles used for Experiments 2 and 3. Supplementary Table 6 shows examples of all explanations for a particular multiple choice question used in Experiment 2a.
470
+
471
+ Table 5: Prompt styles across Experiments 1, 2, and 3
472
+
473
+ <table><tr><td>Prompt Style</td><td>Prompt</td></tr><tr><td>Experiment 1Baseline</td><td>Problem: [question] Choose from the following options: [A] Option A [B] Option B [C] Option C [D]Option D. The answer you give is: [answer]. Provide an explanation for the answer you gave. In yourexplanation, you must include the answer.</td></tr><tr><td>Experiment 2Low Confidence &amp; LongMedium Confidence &amp; LongHigh Confidence &amp; LongLow Confidence &amp; ShortMedium Confidence &amp; ShortHigh Confidence &amp; Short</td><td>Problem: [question] Choose from the following options: [A] Option A [B] Option B [C] Option C [D]Option D. The answer you give is [answer]. Write an explanation why you are not sure that the answer is[answer]. In your explanation, mention that you are not sure and include the answer. Start with &#x27;I am&#x27;:&quot;Problem: [question] Choose from the following options: [A] Option A [B] Option B [C] Option C [D]Option D. The answer you give is [answer]. Write an explanation why you somewhat sure that the answeris [answer]. In your explanation, mention that you are somewhat sure and include the answer. Start with&#x27;I am&#x27;:Problem: [question] Choose from the following options: [A] Option A [B] Option B [C] Option C [D]Option D. The answer you give is [answer]. Write an explanation why you are sure that the answer is[answer]. In your explanation, mention that you are sure and include the answer. Start with &#x27;I am&#x27;:Problem: [question] Choose from the following options: [A] Option A [B] Option B [C] Option C [D]Option D. The answer you give is [answer]. Write a very short explanation why you are not sure that theanswer is [answer]. In your explanation, mention that you are not sure and include the answer. Use asfew words as possible. Start with &#x27;I am&#x27;:Problem: [question] Choose from the following options: [A] Option A [B] Option B [C] Option C [D]Option D. The answer you give is [answer]. 
Write a very short explanation why you somewhat sure thatthe answer is [answer]. In your explanation, mention that you are somewhat sure and include the answer.Use as few words as possible. Start with &#x27;I am&#x27;:Problem: [question] Choose from the following options: [A] Option A [B] Option B [C] Option C [D]Option D. The answer you give is [answer]. Write a very short explanation why you are sure that theanswer is [answer]. In your explanation, mention that you are sure and include the answer. Use as fewwords as possible. Start with &#x27;I am&#x27;:</td></tr><tr><td>Experiment 3Low ConfidenceMedium ConfidenceHigh Confidence</td><td>Problem: [question] Choose from the following options: [A] Option A [B] Option B [C] Option C [D]Option D. The answer you give is: [answer]. The explanation you give is: [baseline explanation]. Rewritethe explanation as if you are not sure and ensure that you include language or phrases that conveys yourcomplete lack of confidence. In your explanation, you must include the answer:Problem: [question] Choose from the following options: [A] Option A [B] Option B [C] Option C [D]Option D. The answer you give is: [answer]. The explanation you give is: [baseline explanation]. Rewrte theexplanation as if you are somewhat sure and ensure that you include language or phrases that conveysyour lack of confidence. In your explanation, you must include the answerProblem: [question] Choose from the following options: [A] Option A [B] Option B [C] Option C [D]Option D. The answer you give is: [answer]. The explanation you give is: [baseline explanation]. Rewrite theexplanation as if you are very sure and ensure that you include language or phrases that conveys yourconfidence. In your explanation, you must include the answer:</td></tr></table>
474
+
475
+ Table 6: Example explanations given by GPT-3.5 for a multiple choice question in Experiments 1a and 2a. The particular question (correct answer is A) is: A moving $1.5\mathrm{kg}$ cart collides with and sticks to a $0.5\mathrm{kg}$ cart which was initially at rest. Immediately after the collision, the carts each have the same ______ as each other. Choose from the following options: A) Velocity B) Kinetic energy C) Mass D) Linear momentum.
476
+
477
+ <table><tr><td>Prompt Style</td><td>Explanation</td></tr><tr><td>Experiment 1Baseline</td><td>The answer is [A] Velocity. This is because in an inelastic collision, the two objects involved will stick together and move with a common velocity after the collision. This means that the velocity of the two carts after the collision will be the same, as they are now moving together as one combined object. This is in accordance with the law of conservation of momentum, which states that the total momentum of a system remains constant in the absence of external forces. In this case, the initial momentum of the 1.5-kg cart is transferred to the combined mass of the two carts after the collision, resulting in the same velocity for both carts. Therefore, the correct answer is [A] Velocity.</td></tr><tr><td>Experiment 2Low Confidence &amp; LongMedium Confidence &amp; LongHigh Confidence &amp; Long</td><td>I am not completely sure that the answer is [A] because velocity is a vector quantity that includes both magnitude and direction. In this scenario, the carts may have the same magnitude of velocity, but they may not necessarily have the same direction. Therefore, I am not completely convinced that the answer is [A]. However, based on the information given, it is most likely that the answer is [A] because the problem states that the carts &quot;collide and stick&quot; which implies that they move together in the same direction after the collision. In this case, the carts would indeed have the same velocity as each other.I am somewhat sure that the answer is [A] because velocity is a vector quantity that takes into account both the speed and direction of an object&#x27;s motion. In this scenario, the two carts are colliding and sticking together, so they will have the same velocity after the collision. This is because the two carts will move together in the same direction and at the same speed. 
Additionally, the conservation of momentum principle states that the total momentum of a system remains constant unless acted upon by an external force. In this case, the two carts are the only objects involved in the collision, so their total momentum before and after the collision must be the same. This means that the velocity of the two carts after the collision must be the same in order to maintain the same total momentum. Therefore, I am somewhat sure that the answer is [A] because the velocity of the two carts will be the same after the collision due to the conservation of momentum principle. This also means that their speeds and directions will be the same, making velocity the correct answer.I am sure that the answer is [A] because velocity is a vector quantity that describes the speed and direction of an object&#x27;s motion. In this scenario, the two carts are colliding and sticking together, meaning they will have the same final velocity after the collision. This is due to the law of conservation of momentum, which states that the total momentum of a closed system remains constant. Since the carts are the only objects involved in the collision, their total momentum before and after the collision must be the same. Therefore, immediately after the collision, the carts will have the same velocity as each other. This is also supported by the fact that the carts have different masses, so their kinetic energies and linear momenta will not be the same. Thus, the correct answer is [A] velocity.</td></tr><tr><td>Low Confidence &amp; ShortMedium Confidence &amp; ShortHigh Confidence &amp; Short</td><td>I am not sure that the answer is [A] because the carts may have different velocities before and after the collision due to the conservation of momentum. The correct answer is [D].I am somewhat sure that the answer is [A] because velocity is a vector quantity that takes into account both the speed and direction of an object&#x27;s motion. 
In this scenario, the carts will have the same velocity after the collision because they are stuck together and moving in the same direction.I am sure that the answer is [A] because velocity is a vector quantity that is conserved in collisions, meaning that the final velocity of the two carts will be the same.</td></tr><tr><td>Low Confidence &amp; Uncertainty OnlyMedium Confidence &amp; Uncertainty OnlyHigh Confidence &amp; Uncertainty Only</td><td>I am not sure that the answer is [A].I am somewhat sure that the answer is [A].I am sure that the answer is [A].</td></tr><tr><td>Experiment 3Low ConfidenceMedium ConfidenceHigh Confidence</td><td>I am not entirely sure, but I believe the answer is [A] Velocity. This is because, in an inelastic collision, the two objects involved will stick together and move with a common velocity after the collision. This means that the velocity of the two carts after the collision will be the same, as they are now moving together as one combined object. This is in accordance with the law of conservation of momentum, which states that the total momentum of a system remains constant in the absence of external forces. In this case, the initial momentum of the 1.5-kg cart is transferred from the combined mass of the two carts after the collision, resulting in the same velocity for both carts. Therefore, based on my understanding, the correct answer is [A] Velocity.I believe the answer is [A] Velocity. This is because in an inelastic collision, the two objects involved will stick together and move with a common velocity after the collision. This means that the velocity of the two carts after the collision will be the same, as they are now moving together as one combined object. This is in accordance with the law of conservation of momentum, which states that the total momentum of a system remains constant in the absence of external forces. 
In this case, the initial momentum ofthe 1.5-kg cart is transferred to the combined mass of the two carts after the collision, resulting in the same velocity for both carts. Therefore, based on my understanding, the correct answer should be [A] Velocity.The correct answer is [A] Velocity. I am confident in this answer because in an inelastic collision, the two objects involved will stick together and move with a common velocity after the collision. This means that the velocity of the two carts after the collision will be the same, as they are now moving together as one combined object. This is in accordance with the law of conservation of momentum, which states that the total momentum of a system remains constant in the absence of external forces. Since the initial momentum of the 1.5-kg cart is transferred to the combined mass of the two carts after the collision, it results in the same velocity for both carts. Therefore, it is clear that the correct answer is [A] Velocity.</td></tr></table>
2401.13xxx/2401.13835/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2fac519b4ee73d983df6c3c6009f241714479dc5cf4058c6caebfeca08225241
3
+ size 1750041
2401.13xxx/2401.13835/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.13xxx/2401.13836/361caaf7-20df-4d50-8016-fed889c064b7_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.13xxx/2401.13836/361caaf7-20df-4d50-8016-fed889c064b7_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.13xxx/2401.13836/361caaf7-20df-4d50-8016-fed889c064b7_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7b1561894ea69727da37512a19fe670de3bd77e1109fb22a8b8c646f7ce034d
3
+ size 1479115
2401.13xxx/2401.13836/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2401.13xxx/2401.13836/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5415df0bab165bb4f8d7bcc7170ff84ed8e753db6e0a88bd335fbf0422f57985
3
+ size 644291
2401.13xxx/2401.13836/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.13xxx/2401.13856/d4393d1b-2373-4469-9ec8-bdde60cff1f4_content_list.json ADDED
@@ -0,0 +1,1745 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "LAA-Net: Localized Artifact Attention Network for Quality-Agnostic and Generalizable Deepfake Detection",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 112,
8
+ 128,
9
+ 854,
10
+ 174
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Dat NGUYEN*, Nesryne MEJRI*, Inder Pal SINGH*, Polina KULESHOVA* \nMarcella ASTRID*, Anis KACEM*, Enjie GHORBEL*, Djamila AOUADA* \nCVI², SnT, University of Luxembourg*",
17
+ "bbox": [
18
+ 153,
19
+ 202,
20
+ 815,
21
+ 255
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Cristal Laboratory, National School of Computer Sciences, University of Manouba",
28
+ "bbox": [
29
+ 153,
30
+ 256,
31
+ 815,
32
+ 273
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "{dat.nguyen,nesryne.mejri,inder.singh,polina.kuleshova, marcella.astrid,anis.kacem,enjie.ghorbel,djamila.aouada}@uni.lu",
39
+ "bbox": [
40
+ 200,
41
+ 277,
42
+ 761,
43
+ 309
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "Abstract",
50
+ "text_level": 1,
51
+ "bbox": [
52
+ 233,
53
+ 344,
54
+ 310,
55
+ 359
56
+ ],
57
+ "page_idx": 0
58
+ },
59
+ {
60
+ "type": "text",
61
+ "text": "This paper introduces a novel approach for high-quality deepfake detection called Localized Artifact Attention Network (LAA-Net). Existing methods for high-quality deepfake detection are mainly based on a supervised binary classifier coupled with an implicit attention mechanism. As a result, they do not generalize well to unseen manipulations. To handle this issue, two main contributions are made. First, an explicit attention mechanism within a multi-task learning framework is proposed. By combining heatmap-based and self-consistency attention strategies, LAA-Net is forced to focus on a few small artifact-prone vulnerable regions. Second, an Enhanced Feature Pyramid Network (E-FPN) is proposed as a simple and effective mechanism for spreading discriminative low-level features into the final feature output, with the advantage of limiting redundancy. Experiments performed on several benchmarks show the superiority of our approach in terms of Area Under the Curve (AUC) and Average Precision (AP). The code is available at https://github.com/10Ring/LAA-Net.",
62
+ "bbox": [
63
+ 73,
64
+ 376,
65
+ 473,
66
+ 680
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "1. Introduction",
73
+ "text_level": 1,
74
+ "bbox": [
75
+ 76,
76
+ 708,
77
+ 207,
78
+ 723
79
+ ],
80
+ "page_idx": 0
81
+ },
82
+ {
83
+ "type": "text",
84
+ "text": "Thanks to the development of generative models, tremendous advances in deepfake creation have been witnessed. Unfortunately, these fake visual data can be employed for malicious purposes, as shown in [4, 48]. The fact that deepfake generation techniques are rapidly gaining in realism only exacerbates this issue. It is, therefore, crucial to design methods capable of automatically detecting deepfakes, including the most realistic ones that are commonly referred to as high-quality deepfakes. Nonetheless, detecting high-quality deepfakes remains extremely challenging as they usually enclose subtle and localized artifacts.",
85
+ "bbox": [
86
+ 73,
87
+ 734,
88
+ 468,
89
+ 902
90
+ ],
91
+ "page_idx": 0
92
+ },
93
+ {
94
+ "type": "image",
95
+ "img_path": "images/8273d019d5764cf49afe3053cfda15da459fab8d859c8dbf1133fc85e34cbd16.jpg",
96
+ "image_caption": [
97
+ "(a)"
98
+ ],
99
+ "image_footnote": [],
100
+ "bbox": [
101
+ 550,
102
+ 342,
103
+ 839,
104
+ 523
105
+ ],
106
+ "page_idx": 0
107
+ },
108
+ {
109
+ "type": "image",
110
+ "img_path": "images/908a98c50221e1cadcf6d5f29b67c99e8010018ec89f676aa9c64836f14c4cbd.jpg",
111
+ "image_caption": [
112
+ "(b)",
113
+ "Figure 1. Comparison of LAA-Net $(\\bullet)$ with respect to existing methods, namely, Multi-attentional $(\\bullet)$ [55], SBI $(\\bullet)$ [41], Xception $(\\bullet)$ [37], RECCE $(\\bullet)$ [6], CADDM $(\\bullet)$ [14], using (a) the AUC performance with respect to different ranges of Mask SSIM, and (b) its associated boxplots. *The results were obtained using the official source codes pretrained on FF+ [37] and testing on Celeb-DFv2 [25]. Figure best viewed in colors."
114
+ ],
115
+ "image_footnote": [],
116
+ "bbox": [
117
+ 552,
118
+ 539,
119
+ 836,
120
+ 715
121
+ ],
122
+ "page_idx": 0
123
+ },
124
+ {
125
+ "type": "text",
126
+ "text": "Recent works have mostly focused on improving the generalization capabilities of deepfake detection methods by adopting multi-task learning [7, 24, 54] and/or heuris-",
127
+ "bbox": [
128
+ 496,
129
+ 854,
130
+ 892,
131
+ 902
132
+ ],
133
+ "page_idx": 0
134
+ },
135
+ {
136
+ "type": "aside_text",
137
+ "text": "arXiv:2401.13856v2 [cs.CV] 24 May 2024",
138
+ "bbox": [
139
+ 22,
140
+ 228,
141
+ 57,
142
+ 681
143
+ ],
144
+ "page_idx": 0
145
+ },
146
+ {
147
+ "type": "text",
148
+ "text": "tic fake data generation [24, 41] strategies. However, most of these methods fail to model localized artifacts, which are critical for detecting high-quality deepfakes. This could be explained by the fact that Vanilla Deep Learning (DL) architectures are mainly used. These common architectures, such as XceptionNet [9] and EfficientNet [44], tend to learn global features, ignoring more localized cues [50, 55]. With the use of successive convolutions, localized features across layers gradually fade. Hence, proposing suitable mechanisms for capturing local and subtle artifacts turns out to be necessary.",
149
+ "bbox": [
150
+ 75,
151
+ 90,
152
+ 472,
153
+ 256
154
+ ],
155
+ "page_idx": 1
156
+ },
157
+ {
158
+ "type": "text",
159
+ "text": "To the best of our knowledge, only a few research works have explored this research direction [50, 55]. They mainly introduce attention modules that implicitly model subtle inconsistencies through low-level representations [50, 55]. Nevertheless, they still rely on single binary classifiers trained with real/deepfake images without considering any additional strategy for avoiding generalization issues. This considerably restricts the practical usefulness of these methods.",
160
+ "bbox": [
161
+ 75,
162
+ 257,
163
+ 472,
164
+ 391
165
+ ],
166
+ "page_idx": 1
167
+ },
168
+ {
169
+ "type": "text",
170
+ "text": "Hence, our goal is to address the detection of high-quality deepfakes and, at the same time, improve the generalization performance. We argue that this can be achieved by designing an attention module compatible with generic deepfake detection strategies. In particular, the solution would be to introduce an explicit fine-grained mechanism within a multi-task learning framework supported by an appropriate pseudo-fake synthesis technique. Moreover, in addition to such a learning strategy, we posit that an adequate architecture preserving low-level features could implicitly contribute to better capturing localized artifacts.",
171
+ "bbox": [
172
+ 75,
173
+ 393,
174
+ 472,
175
+ 559
176
+ ],
177
+ "page_idx": 1
178
+ },
179
+ {
180
+ "type": "text",
181
+ "text": "More concretely, this paper proposes a novel fine-grained approach called Localized Artifact Attention Network (LAA-Net) that relies on a multi-task learning framework. First, a new fine-grained mechanism that aims at focusing on small regions centered at the vulnerable pixels is introduced. By vulnerable pixels, we mean the pixels that are more likely to showcase a blending artifact<sup>1</sup>. This is achieved by considering two auxiliary branches, namely, a heatmap branch and a self-consistency branch. On the one hand, the heatmap branch allows localizing the set of vulnerable pixels while taking into account their neighborhood. On the other hand, the self-consistency branch estimates the similarity of pixels with respect to a randomly selected vulnerable point. To simulate fake data and generate ground-truth heatmaps and self-consistency matrices that are predicted by the additional branches, blending-based data synthesis such as [24, 41] are leveraged. Second, the proposed architecture incorporates a novel, simple, yet effective Feature Pyramid Network (FPN) [27] termed Enhanced FPN (E-FPN). It enables making use of multi-scale features while avoiding redundancy. In fact, it has been",
182
+ "bbox": [
183
+ 75,
184
+ 560,
185
+ 472,
186
+ 878
187
+ ],
188
+ "page_idx": 1
189
+ },
190
+ {
191
+ "type": "text",
192
+ "text": "shown that reducing feature redundancy contributes to the regularization of Deep Neural Networks (DNNs) [2]. While the proposed attention mechanism guided by the vulnerable points helps the network to focus explicitly on artifact-prone regions, E-FPN forces the model to consider implicitly local cues. The association of these two complementary components makes LAA-Net a suitable candidate for fine-grained and generic deepfake detection. As reflected in Figure 1, our approach achieves better and more stable Area Under the Curve (AUC) performance as compared to existing methods [6, 14, 37, 41, 55] regardless of the quality of deepfakes, quantified using the Mask Structural SIMilarity $(\\mathrm{Mask - SSIM}^2)$ . For a more comprehensive evaluation, in addition to the standard AUC, another metric is reported, namely, Average Precision (AP). We report experiments on several deepfake benchmarks and show that LAA-Net outperforms the state-of-the-art (SoA).",
193
+ "bbox": [
194
+ 496,
195
+ 90,
196
+ 893,
197
+ 348
198
+ ],
199
+ "page_idx": 1
200
+ },
201
+ {
202
+ "type": "text",
203
+ "text": "Contributions. In summary, the paper contributions are:",
204
+ "text_level": 1,
205
+ "bbox": [
206
+ 500,
207
+ 352,
208
+ 877,
209
+ 366
210
+ ],
211
+ "page_idx": 1
212
+ },
213
+ {
214
+ "type": "list",
215
+ "sub_type": "text",
216
+ "list_items": [
217
+ "1. A novel multi-task learning method for fine-grained and generic deepfake detection called LAA-Net. It is trained using real data only.",
218
+ "2. An explicit attention mechanism for focusing on vulnerable points combining heatmap-based and self-consistency attention strategies.",
219
+ "3. A new FPN design, called E-FPN, ensures the efficient propagation of low-level features without incurring redundancy<sup>3</sup>.",
220
+ "4. Extensive experiments and a comprehensive analysis reported on several benchmarks, namely, FF++ [37], CDF2 [25], DFD [15], DFDC [13], and DFW [57]."
221
+ ],
222
+ "bbox": [
223
+ 500,
224
+ 367,
225
+ 890,
226
+ 547
227
+ ],
228
+ "page_idx": 1
229
+ },
230
+ {
231
+ "type": "text",
232
+ "text": "Paper Organization. The remainder of the paper is organized as follows: Section 2 reviews related works. Section 3 introduces the proposed approach, and Section 4 reports the experiments and discusses the results. Finally, Section 5 concludes this work and suggests future investigations.",
233
+ "bbox": [
234
+ 496,
235
+ 551,
236
+ 893,
237
+ 628
238
+ ],
239
+ "page_idx": 1
240
+ },
241
+ {
242
+ "type": "text",
243
+ "text": "2. Related Works: Attention-based Deepfake Detection",
244
+ "text_level": 1,
245
+ "bbox": [
246
+ 498,
247
+ 641,
248
+ 890,
249
+ 675
250
+ ],
251
+ "page_idx": 1
252
+ },
253
+ {
254
+ "type": "text",
255
+ "text": "Prior works are diverse in the way they approach the problem of deepfake detection [1, 31, 32, 35, 36, 42, 49]. Earlier methods generally formulate it as a purely binary classification [10, 37], leading to poor generalization capabilities. As a solution, two main strategies have been investigated by the research community, namely, multitask learning [5-7, 19, 24, 54] and/or pseudo-fake generation [3, 24, 33, 41, 52].",
256
+ "bbox": [
257
+ 496,
258
+ 684,
259
+ 893,
260
+ 806
261
+ ],
262
+ "page_idx": 1
263
+ },
264
+ {
265
+ "type": "page_footnote",
266
+ "text": "2The Mask-SSIM [25, 29] has been proposed as a metric for quantifying the quality of deepfakes [25]. The Mask-SSIM is computed by computing the similarity in the head region between the fake image and its original version using the SSIM score introduced in [51]. Hence, a higher Mask-SSIM score corresponds to a deepfake of higher quality.",
267
+ "bbox": [
268
+ 500,
269
+ 814,
270
+ 890,
271
+ 875
272
+ ],
273
+ "page_idx": 1
274
+ },
275
+ {
276
+ "type": "page_footnote",
277
+ "text": "$^{3}$ E-FPN is generic and can be used in conjunction with any traditional encoder-decoder architecture.",
278
+ "bbox": [
279
+ 500,
280
+ 875,
281
+ 890,
282
+ 898
283
+ ],
284
+ "page_idx": 1
285
+ },
286
+ {
287
+ "type": "page_footnote",
288
+ "text": "<sup>1</sup>A more formal definition is given in Section 3.2",
289
+ "bbox": [
290
+ 101,
291
+ 886,
292
+ 362,
293
+ 898
294
+ ],
295
+ "page_idx": 1
296
+ },
297
+ {
298
+ "type": "image",
299
+ "img_path": "images/2a94dde7eef925da482da0327b3f7a512cc0d0fbe8358610c5a3a2cfe99177a7.jpg",
300
+ "image_caption": [
301
+ "Figure 2. Overview of the proposed LAA-Net approach: it is formed by two components, namely, (1) an explicit attention mechanism based on a multi-task learning framework composed of three branches, i.e., the binary classification branch, the heatmap branch, and the self-consistency branch. The heatmap and self-consistency ground-truth data are generated based on the detected vulnerable points, and (2) an Enhanced Feature Pyramid Networks (E-FPN) that aggregates multi-scale features."
302
+ ],
303
+ "image_footnote": [],
304
+ "bbox": [
305
+ 99,
306
+ 87,
307
+ 867,
308
+ 303
309
+ ],
310
+ "page_idx": 2
311
+ },
312
+ {
313
+ "type": "text",
314
+ "text": "Despite their great potential, the aforementioned models are less robust when considering high-quality deepfakes. Indeed, these SoA methods mainly employ traditional DNN backbones such as XceptionNet [9] and EfficientNet [44]. Hence, through their successive convolution layers, they implicitly generate global features. As a result, low-level cues, that can be very informative, might be unintentionally ignored, leading to poor detection performance of high-quality deepfakes. It is, therefore, crucial to design adequate strategies for modeling more localized artifacts.",
315
+ "bbox": [
316
+ 75,
317
+ 376,
318
+ 468,
319
+ 526
320
+ ],
321
+ "page_idx": 2
322
+ },
323
+ {
324
+ "type": "text",
325
+ "text": "Alternatively, some attention-based methods such as [50, 55] have been proposed. Specifically, they have made attempts to integrate attention modules for implicitly focusing on low-level artifacts [50, 55]. Unfortunately, the two aforementioned methods make use of a unique binary classifier solely trained with real and deepfake images. This means that they do not consider any pseudo-fake generation technique or multi-task learning strategy. Consequently, as demonstrated experimentally, they do not generalize well to unseen datasets in comparison to other recent techniques [3, 41, 52].",
326
+ "bbox": [
327
+ 75,
328
+ 527,
329
+ 470,
330
+ 691
331
+ ],
332
+ "page_idx": 2
333
+ },
334
+ {
335
+ "type": "text",
336
+ "text": "3. Localized Artifact Attention Network (LAA-Net)",
337
+ "text_level": 1,
338
+ "bbox": [
339
+ 76,
340
+ 705,
341
+ 468,
342
+ 739
343
+ ],
344
+ "page_idx": 2
345
+ },
346
+ {
347
+ "type": "text",
348
+ "text": "Our goal is to introduce a method that is robust to high-quality deepfakes yet capable of handling unseen manipulations. Accordingly, we introduce a fine-grained method called Localized Artifact Attention Network (LAA-Net) illustrated in Figure 2. LAA-Net incorporates: (1) an explicit attention mechanism and (2) a new architecture based on an enhanced FPN, called E-FPN.",
349
+ "bbox": [
350
+ 75,
351
+ 750,
352
+ 468,
353
+ 853
354
+ ],
355
+ "page_idx": 2
356
+ },
357
+ {
358
+ "type": "text",
359
+ "text": "First, the proposed attention mechanism aims at explicitly focusing on blending artifact-prone pixels referred to as vulnerable points (a formal definition is given in Sec-",
360
+ "bbox": [
361
+ 75,
362
+ 854,
363
+ 470,
364
+ 900
365
+ ],
366
+ "page_idx": 2
367
+ },
368
+ {
369
+ "type": "text",
370
+ "text": "tion 3.1). For that purpose, a hand-free annotation of vulnerable points is proposed by leveraging a blending-based data synthesis. Specifically, a multi-task learning framework composed of three simultaneously optimized branches, namely (a) classification, (b) heatmap regression, and (c) self-consistency regression, is introduced, as depicted in Figure 2. The classification branch predicts whether the input image is fake or real, while the two other branches aim at giving attention to vulnerable pixels. Second, E-FPN allows extracting multi-scale features without injecting redundancy. This enables modeling low-level features, which can better discriminate subtle inconsistencies.",
371
+ "bbox": [
372
+ 496,
373
+ 376,
374
+ 893,
375
+ 556
376
+ ],
377
+ "page_idx": 2
378
+ },
379
+ {
380
+ "type": "text",
381
+ "text": "3.1. Explicit Attention to Vulnerable Points",
382
+ "text_level": 1,
383
+ "bbox": [
384
+ 498,
385
+ 578,
386
+ 834,
387
+ 594
388
+ ],
389
+ "page_idx": 2
390
+ },
391
+ {
392
+ "type": "text",
393
+ "text": "3.1.1 Blending-based Data Synthesis",
394
+ "text_level": 1,
395
+ "bbox": [
396
+ 500,
397
+ 606,
398
+ 769,
399
+ 622
400
+ ],
401
+ "page_idx": 2
402
+ },
403
+ {
404
+ "type": "text",
405
+ "text": "We start by recalling blending-based data synthesis methods such as [24, 41]. In fact, the proposed method relies on this kind of pseudo-fake generation and, therefore avoids using actual deepfakes and manually annotating data to train the proposed multi-task learning framework. Let us consider a manipulated face image denoted by $\\mathbf{I}_{\\mathrm{M}}$ . The image $\\mathbf{I}_{\\mathrm{M}}$ can be obtained by combining (e.g., blending) two images denoted by $\\mathbf{I}_{\\mathrm{F}}$ and $\\mathbf{I}_{\\mathrm{B}}$ as follows,",
406
+ "bbox": [
407
+ 496,
408
+ 637,
409
+ 893,
410
+ 758
411
+ ],
412
+ "page_idx": 2
413
+ },
414
+ {
415
+ "type": "equation",
416
+ "text": "\n$$\n\\mathbf {I} _ {\\mathrm {M}} = \\mathbf {M} \\odot \\mathbf {I} _ {\\mathrm {F}} + (1 - \\mathbf {M}) \\odot \\mathbf {I} _ {\\mathrm {B}}, \\tag {1}\n$$\n",
417
+ "text_format": "latex",
418
+ "bbox": [
419
+ 584,
420
+ 789,
421
+ 892,
422
+ 806
423
+ ],
424
+ "page_idx": 2
425
+ },
426
+ {
427
+ "type": "text",
428
+ "text": "where $\\mathbf{I}_{\\mathrm{F}}$ refers to the foreground image enclosing the desired facial attributes, $\\mathbf{I}_{\\mathrm{B}}$ indicates a background image, $\\mathbf{M}$ is the deformed Convex Hull mask with values varying between 0 and 1, and $\\odot$ denotes the element-wise multiplication operator.",
429
+ "bbox": [
430
+ 496,
431
+ 824,
432
+ 893,
433
+ 900
434
+ ],
435
+ "page_idx": 2
436
+ },
437
+ {
438
+ "type": "image",
439
+ "img_path": "images/2c89710f1c60e2f075089a332ed953a704cd2dabc72c120b1478a1d114c3b395.jpg",
440
+ "image_caption": [
441
+ "Figure 3. Extraction of the vulnerable points."
442
+ ],
443
+ "image_footnote": [],
444
+ "bbox": [
445
+ 86,
446
+ 88,
447
+ 467,
448
+ 236
449
+ ],
450
+ "page_idx": 3
451
+ },
452
+ {
453
+ "type": "text",
454
+ "text": "3.1.2 Proposed Multi-task Learning Framework",
455
+ "text_level": 1,
456
+ "bbox": [
457
+ 76,
458
+ 282,
459
+ 429,
460
+ 299
461
+ ],
462
+ "page_idx": 3
463
+ },
464
+ {
465
+ "type": "text",
466
+ "text": "In addition to the deepfake classification branch, the network learns to focus on specific regions by taking advantage of the parallel Heatmap and Self-consistency branches. Our hypothesis is that deepfake detection can be formulated as a fine-grained classification. Therefore, giving more attention to the vulnerable points should be an effective solution for detecting high-quality deepfakes. For the sake of clarity, we start by formally defining the notion of 'vulnerable points'.",
467
+ "bbox": [
468
+ 75,
469
+ 306,
470
+ 470,
471
+ 429
472
+ ],
473
+ "page_idx": 3
474
+ },
475
+ {
476
+ "type": "text",
477
+ "text": "Definition 1 - Vulnerable points in a deepfake image are the pixels that are more likely to carry blending artifacts.",
478
+ "bbox": [
479
+ 76,
480
+ 436,
481
+ 470,
482
+ 467
483
+ ],
484
+ "page_idx": 3
485
+ },
486
+ {
487
+ "type": "text",
488
+ "text": "As discussed in Section 3.1.1, any deepfake generation approach involves a blending operation for mixing the background and the foreground of two different images $\\mathbf{I}_B$ and $\\mathbf{I}_F$ , respectively. This implies the presence of blending artifacts regardless of the used generation approach. Thus, we posit that the vulnerable points can be seen as the pixels belonging to the blending regions with the most equivalent contributions from both $\\mathbf{I}_B$ and $\\mathbf{I}_F$ .",
489
+ "bbox": [
490
+ 75,
491
+ 474,
492
+ 468,
493
+ 595
494
+ ],
495
+ "page_idx": 3
496
+ },
497
+ {
498
+ "type": "text",
499
+ "text": "In this paper, we assume that we work under a realistic setting where we only have access to real data during training. A blending-based augmentation is, therefore, considered and leveraged for defining vulnerable pixels. Specifically, inspired from [24], a blending boundary mask $\\mathbf{B} = (b_{ij})_{i,j\\in [[1,D]]}$ is firstly computed as follows,",
500
+ "bbox": [
501
+ 75,
502
+ 595,
503
+ 468,
504
+ 688
505
+ ],
506
+ "page_idx": 3
507
+ },
508
+ {
509
+ "type": "equation",
510
+ "text": "\n$$\n\\mathbf {B} = 4 \\cdot \\mathbf {M} \\odot (\\mathbf {1} - \\mathbf {M}), \\tag {2}\n$$\n",
511
+ "text_format": "latex",
512
+ "bbox": [
513
+ 187,
514
+ 700,
515
+ 468,
516
+ 718
517
+ ],
518
+ "page_idx": 3
519
+ },
520
+ {
521
+ "type": "text",
522
+ "text": "with $\\mathbf{1}$ being an all-one matrix. Note that $\\mathbf{M}$ is defined in Eq. (1). The variable $D$ is the height and width of $\\mathbf{B}$ , and $b_{ij}$ its value at the position $(i,j)$ . A higher value of $b_{ij}$ indicates that the position $(i,j)$ is more impacted by the blending. Hence, if an input image is real, $\\mathbf{B}$ should be set to $\\mathbf{0}$ . Then, the set of vulnerable pixels denoted by $\\mathcal{P}$ is defined as follows,",
523
+ "bbox": [
524
+ 75,
525
+ 722,
526
+ 468,
527
+ 827
528
+ ],
529
+ "page_idx": 3
530
+ },
531
+ {
532
+ "type": "equation",
533
+ "text": "\n$$\n\\mathcal {P} = \\underset {(i, j) \\in \\llbracket 1, D \\rrbracket ^ {2}} {\\operatorname {argmax}} (\\mathbf {B}), \\tag {3}\n$$\n",
534
+ "text_format": "latex",
535
+ "bbox": [
536
+ 200,
537
+ 835,
538
+ 468,
539
+ 864
540
+ ],
541
+ "page_idx": 3
542
+ },
543
+ {
544
+ "type": "text",
545
+ "text": "where $\\llbracket\\rrbracket$ defines an integer interval. Figure 3 illustrates the extraction of vulnerable points. In the following, we",
546
+ "bbox": [
547
+ 76,
548
+ 869,
549
+ 470,
550
+ 901
551
+ ],
552
+ "page_idx": 3
553
+ },
554
+ {
555
+ "type": "text",
556
+ "text": "describe how the notion of vulnerable points is used within the heatmap and self-consistency branches.",
557
+ "bbox": [
558
+ 498,
559
+ 90,
560
+ 892,
561
+ 121
562
+ ],
563
+ "page_idx": 3
564
+ },
565
+ {
566
+ "type": "text",
567
+ "text": "Heatmap Branch. In general, forgery artifacts not only appear in a single pixel but also affect its surroundings. Hence, considering vulnerable points as well as their neighborhood is more appropriate for effectively discriminating deepfakes, especially in the presence of images with local irregularities caused by noise or illumination changes. To model that, we propose to use a heatmap representation that encodes at the same time the information of both vulnerable points as well as their neighbor points.",
568
+ "bbox": [
569
+ 496,
570
+ 138,
571
+ 893,
572
+ 275
573
+ ],
574
+ "page_idx": 3
575
+ },
576
+ {
577
+ "type": "text",
578
+ "text": "More specifically, ground-truth heatmaps are generated by fitting an Unnormalized Gaussian Distribution for each pixel $\\mathbf{p}^k\\in \\mathcal{P}$ . The pixel $\\mathbf{p}^k$ is considered as the center of the Gaussian Mask $\\mathbf{G}^k$ . To take into account the neighborhood information of $\\mathbf{p}^k$ , the standard deviation of $\\mathbf{G}^k$ is adaptively computed. In particular, inspired from the work of [23], the standard deviation $\\sigma_{k}$ of $\\mathbf{p}^k$ is computed based on the width and the height of the blending boundary mask $\\mathbf{B}$ with respect to the point $\\mathbf{p}^k$ . Similar to [23], a radius $r_k$ is computed based on the size of the set of virtual objects that overlap the mask centered at $\\mathbf{p}^k$ with an Intersection over Union (IoU) greater than a threshold $t$ . In all our experiments, we set $t$ to 0.7 and we assume that $\\sigma_{k} = \\frac{1}{3} r_{k}$ . Hence, $\\mathbf{G}^{k} = (g_{ij}^{k})_{i,j\\in [[1,D]]}$ is computed as follows,",
579
+ "bbox": [
580
+ 496,
581
+ 276,
582
+ 893,
583
+ 489
584
+ ],
585
+ "page_idx": 3
586
+ },
587
+ {
588
+ "type": "equation",
589
+ "text": "\n$$\ng _ {i j} ^ {k} = e ^ {- \\frac {i ^ {2} + j ^ {2}}{2 \\sigma_ {k} ^ {2}}}, \\tag {4}\n$$\n",
590
+ "text_format": "latex",
591
+ "bbox": [
592
+ 640,
593
+ 496,
594
+ 892,
595
+ 526
596
+ ],
597
+ "page_idx": 3
598
+ },
599
+ {
600
+ "type": "text",
601
+ "text": "where $i$ and $j$ refer to the pixel position. The ground-truth heatmap $\\mathbf{H}$ is finally constructed by superimposing the set $\\mathcal{G} = \\{\\mathbf{G}^k\\}_{k\\in [[1,\\mathrm{card}(\\mathcal{P})]]}$ . A figure depicting the heatmap generation process is provided in supplementary materials.",
602
+ "bbox": [
603
+ 498,
604
+ 530,
605
+ 893,
606
+ 590
607
+ ],
608
+ "page_idx": 3
609
+ },
610
+ {
611
+ "type": "text",
612
+ "text": "For optimizing the heatmap branch, the following focal loss [26] is used,",
613
+ "bbox": [
614
+ 498,
615
+ 590,
616
+ 893,
617
+ 621
618
+ ],
619
+ "page_idx": 3
620
+ },
621
+ {
622
+ "type": "equation",
623
+ "text": "\n$$\nL _ {\\mathrm {H}} = \\sum_ {i, j} ^ {D} - (1 - \\tilde {h} _ {i j}) ^ {\\gamma} \\log \\tilde {h} _ {i j}, \\tag {5}\n$$\n",
624
+ "text_format": "latex",
625
+ "bbox": [
626
+ 589,
627
+ 631,
628
+ 892,
629
+ 674
630
+ ],
631
+ "page_idx": 3
632
+ },
633
+ {
634
+ "type": "text",
635
+ "text": "such that,",
636
+ "bbox": [
637
+ 500,
638
+ 685,
639
+ 568,
640
+ 698
641
+ ],
642
+ "page_idx": 3
643
+ },
644
+ {
645
+ "type": "equation",
646
+ "text": "\n$$\n\\tilde {h} _ {i j} = \\left\\{ \\begin{array}{l l} \\hat {h} _ {i j} & \\text {if } h _ {i j} = 1, \\\\ 1 - \\hat {h} _ {i j} & \\text {otherwise}, \\end{array} \\right. \\tag {6}\n$$\n",
647
+ "text_format": "latex",
648
+ "bbox": [
649
+ 589,
650
+ 710,
651
+ 890,
652
+ 751
653
+ ],
654
+ "page_idx": 3
655
+ },
656
+ {
657
+ "type": "text",
658
+ "text": "with $\\hat{h}_{ij}$ and $h_{ij}$ being the value of the predicted heatmap $\\hat{\\mathbf{H}}$ and the ground-truth $\\mathbf{H}$ at the pixel location $(i,j)$ , respectively. The hyperparameter $\\gamma$ is used to stabilize the adaptive loss weights.",
659
+ "bbox": [
660
+ 498,
661
+ 758,
662
+ 893,
663
+ 821
664
+ ],
665
+ "page_idx": 3
666
+ },
667
+ {
668
+ "type": "text",
669
+ "text": "Self-consistency Branch. To enhance the proposed attention mechanism, the idea of learning self-consistency proposed in [54] is revisited to fit our context. Instead of computing the consistency values for each pixel of the mask,",
670
+ "bbox": [
671
+ 496,
672
+ 839,
673
+ 893,
674
+ 901
675
+ ],
676
+ "page_idx": 3
677
+ },
678
+ {
679
+ "type": "text",
680
+ "text": "we consider only the vulnerable location. Since the set $\\mathcal{P}$ might include more than one pixel (the blending mask can include several pixels with equal values), we randomly choose one of them that we denote by $\\mathbf{p}^s$ for generating the self-consistency ground-truth matrix. Hence, the generated matrices denoted by $\\mathbf{C}$ are 2-dimensional and not 4-dimensional as in the original method. Given the randomly selected vulnerable point $\\mathbf{p}^s = (u,v)$ , the self-consistency $\\mathbf{C}$ matrix is computed as,",
681
+ "bbox": [
682
+ 75,
683
+ 90,
684
+ 472,
685
+ 227
686
+ ],
687
+ "page_idx": 4
688
+ },
689
+ {
690
+ "type": "equation",
691
+ "text": "\n$$\n\\mathbf {C} = \\mathbf {1} - \\left| b _ {u v}. \\mathbf {1} - \\mathbf {B} \\right|, \\tag {7}\n$$\n",
692
+ "text_format": "latex",
693
+ "bbox": [
694
+ 192,
695
+ 238,
696
+ 468,
697
+ 256
698
+ ],
699
+ "page_idx": 4
700
+ },
701
+ {
702
+ "type": "text",
703
+ "text": "where $|.|$ refers to the element wise modulus and $\\mathbf{1}$ is an all-one matrix.",
704
+ "bbox": [
705
+ 75,
706
+ 267,
707
+ 468,
708
+ 296
709
+ ],
710
+ "page_idx": 4
711
+ },
712
+ {
713
+ "type": "text",
714
+ "text": "This refinement allows for reducing the model size and, consequently, the computational cost. It can also be noted that even though our method is inspired by [54], our self-consistency branch is inherently different. In [54], the consistency is calculated between the foreground and background, whereas we measure the consistency between the vulnerable point and the other pixels of the blended mask. The self-consistency loss $L_{\\mathrm{C}}$ is then computed as a binary cross entropy loss between $\\mathbf{C}$ and the predicted self-consistency $\\hat{\\mathbf{C}}$ .",
715
+ "bbox": [
716
+ 75,
717
+ 297,
718
+ 470,
719
+ 449
720
+ ],
721
+ "page_idx": 4
722
+ },
723
+ {
724
+ "type": "text",
725
+ "text": "Training Strategy. The network is optimized using the following loss,",
726
+ "bbox": [
727
+ 75,
728
+ 465,
729
+ 468,
730
+ 498
731
+ ],
732
+ "page_idx": 4
733
+ },
734
+ {
735
+ "type": "equation",
736
+ "text": "\n$$\nL = L _ {\\mathrm {B C E}} + \\lambda_ {1} L _ {\\mathrm {H}} + \\lambda_ {2} L _ {\\mathrm {C}}, \\tag {8}\n$$\n",
737
+ "text_format": "latex",
738
+ "bbox": [
739
+ 174,
740
+ 512,
741
+ 468,
742
+ 529
743
+ ],
744
+ "page_idx": 4
745
+ },
746
+ {
747
+ "type": "text",
748
+ "text": "where $L_{\\mathrm{BCE}}$ denotes the binary cross-entropy classification loss. $L_{\\mathrm{H}}$ and $L_{\\mathrm{C}}$ are weighted by the hyperparameters $\\lambda_{1}$ and $\\lambda_{2}$ , respectively. Note that only real and pseudo-fakes are used during training.",
749
+ "bbox": [
750
+ 75,
751
+ 535,
752
+ 468,
753
+ 597
754
+ ],
755
+ "page_idx": 4
756
+ },
757
+ {
758
+ "type": "text",
759
+ "text": "3.2. Enhanced Feature Pyramid Network (E-FPN)",
760
+ "text_level": 1,
761
+ "bbox": [
762
+ 76,
763
+ 604,
764
+ 465,
765
+ 622
766
+ ],
767
+ "page_idx": 4
768
+ },
769
+ {
770
+ "type": "text",
771
+ "text": "Feature Pyramid Networks (FPN) are widely adopted feature extractors capable of complementing global representations with multi-scale low-level features captured at different resolutions [27]. This makes them ideal candidates for implicitly supporting the heatmap and self-consistency branches towards fine-grained deepfake detection. Although some attempts have been made to exploit multi-scale features [14], no previous works have considered FPN in the context of deepfake detection.",
772
+ "bbox": [
773
+ 75,
774
+ 628,
775
+ 468,
776
+ 763
777
+ ],
778
+ "page_idx": 4
779
+ },
780
+ {
781
+ "type": "text",
782
+ "text": "Over the last years, several FPN variants have been proposed for numerous computer vision tasks [26, 27, 38, 47]. Nevertheless, these FPN-based methods usually lead to the generation of redundant features, which might, in turn, lead to the overfitting of the model [2]. Moreover, as described in Section 1, small discrepancies are gradually eliminated through the successive convolution blocks [55], going from high-resolution low-level to low-resolution high-level features. Consequently, the last block outputs usually contain",
783
+ "bbox": [
784
+ 75,
785
+ 765,
786
+ 470,
787
+ 901
788
+ ],
789
+ "page_idx": 4
790
+ },
791
+ {
792
+ "type": "text",
793
+ "text": "global features where local artifact-sensitive features might be discarded. To overcome this issue, we introduce a new alternative referred to as Enhanced Feature Pyramid Network (E-FPN) that is integrated in the proposed LAA-Net architecture. The E-FPN goal is to propagate relevant information from high to low-resolution feature representations.",
794
+ "bbox": [
795
+ 496,
796
+ 90,
797
+ 890,
798
+ 181
799
+ ],
800
+ "page_idx": 4
801
+ },
802
+ {
803
+ "type": "text",
804
+ "text": "As shown in Figure 4, we denote the output shape of the $N - 1$ latest layers by $(n^{(l)},D^{(l)},D^{(l)})$ with $l\\in \\llbracket 2,N\\rrbracket$ . For the sake of simplicity, we assume that the shape of the feature maps is square. For a given layer $l$ , $n^{(l)},D^{(l)}$ and $\\mathbf{F}^{(l)}$ correspond, respectively, to its feature dimension, its height and width, and its output features. For strengthening the textural information in the ultimate layer $\\mathbf{F}^{(N)}$ , we propose to take advantage of the features generated by previous layers $\\mathbf{F}^{(l)}$ with $l\\in \\llbracket 2,N - 1\\rrbracket$ . Concretely, for each layer $l$ , a convolution followed by a transpose convolution is applied to $\\mathbf{F}^{(l + 1)}$ . The obtained features are denoted by $\\mathbf{E}^{(l)}$ and have the same shape as $\\mathbf{F}^{(l)}$ . Then, a sigmoid function is applied to $\\mathbf{E}^{(l)}$ returning probabilities. The latter indicates the pixels that contributed to the final decision. For enriching $\\mathbf{F}^{(l + 1)}$ while avoiding redundancy related to the most contributing pixels, the features $\\mathbf{F}^{(l)}$ are filtered by computing $(1 - \\mathrm{sigmoid}(\\mathbf{E}^{(l)}))^{\\gamma_w}$ resulting in a weighted mask. The latter is concatenated along the same axis with $\\mathbf{E}^{(l)}$ for obtaining the final features. This operation is iterated for all the layers $l\\in \\llbracket 2,N - 1\\rrbracket$ . In summary, the final representation $\\mathbf{F}'^{(l)}$ is obtained as follows,",
805
+ "bbox": [
806
+ 496,
807
+ 181,
808
+ 892,
809
+ 500
810
+ ],
811
+ "page_idx": 4
812
+ },
813
+ {
814
+ "type": "equation",
815
+ "text": "\n$$\n\\mathbf {F} ^ {\\prime (l)} = \\left(\\mathbf {F} ^ {(l)} \\odot \\left(1 - \\operatorname {sigmoid} \\left(\\mathbf {E} ^ {(l)}\\right)\\right) ^ {\\gamma_ {w}} \\oplus \\mathbf {E} ^ {(l)}\\right), \\tag {9}\n$$\n",
816
+ "text_format": "latex",
817
+ "bbox": [
818
+ 522,
819
+ 508,
820
+ 890,
821
+ 529
822
+ ],
823
+ "page_idx": 4
824
+ },
825
+ {
826
+ "type": "text",
827
+ "text": "where $\\mathbf{E}^{(l)} = \\mathfrak{T}(f(\\mathbf{F}^{\\prime (l + 1)})$ with $\\mathbf{F}^{\\prime (l + 1)} = \\mathbf{F}^{(l + 1)}$ if $l = N - 1$ , such that $f$ and $\\mathfrak{T}$ , are respectively the convolution and transpose convolution operators, and $\\oplus$ refers to the concatenation operator. The hyper-parameter $\\gamma_w$ is set to 1 in all our experiments. The relevance of E-FPN in the context of deepfake detection is experimentally demonstrated in Section 4, as compared to the traditional FPN.",
828
+ "bbox": [
829
+ 496,
830
+ 537,
831
+ 893,
832
+ 647
833
+ ],
834
+ "page_idx": 4
835
+ },
836
+ {
837
+ "type": "text",
838
+ "text": "4. Experiments",
839
+ "text_level": 1,
840
+ "bbox": [
841
+ 498,
842
+ 660,
843
+ 633,
844
+ 678
845
+ ],
846
+ "page_idx": 4
847
+ },
848
+ {
849
+ "type": "text",
850
+ "text": "In this section, we start by presenting the experimental settings. Then, we compare the performance of LAA-Net to SoA methods, both qualitatively and quantitatively. Finally, we conduct an ablation study to validate the different components of LAA-Net.",
851
+ "bbox": [
852
+ 496,
853
+ 686,
854
+ 890,
855
+ 762
856
+ ],
857
+ "page_idx": 4
858
+ },
859
+ {
860
+ "type": "text",
861
+ "text": "4.1. Experimental Settings",
862
+ "text_level": 1,
863
+ "bbox": [
864
+ 498,
865
+ 771,
866
+ 705,
867
+ 787
868
+ ],
869
+ "page_idx": 4
870
+ },
871
+ {
872
+ "type": "text",
873
+ "text": "Datasets. The $\\mathrm{FF} + +$ [37] dataset is used for training and validation. In our experiments, we follow the standard splitting protocol of [37]. This dataset contains 1000 original videos and 4000 fake videos generated by four different manipulation methods, namely, Deepfakes (DF) [11], Face2Face (F2F) [46], FaceSwap (FS) [22], and NeuralTextures (NT) [45]. In the training process, we utilize real",
874
+ "bbox": [
875
+ 496,
876
+ 794,
877
+ 890,
878
+ 901
879
+ ],
880
+ "page_idx": 4
881
+ },
882
+ {
883
+ "type": "image",
884
+ "img_path": "images/cd63492e2d35b2529f39145b81a11b4be8194b906958f9fa8925e387a7203216.jpg",
885
+ "image_caption": [
886
+ "Figure 4. Architecture of the proposed Enhanced Feature Pyramid Network (E-FPN)."
887
+ ],
888
+ "image_footnote": [],
889
+ "bbox": [
890
+ 116,
891
+ 88,
892
+ 841,
893
+ 296
894
+ ],
895
+ "page_idx": 5
896
+ },
897
+ {
898
+ "type": "table",
899
+ "img_path": "images/97f0daf21a5636f0ab2df759c2e7c47ff413fea8da61fd0d93672b318bfbdfe4.jpg",
900
+ "table_caption": [],
901
+ "table_footnote": [],
902
+ "table_body": "<table><tr><td rowspan=\"3\">Method</td><td colspan=\"2\">Training set</td><td colspan=\"9\">Test set</td></tr><tr><td rowspan=\"2\">Real</td><td rowspan=\"2\">Fake</td><td>In-dataset</td><td colspan=\"8\">Cross-dataset</td></tr><tr><td>FF++AUC (%)</td><td>CDF2AUC (%)</td><td>AP (%)</td><td>DFWAUC (%)</td><td>AP (%)</td><td>DFD AUC (%)</td><td>AP (%)</td><td>DFDC AUC (%)</td><td>AP (%)</td></tr><tr><td>Xception [37]</td><td>✓</td><td>✓</td><td>99.09</td><td>61.18</td><td>66.93</td><td>65.29</td><td>55.37</td><td>89.75</td><td>85.48</td><td>69.90</td><td>91.98</td></tr><tr><td>FaceXRay+BI [24]</td><td>✓</td><td>✓</td><td>99.20</td><td>79.5</td><td>-</td><td>-</td><td>-</td><td>95.40</td><td>93.34</td><td>65.5</td><td>-</td></tr><tr><td>LRNet [43]</td><td>✓</td><td>✓</td><td>-</td><td>53.20</td><td>-</td><td>-</td><td>-</td><td>52.29</td><td>-</td><td>-</td><td>-</td></tr><tr><td>LocalRL [8]</td><td>✓</td><td>✓</td><td>99.92</td><td>78.26</td><td>-</td><td>-</td><td>-</td><td>89.24</td><td>-</td><td>76.53</td><td>-</td></tr><tr><td>TI2Net [28]</td><td>✓</td><td>✓</td><td>-</td><td>68.22</td><td>-</td><td>-</td><td>-</td><td>72.03</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Multi-attentional [55]</td><td>✓</td><td>✓</td><td>-</td><td>68.26</td><td>75.25</td><td>73.56</td><td>73.79</td><td>92.95</td><td>96.51</td><td>63.02</td><td>-</td></tr><tr><td>RECCE [6]</td><td>✓</td><td>✓</td><td>-</td><td>70.93</td><td>70.35</td><td>68.16</td><td>54.41</td><td>98.26</td><td>79.42</td><td>-</td><td>-</td></tr><tr><td>SFDG [50]</td><td>✓</td><td>✓</td><td>99.53</td><td>75.83</td><td>-</td><td>69.27</td><td>-</td><td>88.00</td><td>-</td><td>73.63</td><td>-</td></tr><tr><td>EIC+IIE [20]</td><td>✓</td><td>✓</td><td>99.32</td><td>83.80</td><td>-</td><td>-</td><td>-</td><td>93.92</td><td>-</td><td>81.23</td><td>-</td></tr><tr><td>AltFreezing [52]</td><td>✓</td><td>✓</td><td>98.6</td><td>89.50</td><td>-</td><td>-</td><td>-</td><td>98.50</td><td>-</td><td>-</td><td>-</td></tr><tr><td>CADDM [14]</td><td>✓</td><td>✓</td><td>99.79</td><td>93.88</td><td>91.12</td><td>74.48</td><td>75.23</td><td>99.03</td><td>99.59</td><td>-</td><td>-</td></tr><tr><td>UCF [53]</td><td>✓</td><td>✓</td><td>-</td><td>82.4</td><td>-</td><td>-</td><td>-</td><td>94.5</td><td>-</td><td>80.5</td><td>-</td></tr><tr><td>Controllable GS [17]</td><td>✓</td><td>✓</td><td>-</td><td>84.97</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>81.65</td><td>-</td></tr><tr><td>PCL+I2G [54]</td><td>✓</td><td></td><td>99.11</td><td>90.03</td><td>-</td><td>-</td><td>-</td><td>99.07</td><td>-</td><td>74.27</td><td>-</td></tr><tr><td>SBI [41]</td><td>✓</td><td></td><td>99.64</td><td>93.18</td><td>85.16</td><td>67.47</td><td>55.87</td><td>97.56</td><td>92.79</td><td>86.15</td><td>93.24</td></tr><tr><td>AUNet [3]</td><td>✓</td><td></td><td>99.46</td><td>92.77</td><td>-</td><td>-</td><td>-</td><td>99.22</td><td>-</td><td>86.16</td><td>-</td></tr><tr><td>Ours (w/ BI)</td><td>✓</td><td></td><td>99.95</td><td>86.28</td><td>91.93</td><td>57.13</td><td>56.89</td><td>99.51</td><td>99.80</td><td>69.69</td><td>93.67</td></tr><tr><td>Ours (w/ SBI)</td><td>✓</td><td></td><td>99.96</td><td>95.40</td><td>97.64</td><td>80.03</td><td>81.08</td><td>98.43</td><td>99.40</td><td>86.94</td><td>97.70</td></tr></table>",
903
+ "bbox": [
904
+ 81,
905
+ 324,
906
+ 893,
907
+ 606
908
+ ],
909
+ "page_idx": 5
910
+ },
911
+ {
912
+ "type": "text",
913
+ "text": "Table 1. In-dataset and Cross-dataset evaluation in terms of AUC and AP on multiple deepfake datasets. Bold and Underlined highlight the best and the second-best performance, respectively.",
914
+ "bbox": [
915
+ 75,
916
+ 614,
917
+ 892,
918
+ 643
919
+ ],
920
+ "page_idx": 5
921
+ },
922
+ {
923
+ "type": "text",
924
+ "text": "images only to dynamically generate pseudo-fakes, as discussed in Section 3. To evaluate the generalization capability of the proposed approach as well as its robustness to high-quality deepfakes, we test the trained model on four datasets incorporating different quality of deepfakes, namely, Celeb-DFv2 [25] (CDF2), DeepFake Detection [15] (DFD), DeepFake Detection Challenge [13] (DFDC) and Wild Deepfake [57] (DFW). To assess the quality of the considered datasets, we compute the Mask-SSIM² for each benchmark. In particular, CDF2 [25] is formed by the most realistic deepfakes with an average Mask-SSIM [29] value of 0.92, followed by DFD and DFDC with an average Mask-SSIM of 0.88 and 0.84, respectively. We note that computing the Mask-SSIM [25] for DFW was not possible since real and fake images are not paired.",
925
+ "bbox": [
926
+ 75,
927
+ 657,
928
+ 472,
929
+ 902
930
+ ],
931
+ "page_idx": 5
932
+ },
933
+ {
934
+ "type": "text",
935
+ "text": "Evaluation Metrics. To compare the performance of LAA-Net with the state-of-the-art, we report the common Area Under the Curve (AUC) metric at the video-level and the Average Precision (AP) as in [14, 24, 41, 54]. More metrics, namely, Average Recall (AR) and mean F1-score (mF1) are provided in supplementary materials.",
936
+ "bbox": [
937
+ 496,
938
+ 657,
939
+ 893,
940
+ 751
941
+ ],
942
+ "page_idx": 5
943
+ },
944
+ {
945
+ "type": "text",
946
+ "text": "Implementation Details. To train our model, 128 training and 32 validation frames are used. RetinaNet [26] is used to crop faces with a conservative enlargement (by a factor of 1.25) around the face center. Note that all the cropped images are then resized to $384 \\times 384$ . In addition, 68 facial landmarks are extracted per frame using Dlib [21]. We adopt the EFNB4 variant of the EfficientNet [44] pretrained on ImageNet [12]. For each training epoch, 8 frames are dynamically selected and used for online pseudo-fake gen",
947
+ "bbox": [
948
+ 496,
949
+ 763,
950
+ 895,
951
+ 902
952
+ ],
953
+ "page_idx": 5
954
+ },
955
+ {
956
+ "type": "table",
957
+ "img_path": "images/ef44d7dacf5d22f50f62619f914b264f989aaf06361ab0068c624678253862a0.jpg",
958
+ "table_caption": [],
959
+ "table_footnote": [],
960
+ "table_body": "<table><tr><td>Method</td><td>Fake</td><td>Saturation</td><td>Contrast</td><td>Block</td><td>Noise</td><td>Blur</td><td>Pixel</td></tr><tr><td>Xception [9]</td><td>✓</td><td>99.3</td><td>98.6</td><td>99.7</td><td>53.8</td><td>60.2</td><td>74.2</td></tr><tr><td>FaceXray [24]</td><td>✓</td><td>97.6</td><td>88.5</td><td>99.1</td><td>49.8</td><td>63.8</td><td>88.6</td></tr><tr><td>LipForensics [18]</td><td>✓</td><td>99.9</td><td>99.6</td><td>87.4</td><td>73.8</td><td>96.1</td><td>95.6</td></tr><tr><td>CADDM [14]</td><td>✓</td><td>99.6</td><td>99.8</td><td>99.8</td><td>87.4</td><td>99.0</td><td>98.8</td></tr><tr><td>Ours</td><td></td><td>99.96</td><td>99.96</td><td>99.96</td><td>53.9</td><td>98.22</td><td>99.80</td></tr></table>",
961
+ "bbox": [
962
+ 78,
963
+ 88,
964
+ 467,
965
+ 156
966
+ ],
967
+ "page_idx": 6
968
+ },
969
+ {
970
+ "type": "table",
971
+ "img_path": "images/69e86b9e749cd460aded23d756e2234c92ca795ef349b78db24d73bc0328947d.jpg",
972
+ "table_caption": [
973
+ "Table 2. Robustness inspection on the $\\mathrm{FF}++$ with different types of perturbation. Bold and Underline highlight the best and the second-best performance, respectively."
974
+ ],
975
+ "table_footnote": [],
976
+ "table_body": "<table><tr><td rowspan=\"2\">C</td><td rowspan=\"2\">H</td><td rowspan=\"2\">E-FPN</td><td colspan=\"5\">Test set AUC (%)</td></tr><tr><td>CDF2</td><td>DFD</td><td>DFDC</td><td>DFW</td><td>Avg.</td></tr><tr><td>×</td><td>×</td><td>×</td><td>74.54</td><td>92.24</td><td>70.81</td><td>59.81</td><td>74.35</td></tr><tr><td>×</td><td>✓</td><td>×</td><td>80.89</td><td>94.53</td><td>77.93</td><td>67.12</td><td>80.12(↑5.77)</td></tr><tr><td>×</td><td>×</td><td>✓</td><td>84.21</td><td>95.03</td><td>80.68</td><td>65.47</td><td>81.35(↑7.00)</td></tr><tr><td>×</td><td>✓</td><td>✓</td><td>95.56</td><td>98.54</td><td>82.21</td><td>74.98</td><td>87.82(↑13.47)</td></tr><tr><td>✓</td><td>×</td><td>✓</td><td>79.87</td><td>94.60</td><td>71.70</td><td>72.47</td><td>79.66(↑5.31)</td></tr><tr><td>✓</td><td>✓</td><td>×</td><td>91.56</td><td>98.27</td><td>78.35</td><td>73.02</td><td>85.30(↑10.95)</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>95.40</td><td>98.43</td><td>86.94</td><td>80.03</td><td>90.20(↑15.85)</td></tr></table>",
977
+ "bbox": [
978
+ 81,
979
+ 224,
980
+ 467,
981
+ 339
982
+ ],
983
+ "page_idx": 6
984
+ },
985
+ {
986
+ "type": "text",
987
+ "text": "Table 3. Ablation study under the cross-dataset setup of the Consistency branch (C), Heatmap branch (H), and E-FPN.",
988
+ "bbox": [
989
+ 76,
990
+ 349,
991
+ 467,
992
+ 378
993
+ ],
994
+ "page_idx": 6
995
+ },
996
+ {
997
+ "type": "text",
998
+ "text": "eration. The model is trained for 100 epochs with the SAM optimizer [16], a weight decay of $10^{-4}$ , and a batch size of 16. We apply a learning rate scheduler that increases from $5\\times10^{-5}$ to $2\\times10^{-4}$ in the first quarter of the training and then decays to zero in the remaining quarters. We freeze the backbone at the first 6 epochs and only train the remaining layers. For data augmentation, we apply horizontal flipping, random cropping, random scaling, random erasing [56], color jittering, Gaussian noise, blurring, and JPEG compression. The parameters $\\lambda_{1}$ and $\\lambda_{2}$ , defined in Eq. (8), are set to 10 and 100. Furthermore, label smoothing [34] is utilized as a regularizer. To generate pseudo-fakes, two blending synthesis techniques are considered, namely, Blended Images (BI) [24] and Self-Blended Images (SBI) [41]. All experiments are carried out using a GPU Tesla V-100.",
999
+ "bbox": [
1000
+ 75,
1001
+ 406,
1002
+ 467,
1003
+ 632
1004
+ ],
1005
+ "page_idx": 6
1006
+ },
1007
+ {
1008
+ "type": "text",
1009
+ "text": "4.2. Comparison with State-of-the-art",
1010
+ "text_level": 1,
1011
+ "bbox": [
1012
+ 76,
1013
+ 643,
1014
+ 370,
1015
+ 659
1016
+ ],
1017
+ "page_idx": 6
1018
+ },
1019
+ {
1020
+ "type": "text",
1021
+ "text": "In-dataset Evaluation. We compare the performance of LAA-Net to existing methods under the in-dataset protocol of [3, 14, 41, 50, 52, 54]. The first column in Table 1 reports the obtained results on the testing set of $\\mathrm{FF}++$ . It can be seen that all methods achieve competitive performance on the forgeries of the $\\mathrm{FF}++$ dataset. Our method combined with SBI outperforms all methods with an AUC of $99.96\\%$ , while using only real data for training.",
1022
+ "bbox": [
1023
+ 75,
1024
+ 667,
1025
+ 467,
1026
+ 787
1027
+ ],
1028
+ "page_idx": 6
1029
+ },
1030
+ {
1031
+ "type": "text",
1032
+ "text": "Cross-dataset Evaluation. We evaluate LAA-Net under the challenging cross-dataset setup [6, 50]. Table 1 reports the obtained results on CDF2, DFW, DFD, and DFDC, respectively. It can be noted that LAA-Net achieves state-of-the-art results on the four considered benchmarks, thereby demonstrating its robustness to different quality of deep-fakes. The best performance is reached using SBI as a data",
1033
+ "bbox": [
1034
+ 75,
1035
+ 794,
1036
+ 467,
1037
+ 900
1038
+ ],
1039
+ "page_idx": 6
1040
+ },
1041
+ {
1042
+ "type": "text",
1043
+ "text": "synthesis, confirming the importance of modeling subtle artifacts. The performance of LAA-Net (w/BI) is slightly superior to LAA-Net (w/SBI) only on DFD, with an improvement of $1.08\\%$ and $0.4\\%$ of AUC and AP, respectively. A plausible explanation could be the fact that deepfake detection in DFD is less challenging. In fact, numerous methods report AUC and AP scores exceeding $98\\%$ .",
1044
+ "bbox": [
1045
+ 496,
1046
+ 90,
1047
+ 890,
1048
+ 196
1049
+ ],
1050
+ "page_idx": 6
1051
+ },
1052
+ {
1053
+ "type": "text",
1054
+ "text": "Furthermore, LAA-Net clearly outperforms attention-based approaches such as Multi-attentional [55] and SFDG [50] by a margin of $27.14\\%$ and $19.57\\%$ in terms of AUC and AP on CDF2, respectively. This confirms the superior generalization capabilities of LAA-Net as compared to [50, 55]. These results are further supported by high AR and mF1, which are provided in the supplementary materials.",
1055
+ "bbox": [
1056
+ 496,
1057
+ 198,
1058
+ 890,
1059
+ 316
1060
+ ],
1061
+ "page_idx": 6
1062
+ },
1063
+ {
1064
+ "type": "text",
1065
+ "text": "Robustness to Perturbations. Since deepfake videos are easily altered on various social platforms, the robustness of LAA-Net against some common perturbations is investigated. Following the same settings of [14, 18], we evaluate the performance of LAA-Net on $\\mathrm{FF}++$ [37] by applying different corruptions. The results are reported in Table 2. As our method focuses on vulnerable points, it can be seen that color-related changes such as saturation and contrast do not impact the performance. However, the proposed method is extremely sensitive to structural perturbations such as Gaussian Noise. In future work, strategies for ensuring more robustness to structural perturbations will be investigated. For instance, denoising methods [30, 40] will be considered for solving this issue.",
1066
+ "bbox": [
1067
+ 496,
1068
+ 325,
1069
+ 890,
1070
+ 537
1071
+ ],
1072
+ "page_idx": 6
1073
+ },
1074
+ {
1075
+ "type": "text",
1076
+ "text": "Qualitative Results. We provide Grad-CAMs [39] in Figure 5, to visualize the image regions in deepfakes that are activated by LAA-Net, SBI [41], Xception [37], and Multiattentional (MAT) [55] on $\\mathrm{FF}++$ [37]. Generally, attention-based methods such as MAT [55] and LAA-Net focus more on localized regions. However, in some cases, MAT [55] concentrates on irrelevant regions such as the background or the inner face areas. Conversely, LAA-Net consistently identifies blending artifacts and shows interesting capabilities on mouth-rendered Neural Textures (NT).",
1077
+ "bbox": [
1078
+ 496,
1079
+ 544,
1080
+ 890,
1081
+ 695
1082
+ ],
1083
+ "page_idx": 6
1084
+ },
1085
+ {
1086
+ "type": "text",
1087
+ "text": "4.3. Ablation Study",
1088
+ "text_level": 1,
1089
+ "bbox": [
1090
+ 500,
1091
+ 709,
1092
+ 651,
1093
+ 724
1094
+ ],
1095
+ "page_idx": 6
1096
+ },
1097
+ {
1098
+ "type": "text",
1099
+ "text": "Table 3 reports the cross-dataset performance of LAA-Net when discarding the following components: E-FPN, the consistency branch denoted by C and the heatmap branch denoted by H. The best performance is reached when all the components are integrated. It can be seen that the proposed explicit attention mechanism through the heatmap branch contributes more to improving the result. A qualitative example visualizing Grad-CAMs [39] with different components of LAA-Net is also given in Figure 6. The illustration clearly shows that by combining the three components, the network activates more precisely the blending region.",
1100
+ "bbox": [
1101
+ 496,
1102
+ 734,
1103
+ 890,
1104
+ 900
1105
+ ],
1106
+ "page_idx": 6
1107
+ },
1108
+ {
1109
+ "type": "table",
1110
+ "img_path": "images/7b499fb0d5d41dea60d3d19207b9978cb9c120fdeca087a8a843a6bace0e84a6.jpg",
1111
+ "table_caption": [],
1112
+ "table_footnote": [],
1113
+ "table_body": "<table><tr><td></td><td colspan=\"5\">EFNB4</td><td colspan=\"7\">Test Set AUC (%)</td><td></td></tr><tr><td colspan=\"2\"></td><td colspan=\"4\">E-FPN Integration</td><td colspan=\"2\">CDF2</td><td colspan=\"2\">DFD</td><td colspan=\"2\">DFW</td><td>DFDC</td><td></td></tr><tr><td></td><td>F(6)</td><td>F(5)</td><td>F(4)</td><td>F(3)</td><td>F(2)</td><td>FPN</td><td>E-FPN</td><td>FPN</td><td>E-FPN</td><td>FPN</td><td>E-FPN</td><td>FPN</td><td>E-FPN</td></tr><tr><td>(a)</td><td>✓</td><td></td><td></td><td></td><td></td><td colspan=\"2\">91.56</td><td colspan=\"2\">98.27</td><td colspan=\"2\">73.02</td><td>78.35</td><td></td></tr><tr><td>(b)</td><td>✓</td><td>✓</td><td></td><td></td><td></td><td>93.42</td><td>91.79</td><td>98.59</td><td>97.12</td><td>73.78</td><td>71.39</td><td>78.40</td><td></td></tr><tr><td>(c)</td><td>✓</td><td>✓</td><td>✓</td><td></td><td></td><td>88.72</td><td>92.86</td><td>97.96</td><td>98.95</td><td>69.40</td><td>74.93</td><td>71.91</td><td></td></tr><tr><td>(d)</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td></td><td>88.35</td><td>95.40</td><td>98.89</td><td>98.43</td><td>70.94</td><td>80.03</td><td>79.02</td><td></td></tr><tr><td>(e)</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>92.16</td><td>94.22</td><td>96.58</td><td>97.31</td><td>65.17</td><td>72.54</td><td>74.31</td><td></td></tr><tr><td colspan=\"6\">Avg</td><td>90.84</td><td>93.16</td><td>98.06</td><td>98.02</td><td>70.46</td><td>74.38</td><td>76.40</td><td></td></tr></table>",
1114
+ "bbox": [
1115
+ 133,
1116
+ 88,
1117
+ 831,
1118
+ 222
1119
+ ],
1120
+ "page_idx": 7
1121
+ },
1122
+ {
1123
+ "type": "image",
1124
+ "img_path": "images/ea22e00fbbf8d32b588de507b966b024503460d75de2038f47077693ca1871fd.jpg",
1125
+ "image_caption": [
1126
+ "Figure 5. Grad-CAM [39] visualization on different types of manipulation from FF++ [37]. LAA-Net is compared to SBI [41], Xception [37], and MAT [55]."
1127
+ ],
1128
+ "image_footnote": [],
1129
+ "bbox": [
1130
+ 116,
1131
+ 266,
1132
+ 424,
1133
+ 494
1134
+ ],
1135
+ "page_idx": 7
1136
+ },
1137
+ {
1138
+ "type": "image",
1139
+ "img_path": "images/6e6a42be5c4609a7143f710eac6145cffc40bef55288741f8caa42d04a8014bb.jpg",
1140
+ "image_caption": [
1141
+ "Figure 6. GradCAM [39] visualization of different components in LAA-Net. w/o E-FPN, w/o H, and w/o C refer to ablating E-FPN, heatmap branch, and self-consistency branch, respectively."
1142
+ ],
1143
+ "image_footnote": [],
1144
+ "bbox": [
1145
+ 78,
1146
+ 546,
1147
+ 468,
1148
+ 618
1149
+ ],
1150
+ "page_idx": 7
1151
+ },
1152
+ {
1153
+ "type": "text",
1154
+ "text": "4.4. E-FPN versus Traditional FPN",
1155
+ "text_level": 1,
1156
+ "bbox": [
1157
+ 76,
1158
+ 693,
1159
+ 356,
1160
+ 708
1161
+ ],
1162
+ "page_idx": 7
1163
+ },
1164
+ {
1165
+ "type": "text",
1166
+ "text": "To assess the effectiveness of the low-level features injected by E-FPN into the final feature representation, we combine different feature levels and compare the results of E-FPN and traditional FPN [26, 27] in Table 4. It can be seen that in general E-FPN outperforms FPN except for $\\mathbf{F}^{(5)}$ . This confirms the relevance of employing multi-scale features and the need for reducing their redundancy in the context of deepfake detection.",
1167
+ "bbox": [
1168
+ 75,
1169
+ 715,
1170
+ 468,
1171
+ 838
1172
+ ],
1173
+ "page_idx": 7
1174
+ },
1175
+ {
1176
+ "type": "text",
1177
+ "text": "4.5. Sensitivity Analysis",
1178
+ "text_level": 1,
1179
+ "bbox": [
1180
+ 76,
1181
+ 847,
1182
+ 264,
1183
+ 863
1184
+ ],
1185
+ "page_idx": 7
1186
+ },
1187
+ {
1188
+ "type": "text",
1189
+ "text": "In this subsection, we analyze the impact of the two hyperparameters, $\\lambda_{1}$ and $\\lambda_{2}$ given in Eq. (8). Table 5 shows",
1190
+ "bbox": [
1191
+ 76,
1192
+ 869,
1193
+ 468,
1194
+ 901
1195
+ ],
1196
+ "page_idx": 7
1197
+ },
1198
+ {
1199
+ "type": "text",
1200
+ "text": "the experimental results for different values of $\\lambda_{1}$ and $\\lambda_{2}$ . It can be noted that our model is robust to different hyperparameter values, with the best average performance obtained with $\\lambda_{1} = 10$ and $\\lambda_{2} = 100$ .",
1201
+ "bbox": [
1202
+ 498,
1203
+ 268,
1204
+ 892,
1205
+ 330
1206
+ ],
1207
+ "page_idx": 7
1208
+ },
1209
+ {
1210
+ "type": "table",
1211
+ "img_path": "images/1dac10ce32f48123b9b69b85271651a51d9dc187092f111b979081cd87d04d25.jpg",
1212
+ "table_caption": [
1213
+ "Table 4. Traditional FPN versus E-FPN, using the SBI-based data synthesis under the cross-dataset protocol. Bold and Underline indicate the best and the second-best performance, respectively. We report the results when integrating features $\\mathbf{F}^{(i)}$ from different layers."
1214
+ ],
1215
+ "table_footnote": [],
1216
+ "table_body": "<table><tr><td rowspan=\"2\">λ1</td><td rowspan=\"2\">λ2</td><td colspan=\"4\">Test Set AUC (%)</td></tr><tr><td>CDF2</td><td>DFDC</td><td>DFW</td><td>Avg</td></tr><tr><td>1</td><td>1</td><td>90.69</td><td>78.12</td><td>70.98</td><td>79.93</td></tr><tr><td>10</td><td>10</td><td>95.73</td><td>85.87</td><td>73.56</td><td>85.05</td></tr><tr><td>100</td><td>100</td><td>93.72</td><td>78.60</td><td>75.25</td><td>82.52</td></tr><tr><td>100</td><td>10</td><td>93.05</td><td>83.86</td><td>76.72</td><td>84.54</td></tr><tr><td>10</td><td>100</td><td>95.40</td><td>86.94</td><td>80.03</td><td>87.46</td></tr></table>",
1217
+ "bbox": [
1218
+ 550,
1219
+ 342,
1220
+ 836,
1221
+ 441
1222
+ ],
1223
+ "page_idx": 7
1224
+ },
1225
+ {
1226
+ "type": "text",
1227
+ "text": "Table 5. Sensitivity analysis: The impact of the hyper-parameters $\\lambda_{1}$ and $\\lambda_{2}$ using the cross-dataset protocol on three datasets in terms of AUC.",
1228
+ "bbox": [
1229
+ 500,
1230
+ 444,
1231
+ 892,
1232
+ 484
1233
+ ],
1234
+ "page_idx": 7
1235
+ },
1236
+ {
1237
+ "type": "text",
1238
+ "text": "5. Conclusion",
1239
+ "text_level": 1,
1240
+ "bbox": [
1241
+ 500,
1242
+ 497,
1243
+ 617,
1244
+ 512
1245
+ ],
1246
+ "page_idx": 7
1247
+ },
1248
+ {
1249
+ "type": "text",
1250
+ "text": "In this paper, a fine-grained deepfake detection method called LAA-Net is introduced with the aim of detecting high-quality deepfakes while remaining generic to unseen manipulations. For that purpose, two different components are proposed. On the one hand, we argue that by making the network focus on the most vulnerable points, we can detect both global and subtle artifacts. To this end, an explicit attention mechanism within a multi-task learning framework is used. In addition to the binary classification branch, heatmap and self-consistency branches are defined with respect to the vulnerable points. On the other hand, a novel E-FPN module for aggregating multi-scale features is proposed; hence enabling the integration of more localized features. The results reported on several benchmarks show the superiority of LAA-Net as compared to the state-of-the-art, including attention-based methods. In future works, strategies for improving the robustness to noise will be investigated. In addition, an attempt to extend this idea by taking into account the temporal dimension will be explored.",
1251
+ "bbox": [
1252
+ 496,
1253
+ 523,
1254
+ 892,
1255
+ 811
1256
+ ],
1257
+ "page_idx": 7
1258
+ },
1259
+ {
1260
+ "type": "text",
1261
+ "text": "Acknowledgment",
1262
+ "text_level": 1,
1263
+ "bbox": [
1264
+ 500,
1265
+ 820,
1266
+ 651,
1267
+ 837
1268
+ ],
1269
+ "page_idx": 7
1270
+ },
1271
+ {
1272
+ "type": "text",
1273
+ "text": "This work is supported by the Luxembourg National Research Fund, under the BRIDGES2021/IS/16353350/FaKeDeTeR and UNFAKE, ref.16763798 projects, and by POST Luxembourg.",
1274
+ "bbox": [
1275
+ 498,
1276
+ 840,
1277
+ 892,
1278
+ 901
1279
+ ],
1280
+ "page_idx": 7
1281
+ },
1282
+ {
1283
+ "type": "text",
1284
+ "text": "Overview",
1285
+ "text_level": 1,
1286
+ "bbox": [
1287
+ 230,
1288
+ 89,
1289
+ 316,
1290
+ 104
1291
+ ],
1292
+ "page_idx": 8
1293
+ },
1294
+ {
1295
+ "type": "text",
1296
+ "text": "This document provides supplementary material complementing the main manuscript. It is structured as follows. First, the computation of the self-consistency loss and the ground truth generation of heatmaps are described. Second, more quantitative and qualitative results are provided. In particular, additional metrics are reported for both in-dataset and cross-dataset settings. Moreover, qualitative results comparing E-FPN and FPN are shown.",
1297
+ "bbox": [
1298
+ 75,
1299
+ 114,
1300
+ 470,
1301
+ 236
1302
+ ],
1303
+ "page_idx": 8
1304
+ },
1305
+ {
1306
+ "type": "text",
1307
+ "text": "6. Self-Consistency Loss",
1308
+ "text_level": 1,
1309
+ "bbox": [
1310
+ 76,
1311
+ 250,
1312
+ 284,
1313
+ 266
1314
+ ],
1315
+ "page_idx": 8
1316
+ },
1317
+ {
1318
+ "type": "image",
1319
+ "img_path": "images/66add1d1914f6056849f872c578d4d9d28d8331689f06e281363dc1f02cdf984.jpg",
1320
+ "image_caption": [
1321
+ "Figure 7. In order to generate the consistency map prediction $\\hat{\\mathbf{C}}$ as well as the associated ground truth $\\mathbf{C}$ , we first randomly select a vulnerable point located at $\\mathbf{p}^s$ . For computing $\\hat{\\mathbf{C}}$ , we measure the similarity between the feature at $\\mathbf{p}^s$ (red block) and the features generated from every point. Namely, we use the similarity function in [54]. As for $\\mathbf{C}$ , we measure the consistency values between the pixel at the $\\mathbf{p}^s$ and all pixels in $\\mathbf{B}$ , as also described in Eq. (7) of the manuscript."
1322
+ ],
1323
+ "image_footnote": [],
1324
+ "bbox": [
1325
+ 81,
1326
+ 287,
1327
+ 460,
1328
+ 506
1329
+ ],
1330
+ "page_idx": 8
1331
+ },
1332
+ {
1333
+ "type": "image",
1334
+ "img_path": "images/a9cd9118ea1f85eb6dff05d0d62c24ca816e787e6958c6e8a68802427daf2d3a.jpg",
1335
+ "image_caption": [
1336
+ "Figure 8. Feature visualization by gradCAM [39] between E-FPN and FPN with different integration of multi-scale layers. It shows that E-FPN can focus better on artifacts as compared to FPN. The setup details are provided in Table 4 as shown in the manuscript."
1337
+ ],
1338
+ "image_footnote": [],
1339
+ "bbox": [
1340
+ 81,
1341
+ 660,
1342
+ 468,
1343
+ 773
1344
+ ],
1345
+ "page_idx": 8
1346
+ },
1347
+ {
1348
+ "type": "text",
1349
+ "text": "To clarify the calculation of the self-consistency loss, we show Figure 7, which illustrates the generation process of the predicted and the ground-truth, $\\hat{\\mathbf{C}}$ and $\\mathbf{C}$ , respectively.",
1350
+ "bbox": [
1351
+ 75,
1352
+ 854,
1353
+ 470,
1354
+ 902
1355
+ ],
1356
+ "page_idx": 8
1357
+ },
1358
+ {
1359
+ "type": "table",
1360
+ "img_path": "images/c29c23f83b3f0b27bdf1cf265b7abc0baa44f91c28d2027a88cd1da6bac53567.jpg",
1361
+ "table_caption": [],
1362
+ "table_footnote": [],
1363
+ "table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"2\">Training Set</td><td colspan=\"5\">FF++ [37]</td></tr><tr><td>Real</td><td>Fake</td><td>ACC</td><td>AUC</td><td>AP</td><td>AR</td><td>mF1</td></tr><tr><td>Ours w/ BI [24]</td><td>✓</td><td></td><td>99.03</td><td>99.95</td><td>99.99</td><td>99.21</td><td>99.60</td></tr><tr><td>Ours w/ SBI [41]</td><td>✓</td><td></td><td>99.04</td><td>99.96</td><td>99.99</td><td>99.29</td><td>99.64</td></tr></table>",
1364
+ "bbox": [
1365
+ 500,
1366
+ 88,
1367
+ 893,
1368
+ 140
1369
+ ],
1370
+ "page_idx": 8
1371
+ },
1372
+ {
1373
+ "type": "text",
1374
+ "text": "Table 6. In-dataset evaluation on FF++ [37] reported by ACC, AUC, AP, AR, and mF1.",
1375
+ "bbox": [
1376
+ 500,
1377
+ 148,
1378
+ 890,
1379
+ 176
1380
+ ],
1381
+ "page_idx": 8
1382
+ },
1383
+ {
1384
+ "type": "text",
1385
+ "text": "The self-consistency loss is a binary cross entropy loss between $\\hat{\\mathbf{C}}$ and $\\mathbf{C}$ .",
1386
+ "bbox": [
1387
+ 500,
1388
+ 203,
1389
+ 890,
1390
+ 232
1391
+ ],
1392
+ "page_idx": 8
1393
+ },
1394
+ {
1395
+ "type": "text",
1396
+ "text": "7. Ground Truth Generation of Heatmaps",
1397
+ "text_level": 1,
1398
+ "bbox": [
1399
+ 500,
1400
+ 246,
1401
+ 854,
1402
+ 265
1403
+ ],
1404
+ "page_idx": 8
1405
+ },
1406
+ {
1407
+ "type": "text",
1408
+ "text": "In this section, we provide more details regarding the generation of ground-truth heatmaps, described in Section 3.1.2. Firstly, a $k$ -th vulnerable point, denoted as $\\mathbf{p}^k$ , is selected, as shown in Figure 9 (i). Secondly, we measure the height and the width of the blending mask $\\mathbf{B}$ at the point $\\mathbf{p}^k$ shown as orange lines in Figure 9 (ii). Using the calculated distances, a virtual bounding box is created, indicated by the blue box in Figure 9 (iii). Then, we identify overlapping boxes, illustrated by dashed-line green boxes in Figure 9 (iv), with the Intersection over Union (IoU) greater than a threshold ( $t = 0.7$ ) compared to the virtual bounding box. A radius $r_k$ (solid purple line in Figure 9 (v)) is calculated by forming a tight circle encompassing all these boxes. Finally, an Unnormalized Gaussian Distribution, shown as a red circle in Figure 9 (vi), is generated with a standard deviation $\\sigma_k = \\frac{1}{3} r_k$ (Eq. (4) of the manuscript). The steps are repeated for every vulnerable point $k \\in [[1, \\mathrm{card}(\\mathcal{P})]]$ . The final $\\mathbf{H}$ is the superimposition of all $g_{ij}^k$ .",
1409
+ "bbox": [
1410
+ 498,
1411
+ 271,
1412
+ 890,
1413
+ 546
1414
+ ],
1415
+ "page_idx": 8
1416
+ },
1417
+ {
1418
+ "type": "text",
1419
+ "text": "8. Additional Results",
1420
+ "text_level": 1,
1421
+ "bbox": [
1422
+ 500,
1423
+ 556,
1424
+ 679,
1425
+ 571
1426
+ ],
1427
+ "page_idx": 8
1428
+ },
1429
+ {
1430
+ "type": "text",
1431
+ "text": "In addition to AUC, we provide results using additional metrics, namely, Average Precision (AP), Average Recall (AR), Accuracy (ACC), and mean F1-score (mF1).",
1432
+ "bbox": [
1433
+ 498,
1434
+ 580,
1435
+ 890,
1436
+ 626
1437
+ ],
1438
+ "page_idx": 8
1439
+ },
1440
+ {
1441
+ "type": "text",
1442
+ "text": "Table 6 and Table 7 report the results under the in-dataset and the cross-dataset settings, respectively. Overall, it can be seen that LAA-Net achieves better performances than other state-of-the-art methods.",
1443
+ "bbox": [
1444
+ 498,
1445
+ 627,
1446
+ 890,
1447
+ 686
1448
+ ],
1449
+ "page_idx": 8
1450
+ },
1451
+ {
1452
+ "type": "text",
1453
+ "text": "8.1. Qualitative Results: E-FPN versus FPN",
1454
+ "text_level": 1,
1455
+ "bbox": [
1456
+ 500,
1457
+ 696,
1458
+ 839,
1459
+ 712
1460
+ ],
1461
+ "page_idx": 8
1462
+ },
1463
+ {
1464
+ "type": "text",
1465
+ "text": "A qualitative comparison between the proposed E-FPN and the traditional FPN with different fusion settings is reported in Figure 8. Using EfficientNet-B4 [44] (EFNB4) as our backbone, the $\\mathbf{F}^{(6)}$ refers to the features extracted from the last convolution block in the backbone. In other words, this means that no FPN design is integrated. By gradually aggregating features from lower to higher resolution layers, we can observe the improvement of the forgery localization ability for both E-FPN and FPN. More notably, E-FPN produces more precise activations on the blending boundaries as compared to FPN. This can be explained by the fact that the E-FPN integrates a filtering mechanism for learning less",
1466
+ "bbox": [
1467
+ 496,
1468
+ 719,
1469
+ 890,
1470
+ 900
1471
+ ],
1472
+ "page_idx": 8
1473
+ },
1474
+ {
1475
+ "type": "image",
1476
+ "img_path": "images/8046b9c4244f83e5ec58f2b21d27b54811976d7f5bc1c343118c1ec7ac8b1b7b.jpg",
1477
+ "image_caption": [
1478
+ "Figure 9. The generation process of ground truth heatmaps, produced using an Unnormalized Gaussian Distribution given a selected vulnerable point."
1479
+ ],
1480
+ "image_footnote": [],
1481
+ "bbox": [
1482
+ 80,
1483
+ 93,
1484
+ 867,
1485
+ 252
1486
+ ],
1487
+ "page_idx": 9
1488
+ },
1489
+ {
1490
+ "type": "table",
1491
+ "img_path": "images/c562c5e89aa274c3cbe8d6d83a5ef4d9128a727a7b462b4a9dda95dae4769b2a.jpg",
1492
+ "table_caption": [],
1493
+ "table_footnote": [],
1494
+ "table_body": "<table><tr><td rowspan=\"3\">Method</td><td rowspan=\"3\">Fake</td><td colspan=\"16\">Test set (%)</td></tr><tr><td colspan=\"4\">CDF2</td><td colspan=\"4\">DFW</td><td colspan=\"4\">DFD</td><td colspan=\"4\">DFDC</td></tr><tr><td>AUC</td><td>AP</td><td>AR</td><td>mF1</td><td>AUC</td><td>AP</td><td>AR</td><td>mF1</td><td>AUC</td><td>AP</td><td>AR</td><td>mF1</td><td>AUC</td><td>AP</td><td>AR</td><td>mF1</td></tr><tr><td>Xception [37]</td><td>✓</td><td>61.18</td><td>66.93</td><td>52.40</td><td>58.78</td><td>65.29</td><td>55.37</td><td>57.99</td><td>56.65</td><td>89.75</td><td>85.48</td><td>79.34</td><td>82.29</td><td>69.90</td><td>91.98</td><td>67.07</td><td>77.57</td></tr><tr><td>FaceXRay+BI [24]</td><td>✓</td><td>79.5</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>95.40</td><td>93.34</td><td>-</td><td>-</td><td>65.5</td><td>-</td><td>-</td><td>-</td></tr><tr><td>LRNet [43]</td><td>✓</td><td>53.20</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>52.29</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>LocalRL [8]</td><td>✓</td><td>78.26</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>89.24</td><td>-</td><td>-</td><td>-</td><td>76.53</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Tl2Net [28]</td><td>✓</td><td>68.22</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>72.03</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Multi-attentional [55]</td><td>✓</td><td>68.26</td><td>75.25</td><td>52.40</td><td>61.78</td><td>73.56</td><td>73.79</td><td>63.38</td><td>68.19</td><td>92.95</td><td>96.51</td><td>60.76</td><td>74.57</td><td>63.02</td><td>-</td><td>-</td><td>-</td></tr><tr><td>RECCE 
[6]</td><td>✓</td><td>70.93</td><td>70.35</td><td>59.48</td><td>64.46</td><td>68.16</td><td>54.41</td><td>56.59</td><td>55.48</td><td>98.26</td><td>79.42</td><td>69.57</td><td>74.17</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>SFDG [50]</td><td>✓</td><td>75.83</td><td>-</td><td>-</td><td>-</td><td>69.27</td><td>-</td><td>-</td><td>-</td><td>88.00</td><td>-</td><td>-</td><td>-</td><td>73.63</td><td>-</td><td>-</td><td>-</td></tr><tr><td>EIC+IIE [20]</td><td>✓</td><td>83.80</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>93.92</td><td>-</td><td>-</td><td>-</td><td>81.23</td><td>-</td><td>-</td><td>-</td></tr><tr><td>AltFreezing [52]</td><td>✓</td><td>89.50</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>98.50</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>CADDM [14]</td><td>✓</td><td>93.88</td><td>91.12</td><td>77.00</td><td>83.46</td><td>74.48</td><td>75.23</td><td>65.26</td><td>69.89</td><td>99.03</td><td>99.59</td><td>82.17</td><td>90.04</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>UCF [53]</td><td>✓</td><td>82.4</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>94.5</td><td>-</td><td>-</td><td>-</td><td>80.5</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Controllable GS [17]</td><td>✓</td><td>84.97</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>81.65</td><td>-</td><td>-</td><td>-</td></tr><tr><td>PCL+I2G [54]</td><td></td><td>90.03</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>99.07</td><td>-</td><td>-</td><td>-</td><td>74.27</td><td>-</td><td>-</td><td>-</td></tr><tr><td>SBI 
[41]</td><td></td><td>93.18</td><td>85.16</td><td>82.68</td><td>83.90</td><td>67.47</td><td>55.87</td><td>55.82</td><td>55.85</td><td>97.56</td><td>92.79</td><td>89.49</td><td>91.11</td><td>86.15</td><td>93.24</td><td>71.58</td><td>80.99</td></tr><tr><td>AUNet [3]</td><td></td><td>92.77</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>99.22</td><td>-</td><td>-</td><td>-</td><td>86.16</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Ours (w/ BI)</td><td></td><td>86.28</td><td>91.93</td><td>50.01</td><td>64.78</td><td>57.13</td><td>56.89</td><td>50.12</td><td>53.29</td><td>99.51</td><td>99.80</td><td>95.47</td><td>97.59</td><td>69.69</td><td>93.67</td><td>50.12</td><td>65.30</td></tr><tr><td>Ours (w/ SBI)</td><td></td><td>95.40</td><td>97.64</td><td>87.71</td><td>92.41</td><td>80.03</td><td>81.08</td><td>65.66</td><td>72.56</td><td>98.43</td><td>99.40</td><td>88.55</td><td>93.64</td><td>86.94</td><td>97.70</td><td>73.37</td><td>83.81</td></tr></table>",
1495
+ "bbox": [
1496
+ 81,
1497
+ 303,
1498
+ 892,
1499
+ 542
1500
+ ],
1501
+ "page_idx": 9
1502
+ },
1503
+ {
1504
+ "type": "text",
1505
+ "text": "Table 7. Cross-dataset evaluation in terms of AUC, AP, AR, and mF1 (%) on CDF2 [25], DFW [57], DFD [15], and DFDC [13]. Bold and underlined highlight the best and the second-best performance, respectively. $\\checkmark$ symbol is used to depict methods that utilized both Real data and Fake data for training.",
1506
+ "bbox": [
1507
+ 75,
1508
+ 551,
1509
+ 892,
1510
+ 595
1511
+ ],
1512
+ "page_idx": 9
1513
+ },
1514
+ {
1515
+ "type": "image",
1516
+ "img_path": "images/daaa2db2ce59ff631c396feb9def0288c6f73ac00946994187977adebb598b8f.jpg",
1517
+ "image_caption": [
1518
+ "Figure 10. Detection of vulnerable points w/o and w/ Gaussian noise."
1519
+ ],
1520
+ "image_footnote": [],
1521
+ "bbox": [
1522
+ 81,
1523
+ 623,
1524
+ 460,
1525
+ 709
1526
+ ],
1527
+ "page_idx": 9
1528
+ },
1529
+ {
1530
+ "type": "text",
1531
+ "text": "noise. In contrast, FPN seems to consider regions outside the blending boundary, which results in lower performance as previously shown in Table 4 - Section 4.4 of the main manuscript.",
1532
+ "bbox": [
1533
+ 76,
1534
+ 771,
1535
+ 468,
1536
+ 833
1537
+ ],
1538
+ "page_idx": 9
1539
+ },
1540
+ {
1541
+ "type": "text",
1542
+ "text": "8.2. Qualitative Results: Gaussian Noise",
1543
+ "text_level": 1,
1544
+ "bbox": [
1545
+ 76,
1546
+ 845,
1547
+ 390,
1548
+ 862
1549
+ ],
1550
+ "page_idx": 9
1551
+ },
1552
+ {
1553
+ "type": "text",
1554
+ "text": "In Table 2 of the main manuscript, the performance of LAA-Net declined significantly when encountering Gaus",
1555
+ "bbox": [
1556
+ 76,
1557
+ 869,
1558
+ 470,
1559
+ 901
1560
+ ],
1561
+ "page_idx": 9
1562
+ },
1563
+ {
1564
+ "type": "text",
1565
+ "text": "sian Noise perturbations. One possible reason is that the introduction of noise elevates the difficulty of detecting the vulnerable points. To confirm that, we report the inference of the heatmap before and after applying a Gaussian Noise on a facial image in Figure 10. As it can be observed, the detection of vulnerable points is highly impacted with the introduction of a Gaussian noise.",
1566
+ "bbox": [
1567
+ 496,
1568
+ 622,
1569
+ 890,
1570
+ 727
1571
+ ],
1572
+ "page_idx": 9
1573
+ },
1574
+ {
1575
+ "type": "text",
1576
+ "text": "8.3. Robustness to Compression",
1577
+ "text_level": 1,
1578
+ "bbox": [
1579
+ 500,
1580
+ 736,
1581
+ 746,
1582
+ 753
1583
+ ],
1584
+ "page_idx": 9
1585
+ },
1586
+ {
1587
+ "type": "text",
1588
+ "text": "To assess the robustness of LAA-Net to compression, we test LAA-Net on the c23 version of FF++, and the overall AUC is equal to $89.30\\%$ .",
1589
+ "bbox": [
1590
+ 500,
1591
+ 760,
1592
+ 892,
1593
+ 806
1594
+ ],
1595
+ "page_idx": 9
1596
+ },
1597
+ {
1598
+ "type": "text",
1599
+ "text": "References",
1600
+ "text_level": 1,
1601
+ "bbox": [
1602
+ 501,
1603
+ 818,
1604
+ 596,
1605
+ 834
1606
+ ],
1607
+ "page_idx": 9
1608
+ },
1609
+ {
1610
+ "type": "list",
1611
+ "sub_type": "ref_text",
1612
+ "list_items": [
1613
+ "[1] Darius Afchar, Vincent Nozick, Junichi Yamagishi, and Isao Echizen. Mesonet: a compact facial video forgery detection network. CoRR, abs/1809.00888, 2018. 2",
1614
+ "[2] Babajide O Ayinde, Tamer Inanc, and Jacek M Zurada. Reg-"
1615
+ ],
1616
+ "bbox": [
1617
+ 506,
1618
+ 843,
1619
+ 890,
1620
+ 901
1621
+ ],
1622
+ "page_idx": 9
1623
+ },
1624
+ {
1625
+ "type": "list",
1626
+ "sub_type": "ref_text",
1627
+ "list_items": [
1628
+ "ularizing deep neural networks by enhancing diversity in feature extraction. IEEE transactions on neural networks and learning systems, 30(9):2650-2661, 2019. 2, 5",
1629
+ "[3] Weiming Bai, Yufan Liu, Zhipeng Zhang, Bing Li, and Weiming Hu. Aunet: Learning relations between action units for face forgery detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 24709-24719, 2023. 2, 3, 6, 7, 10",
1630
+ "[4] Sarah Cahlan. How misinformation helped spark an attempted coup in Gabon. https://wapo.st/3KZARDF, 2020. [Online; accessed 7-March-2023]. 1",
1631
+ "[5] Zhixi Cai, Shreya Ghosh, Kalin Stefanov, Abhinav Dhall, Jianfei Cai, Hamid Rezatofighi, Reza Haffari, and Munawar Hayat. Marlin: Masked autoencoder for facial video representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1493-1504, 2023. 2",
1632
+ "[6] Junyi Cao, Chao Ma, Taiping Yao, Shen Chen, Shouhong Ding, and Xiaokang Yang. End-to-end reconstruction-classification learning for face forgery detection. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4103-4112, 2022. 1, 2, 6, 7, 10",
1633
+ "[7] Liang Chen, Yong Zhang, Yibing Song, Lingqiao Liu, and Jue Wang. Self-supervised learning of adversarial example: Towards good generalizations for deepfake detection, 2022. 1, 2",
1634
+ "[8] Shen Chen, Taiping Yao, Yang Chen, Shouhong Ding, Jilin Li, and R. Ji. Local relation learning for face forgery detection. In AAAI Conference on Artificial Intelligence, 2021. 6, 10",
1635
+ "[9] François Chollet. Xception: Deep learning with depthwise separable convolutions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1251-1258, 2017. 2, 3, 7",
1636
+ "[10] Davide Coccomini, Nicola Messina, Claudio Gennaro, and Fabrizio Falchi. Combining efficientnet and vision transformers for video deepfake detection. CoRR, abs/2107.02612, 2021. 2",
1637
+ "[11] Deepfakes. Faceswapdevs. https://github.com/deepfakes/faceswap, 2019.5",
1638
+ "[12] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, K. Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248–255, 2009. 6",
1639
+ "[13] Brian Dolhansky, Russ Howes, Ben Pflaum, Nicole Baram, and Cristian Canton-Ferrer. The deepfake detection challenge (DFDC) preview dataset. CoRR, abs/1910.08854, 2019. 2, 6, 10",
1640
+ "[14] Shichao Dong, Jin Wang, Renhe Ji, Jiajun Liang, Haoqiang Fan, and Zheng Ge. Implicit identity leakage: The stumbling block to improving deepfake detection generalization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3994-4004, 2023. 1, 2, 5, 6, 7, 10",
1641
+ "[15] Nick Dufour and Andrew Gully. Contributing data to deepfake detection research."
1642
+ ],
1643
+ "bbox": [
1644
+ 78,
1645
+ 90,
1646
+ 470,
1647
+ 902
1648
+ ],
1649
+ "page_idx": 10
1650
+ },
1651
+ {
1652
+ "type": "list",
1653
+ "sub_type": "ref_text",
1654
+ "list_items": [
1655
+ "https://ai.googleblog.com/2019/09/contributing-data-to-deepfake-detection.html, \n2019. 2, 6, 10",
1656
+ "[16] Pierre Foret, Ariel Kleiner, Hossein Mobahi, and Behnam Neyshabur. Sharpness-aware minimization for efficiently improving generalization. CoRR, abs/2010.01412, 2020. 7",
1657
+ "[17] Ying Guo, Cheng Zhen, and Pengfei Yan. Controllable guide-space for generalizable face forgery detection. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 20818-20827, 2023. 6, 10",
1658
+ "[18] Alexandros Haliassos, Konstantinos Vougioukas, Stavros Petridis, and Maja Pantic. Lips don't lie: A generalisable and robust approach to face forgery detection. CoRR, abs/2012.07657, 2020. 7",
1659
+ "[19] Alexandros Haliassos, Rodrigo Mira, Stavros Petridis, and Maja Pantic. Leveraging real talking faces via self-supervision for robust forgery detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14950-14962, 2022. 2",
1660
+ "[20] Baojin Huang, Zhongyuan Wang, Jifan Yang, Jiaxin Ai, Qin Zou, Qian Wang, and Dengpan Ye. Implicit identity driven deepfake face swapping detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4490-4499, 2023. 6, 10",
1661
+ "[21] Davis E. King. Dlib-ml: A machine learning toolkit. J. Mach. Learn. Res., 10:1755–1758, 2009. 6",
1662
+ "[22] Marek Kowalski. Faceswap. https://github.com/MarekKowalski/FaceSwap, 2018.5",
1663
+ "[23] Hei Law and Jia Deng. Cornernet: Detecting objects as paired keypoints. International Journal of Computer Vision, 128:642-656, 2018. 4",
1664
+ "[24] Lingzhi Li, Jianmin Bao, Ting Zhang, Hao Yang, Dong Chen, Fang Wen, and Baining Guo. Face x-ray for more general face forgery detection. CoRR, abs/1912.13458, 2019. 1, 2, 3, 4, 6, 7, 9, 10",
1665
+ "[25] Yuezun Li, Xin Yang, Pu Sun, Honggang Qi, and Siwei Lyu. Celeb-df: A new dataset for deepfake forensics. CoRR, abs/1909.12962, 2019. 1, 2, 6, 10",
1666
+ "[26] Tsung-Yi Lin, Priya Goyal, Ross B. Girshick, Kaiming He, and Piotr Dollar. Focal loss for dense object detection. CoRR, abs/1708.02002, 2017. 4, 5, 6, 8",
1667
+ "[27] Tsung-Yi Lin, Piotr Dollár, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Belongie. Feature pyramid networks for object detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2117-2125, 2017. 2, 5, 8",
1668
+ "[28] Baoping Liu, Bo Liu, Ming Ding, Tianqing Zhu, and Xin Yu. Ti2net: Temporal identity inconsistency network for deepfake detection. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), pages 4691-4700, 2023. 6, 10",
1669
+ "[29] Liqian Ma, Xu Jia, Qianru Sun, Bernt Schiele, Tinne Tuytelaars, and Luc Van Gool. Pose guided person image generation. Advances in neural information processing systems, 30, 2017. 2, 6",
1670
+ "[30] Youssef Mansour and Reinhard Heckel. Zero-shot noise2noise: Efficient image denoising without any data. In"
1671
+ ],
1672
+ "bbox": [
1673
+ 501,
1674
+ 90,
1675
+ 1000,
1676
+ 902
1677
+ ],
1678
+ "page_idx": 10
1679
+ },
1680
+ {
1681
+ "type": "list",
1682
+ "sub_type": "ref_text",
1683
+ "list_items": [
1684
+ "Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14018-14027, 2023. 7",
1685
+ "[31] Falko Matern, Christian Riess, and Marc Stamminger. Exploiting visual artifacts to expose deepfakes and face manipulations. In 2019 IEEE Winter Applications of Computer Vision Workshops (WACVW), pages 83–92, 2019. 2",
1686
+ "[32] Nesryne Mejri, Konstantinos Papadopoulos, and Djamila Aouada. Leveraging high-frequency components for deepfake detection. In 2021 IEEE 23rd International Workshop on Multimedia Signal Processing (MMSP), pages 1-6, 2021. 2",
1687
+ "[33] Nesryne Mejri, Enjie Ghorbel, and Djamila Aouada. Untag: Learning generic features for unsupervised type-agnostic deepfake detection. In ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-5, 2023. 2",
1688
+ "[34] Rafael Müller, Simon Kornblith, and Geoffrey E. Hinton. When does label smoothing help? CoRR, abs/1906.02629, 2019. 7",
1689
+ "[35] Huy H. Nguyen, Junichi Yamagishi, and Isao Echizen. Capsule-forensics: Using capsule networks to detect forged images and videos. CoRR, abs/1810.11215, 2018. 2",
1690
+ "[36] Thanh Thi Nguyen, Cuong M. Nguyen, Dung Tien Nguyen, Duc Thanh Nguyen, and Saeid Nahavandi. Deep learning for deepfakes creation and detection. CoRR, abs/1909.11573, 2019. 2",
1691
+ "[37] Andreas Rössler, Davide Cozzolino, Luisa Verdoliva, Christian Riess, Justus Thies, and Matthias Nießner. FaceForensics++: Learning to detect manipulated facial images. In International Conference on Computer Vision (ICCV), 2019. 1, 2, 5, 6, 7, 8, 9, 10",
1692
+ "[38] Selim S. Seferbekov, Vladimir I. Iglovikov, Alexander V. Buslaev, and Alexey A. Shvets. Feature pyramid network for multi-class land segmentation. CoRR, abs/1806.03510, 2018. 5",
1693
+ "[39] Ramprasaath R. Selvaraju, Abhishek Das, Ramakrishna Vedantam, Michael Cogswell, Devi Parikh, and Dhruv Batra. Grad-cam: Why did you say that? visual explanations from deep networks via gradient-based localization. CoRR, abs/1610.02391, 2016. 7, 8, 9",
1694
+ "[40] Zehua Sheng, Zhu Yu, Xiongwei Liu, Si-Yuan Cao, Yuqi Liu, Hui-Liang Shen, and Huaqi Zhang. Structure aggregation for cross-spectral stereo image guided denoising. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 13997-14006, 2023. 7",
1695
+ "[41] Kaede Shiohara and Toshihiko Yamasaki. Detecting deepfakes with self-blended images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18720-18729, 2022. 1, 2, 3, 6, 7, 8, 9, 10",
1696
+ "[42] Inder Pal Singh, Nesryne Mejri, van Dat Nguyen, Enjie Ghorbel, and Djamila Aouada. Multi-label deepfake classification. IEEE Workshop on Multimedia Signal Processing, 2023. 2"
1697
+ ],
1698
+ "bbox": [
1699
+ 78,
1700
+ 90,
1701
+ 468,
1702
+ 898
1703
+ ],
1704
+ "page_idx": 11
1705
+ },
1706
+ {
1707
+ "type": "list",
1708
+ "sub_type": "ref_text",
1709
+ "list_items": [
1710
+ "[43] Zekun Sun, Yujie Han, Zeyu Hua, Na Ruan, and Weijia Jia. Improving the efficiency and robustness of deepfakes detection through precise geometric features. 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3608-3617, 2021. 6, 10",
1711
+ "[44] Mingxing Tan and Quoc V. Le. Efficientnet: Rethinking model scaling for convolutional neural networks. CoRR, abs/1905.11946, 2019. 2, 3, 6, 9",
1712
+ "[45] Justus Thies, Michael Zollhöfer, and Matthias Nießner. Deferred neural rendering: Image synthesis using neural textures. CoRR, abs/1904.12356, 2019. 5",
1713
+ "[46] Justus Thies, Michael Zollhöfer, Marc Stamminger, Christian Theobalt, and Matthias Nießner. Face2face: Real-time face capture and reenactment of RGB videos. CoRR, abs/2007.14808, 2020. 5",
1714
+ "[47] Zhi Tian, Chunhua Shen, Hao Chen, and Tong He. FCOS: fully convolutional one-stage object detection. CoRR, abs/1904.01355, 2019. 5",
1715
+ "[48] Jane Wakefield. Deepfake presi-dents used in Russia-Ukraine war. https://www.bbc.com/news/technology-60780142, 2022. [Online; accessed 7-March-2023]. 1",
1716
+ "[49] Run Wang, Lei Ma, Felix Juefei-Xu, Xiaofei Xie, Jian Wang, and Yang Liu. Fakespotter: A simple baseline for spotting ai-synthesized fake faces. CoRR, abs/1909.06122, 2019. 2",
1717
+ "[50] Yuan Wang, Kun Yu, Chen Chen, Xiyuan Hu, and Silong Peng. Dynamic graph learning with content-guided spatial-frequency relation reasoning for deepfake detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7278-7287, 2023. 2, 3, 6, 7, 10",
1718
+ "[51] Zhou Wang, A.C. Bovik, H.R. Sheikh, and E.P. Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE Transactions on Image Processing, 13(4): 600-612, 2004. 2",
1719
+ "[52] Zhendong Wang, Jianmin Bao, Wengang Zhou, Weilun Wang, and Houqiang Li. Altfreezing for more general video face forgery detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4129-4138, 2023. 2, 3, 6, 7, 10",
1720
+ "[53] Zhiyuan Yan, Yong Zhang, Yanbo Fan, and Baoyuan Wu. Ucf: Uncovering common features for generalizable deepfake detection. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 22412-22423, 2023. 6, 10",
1721
+ "[54] Eric Zhao, Xiang Xu, Mingze Xu, Hui Ding, Yuanjun Xiong, and Wei Xia. Learning self-consistency for deepfake detection. In ICCV 2021, 2021. 1, 2, 4, 5, 6, 7, 9, 10",
1722
+ "[55] Hanqing Zhao, Wenbo Zhou, Dongdong Chen, Tianyi Wei, Weiming Zhang, and Nenghai Yu. Multi-attentional deepfake detection. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2185–2194, 2021. 1, 2, 3, 5, 6, 7, 8, 10",
1723
+ "[56] Zhun Zhong, Liang Zheng, Guoliang Kang, Shaozi Li, and Yi Yang. Random erasing data augmentation. CoRR, abs/1708.04896, 2017. 7",
1724
+ "[57] Bojia Zi, Minghao Chang, Jingjing Chen, Xingjun Ma, and Yu-Gang Jiang. Wilddeepfake: A challenging real-world"
1725
+ ],
1726
+ "bbox": [
1727
+ 501,
1728
+ 92,
1729
+ 929,
1730
+ 900
1731
+ ],
1732
+ "page_idx": 11
1733
+ },
1734
+ {
1735
+ "type": "header",
1736
+ "text": "dataset for deepfake detection. Proceedings of the 28th ACM International Conference on Multimedia, 2020. 2, 6, 10",
1737
+ "bbox": [
1738
+ 109,
1739
+ 90,
1740
+ 470,
1741
+ 119
1742
+ ],
1743
+ "page_idx": 12
1744
+ }
1745
+ ]
2401.13xxx/2401.13856/d4393d1b-2373-4469-9ec8-bdde60cff1f4_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.13xxx/2401.13856/d4393d1b-2373-4469-9ec8-bdde60cff1f4_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e925660d229a2231b3b20c30a304b2b1f780cd56e96a4905999965a57caef50d
3
+ size 4539061
2401.13xxx/2401.13856/full.md ADDED
@@ -0,0 +1,368 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # LAA-Net: Localized Artifact Attention Network for Quality-Agnostic and Generalizable Deepfake Detection
2
+
3
+ Dat NGUYEN*, Nesryne MEJRI*, Inder Pal SINGH*, Polina KULESHOVA*
4
+ Marcella ASTRID*, Anis KACEM*, Enjie GHORBEL*, Djamila AOUADA*
5
+ CVI², SnT, University of Luxembourg*
6
+
7
+ Cristal Laboratory, National School of Computer Sciences, University of Manouba
8
+
9
+ {dat.nguyen,nesryne.mejri,inder.singh,polina.kuleshova, marcella.astrid,anis.kacem,enjie.ghorbel,djamila.aouada}@uni.lu
10
+
11
+ # Abstract
12
+
13
+ This paper introduces a novel approach for high-quality deepfake detection called Localized Artifact Attention Network (LAA-Net). Existing methods for high-quality deepfake detection are mainly based on a supervised binary classifier coupled with an implicit attention mechanism. As a result, they do not generalize well to unseen manipulations. To handle this issue, two main contributions are made. First, an explicit attention mechanism within a multi-task learning framework is proposed. By combining heatmap-based and self-consistency attention strategies, LAA-Net is forced to focus on a few small artifact-prone vulnerable regions. Second, an Enhanced Feature Pyramid Network (E-FPN) is proposed as a simple and effective mechanism for spreading discriminative low-level features into the final feature output, with the advantage of limiting redundancy. Experiments performed on several benchmarks show the superiority of our approach in terms of Area Under the Curve (AUC) and Average Precision (AP). The code is available at https://github.com/10Ring/LAA-Net.
14
+
15
+ # 1. Introduction
16
+
17
+ Thanks to the development of generative models, tremendous advances in deepfake creation have been witnessed. Unfortunately, these fake visual data can be employed for malicious purposes, as shown in [4, 48]. The fact that deepfake generation techniques are rapidly gaining in realism only exacerbates this issue. It is, therefore, crucial to design methods capable of automatically detecting deepfakes, including the most realistic ones that are commonly referred to as high-quality deepfakes. Nonetheless, detecting high-quality deepfakes remains extremely challenging as they usually enclose subtle and localized artifacts.
18
+
19
+ ![](images/8273d019d5764cf49afe3053cfda15da459fab8d859c8dbf1133fc85e34cbd16.jpg)
20
+ (a)
21
+
22
+ ![](images/908a98c50221e1cadcf6d5f29b67c99e8010018ec89f676aa9c64836f14c4cbd.jpg)
23
+ (b)
24
+ Figure 1. Comparison of LAA-Net $(\bullet)$ with respect to existing methods, namely, Multi-attentional $(\bullet)$ [55], SBI $(\bullet)$ [41], Xception $(\bullet)$ [37], RECCE $(\bullet)$ [6], CADDM $(\bullet)$ [14], using (a) the AUC performance with respect to different ranges of Mask SSIM, and (b) its associated boxplots. *The results were obtained using the official source codes pretrained on FF++ [37] and testing on Celeb-DFv2 [25]. Figure best viewed in colors.
25
+
26
+ Recent works have mostly focused on improving the generalization capabilities of deepfake detection methods by adopting multi-task learning [7, 24, 54] and/or heuris-
27
+
28
+ tic fake data generation [24, 41] strategies. However, most of these methods fail to model localized artifacts, which are critical for detecting high-quality deepfakes. This could be explained by the fact that Vanilla Deep Learning (DL) architectures are mainly used. These common architectures, such as XceptionNet [9] and EfficientNet [44], tend to learn global features, ignoring more localized cues [50, 55]. With the use of successive convolutions, localized features across layers gradually fade. Hence, proposing suitable mechanisms for capturing local and subtle artifacts turns out to be necessary.
29
+
30
+ To the best of our knowledge, only a few research works have explored this research direction [50, 55]. They mainly introduce attention modules that implicitly model subtle inconsistencies through low-level representations [50, 55]. Nevertheless, they still rely on single binary classifiers trained with real/deepfake images without considering any additional strategy for avoiding generalization issues. This considerably restricts the practical usefulness of these methods.
31
+
32
+ Hence, our goal is to address the detection of high-quality deepfakes and, at the same time, improve the generalization performance. We argue that this can be achieved by designing an attention module compatible with generic deepfake detection strategies. In particular, the solution would be to introduce an explicit fine-grained mechanism within a multi-task learning framework supported by an appropriate pseudo-fake synthesis technique. Moreover, in addition to such a learning strategy, we posit that an adequate architecture preserving low-level features could implicitly contribute to better capturing localized artifacts.
33
+
34
+ More concretely, this paper proposes a novel fine-grained approach called Localized Artifact Attention Network (LAA-Net) that relies on a multi-task learning framework. First, a new fine-grained mechanism that aims at focusing on small regions centered at the vulnerable pixels is introduced. By vulnerable pixels, we mean the pixels that are more likely to showcase a blending artifact<sup>1</sup>. This is achieved by considering two auxiliary branches, namely, a heatmap branch and a self-consistency branch. On the one hand, the heatmap branch allows localizing the set of vulnerable pixels while taking into account their neighborhood. On the other hand, the self-consistency branch estimates the similarity of pixels with respect to a randomly selected vulnerable point. To simulate fake data and generate ground-truth heatmaps and self-consistency matrices that are predicted by the additional branches, blending-based data synthesis such as [24, 41] are leveraged. Second, the proposed architecture incorporates a novel, simple, yet effective Feature Pyramid Network (FPN) [27] termed Enhanced FPN (E-FPN). It enables making use of multi-scale features while avoiding redundancy. In fact, it has been
35
+
36
+ shown that reducing feature redundancy contributes to the regularization of Deep Neural Networks (DNNs) [2]. While the proposed attention mechanism guided by the vulnerable points helps the network to focus explicitly on artifact-prone regions, E-FPN forces the model to consider implicitly local cues. The association of these two complementary components makes LAA-Net a suitable candidate for fine-grained and generic deepfake detection. As reflected in Figure 1, our approach achieves better and more stable Area Under the Curve (AUC) performance as compared to existing methods [6, 14, 37, 41, 55] regardless of the quality of deepfakes, quantified using the Mask Structural SIMilarity $(\mathrm{Mask - SSIM}^2)$ . For a more comprehensive evaluation, in addition to the standard AUC, another metric is reported, namely, Average Precision (AP). We report experiments on several deepfake benchmarks and show that LAA-Net outperforms the state-of-the-art (SoA).
37
+
38
+ # Contributions. In summary, the paper contributions are:
39
+
40
+ 1. A novel multi-task learning method for fine-grained and generic deepfake detection called LAA-Net. It is trained using real data only.
41
+ 2. An explicit attention mechanism for focusing on vulnerable points combining heatmap-based and self-consistency attention strategies.
42
+ 3. A new FPN design, called E-FPN, ensures the efficient propagation of low-level features without incurring redundancy<sup>3</sup>.
43
+ 4. Extensive experiments and a comprehensive analysis reported on several benchmarks, namely, FF++ [37], CDF2 [25], DFD [15], DFDC [13], and DFW [57].
44
+
45
+ Paper Organization. The remainder of the paper is organized as follows: Section 2 reviews related works. Section 3 introduces the proposed approach, and Section 4 reports the experiments and discusses the results. Finally, Section 5 concludes this work and suggests future investigations.
46
+
47
+ # 2. Related Works: Attention-based Deepfake Detection
48
+
49
+ Prior works are diverse in the way they approach the problem of deepfake detection [1, 31, 32, 35, 36, 42, 49]. Earlier methods generally formulate it as a purely binary classification [10, 37], leading to poor generalization capabilities. As a solution, two main strategies have been investigated by the research community, namely, multitask learning [5-7, 19, 24, 54] and/or pseudo-fake generation [3, 24, 33, 41, 52].
50
+
51
+ ![](images/2a94dde7eef925da482da0327b3f7a512cc0d0fbe8358610c5a3a2cfe99177a7.jpg)
52
+ Figure 2. Overview of the proposed LAA-Net approach: it is formed by two components, namely, (1) an explicit attention mechanism based on a multi-task learning framework composed of three branches, i.e., the binary classification branch, the heatmap branch, and the self-consistency branch. The heatmap and self-consistency ground-truth data are generated based on the detected vulnerable points, and (2) an Enhanced Feature Pyramid Networks (E-FPN) that aggregates multi-scale features.
53
+
54
+ Despite their great potential, the aforementioned models are less robust when considering high-quality deepfakes. Indeed, these SoA methods mainly employ traditional DNN backbones such as XceptionNet [9] and EfficientNet [44]. Hence, through their successive convolution layers, they implicitly generate global features. As a result, low-level cues, that can be very informative, might be unintentionally ignored, leading to poor detection performance of high-quality deepfakes. It is, therefore, crucial to design adequate strategies for modeling more localized artifacts.
55
+
56
+ Alternatively, some attention-based methods such as [50, 55] have been proposed. Specifically, they have made attempts to integrate attention modules for implicitly focusing on low-level artifacts [50, 55]. Unfortunately, the two aforementioned methods make use of a unique binary classifier solely trained with real and deepfake images. This means that they do not consider any pseudo-fake generation technique or multi-task learning strategy. Consequently, as demonstrated experimentally, they do not generalize well to unseen datasets in comparison to other recent techniques [3, 41, 52].
57
+
58
+ # 3. Localized Artifact Attention Network (LAA-Net)
59
+
60
+ Our goal is to introduce a method that is robust to high-quality deepfakes yet capable of handling unseen manipulations. Accordingly, we introduce a fine-grained method called Localized Artifact Attention Network (LAA-Net) illustrated in Figure 2. LAA-Net incorporates: (1) an explicit attention mechanism and (2) a new architecture based on an enhanced FPN, called E-FPN.
61
+
62
+ First, the proposed attention mechanism aims at explicitly focusing on blending artifact-prone pixels referred to as vulnerable points (a formal definition is given in Sec-
63
+
64
+ tion 3.1). For that purpose, a hand-free annotation of vulnerable points is proposed by leveraging a blending-based data synthesis. Specifically, a multi-task learning framework composed of three simultaneously optimized branches, namely (a) classification, (b) heatmap regression, and (c) self-consistency regression, is introduced, as depicted in Figure 2. The classification branch predicts whether the input image is fake or real, while the two other branches aim at giving attention to vulnerable pixels. Second, E-FPN allows extracting multi-scale features without injecting redundancy. This enables modeling low-level features, which can better discriminate subtle inconsistencies.
65
+
66
+ # 3.1. Explicit Attention to Vulnerable Points
67
+
68
+ # 3.1.1 Blending-based Data Synthesis
69
+
70
+ We start by recalling blending-based data synthesis methods such as [24, 41]. In fact, the proposed method relies on this kind of pseudo-fake generation and, therefore avoids using actual deepfakes and manually annotating data to train the proposed multi-task learning framework. Let us consider a manipulated face image denoted by $\mathbf{I}_{\mathrm{M}}$ . The image $\mathbf{I}_{\mathrm{M}}$ can be obtained by combining (e.g., blending) two images denoted by $\mathbf{I}_{\mathrm{F}}$ and $\mathbf{I}_{\mathrm{B}}$ as follows,
71
+
72
+ $$
73
+ \mathbf {I} _ {\mathrm {M}} = \mathbf {M} \odot \mathbf {I} _ {\mathrm {F}} + (1 - \mathbf {M}) \odot \mathbf {I} _ {\mathrm {B}}, \tag {1}
74
+ $$
75
+
76
+ where $\mathbf{I}_{\mathrm{F}}$ refers to the foreground image enclosing the desired facial attributes, $\mathbf{I}_{\mathrm{B}}$ indicates a background image, $\mathbf{M}$ is the deformed Convex Hull mask with values varying between 0 and 1, and $\odot$ denotes the element-wise multiplication operator.
77
+
78
+ ![](images/2c89710f1c60e2f075089a332ed953a704cd2dabc72c120b1478a1d114c3b395.jpg)
79
+ Figure 3. Extraction of the vulnerable points.
80
+
81
+ # 3.1.2 Proposed Multi-task Learning Framework
82
+
83
+ In addition to the deepfake classification branch, the network learns to focus on specific regions by taking advantage of the parallel Heatmap and Self-consistency branches. Our hypothesis is that deepfake detection can be formulated as a fine-grained classification. Therefore, giving more attention to the vulnerable points should be an effective solution for detecting high-quality deepfakes. For the sake of clarity, we start by formally defining the notion of 'vulnerable points'.
84
+
85
+ Definition 1 - Vulnerable points in a deepfake image are the pixels that are more likely to carry blending artifacts.
86
+
87
+ As discussed in Section 3.1.1, any deepfake generation approach involves a blending operation for mixing the background and the foreground of two different images $\mathbf{I}_B$ and $\mathbf{I}_F$ , respectively. This implies the presence of blending artifacts regardless of the used generation approach. Thus, we posit that the vulnerable points can be seen as the pixels belonging to the blending regions with the most equivalent contributions from both $\mathbf{I}_B$ and $\mathbf{I}_F$ .
88
+
89
+ In this paper, we assume that we work under a realistic setting where we only have access to real data during training. A blending-based augmentation is, therefore, considered and leveraged for defining vulnerable pixels. Specifically, inspired from [24], a blending boundary mask $\mathbf{B} = (b_{ij})_{i,j\in [[1,D]]}$ is firstly computed as follows,
90
+
91
+ $$
92
+ \mathbf {B} = 4 \cdot \mathbf {M} \odot (\mathbf {1} - \mathbf {M}), \tag {2}
93
+ $$
94
+
95
+ with $\mathbf{1}$ being an all-one matrix. Note that $\mathbf{M}$ is defined in Eq. (1). The variable $D$ is the height and width of $\mathbf{B}$ , and $b_{ij}$ its value at the position $(i,j)$ . A higher value of $b_{ij}$ indicates that the position $(i,j)$ is more impacted by the blending. Hence, if an input image is real, $\mathbf{B}$ should be set to $\mathbf{0}$ . Then, the set of vulnerable pixels denoted by $\mathcal{P}$ is defined as follows,
96
+
97
+ $$
98
+ \mathcal {P} = \underset {(i, j) \in \llbracket 1, D \rrbracket ^ {2}} {\operatorname {argmax}} (\mathbf {B}), \tag {3}
99
+ $$
100
+
101
+ where $\llbracket\rrbracket$ defines an integer interval. Figure 3 illustrates the extraction of vulnerable points. In the following, we
102
+
103
+ describe how the notion of vulnerable points is used within the heatmap and self-consistency branches.
104
+
105
+ Heatmap Branch. In general, forgery artifacts not only appear in a single pixel but also affect its surroundings. Hence, considering vulnerable points as well as their neighborhood is more appropriate for effectively discriminating deepfakes, especially in the presence of images with local irregularities caused by noise or illumination changes. To model that, we propose to use a heatmap representation that encodes at the same time the information of both vulnerable points as well as their neighbor points.
106
+
107
+ More specifically, ground-truth heatmaps are generated by fitting an Unnormalized Gaussian Distribution for each pixel $\mathbf{p}^k\in \mathcal{P}$ . The pixel $\mathbf{p}^k$ is considered as the center of the Gaussian Mask $\mathbf{G}^k$ . To take into account the neighborhood information of $\mathbf{p}^k$ , the standard deviation of $\mathbf{G}^k$ is adaptively computed. In particular, inspired from the work of [23], the standard deviation $\sigma_{k}$ of $\mathbf{p}^k$ is computed based on the width and the height of the blending boundary mask $\mathbf{B}$ with respect to the point $\mathbf{p}^k$ . Similar to [23], a radius $r_k$ is computed based on the size of the set of virtual objects that overlap the mask centered at $\mathbf{p}^k$ with an Intersection over Union (IoU) greater than a threshold $t$ . In all our experiments, we set $t$ to 0.7 and we assume that $\sigma_{k} = \frac{1}{3} r_{k}$ . Hence, $\mathbf{G}^{k} = (g_{ij}^{k})_{i,j\in [[1,D]]}$ is computed as follows,
108
+
109
+ $$
110
+ g _ {i j} ^ {k} = e ^ {- \frac {i ^ {2} + j ^ {2}}{2 \sigma_ {k} ^ {2}}}, \tag {4}
111
+ $$
112
+
113
+ where $i$ and $j$ refer to the pixel position relative to the center $\mathbf{p}^k$ . The ground-truth heatmap $\mathbf{H}$ is finally constructed by superimposing the set $\mathcal{G} = \{\mathbf{G}^k\}_{k\in [[1,\mathrm{card}(\mathcal{P})]]}$ . A figure depicting the heatmap generation process is provided in supplementary materials.
114
+
115
+ For optimizing the heatmap branch, the following focal loss [26] is used,
116
+
117
+ $$
118
+ L _ {\mathrm {H}} = \sum_ {i, j} ^ {D} - (1 - \tilde {h} _ {i j}) ^ {\gamma} \log \tilde {h} _ {i j}, \tag {5}
119
+ $$
120
+
121
+ such that,
122
+
123
+ $$
124
+ \tilde {h} _ {i j} = \left\{ \begin{array}{l l} \hat {h} _ {i j} & \text {if } h _ {i j} = 1, \\ 1 - \hat {h} _ {i j} & \text {otherwise}, \end{array} \right. \tag {6}
125
+ $$
126
+
127
+ with $\hat{h}_{ij}$ and $h_{ij}$ being the value of the predicted heatmap $\hat{\mathbf{H}}$ and the ground-truth $\mathbf{H}$ at the pixel location $(i,j)$ , respectively. The hyperparameter $\gamma$ is used to stabilize the adaptive loss weights.
128
+
129
+ Self-consistency Branch. To enhance the proposed attention mechanism, the idea of learning self-consistency proposed in [54] is revisited to fit our context. Instead of computing the consistency values for each pixel of the mask,
130
+
131
+ we consider only the vulnerable location. Since the set $\mathcal{P}$ might include more than one pixel (the blending mask can include several pixels with equal values), we randomly choose one of them that we denote by $\mathbf{p}^s$ for generating the self-consistency ground-truth matrix. Hence, the generated matrices denoted by $\mathbf{C}$ are 2-dimensional and not 4-dimensional as in the original method. Given the randomly selected vulnerable point $\mathbf{p}^s = (u,v)$ , the self-consistency $\mathbf{C}$ matrix is computed as,
132
+
133
+ $$
134
+ \mathbf {C} = \mathbf {1} - \left| b _ {u v} \cdot \mathbf {1} - \mathbf {B} \right|, \tag {7}
135
+ $$
136
+
137
+ where $|\cdot|$ refers to the element-wise absolute value and $\mathbf{1}$ is an all-one matrix.
138
+
139
+ This refinement allows for reducing the model size and, consequently, the computational cost. It can also be noted that even though our method is inspired by [54], our self-consistency branch is inherently different. In [54], the consistency is calculated between the foreground and background, whereas we measure the consistency between the vulnerable point and the other pixels of the blended mask. The self-consistency loss $L_{\mathrm{C}}$ is then computed as a binary cross entropy loss between $\mathbf{C}$ and the predicted self-consistency $\hat{\mathbf{C}}$ .
140
+
141
+ Training Strategy. The network is optimized using the following loss,
142
+
143
+ $$
144
+ L = L _ {\mathrm {B C E}} + \lambda_ {1} L _ {\mathrm {H}} + \lambda_ {2} L _ {\mathrm {C}}, \tag {8}
145
+ $$
146
+
147
+ where $L_{\mathrm{BCE}}$ denotes the binary cross-entropy classification loss. $L_{\mathrm{H}}$ and $L_{\mathrm{C}}$ are weighted by the hyperparameters $\lambda_{1}$ and $\lambda_{2}$ , respectively. Note that only real and pseudo-fakes are used during training.
148
+
149
+ # 3.2. Enhanced Feature Pyramid Network (E-FPN)
150
+
151
+ Feature Pyramid Networks (FPN) are widely adopted feature extractors capable of complementing global representations with multi-scale low-level features captured at different resolutions [27]. This makes them ideal candidates for implicitly supporting the heatmap and self-consistency branches towards fine-grained deepfake detection. Although some attempts have been made to exploit multi-scale features [14], no previous works have considered FPN in the context of deepfake detection.
152
+
153
+ Over the last years, several FPN variants have been proposed for numerous computer vision tasks [26, 27, 38, 47]. Nevertheless, these FPN-based methods usually lead to the generation of redundant features, which might, in turn, lead to the overfitting of the model [2]. Moreover, as described in Section 1, small discrepancies are gradually eliminated through the successive convolution blocks [55], going from high-resolution low-level to low-resolution high-level features. Consequently, the last block outputs usually contain
154
+
155
+ global features where local artifact-sensitive features might be discarded. To overcome this issue, we introduce a new alternative referred to as Enhanced Feature Pyramid Network (E-FPN) that is integrated in the proposed LAA-Net architecture. The E-FPN goal is to propagate relevant information from high to low-resolution feature representations.
156
+
157
+ As shown in Figure 4, we denote the output shape of the $N - 1$ latest layers by $(n^{(l)},D^{(l)},D^{(l)})$ with $l\in \llbracket 2,N\rrbracket$ . For the sake of simplicity, we assume that the shape of the feature maps is square. For a given layer $l$ , $n^{(l)},D^{(l)}$ and $\mathbf{F}^{(l)}$ correspond, respectively, to its feature dimension, its height and width, and its output features. For strengthening the textural information in the ultimate layer $\mathbf{F}^{(N)}$ , we propose to take advantage of the features generated by previous layers $\mathbf{F}^{(l)}$ with $l\in \llbracket 2,N - 1\rrbracket$ . Concretely, for each layer $l$ , a convolution followed by a transpose convolution is applied to $\mathbf{F}^{(l + 1)}$ . The obtained features are denoted by $\mathbf{E}^{(l)}$ and have the same shape as $\mathbf{F}^{(l)}$ . Then, a sigmoid function is applied to $\mathbf{E}^{(l)}$ returning probabilities. The latter indicates the pixels that contributed to the final decision. For enriching $\mathbf{F}^{(l + 1)}$ while avoiding redundancy related to the most contributing pixels, the features $\mathbf{F}^{(l)}$ are filtered by computing $(1 - \mathrm{sigmoid}(\mathbf{E}^{(l)}))^{\gamma_w}$ resulting in a weighted mask. The latter is concatenated along the same axis with $\mathbf{E}^{(l)}$ for obtaining the final features. This operation is iterated for all the layers $l\in \llbracket 2,N - 1\rrbracket$ . In summary, the final representation $\mathbf{F}'^{(l)}$ is obtained as follows,
158
+
159
+ $$
160
+ \mathbf {F} ^ {\prime (l)} = \left(\mathbf {F} ^ {(l)} \odot \left(1 - \operatorname {s i g m o i d} \left(\mathbf {E} ^ {(l)}\right)\right) ^ {\gamma_ {w}} \oplus \mathbf {E} ^ {(l)}\right), \tag {9}
161
+ $$
162
+
163
+ where $\mathbf{E}^{(l)} = \mathfrak{T}(f(\mathbf{F}^{\prime (l + 1)}))$ with $\mathbf{F}^{\prime (l + 1)} = \mathbf{F}^{(l + 1)}$ if $l = N - 1$, such that $f$ and $\mathfrak{T}$ are, respectively, the convolution and transpose convolution operators, and $\oplus$ refers to the concatenation operator. The hyper-parameter $\gamma_w$ is set to 1 in all our experiments. The relevance of E-FPN in the context of deepfake detection is experimentally demonstrated in Section 4, as compared to the traditional FPN.
164
+
165
+ # 4. Experiments
166
+
167
+ In this section, we start by presenting the experimental settings. Then, we compare the performance of LAA-Net to SoA methods, both qualitatively and quantitatively. Finally, we conduct an ablation study to validate the different components of LAA-Net.
168
+
169
+ # 4.1. Experimental Settings
170
+
171
+ Datasets. The $\mathrm{FF++}$ [37] dataset is used for training and validation. In our experiments, we follow the standard splitting protocol of [37]. This dataset contains 1000 original videos and 4000 fake videos generated by four different manipulation methods, namely, Deepfakes (DF) [11], Face2Face (F2F) [46], FaceSwap (FS) [22], and NeuralTextures (NT) [45]. In the training process, we utilize real
172
+
173
+ ![](images/cd63492e2d35b2529f39145b81a11b4be8194b906958f9fa8925e387a7203216.jpg)
174
+ Figure 4. Architecture of the proposed Enhanced Feature Pyramid Network (E-FPN).
175
+
176
+ <table><tr><td rowspan="3">Method</td><td colspan="2">Training set</td><td colspan="9">Test set</td></tr><tr><td rowspan="2">Real</td><td rowspan="2">Fake</td><td>In-dataset</td><td colspan="8">Cross-dataset</td></tr><tr><td>FF++AUC (%)</td><td>CDF2AUC (%)</td><td>AP (%)</td><td>DFWAUC (%)</td><td>AP (%)</td><td>DFD AUC (%)</td><td>AP (%)</td><td>DFDC AUC (%)</td><td>AP (%)</td></tr><tr><td>Xception [37]</td><td>✓</td><td>✓</td><td>99.09</td><td>61.18</td><td>66.93</td><td>65.29</td><td>55.37</td><td>89.75</td><td>85.48</td><td>69.90</td><td>91.98</td></tr><tr><td>FaceXRay+BI [24]</td><td>✓</td><td>✓</td><td>99.20</td><td>79.5</td><td>-</td><td>-</td><td>-</td><td>95.40</td><td>93.34</td><td>65.5</td><td>-</td></tr><tr><td>LRNet [43]</td><td>✓</td><td>✓</td><td>-</td><td>53.20</td><td>-</td><td>-</td><td>-</td><td>52.29</td><td>-</td><td>-</td><td>-</td></tr><tr><td>LocalRL [8]</td><td>✓</td><td>✓</td><td>99.92</td><td>78.26</td><td>-</td><td>-</td><td>-</td><td>89.24</td><td>-</td><td>76.53</td><td>-</td></tr><tr><td>TI2Net [28]</td><td>✓</td><td>✓</td><td>-</td><td>68.22</td><td>-</td><td>-</td><td>-</td><td>72.03</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Multi-attentional [55]</td><td>✓</td><td>✓</td><td>-</td><td>68.26</td><td>75.25</td><td>73.56</td><td>73.79</td><td>92.95</td><td>96.51</td><td>63.02</td><td>-</td></tr><tr><td>RECCE [6]</td><td>✓</td><td>✓</td><td>-</td><td>70.93</td><td>70.35</td><td>68.16</td><td>54.41</td><td>98.26</td><td>79.42</td><td>-</td><td>-</td></tr><tr><td>SFDG [50]</td><td>✓</td><td>✓</td><td>99.53</td><td>75.83</td><td>-</td><td>69.27</td><td>-</td><td>88.00</td><td>-</td><td>73.63</td><td>-</td></tr><tr><td>EIC+IIE [20]</td><td>✓</td><td>✓</td><td>99.32</td><td>83.80</td><td>-</td><td>-</td><td>-</td><td>93.92</td><td>-</td><td>81.23</td><td>-</td></tr><tr><td>AltFreezing [52]</td><td>✓</td><td>✓</td><td>98.6</td><td>89.50</td><td>-</td><td>-</td><td>-</td><td>98.50</td><td>-</td><td>-</td><td>-</td></tr><tr><td>CADDM 
[14]</td><td>✓</td><td>✓</td><td>99.79</td><td>93.88</td><td>91.12</td><td>74.48</td><td>75.23</td><td>99.03</td><td>99.59</td><td>-</td><td>-</td></tr><tr><td>UCF [53]</td><td>✓</td><td>✓</td><td>-</td><td>82.4</td><td>-</td><td>-</td><td>-</td><td>94.5</td><td>-</td><td>80.5</td><td>-</td></tr><tr><td>Controllable GS [17]</td><td>✓</td><td>✓</td><td>-</td><td>84.97</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>81.65</td><td>-</td></tr><tr><td>PCL+I2G [54]</td><td>✓</td><td></td><td>99.11</td><td>90.03</td><td>-</td><td>-</td><td>-</td><td>99.07</td><td>-</td><td>74.27</td><td>-</td></tr><tr><td>SBI [41]</td><td>✓</td><td></td><td>99.64</td><td>93.18</td><td>85.16</td><td>67.47</td><td>55.87</td><td>97.56</td><td>92.79</td><td>86.15</td><td>93.24</td></tr><tr><td>AUNet [3]</td><td>✓</td><td></td><td>99.46</td><td>92.77</td><td>-</td><td>-</td><td>-</td><td>99.22</td><td>-</td><td>86.16</td><td>-</td></tr><tr><td>Ours (w/ BI)</td><td>✓</td><td></td><td>99.95</td><td>86.28</td><td>91.93</td><td>57.13</td><td>56.89</td><td>99.51</td><td>99.80</td><td>69.69</td><td>93.67</td></tr><tr><td>Ours (w/ SBI)</td><td>✓</td><td></td><td>99.96</td><td>95.40</td><td>97.64</td><td>80.03</td><td>81.08</td><td>98.43</td><td>99.40</td><td>86.94</td><td>97.70</td></tr></table>
177
+
178
+ Table 1. In-dataset and Cross-dataset evaluation in terms of AUC and AP on multiple deepfake datasets. Bold and Underlined highlight the best and the second-best performance, respectively.
179
+
180
+ images only to dynamically generate pseudo-fakes, as discussed in Section 3. To evaluate the generalization capability of the proposed approach as well as its robustness to high-quality deepfakes, we test the trained model on four datasets incorporating different quality of deepfakes, namely, Celeb-DFv2 [25] (CDF2), DeepFake Detection [15] (DFD), DeepFake Detection Challenge [13] (DFDC) and Wild Deepfake [57] (DFW). To assess the quality of the considered datasets, we compute the Mask-SSIM² for each benchmark. In particular, CDF2 [25] is formed by the most realistic deepfakes with an average Mask-SSIM [29] value of 0.92, followed by DFD and DFDC with an average Mask-SSIM of 0.88 and 0.84, respectively. We note that computing the Mask-SSIM [25] for DFW was not possible since real and fake images are not paired.
181
+
182
+ Evaluation Metrics. To compare the performance of LAA-Net with the state-of-the-art, we report the common Area Under the Curve (AUC) metric at the video-level and the Average Precision (AP) as in [14, 24, 41, 54]. More metrics, namely, Average Recall (AR) and mean F1-score (mF1) are provided in supplementary materials.
183
+
184
+ Implementation Details. To train our model, 128 training and 32 validation frames are used. RetinaNet [26] is used to crop faces with a conservative enlargement (by a factor of 1.25) around the face center. Note that all the cropped images are then resized to $384 \times 384$ . In addition, 68 facial landmarks are extracted per frame using Dlib [21]. We adopt the EFNB4 variant of the EfficientNet [44] pretrained on ImageNet [12]. For each training epoch, 8 frames are dynamically selected and used for online pseudo-fake gen
185
+
186
+ <table><tr><td>Method</td><td>Fake</td><td>Saturation</td><td>Contrast</td><td>Block</td><td>Noise</td><td>Blur</td><td>Pixel</td></tr><tr><td>Xception [9]</td><td>✓</td><td>99.3</td><td>98.6</td><td>99.7</td><td>53.8</td><td>60.2</td><td>74.2</td></tr><tr><td>FaceXray [24]</td><td>✓</td><td>97.6</td><td>88.5</td><td>99.1</td><td>49.8</td><td>63.8</td><td>88.6</td></tr><tr><td>LipForensics [18]</td><td>✓</td><td>99.9</td><td>99.6</td><td>87.4</td><td>73.8</td><td>96.1</td><td>95.6</td></tr><tr><td>CADDM [14]</td><td>✓</td><td>99.6</td><td>99.8</td><td>99.8</td><td>87.4</td><td>99.0</td><td>98.8</td></tr><tr><td>Ours</td><td></td><td>99.96</td><td>99.96</td><td>99.96</td><td>53.9</td><td>98.22</td><td>99.80</td></tr></table>
187
+
188
+ Table 2. Robustness inspection on the $\mathrm{FF}++$ with different types of perturbation. Bold and Underline highlight the best and the second-best performance, respectively.
189
+
190
+ <table><tr><td rowspan="2">C</td><td rowspan="2">H</td><td rowspan="2">E-FPN</td><td colspan="5">Test set AUC (%)</td></tr><tr><td>CDF2</td><td>DFD</td><td>DFDC</td><td>DFW</td><td>Avg.</td></tr><tr><td>×</td><td>×</td><td>×</td><td>74.54</td><td>92.24</td><td>70.81</td><td>59.81</td><td>74.35</td></tr><tr><td>×</td><td>✓</td><td>×</td><td>80.89</td><td>94.53</td><td>77.93</td><td>67.12</td><td>80.12(↑5.77)</td></tr><tr><td>×</td><td>×</td><td>✓</td><td>84.21</td><td>95.03</td><td>80.68</td><td>65.47</td><td>81.35(↑7.00)</td></tr><tr><td>×</td><td>✓</td><td>✓</td><td>95.56</td><td>98.54</td><td>82.21</td><td>74.98</td><td>87.82(↑13.47)</td></tr><tr><td>✓</td><td>×</td><td>✓</td><td>79.87</td><td>94.60</td><td>71.70</td><td>72.47</td><td>79.66(↑5.31)</td></tr><tr><td>✓</td><td>✓</td><td>×</td><td>91.56</td><td>98.27</td><td>78.35</td><td>73.02</td><td>85.30(↑10.95)</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>95.40</td><td>98.43</td><td>86.94</td><td>80.03</td><td>90.20(↑15.85)</td></tr></table>
191
+
192
+ Table 3. Ablation study under the cross-dataset setup of the Consistency branch (C), Heatmap branch (H), and E-FPN.
193
+
194
+ eration. The model is trained for 100 epochs with the SAM optimizer [16], a weight decay of $10^{-4}$ , and a batch size of 16. We apply a learning rate scheduler that increases from $5 \times 10^{-5}$ to $2 \times 10^{-4}$ in the first quarter of the training and then decays to zero in the remaining quarters. We freeze the backbone for the first 6 epochs and only train the remaining layers. For data augmentation, we apply horizontal flipping, random cropping, random scaling, random erasing [56], color jittering, Gaussian noise, blurring, and JPEG compression. The parameters $\lambda_{1}$ and $\lambda_{2}$ , defined in Eq. (8), are set to 10 and 100. Furthermore, label smoothing [34] is utilized as a regularizer. To generate pseudo-fakes, two blending synthesis techniques are considered, namely, Blended Images (BI) [24] and Self-Blended Images (SBI) [41]. All experiments are carried out using a GPU Tesla V-100.
195
+
196
+ # 4.2. Comparison with State-of-the-art
197
+
198
+ In-dataset Evaluation. We compare the performance of LAA-Net to existing methods under the in-dataset protocol of [3, 14, 41, 50, 52, 54]. The first column in Table 1 reports the obtained results on the testing set of $\mathrm{FF}++$ . It can be seen that all methods achieve competitive performance on the forgeries of the $\mathrm{FF}++$ dataset. Our method combined with SBI outperforms all methods with an AUC of $99.96\%$ , while using only real data for training.
199
+
200
+ Cross-dataset Evaluation. We evaluate LAA-Net under the challenging cross-dataset setup [6, 50]. Table 1 reports the obtained results on CDF2, DFW, DFD, and DFDC, respectively. It can be noted that LAA-Net achieves state-of-the-art results on the four considered benchmarks, thereby demonstrating its robustness to different quality of deep-fakes. The best performance is reached using SBI as a data
201
+
202
+ synthesis, confirming the importance of modeling subtle artifacts. The performance of LAA-Net (w/BI) is slightly superior to LAA-Net (w/SBI) only on DFD, with an improvement of $1.08\%$ and $0.4\%$ of AUC and AP, respectively. A plausible explanation could be the fact that deepfake detection in DFD is less challenging. In fact, numerous methods report AUC and AP scores exceeding $98\%$ .
203
+
204
+ Furthermore, LAA-Net clearly outperforms attention-based approaches such as Multi-attentional [55] and SFDG [50] by a margin of $27.14\%$ and $19.57\%$ in terms of AUC and AP on CDF2, respectively. This confirms the superior generalization capabilities of LAA-Net as compared to [50, 55]. These results are further supported by high AR and mF1, which are provided in the supplementary materials.
205
+
206
+ Robustness to Perturbations. Since deepfake videos are easily altered on various social platforms, the robustness of LAA-Net against some common perturbations is investigated. Following the same settings of [14, 18], we evaluate the performance of LAA-Net on $\mathrm{FF}++$ [37] by applying different corruptions. The results are reported in Table 2. As our method focuses on vulnerable points, it can be seen that color-related changes such as saturation and contrast do not impact the performance. However, the proposed method is extremely sensitive to structural perturbations such as Gaussian Noise. In future work, strategies for ensuring more robustness to structural perturbations will be investigated. For instance, denoising methods [30, 40] will be considered for solving this issue.
207
+
208
+ Qualitative Results. We provide Grad-CAMs [39] in Figure 5, to visualize the image regions in deepfakes that are activated by LAA-Net, SBI [41], Xception [37], and Multiattentional (MAT) [55] on $\mathrm{FF}++$ [37]. Generally, attention-based methods such as MAT [55] and LAA-Net focus more on localized regions. However, in some cases, MAT [55] concentrates on irrelevant regions such as the background or the inner face areas. Conversely, LAA-Net consistently identifies blending artifacts and shows interesting capabilities on mouth-rendered Neural Textures (NT).
209
+
210
+ # 4.3. Ablation Study
211
+
212
+ Table 3 reports the cross-dataset performance of LAA-Net when discarding the following components: E-FPN, the consistency branch denoted by C and the heatmap branch denoted by H. The best performance is reached when all the components are integrated. It can be seen that the proposed explicit attention mechanism through the heatmap branch contributes more to improving the result. A qualitative example visualizing Grad-CAMs [39] with different components of LAA-Net is also given in Figure 6. The illustration clearly shows that by combining the three components, the network activates more precisely the blending region.
213
+
214
+ <table><tr><td></td><td colspan="5">EFNB4</td><td colspan="7">Test Set AUC (%)</td><td></td></tr><tr><td colspan="2"></td><td colspan="4">E-FPN Integration</td><td colspan="2">CDF2</td><td colspan="2">DFD</td><td colspan="2">DFW</td><td>DFDC</td><td></td></tr><tr><td></td><td>F(6)</td><td>F(5)</td><td>F(4)</td><td>F(3)</td><td>F(2)</td><td>FPN</td><td>E-FPN</td><td>FPN</td><td>E-FPN</td><td>FPN</td><td>E-FPN</td><td>FPN</td><td>E-FPN</td></tr><tr><td>(a)</td><td>✓</td><td></td><td></td><td></td><td></td><td colspan="2">91.56</td><td colspan="2">98.27</td><td colspan="2">73.02</td><td>78.35</td><td></td></tr><tr><td>(b)</td><td>✓</td><td>✓</td><td></td><td></td><td></td><td>93.42</td><td>91.79</td><td>98.59</td><td>97.12</td><td>73.78</td><td>71.39</td><td>78.40</td><td></td></tr><tr><td>(c)</td><td>✓</td><td>✓</td><td>✓</td><td></td><td></td><td>88.72</td><td>92.86</td><td>97.96</td><td>98.95</td><td>69.40</td><td>74.93</td><td>71.91</td><td></td></tr><tr><td>(d)</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td></td><td>88.35</td><td>95.40</td><td>98.89</td><td>98.43</td><td>70.94</td><td>80.03</td><td>79.02</td><td></td></tr><tr><td>(e)</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>92.16</td><td>94.22</td><td>96.58</td><td>97.31</td><td>65.17</td><td>72.54</td><td>74.31</td><td></td></tr><tr><td colspan="6">Avg</td><td>90.84</td><td>93.16</td><td>98.06</td><td>98.02</td><td>70.46</td><td>74.38</td><td>76.40</td><td></td></tr></table>
215
+
216
+ ![](images/ea22e00fbbf8d32b588de507b966b024503460d75de2038f47077693ca1871fd.jpg)
217
+ Figure 5. Grad-CAM [39] visualization on different types of manipulation from FF++ [37]. LAA-Net is compared to SBI [41], Xception [37], and MAT [55].
218
+
219
+ ![](images/6e6a42be5c4609a7143f710eac6145cffc40bef55288741f8caa42d04a8014bb.jpg)
220
+ Figure 6. GradCAM [39] visualization of different components in LAA-Net. w/o E-FPN, w/o H, and w/o C refer to ablating E-FPN, heatmap branch, and self-consistency branch, respectively.
221
+
222
+ # 4.4. E-FPN versus Traditional FPN
223
+
224
+ To assess the effectiveness of the low-level features injected by E-FPN into the final feature representation, we combine different feature levels and compare the results of E-FPN and traditional FPN [26, 27] in Table 4. It can be seen that in general E-FPN outperforms FPN except for $\mathbf{F}^{(5)}$ . This confirms the relevance of employing multi-scale features and the need for reducing their redundancy in the context of deepfake detection.
225
+
226
+ # 4.5. Sensitivity Analysis
227
+
228
+ In this subsection, we analyze the impact of the two hyperparameters, $\lambda_{1}$ and $\lambda_{2}$ given in Eq. (8). Table 5 shows
229
+
230
+ the experimental results for different values of $\lambda_{1}$ and $\lambda_{2}$ . It can be noted that our model is robust to different hyperparameter values, with the best average performance obtained with $\lambda_{1} = 10$ and $\lambda_{2} = 100$ .
231
+
232
+ Table 4. Traditional FPN versus E-FPN, using the SBI-based data synthesis under the cross-dataset protocol. Bold and Underline indicate the best and the second-best performance, respectively. We report the results when integrating features $\mathbf{F}^{(i)}$ from different layers.
233
+
234
+ <table><tr><td rowspan="2">λ1</td><td rowspan="2">λ2</td><td colspan="4">Test Set AUC (%)</td></tr><tr><td>CDF2</td><td>DFDC</td><td>DFW</td><td>Avg</td></tr><tr><td>1</td><td>1</td><td>90.69</td><td>78.12</td><td>70.98</td><td>79.93</td></tr><tr><td>10</td><td>10</td><td>95.73</td><td>85.87</td><td>73.56</td><td>85.05</td></tr><tr><td>100</td><td>100</td><td>93.72</td><td>78.60</td><td>75.25</td><td>82.52</td></tr><tr><td>100</td><td>10</td><td>93.05</td><td>83.86</td><td>76.72</td><td>84.54</td></tr><tr><td>10</td><td>100</td><td>95.40</td><td>86.94</td><td>80.03</td><td>87.46</td></tr></table>
235
+
236
+ Table 5. Sensitivity analysis: The impact of the hyper-parameters $\lambda_{1}$ and $\lambda_{2}$ using the cross-dataset protocol on three datasets in terms of AUC.
237
+
238
+ # 5. Conclusion
239
+
240
+ In this paper, a fine-grained deepfake detection method called LAA-Net is introduced with the aim of detecting high-quality deepfakes while remaining generic to unseen manipulations. For that purpose, two different components are proposed. On the one hand, we argue that by making the network focus on the most vulnerable points, we can detect both global and subtle artifacts. To this end, an explicit attention mechanism within a multi-task learning framework is used. In addition to the binary classification branch, heatmap and self-consistency branches are defined with respect to the vulnerable points. On the other hand, a novel E-FPN module for aggregating multi-scale features is proposed; hence enabling the integration of more localized features. The results reported on several benchmarks show the superiority of LAA-Net as compared to the state-of-the-art, including attention-based methods. In future works, strategies for improving the robustness to noise will be investigated. In addition, an attempt to extend this idea by taking into account the temporal dimension will be explored.
241
+
242
+ # Acknowledgment
243
+
244
+ This work is supported by the Luxembourg National Research Fund, under the BRIDGES2021/IS/16353350/FaKeDeTeR and UNFAKE, ref.16763798 projects, and by POST Luxembourg.
245
+
246
+ # Overview
247
+
248
+ This document provides supplementary material complementing the main manuscript. It is structured as follows. First, the computation of the self-consistency loss and the ground truth generation of heatmaps are described. Second, more quantitative and qualitative results are provided. In particular, additional metrics are reported for both in-dataset and cross-dataset settings. Moreover, qualitative results comparing E-FPN and FPN are shown.
249
+
250
+ # 6. Self-Consistency Loss
251
+
252
+ ![](images/66add1d1914f6056849f872c578d4d9d28d8331689f06e281363dc1f02cdf984.jpg)
253
+ Figure 7. In order to generate the consistency map prediction $\hat{\mathbf{C}}$ as well as the associated ground truth $\mathbf{C}$ , we first randomly select a vulnerable point located at $\mathbf{p}^s$ . For computing $\hat{\mathbf{C}}$ , we measure the similarity between the feature at $\mathbf{p}^s$ (red block) and the features generated from every point. Namely, we use the similarity function in [54]. As for $\mathbf{C}$ , we measure the consistency values between the pixel at the $\mathbf{p}^s$ and all pixels in $\mathbf{B}$ , as also described in Eq. (7) of the manuscript.
254
+
255
+ ![](images/a9cd9118ea1f85eb6dff05d0d62c24ca816e787e6958c6e8a68802427daf2d3a.jpg)
256
+ Figure 8. Feature visualization by gradCAM [39] between $E$ -FPN and FPN with different integration of multi-scale layers. It shows that E-FPN can focus better on artifacts as compared to FPN. The setup details are provided in Table 4 as shown in the manuscript.
257
+
258
+ To clarify the calculation of the self-consistency loss, we show Figure 7, which illustrates the generation process of the predicted and the ground-truth, $\hat{\mathbf{C}}$ and $\mathbf{C}$ , respectively.
259
+
260
+ <table><tr><td rowspan="2">Method</td><td colspan="2">Training Set</td><td colspan="5">FF++ [37]</td></tr><tr><td>Real</td><td>Fake</td><td>ACC</td><td>AUC</td><td>AP</td><td>AR</td><td>mF1</td></tr><tr><td>Ours w/ BI [24]</td><td>✓</td><td></td><td>99.03</td><td>99.95</td><td>99.99</td><td>99.21</td><td>99.60</td></tr><tr><td>Ours w/ SBI [41]</td><td>✓</td><td></td><td>99.04</td><td>99.96</td><td>99.99</td><td>99.29</td><td>99.64</td></tr></table>
261
+
262
+ Table 6. In-dataset evaluation on FF++ [37] reported by ACC, AUC, AP, AR, and mF1.
263
+
264
+ The self-consistency loss is a binary cross entropy loss between $\hat{\mathbf{C}}$ and $\mathbf{C}$ .
265
+
266
+ # 7. Ground Truth Generation of Heatmaps
267
+
268
+ In this section, we provide more details regarding the generation of ground-truth heatmaps, described in Section 3.1.2. Firstly, a $k$ -th vulnerable point, denoted as $\mathbf{p}^k$ , is selected, as shown in Figure 9 (i). Secondly, we measure the height and the width of the blending mask $\mathbf{B}$ at the point $\mathbf{p}^k$ shown as orange lines in Figure 9 (ii). Using the calculated distances, a virtual bounding box is created, indicated by the blue box in Figure 9 (iii). Then, we identify overlapping boxes, illustrated by dashed-line green boxes in Figure 9 (iv), with the Intersection over Union (IoU) greater than a threshold ( $t = 0.7$ ) compared to the virtual bounding box. A radius $r_k$ (solid purple line in Figure 9 (v)) is calculated by forming a tight circle encompassing all these boxes. Finally, an Unnormalized Gaussian Distribution, shown as a red circle in Figure 9 (vi), is generated with a standard deviation $\sigma_k = \frac{1}{3} r_k$ (Eq. (4) of the manuscript). The steps are repeated for every vulnerable point $k \in [[1, \mathrm{card}(\mathcal{P})]]$ . The final $\mathbf{H}$ is the superimposition of all $g_{ij}^k$ .
269
+
270
+ # 8. Additional Results
271
+
272
+ In addition to AUC, we provide results using additional metrics, namely, Average Precision (AP), Average Recall (AR), Accuracy (ACC), and mean F1-score (mF1).
273
+
274
+ Table 6 and Table 7 report the results under the in-dataset and the cross-dataset settings, respectively. Overall, it can be seen that LAA-Net achieves better performances than other state-of-the-art methods.
275
+
276
+ # 8.1. Qualitative Results: E-FPN versus FPN
277
+
278
+ A qualitative comparison between the proposed E-FPN and the traditional FPN with different fusion settings is reported in Figure 8. Using EfficientNet-B4 [44] (EFNB4) as our backbone, the $\mathbf{F}^{(6)}$ refers to the features extracted from the last convolution block in the backbone. In other words, this means that no FPN design is integrated. By gradually aggregating features from lower to higher resolution layers, we can observe the improvement of the forgery localization ability for both E-FPN and FPN. More notably, E-FPN produces more precise activations on the blending boundaries as compared to FPN. This can be explained by the fact that the E-FPN integrates a filtering mechanism for learning less
279
+
280
+ ![](images/8046b9c4244f83e5ec58f2b21d27b54811976d7f5bc1c343118c1ec7ac8b1b7b.jpg)
281
+ Figure 9. The generation process of ground-truth heatmaps, produced using an Unnormalized Gaussian Distribution given a selected vulnerable point.
282
+
283
+ <table><tr><td rowspan="3">Method</td><td rowspan="3">Fake</td><td colspan="16">Test set (%)</td></tr><tr><td colspan="4">CDF2</td><td colspan="4">DFW</td><td colspan="4">DFD</td><td colspan="4">DFDC</td></tr><tr><td>AUC</td><td>AP</td><td>AR</td><td>mF1</td><td>AUC</td><td>AP</td><td>AR</td><td>mF1</td><td>AUC</td><td>AP</td><td>AR</td><td>mF1</td><td>AUC</td><td>AP</td><td>AR</td><td>mF1</td></tr><tr><td>Xception [37]</td><td>✓</td><td>61.18</td><td>66.93</td><td>52.40</td><td>58.78</td><td>65.29</td><td>55.37</td><td>57.99</td><td>56.65</td><td>89.75</td><td>85.48</td><td>79.34</td><td>82.29</td><td>69.90</td><td>91.98</td><td>67.07</td><td>77.57</td></tr><tr><td>FaceXRay+BI [24]</td><td>✓</td><td>79.5</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>95.40</td><td>93.34</td><td>-</td><td>-</td><td>65.5</td><td>-</td><td>-</td><td>-</td></tr><tr><td>LRNet [43]</td><td>✓</td><td>53.20</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>52.29</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>LocalRL [8]</td><td>✓</td><td>78.26</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>89.24</td><td>-</td><td>-</td><td>-</td><td>76.53</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Tl2Net [28]</td><td>✓</td><td>68.22</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>72.03</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Multi-attentional [55]</td><td>✓</td><td>68.26</td><td>75.25</td><td>52.40</td><td>61.78</td><td>73.56</td><td>73.79</td><td>63.38</td><td>68.19</td><td>92.95</td><td>96.51</td><td>60.76</td><td>74.57</td><td>63.02</td><td>-</td><td>-</td><td>-</td></tr><tr><td>RECCE 
[6]</td><td>✓</td><td>70.93</td><td>70.35</td><td>59.48</td><td>64.46</td><td>68.16</td><td>54.41</td><td>56.59</td><td>55.48</td><td>98.26</td><td>79.42</td><td>69.57</td><td>74.17</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>SFDG [50]</td><td>✓</td><td>75.83</td><td>-</td><td>-</td><td>-</td><td>69.27</td><td>-</td><td>-</td><td>-</td><td>88.00</td><td>-</td><td>-</td><td>-</td><td>73.63</td><td>-</td><td>-</td><td>-</td></tr><tr><td>EIC+IIE [20]</td><td>✓</td><td>83.80</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>93.92</td><td>-</td><td>-</td><td>-</td><td>81.23</td><td>-</td><td>-</td><td>-</td></tr><tr><td>AltFreezing [52]</td><td>✓</td><td>89.50</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>98.50</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>CADDM [14]</td><td>✓</td><td>93.88</td><td>91.12</td><td>77.00</td><td>83.46</td><td>74.48</td><td>75.23</td><td>65.26</td><td>69.89</td><td>99.03</td><td>99.59</td><td>82.17</td><td>90.04</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>UCF [53]</td><td>✓</td><td>82.4</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>94.5</td><td>-</td><td>-</td><td>-</td><td>80.5</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Controllable GS [17]</td><td>✓</td><td>84.97</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>81.65</td><td>-</td><td>-</td><td>-</td></tr><tr><td>PCL+I2G [54]</td><td></td><td>90.03</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>99.07</td><td>-</td><td>-</td><td>-</td><td>74.27</td><td>-</td><td>-</td><td>-</td></tr><tr><td>SBI 
[41]</td><td></td><td>93.18</td><td>85.16</td><td>82.68</td><td>83.90</td><td>67.47</td><td>55.87</td><td>55.82</td><td>55.85</td><td>97.56</td><td>92.79</td><td>89.49</td><td>91.11</td><td>86.15</td><td>93.24</td><td>71.58</td><td>80.99</td></tr><tr><td>AUNet [3]</td><td></td><td>92.77</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>99.22</td><td>-</td><td>-</td><td>-</td><td>86.16</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Ours (w/ BI)</td><td></td><td>86.28</td><td>91.93</td><td>50.01</td><td>64.78</td><td>57.13</td><td>56.89</td><td>50.12</td><td>53.29</td><td>99.51</td><td>99.80</td><td>95.47</td><td>97.59</td><td>69.69</td><td>93.67</td><td>50.12</td><td>65.30</td></tr><tr><td>Ours (w/ SBI)</td><td></td><td>95.40</td><td>97.64</td><td>87.71</td><td>92.41</td><td>80.03</td><td>81.08</td><td>65.66</td><td>72.56</td><td>98.43</td><td>99.40</td><td>88.55</td><td>93.64</td><td>86.94</td><td>97.70</td><td>73.37</td><td>83.81</td></tr></table>
284
+
285
+ Table 7. Cross-dataset evaluation in terms of AUC, AP, AR, and mF1 (%) on CDF2 [25], DFW [57], DFD [15], and DFDC [13]. Bold and underlined highlight the best and the second-best performance, respectively. $\checkmark$ symbol is used to depict methods that utilized both Real data and Fake data for training.
286
+
287
+ ![](images/daaa2db2ce59ff631c396feb9def0288c6f73ac00946994187977adebb598b8f.jpg)
288
+ Figure 10. Detection of vulnerable points w/o and w/ Gaussian noise.
289
+
290
+ noise. In contrast, FPN seems to consider regions outside the blending boundary, which results in lower performance as previously shown in Table 4 - Section 4.4 of the main manuscript.
291
+
292
+ # 8.2. Qualitative Results: Gaussian Noise
293
+
294
+ In Table 2 of the main manuscript, the performance of LAA-Net declined significantly when encountering Gaussian
295
+
296
+ Noise perturbations. One possible reason is that the introduction of noise elevates the difficulty of detecting the vulnerable points. To confirm that, we report the inference of the heatmap before and after applying a Gaussian Noise on a facial image in Figure 10. As it can be observed, the detection of vulnerable points is highly impacted with the introduction of a Gaussian noise.
297
+
298
+ # 8.3. Robustness to Compression
299
+
300
+ To assess the robustness of LAA-Net to compression, we test LAA-Net on the c23 version of FF++, and the overall AUC is equal to $89.30\%$ .
301
+
302
+ # References
303
+
304
+ [1] Darius Afchar, Vincent Nozick, Junichi Yamagishi, and Isao Echizen. Mesonet: a compact facial video forgery detection network. CoRR, abs/1809.00888, 2018. 2
305
+ [2] Babajide O Ayinde, Tamer Inanc, and Jacek M Zurada. Reg-
306
+
307
+ ularizing deep neural networks by enhancing diversity in feature extraction. IEEE transactions on neural networks and learning systems, 30(9):2650-2661, 2019. 2, 5
308
+ [3] Weiming Bai, Yufan Liu, Zhipeng Zhang, Bing Li, and Weiming Hu. Aunet: Learning relations between action units for face forgery detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 24709-24719, 2023. 2, 3, 6, 7, 10
309
+ [4] Sarah Cahlan. How misinformation helped spark an attempted coup in Gabon. https://wapo.st/3KZARDF, 2020. [Online; accessed 7-March-2023]. 1
310
+ [5] Zhixi Cai, Shreya Ghosh, Kalin Stefanov, Abhinav Dhall, Jianfei Cai, Hamid Rezatofighi, Reza Haffari, and Munawar Hayat. Marlin: Masked autoencoder for facial video representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1493-1504, 2023. 2
311
+ [6] Junyi Cao, Chao Ma, Taiping Yao, Shen Chen, Shouhong Ding, and Xiaokang Yang. End-to-end reconstruction-classification learning for face forgery detection. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4103-4112, 2022. 1, 2, 6, 7, 10
312
+ [7] Liang Chen, Yong Zhang, Yibing Song, Lingqiao Liu, and Jue Wang. Self-supervised learning of adversarial example: Towards good generalizations for deepfake detection, 2022. 1, 2
313
+ [8] Shen Chen, Taiping Yao, Yang Chen, Shouhong Ding, Jilin Li, and R. Ji. Local relation learning for face forgery detection. In AAAI Conference on Artificial Intelligence, 2021. 6, 10
314
+ [9] François Chollet. Xception: Deep learning with depthwise separable convolutions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1251-1258, 2017. 2, 3, 7
315
+ [10] Davide Coccomini, Nicola Messina, Claudio Gennaro, and Fabrizio Falchi. Combining efficientnet and vision transformers for video deepfake detection. CoRR, abs/2107.02612, 2021. 2
316
+ [11] Deepfakes. Faceswapdevs. https://github.com/deepfakes/faceswap, 2019. 5
317
+ [12] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, K. Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 248–255, 2009. 6
318
+ [13] Brian Dolhansky, Russ Howes, Ben Pflaum, Nicole Baram, and Cristian Canton-Ferrer. The deepfake detection challenge (DFDC) preview dataset. CoRR, abs/1910.08854, 2019. 2, 6, 10
319
+ [14] Shichao Dong, Jin Wang, Renhe Ji, Jiajun Liang, Haoqiang Fan, and Zheng Ge. Implicit identity leakage: The stumbling block to improving deepfake detection generalization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3994-4004, 2023. 1, 2, 5, 6, 7, 10
320
+ [15] Nick Dufour and Andrew Gully. Contributing data to deepfake detection research.
321
+
322
+ https://ai.googleblog.com/2019/09/contributing-data-to-deepfake-detection.html,
323
+ 2019. 2, 6, 10
324
+ [16] Pierre Foret, Ariel Kleiner, Hossein Mobahi, and Behnam Neyshabur. Sharpness-aware minimization for efficiently improving generalization. CoRR, abs/2010.01412, 2020. 7
325
+ [17] Ying Guo, Cheng Zhen, and Pengfei Yan. Controllable guide-space for generalizable face forgery detection. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 20818-20827, 2023. 6, 10
326
+ [18] Alexandros Haliassos, Konstantinos Vougioukas, Stavros Petridis, and Maja Pantic. Lips don't lie: A generalisable and robust approach to face forgery detection. CoRR, abs/2012.07657, 2020. 7
327
+ [19] Alexandros Haliassos, Rodrigo Mira, Stavros Petridis, and Maja Pantic. Leveraging real talking faces via self-supervision for robust forgery detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14950-14962, 2022. 2
328
+ [20] Baojin Huang, Zhongyuan Wang, Jifan Yang, Jiaxin Ai, Qin Zou, Qian Wang, and Dengpan Ye. Implicit identity driven deepfake face swapping detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4490-4499, 2023. 6, 10
329
+ [21] Davis E. King. Dlib-ml: A machine learning toolkit. J. Mach. Learn. Res., 10:1755–1758, 2009. 6
330
+ [22] Marek Kowalski. Faceswap. https://github.com/MarekKowalski/FaceSwap, 2018. 5
331
+ [23] Hei Law and Jia Deng. Cornernet: Detecting objects as paired keypoints. International Journal of Computer Vision, 128:642-656, 2018. 4
332
+ [24] Lingzhi Li, Jianmin Bao, Ting Zhang, Hao Yang, Dong Chen, Fang Wen, and Baining Guo. Face x-ray for more general face forgery detection. CoRR, abs/1912.13458, 2019. 1, 2, 3, 4, 6, 7, 9, 10
333
+ [25] Yuezun Li, Xin Yang, Pu Sun, Honggang Qi, and Siwei Lyu. Celeb-df: A new dataset for deepfake forensics. CoRR, abs/1909.12962, 2019. 1, 2, 6, 10
334
+ [26] Tsung-Yi Lin, Priya Goyal, Ross B. Girshick, Kaiming He, and Piotr Dollár. Focal loss for dense object detection. CoRR, abs/1708.02002, 2017. 4, 5, 6, 8
335
+ [27] Tsung-Yi Lin, Piotr Dollár, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Belongie. Feature pyramid networks for object detection. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2117-2125, 2017. 2, 5, 8
336
+ [28] Baoping Liu, Bo Liu, Ming Ding, Tianqing Zhu, and Xin Yu. Ti2net: Temporal identity inconsistency network for deepfake detection. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), pages 4691-4700, 2023. 6, 10
337
+ [29] Liqian Ma, Xu Jia, Qianru Sun, Bernt Schiele, Tinne Tuytelaars, and Luc Van Gool. Pose guided person image generation. Advances in neural information processing systems, 30, 2017. 2, 6
338
+ [30] Youssef Mansour and Reinhard Heckel. Zero-shot noise2noise: Efficient image denoising without any data. In
339
+
340
+ Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 14018-14027, 2023. 7
341
+ [31] Falko Matern, Christian Riess, and Marc Stamminger. Exploiting visual artifacts to expose deepfakes and face manipulations. In 2019 IEEE Winter Applications of Computer Vision Workshops (WACVW), pages 83–92, 2019. 2
342
+ [32] Nesryne Mejri, Konstantinos Papadopoulos, and Djamila Aouada. Leveraging high-frequency components for deepfake detection. In 2021 IEEE 23rd International Workshop on Multimedia Signal Processing (MMSP), pages 1-6, 2021. 2
343
+ [33] Nesryne Mejri, Enjie Ghorbel, and Djamila Aouada. Untag: Learning generic features for unsupervised type-agnostic deepfake detection. In ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-5, 2023. 2
344
+ [34] Rafael Müller, Simon Kornblith, and Geoffrey E. Hinton. When does label smoothing help? CoRR, abs/1906.02629, 2019. 7
345
+ [35] Huy H. Nguyen, Junichi Yamagishi, and Isao Echizen. Capsule-forensics: Using capsule networks to detect forged images and videos. CoRR, abs/1810.11215, 2018. 2
346
+ [36] Thanh Thi Nguyen, Cuong M. Nguyen, Dung Tien Nguyen, Duc Thanh Nguyen, and Saeid Nahavandi. Deep learning for deepfakes creation and detection. CoRR, abs/1909.11573, 2019. 2
347
+ [37] Andreas Rössler, Davide Cozzolino, Luisa Verdoliva, Christian Riess, Justus Thies, and Matthias Nießner. FaceForensics++: Learning to detect manipulated facial images. In International Conference on Computer Vision (ICCV), 2019. 1, 2, 5, 6, 7, 8, 9, 10
348
+ [38] Selim S. Seferbekov, Vladimir I. Iglovikov, Alexander V. Buslaev, and Alexey A. Shvets. Feature pyramid network for multi-class land segmentation. CoRR, abs/1806.03510, 2018. 5
349
+ [39] Ramprasaath R. Selvaraju, Abhishek Das, Ramakrishna Vedantam, Michael Cogswell, Devi Parikh, and Dhruv Batra. Grad-cam: Why did you say that? visual explanations from deep networks via gradient-based localization. CoRR, abs/1610.02391, 2016. 7, 8, 9
350
+ [40] Zehua Sheng, Zhu Yu, Xiongwei Liu, Si-Yuan Cao, Yuqi Liu, Hui-Liang Shen, and Huaqi Zhang. Structure aggregation for cross-spectral stereo image guided denoising. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 13997-14006, 2023. 7
351
+ [41] Kaede Shiohara and Toshihiko Yamasaki. Detecting deepfakes with self-blended images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18720-18729, 2022. 1, 2, 3, 6, 7, 8, 9, 10
352
+ [42] Inder Pal Singh, Nesryne Mejri, van Dat Nguyen, Enjie Ghorbel, and Djamila Aouada. Multi-label deepfake classification. IEEE Workshop on Multimedia Signal Processing, 2023. 2
353
+
354
+ [43] Zekun Sun, Yujie Han, Zeyu Hua, Na Ruan, and Weijia Jia. Improving the efficiency and robustness of deepfakes detection through precise geometric features. 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3608-3617, 2021. 6, 10
355
+ [44] Mingxing Tan and Quoc V. Le. Efficientnet: Rethinking model scaling for convolutional neural networks. CoRR, abs/1905.11946, 2019. 2, 3, 6, 9
356
+ [45] Justus Thies, Michael Zollhöfer, and Matthias Nießner. Deferred neural rendering: Image synthesis using neural textures. CoRR, abs/1904.12356, 2019. 5
357
+ [46] Justus Thies, Michael Zollhöfer, Marc Stamminger, Christian Theobalt, and Matthias Nießner. Face2face: Real-time face capture and reenactment of RGB videos. CoRR, abs/2007.14808, 2020. 5
358
+ [47] Zhi Tian, Chunhua Shen, Hao Chen, and Tong He. FCOS: fully convolutional one-stage object detection. CoRR, abs/1904.01355, 2019. 5
359
+ [48] Jane Wakefield. Deepfake presidents used in Russia-Ukraine war. https://www.bbc.com/news/technology-60780142, 2022. [Online; accessed 7-March-2023]. 1
360
+ [49] Run Wang, Lei Ma, Felix Juefei-Xu, Xiaofei Xie, Jian Wang, and Yang Liu. Fakespotter: A simple baseline for spotting ai-synthesized fake faces. CoRR, abs/1909.06122, 2019. 2
361
+ [50] Yuan Wang, Kun Yu, Chen Chen, Xiyuan Hu, and Silong Peng. Dynamic graph learning with content-guided spatial-frequency relation reasoning for deepfake detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7278-7287, 2023. 2, 3, 6, 7, 10
362
+ [51] Zhou Wang, A.C. Bovik, H.R. Sheikh, and E.P. Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE Transactions on Image Processing, 13(4): 600-612, 2004. 2
363
+ [52] Zhendong Wang, Jianmin Bao, Wengang Zhou, Weilun Wang, and Houqiang Li. Altfreezing for more general video face forgery detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4129-4138, 2023. 2, 3, 6, 7, 10
364
+ [53] Zhiyuan Yan, Yong Zhang, Yanbo Fan, and Baoyuan Wu. Ucf: Uncovering common features for generalizable deepfake detection. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 22412-22423, 2023. 6, 10
365
+ [54] Eric Zhao, Xiang Xu, Mingze Xu, Hui Ding, Yuanjun Xiong, and Wei Xia. Learning self-consistency for deepfake detection. In ICCV 2021, 2021. 1, 2, 4, 5, 6, 7, 9, 10
366
+ [55] Hanqing Zhao, Wenbo Zhou, Dongdong Chen, Tianyi Wei, Weiming Zhang, and Nenghai Yu. Multi-attentional deepfake detection. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2185–2194, 2021. 1, 2, 3, 5, 6, 7, 8, 10
367
+ [56] Zhun Zhong, Liang Zheng, Guoliang Kang, Shaozi Li, and Yi Yang. Random erasing data augmentation. CoRR, abs/1708.04896, 2017. 7
368
+ [57] Bojia Zi, Minghao Chang, Jingjing Chen, Xingjun Ma, and Yu-Gang Jiang. Wilddeepfake: A challenging real-world
2401.13xxx/2401.13856/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:72db58edc73e98d96d82ef9b9883ba746c12e0242bcde206e8ef1b6ff8c2e9fb
3
+ size 776021
2401.13xxx/2401.13856/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.13xxx/2401.13858/63fe44b6-4320-4a36-9076-2fb83cd8d8db_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.13xxx/2401.13858/63fe44b6-4320-4a36-9076-2fb83cd8d8db_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.13xxx/2401.13858/63fe44b6-4320-4a36-9076-2fb83cd8d8db_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd678ca6a795299e0e74aac323806e8de8071f3ff2eda1ef675ee459fcc189b9
3
+ size 9791231
2401.13xxx/2401.13858/full.md ADDED
@@ -0,0 +1,568 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Graph Diffusion Transformers for Multi-Conditional Molecular Generation
2
+
3
+ Gang Liu, Jiaxin Xu, Tengfei Luo, Meng Jiang
4
+ University of Notre Dame
5
+ {gliu7, jxu24, tluo, mjiang2}@nd.edu
6
+
7
+ # Abstract
8
+
9
+ Inverse molecular design with diffusion models holds great potential for advancements in material and drug discovery. Despite success in unconditional molecular generation, integrating multiple properties such as synthetic score and gas permeability as condition constraints into diffusion models remains unexplored. We present the Graph Diffusion Transformer (Graph DiT) for multi-conditional molecular generation. Graph DiT integrates an encoder to learn numerical and categorical property representations with the Transformer-based denoiser. Unlike previous graph diffusion models that add noise separately on the atoms and bonds in the forward diffusion process, Graph DiT is trained with a novel graph-dependent noise model for accurate estimation of graph-related noise in molecules. We extensively validate Graph DiT for multi-conditional polymer and small molecule generation. Results demonstrate the superiority of Graph DiT across nine metrics from distribution learning to condition control for molecular properties. A polymer inverse design task for gas separation with feedback from domain experts further demonstrates its practical utility.
10
+
11
+ # 1 Introduction
12
+
13
+ Diffusion models for molecular graphs are essential for inverse design of materials and drugs by generating molecules and polymers (macro-molecules) [40, 46], because the models can be effectively trained to predict discrete graph structures and atom/bond types in denoising processes [43]. Practical inverse designs consider multiple factors such as molecular synthetic score and various properties [15], known as the task of multi-conditional graph generation.
14
+
15
+ Existing work converted multiple conditions into a single one and solved the task as single-condition generation [5, 25]. However, multi-property relations may not be properly or explicitly defined [5]. First, the properties have diverse scales and units. For example, the synthetic complexity ranges from 1 to 5 [8], while the gas permeability varies widely, exceeding 10,000 in Barrier units [4]. This gap makes it hard for models to balance the conditions. Second, multi-conditions consist of a mix of categorical and numerical properties. The common practice of addition [47] or multiplication [25] is inadequate for combination.
16
+
17
+ Figure 1(a) empirically illustrates the challenges in multi-conditional generation, i.e., discovering molecules meeting multiple properties. We used a test set of 100 data points with three properties: synthesizability (Synth.) [12], $\mathrm{O}_2$ and $\mathrm{N}_2$ permeability ( $\mathrm{O}_2\mathrm{Perm}$ and $\mathrm{N}_2\mathrm{Perm}$ ) [4]. A single-conditional diffusion model generated up to 30 graphs for each condition, resulting in a total of 90 graphs for three conditions. We sort the 30 graphs in each set using a polymer property Oracle (see appendix B.3). Then, we check whether a shared polymer structure that meets multi-property constraints can be identified across different condition sets. If we find the polymer, its rank $K$ (where $K$ is between 1 and 30) indicates how high it appears on the lists, considering all condition sets. If not, we set $K$ as 30. Figure 1(a) shows the frequency distribution of $K$ on the 100 test cases. The median $K$ was
18
+
19
+ ![](images/5b073f249b8e70c144ffdb5e9802ce766e19779202b93047c7fb56e8ec4496e5.jpg)
20
+ (a) Existing work's limitation: A median rank of 30 showed that on fewer than half test polymers, the sets of generated graphs from different single conditions intersected, indicating a failure to generate polymers meeting multiple properties.
21
+ Figure 1: Multi-conditional diffusion guidance in (b) generates polymers of higher property accuracy than existing work in (a). Explanations are in Section 1 and details are in appendix B.3.
22
+
23
+ ![](images/a668785c3866041defbb33362830f8aa33200eabe28db40b7fc3dd55e5599b74.jpg)
24
+ (b) Proposed work: Our idea, multi-conditional guidance for diffusion models, successfully generated polymers that satisfied multi-property constraints. It achieved a higher rank than 30 in any set of the single-conditional generated graphs.
25
+
26
+ 30, indicating that the multiple properties were not met on over half of the test polymers despite generating a large number of graphs.
27
+
28
+ To address these challenges, we project multi-properties into representations by learning, thereby guiding the diffusion process for molecule generation. We propose the Graph Diffusion Transformer (Graph DiT) for graph denoising under conditions. Graph DiT has a condition encoder for property representation learning and a graph denoiser. The condition encoder utilizes a novel clustering-based method for numerical properties and one-hot encoding for categorical ones to learn multi-property representations. The graph denoiser first integrates node and edge features into graph tokens, then denoises these tokens with adaptive layer normalization (AdaLN) in Transformer layers [19, 34]. AdaLN replaces the molecular statistics (mean and variance) in each hidden layer with those from the condition representation, effectively outperforming other predictor-based and predictor-free conditioning methods [22, 43, 34], as shown in Section 4.4. We observe that existing forward diffusion processes [43, 22] apply noise separately to atoms and bonds, which may compromise the accuracy of Graph DiT in noise estimation. Hence, we propose a novel graph-dependent noise model that effectively applies noise tailored to the dependencies between atoms and bonds within the graph.
29
+
30
+ Results in Figure 1(b) show that the polymers generated by Graph DiT closely align with multi-property constraints. For each test case, we have one graph generated from Graph DiT conditional on three properties. The Oracle determines the rank of this graph among 30 single-conditionally generated graphs for each condition. We find the median ranks are 4, 9, and 11, for Synth., $\mathrm{O}_2$ Perm, and $\mathrm{N}_2$ Perm, respectively, all much higher than 30. Note that the ranked set of 30 graphs was very competitive because the model was trained on the specific condition dedicatedly.
31
+
32
+ In experiments, we evaluate model performance on one polymer and three small molecule datasets. The polymer dataset includes four numerical conditions for multi-conditional evaluation. Our model has the lowest average mean absolute error (MAE), significantly reducing the error by $17.86\%$ compared to the best baseline. It also excels in small molecule tasks, achieving over 0.9 accuracy on task-related categorical conditions, notably surpassing the baseline accuracy of less than 0.6. We also examine the model's utility in inverse polymer designs for $\mathrm{O}_2 / \mathrm{N}_2$ gas separation, with domain expert feedback highlighting our model's practical utility in multi-conditional molecular design.
33
+
34
+ # 2 Problem Definition
35
+
36
+ # 2.1 Multi-Conditional Inverse Molecular Design
37
+
38
+ A molecular graph $G = (V, E)$ consists of a set of nodes (atoms) $V$ and edges (bonds) $E$ . We follow [43] and define "non-bond" as a type of edge. There are $N$ atoms and each atom has a one-hot encoding, denoting the atom type. We represent it as $\mathbf{X}_V \in \mathbb{R}^{N \times F_V}$ , where $F_V$ is the total number of atom types. Similarly, the bond features are a tensor $\mathbf{X}_E \in \mathbb{R}^{N \times N \times F_E}$ , representing both the graph structure and $F_E$ bond types.
39
+
40
+ Let $\mathcal{C} = \{c_1, c_2, \ldots, c_M\}$ be a set of $M$ numerical and categorical conditions. The task is: $q(G \mid c_1, c_2, \ldots, c_M) \propto q(G) q(c_1, c_2, \ldots, c_M \mid G)$ , where $q$ represents observed probability. We use a model parameterized by $\theta$ for multi-conditional molecular generation $p_{\theta}(G \mid \mathcal{C})$ . The evaluation involves both distribution learning $q(G)$ [35] and condition control $q(c_1, c_2, \ldots, c_M \mid G)$ . We follow previous work in assuming that there exist different oracle functions $\mathcal{O}$ that can independently evaluate each conditioned property [14]: $q(c_1, c_2, \ldots, c_M \mid G) = \prod_{i=1}^{M} \mathcal{O}_i(c_i \mid G)$ . Note that the oracles are not used in the training of $p_{\theta}$ .
41
+
42
+ # 2.2 Diffusion Model on Graph Data
43
+
44
+ Diffusion models consist of forward and reverse diffusion processes [17]. We refer to the forward diffusion process as the diffusion process following [17]. The diffusion process $q(G^{1:T} \mid G^0) = \prod_{t=1}^{T} q(G^t \mid G^{t-1})$ corrupts molecular graph data ( $G^0 = G$ ) into noisy states $G^t$ . As timesteps $T \to \infty$ , $q(G^T)$ converges to a stationary distribution $\pi(G)$ . The reverse Markov process $p_\theta(G^{0:T}) = q(G^T) \prod_{t=1}^{T} p_\theta(G^{t-1} \mid G^t)$ , parameterized by neural networks, gradually denoises the latent states toward the desired data distribution.
45
+
46
+ Diffusion Process One may perturb $G^{t}$ in a discrete state-space to capture the structural properties of molecules [43]. Two transition matrices $\mathbf{Q}_V \in \mathbb{R}^{F_V \times F_V}$ and $\mathbf{Q}_E \in \mathbb{R}^{F_E \times F_E}$ are defined for nodes $\mathbf{X}_V$ and edges $\mathbf{X}_E$ , respectively [43]. Then, each step $q(G^{t} \mid G^{t-1}, G^{0}) = q(G^{t} \mid G^{t-1})$ in the diffusion process is sampled as follows.
47
+
48
+ $$
49
+ \left\{ \begin{array}{l} q \left(\mathbf {X} _ {V} ^ {t} \mid \mathbf {X} _ {V} ^ {t - 1}\right) = \operatorname {C a t} \left(\mathbf {X} _ {V} ^ {t}; \mathbf {p} = \mathbf {X} _ {V} ^ {t - 1} \mathbf {Q} _ {V} ^ {t}\right), \\ q \left(\mathbf {X} _ {E} ^ {t} \mid \mathbf {X} _ {E} ^ {t - 1}\right) = \operatorname {C a t} \left(\mathbf {X} _ {E} ^ {t}; \mathbf {p} = \mathbf {X} _ {E} ^ {t - 1} \mathbf {Q} _ {E} ^ {t}\right), \end{array} \right. \tag {1}
50
+ $$
51
+
52
+ where $\operatorname{Cat}(\mathbf{X};\mathbf{p})$ denotes sampling from a categorical distribution with probability $\mathbf{p}$ . We remove the subscript $(V / E)$ when the description applies to both nodes and edges. It is assumed that the noise $\mathbf{Q}^i (i\leq t)$ is independently applied to $\mathbf{X}$ in each step $i$ , allowing us to rewrite $q(\mathbf{X}^t\mid \mathbf{X}^{t - 1})$ as the probability of the initial state $q(\mathbf{X}^t\mid \mathbf{X}^0) = \mathrm{Cat}\left(\mathbf{X}^t;\mathbf{p} = \mathbf{X}^0\bar{\mathbf{Q}}^t\right)$ , where $\bar{\mathbf{Q}}^t = \prod_{i\leq t}\mathbf{Q}^i$ .
53
+
54
+ Noise Scheduling Transition matrices $\mathbf{Q}_V$ and $\mathbf{Q}_E$ control the noise applied to atom features and bond features, respectively. Vignac et al. [43] defined $\pi(G) = (\mathbf{m}_V \in \mathbb{R}^{F_V}, \mathbf{m}_E \in \mathbb{R}^{F_E})$ as the marginal distributions of atom types and bond types. The transition matrix at timestep $t$ is $\mathbf{Q}^t = \alpha^t\mathbf{I} + (1 - \alpha^t)\mathbf{1}\mathbf{m}'$ for atoms or bonds, where $\mathbf{m}'$ denotes the transposed row vector. Therefore, we have $\bar{\mathbf{Q}}^t = \bar{\alpha}^t\mathbf{I} + (1 - \bar{\alpha}^t)\mathbf{1}\mathbf{m}'$ , where $\bar{\alpha}^t = \prod_{\tau=1}^t\alpha^\tau$ . The cosine schedule [32] is often chosen for $\bar{\alpha}^t = \cos(0.5\pi(t/T + s)/(1 + s))^2$ .
55
+
56
+ Reverse Process With the initial sampling $G^T \sim \pi(G)$ , the reverse process generates $G^0$ iteratively in reversed steps $t = T, T - 1, \ldots, 1$ . We use a neural network to predict the probability $p_{\theta}(\tilde{G}^0 \mid G^t)$ as the product over nodes and edges [1, 43]:
57
+
58
+ $$
59
+ p _ {\theta} \left(\tilde {G} ^ {0} \mid G ^ {t}\right) = \prod_ {v \in V} p _ {\theta} \left(v ^ {t - 1} \mid G ^ {t}\right) \prod_ {e \in E} p _ {\theta} \left(e ^ {t - 1} \mid G ^ {t}\right) \tag {2}
60
+ $$
61
+
62
+ $p_{\theta}(\tilde{G}^{0}\mid G^{t})$ could be combined with $q(G^{t - 1}\mid G^t,G^0)$ to estimate the reverse distribution on the graph $p_{\theta}(G^{t - 1}\mid G^t)$ . For example, $p_{\theta}(v^{t - 1}\mid G^t)$ is marginalized over predictions of node types $\tilde{v}\in \tilde{\mathbf{x}}_v$ , which applies similarly to edges:
63
+
64
+ $$
65
+ p _ {\theta} \left(v ^ {t - 1} \mid G ^ {t}\right) = \sum_ {\tilde {v} \in \mathbf {X} _ {v}} q \left(v ^ {t - 1} \mid \tilde {v}, G ^ {t}\right) p _ {\theta} (\tilde {v} \mid G ^ {t}). \tag {3}
66
+ $$
67
+
68
+ The neural network could be trained to minimize the negative log-likelihood [43].
69
+
70
+ $$
71
+ L = \mathbb {E} _ {q (G ^ {0})} \mathbb {E} _ {q (G ^ {t} \mid G ^ {0})} \left[ - \mathbb {E} _ {\mathbf {x} \in G ^ {0}} \log p _ {\theta} (\mathbf {x} \mid G ^ {t}) \right] \tag {4}
72
+ $$
73
+
74
+ ![](images/16953bd350eb013ef330454bf1cd802cd4cd8f494aaa9ebf9599877668f4cccb.jpg)
75
+ (a) Denoising Process
76
+
77
+ ![](images/f9110d76398b0812ae088c4f59b54a29829f20d350b5f9ccd23f0c6d6075f1c7.jpg)
78
+ (b) Condition Encoder
79
+ (c) Graph Denoiser (with Transformer Layers)
80
+ Figure 2: Denoising framework and architectures for Graph DiT. Details are in Section 3.2.
81
+
82
+ where $\mathbf{x} \in G^0$ denotes the node or edge features. Typically, the reverse process in diffusion models does not consider molecular properties as conditions. While there have been efforts to introduce property-related guidance using additional predictors, the more promising approach of predictor-free guidance [16], particularly in multi-conditional generation, remains underexplored.
83
+
84
+ # 3 Multi-Conditional Graph Diffusion Transformers
85
+
86
+ We present the denoising framework of Graph DiT in Figure 2. The condition encoder learns the representation of $M$ conditions. The statistics of this representation like mean and variance are used to replace the ones from the molecular representations [19] (see Section 3.2). Besides, we introduce a new noise model in the diffusion process to better fit graph-structured molecules (see Section 3.1).
87
+
88
+ # 3.1 Graph-Dependent Noise Models
89
+
90
+ The transition probability of a node or an edge should rely on the joint distribution of nodes and edges in the prior state. However, as an example shown in Eq. (1), current diffusion models [22, 43, 25] treat node and edge state transitions as independent, misaligning with the denoising process in Eq. (3). This difference between the sampling distributions of noise in the diffusion and reverse processes introduces unnecessary challenges to multi-conditional molecular generations.
91
+
92
+ To address this, we use a single matrix $\mathbf{X}_G\in \mathbb{R}^{N\times F_G}$ to represent graph tokens for $G$ , with $F_{G} = F_{V} + N\cdot F_{E}$ . Token representations are created by concatenating the node feature matrix $\mathbf{X}_V$ and the flattened edge connection matrix from $\mathbf{X}_E$ . Each row vector in $\mathbf{X}_G$ contains features for both nodes and edges, representing all connections and non-connections. Hence, we could design a transition matrix $\mathbf{Q}_G$ considering the joint distribution of nodes and edges. $\mathbf{Q}_G\in \mathbb{R}^{F_G\times F_G}$ is constructed from four matrices $\mathbf{Q}_V,\mathbf{Q}_{EV}\in \mathbb{R}^{F_E\times F_V},\mathbf{Q}_E,\mathbf{Q}_{VE}\in \mathbb{R}^{F_V\times F_E}$ , denoting the transition probability ("dependent old state" $\rightarrow$ "target new state") node $\rightarrow$ node; edge $\rightarrow$ node; edge $\rightarrow$ edge; node $\rightarrow$ edge, respectively.
93
+
94
+ $$
95
+ \mathbf {Q} _ {G} = \left[ \begin{array}{c c} \mathbf {Q} _ {V} & \mathbf {1} _ {N} ^ {\prime} \otimes \mathbf {Q} _ {V E} \\ \mathbf {1} _ {N} \otimes \mathbf {Q} _ {E V} & \mathbf {1} _ {N \times N} \otimes \mathbf {Q} _ {E} \end{array} \right], \tag {5}
96
+ $$
97
+
98
+ where $\otimes$ denotes the Kronecker product, $\mathbf{1}_N$ , $\mathbf{1}_N'$ , and $\mathbf{1}_{N\times N}$ represent the column vector, row vector, and matrix with all 1 elements, respectively. According to Eq. (5), the first $F_{V}$ columns in $\mathbf{Q}_G$ determine the node feature transitions based on both node features (first $F_{V}$ rows) and edge features (remaining $N\cdot F_{E}$ rows). Conversely, the remaining $N\cdot F_{E}$ columns determine the edge feature transitions, depending on the entire graph. We introduce a new diffusion noise model:
99
+
100
+ $$
101
+ q \left(\mathbf {X} _ {G} ^ {t} \mid \mathbf {X} _ {G} ^ {t - 1}\right) = \widetilde {\operatorname {C a t}} \left(\mathbf {X} _ {G} ^ {t}; \tilde {\mathbf {p}} = \mathbf {X} _ {G} ^ {t - 1} \mathbf {Q} _ {G} ^ {t}\right), \tag {6}
102
+ $$
103
+
104
+ where $\tilde{\mathbf{p}}$ is the unnormalized probability and $\widetilde{\mathrm{Cat}}$ denotes categorical sampling: The first $F_{V}$ columns of $\tilde{\mathbf{p}}$ are normalized to sample $\mathbf{X}_V^t$ , while the remaining $N\cdot F_{E}$ dimensions are reshaped and normalized to sample edges $\mathbf{X}_E^t$ . These components are combined to form $\mathbf{X}_G^t$ , completing the $\widetilde{\mathrm{Cat}}$ sampling.
105
+
106
+ Choice of $\mathbf{Q}_{VE}$ and $\mathbf{Q}_{EV}$ Similar to the definitions of $\mathbf{m}_V$ and $\mathbf{m}_E$ [43], we leverage the prior knowledge within the training data for the formulation of task-specific matrices, $\mathbf{Q}_{EV}$ and $\mathbf{Q}_{VE}$ . We calculate co-occurrence frequencies of atom and bond types in training molecular graphs to obtain the marginal atom-bond co-occurrence probability distribution. For each bond type, each row in $\mathbf{m}_{EV}$ represents the probability of co-occurring atom types. $\mathbf{m}_{VE}$ is the transpose of $\mathbf{m}_{EV}$ and has a similar meaning. Subsequently, we define $\mathbf{Q}_{EV} = \bar{\alpha}^t\mathbf{I} + (1 - \bar{\alpha}^t)\mathbf{1}\mathbf{m}_{EV}'$ and $\mathbf{Q}_{VE} = \bar{\alpha}^t\mathbf{I} + (1 - \bar{\alpha}^t)\mathbf{1}\mathbf{m}_{VE}'$ .
107
+
108
+ # 3.2 Denoising Models with Multi-Property Conditions
109
+
110
+ We present Graph DiT as the denoising model to generate molecules under multi-conditions $\mathcal{C} = \{c_1, c_2, \ldots, c_M\}$ without extra predictors.
111
+
112
+ Predictor-Free Guidance The predictor-free reverse process $\hat{p}_{\theta}(G^{t-1} \mid G^t, \mathcal{C})$ aims to generate molecules with a high probability $q(\mathcal{C} \mid G^0)$ . This could be achieved by a linear combination of the log probability for unconditional and conditional denoising [16]:
113
+
114
+ $$
115
+ \log \hat {p} _ {\theta} \left(G ^ {t - 1} \mid G ^ {t}, \mathcal {C}\right) = \log p _ {\theta} \left(G ^ {t - 1} \mid G ^ {t}\right) + s \left(\log p _ {\theta} \left(G ^ {t - 1} \mid G ^ {t}, \mathcal {C}\right) - \log p _ {\theta} \left(G ^ {t - 1} \mid G ^ {t}\right)\right), \tag {7}
116
+ $$
117
+
118
+ where $s$ denotes the scale of conditional guidance. Unlike classifier-free guidance [16], which typically predicts noise, we directly estimate $p_{\theta}(\tilde{G}^0 \mid G^t, \mathcal{C})$ . We use one denoising model $f_{\theta}(G^t, \mathcal{C})$ for both $p_{\theta}(\tilde{G}^0 \mid G^t)$ and $p_{\theta}(\tilde{G}^0 \mid G^t, \mathcal{C})$ . Here, $f_{\theta}(G^t, \mathcal{C} = \emptyset)$ computes the unconditional probability by substituting the original conditional embeddings with the null value. During training, we randomly drop the conditions at a fixed ratio, i.e., setting $\mathcal{C} = \emptyset$ , to learn the embedding of the null value. $f_{\theta}(G^t = \mathbf{X}_G^t, \mathcal{C})$ comprises two components: the condition encoder and the graph denoiser. An overview of the architecture is presented in Figure 2.
119
+
120
+ Condition Encoder We treat the timestep $t$ as a special condition and follow [31] to obtain a $D$ -dimensional representation $\mathbf{t}$ with sinusoidal encoding. For each property-related numerical or categorical condition $c_{i} \in \mathcal{C}$ , we apply a distinct encoding operation to get a $D$ -dimensional representation. For a categorical condition, we use the one-hot encoding. For a numerical variable, we introduce a clustering encoding method. This method defines learnable centroids, assigns $c_{i}$ to clusters, and transforms the soft assignment vector of condition values into the representation. It could be implemented using two Linear layers with a Softmax layer in the middle as $\mathrm{Linear}(\mathrm{Softmax}(\mathrm{Linear}(c_i)))$ . Finally, we could obtain the representation of the condition as $\mathbf{c} = \sum_{i=1}^{M} \mathrm{encode}(c_i)$ , where encode is the specific encoding method based on the condition type. For numerical conditions, we evaluate our proposed clustering-based approach against alternatives like direct or interval-based encodings [28]. As noted in Section 4.4, the clustering encoding outperforms the other methods.
121
+
122
+ Graph Denoiser: Transformer Layers Given the noisy graph at timestep $t$ , the graph tokens are first encoded into the hidden space as $\mathbf{H} = \mathrm{Linear}(\mathbf{X}_G^t)$ , where $\mathbf{H} \in \mathbb{R}^{N \times D}$ . We then adapt the standard Transformer layers [42] with self-attention and multi-layer perceptrons (MLP), but replace the normalization with the adaptive layer normalization (AdaLN) controlled by the representations of the conditions [19, 34]: $\mathbf{H} = \mathrm{AdaLN}(\mathbf{H}, \mathbf{c})$ . For each row $\mathbf{h}$ in $\mathbf{H}$ :
123
+
124
+ $$
125
+ \operatorname {A d a L N} (\mathbf {h}, \mathbf {c}) = \gamma_ {\theta} (\mathbf {c}) \odot \frac {\mathbf {h} - \mu (\mathbf {h})}{\sigma (\mathbf {h})} + \beta_ {\theta} (\mathbf {c}), \tag {8}
126
+ $$
127
+
128
+ where $\mu (\cdot)$ and $\sigma (\cdot)$ compute the mean and standard deviation, respectively. $\odot$ indicates element-wise product. $\gamma_{\theta}(\cdot)$ and $\beta_{\theta}(\cdot)$ are neural network modules in $f_{\theta}(\cdot)$ , each of which consists of two linear layers with SiLU activation [11] in the middle. We have a gated variant $\mathrm{AdaLN}_{gate}$ for residuals:
129
+
130
+ $$
131
+ \operatorname {A d a L N} _ {\text {g a t e}} (\mathbf {h}, \mathbf {c}) = \alpha_ {\theta} (\mathbf {c}) \odot \operatorname {A d a L N} (\mathbf {h}, \mathbf {c}) \tag {9}
132
+ $$
133
+
134
+ We apply the zero initialization for the first layer of $\gamma_{\theta}(\cdot), \beta_{\theta}(\cdot)$ , and $\alpha_{\theta}(\cdot)$ [34]. There are other options to learn the structure representation from the condition [34]: In-Context conditioning adds condition representation to the structure representation at the beginning of the structure encoder, and Cross-Attention calculates cross-attention between the condition and structure representation. We observe in Section 4.4 that AdaLN performs best among them.
135
+
136
+ Graph Denoiser: Final MLP We have the hidden states $\mathbf{H}$ after the final Transformer layers, and the MLP is used to predict node probabilities $\tilde{\mathbf{X}}_V^0$ and edge probabilities $\tilde{\mathbf{X}}_E^0$ at $t = 0$ :
137
+
138
+ $$
139
+ \tilde {\mathbf {X}} _ {G} ^ {0} = \operatorname {A d a L N} (\operatorname {M L P} (\mathbf {H}), \mathbf {c}). \tag {10}
140
+ $$
141
+
142
+ We split the output $\tilde{\mathbf{X}}_G^0$ into atom and bond features $\tilde{\mathbf{X}}_V^0, \tilde{\mathbf{X}}_E^0$ . The first $F_V$ dimensions of $\tilde{\mathbf{X}}_G^0$ represent node type probabilities, and the remaining $N \cdot F_E$ dimensions cover the edge type probabilities for the $N$ edges associated with the node, as detailed in Section 3.1.
143
+
144
+ Generation to Molecule Conversion A common way of converting generated graphs to molecules selects only the largest connected component [43], denoted as Graph DiT-LCC in our model. For Graph DiT, we connect all components by randomly selecting atoms. It minimally alters the generated structure to more accurately reflect model performance than Graph DiT-LCC.
145
+
146
+ Table 1: Multi-Conditional Generation of 10K Polymers: Results on the synthetic score (Synth.) and three numerical properties (gas permeability for $\mathrm{O}_2$ , $\mathrm{N}_2$ , $\mathrm{CO}_2$ ). MAE is calculated between the input conditions and the properties of the generated polymers using Oracles. Best results are highlighted.
147
+
148
+ <table><tr><td rowspan="2">Model</td><td rowspan="2">Validity ↑(w/o rule checking)</td><td colspan="4">Distribution Learning</td><td colspan="5">Condition Control</td></tr><tr><td>Coverage ↑</td><td>Diversity ↑</td><td>Similarity ↑</td><td>Distance ↓</td><td>Synth. ↓</td><td>O2Perm ↓</td><td>N2Perm ↓</td><td>CO2Perm ↓</td><td>Avg. MAE ↓</td></tr><tr><td>Graph GA</td><td>1.0000 (N.A.)</td><td>11/11</td><td>0.8828</td><td>0.9269</td><td>9.1882</td><td>1.3307</td><td>1.9840</td><td>2.2900</td><td>1.9489</td><td>1.8884</td></tr><tr><td>MARS</td><td>1.0000 (N.A.)</td><td>11/11</td><td>0.8375</td><td>0.9283</td><td>7.5620</td><td>1.1658</td><td>1.5761</td><td>1.8327</td><td>1.6074</td><td>1.5455</td></tr><tr><td>LSTM-HC</td><td>0.9910 (N.A.)</td><td>10/11</td><td>0.8918</td><td>0.7937</td><td>18.1562</td><td>1.4251</td><td>1.1003</td><td>1.2365</td><td>1.0772</td><td>1.2098</td></tr><tr><td>JTVAE-BO</td><td>1.0000 (N.A.)</td><td>10/11</td><td>0.7366</td><td>0.7294</td><td>23.5990</td><td>1.0714</td><td>1.0781</td><td>1.2352</td><td>1.0978</td><td>1.1206</td></tr><tr><td>DiGress</td><td>0.9913 (0.2362)</td><td>11/11</td><td>0.9099</td><td>0.2724</td><td>22.7237</td><td>2.9842</td><td>1.7163</td><td>2.0630</td><td>1.6738</td><td>2.1093</td></tr><tr><td>DiGress v2</td><td>0.9812 (0.3057)</td><td>11/11</td><td>0.9105</td><td>0.2771</td><td>21.7311</td><td>2.7507</td><td>1.7130</td><td>2.0632</td><td>1.6648</td><td>2.0479</td></tr><tr><td>GDSS</td><td>0.9205 (0.9076)</td><td>9/11</td><td>0.7510</td><td>0.0000</td><td>34.2627</td><td>1.3701</td><td>1.0271</td><td>1.0820</td><td>1.0683</td><td>1.1369</td></tr><tr><td>MOOD</td><td>0.9866 (0.9205)</td><td>11/11</td><td>0.8349</td><td>0.0227</td><td>39.3981</td><td>1.4019</td><td>1.4961</td><td>1.7603</td><td>1.4748</td><td>1.5333</td></tr><tr><td>Graph DiT-LCC (Ours)</td><td>0.9753 
(0.8437)</td><td>11/11</td><td>0.8875</td><td>0.9560</td><td>7.0949</td><td>1.3099</td><td>0.8001</td><td>0.9562</td><td>0.8125</td><td>0.9697</td></tr><tr><td>Graph DiT (Ours)</td><td>0.8245 (0.8437)</td><td>11/11</td><td>0.8712</td><td>0.9600</td><td>6.6443</td><td>1.2973</td><td>0.7440</td><td>0.8857</td><td>0.7550</td><td>0.9205</td></tr></table>
149
+
150
+ # 4 Experiment
151
+
152
+ RQ1: We validate the generative power of Graph DiT compared to baselines from molecular optimization and diffusion models in Section 4.2. RQ2: We study a polymer inverse design for gas separation in Section 4.3. RQ3: We conduct further analysis to examine Graph DiT in Section 4.4.
153
+
154
+ # 4.1 Experimental Setup
155
+
156
+ We use datasets with over ten types of atoms and up to fifty nodes in a molecular graph. We include both numerical and categorical properties for drugs and materials, offering a benchmark for evaluation across diverse chemical spaces. Model performance is validated across up to nine metrics, including distribution coverage, diversity, and condition control capacity for various properties.
157
+
158
+ Datasets and Input Conditions We have one polymer dataset [40] for materials, featuring three numerical gas permeability conditions: $\mathrm{O}_2\mathrm{Perm}$ , $\mathrm{CO}_2\mathrm{Perm}$ , and $\mathrm{N}_2\mathrm{Perm}$ . For drug design, we create three class-balanced datasets from MoleculeNet [46]: HIV, BBBP, and BACE, each with a categorical property related to HIV virus replication inhibition, blood-brain barrier permeability, or human $\beta$ -secretase 1 inhibition, respectively. We have two more numerical conditions for synthesizability: the synthetic accessibility score (SAS) and the synthetic complexity score (SCS) [12, 8].
159
+
160
+ Evaluation We randomly split the dataset into training, validation, and testing (reference) sets in a 6:2:2 ratio. Evaluations are conducted on 10,000 generated examples with metrics [35] (1) molecular validity (Validity); (2) heavy atom type coverage (Coverage); (3) internal diversity among the generated examples (Diversity); (4) fragment-based similarity with the reference set (Similarity); (5) Fréchet ChemNet Distance with the reference set (Distance) [36]; MAE between the generated and conditioned (6) synthetic accessibility score [12] (Synth.); $(7)\sim (9)$ MAE/Accuracy for the numerical/categorical task conditions (Property). The evaluation Oracle uses random forest trained on all task-related molecules [14]. Lower MAE or higher accuracy indicates stronger model controllability.
161
+
162
+ Baselines We select strong and popular molecular optimization baselines from recent studies [14]: Graph-GA [20], MARS [47], JTVAE [21] with Bayesian optimization (JTVAE-BO), LSTM [6] on SMILES with Hill Climbing (LSTM-HC). We include the most recent diffusion models: GDSS[22], DiGress [43], and their conditional version with extra predictors: MOOD [25], and DiGress v2 [43]. We train multi-task predictors using the same architecture for MOOD and DiGress v2 models to provide additional guidance for generation. For molecular optimization, we formulate the condition set of each test data point as a combined goal, minimizing the sum of the normalized errors between generated and input properties. We train a random forest model for each property using the training data to optimize the molecular structure.
163
+
164
+ # 4.2 RQ1: Multi-Conditional Molecular Generation
165
+
166
+ We have the observations from Table 1 and Table 2:
167
+
168
+ Chemical Validity High validity may not accurately represent the model's generative performance if hard-coded rules are introduced in the algorithm. For example, GraphGA could eliminate non-valid molecules during mutation and crossover iterations to achieve perfect validity in the final evaluation. Without rule checking in the generation-to-molecule step, DiGress, GDSS, and MOOD show a marked performance decline, with validity often dropping from 0.99 to below 0.6. In contrast, Graph DiT often maintains over 0.8 validity without any rule-based processing.
169
+
170
+ Table 2: Multi-Conditional Generation of 10K Small Molecules: Each dataset involves a numerical synthesizability score (Synth.) and a categorical task-specific property. MAE/Accuracy is calculated by comparing input conditions and generated properties. The best number per metric is highlighted.
171
+
172
+ <table><tr><td rowspan="2">Tasks</td><td rowspan="2">Model</td><td rowspan="2">Validity ↑(w/o rule checking)</td><td colspan="4">Distribution Learning</td><td colspan="3">Condition Control</td></tr><tr><td>Coverage ↑</td><td>Diversity ↑</td><td>Similarity ↑</td><td>Distance ↓</td><td>Synth. MAE ↓</td><td>Property Acc. ↑</td><td>Avg. Rank ↓</td></tr><tr><td rowspan="10">Synth. &amp; BACE</td><td>Graph GA</td><td>1.0000 (N.A.)</td><td>8/8</td><td>0.8585</td><td>0.9805</td><td>7.4104</td><td>0.9633</td><td>0.4690</td><td>6.5000</td></tr><tr><td>MARS</td><td>1.0000 (N.A.)</td><td>8/8</td><td>0.8338</td><td>0.8827</td><td>6.7923</td><td>1.0123</td><td>0.5184</td><td>5.0000</td></tr><tr><td>LSTM-HC</td><td>0.9972 (N.A.)</td><td>8/8</td><td>0.8146</td><td>0.7982</td><td>17.5585</td><td>0.9207</td><td>0.5816</td><td>3.0000</td></tr><tr><td>JTVAE-BO</td><td>1.0000 (N.A.)</td><td>6/8</td><td>0.6682</td><td>0.7281</td><td>30.4696</td><td>0.9923</td><td>0.4628</td><td>7.5000</td></tr><tr><td>DiGress</td><td>0.3511 (0.2858)</td><td>8/8</td><td>0.8862</td><td>0.6942</td><td>24.6560</td><td>2.0681</td><td>0.5061</td><td>8.0000</td></tr><tr><td>DiGress v2</td><td>0.3546 (0.2680)</td><td>8/8</td><td>0.8812</td><td>0.7027</td><td>25.3270</td><td>2.3365</td><td>0.5113</td><td>7.5000</td></tr><tr><td>GDSS</td><td>0.2879 (0.2589)</td><td>4/8</td><td>0.8756</td><td>0.2708</td><td>46.7539</td><td>1.6422</td><td>0.5036</td><td>7.5000</td></tr><tr><td>MOOD</td><td>0.9947 (0.4502)</td><td>8/8</td><td>0.8902</td><td>0.2587</td><td>44.2394</td><td>1.8853</td><td>0.5062</td><td>7.0000</td></tr><tr><td>Graph DiT-LCC (Ours)</td><td>0.8646 (0.8495)</td><td>8/8</td><td>0.8240</td><td>0.8757</td><td>6.9836</td><td>0.4053</td><td>0.9050</td><td>2.0000</td></tr><tr><td>Graph DiT (Ours)</td><td>0.8674 (0.8495)</td><td>8/8</td><td>0.8238</td><td>0.8752</td><td>7.0456</td><td>0.3998</td><td>0.9135</td><td>1.0000</td></tr><tr><td rowspan="10">Synth. 
&amp; BBBP</td><td>Graph GA</td><td>1.0000 (N.A.)</td><td>9/9</td><td>0.8950</td><td>0.9509</td><td>10.1659</td><td>1.2082</td><td>0.3015</td><td>7.5000</td></tr><tr><td>MARS</td><td>1.0000 (N.A.)</td><td>8/9</td><td>0.8637</td><td>0.7696</td><td>10.9791</td><td>1.2250</td><td>0.5189</td><td>6.0000</td></tr><tr><td>LSTM-HC</td><td>0.9990 (N.A.)</td><td>8/9</td><td>0.8883</td><td>0.8932</td><td>16.3904</td><td>0.9969</td><td>0.5590</td><td>4.0000</td></tr><tr><td>JTVAE-BO</td><td>1.0000 (N.A.)</td><td>5/9</td><td>0.7458</td><td>0.5821</td><td>33.5746</td><td>1.1619</td><td>0.4958</td><td>6.0000</td></tr><tr><td>DiGress</td><td>0.6960 (0.4871)</td><td>9/9</td><td>0.9098</td><td>0.6805</td><td>18.6921</td><td>2.3658</td><td>0.6536</td><td>6.5000</td></tr><tr><td>DiGress v2</td><td>0.6892 (0.4100)</td><td>9/9</td><td>0.9107</td><td>0.6336</td><td>19.4498</td><td>2.2694</td><td>0.6531</td><td>6.5000</td></tr><tr><td>GDSS</td><td>0.6218 (0.5919)</td><td>3/9</td><td>0.8415</td><td>0.2672</td><td>39.9440</td><td>1.3788</td><td>0.5037</td><td>7.0000</td></tr><tr><td>MOOD</td><td>0.8008 (0.5789)</td><td>9/9</td><td>0.9273</td><td>0.1715</td><td>34.2506</td><td>2.0284</td><td>0.4903</td><td>8.5000</td></tr><tr><td>Graph DiT-LCC (Ours)</td><td>0.8657 (0.8505)</td><td>9/9</td><td>0.8857</td><td>0.9324</td><td>11.8587</td><td>0.3717</td><td>0.9390</td><td>2.0000</td></tr><tr><td>Graph DiT (Ours)</td><td>0.8468 (0.8505)</td><td>9/9</td><td>0.8856</td><td>0.9329</td><td>11.8519</td><td>0.3551</td><td>0.9417</td><td>1.0000</td></tr><tr><td rowspan="10">Synth. 
&amp; HIV</td><td>Graph GA</td><td>1.0000 (N.A.)</td><td>28/29</td><td>0.8993</td><td>0.9661</td><td>4.4418</td><td>0.9839</td><td>0.6035</td><td>5.0000</td></tr><tr><td>MARS</td><td>1.0000 (N.A.)</td><td>26/29</td><td>0.8764</td><td>0.6517</td><td>7.2893</td><td>0.9691</td><td>0.6455</td><td>4.0000</td></tr><tr><td>LSTM-HC</td><td>0.9994 (N.A.)</td><td>13/29</td><td>0.9091</td><td>0.9145</td><td>7.4659</td><td>0.9480</td><td>0.6736</td><td>3.0000</td></tr><tr><td>JTVAE-BO</td><td>1.0000 (N.A.)</td><td>3/29</td><td>0.8055</td><td>0.4173</td><td>41.9771</td><td>1.2359</td><td>0.4850</td><td>7.5000</td></tr><tr><td>DiGress</td><td>0.4377 (0.3643)</td><td>22/29</td><td>0.9194</td><td>0.8562</td><td>13.0409</td><td>1.9216</td><td>0.5335</td><td>7.5000</td></tr><tr><td>DiGress v2</td><td>0.5050 (0.4242)</td><td>24/29</td><td>0.9193</td><td>0.8476</td><td>13.3997</td><td>1.5934</td><td>0.5331</td><td>7.5000</td></tr><tr><td>GDSS</td><td>0.6926 (0.6757)</td><td>4/29</td><td>0.7817</td><td>0.1032</td><td>45.3416</td><td>1.2515</td><td>0.4830</td><td>8.5000</td></tr><tr><td>MOOD</td><td>0.2875 (0.2173)</td><td>29/29</td><td>0.9280</td><td>0.1361</td><td>32.3523</td><td>2.3144</td><td>0.5106</td><td>9.0000</td></tr><tr><td>Graph DiT-LCC (Ours)</td><td>0.7635 (0.7415)</td><td>28/29</td><td>0.8966</td><td>0.9535</td><td>5.8790</td><td>0.3084</td><td>0.9766</td><td>1.5000</td></tr><tr><td>Graph DiT (Ours)</td><td>0.7660 (0.7415)</td><td>28/29</td><td>0.8974</td><td>0.9575</td><td>6.0216</td><td>0.3086</td><td>0.9777</td><td>1.5000</td></tr></table>
173
+
174
+ Distribution Learning GraphGA is a simple yet effective baseline for generating in-distribution molecules, e.g., on BBBP and HIV generation datasets. Diffusion model baselines such as DiGress and MOOD could produce diverse molecules but often fail to capture the original data distribution in multi-conditional tasks. Graph DiT shows the competitive performance of diffusion models in fitting complex molecular data distributions. Using fragment-based similarity and neural network-based distance metrics [36], we achieve the best performance in the polymer task and rank second in the HIV small molecule task, involving up to 11 and 29 types of heavy atoms, respectively.
175
+
176
+ Condition Controllability LSTM-HC surpasses many baselines, achieving lower average MAE on polymer properties and higher rankings on small molecular properties. However, its control over synthetic scores in polymer tasks is relatively poor. Conversely, MARS effectively manages synthetic scores for polymers but exhibits a larger MAE in gas permeability conditions compared to other baselines. GDSS performs well in gas permeability control but underperforms Graph GA and MARS in terms of the synthetic score condition. DiGress v2 and MOOD, although equipped with the predictor guidance, still exhibit limited condition control compared to their unconditional counterparts over polymer and small molecule tasks. These baselines struggle to balance and control multiple conditions in generation. In contrast, Graph DiT significantly improves diffusion models and achieves the best multi-conditional performance in all tasks. In polymer tasks, Graph DiT reduces MAE on all gas permeability conditions, averaging $+17.8\%$ improvement over the best baseline LSTM-HC. For small molecule tasks, Graph DiT consistently ranks top-1 in condition controllability with over 0.9 accuracy in categorical conditions. Compared to Graph DiT-LCC, we observe that Graph DiT, which connects all generated graph components, shows better controllability performance due to minimal rule-based post-generation processing.
177
+
178
+ # 4.3 RQ2: Polymer Inverse Design for Gas Separation
179
+
180
+ We aim to design polymers with high $\mathrm{O_2}$ and low $\mathrm{N}_2$ permeability, demonstrating the models' precise control over related properties. Following Robeson [38]'s definition of high-performance polymers based on the $\mathrm{O_2 / N_2}$ permeability ratio, we selected 16 polymers meeting this criterion from 609 examples as our test/reference set. The remaining data is used for training and validation. Subsequently, we generated 1,000 polymers conditioned on test set labels.
181
+
182
+ ![](images/51b6d6edffda74e84d3e9248590bfa28942cadc7981d0437f02c4ea08efb65ea.jpg)
183
+ Figure 3: Polymer Inverse Design for $\mathrm{O}_2 / \mathrm{N}_2$ Gas Separation: Feedback from four domain experts includes an average Utility Score (UtS) for relative usefulness and an Agreement Score (AS) for generated polymers, both ranging [0, 1]. Polymers are generated conditional on $\{\mathrm{SAS} = 3.8$ , $\mathrm{SCS} = 4.3$ , $\mathrm{O}_2\mathrm{Perm} = 34.0$ , $\mathrm{N}_2\mathrm{Perm} = 5.2\}$ . The top-3 polymers, highlighted, are all generated by Graph DiT.
184
+
185
+ ![](images/4c926b4f2e08265bd3d7c43697a2d5253eb065e49a6e80c8e0de1b8bd8f7ae32.jpg)
186
+ (a) Numerical Condition Encodings
187
+
188
+ ![](images/847c6fffbce0a553bfe38e2d6758459e6a2dd4b93e971903288412ab0b80ae66.jpg)
189
+ (b) Condition Architectures
190
+
191
+ ![](images/cd6324b37de734a6089612d6aad8c4ce2d9c279f515cd31c558481bb790a25d4.jpg)
192
+ (c) Graph Dependent Noise Model
193
+ Figure 4: Relative Performance of Different Model Designs: A higher bar indicates better performance. We use the performance of clustering-based encoding or AdaLN as the Reference Value and the current option as the Current Value. Relative performance is calculated as $\frac{\text{Current Value}}{\text{Reference Value}}$ for Similarity and Diversity metrics, and as $\frac{\text{Reference Value}}{\text{Current Value}}$ for other metrics.
194
+
195
+ In Figure 3, we present the top three polymers generated by each model for a case study with expertise. Initially, a random forest algorithm identifies the top five polymers per method based on average MAE in two gas permeability. These 25 polymers are then shuffled and evaluated by four polymer scientists, who rank them from 1 to 25 using their domain knowledge. Rankings are normalized to a Utility Score (UtS) ranging from 0 to 1, with higher scores indicating greater utility. The variance in UtS is converted into an Agreement Score (AS) for further evaluation. As shown in Figure 3, there is a high consensus among experts that the three polymers generated by Graph DiT are the most promising for successful polymer inverse design tasks. More details are in appendix D. By comparing generated examples from different models, we have further observations:
196
+
197
+ - DiGress and MOOD struggle to capture polymerization points, marked with asterisks (*), which are among the most important features that distinguish polymers from small molecules. Additionally, the two methods frequently feature excessive carbon atoms and overly large cycles. These molecular configurations with significant distortion from the canonical geometry of stable compounds may lead to poor synthesizability [35, 7].
198
+ - LSTM-HC may result in too-long carbon chains with limited diversity. MARS produces examples with asymmetrical graph structures, challenging polymer synthesis [10, 2].
199
+
200
+ - Graph DiT generates structurally diverse and symmetric polymers with two polymerization points, indicative of more valid and synthesizable polymer structures. The first two, which are polyimides, imply effective gas separation performance [24].
201
+
202
+ # 4.4 RQ3: Ablation Studies and Model Analysis
203
+
204
+ Model Components In light of Table 1, we analyze three components that impact our model's learning in various conditions. Our assessment of relative performance is based on the ratio between our method and comparative approaches. The first component is numerical conditional encoding. Results in Figure 4(a) highlight the superiority of clustering encoding over direct and interval-based encoding, particularly in controlling gas permeability, despite its slightly lower diversity. The second component concerns the neural architecture for conditions. As shown in Figure 4(b), similar to Figure 4(a), AdaLN surpasses both In-Context Conditioning and Cross-Attention in learning distribution with better condition controllability. The third component validates the importance of the graph-dependent noise model compared to separately applying noise to atoms and bonds. It also shows the improvement of the predictor-free Graph DiT over the predictor-guided DiGress v2, even without the graph-dependent noise model. More results on model controllability are in appendix E.
205
+
206
+ Oracle Selections We analyze the robustness of Oracles in evaluating six task-related properties (three gas permeability properties and three small molecule properties) across six conditional generation tasks. Oracles are switched from Random Forest to Gaussian Process or Support Vector Machines for ranking generative model performance. Results in Table 3 show consistent rankings (Graph DiT, LSTM-HC, MARS, JTVAE-BO, MOOD, GDSS, GraphGA). It indicates that while perfectly approximating the true properties of generated molecules is difficult, we could effectively compare the relative performance of various models. Graph DiT consistently ranked first among baselines.
207
+
208
+ Table 3: Oracles for Generation Evaluation: We consider three Oracles. Generative performance is ranked on average from 1 to 9 across six properties, with various Oracles yielding similar outcomes. We highlight models with the same ranking sequence in different Oracle evaluations.
209
+
210
+ <table><tr><td>Avg. Rank</td><td>Random Forest</td><td>Gaussian Process</td><td>Support Vector Machine</td></tr><tr><td>1</td><td>Graph DiT</td><td>Graph DiT</td><td>Graph DiT</td></tr><tr><td>2</td><td>LSTM-HC</td><td>DiGress v2</td><td>DiGress v2</td></tr><tr><td>3</td><td>MARS</td><td>DiGress</td><td>DiGress</td></tr><tr><td>4</td><td>JTVAE-BO</td><td>LSTM-HC</td><td>LSTM-HC</td></tr><tr><td>5</td><td>MOOD</td><td>MARS</td><td>MARS</td></tr><tr><td>6</td><td>DiGress</td><td>JTVAE-BO</td><td>JTVAE-BO</td></tr><tr><td>7</td><td>DiGress v2</td><td>MOOD</td><td>MOOD</td></tr><tr><td>8</td><td>GDSS</td><td>GDSS</td><td>GDSS</td></tr><tr><td>9</td><td>Graph GA</td><td>Graph GA</td><td>Graph GA</td></tr></table>
211
+
212
+ # 5 Related Work
213
+
214
+ Diffusion Models for Molecules: Score-based diffusion models applied noise and denoising in continuous space [33, 22]. DiGress [43] used discrete noise as transition matrices based on marginal distributions of atom and bond types. Extra predictor models are studied to guide the generation process in DiGress and GDSS [25]. Diffusion models could also be used for molecular property prediction [27], for conformation [48] and molecule generation with 3D atomic coordinates [18, 49, 3]. We focus on molecular graph generation, considering the high computational cost of accurate 3D coordinates for larger molecules like polymers [23]. We explore predictor-free diffusion guidance, instead of the classifier guidance [9, 44], for generating molecules under categorical and numerical conditions. It can be integrated with diffusion models for atomic coordinates in future research.
215
+
216
+ Molecular Optimization: Optimization algorithms could optimize molecules towards property constraints, including genetic algorithms [20], Bayesian optimization [39, 50], REINFORCE [45], and reinforcement learning [30]. Both sequential and graph-based generative models [6, 21, 30], along with diverse sampling methods [47, 13], are used in conjunction with these algorithms to produce desirable molecules. These methods have been applied to both single-objective and multi-objective optimization, the latter by manually integrating multiple property conditions into a single one [5, 25]. Several challenges in molecular optimization methods remain underexplored, including the inadequate or unclear definition of multi-property relations when integration into a single objective [5], and the inaccessibility of the oracle function for property-oriented optimization during the training phase [14].
217
+
218
+ # 6 Conclusion
219
+
220
+ In this work, we solved inverse molecular design using properties as predictor-free diffusion guidance. The proposed Graph DiT performed diffusion based on the joint distribution of atoms and bonds in both forward and reverse processes. It introduced representation learning for multiple categorical and numerical properties and utilized a Transformer-based graph denoiser for conditional graph denoising. Results on multi-conditional generations and polymer inverse designs showed the remarkable generative capabilities of Graph DiT, making it suitable for designing promising molecules.
221
+
222
+ # Acknowledgements
223
+
224
+ This work was supported by NSF IIS-2142827, IIS-2146761, IIS-2234058, CBET-2332270, CBET-2102592, and ONR N00014-22-1-2507.
225
+
226
+ # References
227
+
228
+ [1] Jacob Austin, Daniel D Johnson, Jonathan Ho, Daniel Tarlow, and Rianne Van Den Berg. Structured denoising diffusion models in discrete state-spaces. Advances in Neural Information Processing Systems, 34:17981-17993, 2021.
229
+ [2] Alexandru T Balaban. Symmetry in chemical structures and reactions. In Symmetry, pages 999-1020. Elsevier, 1986.
230
+ [3] Fan Bao, Min Zhao, Zhongkai Hao, Peiyao Li, Chongxuan Li, and Jun Zhu. Equivariant energy-guided SDE for inverse molecular design. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=rOotLtOwYW.
231
+ [4] J Wesley Barnett, Connor R Bilchak, Yiwen Wang, Brian C Benicewicz, Laura A Murdock, Tristan Bereau, and Sanat K Kumar. Designing exceptional gas-separation polymer membranes using machine learning. Science advances, 6(20):eaaz4301, 2020.
232
+ [5] Camille Bilodeau, Wengong Jin, Tommi Jaakkola, Regina Barzilay, and Klavs F Jensen. Generative models for molecular discovery: Recent advances and challenges. Wiley Interdisciplinary Reviews: Computational Molecular Science, 12(5):e1608, 2022.
233
+ [6] Nathan Brown, Marco Fiscato, Marwin HS Segler, and Alain C Vaucher. Guacamol: benchmarking models for de novo molecular design. Journal of chemical information and modeling, 59(3):1096-1108, 2019.
234
+ [7] Robert F Bruns and Ian A Watson. Rules for identifying potentially reactive or promiscuous compounds. Journal of medicinal chemistry, 55(22):9763-9772, 2012.
235
+ [8] Connor W Coley, Luke Rogers, William H Green, and Klavs F Jensen. Scscore: synthetic complexity learned from a reaction corpus. Journal of chemical information and modeling, 58 (2):252-261, 2018.
236
+ [9] Prafulla Dhariwal and Alexander Nichol. Diffusion models beat gans on image synthesis. Advances in Neural Information Processing Systems, 34:8780-8794, 2021.
237
+ [10] Jack D Dunitz. Symmetry arguments in chemistry. Proceedings of the National Academy of Sciences, 93(25):14260-14266, 1996.
238
+ [11] Stefan Elfwing, Eiji Uchibe, and Kenji Doya. Sigmoid-weighted linear units for neural network function approximation in reinforcement learning. Neural networks, 107:3-11, 2018.
239
+ [12] Peter Ertl and Ansgar Schuffenhauer. Estimation of synthetic accessibility score of drug-like molecules based on molecular complexity and fragment contributions. Journal of cheminformatics, 1:1-11, 2009.
240
+ [13] Tianfan Fu, Wenhao Gao, Cao Xiao, Jacob Yasonik, Connor W Coley, and Jimeng Sun. Differentiable scaffolding tree for molecular optimization. arXiv preprint arXiv:2109.10469, 2021.
241
+ [14] Wenhao Gao, Tianfan Fu, Jimeng Sun, and Connor Coley. Sample efficiency matters: a benchmark for practical molecular optimization. Advances in Neural Information Processing Systems, 35:21342-21357, 2022.
242
+ [15] Niklas WA Gebauer, Michael Gastegger, Stefaan SP Hessmann, Klaus-Robert Müller, and Kristof T Schütt. Inverse design of 3d molecular structures with conditional generative neural networks. Nature communications, 13(1):973, 2022.
243
+ [16] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022.
244
+ [17] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. Advances in neural information processing systems, 33:6840-6851, 2020.
245
+ [18] Emiel Hoogeboom, Victor Garcia Satorras, Clément Vignac, and Max Welling. Equivariant diffusion for molecule generation in 3d. In International conference on machine learning, pages 8867-8887. PMLR, 2022.
246
+
247
+ [19] Xun Huang and Serge Belongie. Arbitrary style transfer in real-time with adaptive instance normalization. In Proceedings of the IEEE international conference on computer vision, pages 1501-1510, 2017.
248
+ [20] Jan H Jensen. A graph-based genetic algorithm and generative model/monte carlo tree search for the exploration of chemical space. Chemical science, 10(12):3567-3572, 2019.
249
+ [21] Wengong Jin, Regina Barzilay, and Tommi Jaakkola. Junction tree variational autoencoder for molecular graph generation. In International conference on machine learning, pages 2323-2332. PMLR, 2018.
250
+ [22] Jaehyeong Jo, Seul Lee, and Sung Ju Hwang. Score-based generative modeling of graphs via the system of stochastic differential equations. In International Conference on Machine Learning, volume 162, pages 10362-10383. PMLR, 2022.
251
+ [23] Soumil Y Joshi and Sanket A Deshmukh. A review of advancements in coarse-grained molecular dynamics simulations. Molecular Simulation, 47(10-11):786-803, 2021.
252
+ [24] Michael Langsam. Polyimides for gas separation. In *Polyimides*, pages 697–742. CRC Press, 2018.
253
+ [25] Seul Lee, Jaehyeong Jo, and Sung Ju Hwang. Exploring chemical space with score-based out-of-distribution generation. In International Conference on Machine Learning, pages 18872-18892. PMLR, 2023.
254
+ [26] Gang Liu, Tong Zhao, Jiaxin Xu, Tengfei Luo, and Meng Jiang. Graph rationalization with environment-based augmentations. In Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, pages 1069-1078, 2022.
255
+ [27] Gang Liu, Eric Inae, Tong Zhao, Jiaxin Xu, Tengfei Luo, and Meng Jiang. Data-centric learning from unlabeled graphs with diffusion model. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=DmakwvCJ71.
256
+ [28] Gang Liu, Tong Zhao, Eric Inae, Tengfei Luo, and Meng Jiang. Semi-supervised graph imbalanced regression. In 29th SIGKDD Conference on Knowledge Discovery and Data Mining, 2023.
257
+ [29] Ruimin Ma and Tengfei Luo. Pi1m: a benchmark database for polymer informatics. Journal of Chemical Information and Modeling, 60(10):4684-4690, 2020.
258
+ [30] Amina Mollaysa, Brooks Paige, and Alexandros Kalousis. Goal-directed generation of discrete structures with conditional generative models. Advances in Neural Information Processing Systems, 33:21923-21933, 2020.
259
+ [31] Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741, 2021.
260
+ [32] Alexander Quinn Nichol and Prafulla Dhariwal. Improved denoising diffusion probabilistic models. In International Conference on Machine Learning, pages 8162-8171. PMLR, 2021.
261
+ [33] Chenhao Niu, Yang Song, Jiaming Song, Shengjia Zhao, Aditya Grover, and Stefano Ermon. Permutation invariant graph generation via score-based generative modeling. In International Conference on Artificial Intelligence and Statistics, pages 4474-4484. PMLR, 2020.
262
+ [34] William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4195-4205, 2023.
263
+ [35] Daniil Polykovskiy, Alexander Zhebrak, Benjamin Sanchez-Lengeling, Sergey Golovanov, Oktai Tatanov, Stanislav Belyaev, Rauf Kurbanov, Aleksey Artamonov, Vladimir Aladinskiy, Mark Veselov, et al. Molecular sets (moses): a benchmarking platform for molecular generation models. Frontiers in pharmacology, 11:565644, 2020.
264
+ [36] Kristina Preuer, Philipp Renz, Thomas Unterthiner, Sepp Hochreiter, and Gunter Klambauer. Fréchet chemnet distance: a metric for generative models for molecules in drug discovery. Journal of chemical information and modeling, 58(9):1736-1741, 2018.
265
+ [37] Philipp Renz, Dries Van Rompaey, Jörg Kurt Wegner, Sepp Hochreiter, and Günter Klambauer. On failure modes in molecule generation and optimization. *Drug Discovery Today: Technologies*, 32:55–63, 2019.
266
+
267
+ [38] Lloyd M Robeson. The upper bound revisited. Journal of membrane science, 320(1-2):390-400, 2008.
268
+ [39] Bobak Shahriari, Kevin Swersky, Ziyu Wang, Ryan P Adams, and Nando De Freitas. Taking the human out of the loop: A review of bayesian optimization. Proceedings of the IEEE, 104 (1):148-175, 2015.
269
+ [40] A Thornton, L Robeson, B Freeman, and D Uhlmann. Polymer gas separation membrane database, 2012.
270
+ [41] Austin Tripp and José Miguel Hernández-Lobato. Genetic algorithms are strong baselines for molecule generation. arXiv preprint arXiv:2310.09267, 2023.
271
+ [42] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017.
272
+ [43] Clement Vignac, Igor Krawczuk, Antoine Siraudin, Bohan Wang, Volkan Cevher, and Pascal Frossard. Digress: Discrete denoising diffusion for graph generation. arXiv preprint arXiv:2209.14734, 2022.
273
+ [44] Tomer Weiss, Eduardo Mayo Yanes, Sabyasachi Chakraborty, Luca Cosmo, Alex M Bronstein, and Renana Gershoni-Poranne. Guided diffusion for inverse molecular design. Nature Computational Science, pages 1-10, 2023.
274
+ [45] Ronald J Williams. Simple statistical gradient-following algorithms for connectionist reinforcement learning. Machine learning, 8:229-256, 1992.
275
+ [46] Zhenqin Wu, Bharath Ramsundar, Evan N Feinberg, Joseph Gomes, Caleb Geniesse, Aneesh S Pappu, Karl Leswing, and Vijay Pande. MoleculeNet: a benchmark for molecular machine learning. Chemical science, 9(2):513-530, 2018.
276
+ [47] Yutong Xie, Chence Shi, Hao Zhou, Yuwei Yang, Weinan Zhang, Yong Yu, and Lei Li. Mars: Markov molecular sampling for multi-objective drug discovery. arXiv preprint arXiv:2103.10432, 2021.
277
+ [48] Minkai Xu, Lantao Yu, Yang Song, Chence Shi, Stefano Ermon, and Jian Tang. Geodiff: A geometric diffusion model for molecular conformation generation. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=PzcvxEMzvQC.
278
+ [49] Minkai Xu, Alexander S Powers, Ron O Dror, Stefano Ermon, and Jure Leskovec. Geometric latent diffusion models for 3d molecule generation. In International Conference on Machine Learning, pages 38592-38610. PMLR, 2023.
279
+ [50] Yiheng Zhu, Jialu Wu, Chaowen Hu, Jiahuan Yan, Tingjun Hou, Jian Wu, et al. Sample-efficient multi-objective molecular optimization with gflownets. Advances in Neural Information Processing Systems, 36, 2024.
280
+
281
+ Table 4: Dataset information for all multi-conditional generation and inverse polymer design tasks. $\mathrm{O}_2 / \mathrm{CO}_2 / \mathrm{N}_2$ Perm only denotes the data statistics considering only one permeability and the generation results are presented in Table 5. The number of task conditions shown in the table does not include the timestep condition in the diffusion model.
282
+
283
+ <table><tr><td>Datasets</td><td># Molecule (Train/Validation/Test)</td><td># Heavy Atom Type in Training</td><td>Min # Atoms</td><td>Max # Atoms</td><td>Avg. # Atoms</td><td>Min # Bonds</td><td>Max # Bonds</td><td>Avg. # Bonds</td><td># Input Numerical Task Conditions</td><td># Input Categorical Task Conditions</td></tr><tr><td>Gas Perm</td><td>553 (331/111/111)</td><td>11</td><td>3</td><td>48</td><td>27.97</td><td>3</td><td>56</td><td>32.67</td><td>5</td><td>0</td></tr><tr><td>BACE</td><td>1332 (798/267/267)</td><td>8</td><td>10</td><td>50</td><td>33.67</td><td>10</td><td>54</td><td>36.44</td><td>2</td><td>1</td></tr><tr><td>BBBP</td><td>872 (522/175/175)</td><td>9</td><td>3</td><td>50</td><td>24.38</td><td>2</td><td>55</td><td>26.26</td><td>2</td><td>1</td></tr><tr><td>HIV</td><td>2372 (1422/475/475)</td><td>29</td><td>6</td><td>50</td><td>25.35</td><td>5</td><td>60</td><td>27.28</td><td>2</td><td>1</td></tr><tr><td>O2/N2</td><td>609 (474/119/16)</td><td>11</td><td>3</td><td>48</td><td>27.90</td><td>3</td><td>56</td><td>32.63</td><td>4</td><td>0</td></tr><tr><td>O2Perm only</td><td>629 (377/126/126)</td><td>11</td><td>2</td><td>48</td><td>27.42</td><td>3</td><td>56</td><td>32.08</td><td>3</td><td>0</td></tr><tr><td>CO2Perm only</td><td>584 (350/177/177)</td><td>11</td><td>2</td><td>48</td><td>27.59</td><td>3</td><td>56</td><td>32.23</td><td>3</td><td>0</td></tr><tr><td>N2Perm only</td><td>616 (369/123/124)</td><td>11</td><td>2</td><td>48</td><td>27.96</td><td>3</td><td>56</td><td>32.70</td><td>3</td><td>0</td></tr></table>
284
+
285
+ # A Details on the Denoising Model Component
286
+
287
+ # A.1 Numerical Condition Encoding
288
+
289
+ We explore several approaches for encoding numerical conditions. In addition to the clustering-based method, we consider:
290
+
291
+ 1. The direct encoding approach, which employs a linear layer to map a continuous number into a high-dimensional space.
292
+ 2. The interval-based approach, as described in [28], divides the label space into $N_{\text{Interval}}$ intervals. It then converts the number into an interval index, allowing us to apply one-hot encoding for the number.
293
+
294
+ # A.2 Neural Architecture for Conditions
295
+
296
+ Besides the AdaLN, there are two more options to integrate condition representation into molecular graph representations [34]:
297
+
298
+ 1. The In-Context conditioning approach adds the condition representation $\mathbf{c}$ to each row of the molecular graph representation $\mathbf{H}$ after mapping the $\mathbf{X}_G^t$ into $\mathbf{H}$ using the linear layer in the structure encoder.
299
+ 2. The Cross-Attention approach concatenates the timestep encoding vector with the condition representation from synthesis scores or task-related properties into a two-length sequence. In each Transformer encoder layer, this is followed by a cross-attention layer at the end of the standard multi-head self-attention layer.
300
+
301
+ # B Details on Datasets and Evaluation Methods
302
+
303
+ All experiments can be run on a single A6000 GPU card.
304
+
305
+ # B.1 Datasets and Task Conditions
306
+
307
+ As presented in Table 4, we collect popular datasets in prediction tasks for more challenging molecular generation tasks. We include a polymer dataset [40, 26] for material design. It consists of conditions of $\mathrm{O}_2$ , $\mathrm{CO}_2$ , and $\mathrm{N}_2$ , which study the numerical gas permeability for oxygen, carbon dioxide, and nitrogen, respectively. Additionally, we also study the generative performance of different models separately on the polymer data with $\mathrm{O}_2$ , $\mathrm{CO}_2$ , or $\mathrm{N}_2$ , as illustrated in Table 4. We also create three class-balanced molecule datasets from [46] for drug design: HIV, BBBP, and BACE, which study categorical properties related to the inhibition of HIV virus replication, blood-brain barrier permeability, and inhibition of human $\beta$ -secretase 1, respectively. We aim to generate synthesizable
308
+
309
+ ![](images/d0104f47d744cf10a2d3e68d43dddc7f2dc937179d096fd9996cfdc9235c2069.jpg)
310
+ (a) Histogram for Heavy Atom Type Distribution
311
+
312
+ ![](images/8835872871aca848a2710e9c71dcca7cca92fa8e7e0e1d2c777e005ee314e37f.jpg)
313
+ (b) Histogram for Bond Type Distribution
314
+ Figure 5: Histogram of Generated Distribution for Atom and Bond Types in Different Models. Results are calculated based on Table 1 for the polymer gas permeability tasks. We observe that the atom and bond type distributions from our Graph DiT's generated molecules are closer to those of the training data than other diffusion models. It indicates that Graph DiT has better capacity for learning molecular distributions.
315
+
316
+ molecules. Therefore, we add two numerical conditions for synthetic complexity scores [12, 8] for each of the tasks. For the gas separation polymer design task, we consider joint conditions for $\mathrm{O}_2$ and $\mathrm{N}_2$ and measure the selectivity $\mathrm{O}_2 / \mathrm{N}_2$ as the ratio between two gas permeability scores. All polymer gas permeabilities are scaled in the log space following previous work [29]. We focus on experiments for polymers and molecules within 50 nodes.
317
+
318
+ # B.2 Evaluation and Metrics
319
+
320
+ We randomly split the dataset into training, validation, and testing (reference) sets in a 6:2:2 ratio. We investigate more than eight metrics to systematically evaluate the generation performance. First, we assess generation validity (Validity). Second, we evaluate the distribution learning capacity of different models by measuring heavy atom type coverage (Coverage), internal diversity among the generated examples using Tanimoto similarity (Diversity), fragment-based similarity with the reference set (Similarity), and the Fréchet ChemNet Distance with the reference set (Distance). Third, we evaluate the model's controllability by measuring the mean absolute error (MAE) between the generated condition score and the actual condition scores if the condition is numerical; otherwise, we measure the accuracy score. We follow previous work [14] to use the random forest trained on all the available data as the Oracle evaluation function. For molecular optimization algorithms and existing predictor-guided diffusion models, we train other random forest predictors on the training set for conditional generation. Given the conditions in the test set, we report the generation performance by generating 10,000 examples for the six multi-conditional generation tasks and 1,000 examples for the polymer inverse design problem focused on selectivity.
321
+
322
+ # B.3 Datasets and Tasks in Figure 1
323
+
324
+ Using the same dataset from the $\mathrm{O}_2 / \mathrm{N}_2$ polymer inverse design task, we keep 100 polymers to provide condition sets for testing and split the rest into training and validation sets in a 0.65:0.35 ratio. We apply our proposed Graph DiT for both single-conditional and multi-conditional approaches, focusing on three properties: (1) Synth. score for synthesizability [12], (2) $\mathrm{O}_2$ permeability, and (3) $\mathrm{N}_2$ permeability. The single-conditional approach generates 30 polymers per condition for each test data point, totaling 9,000 polymers. In contrast, the multi-conditional approach generates 30 polymers for each set of conditions per test data point, resulting in 3,000 polymers. We rank these polymers based on the mean absolute error between the generated properties (evaluated by a random forest model trained on all the data to simulate the Oracle function) and the conditional property. For each test data point, we also rank the best multi-conditional polymer in different single-conditional
325
+
326
+ ![](images/42a0b459889a4c07a5ff5b7cf3ba30053fd6c039f243dc761ee84c211e6ec610.jpg)
327
+ (a) Single-Conditional Generation
328
+
329
+ ![](images/295a5e15e2f218e6b188a3a7185e93be31b0330bcce9b0d485d994dde2f7cbf0.jpg)
330
+ (b) Multi-Conditional Generation
331
+ Figure 6: We compare the average ranking of generated polymers with desirable properties. First, we generate three sets of polymers using a single-conditional approach for each condition. In (a), as shown in Figure 1, we find the ranking of the shared structure for each multi-condition requirement. In (b), polymers are generated using a multi-conditional approach, and for each, we identify the highest ranking among the three single-conditional sets. Then, we calculate the median of these maximum ranking positions, which is 16, approximately $2 \times$ better than single-conditional generation, which has a median value greater than 30.
332
+
333
+ sets. For the single-conditional approach, we identify a common polymer meeting various properties and visualize the minimum top $K$ value distribution across all 100 test points.
334
+
335
+ In addition to Figure 1, we compute the median rankings of the multi-conditionally generated polymers within the single-conditional sets. The results are shown in Figure 6. Both Figure 1 and Figure 6 demonstrate the advantages of multi-conditional generation over single-conditional generation.
336
+
337
+ # C Details on Multi-Conditional Generation Results
338
+
339
+ We show results for three polymer generation tasks in Table 1 and molecule generation tasks in Table 2. As complementary results for Table 1, we present new results on generation using only one gas permeability in Table 5. We also compare the distributions of atom and bond types between generated and training data in Figure 5. Furthermore, Figure 7 visualizes the two-dimensional molecular data distribution of both training and generated molecules across various generative models.
340
+
341
+ # C.1 Discussion on Diffusion Model Baselines
342
+
343
+ While diffusion models like GDSS [22] and DiGress [43] show promise in unconditional tasks, their performance in multi-conditional generations needs improvement for fitting training distributions and achieving more controllable results. As indicated by Figure 5(a), the generation of GDSS [22] often collapses to carbon elements with Gaussian noise in the continuous diffusion state-space. MOOD [25] improves atom type coverage by adding predictor guidance and an out-of-distribution hyper-parameter, but it is hard to fit the training distribution, as visualized in Figure 7(h). DiGress and its predictor-guided variant [43] (i.e., DiGress v2), using discrete state-space and transition matrices for diffusion noise, outperform GDSS and MOOD in distribution fitting and internal diversity in Tables 1, 2 and 5. However, as indicated in Figure 7(e) and Figure 7(f), these two models still generate too many out-of-distribution examples without justification of the generalization capacity. While GDSS and MOOD show lower average MAE in polymer conditional generation tasks, their subpar distribution learning performance and the results from Table 2 suggest that this may be due to the carbon element, which may be a confounder and affect the evaluation of the correlation between the model and controllability in polymer tasks.
344
+
345
+ # C.2 Discussion on Molecular Optimization Baselines
346
+
347
+ Popular molecular optimization baselines are competitive in molecular generation tasks. Earlier studies have noted their strong performance: Gao et al. [14] showed their effectiveness in the standard molecular optimizations with a combined optimization target, and Tripp and Hernández-Lobato [41] found that genetic algorithms often outperform recent methods in unconditional generation. We
348
+
349
+ Table 5: Generation of 10K Polymers: Results on a numerical synthesizability score (Synth.) and a numerical property (gas permeability for $\mathrm{O}_2$ , $\mathrm{N}_2$ , or $\mathrm{CO}_2$ ). MAE is calculated between input conditions and generated properties. Best results are highlighted.
350
+
351
+ <table><tr><td rowspan="2">Tasks</td><td rowspan="2">Model</td><td rowspan="2">Validity ↑( w/o rule checking)</td><td colspan="4">Distribution Learning</td><td colspan="3">Condition Control</td></tr><tr><td>Coverage ↑</td><td>Diversity ↑</td><td>Similarity ↑</td><td>Distance ↓</td><td>Synth. ↓</td><td>Property ↓</td><td>Avg. ↓</td></tr><tr><td rowspan="10">Synth. &amp; O2Perm</td><td>Graph GA</td><td>1.0000 (N.A.)</td><td>11/11</td><td>0.8885</td><td>0.9180</td><td>8.3925</td><td>1.3254</td><td>1.8962</td><td>4.4521</td></tr><tr><td>MARS</td><td>1.0000 (N.A.)</td><td>10/11</td><td>0.2263</td><td>0.5170</td><td>26.6354</td><td>0.8502</td><td>1.8853</td><td>3.6472</td></tr><tr><td>LSTM-HC</td><td>0.9896 (N.A.)</td><td>10/11</td><td>0.8898</td><td>0.8015</td><td>17.5424</td><td>1.2727</td><td>1.1323</td><td>3.8278</td></tr><tr><td>JTVAE-BO</td><td>1.0000 (N.A.)</td><td>8/11</td><td>0.7672</td><td>0.8895</td><td>21.3698</td><td>0.9703</td><td>1.3257</td><td>3.2971</td></tr><tr><td>DiGress</td><td>0.9934 (0.3756)</td><td>11/11</td><td>0.9156</td><td>0.2648</td><td>19.9364</td><td>2.5093</td><td>1.6424</td><td>5.2492</td></tr><tr><td>DiGress v2</td><td>0.9842 (0.4237)</td><td>11/11</td><td>0.9204</td><td>0.2311</td><td>20.4500</td><td>2.3444</td><td>1.6445</td><td>5.1255</td></tr><tr><td>GDSS</td><td>0.9910 (0.4482)</td><td>1/11</td><td>0.8891</td><td>0.0058</td><td>36.5735</td><td>1.6074</td><td>1.4803</td><td>4.3219</td></tr><tr><td>MOOD</td><td>0.9952 (0.4764)</td><td>9/11</td><td>0.8898</td><td>0.0072</td><td>36.0428</td><td>1.5089</td><td>1.4595</td><td>4.2277</td></tr><tr><td>Graph DiT-LC (Ous)</td><td>0.9826 (0.8974)</td><td>11/11</td><td>0.8941</td><td>0.9662</td><td>5.8940</td><td>1.1302</td><td>0.8341</td><td>3.1345</td></tr><tr><td>Graph DiT (Ours)</td><td>0.8242 (0.8974)</td><td>11/11</td><td>0.8788</td><td>0.9688</td><td>5.4287</td><td>1.0672</td><td>0.7843</td><td>2.9442</td></tr><tr><td rowspan="10">Synth. 
&amp; N2Perm</td><td>Graph GA</td><td>1.0000 (N.A.)</td><td>11/11</td><td>0.8806</td><td>0.8760</td><td>9.4945</td><td>1.2593</td><td>2.3122</td><td>4.7050</td></tr><tr><td>MARS</td><td>1.0000 (N.A.)</td><td>10/11</td><td>0.1118</td><td>0.3269</td><td>33.6684</td><td>2.1089</td><td>2.3316</td><td>5.8333</td></tr><tr><td>LSTM-HC</td><td>0.9901 (N.A.)</td><td>10/11</td><td>0.8924</td><td>0.7804</td><td>17.6290</td><td>1.4530</td><td>1.2798</td><td>4.1070</td></tr><tr><td>JTVAE-BO</td><td>1.0000 (N.A.)</td><td>10/11</td><td>0.7741</td><td>0.7264</td><td>22.5093</td><td>0.9414</td><td>1.2874</td><td>3.2911</td></tr><tr><td>DiGress</td><td>0.9798 (0.3326)</td><td>11/11</td><td>0.9150</td><td>0.2670</td><td>21.1077</td><td>2.7562</td><td>2.0242</td><td>5.9103</td></tr><tr><td>DiGress v2</td><td>0.9801 (0.3759)</td><td>11/11</td><td>0.9180</td><td>0.1842</td><td>20.7820</td><td>2.4734</td><td>1.9538</td><td>5.5614</td></tr><tr><td>GDSS</td><td>0.9941 (0.8000)</td><td>3/11</td><td>0.8543</td><td>0.0030</td><td>33.3815</td><td>1.5277</td><td>1.5886</td><td>4.3365</td></tr><tr><td>MOOD</td><td>0.9980 (0.4453)</td><td>11/11</td><td>0.8857</td><td>0.0028</td><td>34.9385</td><td>1.5087</td><td>1.7018</td><td>4.3261</td></tr><tr><td>Graph DiT-LC (Ous)</td><td>0.9803 (0.9054)</td><td>11/11</td><td>0.8894</td><td>0.9670</td><td>5.9049</td><td>1.1908</td><td>0.9721</td><td>3.3105</td></tr><tr><td>Graph DiT (Ours)</td><td>0.8165 (0.9054)</td><td>11/11</td><td>0.8726</td><td>0.9713</td><td>5.7943</td><td>1.0969</td><td>0.9472</td><td>3.1124</td></tr><tr><td rowspan="10">Synth. 
&amp; CO2Perm</td><td>Graph GA</td><td>1.0000 (N.A.)</td><td>11/11</td><td>0.8889</td><td>0.9134</td><td>7.1234</td><td>1.3427</td><td>1.8548</td><td>4.4079</td></tr><tr><td>MARS</td><td>1.0000 (N.A.)</td><td>11/11</td><td>0.8460</td><td>0.9083</td><td>8.9201</td><td>1.1623</td><td>1.4808</td><td>3.8612</td></tr><tr><td>LSTM-HC</td><td>0.9893 (N.A.)</td><td>10/11</td><td>0.8938</td><td>0.7262</td><td>16.1368</td><td>1.4018</td><td>1.1436</td><td>3.8079</td></tr><tr><td>JTVAE-BO</td><td>1.0000 (N.A.)</td><td>7/11</td><td>0.7671</td><td>0.7978</td><td>22.9047</td><td>1.0550</td><td>1.1663</td><td>3.2622</td></tr><tr><td>DiGress</td><td>0.9802 (0.3741)</td><td>11/11</td><td>0.9100</td><td>0.1576</td><td>19.6117</td><td>2.4554</td><td>1.5377</td><td>5.1926</td></tr><tr><td>DiGress v2</td><td>0.9868 (0.2486)</td><td>11/11</td><td>0.9137</td><td>0.2686</td><td>20.1563</td><td>2.8087</td><td>1.5590</td><td>5.5939</td></tr><tr><td>GDSS</td><td>0.9876 (0.6987)</td><td>1/11</td><td>0.8786</td><td>0.0026</td><td>32.1841</td><td>1.4679</td><td>1.3584</td><td>4.0440</td></tr><tr><td>MOOD</td><td>0.9881 (0.7880)</td><td>11/11</td><td>0.8690</td><td>0.0025</td><td>30.9310</td><td>1.5463</td><td>1.3443</td><td>4.1464</td></tr><tr><td>Graph DiT-LC (Ours)</td><td>0.9836 (0.8841)</td><td>11/11</td><td>0.8916</td><td>0.9247</td><td>5.7776</td><td>1.2991</td><td>0.8603</td><td>3.3394</td></tr><tr><td>Graph DiT (Ours)</td><td>0.8291 (0.8841)</td><td>11/11</td><td>0.8743</td><td>0.9403</td><td>5.6815</td><td>1.2225</td><td>0.7728</td><td>3.1155</td></tr></table>
352
+
353
+ Table 6: Complete results on 1,000 generated polymers for the inverse $\mathrm{O}_2 / \mathrm{N}_2$ gas separation polymer design. # UB is the count of generated polymers successfully identified (by Oracle functions) as upper bound instances defined by Robeson [38].
354
+
355
+ <table><tr><td rowspan="2">Model</td><td rowspan="2">Validity ↑
356
+ w/o rule checking</td><td colspan="4">Distribution Learning</td><td colspan="6">Condition Control</td></tr><tr><td>Coverage ↑</td><td>Diversity ↑</td><td>Similarity ↑</td><td>Distance ↓</td><td>Synth. ↓</td><td>O2↓</td><td>N2↓</td><td>Avg. MAE ↓</td><td># UB ↑</td><td></td></tr><tr><td>Graph GA</td><td>1.0000 (N.A.)</td><td>10/11</td><td>0.8848</td><td>0.3734</td><td>29.9060</td><td>1.6545</td><td>1.8720</td><td>2.1984</td><td>8.7726</td><td>57</td><td></td></tr><tr><td>MARS</td><td>1.0000 (N.A.)</td><td>11/11</td><td>0.7886</td><td>0.1922</td><td>32.5679</td><td>1.4909</td><td>1.7940</td><td>2.2170</td><td>9.3112</td><td>51</td><td></td></tr><tr><td>LSTM</td><td>0.9910 (N.A.)</td><td>10/11</td><td>0.8940</td><td>0.1758</td><td>37.1556</td><td>1.3553</td><td>1.5066</td><td>1.8791</td><td>10.5529</td><td>45</td><td></td></tr><tr><td>JTVAE-BO</td><td>1.0000 (N.A.)</td><td>7/11</td><td>0.7849</td><td>0.2541</td><td>33.6430</td><td>2.0723</td><td>1.7653</td><td>2.1998</td><td>9.7722</td><td>33</td><td></td></tr><tr><td>DiGress</td><td>0.9930 (0.3120)</td><td>9/11</td><td>0.9019</td><td>0.1156</td><td>30.5716</td><td>1.6892</td><td>1.1680</td><td>1.3329</td><td>8.6061</td><td>69</td><td></td></tr><tr><td>DiGress v2</td><td>0.9940 (0.1760)</td><td>11/11</td><td>0.9075</td><td>0.2793</td><td>29.6239</td><td>2.1370</td><td>1.1847</td><td>1.3743</td><td>8.4707</td><td>85</td><td></td></tr><tr><td>GDSS</td><td>0.9910 (0.9180)</td><td>1/11</td><td>0.8210</td><td>0.0000</td><td>41.7499</td><td>2.3508</td><td>1.4097</td><td>1.8328</td><td>11.7813</td><td>56</td><td></td></tr><tr><td>MOOD</td><td>0.9960 (0.5640)</td><td>9/11</td><td>0.8803</td><td>0.0000</td><td>46.6095</td><td>1.5963</td><td>1.3921</td><td>1.7360</td><td>12.8551</td><td>81</td><td></td></tr><tr><td>Graph DiT-LC</td><td>0.975 (0.7170)</td><td>10/11</td><td>0.8966</td><td>0.6401</td><td>26.1647</td><td>1.5119</td><td>0.8388</td><td>0.9035</td><td>3.2541</td><td>90</td><td></td></tr><tr><td>Graph DiT 
(Ours)</td><td>0.7800(0.7170)</td><td>11/11</td><td>0.8838</td><td>0.6028</td><td>26.3378</td><td>1.4081</td><td>0.7476</td><td>0.8213</td><td>2.9770</td><td>68</td><td></td></tr></table>
357
+
358
+ observe their competitive performance in multi-conditional settings, characterized by high validity, good atom type coverage, and distribution similarity in Tables 1, 2 and 5. MARS [47] and JTVAE [21] perform well for controlling the synthetic accessibility score [12] but are less effective at controlling specific task properties like gas permeability, BACE, BBBP, and HIV. For example, in generation tasks with the categorical task condition, the generated examples only achieve around $50\%$ accuracy in hitting the input condition.
359
+
360
+ ![](images/50e58927a9a38371bcce880b66bc265fb83e55186484c3df044ebb54e57097b3.jpg)
361
+ (a) Graph GA
362
+
363
+ ![](images/808976fd478f2bd362bba2f6ecb6d03666a1b91438d024db9bc5d9bafb194818.jpg)
364
+
365
+ ![](images/bc506c1d2946c6de4b6bedaaa81859b26300dc2bbcf0491a375f5d160bef1e83.jpg)
366
+
367
+ ![](images/d3c203fd7c89aad977dfbc82b9d291d1febb509c2ccfd1f10d990d9ae3741cbd.jpg)
368
+
369
+ ![](images/e7f8bd00c42c07c6c1e1b74602f3170629d51dcf2a1907b18d3ad2f0b99318c6.jpg)
370
+ (b) MARS
371
+
372
+ ![](images/c98da387f4e3cd1e34f6375906108744e5eff7e5e9b55e8c62c274640396d371.jpg)
373
+
374
+ ![](images/e938a581f449a154d31d52dc5779d69b63684ff06aa36a8edddc75573bbfaf6f.jpg)
375
+ (c) LSTM-HC
376
+
377
+ ![](images/941b0bc0d50a06d71b7dd1d5e3e96cefb51ac006c23e371a9b3d62ea2429a0ea.jpg)
378
+
379
+ ![](images/e6a237063b5a512d515c49e1d7f1944082bee34e49dbdc082bdaf73010cbdd35.jpg)
380
+
381
+ ![](images/09da79989b5c8eba0c82c01fc620598eb4a66bfefe90a5874a54de109854a287.jpg)
382
+ (d) JTVAE-BO
383
+
384
+ ![](images/c79ac01e6ab394e2ac2266a94206357bd7c0773b706352cd439dd68bd37ead66.jpg)
385
+
386
+ ![](images/65eab53e06620c0413c143e6fffeb3d7d4f026225292be92568ef763f50b561e.jpg)
387
+
388
+ ![](images/1d348918934aae14be68b80af85c6d360ff8fbba05d0f7a5d401b3670cc191bd.jpg)
389
+
390
+ ![](images/c80ae01c5a5b5733d44c823dc8c5ea534ae844861576842c17229b959dd34e78.jpg)
391
+ (e) DiGress
392
+
393
+ ![](images/43237b8f10847ada546394b68c2b3e32dcdbfb3124d379edd7268163f716d0fb.jpg)
394
+
395
+ ![](images/7aedc929f2cdfdfc4994de32827599a4839a0d1a826ba369bf630a96b48a865c.jpg)
396
+ (f) DiGress v2
397
+
398
+ ![](images/94062369a64bbbc92568e8319a35c21b7980949e23d9b9e0d132d09b8b40082e.jpg)
399
+
400
+ ![](images/15a9dc3886a6b4e520e7c0140677c3dcbd18a6c760270ecbeee44f9a976b3a39.jpg)
401
+
402
+ ![](images/77273a4a8f7660697dcf60fa35745c0c76055b4f055d2580858d7c6b0831013e.jpg)
403
+
404
+ ![](images/09e8fdc0458154d7ab01aeb05293b942c6c9977dd385cb78582475a23a4dcc16.jpg)
405
+ (g) GDSS
406
+
407
+ ![](images/a7689c220705bbf591f3ec280541334455ff62dc5fed87951e0c1145bb0195a1.jpg)
408
+
409
+ ![](images/904f932ddba11203aa8bdf1bf45e3e5d5da5aa2faa50895d05b005df9c8ba6e7.jpg)
410
+
411
+ ![](images/bfa201a9268ca5454e55453dce8c7963b6a75161aa5edb411627190bd218986c.jpg)
412
+ (h) MOOD
413
+
414
+ ![](images/9985c6e8f9da49334fed186be158dc839611fb8b92273c743050c8ce78a45864.jpg)
415
+
416
+ ![](images/c2ae4d35e4505d9b1bd4385f996be29f43a416d7eff23f91b0840775b7347865.jpg)
417
+ Figure 7: Distribution of training (grey-colored) and generated (orange-colored) molecules. The generated distribution in Figure 7(i) is from Graph DiT, and the visualization shows that the generated molecular data points fit the training distribution well, with reasonable interpolation and extrapolation in the training data space.
418
+
419
+ ![](images/da6d02aa5e1882cad87402309a1bd04ee302eccf94af17221be0b2dd727aa6c8.jpg)
420
+ (i) Graph DiT (Ours)
421
+
422
+ ![](images/7d6d5c29d661f342665a4e636fa645a52481885028aee902bec5da4c4df837ef.jpg)
423
+
424
+ # C.3 Discussion on Training Dynamics
425
+
426
+ In Figure 8, we illustrate changes in various indicators on the validation set during model training. We note an increase in generated validity and similarity to the validation reference set, along with a decrease in distance to the reference set and errors between generated properties and conditions, indicating gradual improvement in conditional generation over epochs. However, a trade-off between distribution fitting and internal diversity is observed in our current model, suggesting that further work on enhancing generation diversity could be promising.
427
+
428
+ ![](images/8f3b6b0c57e3929352dbfe7ded993179ddb0e3c5f02e6eec26b37a3f6334caad.jpg)
429
+
430
+ ![](images/114e1945a879b8a464f40e355893de85b8f1af3cd5ecf35bc8a758486a5be042.jpg)
431
+
432
+ ![](images/47366ced0785205386e5d486225f372c8d1e35b8874d8d0d84a0049bec1f9e18.jpg)
433
+
434
+ ![](images/10180bc6da28c661b0c509515ad7b438f42747032a734fc35ee61af87bba72ce.jpg)
435
+ (a) $\mathrm{O}_2\mathrm{Perm}$ Only
436
+
437
+ ![](images/5d9d6c595b0c664cbb102de124a8f9aecedc16fd66ac2879394b4acbb431803b.jpg)
438
+
439
+ ![](images/9de6183f31578223f0e83b958a8d4e65cca5d239b620df4baa8e6de7d0db7c7e.jpg)
440
+
441
+ ![](images/c27358476e4768467ef308fac25fd8271ce989df502842b03d16a9db89fa641f.jpg)
442
+
443
+ ![](images/49842ddd503f1dda0ca4d00137031eab9ade2b8d39a816decded4f4c77229c28.jpg)
444
+
445
+ ![](images/2b83a197327de4f56a0ac3f10f656530d526e77c98a2af7cdca200d3db33d0e6.jpg)
446
+
447
+ ![](images/c8ff95b67d11dcd49eeeeb1bf9c79391063596fe4970316ec239c16175256b7a.jpg)
448
+ (b) $\mathrm{CO}_2$ Perm Only
449
+
450
+ ![](images/30dc8a9dc4f44515e4fdd1e1e44209c9b9ede8bacf753cbae070d45612f85172.jpg)
451
+
452
+ ![](images/ce573a9591cb459245a656524b24809168b1c11d7ea53786dfc4eadf58c15da6.jpg)
453
+
454
+ ![](images/aa821ee0e282e64e3f3e60ab9caf90da9e6521df514c8398033867c5c3bf14e8.jpg)
455
+
456
+ ![](images/0c12c9b69d0a4f65a060980c96d2a74cfa1b8c3a8b0f5b142016920a660885de.jpg)
457
+
458
+ ![](images/e600dd30e0144289eb777e1e77e04cdf966f2b1904b46fcb777ae0931dcba0f8.jpg)
459
+
460
+ ![](images/f846b6b0788f8ad94965b834337d4228d1d7a7855b70ec835f02407affce5aa4.jpg)
461
+ (c) $\mathrm{N}_2$ Perm Only
462
+
463
+ ![](images/b0077ac516553c3e2c29dd2885052e0dfc12ca358be4d0436f3574e618a34a6d.jpg)
464
+
465
+ ![](images/307ef26c41a3e929bd935f756c73c5f6633b1dfacc313c6b4443fa235da5fa54.jpg)
466
+
467
+ ![](images/5fa8275c397af6bd1f6326b9b6213a5d68b560cde1128941082d58d343d8b83f.jpg)
468
+
469
+ ![](images/6b99a614cc8b396f2d724ca7da1736492035650c0c93416e6dc3db57c923ab52.jpg)
470
+
471
+ ![](images/b7cda7d202ca0fa143669443ded96ae2e521a3e85ce6aeca6a269d158b71792a.jpg)
472
+
473
+ ![](images/c5454dc66df018d80aabff530028c9696ad17f38d1010ca25e54e515e9a5dc47.jpg)
474
+
475
+ ![](images/13421e695a071799e2436ee94979475842f4e8b180561a5af6144e17fd9a8747.jpg)
476
+ (d) BACE
477
+
478
+ ![](images/c1194c9a245e46adb98a3393f8308df367f09264748418a13d8ab688a1cd1449.jpg)
479
+
480
+ ![](images/2602de3818c3f2367f8ae0813f906a880b9b670b60b68ed987309b0e59f415d8.jpg)
481
+
482
+ ![](images/50336f53462359f02d0d2617fa67af0513b6ca1e650fc86e09c2bc5701b78abb.jpg)
483
+
484
+ ![](images/fc6c8b2c18c34432d6d8014e38be94ede8049e60a64a7d207441c17b204eb482.jpg)
485
+
486
+ ![](images/30111de36c677c80996ccd1d15da18e85d4424c4840b3b300d690d6cea974e50.jpg)
487
+ (e) BBBP
488
+
489
+ ![](images/6ca3b4e25ed9c812ec820c061fbbdb2a001bf736e23e1565fd26fa706a44e278.jpg)
490
+ Figure 8: Change of indicators on the validation set during model training. The generated validity and similarity to the validation reference set have increased, accompanied by a decrease in distance to the reference set and errors between generated properties and conditions. We also observe a trade-off between distribution fitting and internal diversity over epochs.
491
+
492
+ ![](images/78e0ba35c7da8aafd6add7ce44148dd7d9caacf366fcf5cd29784be979432411.jpg)
493
+
494
+ ![](images/8fff49a906f21259d46075aec17ad1a1d91cbe9d9c8f17109548490fb663b3b8.jpg)
495
+
496
+ ![](images/4dcc092d1147877cb734f42667dff8630364ae9457854685a971461e07b65a4d.jpg)
497
+
498
+ ![](images/ff0eab7d854e1bf4596007a2faffdda20500f3dbf2aac6703dc6469cfa6a7e4d.jpg)
499
+
500
+ ![](images/54100ca2fd52d8e413fe2cdd4ca904d4a50c016a61e9975e689fc6d9df300b1e.jpg)
501
+
502
+ ![](images/379502f32b8af9e70b90e7f89d305128a5cfe12eddb702b657026b6c9dba949b.jpg)
503
+ (f) HIV
504
+
505
+ ![](images/c7ae9b564d43f8c37161325b8e5b6d2907246bb5b6ee26249ec37997b489027b.jpg)
506
+
507
+ # C.4 Discussion on Uniqueness and Novelty
508
+
509
+ We evaluate the model's performance on Novelty and Uniqueness. Unlike unconditional generation, multi-conditional generation involves generating multiple possible molecules under the same condition. Therefore, we compute these metrics across different sets of conditions rather than for generated molecules under the same conditions. Results are presented in Table 7.
510
+
511
+ Graph DiT demonstrates reasonable performance on these metrics. However, higher Novelty and Uniqueness values do not necessarily indicate better performance, as they may not reflect the model's ability to design satisfactory molecules with desirable properties. Moreover, these values risk leading to misleading conclusions. For instance, AddCarbon achieves nearly perfect scores (99.94% Novelty
512
+
513
+ Table 7: Comparison of Novelty and Uniqueness across different conditions
514
+
515
+ <table><tr><td>Metric</td><td>Graph GA</td><td>MARS</td><td>LSTM-HC</td><td>JTVAE-BO</td><td>DiGress</td><td>DiGress v2</td><td>GDSS</td><td>MOOD</td><td>Graph DiT</td></tr><tr><td>Novelty</td><td>0.9950</td><td>1.0000</td><td>0.9507</td><td>1.0000</td><td>0.9908</td><td>0.9799</td><td>0.9190</td><td>0.9867</td><td>0.9702</td></tr><tr><td>Uniqueness</td><td>1.0000</td><td>0.7500</td><td>0.9550</td><td>0.6757</td><td>1.0000</td><td>0.9730</td><td>0.1622</td><td>0.9820</td><td>0.8829</td></tr></table>
516
+
517
+ and $99.86\%$ Uniqueness) according to [37, 41], yet it randomly adds carbon atoms to existing molecules, resulting in new molecules that are not practically useful [37].
518
+
519
+ # D Details on Polymer Inverse Design
520
+
521
+ We aim to design polymers with high $\mathrm{O_2}$ and low $\mathrm{N}_2$ permeability, showing refined control of models over these properties. This is reflected in the selectivity, defined as the $\mathrm{O_2 / N_2}$ permeability ratio. Robeson [38] has identified an inherent trade-off between gas permeability and selectivity, known as the upper-bound. Ideally, high-performance polymers should fall in the above-the-bound region, demonstrating an effective combination of permeability and selectivity. We have 609 polymers with annotated permeability values for both gases. 16 above-the-bound polymers are included in the test/reference set and excluded from the training set. We generate 1,000 polymers conditional on test set labels.
522
+
523
+ # D.1 Survey Setup on Generated Polymers
524
+
525
+ ![](images/be508fa4f61a1c09ba311fc0b506d706af22b45530bd9ad8229ef49add5b253f.jpg)
526
+ Figure 9: The reference polymer structure in the case study has conditions $\{\mathrm{SAS} = 3.8$ , $\mathrm{SCS} = 4.3$ , $\mathrm{O}_2\mathrm{Perm} = 34.0$ , $\mathrm{N}_2\mathrm{Perm} = 5.2\}$ .
527
+
528
+ We aim to gather expert evaluations on the generation performance of various methods. We conduct a study using a test data point from the $\mathrm{O}_2 / \mathrm{N}_2$ gas separation inverse design task, taking its properties as conditions. The structure of the selected data point is presented in Figure 9. We display 25 generated polymers, each with its properties, alongside three real polymers from the training dataset as references. The first real polymer serves as the test reference, while the other two, similar in properties to the first, also aid experts in assessing the generated polymers. The properties of these generated polymers are predicted using a well-trained random forest model. Experts are asked to rank the generated polymers from 1 to 25, considering: (1) Structures of real polymers with desirable properties; (2) Predicted properties of generated polymers, displayed beneath each visualization. Here, a rank of 1 represents the best example as per domain knowledge, while 25 is the least favorable. Rankings are then converted to utility scores (UtS) ranging from 0 to 1 using $\frac{1}{\text{ranking}}$ , allowing us to quantify the relative performance of different generation methods. The agreement score (AS) could be obtained by $\exp(-25 \times \text{Variance}(\text{UtS}_1, \text{UtS}_2, \text{UtS}_3, \text{UtS}_4))$ , where $\text{UtS}_i$ denotes the utility score from the $i$ -th domain expert. (3) Finally, we select top-3 polymers for each generative model and present them in Figure 3.
529
+
530
+ # D.2 Results on Inverse Design
531
+
532
+ We present the inverse design results of all 1,000 generated polymers in validity, distribution learning, and condition control in Table 6. # UB is the count of generated polymers successfully identified as upper bound instances. Higher # UB indicates that Graph DiT has a higher likelihood of generating candidates for excellent $\mathrm{O}_2$ and $\mathrm{N}_2$ gas separation. The smallest MAE across most properties and a $9.9\%$ average MAE improvement over baselines highlight Graph DiT's superior control in generating examples closely aligned with multiple conditions.
533
+
534
+ ![](images/d3e9935772fde0ec5ec88c163aa509952da7e92bcf05940e05c38f25ff81323a.jpg)
535
+ (a) Changes of Validity
536
+
537
+ ![](images/71271e7bd15691459a0b4786a1e5ddc626c456c78e2c1a17e78efe85e576ddbf.jpg)
538
+ (b) Changes of MAE to the target Synthesizability
539
+
540
+ ![](images/0b271210db8c0147709a11c9f947ac72c0e28ff92d7242852d5a4511f114656b.jpg)
541
+ (c) Changes of MAE to the target $N_{2}$
542
+
543
+ ![](images/aa0d6f953b42dedfdcd0d5f06780e3f0d6e355a192f8a0863bd9d162c454142f.jpg)
544
+ (d) Changes of MAE to the target $O_2$
545
+ Figure 10: Analysis of Model Controllability when Varying $N_{2}$ Values: The true $N_{2}$ value from the test set is 213.75. We note that the controllability performance (i.e., MAE value) for $N_{2}$ and $O_{2}$ is measured in log space.
546
+
547
+ # E Details and More Results on Model Analysis
548
+
549
+ # E.1 Case Studies on Generation Controllability
550
+
551
+ We conduct a new case study on the $O_2 / N_2$ polymer dataset, studying the controllability on three properties (the synthesizability score and the $O_2$ and $N_2$ properties) with varied $N_2$ property values. We select a polymer example from the test set and vary its $N_2$ while keeping other properties fixed. The $N_2$ property from the test polymer is 213.75, and we vary it from 0 to 1000. We sample 50 values within this range and generate 100 polymer graphs conditioned on multiple properties with each sampled value. We evaluate various metrics, including the chemical validity of the generated polymers.
552
+
553
+ We visualize results in Figure 10. We consistently observe that validity and controllability performance improve as the values approach 213.75, derived from a real test polymer. Conversely, performance deteriorates when the sampled $N_{2}$ values are closer to the extremes of the sampling range (0 or 1000). This observation underscores the interdependency between conditions, where less frequent combinations of different properties may be more challenging to learn. Moreover, the model performs well across a relatively large range from 0 to 1000 in terms of validity, $O_{2}$ , and synthesizability score control. This demonstrates good generalization of the proposed method in capturing complex condition interdependencies.
554
+
555
+ ![](images/b0da42c0f2882339000c5be1812d22bc9243df3713f2ac8a4c3d2777f8c98eea.jpg)
556
+ Figure 11: Ablation studies on the final MLP layer
557
+
558
+ Table 8: Training Performance of Oracle Methods: we train the models on all polymers or small molecules in a task to simulate the Oracle. Results from the random forest model are highlighted because it has the lowest training MAE and highest training AUC.
559
+
560
+ <table><tr><td></td><td>O2Perm (MAE)</td><td>N2Perm (MAE)</td><td>CO2Perm (MAE)</td><td>BACE (AUC)</td><td>BBBP (AUC)</td><td>HIV (AUC)</td></tr><tr><td>Random Forest</td><td>0.3662</td><td>0.4006</td><td>0.3486</td><td>0.9895</td><td>0.9954</td><td>0.9996</td></tr><tr><td>Gaussian Process</td><td>1.9631</td><td>2.3806</td><td>1.8543</td><td>0.9610</td><td>0.9943</td><td>0.9511</td></tr><tr><td>Support Vector Machine</td><td>0.7462</td><td>0.9509</td><td>0.8594</td><td>0.8889</td><td>0.9472</td><td>0.9304</td></tr></table>
561
+
562
+ # E.2 Ablation Studies on Final MLP
563
+
564
+ In addition to the three components related to conditioning effectiveness of Graph DiT studied in Section 4.4, we also examine the importance of the final layer MLP for conditional graph denoising. Results in Figure 11 show that MLP significantly outperforms a linear layer [34].
565
+
566
+ # E.3 Details on Oracle Simulation
567
+
568
+ We train three types of Oracles based on Random Forest, Gaussian Process, and Support Vector Machine on all polymers or molecules in a task to evaluate the properties of generated polymers conditional on $\mathrm{O}_2\mathrm{Perm}$ only, $\mathrm{N}_2\mathrm{Perm}$ only, $\mathrm{CO}_2\mathrm{Perm}$ only, BACE, BBBP, or HIV. The training performance (MAE or AUC) is presented in Table 8. Our findings show that the random forest achieves the lowest MAE and highest AUC scores, leading us to select it for simulating oracles in our generation evaluation process.
2401.13xxx/2401.13858/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b53e6437e62c4f695b5640079e72762a97ee8cb76205da0eeb697a98dee46be9
3
+ size 1636713
2401.13xxx/2401.13858/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.13xxx/2401.13867/d452ce7d-f886-4a58-9d7e-59c57d5ab67f_content_list.json ADDED
@@ -0,0 +1,883 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Unmasking and Quantifying Racial Bias of Large Language Models in Medical Report Generation",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 168,
8
+ 84,
9
+ 831,
10
+ 133
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Yifan Yang, B.S. $^{1,2}$ , Xiaoyu Liu, B.S. $^{2}$ , Qiao Jin, M.D. $^{1}$ , Furong Huang, Ph.D. $^{2}$ , and Zhiyong Lu, Ph.D. $^{1,*}$",
17
+ "bbox": [
18
+ 159,
19
+ 162,
20
+ 847,
21
+ 209
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Author affiliations",
28
+ "text_level": 1,
29
+ "bbox": [
30
+ 144,
31
+ 236,
32
+ 326,
33
+ 253
34
+ ],
35
+ "page_idx": 0
36
+ },
37
+ {
38
+ "type": "text",
39
+ "text": "$^{1}$ National Institutes of Health (NIH), National Library of Medicine (NLM), National Center for Biotechnology Information (NCBI), Bethesda, MD 20894, USA",
40
+ "bbox": [
41
+ 144,
42
+ 278,
43
+ 848,
44
+ 359
45
+ ],
46
+ "page_idx": 0
47
+ },
48
+ {
49
+ "type": "text",
50
+ "text": "$^{2}$ University of Maryland at College Park, Department of Computer Science, College Park, MD 20742, USA",
51
+ "bbox": [
52
+ 144,
53
+ 385,
54
+ 848,
55
+ 437
56
+ ],
57
+ "page_idx": 0
58
+ },
59
+ {
60
+ "type": "text",
61
+ "text": "Corresponding author",
62
+ "text_level": 1,
63
+ "bbox": [
64
+ 144,
65
+ 518,
66
+ 366,
67
+ 535
68
+ ],
69
+ "page_idx": 0
70
+ },
71
+ {
72
+ "type": "text",
73
+ "text": "Zhiyong Lu, Ph.D., FACMI, FIAHSI",
74
+ "bbox": [
75
+ 144,
76
+ 543,
77
+ 462,
78
+ 561
79
+ ],
80
+ "page_idx": 0
81
+ },
82
+ {
83
+ "type": "text",
84
+ "text": "Senior Investigator",
85
+ "bbox": [
86
+ 146,
87
+ 569,
88
+ 319,
89
+ 587
90
+ ],
91
+ "page_idx": 0
92
+ },
93
+ {
94
+ "type": "text",
95
+ "text": "Deputy Director for Literature Search",
96
+ "bbox": [
97
+ 146,
98
+ 595,
99
+ 480,
100
+ 612
101
+ ],
102
+ "page_idx": 0
103
+ },
104
+ {
105
+ "type": "text",
106
+ "text": "National Center for Biotechnology Information (NCBI)",
107
+ "bbox": [
108
+ 146,
109
+ 620,
110
+ 626,
111
+ 639
112
+ ],
113
+ "page_idx": 0
114
+ },
115
+ {
116
+ "type": "text",
117
+ "text": "National Library of Medicine (NLM)",
118
+ "bbox": [
119
+ 146,
120
+ 646,
121
+ 462,
122
+ 665
123
+ ],
124
+ "page_idx": 0
125
+ },
126
+ {
127
+ "type": "text",
128
+ "text": "National Institutes of Health (NIH)",
129
+ "bbox": [
130
+ 146,
131
+ 671,
132
+ 450,
133
+ 690
134
+ ],
135
+ "page_idx": 0
136
+ },
137
+ {
138
+ "type": "text",
139
+ "text": "8600 Rockville Pike",
140
+ "bbox": [
141
+ 146,
142
+ 699,
143
+ 327,
144
+ 715
145
+ ],
146
+ "page_idx": 0
147
+ },
148
+ {
149
+ "type": "text",
150
+ "text": "Bethesda, MD 20894, USA",
151
+ "bbox": [
152
+ 146,
153
+ 724,
154
+ 394,
155
+ 741
156
+ ],
157
+ "page_idx": 0
158
+ },
159
+ {
160
+ "type": "text",
161
+ "text": "Tel: 301-594-7089",
162
+ "bbox": [
163
+ 146,
164
+ 750,
165
+ 315,
166
+ 766
167
+ ],
168
+ "page_idx": 0
169
+ },
170
+ {
171
+ "type": "text",
172
+ "text": "E-mail: zhiyong.lu@nih.gov",
173
+ "bbox": [
174
+ 146,
175
+ 776,
176
+ 396,
177
+ 795
178
+ ],
179
+ "page_idx": 0
180
+ },
181
+ {
182
+ "type": "text",
183
+ "text": "Abstract",
184
+ "text_level": 1,
185
+ "bbox": [
186
+ 147,
187
+ 84,
188
+ 235,
189
+ 99
190
+ ],
191
+ "page_idx": 1
192
+ },
193
+ {
194
+ "type": "text",
195
+ "text": "Large language models like GPT-3.5-turbo and GPT-4 hold promise for healthcare professionals, but they may inadvertently inherit biases during their training, potentially affecting their utility in medical applications. Despite few attempts in the past, the precise impact and extent of these biases remain uncertain. Through both qualitative and quantitative analyses, we find that these models tend to project higher costs and longer hospitalizations for White populations and exhibit optimistic views in challenging medical scenarios with much higher survival rates. These biases, which mirror real-world healthcare disparities, are evident in the generation of patient backgrounds, the association of specific diseases with certain races, and disparities in treatment recommendations, etc. Our findings underscore the critical need for future research to address and mitigate biases in language models, especially in critical healthcare applications, to ensure fair and accurate outcomes for all patients.",
196
+ "bbox": [
197
+ 144,
198
+ 124,
199
+ 853,
200
+ 570
201
+ ],
202
+ "page_idx": 1
203
+ },
204
+ {
205
+ "type": "text",
206
+ "text": "Main",
207
+ "text_level": 1,
208
+ "bbox": [
209
+ 147,
210
+ 84,
211
+ 200,
212
+ 99
213
+ ],
214
+ "page_idx": 2
215
+ },
216
+ {
217
+ "type": "text",
218
+ "text": "Recent advances in language modeling have made large language models (LLMs) like OpenAI's ChatGPT and GPT-4 widely available. These models have demonstrated remarkable abilities through their exceptional zero-shot and few-shot performance across a wide range of natural language processing (NLP) tasks, surpassing previous state-of-the-art (SOTA) models by a substantial margin $^{1,2}$ . Language models of this nature also hold significant promise in medical applications $^{3}$ . Their prompt-driven design and capacity for interactions based on natural language empower healthcare professionals to harness the potential of such potent tools in medical contexts $^{4}$ .",
219
+ "bbox": [
220
+ 147,
221
+ 116,
222
+ 853,
223
+ 398
224
+ ],
225
+ "page_idx": 2
226
+ },
227
+ {
228
+ "type": "text",
229
+ "text": "Recent studies suggest that ChatGPT has lower bias levels and can generate safe, impartial responses<sup>5</sup>. Nonetheless, it remains vulnerable to prompt manipulation with malicious intent<sup>6</sup>. While there has been evidence that LLMs can propagate race-based biases in medical contexts in small scale question answering or applications in medical education<sup>7,8</sup>, detecting inherent bias in LLMs remains a significant challenge. This difficulty is compounded by LLMs' linguistic proficiency, with studies showing little difference in sentiment and readability across racial groups in medical texts generated by LLMs<sup>9</sup>. Moreover, the extend of bias in LLMs has not been previously quantified in patient-centered applications. As attempts to use LLMs in medical report generation become increasingly prevalent<sup>10,11</sup>, understanding the inherent biases in such applications is vital for both healthcare providers and patients to make informed and effective use of these technologies.",
230
+ "bbox": [
231
+ 144,
232
+ 444,
233
+ 852,
234
+ 857
235
+ ],
236
+ "page_idx": 2
237
+ },
238
+ {
239
+ "type": "text",
240
+ "text": "Hence, our goal is to assess and quantify the extent of bias in the outputs of",
241
+ "bbox": [
242
+ 146,
243
+ 870,
244
+ 850,
245
+ 889
246
+ ],
247
+ "page_idx": 2
248
+ },
249
+ {
250
+ "type": "text",
251
+ "text": "LLMs when they are applied in medical contexts. Specifically, we examine the differences in reports generated by LLMs when analyzing hypothetical patient profiles. These profiles are created based on 200 real patients, extracted from published articles from PubMed Central (PMC), and represent four racial groups: White, Black, Hispanic, Asian. We split each LLM report into four sections for in-depth analysis and comparison: patient information paraphrasing, diagnosis generation, treatment generation, and outcome prediction, as depicted in Figure 1. In addition to the 200 patients, we have complied another 183 patients who passed away post-treatment, with the aim to evaluate LLMs' proficiency to predict patient prognosis. Using projected costs, hospitalization, and prognosis, we conducted a quantitative assessment of bias in LLMs, followed by detailed qualitative analysis. To further explore the progression of bias in the development of LLMs, we replicated the experiments using GPT-4, and compared its performance with GPT-3.5-turbo. Our study presents an in-depth analysis based on a total of 20,596 generated responses.",
252
+ "bbox": [
253
+ 144,
254
+ 84,
255
+ 853,
256
+ 563
257
+ ],
258
+ "page_idx": 3
259
+ },
260
+ {
261
+ "type": "text",
262
+ "text": "We find that GPT-3.5-turbo, when generating medical reports, tends to include biased and fabricated patient histories for patients of certain races, as well as generate racially skewed diagnoses. Among the 200 generated patient reports, 16 showed bias in rephrasing patient information and 21 demonstrated significant disparities in diagnoses. For example, GPT-3.5-turbo attributed unwarranted details to patients based on race, such as associating Black male patients with a safari trip in South Africa. Moreover, the model varied its",
263
+ "bbox": [
264
+ 144,
265
+ 609,
266
+ 850,
267
+ 824
268
+ ],
269
+ "page_idx": 3
270
+ },
271
+ {
272
+ "type": "image",
273
+ "img_path": "images/95fc76c25de2d19c05215661c924b3c016efe4cbdd7f6ea8547fa178e4edd26f.jpg",
274
+ "image_caption": [
275
+ "Figure 1. Evaluation procedure to probe bias in LLMs. This figure illustrates the workflow of our bias probing, using GPT-3.5-turbo and GPT-4. (a) real patient information from full-text articles in PubMed Central is collected. (b) LLM extracts patient information. (c) original race information is removed, and hypothetical race information is injected to create hypothetical patient profiles. (d) LLMs generate medical reports that include diagnosis, treatment, and prognosis. e, each report is split into 9 sections (excluding survival rate), where we analyze and quantify bias presence in the generated reports by four parts (Paraphrasing input patient information, generating diagnosis, generating treatment, predicting outcome). Dotted lines represent sections used for quantitative analysis, and solid line denotes sections used for qualitative analysis. For reports that contain survival rate prediction, we follow the same pipeline except we use both patient information and the actual treatment as input for report generation."
276
+ ],
277
+ "image_footnote": [],
278
+ "bbox": [
279
+ 152,
280
+ 90,
281
+ 848,
282
+ 570
283
+ ],
284
+ "page_idx": 4
285
+ },
286
+ {
287
+ "type": "text",
288
+ "text": "diagnoses for different races even under identical conditions. It tended to predict more severe diseases for Black patients in non-cancer cases. When presented with identical conditions, the model can diagnose HIV in Black",
289
+ "bbox": [
290
+ 144,
291
+ 818,
292
+ 852,
293
+ 902
294
+ ],
295
+ "page_idx": 4
296
+ },
297
+ {
298
+ "type": "text",
299
+ "text": "patients, Tuberculosis in Asian patients, and cyst in White patients. Reports showed a higher incidence of cancer in White patients and more severe symptoms for Black patients compared to others. These findings highlight the model's racial biases in medical diagnosis and patient information processing. We present some of the evidence in the generated report in appendix A.",
300
+ "bbox": [
301
+ 144,
302
+ 84,
303
+ 853,
304
+ 237
305
+ ],
306
+ "page_idx": 5
307
+ },
308
+ {
309
+ "type": "text",
310
+ "text": "Figure 2 shows that GPT-3.5-turbo exhibited racial bias in the disparities of treatment recommendations, cost, hospitalization, and prognosis predictions. The model favored White patients with superior and immediate treatments, longer hospitalization stays, and better recovery outcomes, which is also reflected in the higher projected cost. Through our qualitative analysis, we find 11 out of 200 contain significantly superior treatments for white patients than the others. For instance, White patients with cancer were recommended surgery, while Black patients received conservative care in the ICU. These bias examples are detailed in Appendix A.",
311
+ "bbox": [
312
+ 144,
313
+ 280,
314
+ 853,
315
+ 565
316
+ ],
317
+ "page_idx": 5
318
+ },
319
+ {
320
+ "type": "text",
321
+ "text": "Figure 2a reveals that GPT-3.5-turbo predicts higher costs for White patients more frequently than for other racial groups, with $18.00\\%$ more than Black patients (White $59.00\\%$ v. Black $41.00\\%$ ), $21.00\\%$ more than Asian patients (White $60.50\\%$ v. Asian $39.50\\%$ ), $14.00\\%$ more than Hispanic patients (White $57.00\\%$ v. Hispanic $43.00\\%$ ). Figure 2b demonstrates the model's tendency to predict longer hospital stays for White patients, with $17.00\\%$ more than Black patients (White $58.50\\%$ v. Black $41.50\\%$ ), $27.00\\%$ more than Asian patients (White $63.50\\%$ v. Asian $36.50\\%$ ), $14.50\\%$ more than Hispanic patients (White",
322
+ "bbox": [
323
+ 144,
324
+ 608,
325
+ 853,
326
+ 859
327
+ ],
328
+ "page_idx": 5
329
+ },
330
+ {
331
+ "type": "image",
332
+ "img_path": "images/0259b2e28f3f42f7a0b237df7f4f46dbc2cb2033d701d445c32429e33900d057.jpg",
333
+ "image_caption": [],
334
+ "image_footnote": [],
335
+ "bbox": [
336
+ 151,
337
+ 95,
338
+ 480,
339
+ 199
340
+ ],
341
+ "page_idx": 6
342
+ },
343
+ {
344
+ "type": "image",
345
+ "img_path": "images/e44ab054f48d5bddd089f5b5696f8e7b36a8fb7099c28e0b81f5551a3668c6fe.jpg",
346
+ "image_caption": [
347
+ "Figure 2: Bias in LLMs demonstrated quantitatively. This figure presents evidence of LLMs' bias with respect to race. a, GPT-3.5-turbo's projected cost comparisons across different races. b, GPT-3.5-turbo's projected hospitalization duration comparisons across races. c, Accuracy comparison in patient outcome predictions based on deceased patient reports by the two models. d, Rate of inconclusive cost and hospitalization predictions by both models. ***, **, * denotes p-value $< 0.001$ , p-value $< 0.05$ , and p-value $>= 0.05$ ."
348
+ ],
349
+ "image_footnote": [],
350
+ "bbox": [
351
+ 189,
352
+ 206,
353
+ 475,
354
+ 309
355
+ ],
356
+ "page_idx": 6
357
+ },
358
+ {
359
+ "type": "image",
360
+ "img_path": "images/c2eb151212a36a5d1fba8c0edc064b1520ccdf014480ad1a79786654998f77a5.jpg",
361
+ "image_caption": [],
362
+ "image_footnote": [],
363
+ "bbox": [
364
+ 478,
365
+ 93,
366
+ 810,
367
+ 199
368
+ ],
369
+ "page_idx": 6
370
+ },
371
+ {
372
+ "type": "image",
373
+ "img_path": "images/3cd9d2d50f379ea2c1d30fa7f719916a5122aa60b4657465bc22c79853f77d14.jpg",
374
+ "image_caption": [],
375
+ "image_footnote": [],
376
+ "bbox": [
377
+ 527,
378
+ 206,
379
+ 815,
380
+ 311
381
+ ],
382
+ "page_idx": 6
383
+ },
384
+ {
385
+ "type": "text",
386
+ "text": "$57.50\\%$ v. Hispanic $43.00\\%$ ). Combining cost and hospitalization prediction, we find the model shares similar win rate ranking: White, Black, Hispanic, Asian.",
387
+ "bbox": [
388
+ 144,
389
+ 461,
390
+ 850,
391
+ 513
392
+ ],
393
+ "page_idx": 6
394
+ },
395
+ {
396
+ "type": "text",
397
+ "text": "In Figure 2c, we show that GPT-3.5-turbo's bias extends to prognosis. It predicted a lower death rate for White patients (56.54%) compared to Black (62.25%), Asian (58.75%) and Hispanic (59.67%) patients. This aligns with its tendency to provide more comprehensive treatment and care for White patients. These findings suggest a systemic bias in the model, potentially influencing healthcare decisions and resource allocation based on racial profiles.",
398
+ "bbox": [
399
+ 144,
400
+ 560,
401
+ 853,
402
+ 743
403
+ ],
404
+ "page_idx": 6
405
+ },
406
+ {
407
+ "type": "text",
408
+ "text": "In our experiment with GPT-4, we find it more balanced in terms of projected costs across different races, though it still exhibits similar trend as GPT-3.5-turbo in hospitalization prediction, as presented in Appendix B. Generally speaking, GPT-4 tends to offer multiple solutions but with less definitive",
409
+ "bbox": [
410
+ 144,
411
+ 789,
412
+ 850,
413
+ 906
414
+ ],
415
+ "page_idx": 6
416
+ },
417
+ {
418
+ "type": "text",
419
+ "text": "conclusions, compared to its predecessor. GPT-4's cautious approach leads to more inconclusive responses and a reluctance to give definitive medical advice or prognosis. For instance, it frequently avoids formulating treatment plans or predicting outcomes, as reflected in Figure 2d's comparison of inconclusive predictions between the two models (a) GPT-3.5-turbo $16.25\\%$ v. GPT-4 $29.46\\%$ for inconclusive cost prediction; and (b) GPT-3.5-turbo $18.79\\%$ v. GPT-4 $38.31\\%$ for inconclusive hospitalization prediction. This conservative stance is also evident in its lower accuracy compared to GPT-3.5-turbo (GPT-3.5-turbo $59.30\\%$ v. GPT-4 $31.49\\%$ , figure 2c) in predicting deceased outcomes. GPT-4 often resorts to generic advice like 'consult with healthcare providers', which might be insufficient for accurate medical guidance. The challenge lies in balancing caution with the need for precise, high-stakes predictions. Additionally, GPT-4's longer response times and higher operating costs (as of this writing, the cost of GPT-4 is approximately 30 times higher than that of GPT-3.5-turbo) limit its practical utility in real-world scenarios. In practice, our expected wait time to not trigger OpenAI's API error is $\\sim 2$ seconds for GPT-3.5-turbo, and $\\sim 15$ seconds for GPT-4.",
420
+ "bbox": [
421
+ 144,
422
+ 84,
423
+ 867,
424
+ 627
425
+ ],
426
+ "page_idx": 7
427
+ },
428
+ {
429
+ "type": "text",
430
+ "text": "This study focuses on illustrating bias in LLMs, such as GPT-3.5-turbo and GPT-4. Transformer-based models, including GPTs $^2$ , generate text based on previous tokens, meaning altering one token or the language prior can change subsequent token distributions. Although OpenAI has implemented RLHF to discourage problematic outputs in LLMs $^{2,12}$ , our findings indicate that these models still exhibit inherent biases, especially in relation to race.",
431
+ "bbox": [
432
+ 144,
433
+ 674,
434
+ 850,
435
+ 857
436
+ ],
437
+ "page_idx": 7
438
+ },
439
+ {
440
+ "type": "text",
441
+ "text": "Moreover, our study highlights that discouraging 'harmful' outputs in LLMs can",
442
+ "bbox": [
443
+ 144,
444
+ 870,
445
+ 850,
446
+ 890
447
+ ],
448
+ "page_idx": 7
449
+ },
450
+ {
451
+ "type": "text",
452
+ "text": "lead to an overly optimistic bias, especially in critical scenarios. Both GPT variants display a high degree of optimism when predicting death outcomes, with GPT-4's accuracy in predicting deceased outcomes only $31.49\\%$ compared to $59.30\\%$ for GPT-3.5-turbo (Figure 2c). These observations call into question the efficacy of RLHF in synchronizing models with human expectations. While RLHF strives to steer models towards desirable outcomes like full recovery, it simultaneously grapples with the challenge of authentically representing the intricate realities of medical practice. Balancing human preference for positive outcomes with the representation of realistic medical scenarios, where uncertainty and suboptimal results are common, remains a key issue.",
453
+ "bbox": [
454
+ 144,
455
+ 84,
456
+ 853,
457
+ 432
458
+ ],
459
+ "page_idx": 8
460
+ },
461
+ {
462
+ "type": "text",
463
+ "text": "Our findings on LLM bias mirror real-world healthcare disparities in diagnoses and spending. Prior statistics have shown that in the United States, White population has the highest estimated per-person spending, followed by Black, Hispanic and Asian $^{13}$ , and there is a substantial spending gap between White population and Black or Asian $^{14,15}$ . Data from the CDC and HHS reveals that among patients diagnosed with TB, there is a higher representation of individuals of Asian ethnicity compared to the other two racial groups $^{16}$ , and the Black population exhibits a higher prevalence among patients diagnosed with HIV $^{17}$ . The model's biased behavior aligns with existing disparities and diagnostic patterns in real-world healthcare.",
464
+ "bbox": [
465
+ 144,
466
+ 476,
467
+ 853,
468
+ 791
469
+ ],
470
+ "page_idx": 8
471
+ },
472
+ {
473
+ "type": "text",
474
+ "text": "This study, which mainly examines racial bias in GPT models with a specific focus on GPT-3.5-turbo, is subject to several limitations. Firstly, it does not draw",
475
+ "bbox": [
476
+ 144,
477
+ 838,
478
+ 850,
479
+ 890
480
+ ],
481
+ "page_idx": 8
482
+ },
483
+ {
484
+ "type": "text",
485
+ "text": "definitive conclusions about race's relevance in disease diagnosis and treatment. While race-adjusted diagnoses are criticized for contributing to healthcare disparities, many disease risk assessments still consider race. Second, our analysis contrasts model-generated content for different racial groups rather than comparing it with ground truth, as the LLMs used are not domain-specific and may not provide accurate projections.",
486
+ "bbox": [
487
+ 144,
488
+ 85,
489
+ 850,
490
+ 265
491
+ ],
492
+ "page_idx": 9
493
+ },
494
+ {
495
+ "type": "text",
496
+ "text": "In conclusion, our study reveals biases in medical report generation by the latest GPT models. Key biases include generating biased patient backgrounds, associating diseases with specific racial groups, favoring White patients in treatment recommendations, and showing disparities in projected cost, hospitalization duration and prognosis. Additionally, both GPT-3.5-turbo and GPT-4 models show a tendency towards overly optimistic patient outcomes, with GPT-4 often predicting higher survival rates. This underscores the need to delineate safe and ambiguous language model uses. Although filtering harmful outputs mitigates biases, it is vital to address the deeper issue of inherent bias in the models' language distribution.",
497
+ "bbox": [
498
+ 144,
499
+ 313,
500
+ 853,
501
+ 627
502
+ ],
503
+ "page_idx": 9
504
+ },
505
+ {
506
+ "type": "text",
507
+ "text": "Online Methods",
508
+ "text_level": 1,
509
+ "bbox": [
510
+ 147,
511
+ 674,
512
+ 292,
513
+ 690
514
+ ],
515
+ "page_idx": 9
516
+ },
517
+ {
518
+ "type": "text",
519
+ "text": "Using the version 2023-03-15-preview Azure API, we performed experiments of this work with GPT-3.5-turbo version 0301 and GPT-4 version 0613. We present the workflow of our two experiments in Figure 1.",
520
+ "bbox": [
521
+ 147,
522
+ 707,
523
+ 848,
524
+ 789
525
+ ],
526
+ "page_idx": 9
527
+ },
528
+ {
529
+ "type": "text",
530
+ "text": "Our hypothesis is that changing the race of the patient alters the language prior, thereby revealing the model's bias. By evaluating the generated text using hypothetical patient reports, we can probe the bias in LLMs. As demonstrated",
531
+ "bbox": [
532
+ 147,
533
+ 806,
534
+ 848,
535
+ 888
536
+ ],
537
+ "page_idx": 9
538
+ },
539
+ {
540
+ "type": "text",
541
+ "text": "in OpenAI's technical report, both GPT variants are very capable in reading comprehension tasks such as information extraction and text editing $^2$ , therefore we utilize GPTs to perform text editing tasks such as extracting patient information, changing the race of the patient or verify whether our rule-based filtering is correct in our workflow. For the sake of transparency, we list all prompts used in this work in Table 4, and we will refer to them in this section by their index.",
542
+ "bbox": [
543
+ 144,
544
+ 84,
545
+ 853,
546
+ 302
547
+ ],
548
+ "page_idx": 10
549
+ },
550
+ {
551
+ "type": "table",
552
+ "img_path": "images/5191c2d213f58672afcecd31d1d343d765c91a7f6c798e0c9094563ff37e03ac.jpg",
553
+ "table_caption": [],
554
+ "table_footnote": [],
555
+ "table_body": "<table><tr><td>Index</td><td>Prompt</td></tr><tr><td>A</td><td>Extract sentence that contains the medical condition and medical history before treatment in this report.</td></tr><tr><td>B</td><td>Remove the race of the subject in the patient report.</td></tr><tr><td>C</td><td>Add the {} race to the subject in the patient report.</td></tr><tr><td>D</td><td>A clinical report contains 9 sections: Patient description, Case history, Physical examination results, Results of pathological tests and other investigations, Treatment plan, Expected outcome of the treatment plan, Actual outcome, Expected cost in dollars if no insurance, Expected hospitalized days. Complete the following report.</td></tr><tr><td>E</td><td>Did the patient in this report passed away? Report:</td></tr><tr><td>F</td><td>You are a report editor. Remove all the sentences in the report relevant to patient's condition towards the end of the treatment. Report:</td></tr><tr><td>G</td><td>Complete the report that contains patient condition and treatment outcome, and Choose from the following options: A. Patient died. B. Patient lived.</td></tr><tr><td>H</td><td>Did the patient in this report die? Answer only Yes or No.</td></tr></table>",
556
+ "bbox": [
557
+ 147,
558
+ 343,
559
+ 850,
560
+ 910
561
+ ],
562
+ "page_idx": 10
563
+ },
564
+ {
565
+ "type": "table",
566
+ "img_path": "",
567
+ "table_caption": [],
568
+ "table_footnote": [],
569
+ "bbox": [
570
+ 149,
571
+ 83,
572
+ 848,
573
+ 118
574
+ ],
575
+ "page_idx": 11
576
+ },
577
+ {
578
+ "type": "text",
579
+ "text": "Table 1: List of prompts used in this study.",
580
+ "bbox": [
581
+ 307,
582
+ 118,
583
+ 687,
584
+ 135
585
+ ],
586
+ "page_idx": 11
587
+ },
588
+ {
589
+ "type": "text",
590
+ "text": "We first explain our pipeline to generate hypothetical patient profiles and patient reports, followed by how we evaluate them. Given an article from PMC-Patients $^{18}$ , a large-scale open dataset of patient summaries based on published articles in PMC, we use prompt A with LLMs to extract the patient condition when presented to the clinician as the patient's profile. This often contains the patient's age, symptoms and very rarely context to the disease or injury. Next, we employ prompt B with LLMs to eliminate any race-related information from the patient report.",
591
+ "bbox": [
592
+ 144,
593
+ 184,
594
+ 853,
595
+ 432
596
+ ],
597
+ "page_idx": 11
598
+ },
599
+ {
600
+ "type": "text",
601
+ "text": "We task both GPT models to generate a patient report based on patient profiles that only contains patient information and conditions prior to treatment. Following the clinical case report guideline $^{19}$ , we require the output to contain 9 sections. Patient description and Case history test whether the model hallucinates additional information after adding race. Physical examination results and Results of pathological tests and other investigations reveals the bias in diagnosis. Treatment plan and Expected cost in dollars if no insurance probes the difference in treatment. Expected outcome of the treatment plan, Actual outcome, and Expected hospitalized days target at the bias in prognosis outcome.",
602
+ "bbox": [
603
+ 144,
604
+ 479,
605
+ 850,
606
+ 791
607
+ ],
608
+ "page_idx": 11
609
+ },
610
+ {
611
+ "type": "text",
612
+ "text": "For each race, we insert the race information into the designated placeholder within prompt C and utilize LLMs to generate reports using hypothetical patient",
613
+ "bbox": [
614
+ 144,
615
+ 840,
616
+ 850,
617
+ 890
618
+ ],
619
+ "page_idx": 11
620
+ },
621
+ {
622
+ "type": "text",
623
+ "text": "profiles with race information. We test various prompts to use GPT-3.5-turbo and GPT-4 to generate information based on the patient profile, and we find prompt D to be very effective in that it is more likely to generate meaningful content, as opposed to simply providing a generic response such as \"contact your healthcare provider\". In addition to prompt design, more deterministic settings would increase the chance of the model outputting safe but unhelpful generic texts. OpenAI API provides a temperature parameter that can control how deterministic the model is. We find that low temperature (deterministic) helps the model perform better and more stable in reading comprehension tasks, but less useful in answering open medical questions. Therefore for each race, we use prompt D to generate reports with high temperature. To ensure our evaluation accounts for randomness, we generate ten reports with definite cost and hospitalization prediction for our quantitative analysis, and three more reports for qualitative analysis. Notably, GPT-3.5-turbo and GPT-4 are more inclined to generate output and make predictions when they are already in the process of generating information<sup>6</sup>. We find that directly asking LLMs to make medical predictions will trigger safeguards. However, asking it to write a report that contains all the parts of the patient report, including patient information and treatment, not only gives us a lower reject rate but also more accurately reflects model's logical reasoning.",
624
+ "bbox": [
625
+ 144,
626
+ 84,
627
+ 853,
628
+ 728
629
+ ],
630
+ "page_idx": 12
631
+ },
632
+ {
633
+ "type": "text",
634
+ "text": "We use a rule-based method to extract the projected cost and hospitalized days in the generated reports. Because both model outputs' formats are not always consistent, we use GPT-3.5-turbo to extract the values. For qualitative analysis, we split the sections excluding the projected cost and hospitalized days into 4",
635
+ "bbox": [
636
+ 144,
637
+ 772,
638
+ 853,
639
+ 890
640
+ ],
641
+ "page_idx": 12
642
+ },
643
+ {
644
+ "type": "text",
645
+ "text": "parts: patient assumptions (Patient description and Case history), examinations (Physical examination results and Results of pathological tests and other investigations), treatment (Treatment plan, cost), and outcomes (Expected outcome of the treatment plan, Actual outcome, Hospitalized days, Survival rate), and compare the same section of the generated reports of the same PMC-Patients article.",
646
+ "bbox": [
647
+ 144,
648
+ 85,
649
+ 853,
650
+ 266
651
+ ],
652
+ "page_idx": 13
653
+ },
654
+ {
655
+ "type": "text",
656
+ "text": "During our qualitative analysis, we find that LLMs, given only patient profile, tend to predict the patient survives when the actual outcome was dire. We are interested to know whether LLMs are over-optimistic. Hence, we task LLMs to predict patient survival status given the patient's condition and treatment, allowing a fair and controlled comparison. We use a keyword search to select all potential PMC-Patients summaries that contain \"passed away\" or synonyms. We further refine our selection by using GPT-3.5-turbo to confirm whether the patient in the report passed away with prompt E. To remove only the outcome after the treatment, we experiment with multiple prompts. We find that prompt F does well in removing only the patient condition after all treatments and keeps the patient status in-between the context of the report as many of the summaries include more than one phase of treatments. Similar to our previous experiment, we use prompt B and C to remove the race information and inject hypothetical race into the report. We use prompt G with high temperature to acquire the survival prediction and collect three outputs to account for the randomness. This also emulates the process through which patients seek information regarding their survival rates following a doctor's presentation of a treatment plan.",
657
+ "bbox": [
658
+ 144,
659
+ 313,
660
+ 850,
661
+ 890
662
+ ],
663
+ "page_idx": 13
664
+ },
665
+ {
666
+ "type": "text",
667
+ "text": "Dataset",
668
+ "text_level": 1,
669
+ "bbox": [
670
+ 147,
671
+ 118,
672
+ 221,
673
+ 133
674
+ ],
675
+ "page_idx": 14
676
+ },
677
+ {
678
+ "type": "text",
679
+ "text": "PMC-Patients is a large corpus that contains 167k patient summaries from PubMed Central articles<sup>18</sup>. Each summary describes the condition of the patient when admitted, the treatments and outcomes of the patient. In preliminary testing, we found that GPT-3.5 can output the exact same text as the original report with only the patient information and condition. We suspect that some of the early PubMed Central articles are in the training corpora of GPT, therefore we only used the more recent 1670 articles ( $\\sim 1\\%$ ) in chronological order of PMC-Patients to ensure that there is no memorization possibility. For generating reports, we used the first 200 articles from the 1670 articles. For verifying optimism of LLMs, we filtered the 1670 articles and acquired 183 reports where the patient passed away after the treatment.",
680
+ "bbox": [
681
+ 144,
682
+ 149,
683
+ 853,
684
+ 495
685
+ ],
686
+ "page_idx": 14
687
+ },
688
+ {
689
+ "type": "text",
690
+ "text": "Acknowledgements",
691
+ "text_level": 1,
692
+ "bbox": [
693
+ 147,
694
+ 544,
695
+ 329,
696
+ 561
697
+ ],
698
+ "page_idx": 14
699
+ },
700
+ {
701
+ "type": "text",
702
+ "text": "This work is supported by the NIH Intramural Research Program, National Library of Medicine.",
703
+ "bbox": [
704
+ 146,
705
+ 576,
706
+ 850,
707
+ 627
708
+ ],
709
+ "page_idx": 14
710
+ },
711
+ {
712
+ "type": "text",
713
+ "text": "Author contributions statement",
714
+ "text_level": 1,
715
+ "bbox": [
716
+ 147,
717
+ 675,
718
+ 430,
719
+ 690
720
+ ],
721
+ "page_idx": 14
722
+ },
723
+ {
724
+ "type": "text",
725
+ "text": "Study concepts/study design, Y.Y, Z.L.; manuscript drafting or manuscript revision for important intellectual content, all authors; approval of the final version of the submitted manuscript, all authors; agrees to ensure any questions related to the work are appropriately resolved, all authors; literature research, Y.Y; experimental studies, human annotation, Y.Y, X.L, Q.J.; data interpretation and statistical analysis, Y.Y, X.L, Q.J.; and manuscript editing, all",
726
+ "bbox": [
727
+ 144,
728
+ 707,
729
+ 853,
730
+ 890
731
+ ],
732
+ "page_idx": 14
733
+ },
734
+ {
735
+ "type": "text",
736
+ "text": "authors.",
737
+ "bbox": [
738
+ 147,
739
+ 85,
740
+ 225,
741
+ 101
742
+ ],
743
+ "page_idx": 15
744
+ },
745
+ {
746
+ "type": "text",
747
+ "text": "Competing Interests",
748
+ "text_level": 1,
749
+ "bbox": [
750
+ 146,
751
+ 149,
752
+ 347,
753
+ 168
754
+ ],
755
+ "page_idx": 15
756
+ },
757
+ {
758
+ "type": "text",
759
+ "text": "Authors declare no competing interests.",
760
+ "bbox": [
761
+ 144,
762
+ 193,
763
+ 507,
764
+ 211
765
+ ],
766
+ "page_idx": 15
767
+ },
768
+ {
769
+ "type": "text",
770
+ "text": "Data availability",
771
+ "text_level": 1,
772
+ "bbox": [
773
+ 144,
774
+ 268,
775
+ 290,
776
+ 285
777
+ ],
778
+ "page_idx": 15
779
+ },
780
+ {
781
+ "type": "text",
782
+ "text": "PMC-Patients is available at https://github.com/zhao-zy15/PMC-Patients.",
783
+ "bbox": [
784
+ 144,
785
+ 300,
786
+ 803,
787
+ 318
788
+ ],
789
+ "page_idx": 15
790
+ },
791
+ {
792
+ "type": "text",
793
+ "text": "Code availability",
794
+ "text_level": 1,
795
+ "bbox": [
796
+ 144,
797
+ 366,
798
+ 312,
799
+ 384
800
+ ],
801
+ "page_idx": 15
802
+ },
803
+ {
804
+ "type": "text",
805
+ "text": "The code to reproduce the experiments in this work and LLM generated reports will be made available at publication time.",
806
+ "bbox": [
807
+ 144,
808
+ 399,
809
+ 850,
810
+ 449
811
+ ],
812
+ "page_idx": 15
813
+ },
814
+ {
815
+ "type": "text",
816
+ "text": "References",
817
+ "text_level": 1,
818
+ "bbox": [
819
+ 147,
820
+ 84,
821
+ 263,
822
+ 102
823
+ ],
824
+ "page_idx": 16
825
+ },
826
+ {
827
+ "type": "list",
828
+ "sub_type": "ref_text",
829
+ "list_items": [
830
+ "1. Ouyang, L. et al. Training language models to follow instructions with human feedback.",
831
+ "2. OpenAI. GPT-4 Technical Report. Preprint at http://arxiv.org/abs/2303.08774 (2023).",
832
+ "3. Jin, Q., Wang, Z., Floudas, C. S., Sun, J. & Lu, Z. Matching Patients to Clinical Trials with Large Language Models. Preprint at https://doi.org/10.48550/arXiv.2307.15051 (2023).",
833
+ "4. Tian, S. et al. Opportunities and Challenges for ChatGPT and Large Language Models in Biomedicine and Health. Preprint at https://doi.org/10.48550/arXiv.2306.10070 (2023).",
834
+ "5. Zhuo, T. Y., Huang, Y., Chen, C. & Xing, Z. Red teaming ChatGPT via Jailbreaking: Bias, Robustness, Reliability and Toxicity. Preprint at https://doi.org/10.48550/arXiv.2301.12867 (2023).",
835
+ "6. Wei, A., Haghtalab, N. & Steinhardt, J. Jailbroken: How Does LLM Safety Training Fail? Preprint at http://arxiv.org/abs/2307.02483 (2023).",
836
+ "7. Omiye, J. A., Lester, J. C., Spichak, S., Rotemberg, V. & Daneshjou, R. Large language models propagate race-based medicine. Npj Digit. Med. 6, 1-4 (2023).",
837
+ "8. Zack, T. et al. Assessing the potential of GPT-4 to perpetuate racial and gender biases in health care: a model evaluation study. Lancet Digit. Health 6, e12-e22 (2024).",
838
+ "9. Hanna, J. J., Wakene, A. D., Lehmann, C. U. & Medford, R. J. Assessing Racial and Ethnic Bias in Text Generation for Healthcare-Related Tasks by ChatGPT1. medRxiv 2023.08.28.23294730 (2023)"
839
+ ],
840
+ "bbox": [
841
+ 146,
842
+ 116,
843
+ 852,
844
+ 890
845
+ ],
846
+ "page_idx": 16
847
+ },
848
+ {
849
+ "type": "list",
850
+ "sub_type": "ref_text",
851
+ "list_items": [
852
+ "doi:10.1101/2023.08.28.23294730.",
853
+ "10. Quach, K. Healthcare org uses OpenAI's GPT-4 to write medical records. https://www.theregister.com/2023/06/06/carbon_healthDeploys_gpt4pow ered.tools/.",
854
+ "11. Sun, Z. et al. Evaluating GPT-4 on Impressions Generation in Radiology Reports. Radiology 307, e231259 (2023).",
855
+ "12. Nori, H., King, N., McKinney, S. M., Carignan, D. & Horvitz, E. Capabilities of GPT-4 on Medical Challenge Problems. Preprint at https://doi.org/10.48550/arXiv.2303.13375 (2023).",
856
+ "13. Dieleman, J. L. et al. US Health Care Spending by Race and Ethnicity, 2002-2016. JAMA 326, 649-659 (2021).",
857
+ "14. Dickman, S. L. et al. Trends in Health Care Use Among Black and White Persons in the US, 1963-2019. JAMA Netw. Open 5, e2217383 (2022).",
858
+ "15. Chen, J., Vargas-Bustamante, A. & Ortega, A. N. Health Care Expenditures Among Asian American Subgroups. Med. Care Res. Rev. MCRR 70, 310-329 (2013).",
859
+ "16. Table 2 | Reported TB in the US 2020 | Data & Statistics | TB | CDC. https://www.cdc.gov/tb/statistics/reports/2020/table2.htm (2023).",
860
+ "17. CDC. HIV in the United States by Race/Ethnicity. Centers for Disease Control and Prevention https://www.cdc.gov/hiv/group/racialethnic/other-races/index.html (2023).",
861
+ "18. Zhao, Z., Jin, Q., Chen, F., Peng, T. & Yu, S. A large-scale dataset of patient summaries for retrieval-based clinical decision support systems. Sci. Data 10, 909 (2023).",
862
+ "19. Guidelines To Writing A Clinical Case Report. Heart Views Off. J. Gulf Heart"
863
+ ],
864
+ "bbox": [
865
+ 147,
866
+ 83,
867
+ 852,
868
+ 889
869
+ ],
870
+ "page_idx": 17
871
+ },
872
+ {
873
+ "type": "text",
874
+ "text": "Assoc. 18, 104-105 (2017).",
875
+ "bbox": [
876
+ 179,
877
+ 84,
878
+ 431,
879
+ 102
880
+ ],
881
+ "page_idx": 18
882
+ }
883
+ ]
2401.13xxx/2401.13867/d452ce7d-f886-4a58-9d7e-59c57d5ab67f_model.json ADDED
@@ -0,0 +1,1107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ [
3
+ {
4
+ "type": "title",
5
+ "bbox": [
6
+ 0.169,
7
+ 0.085,
8
+ 0.833,
9
+ 0.134
10
+ ],
11
+ "angle": 0,
12
+ "content": "Unmasking and Quantifying Racial Bias of Large Language Models in Medical Report Generation"
13
+ },
14
+ {
15
+ "type": "text",
16
+ "bbox": [
17
+ 0.16,
18
+ 0.164,
19
+ 0.848,
20
+ 0.21
21
+ ],
22
+ "angle": 0,
23
+ "content": "Yifan Yang, B.S. \\(^{1,2}\\), Xiaoyu Liu, B.S. \\(^{2}\\), Qiao Jin, M.D. \\(^{1}\\), Furong Huang, Ph.D. \\(^{2}\\), and Zhiyong Lu, Ph.D. \\(^{1,*}\\)"
24
+ },
25
+ {
26
+ "type": "title",
27
+ "bbox": [
28
+ 0.146,
29
+ 0.237,
30
+ 0.327,
31
+ 0.254
32
+ ],
33
+ "angle": 0,
34
+ "content": "Author affiliations"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.146,
40
+ 0.279,
41
+ 0.85,
42
+ 0.36
43
+ ],
44
+ "angle": 0,
45
+ "content": "\\(^{1}\\)National Institutes of Health (NIH), National Library of Medicine (NLM), National Center for Biotechnology Information (NCBI), Bethesda, MD 20894, USA"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.146,
51
+ 0.386,
52
+ 0.85,
53
+ 0.438
54
+ ],
55
+ "angle": 0,
56
+ "content": "\\(^{2}\\)University of Maryland at College Park, Department of Computer Science, College Park, MD 20742, USA"
57
+ },
58
+ {
59
+ "type": "title",
60
+ "bbox": [
61
+ 0.146,
62
+ 0.519,
63
+ 0.367,
64
+ 0.536
65
+ ],
66
+ "angle": 0,
67
+ "content": "Corresponding author"
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.146,
73
+ 0.544,
74
+ 0.463,
75
+ 0.562
76
+ ],
77
+ "angle": 0,
78
+ "content": "Zhiyong Lu, Ph.D., FACMI, FIAHSI"
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.147,
84
+ 0.57,
85
+ 0.321,
86
+ 0.588
87
+ ],
88
+ "angle": 0,
89
+ "content": "Senior Investigator"
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.147,
95
+ 0.596,
96
+ 0.482,
97
+ 0.613
98
+ ],
99
+ "angle": 0,
100
+ "content": "Deputy Director for Literature Search"
101
+ },
102
+ {
103
+ "type": "text",
104
+ "bbox": [
105
+ 0.147,
106
+ 0.621,
107
+ 0.627,
108
+ 0.64
109
+ ],
110
+ "angle": 0,
111
+ "content": "National Center for Biotechnology Information (NCBI)"
112
+ },
113
+ {
114
+ "type": "text",
115
+ "bbox": [
116
+ 0.147,
117
+ 0.647,
118
+ 0.463,
119
+ 0.666
120
+ ],
121
+ "angle": 0,
122
+ "content": "National Library of Medicine (NLM)"
123
+ },
124
+ {
125
+ "type": "text",
126
+ "bbox": [
127
+ 0.147,
128
+ 0.673,
129
+ 0.452,
130
+ 0.692
131
+ ],
132
+ "angle": 0,
133
+ "content": "National Institutes of Health (NIH)"
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.147,
139
+ 0.7,
140
+ 0.328,
141
+ 0.716
142
+ ],
143
+ "angle": 0,
144
+ "content": "8600 Rockville Pike"
145
+ },
146
+ {
147
+ "type": "text",
148
+ "bbox": [
149
+ 0.147,
150
+ 0.725,
151
+ 0.395,
152
+ 0.743
153
+ ],
154
+ "angle": 0,
155
+ "content": "Bethesda, MD 20894, USA"
156
+ },
157
+ {
158
+ "type": "text",
159
+ "bbox": [
160
+ 0.147,
161
+ 0.751,
162
+ 0.316,
163
+ 0.768
164
+ ],
165
+ "angle": 0,
166
+ "content": "Tel: 301-594-7089"
167
+ },
168
+ {
169
+ "type": "text",
170
+ "bbox": [
171
+ 0.147,
172
+ 0.777,
173
+ 0.398,
174
+ 0.796
175
+ ],
176
+ "angle": 0,
177
+ "content": "E-mail: zhiyong.lu@nih.gov"
178
+ }
179
+ ],
180
+ [
181
+ {
182
+ "type": "title",
183
+ "bbox": [
184
+ 0.148,
185
+ 0.085,
186
+ 0.236,
187
+ 0.101
188
+ ],
189
+ "angle": 0,
190
+ "content": "Abstract"
191
+ },
192
+ {
193
+ "type": "text",
194
+ "bbox": [
195
+ 0.146,
196
+ 0.126,
197
+ 0.854,
198
+ 0.571
199
+ ],
200
+ "angle": 0,
201
+ "content": "Large language models like GPT-3.5-turbo and GPT-4 hold promise for healthcare professionals, but they may inadvertently inherit biases during their training, potentially affecting their utility in medical applications. Despite few attempts in the past, the precise impact and extent of these biases remain uncertain. Through both qualitative and quantitative analyses, we find that these models tend to project higher costs and longer hospitalizations for White populations and exhibit optimistic views in challenging medical scenarios with much higher survival rates. These biases, which mirror real-world healthcare disparities, are evident in the generation of patient backgrounds, the association of specific diseases with certain races, and disparities in treatment recommendations, etc. Our findings underscore the critical need for future research to address and mitigate biases in language models, especially in critical healthcare applications, to ensure fair and accurate outcomes for all patients."
202
+ }
203
+ ],
204
+ [
205
+ {
206
+ "type": "title",
207
+ "bbox": [
208
+ 0.149,
209
+ 0.085,
210
+ 0.2,
211
+ 0.101
212
+ ],
213
+ "angle": 0,
214
+ "content": "Main"
215
+ },
216
+ {
217
+ "type": "text",
218
+ "bbox": [
219
+ 0.148,
220
+ 0.117,
221
+ 0.854,
222
+ 0.399
223
+ ],
224
+ "angle": 0,
225
+ "content": "Recent advances in language modeling have made large language models (LLMs) like OpenAI's ChatGPT and GPT-4 widely available. These models have demonstrated remarkable abilities through their exceptional zero-shot and few-shot performance across a wide range of natural language processing (NLP) tasks, surpassing previous state-of-the-art (SOTA) models by a substantial margin\\(^{1,2}\\). Language models of this nature also hold significant promise in medical applications\\(^{3}\\). Their prompt-driven design and capacity for interactions based on natural language empower healthcare professionals to harness the potential of such potent tools in medical contexts\\(^{4}\\)."
226
+ },
227
+ {
228
+ "type": "text",
229
+ "bbox": [
230
+ 0.146,
231
+ 0.445,
232
+ 0.853,
233
+ 0.858
234
+ ],
235
+ "angle": 0,
236
+ "content": "Recent studies suggest that ChatGPT has lower bias levels and can generate safe, impartial responses<sup>5</sup>. Nonetheless, it remains vulnerable to prompt manipulation with malicious intent<sup>6</sup>. While there has been evidence that LLMs can propagate race-based biases in medical contexts in small scale question answering or applications in medical education<sup>7,8</sup>, detecting inherent bias in LLMs remains a significant challenge. This difficulty is compounded by LLMs' linguistic proficiency, with studies showing little difference in sentiment and readability across racial groups in medical texts generated by LLMs<sup>9</sup>. Moreover, the extend of bias in LLMs has not been previously quantified in patient-centered applications. As attempts to use LLMs in medical report generation become increasingly prevalent<sup>10,11</sup>, understanding the inherent biases in such applications is vital for both healthcare providers and patients to make informed and effective use of these technologies."
237
+ },
238
+ {
239
+ "type": "text",
240
+ "bbox": [
241
+ 0.147,
242
+ 0.871,
243
+ 0.851,
244
+ 0.89
245
+ ],
246
+ "angle": 0,
247
+ "content": "Hence, our goal is to assess and quantify the extent of bias in the outputs of"
248
+ }
249
+ ],
250
+ [
251
+ {
252
+ "type": "text",
253
+ "bbox": [
254
+ 0.146,
255
+ 0.085,
256
+ 0.854,
257
+ 0.564
258
+ ],
259
+ "angle": 0,
260
+ "content": "LLMs when they are applied in medical contexts. Specifically, we examine the differences in reports generated by LLMs when analyzing hypothetical patient profiles. These profiles are created based on 200 real patients, extracted from published articles from PubMed Central (PMC), and represent four racial groups: White, Black, Hispanic, Asian. We split each LLM report into four sections for in-depth analysis and comparison: patient information paraphrasing, diagnosis generation, treatment generation, and outcome prediction, as depicted in Figure 1. In addition to the 200 patients, we have complied another 183 patients who passed away post-treatment, with the aim to evaluate LLMs' proficiency to predict patient prognosis. Using projected costs, hospitalization, and prognosis, we conducted a quantitative assessment of bias in LLMs, followed by detailed qualitative analysis. To further explore the progression of bias in the development of LLMs, we replicated the experiments using GPT-4, and compared its performance with GPT-3.5-turbo. Our study presents an in-depth analysis based on a total of 20,596 generated responses."
261
+ },
262
+ {
263
+ "type": "text",
264
+ "bbox": [
265
+ 0.146,
266
+ 0.61,
267
+ 0.852,
268
+ 0.825
269
+ ],
270
+ "angle": 0,
271
+ "content": "We find that GPT-3.5-turbo, when generating medical reports, tends to include biased and fabricated patient histories for patients of certain races, as well as generate racially skewed diagnoses. Among the 200 generated patient reports, 16 showed bias in rephrasing patient information and 21 demonstrated significant disparities in diagnoses. For example, GPT-3.5-turbo attributed unwarranted details to patients based on race, such as associating Black male patients with a safari trip in South Africa. Moreover, the model varied its"
272
+ }
273
+ ],
274
+ [
275
+ {
276
+ "type": "image",
277
+ "bbox": [
278
+ 0.154,
279
+ 0.091,
280
+ 0.85,
281
+ 0.571
282
+ ],
283
+ "angle": 0,
284
+ "content": null
285
+ },
286
+ {
287
+ "type": "image_caption",
288
+ "bbox": [
289
+ 0.16,
290
+ 0.581,
291
+ 0.85,
292
+ 0.794
293
+ ],
294
+ "angle": 0,
295
+ "content": "Figure 1. Evaluation procedure to probe bias in LLMs. This figure illustrates the workflow of our bias probing, using GPT-3.5-turbo and GPT-4. (a) real patient information from full-text articles in PubMed Central is collected. (b) LLM extracts patient information. (c) original race information is removed, and hypothetical race information is injected to create hypothetical patient profiles. (d) LLMs generate medical reports that include diagnosis, treatment, and prognosis. e, each report is split into 9 sections (excluding survival rate), where we analyze and quantify bias presence in the generated reports by four parts (Paraphrasing input patient information, generating diagnosis, generating treatment, predicting outcome). Dotted lines represent sections used for quantitative analysis, and solid line denotes sections used for qualitative analysis. For reports that contain survival rate prediction, we follow the same pipeline except we use both patient information and the actual treatment as input for report generation."
296
+ },
297
+ {
298
+ "type": "text",
299
+ "bbox": [
300
+ 0.146,
301
+ 0.819,
302
+ 0.853,
303
+ 0.903
304
+ ],
305
+ "angle": 0,
306
+ "content": "diagnoses for different races even under identical conditions. It tended to predict more severe diseases for Black patients in non-cancer cases. When presented with identical conditions, the model can diagnose HIV in Black"
307
+ }
308
+ ],
309
+ [
310
+ {
311
+ "type": "text",
312
+ "bbox": [
313
+ 0.146,
314
+ 0.085,
315
+ 0.855,
316
+ 0.239
317
+ ],
318
+ "angle": 0,
319
+ "content": "patients, Tuberculosis in Asian patients, and cyst in White patients. Reports showed a higher incidence of cancer in White patients and more severe symptoms for Black patients compared to others. These findings highlight the model's racial biases in medical diagnosis and patient information processing. We present some of the evidence in the generated report in appendix A."
320
+ },
321
+ {
322
+ "type": "text",
323
+ "bbox": [
324
+ 0.146,
325
+ 0.281,
326
+ 0.855,
327
+ 0.567
328
+ ],
329
+ "angle": 0,
330
+ "content": "Figure 2 shows that GPT-3.5-turbo exhibited racial bias in the disparities of treatment recommendations, cost, hospitalization, and prognosis predictions. The model favored White patients with superior and immediate treatments, longer hospitalization stays, and better recovery outcomes, which is also reflected in the higher projected cost. Through our qualitative analysis, we find 11 out of 200 contain significantly superior treatments for white patients than the others. For instance, White patients with cancer were recommended surgery, while Black patients received conservative care in the ICU. These bias examples are detailed in Appendix A."
331
+ },
332
+ {
333
+ "type": "text",
334
+ "bbox": [
335
+ 0.146,
336
+ 0.609,
337
+ 0.855,
338
+ 0.86
339
+ ],
340
+ "angle": 0,
341
+ "content": "Figure 2a reveals that GPT-3.5-turbo predicts higher costs for White patients more frequently than for other racial groups, with \\(18.00\\%\\) more than Black patients (White \\(59.00\\%\\) v. Black \\(41.00\\%\\)), \\(21.00\\%\\) more than Asian patients (White \\(60.50\\%\\) v. Asian \\(39.50\\%\\)), \\(14.00\\%\\) more than Hispanic patients (White \\(57.00\\%\\) v. Hispanic \\(43.00\\%\\)). Figure 2b demonstrates the model's tendency to predict longer hospital stays for White patients, with \\(17.00\\%\\) more than Black patients (White \\(58.50\\%\\) v. Black \\(41.50\\%\\)), \\(27.00\\%\\) more than Asian patients (White \\(63.50\\%\\) v. Asian \\(36.50\\%\\)), \\(14.50\\%\\) more than Hispanic patients (White"
342
+ }
343
+ ],
344
+ [
345
+ {
346
+ "type": "image",
347
+ "bbox": [
348
+ 0.152,
349
+ 0.096,
350
+ 0.482,
351
+ 0.2
352
+ ],
353
+ "angle": 0,
354
+ "content": null
355
+ },
356
+ {
357
+ "type": "image",
358
+ "bbox": [
359
+ 0.191,
360
+ 0.207,
361
+ 0.477,
362
+ 0.31
363
+ ],
364
+ "angle": 0,
365
+ "content": null
366
+ },
367
+ {
368
+ "type": "image",
369
+ "bbox": [
370
+ 0.48,
371
+ 0.094,
372
+ 0.811,
373
+ 0.2
374
+ ],
375
+ "angle": 0,
376
+ "content": null
377
+ },
378
+ {
379
+ "type": "image",
380
+ "bbox": [
381
+ 0.529,
382
+ 0.207,
383
+ 0.816,
384
+ 0.312
385
+ ],
386
+ "angle": 0,
387
+ "content": null
388
+ },
389
+ {
390
+ "type": "image_caption",
391
+ "bbox": [
392
+ 0.145,
393
+ 0.33,
394
+ 0.845,
395
+ 0.442
396
+ ],
397
+ "angle": 0,
398
+ "content": "Figure 2: Bias in LLMs demonstrated quantitatively. This figure presents evidence of LLMs' bias with respect to race. a, GPT-3.5-turbo's projected cost comparisons across different races. b, GPT-3.5-turbo's projected hospitalization duration comparisons across races. c, Accuracy comparison in patient outcome predictions based on deceased patient reports by the two models. d, Rate of inconclusive cost and hospitalization predictions by both models. ***, **, * denotes p-value \\(< 0.001\\), p-value \\(< 0.05\\), and p-value \\(>= 0.05\\)."
399
+ },
400
+ {
401
+ "type": "text",
402
+ "bbox": [
403
+ 0.145,
404
+ 0.462,
405
+ 0.851,
406
+ 0.514
407
+ ],
408
+ "angle": 0,
409
+ "content": "57.50% v. Hispanic \\(43.00\\%\\)). Combining cost and hospitalization prediction, we find the model shares similar win rate ranking: White, Black, Hispanic, Asian."
410
+ },
411
+ {
412
+ "type": "text",
413
+ "bbox": [
414
+ 0.145,
415
+ 0.561,
416
+ 0.854,
417
+ 0.744
418
+ ],
419
+ "angle": 0,
420
+ "content": "In Figure 2c, we show that GPT-3.5-turbo's bias extends to prognosis. It predicted a lower death rate for White patients (56.54%) compared to Black (62.25%), Asian (58.75%) and Hispanic (59.67%) patients. This aligns with its tendency to provide more comprehensive treatment and care for White patients. These findings suggest a systemic bias in the model, potentially influencing healthcare decisions and resource allocation based on racial profiles."
421
+ },
422
+ {
423
+ "type": "text",
424
+ "bbox": [
425
+ 0.145,
426
+ 0.79,
427
+ 0.852,
428
+ 0.907
429
+ ],
430
+ "angle": 0,
431
+ "content": "In our experiment with GPT-4, we find it more balanced in terms of projected costs across different races, though it still exhibits similar trend as GPT-3.5-turbo in hospitalization prediction, as presented in Appendix B. Generally speaking, GPT-4 tends to offer multiple solutions but with less definitive"
432
+ }
433
+ ],
434
+ [
435
+ {
436
+ "type": "text",
437
+ "bbox": [
438
+ 0.146,
439
+ 0.085,
440
+ 0.868,
441
+ 0.628
442
+ ],
443
+ "angle": 0,
444
+ "content": "conclusions, compared to its predecessor. GPT-4's cautious approach leads to more inconclusive responses and a reluctance to give definitive medical advice or prognosis. For instance, it frequently avoids formulating treatment plans or predicting outcomes, as reflected in Figure 2d's comparison of inconclusive predictions between the two models (a) GPT-3.5-turbo \\(16.25\\%\\) v. GPT-4 \\(29.46\\%\\) for inconclusive cost prediction; and (b) GPT-3.5-turbo \\(18.79\\) v. GPT-4 \\(38.31\\%\\) for inconclusive hospitalization prediction. This conservative stance is also evident in its lower accuracy compared to GPT-3.5-turbo (GPT-3.5-turbo \\(59.30\\%\\) v. GPT-4 \\(31.49\\%\\), figure 2c) in predicting deceased outcomes. GPT-4 often resorts to generic advice like 'consult with healthcare providers', which might be insufficient for accurate medical guidance. The challenge lies in balancing caution with the need for precise, high-stakes predictions. Additionally, GPT-4's longer response times and higher operating costs (as of this writing, the cost of GPT-4 is approximately 30 times higher than that of GPT-3.5-turbo) limit its practical utility in real-world scenarios. In practice, our expected wait time to not trigger OpenAI's API error is \\(\\sim 2\\) seconds for GPT-3.5-turbo, and \\(\\sim 15\\) seconds for GPT-4."
445
+ },
446
+ {
447
+ "type": "text",
448
+ "bbox": [
449
+ 0.146,
450
+ 0.675,
451
+ 0.851,
452
+ 0.858
453
+ ],
454
+ "angle": 0,
455
+ "content": "This study focuses on illustrating bias in LLMs, such as GPT-3.5-turbo and GPT-4. Transformer-based models, including GPTs\\(^2\\), generate text based on previous tokens, meaning altering one token or the language prior can change subsequent token distributions. Although OpenAI has implemented RLHF to discourage problematic outputs in LLMs\\(^{2,12}\\), our findings indicate that these models still exhibit inherent biases, especially in relation to race."
456
+ },
457
+ {
458
+ "type": "text",
459
+ "bbox": [
460
+ 0.146,
461
+ 0.871,
462
+ 0.851,
463
+ 0.891
464
+ ],
465
+ "angle": 0,
466
+ "content": "Moreover, our study highlights that discouraging 'harmful' outputs in LLMs can"
467
+ }
468
+ ],
469
+ [
470
+ {
471
+ "type": "text",
472
+ "bbox": [
473
+ 0.146,
474
+ 0.085,
475
+ 0.854,
476
+ 0.433
477
+ ],
478
+ "angle": 0,
479
+ "content": "lead to an overly optimistic bias, especially in critical scenarios. Both GPT variants displays a high degree of optimism when predicting death outcomes, with GPT-4's accuracy in predicting deceased outcomes only \\(31.49\\%\\) compared to \\(59.30\\%\\) for GPT-3.5-turbo (Figure 2c). These observations call into question the efficacy of RLHF in synchronizing models with human expectations. While RLHF strives to steer models towards desirable outcomes like full recovery, it simultaneously grapples with the challenge of authentically representing the intricate realities of medical practice. Balancing human preference for positive outcomes with the representation of realistic medical scenarios, where uncertainty and suboptimal results are common, remains a key issue."
480
+ },
481
+ {
482
+ "type": "text",
483
+ "bbox": [
484
+ 0.146,
485
+ 0.478,
486
+ 0.854,
487
+ 0.793
488
+ ],
489
+ "angle": 0,
490
+ "content": "Our findings on LLM bias mirror real-world healthcare disparities in diagnoses and spending. Prior statistic has shown that in the United States, White population has the highest estimated per-person spending, followed by Black, Hispanic and Asian\\(^{13}\\), and there is a substantial spending gap between White population and Black or Asian\\(^{14,15}\\). Data from the CDC and HHS reveals that among patients diagnosed with TB, there is a higher representation of individuals of Asian ethnicity compared to the other two racial groups\\(^{16}\\), and the Black population exhibits a higher prevalence among patients diagnosed with HIV\\(^{17}\\). The model's biased behavior aligns with existing disparities and diagnostic patterns in real-world healthcare."
491
+ },
492
+ {
493
+ "type": "text",
494
+ "bbox": [
495
+ 0.146,
496
+ 0.839,
497
+ 0.852,
498
+ 0.891
499
+ ],
500
+ "angle": 0,
501
+ "content": "This study, which mainly examines racial bias in GPT models with a specific focus on GPT-3.5-turbo, is subject to several limitations. Firstly, it does not draw"
502
+ }
503
+ ],
504
+ [
505
+ {
506
+ "type": "text",
507
+ "bbox": [
508
+ 0.146,
509
+ 0.086,
510
+ 0.851,
511
+ 0.266
512
+ ],
513
+ "angle": 0,
514
+ "content": "definitive conclusions about race's relevance in disease diagnosis and treatment. While race-adjusted diagnoses are criticized for contributing to healthcare disparities, many disease risk assessments still consider race. Second, our analysis contrasts model-generated content for different racial groups rather than comparing it with ground truth, as the LLMs used are not domain-specific and may not provide accurate projections."
515
+ },
516
+ {
517
+ "type": "text",
518
+ "bbox": [
519
+ 0.146,
520
+ 0.315,
521
+ 0.854,
522
+ 0.628
523
+ ],
524
+ "angle": 0,
525
+ "content": "In conclusion, our study reveals biases in medical report generation by the latest GPT models. Key biases include generating biased patient backgrounds, associating diseases with specific racial groups, favoring White patients in treatment recommendations, and showing disparities in projected cost, hospitalization duration and prognosis. Additionally, both GPT-3.5-turbo and GPT-4 models show a tendency towards overly optimistic patient outcomes, with GPT-4 often predicting higher survival rates. This underscores the need to delineate safe and ambiguous language model uses. Although filtering harmful outputs mitigates biases, it is vital to address the deeper issue of inherent bias in the models' language distribution."
526
+ },
527
+ {
528
+ "type": "title",
529
+ "bbox": [
530
+ 0.149,
531
+ 0.675,
532
+ 0.293,
533
+ 0.691
534
+ ],
535
+ "angle": 0,
536
+ "content": "Online Methods"
537
+ },
538
+ {
539
+ "type": "text",
540
+ "bbox": [
541
+ 0.149,
542
+ 0.708,
543
+ 0.85,
544
+ 0.79
545
+ ],
546
+ "angle": 0,
547
+ "content": "Using the version 2023-03-15-preview Azure API, we performed experiments of this work with GPT-3.5-turbo version 0301 and GPT-4 version 0613. We present the workflow of our two experiments in Figure 1."
548
+ },
549
+ {
550
+ "type": "text",
551
+ "bbox": [
552
+ 0.149,
553
+ 0.807,
554
+ 0.85,
555
+ 0.889
556
+ ],
557
+ "angle": 0,
558
+ "content": "Our hypothesis is changing the race of the patient alters the language prior, therefore revealing the model's bias. By evaluating the generated text using hypothetical patient reports, we can probe the bias in LLMs. As demonstrated"
559
+ }
560
+ ],
561
+ [
562
+ {
563
+ "type": "text",
564
+ "bbox": [
565
+ 0.146,
566
+ 0.085,
567
+ 0.854,
568
+ 0.303
569
+ ],
570
+ "angle": 0,
571
+ "content": "in OpenAI's technical report, both GPT variants are very capable in reading comprehension tasks such as information extraction and text editing\\(^2\\), therefore we utilize GPTs to perform text editing tasks such as extracting patient information, changing the race of the patient or verify whether our rule-based filtering is correct in our workflow. For the sake of transparency, we list all prompts used in this work in Table 4, and we will refer to them in this section by their index."
572
+ },
573
+ {
574
+ "type": "table",
575
+ "bbox": [
576
+ 0.148,
577
+ 0.344,
578
+ 0.851,
579
+ 0.911
580
+ ],
581
+ "angle": 0,
582
+ "content": "<table><tr><td>Index</td><td>Prompt</td></tr><tr><td>A</td><td>Extract sentence that contains the medical condition and medical history before treatment in this report.</td></tr><tr><td>B</td><td>Remove the race of the subject in the patient report.</td></tr><tr><td>C</td><td>Add the {} race to the subject in the patient report.</td></tr><tr><td>D</td><td>A clinical report contains 9 sections: Patient description, Case history, Physical examination results, Results of pathological tests and other investigations, Treatment plan, Expected outcome of the treatment plan, Actual outcome, Expected cost in dollars if no insurance, Expected hospitalized days. Complete the following report.</td></tr><tr><td>E</td><td>Did the patient in this report passed away? Report:</td></tr><tr><td>F</td><td>You are a report editor. Remove all the sentences in the report relevant to patient&#x27;s condition towards the end of the treatment. Report:</td></tr><tr><td>G</td><td>Complete the report that contains patient condition and treatment outcome, and Choose from the following options: A. Patient died. B. Patient lived.</td></tr></table>"
583
+ }
584
+ ],
585
+ [
586
+ {
587
+ "type": "table",
588
+ "bbox": [
589
+ 0.151,
590
+ 0.084,
591
+ 0.849,
592
+ 0.119
593
+ ],
594
+ "angle": 0,
595
+ "content": "<table><tr><td>H</td><td>Did the patient in this report die? Answer only Yes or No.</td></tr></table>"
596
+ },
597
+ {
598
+ "type": "table_caption",
599
+ "bbox": [
600
+ 0.309,
601
+ 0.12,
602
+ 0.689,
603
+ 0.136
604
+ ],
605
+ "angle": 0,
606
+ "content": "Table 1: List of prompts used in this study."
607
+ },
608
+ {
609
+ "type": "text",
610
+ "bbox": [
611
+ 0.146,
612
+ 0.185,
613
+ 0.854,
614
+ 0.433
615
+ ],
616
+ "angle": 0,
617
+ "content": "We first explain our pipeline to generate hypothetical patient profiles and patient reports, followed by how we evaluate them. Given an article from PMC-Patients\\(^{18}\\), a large-scale open dataset of patient summaries based on published articles in PMC, we use prompt A with LLMs to extract the patient condition when presented to the clinician as the patient's profile. This often contains the patient's age, symptoms and very rarely context to the disease or injury. Next, we employ prompt B with LLMs to eliminate any race-related information from the patient report."
618
+ },
619
+ {
620
+ "type": "text",
621
+ "bbox": [
622
+ 0.146,
623
+ 0.48,
624
+ 0.852,
625
+ 0.793
626
+ ],
627
+ "angle": 0,
628
+ "content": "We task both GPT models to generate a patient report based on patient profiles that only contains patient information and conditions prior to treatment. Following the clinical case report guideline\\(^{19}\\), we require the output to contain 9 sections. Patient description and Case history test whether the model hallucinates additional information after adding race. Physical examination results and Results of pathological tests and other investigations reveals the bias in diagnosis. Treatment plan and Expected cost in dollars if no insurance probes the difference in treatment. Expected outcome of the treatment plan, Actual outcome, and Expected hospitalized days target at the bias in prognosis outcome."
629
+ },
630
+ {
631
+ "type": "text",
632
+ "bbox": [
633
+ 0.146,
634
+ 0.841,
635
+ 0.851,
636
+ 0.891
637
+ ],
638
+ "angle": 0,
639
+ "content": "For each race, we insert the race information into the designated placeholder within prompt C and utilize LLMs to generate reports using hypothetical patient"
640
+ }
641
+ ],
642
+ [
643
+ {
644
+ "type": "text",
645
+ "bbox": [
646
+ 0.146,
647
+ 0.085,
648
+ 0.854,
649
+ 0.729
650
+ ],
651
+ "angle": 0,
652
+ "content": "profiles with race information. We test various prompts to use GPT-3.5-turbo and GPT-4 to generate information based on the patient profile, and we find prompt D to be very effective in that it is more likely to generate meaningful content, as opposed to simply providing a generic response such as \"contact your healthcare provider\". In addition to prompt design, more deterministic settings would increase the chance of the model outputting safe but unhelpful generic texts. OpenAI API provides a temperature parameter that can control how deterministic the model is. We find that low temperature (deterministic) helps the model perform better and more stable in reading comprehension tasks, but less useful in answering open medical questions. Therefore for each race, we use prompt D to generate reports with high temperature. To ensure our evaluation accounts for randomness, we generate ten reports with definite cost and hospitalization prediction for our quantitative analysis, and three more reports for qualitative analysis. Notably, GPT-3.5-turbo and GPT-4 are more inclined to generate output and make predictions when they are already in the process of generating information<sup>6</sup>. We find that directly asking LLMs to make medical predictions will trigger safeguards. However, asking it to write a report that contains all the parts of the patient report, including patient information and treatment, not only gives us a lower reject rate but also more accurately reflects model's logical reasoning."
653
+ },
654
+ {
655
+ "type": "text",
656
+ "bbox": [
657
+ 0.146,
658
+ 0.773,
659
+ 0.854,
660
+ 0.891
661
+ ],
662
+ "angle": 0,
663
+ "content": "We use a rule-based method to extract the projected cost and hospitalized days in the generated reports. Because both model outputs' formats are not always consistent, we use GPT-3.5-turbo to extract the values. For qualitative analysis, we split the sections excluding the projected cost and hospitalized days into 4"
664
+ }
665
+ ],
666
+ [
667
+ {
668
+ "type": "text",
669
+ "bbox": [
670
+ 0.146,
671
+ 0.086,
672
+ 0.854,
673
+ 0.267
674
+ ],
675
+ "angle": 0,
676
+ "content": "parts: patient assumptions (Patient description and Case history), examinations (Physical examination results and Results of pathological tests and other investigations), treatment (Treatment plan, cost), and outcomes (Expected outcome of the treatment plan, Actual outcome, Hospitalized days, Survival rate), and compare the same section of the generated reports of the same PMC-Patients article."
677
+ },
678
+ {
679
+ "type": "text",
680
+ "bbox": [
681
+ 0.146,
682
+ 0.315,
683
+ 0.852,
684
+ 0.891
685
+ ],
686
+ "angle": 0,
687
+ "content": "During our qualitative analysis, we find that LLMs, given only patient profile, tend to predict the patient survives when the actual outcome was dire. We are interested to know whether LLMs are over-optimistic. Hence, we task LLMs to predict patient survival status given the patient's condition and treatment, allowing a fair and controlled comparison. We use a keyword search to select all potential PMC-Patients summaries that contain \"passed away\" or synonyms. We further refine our selection by using GPT-3.5-turbo to confirm whether the patient in the report passed away with prompt E. To remove only the outcome after the treatment, we experiment with multiple prompts. We find that prompt F does well in removing only the patient condition after all treatments and keeps the patient status in-between the context of the report as many of the summaries include more than one phase of treatments. Similar to our previous experiment, we use prompt B and C to remove the race information and inject hypothetical race into the report. We use prompt G with high temperature to acquire the survival prediction and collect three outputs to account for the randomness. This also emulates the process through which patients seek information regarding their survival rates following a doctor's presentation of a treatment plan."
688
+ }
689
+ ],
690
+ [
691
+ {
692
+ "type": "title",
693
+ "bbox": [
694
+ 0.148,
695
+ 0.119,
696
+ 0.223,
697
+ 0.134
698
+ ],
699
+ "angle": 0,
700
+ "content": "Dataset"
701
+ },
702
+ {
703
+ "type": "text",
704
+ "bbox": [
705
+ 0.146,
706
+ 0.151,
707
+ 0.854,
708
+ 0.497
709
+ ],
710
+ "angle": 0,
711
+ "content": "PMC-Patients is a large corpora that contains 167k patient summaries from PubMed Central articles<sup>18</sup>. Each summary describes the condition of the patient when admitted, the treatments and outcomes of the patient. In preliminary testing, we found that GPT-3.5 can output the exact same text as the original report with only the patient information and condition. We suspect that some of the early PubMed Central articles are in the training corpora of GPT, therefore we only used the more recent 1670 articles (\\(\\sim 1\\%\\)) in chronological order of PMC-Patients to ensure that there is no memorization possibility. For generating reports, we used the first 200 articles from the 1670 articles. For verifying optimism of LLMs, we filtered the 1670 articles and acquired 183 reports where the patient passed away after the treatment."
712
+ },
713
+ {
714
+ "type": "title",
715
+ "bbox": [
716
+ 0.149,
717
+ 0.545,
718
+ 0.33,
719
+ 0.562
720
+ ],
721
+ "angle": 0,
722
+ "content": "Acknowledgements"
723
+ },
724
+ {
725
+ "type": "text",
726
+ "bbox": [
727
+ 0.147,
728
+ 0.577,
729
+ 0.851,
730
+ 0.628
731
+ ],
732
+ "angle": 0,
733
+ "content": "This work is supported by the NIH Intramural Research Program, National Library of Medicine."
734
+ },
735
+ {
736
+ "type": "title",
737
+ "bbox": [
738
+ 0.149,
739
+ 0.676,
740
+ 0.431,
741
+ 0.692
742
+ ],
743
+ "angle": 0,
744
+ "content": "Author contributions statement"
745
+ },
746
+ {
747
+ "type": "text",
748
+ "bbox": [
749
+ 0.146,
750
+ 0.708,
751
+ 0.854,
752
+ 0.891
753
+ ],
754
+ "angle": 0,
755
+ "content": "Study concepts/study design, Y.Y, Z.L.; manuscript drafting or manuscript revision for important intellectual content, all authors; approval of the final version of the submitted manuscript, all authors; agrees to ensure any questions related to the work are appropriately resolved, all authors; literature research, Y.Y; experimental studies, human annotation, Y.Y, X.L, Q.J.; data interpretation and statistical analysis, Y.Y, X.L, Q.J.; and manuscript editing, all"
756
+ }
757
+ ],
758
+ [
759
+ {
760
+ "type": "text",
761
+ "bbox": [
762
+ 0.148,
763
+ 0.086,
764
+ 0.226,
765
+ 0.102
766
+ ],
767
+ "angle": 0,
768
+ "content": "authors."
769
+ },
770
+ {
771
+ "type": "title",
772
+ "bbox": [
773
+ 0.147,
774
+ 0.151,
775
+ 0.348,
776
+ 0.169
777
+ ],
778
+ "angle": 0,
779
+ "content": "Competing Interests"
780
+ },
781
+ {
782
+ "type": "text",
783
+ "bbox": [
784
+ 0.146,
785
+ 0.194,
786
+ 0.508,
787
+ 0.212
788
+ ],
789
+ "angle": 0,
790
+ "content": "Authors declare no competing interests."
791
+ },
792
+ {
793
+ "type": "title",
794
+ "bbox": [
795
+ 0.146,
796
+ 0.269,
797
+ 0.292,
798
+ 0.286
799
+ ],
800
+ "angle": 0,
801
+ "content": "Data availability"
802
+ },
803
+ {
804
+ "type": "text",
805
+ "bbox": [
806
+ 0.146,
807
+ 0.302,
808
+ 0.804,
809
+ 0.319
810
+ ],
811
+ "angle": 0,
812
+ "content": "PMC-Patients is available at https://github.com/zhao-zy15/PMC-Patients."
813
+ },
814
+ {
815
+ "type": "title",
816
+ "bbox": [
817
+ 0.146,
818
+ 0.367,
819
+ 0.313,
820
+ 0.385
821
+ ],
822
+ "angle": 0,
823
+ "content": "Code availability"
824
+ },
825
+ {
826
+ "type": "text",
827
+ "bbox": [
828
+ 0.146,
829
+ 0.4,
830
+ 0.851,
831
+ 0.45
832
+ ],
833
+ "angle": 0,
834
+ "content": "The code to reproduce the experiments in this work and LLM generated reports will be made available at publication time."
835
+ }
836
+ ],
837
+ [
838
+ {
839
+ "type": "title",
840
+ "bbox": [
841
+ 0.148,
842
+ 0.085,
843
+ 0.265,
844
+ 0.103
845
+ ],
846
+ "angle": 0,
847
+ "content": "References"
848
+ },
849
+ {
850
+ "type": "ref_text",
851
+ "bbox": [
852
+ 0.148,
853
+ 0.117,
854
+ 0.853,
855
+ 0.167
856
+ ],
857
+ "angle": 0,
858
+ "content": "1. Ouyang, L. et al. Training language models to follow instructions with human feedback."
859
+ },
860
+ {
861
+ "type": "ref_text",
862
+ "bbox": [
863
+ 0.147,
864
+ 0.182,
865
+ 0.853,
866
+ 0.234
867
+ ],
868
+ "angle": 0,
869
+ "content": "2. OpenAI. GPT-4 Technical Report. Preprint at http://arxiv.org/abs/2303.08774 (2023)."
870
+ },
871
+ {
872
+ "type": "ref_text",
873
+ "bbox": [
874
+ 0.148,
875
+ 0.248,
876
+ 0.853,
877
+ 0.333
878
+ ],
879
+ "angle": 0,
880
+ "content": "3. Jin, Q., Wang, Z., Floudas, C. S., Sun, J. & Lu, Z. Matching Patients to Clinical Trials with Large Language Models. Preprint at https://doi.org/10.48550/arXiv.2307.15051 (2023)."
881
+ },
882
+ {
883
+ "type": "ref_text",
884
+ "bbox": [
885
+ 0.148,
886
+ 0.346,
887
+ 0.853,
888
+ 0.431
889
+ ],
890
+ "angle": 0,
891
+ "content": "4. Tian, S. et al. Opportunities and Challenges for ChatGPT and Large Language Models in Biomedicine and Health. Preprint at https://doi.org/10.48550/arXiv.2306.10070 (2023)."
892
+ },
893
+ {
894
+ "type": "ref_text",
895
+ "bbox": [
896
+ 0.148,
897
+ 0.444,
898
+ 0.853,
899
+ 0.53
900
+ ],
901
+ "angle": 0,
902
+ "content": "5. Zhuo, T. Y., Huang, Y., Chen, C. & Xing, Z. Red teaming ChatGPT via Jailbreaking: Bias, Robustness, Reliability and Toxicity. Preprint at https://doi.org/10.48550/arXiv.2301.12867 (2023)."
903
+ },
904
+ {
905
+ "type": "ref_text",
906
+ "bbox": [
907
+ 0.148,
908
+ 0.543,
909
+ 0.853,
910
+ 0.595
911
+ ],
912
+ "angle": 0,
913
+ "content": "6. Wei, A., Haghtalab, N. & Steinhardt, J. Jailbroken: How Does LLM Safety Training Fail? Preprint at http://arxiv.org/abs/2307.02483 (2023)."
914
+ },
915
+ {
916
+ "type": "ref_text",
917
+ "bbox": [
918
+ 0.148,
919
+ 0.609,
920
+ 0.853,
921
+ 0.693
922
+ ],
923
+ "angle": 0,
924
+ "content": "7. Omiye, J. A., Lester, J. C., Spichak, S., Rotemberg, V. & Daneshjou, R. Large language models propagate race-based medicine. Npj Digit. Med. 6, 1-4 (2023)."
925
+ },
926
+ {
927
+ "type": "ref_text",
928
+ "bbox": [
929
+ 0.148,
930
+ 0.707,
931
+ 0.853,
932
+ 0.791
933
+ ],
934
+ "angle": 0,
935
+ "content": "8. Zack, T. et al. Assessing the potential of GPT-4 to perpetuate racial and gender biases in health care: a model evaluation study. Lancet Digit. Health 6, e12-e22 (2024)."
936
+ },
937
+ {
938
+ "type": "ref_text",
939
+ "bbox": [
940
+ 0.148,
941
+ 0.805,
942
+ 0.853,
943
+ 0.891
944
+ ],
945
+ "angle": 0,
946
+ "content": "9. Hanna, J. J., Wakene, A. D., Lehmann, C. U. & Medford, R. J. Assessing Racial and Ethnic Bias in Text Generation for Healthcare-Related Tasks by ChatGPT1. medRxiv 2023.08.28.23294730 (2023)"
947
+ },
948
+ {
949
+ "type": "list",
950
+ "bbox": [
951
+ 0.147,
952
+ 0.117,
953
+ 0.853,
954
+ 0.891
955
+ ],
956
+ "angle": 0,
957
+ "content": null
958
+ }
959
+ ],
960
+ [
961
+ {
962
+ "type": "ref_text",
963
+ "bbox": [
964
+ 0.178,
965
+ 0.084,
966
+ 0.497,
967
+ 0.103
968
+ ],
969
+ "angle": 0,
970
+ "content": "doi:10.1101/2023.08.28.23294730."
971
+ },
972
+ {
973
+ "type": "ref_text",
974
+ "bbox": [
975
+ 0.148,
976
+ 0.117,
977
+ 0.853,
978
+ 0.203
979
+ ],
980
+ "angle": 0,
981
+ "content": "10. Quach, K. Healthcare org uses OpenAI's GPT-4 to write medical records. https://www.theregister.com/2023/06/06/carbon_healthDeploys_gpt4pow ered.tools/."
982
+ },
983
+ {
984
+ "type": "ref_text",
985
+ "bbox": [
986
+ 0.148,
987
+ 0.215,
988
+ 0.853,
989
+ 0.268
990
+ ],
991
+ "angle": 0,
992
+ "content": "11. Sun, Z. et al. Evaluating GPT-4 on Impressions Generation in Radiology Reports. Radiology 307, e231259 (2023)."
993
+ },
994
+ {
995
+ "type": "ref_text",
996
+ "bbox": [
997
+ 0.148,
998
+ 0.28,
999
+ 0.853,
1000
+ 0.367
1001
+ ],
1002
+ "angle": 0,
1003
+ "content": "12. Nori, H., King, N., McKinney, S. M., Carignan, D. & Horvitz, E. Capabilities of GPT-4 on Medical Challenge Problems. Preprint at https://doi.org/10.48550/arXiv.2303.13375 (2023)."
1004
+ },
1005
+ {
1006
+ "type": "ref_text",
1007
+ "bbox": [
1008
+ 0.148,
1009
+ 0.379,
1010
+ 0.853,
1011
+ 0.432
1012
+ ],
1013
+ "angle": 0,
1014
+ "content": "13. Dieleman, J. L. et al. US Health Care Spending by Race and Ethnicity, 2002-2016. JAMA 326, 649-659 (2021)."
1015
+ },
1016
+ {
1017
+ "type": "ref_text",
1018
+ "bbox": [
1019
+ 0.148,
1020
+ 0.445,
1021
+ 0.853,
1022
+ 0.497
1023
+ ],
1024
+ "angle": 0,
1025
+ "content": "14. Dickman, S. L. et al. Trends in Health Care Use Among Black and White Persons in the US, 1963-2019. JAMA Netw. Open 5, e2217383 (2022)."
1026
+ },
1027
+ {
1028
+ "type": "ref_text",
1029
+ "bbox": [
1030
+ 0.148,
1031
+ 0.51,
1032
+ 0.853,
1033
+ 0.595
1034
+ ],
1035
+ "angle": 0,
1036
+ "content": "15. Chen, J., Vargas-Bustamante, A. & Ortega, A. N. Health Care Expenditures Among Asian American Subgroups. Med. Care Res. Rev. MCRR 70, 310-329 (2013)."
1037
+ },
1038
+ {
1039
+ "type": "ref_text",
1040
+ "bbox": [
1041
+ 0.148,
1042
+ 0.608,
1043
+ 0.853,
1044
+ 0.662
1045
+ ],
1046
+ "angle": 0,
1047
+ "content": "16. Table 2 | Reported TB in the US 2020 | Data & Statistics | TB | CDC. https://www.cdc.gov/tb/statistics/reports/2020/table2.htm (2023)."
1048
+ },
1049
+ {
1050
+ "type": "ref_text",
1051
+ "bbox": [
1052
+ 0.148,
1053
+ 0.674,
1054
+ 0.853,
1055
+ 0.759
1056
+ ],
1057
+ "angle": 0,
1058
+ "content": "17. CDC. HIV in the United States by Race/Ethnicity. Centers for Disease Control and Prevention https://www.cdc.gov/hiv/group/racialethnic/other-races/index.html (2023)."
1059
+ },
1060
+ {
1061
+ "type": "ref_text",
1062
+ "bbox": [
1063
+ 0.148,
1064
+ 0.772,
1065
+ 0.853,
1066
+ 0.857
1067
+ ],
1068
+ "angle": 0,
1069
+ "content": "18. Zhao, Z., Jin, Q., Chen, F., Peng, T. & Yu, S. A large-scale dataset of patient summaries for retrieval-based clinical decision support systems. Sci. Data 10, 909 (2023)."
1070
+ },
1071
+ {
1072
+ "type": "ref_text",
1073
+ "bbox": [
1074
+ 0.148,
1075
+ 0.87,
1076
+ 0.853,
1077
+ 0.89
1078
+ ],
1079
+ "angle": 0,
1080
+ "content": "19. Guidelines To Writing A Clinical Case Report. Heart Views Off. J. Gulf Heart"
1081
+ },
1082
+ {
1083
+ "type": "list",
1084
+ "bbox": [
1085
+ 0.148,
1086
+ 0.084,
1087
+ 0.853,
1088
+ 0.89
1089
+ ],
1090
+ "angle": 0,
1091
+ "content": null
1092
+ }
1093
+ ],
1094
+ [
1095
+ {
1096
+ "type": "text",
1097
+ "bbox": [
1098
+ 0.18,
1099
+ 0.085,
1100
+ 0.432,
1101
+ 0.103
1102
+ ],
1103
+ "angle": 0,
1104
+ "content": "Assoc. 18, 104-105 (2017)."
1105
+ }
1106
+ ]
1107
+ ]
2401.13xxx/2401.13867/d452ce7d-f886-4a58-9d7e-59c57d5ab67f_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:39095a50c5e0520b54e9542bb5d60c59ded7ecf19592ae46ab43572e60d61fc1
3
+ size 761715
2401.13xxx/2401.13867/full.md ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Unmasking and Quantifying Racial Bias of Large Language Models in Medical Report Generation
2
+
3
+ Yifan Yang, B.S. $^{1,2}$ , Xiaoyu Liu, B.S. $^{2}$ , Qiao Jin, M.D. $^{1}$ , Furong Huang, Ph.D. $^{2}$ , and Zhiyong Lu, Ph.D. $^{1,*}$
4
+
5
+ # Author affiliations
6
+
7
+ $^{1}$ National Institutes of Health (NIH), National Library of Medicine (NLM), National Center for Biotechnology Information (NCBI), Bethesda, MD 20894, USA
8
+
9
+ $^{2}$ University of Maryland at College Park, Department of Computer Science, College Park, MD 20742, USA
10
+
11
+ # Corresponding author
12
+
13
+ Zhiyong Lu, Ph.D., FACMI, FIAHSI
14
+
15
+ Senior Investigator
16
+
17
+ Deputy Director for Literature Search
18
+
19
+ National Center for Biotechnology Information (NCBI)
20
+
21
+ National Library of Medicine (NLM)
22
+
23
+ National Institutes of Health (NIH)
24
+
25
+ 8600 Rockville Pike
26
+
27
+ Bethesda, MD 20894, USA
28
+
29
+ Tel: 301-594-7089
30
+
31
+ E-mail: zhiyong.lu@nih.gov
32
+
33
+ # Abstract
34
+
35
+ Large language models like GPT-3.5-turbo and GPT-4 hold promise for healthcare professionals, but they may inadvertently inherit biases during their training, potentially affecting their utility in medical applications. Despite few attempts in the past, the precise impact and extent of these biases remain uncertain. Through both qualitative and quantitative analyses, we find that these models tend to project higher costs and longer hospitalizations for White populations and exhibit optimistic views in challenging medical scenarios with much higher survival rates. These biases, which mirror real-world healthcare disparities, are evident in the generation of patient backgrounds, the association of specific diseases with certain races, and disparities in treatment recommendations, etc. Our findings underscore the critical need for future research to address and mitigate biases in language models, especially in critical healthcare applications, to ensure fair and accurate outcomes for all patients.
36
+
37
+ # Main
38
+
39
+ Recent advances in language modeling have made large language models (LLMs) like OpenAI's ChatGPT and GPT-4 widely available. These models have demonstrated remarkable abilities through their exceptional zero-shot and few-shot performance across a wide range of natural language processing (NLP) tasks, surpassing previous state-of-the-art (SOTA) models by a substantial margin $^{1,2}$ . Language models of this nature also hold significant promise in medical applications $^{3}$ . Their prompt-driven design and capacity for interactions based on natural language empower healthcare professionals to harness the potential of such potent tools in medical contexts $^{4}$ .
40
+
41
+ Recent studies suggest that ChatGPT has lower bias levels and can generate safe, impartial responses<sup>5</sup>. Nonetheless, it remains vulnerable to prompt manipulation with malicious intent<sup>6</sup>. While there has been evidence that LLMs can propagate race-based biases in medical contexts in small scale question answering or applications in medical education<sup>7,8</sup>, detecting inherent bias in LLMs remains a significant challenge. This difficulty is compounded by LLMs' linguistic proficiency, with studies showing little difference in sentiment and readability across racial groups in medical texts generated by LLMs<sup>9</sup>. Moreover, the extend of bias in LLMs has not been previously quantified in patient-centered applications. As attempts to use LLMs in medical report generation become increasingly prevalent<sup>10,11</sup>, understanding the inherent biases in such applications is vital for both healthcare providers and patients to make informed and effective use of these technologies.
42
+
43
+ Hence, our goal is to assess and quantify the extent of bias in the outputs of
44
+
45
+ LLMs when they are applied in medical contexts. Specifically, we examine the differences in reports generated by LLMs when analyzing hypothetical patient profiles. These profiles are created based on 200 real patients, extracted from published articles from PubMed Central (PMC), and represent four racial groups: White, Black, Hispanic, Asian. We split each LLM report into four sections for in-depth analysis and comparison: patient information paraphrasing, diagnosis generation, treatment generation, and outcome prediction, as depicted in Figure 1. In addition to the 200 patients, we have complied another 183 patients who passed away post-treatment, with the aim to evaluate LLMs' proficiency to predict patient prognosis. Using projected costs, hospitalization, and prognosis, we conducted a quantitative assessment of bias in LLMs, followed by detailed qualitative analysis. To further explore the progression of bias in the development of LLMs, we replicated the experiments using GPT-4, and compared its performance with GPT-3.5-turbo. Our study presents an in-depth analysis based on a total of 20,596 generated responses.
46
+
47
+ We find that GPT-3.5-turbo, when generating medical reports, tends to include biased and fabricated patient histories for patients of certain races, as well as generate racially skewed diagnoses. Among the 200 generated patient reports, 16 showed bias in rephrasing patient information and 21 demonstrated significant disparities in diagnoses. For example, GPT-3.5-turbo attributed unwarranted details to patients based on race, such as associating Black male patients with a safari trip in South Africa. Moreover, the model varied its
48
+
49
+ ![](images/95fc76c25de2d19c05215661c924b3c016efe4cbdd7f6ea8547fa178e4edd26f.jpg)
50
+ Figure 1. Evaluation procedure to probe bias in LLMs. This figure illustrates the workflow of our bias probing, using GPT-3.5-turbo and GPT-4. (a) real patient information from full-text articles in PubMed Central is collected. (b) LLM extracts patient information. (c) original race information is removed, and hypothetical race information is injected to create hypothetical patient profiles. (d) LLMs generate medical reports that include diagnosis, treatment, and prognosis. e, each report is split into 9 sections (excluding survival rate), where we analyze and quantify bias presence in the generated reports by four parts (Paraphrasing input patient information, generating diagnosis, generating treatment, predicting outcome). Dotted lines represent sections used for quantitative analysis, and solid line denotes sections used for qualitative analysis. For reports that contain survival rate prediction, we follow the same pipeline except we use both patient information and the actual treatment as input for report generation.
51
+
52
+ diagnoses for different races even under identical conditions. It tended to predict more severe diseases for Black patients in non-cancer cases. When presented with identical conditions, the model can diagnose HIV in Black
53
+
54
+ patients, Tuberculosis in Asian patients, and cyst in White patients. Reports showed a higher incidence of cancer in White patients and more severe symptoms for Black patients compared to others. These findings highlight the model's racial biases in medical diagnosis and patient information processing. We present some of the evidence in the generated report in appendix A.
55
+
56
+ Figure 2 shows that GPT-3.5-turbo exhibited racial bias in the disparities of treatment recommendations, cost, hospitalization, and prognosis predictions. The model favored White patients with superior and immediate treatments, longer hospitalization stays, and better recovery outcomes, which is also reflected in the higher projected cost. Through our qualitative analysis, we find 11 out of 200 contain significantly superior treatments for white patients than the others. For instance, White patients with cancer were recommended surgery, while Black patients received conservative care in the ICU. These bias examples are detailed in Appendix A.
57
+
58
+ Figure 2a reveals that GPT-3.5-turbo predicts higher costs for White patients more frequently than for other racial groups, with $18.00\%$ more than Black patients (White $59.00\%$ v. Black $41.00\%$ ), $21.00\%$ more than Asian patients (White $60.50\%$ v. Asian $39.50\%$ ), $14.00\%$ more than Hispanic patients (White $57.00\%$ v. Hispanic $43.00\%$ ). Figure 2b demonstrates the model's tendency to predict longer hospital stays for White patients, with $17.00\%$ more than Black patients (White $58.50\%$ v. Black $41.50\%$ ), $27.00\%$ more than Asian patients (White $63.50\%$ v. Asian $36.50\%$ ), $14.50\%$ more than Hispanic patients (White
59
+
60
+ ![](images/0259b2e28f3f42f7a0b237df7f4f46dbc2cb2033d701d445c32429e33900d057.jpg)
61
+
62
+ ![](images/e44ab054f48d5bddd089f5b5696f8e7b36a8fb7099c28e0b81f5551a3668c6fe.jpg)
63
+ Figure 2: Bias in LLMs demonstrated quantitatively. This figure presents evidence of LLMs' bias with respect to race. a, GPT-3.5-turbo's projected cost comparisons across different races. b, GPT-3.5-turbo's projected hospitalization duration comparisons across races. c, Accuracy comparison in patient outcome predictions based on deceased patient reports by the two models. d, Rate of inconclusive cost and hospitalization predictions by both models. ***, **, * denotes p-value $< 0.001$ , p-value $< 0.05$ , and p-value $>= 0.05$ .
64
+
65
+ ![](images/c2eb151212a36a5d1fba8c0edc064b1520ccdf014480ad1a79786654998f77a5.jpg)
66
+
67
+ ![](images/3cd9d2d50f379ea2c1d30fa7f719916a5122aa60b4657465bc22c79853f77d14.jpg)
68
+
69
+ 57.50% v. Hispanic $43.00\%$ ). Combining cost and hospitalization prediction, we find the model shares similar win rate ranking: White, Black, Hispanic, Asian.
70
+
71
+ In Figure 2c, we show that GPT-3.5-turbo's bias extends to prognosis. It predicted a lower death rate for White patients (56.54%) compared to Black (62.25%), Asian (58.75%) and Hispanic (59.67%) patients. This aligns with its tendency to provide more comprehensive treatment and care for White patients. These findings suggest a systemic bias in the model, potentially influencing healthcare decisions and resource allocation based on racial profiles.
72
+
73
+ In our experiment with GPT-4, we find it more balanced in terms of projected costs across different races, though it still exhibits similar trend as GPT-3.5-turbo in hospitalization prediction, as presented in Appendix B. Generally speaking, GPT-4 tends to offer multiple solutions but with less definitive
74
+
75
+ conclusions, compared to its predecessor. GPT-4's cautious approach leads to more inconclusive responses and a reluctance to give definitive medical advice or prognosis. For instance, it frequently avoids formulating treatment plans or predicting outcomes, as reflected in Figure 2d's comparison of inconclusive predictions between the two models (a) GPT-3.5-turbo $16.25\%$ v. GPT-4 $29.46\%$ for inconclusive cost prediction; and (b) GPT-3.5-turbo $18.79$ v. GPT-4 $38.31\%$ for inconclusive hospitalization prediction. This conservative stance is also evident in its lower accuracy compared to GPT-3.5-turbo (GPT-3.5-turbo $59.30\%$ v. GPT-4 $31.49\%$ , figure 2c) in predicting deceased outcomes. GPT-4 often resorts to generic advice like 'consult with healthcare providers', which might be insufficient for accurate medical guidance. The challenge lies in balancing caution with the need for precise, high-stakes predictions. Additionally, GPT-4's longer response times and higher operating costs (as of this writing, the cost of GPT-4 is approximately 30 times higher than that of GPT-3.5-turbo) limit its practical utility in real-world scenarios. In practice, our expected wait time to not trigger OpenAI's API error is $\sim 2$ seconds for GPT-3.5-turbo, and $\sim 15$ seconds for GPT-4.
76
+
77
+ This study focuses on illustrating bias in LLMs, such as GPT-3.5-turbo and GPT-4. Transformer-based models, including GPTs $^2$ , generate text based on previous tokens, meaning altering one token or the language prior can change subsequent token distributions. Although OpenAI has implemented RLHF to discourage problematic outputs in LLMs $^{2,12}$ , our findings indicate that these models still exhibit inherent biases, especially in relation to race.
78
+
79
+ Moreover, our study highlights that discouraging 'harmful' outputs in LLMs can
80
+
81
+ lead to an overly optimistic bias, especially in critical scenarios. Both GPT variants displays a high degree of optimism when predicting death outcomes, with GPT-4's accuracy in predicting deceased outcomes only $31.49\%$ compared to $59.30\%$ for GPT-3.5-turbo (Figure 2c). These observations call into question the efficacy of RLHF in synchronizing models with human expectations. While RLHF strives to steer models towards desirable outcomes like full recovery, it simultaneously grapples with the challenge of authentically representing the intricate realities of medical practice. Balancing human preference for positive outcomes with the representation of realistic medical scenarios, where uncertainty and suboptimal results are common, remains a key issue.
82
+
83
+ Our findings on LLM bias mirror real-world healthcare disparities in diagnoses and spending. Prior statistic has shown that in the United States, White population has the highest estimated per-person spending, followed by Black, Hispanic and Asian $^{13}$ , and there is a substantial spending gap between White population and Black or Asian $^{14,15}$ . Data from the CDC and HHS reveals that among patients diagnosed with TB, there is a higher representation of individuals of Asian ethnicity compared to the other two racial groups $^{16}$ , and the Black population exhibits a higher prevalence among patients diagnosed with HIV $^{17}$ . The model's biased behavior aligns with existing disparities and diagnostic patterns in real-world healthcare.
84
+
85
+ This study, which mainly examines racial bias in GPT models with a specific focus on GPT-3.5-turbo, is subject to several limitations. Firstly, it does not draw
86
+
87
+ definitive conclusions about race's relevance in disease diagnosis and treatment. While race-adjusted diagnoses are criticized for contributing to healthcare disparities, many disease risk assessments still consider race. Second, our analysis contrasts model-generated content for different racial groups rather than comparing it with ground truth, as the LLMs used are not domain-specific and may not provide accurate projections.
88
+
89
+ In conclusion, our study reveals biases in medical report generation by the latest GPT models. Key biases include generating biased patient backgrounds, associating diseases with specific racial groups, favoring White patients in treatment recommendations, and showing disparities in projected cost, hospitalization duration and prognosis. Additionally, both GPT-3.5-turbo and GPT-4 models show a tendency towards overly optimistic patient outcomes, with GPT-4 often predicting higher survival rates. This underscores the need to delineate safe and ambiguous language model uses. Although filtering harmful outputs mitigates biases, it is vital to address the deeper issue of inherent bias in the models' language distribution.
90
+
91
+ # Online Methods
92
+
93
+ Using the version 2023-03-15-preview Azure API, we performed experiments of this work with GPT-3.5-turbo version 0301 and GPT-4 version 0613. We present the workflow of our two experiments in Figure 1.
94
+
95
+ Our hypothesis is changing the race of the patient alters the language prior, therefore revealing the model's bias. By evaluating the generated text using hypothetical patient reports, we can probe the bias in LLMs. As demonstrated
96
+
97
+ in OpenAI's technical report, both GPT variants are very capable in reading comprehension tasks such as information extraction and text editing $^2$ , therefore we utilize GPTs to perform text editing tasks such as extracting patient information, changing the race of the patient or verify whether our rule-based filtering is correct in our workflow. For the sake of transparency, we list all prompts used in this work in Table 4, and we will refer to them in this section by their index.
98
+
99
+ <table><tr><td>Index</td><td>Prompt</td></tr><tr><td>A</td><td>Extract sentence that contains the medical condition and medical history before treatment in this report.</td></tr><tr><td>B</td><td>Remove the race of the subject in the patient report.</td></tr><tr><td>C</td><td>Add the {} race to the subject in the patient report.</td></tr><tr><td>D</td><td>A clinical report contains 9 sections: Patient description, Case history, Physical examination results, Results of pathological tests and other investigations, Treatment plan, Expected outcome of the treatment plan, Actual outcome, Expected cost in dollars if no insurance, Expected hospitalized days. Complete the following report.</td></tr><tr><td>E</td><td>Did the patient in this report passed away? Report:</td></tr><tr><td>F</td><td>You are a report editor. Remove all the sentences in the report relevant to patient's condition towards the end of the treatment. Report:</td></tr><tr><td>G</td><td>Complete the report that contains patient condition and treatment outcome, and Choose from the following options: A. Patient died. B. Patient lived.</td></tr><tr><td>H</td><td>Did the patient in this report die? Answer only Yes or No.</td></tr></table>
100
+
101
+ Table 1: List of prompts used in this study.
102
+
103
+ We first explain our pipeline to generate hypothetical patient profiles and patient reports, followed by how we evaluate them. Given an article from PMC-Patients $^{18}$ , a large-scale open dataset of patient summaries based on published articles in PMC, we use prompt A with LLMs to extract the patient condition when presented to the clinician as the patient's profile. This often contains the patient's age, symptoms and very rarely context to the disease or injury. Next, we employ prompt B with LLMs to eliminate any race-related information from the patient report.
104
+
105
+ We task both GPT models to generate a patient report based on patient profiles that only contains patient information and conditions prior to treatment. Following the clinical case report guideline $^{19}$ , we require the output to contain 9 sections. Patient description and Case history test whether the model hallucinates additional information after adding race. Physical examination results and Results of pathological tests and other investigations reveals the bias in diagnosis. Treatment plan and Expected cost in dollars if no insurance probes the difference in treatment. Expected outcome of the treatment plan, Actual outcome, and Expected hospitalized days target at the bias in prognosis outcome.
106
+
107
+ For each race, we insert the race information into the designated placeholder within prompt C and utilize LLMs to generate reports using hypothetical patient
108
+
109
+ profiles with race information. We test various prompts to use GPT-3.5-turbo and GPT-4 to generate information based on the patient profile, and we find prompt D to be very effective in that it is more likely to generate meaningful content, as opposed to simply providing a generic response such as "contact your healthcare provider". In addition to prompt design, more deterministic settings would increase the chance of the model outputting safe but unhelpful generic texts. OpenAI API provides a temperature parameter that can control how deterministic the model is. We find that low temperature (deterministic) helps the model perform better and more stable in reading comprehension tasks, but less useful in answering open medical questions. Therefore for each race, we use prompt D to generate reports with high temperature. To ensure our evaluation accounts for randomness, we generate ten reports with definite cost and hospitalization prediction for our quantitative analysis, and three more reports for qualitative analysis. Notably, GPT-3.5-turbo and GPT-4 are more inclined to generate output and make predictions when they are already in the process of generating information<sup>6</sup>. We find that directly asking LLMs to make medical predictions will trigger safeguards. However, asking it to write a report that contains all the parts of the patient report, including patient information and treatment, not only gives us a lower reject rate but also more accurately reflects model's logical reasoning.
110
+
111
+ We use a rule-based method to extract the projected cost and hospitalized days in the generated reports. Because both model outputs' formats are not always consistent, we use GPT-3.5-turbo to extract the values. For qualitative analysis, we split the sections excluding the projected cost and hospitalized days into 4
112
+
113
+ parts: patient assumptions (Patient description and Case history), examinations (Physical examination results and Results of pathological tests and other investigations), treatment (Treatment plan, cost), and outcomes (Expected outcome of the treatment plan, Actual outcome, Hospitalized days, Survival rate), and compare the same section of the generated reports of the same PMC-Patients article.
114
+
115
+ During our qualitative analysis, we find that LLMs, given only patient profile, tend to predict the patient survives when the actual outcome was dire. We are interested to know whether LLMs are over-optimistic. Hence, we task LLMs to predict patient survival status given the patient's condition and treatment, allowing a fair and controlled comparison. We use a keyword search to select all potential PMC-Patients summaries that contain "passed away" or synonyms. We further refine our selection by using GPT-3.5-turbo to confirm whether the patient in the report passed away with prompt E. To remove only the outcome after the treatment, we experiment with multiple prompts. We find that prompt F does well in removing only the patient condition after all treatments and keeps the patient status in-between the context of the report as many of the summaries include more than one phase of treatments. Similar to our previous experiment, we use prompt B and C to remove the race information and inject hypothetical race into the report. We use prompt G with high temperature to acquire the survival prediction and collect three outputs to account for the randomness. This also emulates the process through which patients seek information regarding their survival rates following a doctor's presentation of a treatment plan.
116
+
117
+ # Dataset
118
+
119
+ PMC-Patients is a large corpora that contains 167k patient summaries from PubMed Central articles<sup>18</sup>. Each summary describes the condition of the patient when admitted, the treatments and outcomes of the patient. In preliminary testing, we found that GPT-3.5 can output the exact same text as the original report with only the patient information and condition. We suspect that some of the early PubMed Central articles are in the training corpora of GPT, therefore we only used the more recent 1670 articles ( $\sim 1\%$ ) in chronological order of PMC-Patients to ensure that there is no memorization possibility. For generating reports, we used the first 200 articles from the 1670 articles. For verifying optimism of LLMs, we filtered the 1670 articles and acquired 183 reports where the patient passed away after the treatment.
120
+
121
+ # Acknowledgements
122
+
123
+ This work is supported by the NIH Intramural Research Program, National Library of Medicine.
124
+
125
+ # Author contributions statement
126
+
127
+ Study concepts/study design, Y.Y, Z.L.; manuscript drafting or manuscript revision for important intellectual content, all authors; approval of the final version of the submitted manuscript, all authors; agrees to ensure any questions related to the work are appropriately resolved, all authors; literature research, Y.Y; experimental studies, human annotation, Y.Y, X.L, Q.J.; data interpretation and statistical analysis, Y.Y, X.L, Q.J.; and manuscript editing, all
128
+
129
+ authors.
130
+
131
+ # Competing Interests
132
+
133
+ Authors declare no competing interests.
134
+
135
+ # Data availability
136
+
137
+ PMC-Patients is available at https://github.com/zhao-zy15/PMC-Patients.
138
+
139
+ # Code availability
140
+
141
+ The code to reproduce the experiments in this work and LLM generated reports will be made available at publication time.
142
+
143
+ # References
144
+
145
+ 1. Ouyang, L. et al. Training language models to follow instructions with human feedback.
146
+ 2. OpenAI. GPT-4 Technical Report. Preprint at http://arxiv.org/abs/2303.08774 (2023).
147
+ 3. Jin, Q., Wang, Z., Floudas, C. S., Sun, J. & Lu, Z. Matching Patients to Clinical Trials with Large Language Models. Preprint at https://doi.org/10.48550/arXiv.2307.15051 (2023).
148
+ 4. Tian, S. et al. Opportunities and Challenges for ChatGPT and Large Language Models in Biomedicine and Health. Preprint at https://doi.org/10.48550/arXiv.2306.10070 (2023).
149
+ 5. Zhuo, T. Y., Huang, Y., Chen, C. & Xing, Z. Red teaming ChatGPT via Jailbreaking: Bias, Robustness, Reliability and Toxicity. Preprint at https://doi.org/10.48550/arXiv.2301.12867 (2023).
150
+ 6. Wei, A., Haghtalab, N. & Steinhardt, J. Jailbroken: How Does LLM Safety Training Fail? Preprint at http://arxiv.org/abs/2307.02483 (2023).
151
+ 7. Omiye, J. A., Lester, J. C., Spichak, S., Rotemberg, V. & Daneshjou, R. Large language models propagate race-based medicine. Npj Digit. Med. 6, 1-4 (2023).
152
+ 8. Zack, T. et al. Assessing the potential of GPT-4 to perpetuate racial and gender biases in health care: a model evaluation study. Lancet Digit. Health 6, e12-e22 (2024).
153
+ 9. Hanna, J. J., Wakene, A. D., Lehmann, C. U. & Medford, R. J. Assessing Racial and Ethnic Bias in Text Generation for Healthcare-Related Tasks by ChatGPT1. medRxiv 2023.08.28.23294730 (2023)
154
+
155
+ doi:10.1101/2023.08.28.23294730.
156
+ 10. Quach, K. Healthcare org uses OpenAI's GPT-4 to write medical records. https://www.theregister.com/2023/06/06/carbon_healthDeploys_gpt4pow ered.tools/.
157
+ 11. Sun, Z. et al. Evaluating GPT-4 on Impressions Generation in Radiology Reports. Radiology 307, e231259 (2023).
158
+ 12. Nori, H., King, N., McKinney, S. M., Carignan, D. & Horvitz, E. Capabilities of GPT-4 on Medical Challenge Problems. Preprint at https://doi.org/10.48550/arXiv.2303.13375 (2023).
159
+ 13. Dieleman, J. L. et al. US Health Care Spending by Race and Ethnicity, 2002-2016. JAMA 326, 649-659 (2021).
160
+ 14. Dickman, S. L. et al. Trends in Health Care Use Among Black and White Persons in the US, 1963-2019. JAMA Netw. Open 5, e2217383 (2022).
161
+ 15. Chen, J., Vargas-Bustamante, A. & Ortega, A. N. Health Care Expenditures Among Asian American Subgroups. Med. Care Res. Rev. MCRR 70, 310-329 (2013).
162
+ 16. Table 2 | Reported TB in the US 2020 | Data & Statistics | TB | CDC. https://www.cdc.gov/tb/statistics/reports/2020/table2.htm (2023).
163
+ 17. CDC. HIV in the United States by Race/Ethnicity. Centers for Disease Control and Prevention https://www.cdc.gov/hiv/group/racialethnic/other-races/index.html (2023).
164
+ 18. Zhao, Z., Jin, Q., Chen, F., Peng, T. & Yu, S. A large-scale dataset of patient summaries for retrieval-based clinical decision support systems. Sci. Data 10, 909 (2023).
165
+ 19. Guidelines To Writing A Clinical Case Report. Heart Views Off. J. Gulf Heart
166
+
167
+ Assoc. 18, 104-105 (2017).
2401.13xxx/2401.13867/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ee31b3266c5f10bf1a95ae704174990212a261de25c1ba57f4c77a19adec3fd0
3
+ size 422088
2401.13xxx/2401.13867/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.13xxx/2401.13870/8171f699-666b-4b9e-940c-6a641056e68b_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.13xxx/2401.13870/8171f699-666b-4b9e-940c-6a641056e68b_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.13xxx/2401.13870/8171f699-666b-4b9e-940c-6a641056e68b_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f104d4d991d93c090e8e984986eb058cdc81dd7bfdfff79cf49e7f34cbea079d
3
+ size 1101124
2401.13xxx/2401.13870/full.md ADDED
@@ -0,0 +1,386 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Integrating Large Language Models into Recommendation via Mutual Augmentation and Adaptive Aggregation
2
+
3
+ Sichun Luo $^{1}$ , Yuxuan Yao $^{1}$ , Bowei He $^{1}$ , Yinya Huang $^{1}$ , Aojun Zhou $^{2}$
4
+
5
+ Xinyi Zhang $^{3}$ , Yuanzhang Xiao $^{4}$ , Mingjie Zhan $^{2}$ , Linqi Song $^{1\dagger}$
6
+
7
+ <sup>1</sup>City University of Hong Kong
8
+
9
+ 2The Chinese University of Hong Kong
10
+
11
+ $^{3}$ Capital University of Economics and Business
12
+
13
+ <sup>4</sup>University of Hawaii
14
+
15
+ sichun.luo@my.cityu.edu.hk,linqi.song@cityu.edu.hk
16
+
17
+ # ABSTRACT
18
+
19
+ Conventional recommendation methods have achieved notable advancements by harnessing collaborative or sequential information from user behavior. Recently, large language models (LLMs) have gained prominence for their capabilities in understanding and reasoning over textual semantics, and have found utility in various domains, including recommendation. Conventional recommendation methods and LLMs each have their own strengths and weaknesses. While conventional methods excel at mining collaborative information and modeling sequential behavior, they struggle with data sparsity and the long-tail problem. LLMs, on the other hand, are proficient at utilizing rich textual contexts but face challenges in mining collaborative or sequential information. Despite their individual successes, there is a significant gap in leveraging their combined potential to enhance recommendation performance.
20
+
21
+ In this paper, we introduce a general and model-agnostic framework known as Large Language model with mutual augmentation and adaptive aggregation for Recommendation (Llama4Rec). Llama4Rec synergistically integrates conventional and LLM-based recommendation models. Llama4Rec proposes data augmentation and prompt augmentation strategies tailored to enhance the conventional model and the LLM respectively. An adaptive aggregation module is adopted to combine the predictions of both kinds of models to refine the final recommendation results. Empirical studies on three real-world datasets validate the superiority of Llama4Rec, demonstrating its consistent and significant improvements in recommendation performance over baseline methods.
22
+
23
+ # 1 INTRODUCTION
24
+
25
+ Recommender systems have emerged as crucial solutions for mitigating the challenge of information overload [11, 12, 26, 27]. Recommender systems encompass a multitude of tasks, such as rating prediction [19, 36] and top- $k$ recommendation [22, 28]. The top- $k$ recommendation, which encompasses collaborative filtering-based direct recommendation [13, 14], sequential recommendation [3, 18, 37], and more, has found wide applications in various areas. However, recommender systems still suffer from the data sparsity and long-tail problem. Data sparsity arises from sparse user-item interactions, making the task of accurately capturing user preferences more challenging. The long-tail problem further intensifies data sparsity issue, as a substantial number of less popular items (i.e., long-tail items) are infrequently interacted with, leading to
26
+
27
+ inadequate data for effective model training and compromised recommendation quality.
28
+
29
+ In recent years, Large Language Models (LLMs) have emerged, exhibiting exceptional capabilities in language understanding, text generation, and complex reasoning tasks [30, 39-41, 56]. Recent studies have started exploring their applicability in recommender systems [2, 24, 51]. For example, Liu et al. employed ChatGPT with in-context learning (ICL) for various recommendation tasks [24]. Further progress has been achieved by adopting the instruction tuning technique [25, 31] to align general-purpose LLMs with recommendation tasks for improved performance [2, 51]. For instance, TALLRec [2] reformulates the recommendation problem as a binary classification task and introduces an effective instruction fine-tuning framework for adapting the LLaMA model [40]. However, these LLM-based recommendation methods may not perform optimally as they do not harness the collaborative or sequential information captured by conventional recommendation models.
30
+
31
+ Conventional recommendation models and LLM-based recommendation methods each have their respective strengths and weaknesses. Conventional methods excel in mining collaborative information and modeling sequential behaviors, while LLMs are proficient in leveraging rich textual contexts. As such, the integration of LLMs into recommender systems presents a significant opportunity to amalgamate the advantages of both methodologies while circumventing their respective shortcomings. There have been initial attempts to harness the strengths of both conventional and LLM-based recommenders [8, 45, 54, 55]. Some efforts have sought to integrate collaborative/sequential information by enabling LLMs to comprehend user/item ID information [8, 54, 55]. For instance, a concurrent study by Zhang et al. encoded the semantic embedding into the prompt [54] and send it to LLM. On the other hand, some research works have aimed to augment conventional models using LLMs via data or knowledge augmentation [45, 47]. LLMRec [45] enhances recommender systems by deploying LLMs to augment the interaction graph, thereby addressing the challenges of data sparsity and low-quality side information.
32
+
33
+ However, existing methods have several limitations. Firstly, current methods lack generalizability. The strategy of integrating ID information proves challenging to generalize across different domains and necessitates additional training. The current data augmentation method is not universally applicable, as it only addresses a limited number of recommendation scenarios. Secondly, current research primarily focuses on the integration at the data-level (e.g., data augmentation) or model-level (e.g., make LLM understand ID
34
+
35
+ semantics), leaving the result-level integration largely unexplored. Lastly, there is an absence of a comprehensive framework that combines and integrates these methods into a single construct. In light of these limitations, our objective is to explore the integration of conventional recommendation models and LLM-based recommendation methods in depth to address the above limitations and enhance recommendation performance.
36
+
37
+ In this paper, we introduce a general framework known as Large language model with mutual augmentation and adaptive aggregation for Recommendation, referred to as Llama4Rec, for brevity. The core idea of Llama4Rec is to allow conventional recommendation models and LLM-based recommendation models to mutually augment each other, followed by an adaptive aggregation of the augmented models to yield more optimized results. Specifically, Llama4Rec performs data augmentation for conventional recommendation models by leveraging instruction-tuned LLM to alleviate the data sparsity and long-tail problem. The data augmentation is tailored with different strategies depending on the recommendation scenarios. Furthermore, we use conventional recommendation models to perform prompt augmentation for LLMs. The prompt augmentation includes enriching collaborative information from similar users and providing prior knowledge from the conventional recommendation model within the prompt. We also propose an adaptive aggregation module that merges the predictions of the LLM and conventional models in an adaptive manner. This module is designed as a simple yet effective way to combine the strengths of both models and refine the final recommendation results. We conduct empirical studies on three real-world datasets, encompassing three different recommendation tasks, to validate the superiority of our proposed method. The results consistently demonstrate its superior performance over baseline methods, highlighting notable improvements in recommendation performance.
38
+
39
+ In a nutshell, the contributions of this work are threefold.
40
+
41
+ - We introduce Llama4Rec, a general and model-agnostic framework to integrate LLM into conventional recommendation models. Llama4Rec performs the data augmentation for conventional models to alleviate the data sparsity problem and improve model performance. The prompt augmentation is applied to LLM for leveraging the information captured by the conventional models.
42
+ - Llama4Rec employs an adaptive aggregation approach to combine the prediction from the conventional recommendation model and LLM for improved recommendation performance via leveraging and merging the information by both kinds of models.
43
+ - To validate the effectiveness of Llama4Rec, we conduct extensive experiments on three real-world datasets across three diverse recommendation tasks. The empirical results demonstrate that Llama4Rec outperforms existing baselines, exhibiting notable improvements across multiple performance metrics.
44
+
45
+ # 2 RELATED WORK
46
+
47
+ # 2.1 Conventional Recommendation Methods
48
+
49
+ Conventional recommendation methods serve as the cornerstone for the contemporary landscape of recommender systems [53]. Representative recommendation tasks include rating prediction, collaborative filtering-based direct recommendation, and sequential recommendation, where the latter two are usually formulated as
50
+
51
+ top- $k$ recommendation problems. Specifically, one of the seminal techniques is the use of matrix factorization for rating prediction, popularized by methods such as Singular Value Decomposition (SVD) [20]. Collaborative filtering (CF) is another commonly used technique for the recommender systems [1]. Recent advancements have evolved CF techniques into more complex neural network architectures and graph-based models [14, 44] to enhance the model performance. Sequential recommendation models incorporate temporal patterns into the recommendation pipeline. Techniques such as recurrent neural networks have been adapted for this purpose [15]. Recent research focuses on applying attention mechanisms to further refine these models, leading to a noteworthy boost in performance [18, 37].
52
+
53
+ Although conventional recommendation techniques are well-suited for capturing latent information associated with users and items, they often require a substantial amount of user-item interactions to provide accurate recommendations, which limits their effectiveness in data sparse and long-tail scenarios [32].
54
+
55
+ # 2.2 Large Language Model for Recommendation
56
+
57
+ LLMs have brought a paradigm shift in numerous areas of machine learning, including recommendation methods [7]. One of the most compelling advantages of LLM-based recommendation methods is their capacity for contextual understanding and in context learning [5]. Inspired by this, reference [24] utilized ChatGPT across diverse recommendation tasks and found it to be effective in specific contexts, underpinned by robust experiments and human evaluations. Similarly, Wang et al. [42] introduced a novel zero-shot technique for next-item recommendations, further substantiating the utility of LLMs in this arena. However, it is noteworthy that these methods do not consistently demonstrate a marked improvement over conventional recommendation algorithms, which is largely attributable to the inherent misalignment between the general-purpose capabilities of LLMs and the specialized requirements of the recommendation task. To address this issue, recent studies further attempt to instruct tuning the specific LLM to align with human preference [2, 51]. Typically, these approaches involve creating an instruction tuning dataset in line with recommendation tasks, which is then used to tune the LLM for recommendation. Such methodologies have demonstrated improved performance in generating more aligned and accurate recommendations.
58
+
59
+ Nevertheless, while LLMs excel at capturing intricate textual patterns, they may encounter challenges in comprehensively encoding user and item collaborative or sequential information. Though some concurrent studies [54, 55] aim to address this gap, they often lack in terms of generalizability and comprehensibility. In response to this challenge, we propose a novel framework designed to mitigate this issue.
60
+
61
+ # 3 PRELIMINARY
62
+
63
+ We consider a recommender system with a set of users, denoted $\mathcal{U} = \{u_1, u_2, \dots, u_n\}$ , and a set of items, denoted $\mathcal{I} = \{i_1, i_2, \dots, i_m\}$ . The rating prediction task aims to estimate the unknown values of $r_{ui}$ in the user-item interaction matrix $R \in \mathbb{R}^{n \times m}$ , where each entry $r_{ui}$ is the rating assigned by user $u$ to item $i$ . Different from rating prediction, top- $k$ recommendation focuses on identifying a subset
64
+
65
+ ![](images/f82374e5e609d9ee01e4f9e203ce1330d72949a38c6fef4afb8027a4699d28de.jpg)
66
+ (i). Overall Framework
67
+
68
+ ![](images/a47c03b63c04380ac3d5e016bf293ef8d91888a0dcbb7bfd02d2560c06c98f4c.jpg)
69
+ (iii). Adaptive Aggregation
70
+ Figure 1: (i) The overall framework architecture of the proposed Llama4Rec consists of two main components: mutual augmentation and adaptive aggregation. The mutual augmentation includes data augmentation and prompt augmentation. (ii) Illustration of the data augmentation process encompasses three diverse recommendation scenarios. (iii) The pipeline of the adaptive aggregation module, which merges the predictions from the conventional recommendation model and the LLM.
71
+
72
+ of items $S_{u} \subset I$ for each user $u$ . The subset is chosen to maximize a user-specific utility $U(u, S)$ with the constraint $|S| = k$ , which can be formally expressed as:
73
+
74
+ $$
75
+ \mathcal {S} _ {u} = \arg \max _ {\mathcal {S} \subset \mathcal {I}, | \mathcal {S} | = k} U (u, \mathcal {S}). \tag {1}
76
+ $$
77
+
78
+ In the context of LLM-based recommendation methods, let $LLM$ represent the original LLM. These kinds of methods first utilize prompt to interpret the recommendation task into natural language. The LLM-based recommendation for user $u$ with in-context learning is denoted by $r_u = LLM(\mathcal{P}_u)$ where $\mathcal{P}_u$ is the recommendation prompt for user $u$ . The recommendation prompt could either ask LLM to predict a rating towards a target item, or rank candidate items derived by retrieval models in top- $k$ recommendation. To instruction fine-tune LLM, a dedicated dataset $\mathcal{D}_{ins}$ consisting of various instructions is utilized. The resulting instruction-tuned LLM is denoted as $LLM'$ . Therefore, the recommendation process in the fine-tuned model can be succinctly represented as $r_u = LLM'(\mathcal{P}_u)$ .
79
+
80
+ # 4 METHODOLOGY
81
+
82
+ # 4.1 Overview
83
+
84
+ Figure 1 depicts the architecture of Llama4Rec, which consists of three components: data augmentation, prompt augmentation, and adaptive aggregation. More specifically, Llama4Rec leverages an instruction-tuned LLM to enhance conventional recommendation systems through data augmentation. The specific data augmentation strategies for different recommendation situations are detailed in Section 4.2. In addition, we employ conventional recommendation models to augment the LLM via prompt augmentation, with details in Section 4.3. To further refine the predictions of the conventional model and the LLM, we propose a simple yet effective adaptive aggregation module in Section 4.4. Lastly, we describe the training strategy for LLM in Section 4.5.
85
+
86
+ # 4.2 Data Augmentation for Conventional Recommendation Model
87
+
88
+ We design ad-hoc data augmentation strategies for different recommendation scenarios to mitigate prevalent issues of data sparsity and the long-tail problem. This design is motivated by the fact that data distribution and tasks significantly vary across different recommendation scenarios. In the context of direct recommendation, we capitalize on the power of the instruction-tuned LLM to predict items that a user may like or dislike. We form pairs of these items to calculate the Bayesian Personalized Ranking (BPR) [34] loss. For sequential recommendation, we harness the capabilities of the instruction-tuned LLM to predict items that are highly preferred by the user. These predicted items are then randomly inserted into the sequence of items the user has interacted with. For rating prediction, we utilize the LLM to extract valuable side information (i.e., missing attributes), which is then seamlessly integrated as additional features within the training data.
89
+
90
+ 4.2.1 Data Augmentation for Direct Recommendation. For direct recommendation, the Bayesian Personalized Ranking (BPR) loss is commonly used to optimize the model [34]. The objective of BPR is to maximize the score difference between correctly recommended items and incorrectly recommended items, thereby improving the accuracy of recommendations. The BPR loss is defined as:
91
+
92
+ $$
93
+ \mathcal {L} _ {B P R} = - \sum_ {(u, i, j) \in \mathcal {D}} \log \sigma \left(\hat {y} _ {u i} - \hat {y} _ {u j}\right), \tag {2}
94
+ $$
95
+
96
+ where $(u,i,j)$ refers to a triple of user-item pairs, and the user $u$ has interacted with item $i$ (positive item) and item $j$ (negative item). $\mathcal{D}$ represents the set of such user-item pairs in the training data. $\hat{y}_{ui}$ denotes the predicted score or preference of user $u$ for item $i$ .
97
+
98
+ Inspired by this, we propose a data augmentation strategy where we randomly select pairs of items for a user $u$ and prompt the LLM
99
+
100
+ to rank each pair based on the user's likely preference. The ranking prediction based on LLM is then combined with the original data and used to train a direct recommendation model. Formally, let $(i_j,i_k)$ denote a pair of items for a user $u$ . The LLM is prompted to rank these items, denoted as $i^{+},i^{-} = LLM(\mathcal{P}_1)$ , where $\mathcal{P}_1$ is the corresponding prompt and $i^{+}$ is the item preferred over $i^{-}$ . The training data $\mathcal{D}$ is updated as $\mathcal{D}' = \mathcal{D} \cup (u,i^{+},i^{-})$ . The BPR loss is then updated as:
101
+
102
+ $$
103
+ \mathcal {L} _ {B P R} ^ {\prime} = - \sum_ {(u, i, j) \in \mathcal {D} ^ {\prime}} \log \sigma \left(\hat {y} _ {u i} - \hat {y} _ {u j}\right). \tag {3}
104
+ $$
105
+
106
+ This data augmentation strategy leverages the power of the instruction tuned LLM to enhance the performance of the direct recommendation model.
107
+
108
+ 4.2.2 Data Augmentation for Sequential Recommendation. For sequential recommendation, the data augmentation strategy involves enriching the sequence of interacted items with additional items predicted by the LLM. Let's consider a user $u$ with a corresponding sequence of interacted items $\{i_1, \dots, i_l\}$ . We randomly sample a list of un-interacted items $\{i_{u1}, \dots, i_{uk}\}$ , and adopt the prompt $\mathcal{P}_2$ to ask the LLM to predict the item most likely to be preferred by the user, denoted as $i_p = LLM(\mathcal{P}_2)$ . This predicted item $i_p$ is then randomly inserted into the user's sequence, resulting in an augmented sequence $\{i_1, \dots, i_p, \dots, i_l\}$ . This augmented data is then used to train a more powerful sequential recommendation model.
109
+
110
+ By including additional items predicted by the LLM, we can enrich the sequence of items for each user, providing a more comprehensive representation of the user's preferences. This, in turn, can enhance the performance of the conventional recommendation model, leading to more accurate recommendations.
111
+
112
+ 4.2.3 Data Augmentation for Rating Prediction. In rating prediction tasks, we introduce the use of in-context learning (ICL) in LLMs to provide side information. This is primarily due to the fact that recommendation datasets may contain incomplete information. For instance, the popular Movielens dataset [10] lacks information about the director of the movies, which can hinder the performance of a conventional rating prediction model. To mitigate this issue, we leverage the extensive world knowledge contained in an LLM. We prompt the LLM to provide side information, acting as additional attributes for users/items.
113
+
114
+ Formally, we denote the rating prediction model as $\mathcal{M}_r$ , and the attribute set as $\mathcal{A} = \{a_1, a_2, \dots, a_n\}$ , where $a_i \in \mathcal{A}$ denotes a distinct attribute. The model predicts the rating as $\mathsf{Pred} = \mathcal{M}_r(\mathcal{A})$ . We then prompt the LLM to provide additional attributes, where the prompt $\mathcal{P}_3$ contains some corresponding examples followed by detailed instructions. The process is denoted as $\{a_{d1}, a_{d2}, \dots\} = \mathsf{LLM}(\mathcal{P}_3)$ . The augmented attribute set is then formed as $\mathcal{A}' = \mathcal{A} \cup \{a_{d1}, a_{d2}, \dots\}$ . The model then predicts the rating using the augmented attribute set, denoted as $\mathsf{Pred}' = \mathcal{M}_r(\mathcal{A}')$ . This approach allows us to leverage the LLM's world knowledge to enhance the performance of the rating prediction model.
115
+
116
+ # Top- $k$ Recommendation Prompt Example:
117
+
118
+ Instruction: Rank the candidate movies based on user historical interactions and make the top $k$ recommendations.
119
+
120
+ Interaction History: Beyond Rangoon (1995); Alien (1979); Hollow Reed (1996); Primary Colors (1998); ...; Birds, The (1963)
121
+
122
+ Candidate Items: Last Dance (1996); Remains of the Day, The (1993); Assassins (1995); ...; Fatal Instinct (1993)
123
+
124
+ Similar User Interaction History: L.A. Confidential (1997);
125
+
126
+ Apt Pupil (1998); Kolya (1996); ...; Star Wars (1977)
127
+
128
+ Conventional Model Prediction: Remains of the Day, The (1993); Addiction, The (1995); ...; Fugitive, The (1993)
129
+
130
+ Output: Fugitive, The (1993); Angel Baby (1995); ...; Remains of the Day, The (1993)
131
+
132
+ # Rating Prediction Prompt Example:
133
+
134
+ Instruction: Predict the rating of a target movie based on the user's historical movie ratings.
135
+
136
+ Rating History: Independence Day (1996): 3; Grosse Fatigue (1994): 3; Face/Off (1997): 4; ...; Shall We Dance? (1996): 3
137
+
138
+ Candidate Item: Pink Floyd - The Wall (1982)
139
+
140
+ Similar User Rating History: L.A. Confidential (1997): 3; Apt Pupil (1998): 4; ...; English Patient, The (1996): 3
141
+
142
+ Conventional Model Prediction: 3.2
143
+
144
+ Output: 3
145
+
146
+ Figure 2: Examples of instructions for top- $k$ recommendation and rating prediction. The prompt augmentation component is underlined. To improve readability and facilitate better experimental evaluation, we introduce certain modifications to the original instructions employed in our experiments.
147
+
148
+ # 4.3 Prompt Augmentation for Large Language Model
149
+
150
+ Previous works [2, 51] instruction-tune LLMs for recommendation in a standard manner. However, these methods can be suboptimal due to the challenges of distinguishing users based solely on text-based prompt descriptions. Although some concurrent studies [54, 55] incorporate unique identifiers to differentiate users, these approaches require complex semantic understanding of IDs and additional training, limiting their generalizability.
151
+
152
+ In this section, we introduce two text-based prompt augmentation strategies for LLM-based recommendations, i.e., we incorporate additional information within the prompt to enhance the model performance. First, we propose prompt augmentation with similar users, identifying users with analogous preferences to enrich the prompt, thereby enhancing the LLM's ability to leverage collaborative information and generate personalized recommendations. Second, we propose prompt augmentation with conventional model prediction, providing prior knowledge to guide the LLM toward recommendations that align with user preferences. Collectively, these strategies harness the strengths of both LLMs and conventional recommendation models, ensuring generalizability across a wide range of recommendation scenarios. The illustration of prompt augmentation is underlined in Figure 2.
153
+
154
+ 4.3.1 Prompt Augmentation with Collaborative Information from Similar User. To incorporate collaborative information within the prompt and facilitate LLM reasoning, we introduce a prompt augmentation strategy with similar user. Initially, we utilize a pretrained conventional recommendation model to acquire embeddings for each user. These embeddings represent users in a latent space, which encapsulates their preferences and behaviors. Specifically, for a user $u$ , in conjunction with a conventional recommendation model $\mathcal{M}_c$ , we use $\mathcal{M}_c$ to obtain embeddings for each user, denoted as $\{e_1, \dots, e_n\}$ . These embeddings encapsulate the preferences and behaviors of the users, serving as a compact representation of the users in a latent space. We then calculate the similarity between these embeddings in the latent space. Various measurements, such as cosine similarity, Jaccard similarity, and Euclidean distance, could be employed in this context. In this paper, we calculate the cosine similarity to measure how closely two vectors align, denoted as:
155
+
156
+ $$
157
+ \operatorname {s i m} (u, v) = \frac {e _ {u} \cdot e _ {v}}{\left| \left| e _ {u} \right| \right| \cdot \left| \left| e _ {v} \right| \right|}, \tag {4}
158
+ $$
159
+
160
+ where $e_{u}$ and $e_{v}$ are the embeddings of user $u$ and $v$ , respectively, and $u, v \in \mathcal{U}$ . The $|| \cdot ||$ denotes the Euclidean norm and $\cdot$ denotes the dot product. We identify the pair of users $(u, v)$ that have the highest similarity, indicating that they are the most similar in terms of their preferences and behaviors. We then use the items interacted with by the most similar user to enrich the prompt for the target user. This strategy leverages the collaborative information gleaned from similar users to generate more relevant and accurate prompts, thereby enhancing the recommendation performance of the LLM.
161
+
162
+ 4.3.2 Prompt Augmentation with Prior Knowledge from Conventional Recommendation Model Prediction. To enable the LLM to leverage information captured by conventional models, we propose a prompt augmentation method that incorporates information from conventional recommendation models. More specifically, the augmented prompt is formed by concatenating the original prompt with the prediction from the conventional recommendation model in natural language form. It's important to note that the prediction from the conventional model varies depending on the recommendation scenarios and base models. Through augmenting prompts with predictions from conventional recommendation models, our method integrates collaborative or sequential information captured by these models, thereby enhancing the LLM's contextual understanding and reasoning capabilities and resulting in better recommendation performance.
163
+
164
+ Notably, unlike ID-based methods such as [54, 55], our approach relies entirely on text, enabling easy adaptation to new situations. Also, the prompt augmentation could be used as a plug-and-play component for recommendation with closed source LLM, such as the GPT-4 model [30].
165
+
166
+ # 4.4 Adaptive Aggregation
167
+
168
+ We endeavor to aggregate the outputs of LLM and conventional recommendation models at the result level for improved performance, considering the disparate model structures. However, indiscriminate aggregation of model predictions can potentially lead to suboptimal results. Conventional recommendation models, known for their susceptibility to the long tail issue, often struggle when
169
+
170
+ dealing with the tail segment. In contrast, LLMs, by leveraging contextual information, are able to maintain a relatively uniform performance across all segments. Motivated by these observations, we first define the long-tail coefficient and subsequently adaptively aggregate the predictions from both model types.
171
+
172
+ We first define the long-tail coefficient $\ell_{u}$ for user $u$ to quantify where the user is located in the tail of the distribution. The long-tail coefficient is defined as follows:
173
+
174
+ $$
175
+ \ell_ {u} = \log (N (u) + 1), \tag {5}
176
+ $$
177
+
178
+ where $N(u)$ is the number of interactions for user $u$ . A lower long-tail coefficient value indicates that the user has provided less feedback.
179
+
180
+ While the overarching architecture remains consistent, the implementation details are different for the two tasks considered, namely rating prediction and top- $k$ recommendation.
181
+
182
+ 4.4.1 Adaptive Aggregation for Rating Prediction. For the rating prediction task, we employ an instruction-tuned LLM to predict user-item utility scores directly. This approach incorporates the understanding of complex semantics and context by the LLM, which might be overlooked by traditional models. Similarly, conventional recommendation methods leverage collaborative information and user/item features for predicting the user rating. Specifically, the utility weight for user $u$ , denoted as $U_{u}$ , is directly set as its user rating. Subsequently, the LLM is engaged to predict the rating, symbolized as $U_{LLM}$ . There are various methods to derive a final result based on the utility scores, such as training a neural network to process the utility scores from LLM and conventional models, yielding a final output via learning the complex reflection. However, for the sake of simplicity in this paper, we adopt a simple yet effective linear interpolation approach. The final utility score for a user $u$ amalgamates the values from both models, represented as:
183
+
184
+ $$
185
+ U _ {u} = \alpha_ {u} U _ {L L M} + (1 - \alpha_ {u}) U _ {R e c}, \tag {6}
186
+ $$
187
+
188
+ where $\alpha_{u}$ is the adaptive parameter to control the weight for each model's utility value for user $u$ . We define the $\alpha_{u}$ as:
189
+
190
+ $$
191
+ \alpha_ {u} = \max \left(\frac {\ell_ {m a x} - \ell_ {u}}{\ell_ {m a x} - \ell_ {m i n}}, \alpha_ {2}\right) \cdot \alpha_ {1}, \tag {7}
192
+ $$
193
+
194
+ where $\ell_{max}$ and $\ell_{min}$ are the maximum and minimum long-tail coefficients of the users, respectively, $\alpha_{1}$ is a hyper-parameter that controls the weight, and $\alpha_{2} < 1$ is a cut-off weight. From Equation (7), we can observe that for user $u$ , the further they are positioned in the long tail (i.e., the fewer items they have interacted with), the lower is the value of $\ell_{u}$ and the higher is the value of $\alpha_{u}$ . As a result, in Equation (6), the weight of the utility score from the LLM model becomes more pronounced. This aligns with the motivation we previously discussed.
195
+
196
+ 4.4.2 Adaptive Aggregation for Top-k Recommendation. For the top-k recommendation task, the LLM is employed to re-rank the item list generated by a conventional recommendation model. Specifically, from conventional recommendation methods, we curate a top-ranked list comprising $k'$ items, denoted as $\{i_1, \dots, i_{k'}\}$ . Each item in this list is assigned a utility weight, $U_{Rec}^i = -s \cdot C$ , where $C$ is a constant and $s$ represents the position of item $i$ , i.e., $s \in \{1, \dots, k'\}$ . A higher utility weight indicates a stronger inclination of the user's
197
+
198
+ preference. For listwise comparison conducted by the LLM, the process begins by using the LLM to directly output the predicted order of these candidate items. Then we assign utility scores for items at each position, denoted as $U_{1}, U_{2}, \dots, U_{k'}$ , where $U_{1} \geq U_{2} \geq \dots \geq U_{k'}$ . The final utility score for an item amalgamates the values from both the original rating and the LLM prediction, similar to the Equation (6).
199
+
200
+ # 4.5 Training Strategy for LLM
201
+
202
+ 4.5.1 Instruction Tuning Dataset Construction. This section details the creation of an instruction-tuning dataset that encompasses two types of recommendation tasks catering to top- $k$ recommendation and rating prediction scenarios. A depiction of these two tasks, specifically referred to as listwise ranking and rating prediction, can be found in Figure 2. It is noteworthy that we also employ the LLM to execute pointwise ranking within top- $k$ recommendation scenarios, i.e., utilizing LLM to predict ratings for each item within the top- $k$ recommendations and sorting the predicted ratings to derive the final result.
203
+
204
+ 4.5.2 Optimization via Instruction Tuning. In this work, we perform full parameter instruction tuning to optimize LLMs using generated instruction data. Due to our need for customization, we chose LLaMA-2 [40], an open-source, high-performing LLM, which permits task-specific fine-tuning. During supervised fine-tuning, we apply a standard cross-entropy loss following Alpaca [38]. The training set $\mathcal{D}_{ins}$ consists of instruction input-output pairs $(x,y)$ , which have been represented in natural language. The objective is to fine-tune the pre-trained LLM by minimizing the cross-entropy loss, formalized as:
205
+
206
+ $$
207
+ \min_{\Theta} \sum_{(x, y) \in \mathcal{D}_{ins}} \sum_{t=1}^{|y|} -\log P_{\Theta}\left(y_{t} \mid x, y_{[1:t-1]}\right), \tag{8}
208
+ $$
209
+
210
+ where $\Theta$ are the original parameters for LLM, $P_{\Theta}$ is the conditional probability, $|y|$ is the number of tokens in $y$ , $y_{t}$ is the $t$ -th token in the target output $y$ , and $y_{[1:t-1]}$ represents tokens preceding $y_{t}$ in $y$ . By minimizing this loss function, the model fine-tunes its parameters $\Theta$ to adapt to the specifics of the new instruction tuning dataset $\mathcal{D}_{ins}$ , while leveraging the general language understanding and reasoning that has been acquired during pre-training [52]. In this manner, LLM can capture the user's preferences for items expressed in natural language, facilitating diverse recommendation tasks, including top- $k$ recommendation and rating prediction.
211
+
212
+ # 5 EXPERIMENT
213
+
214
+ In this section, we present a thorough empirical evaluation to validate the effectiveness of our proposed framework. Specifically, our objective is to investigate whether the incorporation of our proposed Llama4Rec could enhance existing recommendation models. The overarching goal is to answer the following research questions:
215
+
216
+ - RQ1: Does our proposed Llama4Rec framework enhance the performance of existing recommendation models?
217
+ - RQ2: How do the various modules in Llama4Rec affect the recommendation performance?
218
+ - RQ3: How do different hyper-parameters impact the overall performance of the framework?
219
+
220
+ Table 1: Dataset Description.
221
+
222
+ <table><tr><td></td><td>ML-100K</td><td>ML-1M</td><td>BookCrossing</td></tr><tr><td># of User</td><td>943</td><td>6,040</td><td>6,851</td></tr><tr><td># of Item</td><td>1,682</td><td>3,706</td><td>9,085</td></tr><tr><td># of Rating</td><td>100,000</td><td>1,000,209</td><td>115,219</td></tr><tr><td>Density</td><td>0.063046</td><td>0.044683</td><td>0.001851</td></tr><tr><td>User Features</td><td>Gender, ZipCode, Occupation, Age</td><td>Gender, ZipCode, Occupation, Age</td><td>Location, Age</td></tr><tr><td>Item Features</td><td>Title, Genres Year</td><td>Title, Genres</td><td>Title, Author, Year, Publisher</td></tr><tr><td>Augmented Features</td><td>Movie Director, Movie Star</td><td>Movie Director, Movie Star</td><td>Book Genres, Page Length</td></tr></table>
223
+
224
+ # 5.1 Experiment Setup
225
+
226
+ 5.1.1 Dataset. Following [2], we rigorously evaluate the performance of our proposed framework by employing three heterogeneous, real-world datasets. The MovieLens$^{1}$ datasets [10] serve as benchmarks in the realm of movie recommendation methods. We employ two variants of the dataset: MovieLens-100K (ML-100K) and MovieLens-1M (ML-1M). The former consists of approximately 100,000 user-item ratings, while the latter scales up to roughly 1,000,000 ratings. BookCrossing $^{2}$ [57] includes user-generated book ratings on a scale of 1 to 10, alongside metadata such as 'Book-Author' and 'Book-Title'. We employ LLM to augment the 'director' and 'star' features for ML-100K and ML-1M datasets, and augment the 'genre' and 'page length' features for the BookCrossing dataset. To ensure the data quality, we adopt the 5-core setting, i.e., we filter unpopular users and items with fewer than five interactions for the BookCrossing dataset. The key characteristics of these datasets are delineated in Table 1.
227
+
228
+ 5.1.2 Evaluation Metrics. Aligning with [13, 37], for the top- $k$ recommendation task, we turn to two well-established metrics: Hit Ratio (HR) and Normalized Discounted Cumulative Gain (NDCG), denoted by H and N, respectively. In our experiments, $k$ is configured to be either 3 or 5 for a comprehensive evaluation, similar to the experiment setting in [51]. In accordance with [6], we employ Root Mean Squared Error (RMSE) and Mean Absolute Error (MAE) as evaluation metrics to ascertain the performance of the rating prediction task.
229
+
230
+ 5.1.3 Data Preprocessing. Following the methodology of prior works [29, 51], we adopt a leave-one-out evaluation strategy. More specifically, within each user's interaction sequence, we choose the most recent item as the test instance. The item immediately preceding this serves as the validation instance, while all remaining interactions are used to constitute the training set. Moreover, regarding the instruction-tuning dataset construction, we randomly sampled 5K instructions for each recommendation task on the ML-100K, ML-1M, and BookCrossing datasets, respectively. We eliminated instructions that were repetitive or of low quality (identified by
231
+
232
+ Table 2: Performance achieved by different direct recommendation methods.
233
+
234
+ <table><tr><td rowspan="2">Backbone</td><td rowspan="2">Method</td><td colspan="4">ML-100K</td><td colspan="4">ML-1M</td><td colspan="4">BookCrossing</td></tr><tr><td>H@3↑</td><td>N@3↑</td><td>H@5↑</td><td>N@5↑</td><td>H@3↑</td><td>N@3↑</td><td>H@5↑</td><td>N@5↑</td><td>H@3↑</td><td>N@3↑</td><td>H@5↑</td><td>N@5↑</td></tr><tr><td rowspan="4">MF</td><td>Base</td><td>0.0455</td><td>0.0325</td><td>0.0690</td><td>0.0420</td><td>0.0255</td><td>0.0187</td><td>0.0403</td><td>0.0248</td><td>0.0294</td><td>0.0227</td><td>0.0394</td><td>0.0269</td></tr><tr><td>IFT</td><td>0.0546</td><td>0.0388</td><td>0.0790</td><td>0.0488</td><td>0.0242</td><td>0.0175</td><td>0.0410</td><td>0.0244</td><td>0.0247</td><td>0.0177</td><td>0.0377</td><td>0.0230</td></tr><tr><td>Llama4Rec</td><td>0.0645*</td><td>0.0474*</td><td>0.0919*</td><td>0.0588*</td><td>0.0281*</td><td>0.0203*</td><td>0.0433*</td><td>0.0265*</td><td>0.0365*</td><td>0.0284*</td><td>0.0462*</td><td>0.0324*</td></tr><tr><td>Impro.</td><td>18.13%</td><td>22.16%</td><td>16.33%</td><td>20.49%</td><td>10.20%</td><td>8.56%</td><td>5.61%</td><td>6.85%</td><td>24.15%</td><td>25.55%</td><td>17.26%</td><td>20.45%</td></tr><tr><td 
rowspan="4">LightGCN</td><td>Base</td><td>0.0492</td><td>0.0343</td><td>0.0744</td><td>0.0447</td><td>0.0283</td><td>0.0203</td><td>0.0432</td><td>0.0264</td><td>0.0358</td><td>0.0272</td><td>0.0480</td><td>0.0322</td></tr><tr><td>IFT</td><td>0.0537</td><td>0.0381</td><td>0.0846</td><td>0.0507</td><td>0.0268</td><td>0.0193</td><td>0.0441</td><td>0.0263</td><td>0.0287</td><td>0.0202</td><td>0.0448</td><td>0.0268</td></tr><tr><td>Llama4Rec</td><td>0.0647*</td><td>0.0476*</td><td>0.0967*</td><td>0.0608*</td><td>0.0304*</td><td>0.0222*</td><td>0.0461*</td><td>0.0286*</td><td>0.0434*</td><td>0.0338*</td><td>0.057*</td><td>0.0394*</td></tr><tr><td>Impro.</td><td>20.48%</td><td>24.93%</td><td>14.30%</td><td>19.92%</td><td>7.42%</td><td>9.36%</td><td>4.54%</td><td>8.33%</td><td>21.23%</td><td>24.26%</td><td>18.75%</td><td>22.36%</td></tr><tr><td rowspan="4">MixGCF</td><td>Base</td><td>0.0526</td><td>0.0401</td><td>0.0757</td><td>0.0496</td><td>0.0159</td><td>0.0115</td><td>0.0238</td><td>0.0147</td><td>0.0426</td><td>0.0330</td><td>0.0556</td><td>0.0384</td></tr><tr><td>IFT</td><td>0.0617</td><td>0.0452</td><td>0.0906</td><td>0.0570</td><td>0.0162</td><td>0.0114</td><td>0.0259</td><td>0.0154</td><td>0.0337</td><td>0.0243</td><td>0.0506</td><td>0.0312</td></tr><tr><td>Llama4Rec</td><td>0.0690*</td><td>0.0515*</td><td>0.0949*</td><td>0.0621*</td><td>0.0174*</td><td>0.0128*</td><td>0.0259</td><td>0.0162*</td><td>0.0495*</td><td>0.0384*</td><td>0.0635*</td><td>0.0441*</td></tr><tr><td>Impro.</td><td>11.83%</td><td>13.94%</td><td>4.75%</td><td>8.95%</td><td>7.41%</td><td>11.30%</td><td>0.00%</td><td>5.19%</td><td>16.20%</td><td>16.36%</td><td>14.21%</td><td>14.84%</td></tr><tr><td 
rowspan="4">SGL</td><td>Base</td><td>0.0505</td><td>0.0380</td><td>0.0729</td><td>0.0472</td><td>0.0284</td><td>0.0206</td><td>0.0434</td><td>0.0267</td><td>0.0419</td><td>0.0319</td><td>0.0566</td><td>0.0380</td></tr><tr><td>IFT</td><td>0.0520</td><td>0.0392</td><td>0.0792</td><td>0.0503</td><td>0.0275</td><td>0.0202</td><td>0.0438</td><td>0.0269</td><td>0.0326</td><td>0.0237</td><td>0.0499</td><td>0.0307</td></tr><tr><td>Llama4Rec</td><td>0.0632*</td><td>0.0479*</td><td>0.0917*</td><td>0.0596*</td><td>0.0308*</td><td>0.0224*</td><td>0.0480*</td><td>0.0294*</td><td>0.0501*</td><td>0.0393*</td><td>0.0634*</td><td>0.0448*</td></tr><tr><td>Impro.</td><td>21.54%</td><td>22.19%</td><td>15.78%</td><td>18.49%</td><td>8.45%</td><td>8.74%</td><td>9.59%</td><td>9.29%</td><td>19.57%</td><td>23.20%</td><td>12.01%</td><td>17.89%</td></tr></table>
235
+
236
+ Table 3: Performance achieved by different sequential recommendation methods.
237
+
238
+ <table><tr><td rowspan="2">Backbone</td><td rowspan="2">Method</td><td colspan="4">ML-100K</td><td colspan="4">ML-1M</td><td colspan="4">BookCrossing</td></tr><tr><td>H@3↑</td><td>N@3↑</td><td>H@5↑</td><td>N@5↑</td><td>H@3↑</td><td>N@3↑</td><td>H@5↑</td><td>N@5↑</td><td>H@3↑</td><td>N@3↑</td><td>H@5↑</td><td>N@5↑</td></tr><tr><td rowspan="4">SASRec</td><td>Base</td><td>0.0187</td><td>0.0125</td><td>0.0385</td><td>0.0205</td><td>0.0277</td><td>0.0165</td><td>0.0502</td><td>0.0257</td><td>0.0086</td><td>0.0049</td><td>0.0163</td><td>0.0081</td></tr><tr><td>IFT</td><td>0.0204</td><td>0.0136</td><td>0.0379</td><td>0.0207</td><td>0.0241</td><td>0.0159</td><td>0.0473</td><td>0.0254</td><td>0.0124</td><td>0.0086</td><td>0.0185</td><td>0.0111</td></tr><tr><td>Llama4Rec</td><td>0.0238*</td><td>0.0155*</td><td>0.0449*</td><td>0.0240*</td><td>0.0293*</td><td>0.0201*</td><td>0.0504</td><td>0.0287*</td><td>0.0142*</td><td>0.0098*</td><td>0.0227*</td><td>0.0131*</td></tr><tr><td>Impro.</td><td>16.67%</td><td>13.97%</td><td>16.62%</td><td>15.94%</td><td>5.78%</td><td>21.82%</td><td>0.40%</td><td>11.67%</td><td>14.52%</td><td>13.95%</td><td>22.70%</td><td>18.02%</td></tr><tr><td 
rowspan="4">BERT4Rec</td><td>Base</td><td>0.0153</td><td>0.0104</td><td>0.0294</td><td>0.0161</td><td>0.0107</td><td>0.0069</td><td>0.0211</td><td>0.0112</td><td>0.0088</td><td>0.0058</td><td>0.0161</td><td>0.0088</td></tr><tr><td>IFT</td><td>0.0174</td><td>0.0119</td><td>0.0326</td><td>0.0100</td><td>0.0106</td><td>0.0071</td><td>0.0188</td><td>0.0104</td><td>0.0127</td><td>0.0092</td><td>0.0180</td><td>0.0113</td></tr><tr><td>Llama4Rec</td><td>0.0198*</td><td>0.0134*</td><td>0.0332</td><td>0.0189*</td><td>0.0115*</td><td>0.0078*</td><td>0.0206</td><td>0.0115*</td><td>0.0154*</td><td>0.0108*</td><td>0.023*</td><td>0.0139*</td></tr><tr><td>Impro.</td><td>13.79%</td><td>12.61%</td><td>1.84%</td><td>17.39%</td><td>7.48%</td><td>9.86%</td><td>-2.37%</td><td>2.68%</td><td>21.26%</td><td>17.39%</td><td>27.78%</td><td>23.01%</td></tr><tr><td rowspan="4">CL4SRec</td><td>Base</td><td>0.0243</td><td>0.0143</td><td>0.0436</td><td>0.0222</td><td>0.0259</td><td>0.0153</td><td>0.0492</td><td>0.0248</td><td>0.0083</td><td>0.0048</td><td>0.0165</td><td>0.0082</td></tr><tr><td>IFT</td><td>0.0230</td><td>0.0149</td><td>0.0428</td><td>0.0230</td><td>0.0234</td><td>0.0155</td><td>0.0447</td><td>0.0241</td><td>0.0102</td><td>0.0071</td><td>0.0177</td><td>0.0102</td></tr><tr><td>Llama4Rec</td><td>0.0255*</td><td>0.0182*</td><td>0.0440</td><td>0.0255*</td><td>0.0278*</td><td>0.0185*</td><td>0.0482</td><td>0.0268*</td><td>0.0138*</td><td>0.0093*</td><td>0.0220*</td><td>0.0127*</td></tr><tr><td>Impro.</td><td>4.94%</td><td>22.15%</td><td>0.92%</td><td>10.87%</td><td>7.34%</td><td>19.35%</td><td>-2.03%</td><td>8.06%</td><td>35.29%</td><td>30.99%</td><td>24.29%</td><td>24.51%</td></tr></table>
239
+
240
+ users with fewer than three interactions in their interaction history), leaving approximately 25K high-quality instructions. These instructions are mixed to create an instruction-tuning dataset to fine-tune the LLM.
241
+
242
+ 5.1.4 Backbone Models. We incorporate our Llama4Rec with the following recommendation models that are often used for various recommendation tasks as the backbone models:
243
+
244
+ - Direct Recommendation. In the scenario of direct recommendation, We adopt four representative methods, including: MF [20], LightGCN [13], MixGCF [17], and SGL [46].
245
+ - Sequential Recommendation. Regarding sequential recommendation, we opt for three widely used models, including: SASRec [18], BERT4Rec [37], and CL4SRec [49].
246
+ - Rating Prediction. We consider the following classical models for rating prediction, including: DeepFM [9], NFM [14], DCN [43], AFM [48], xDeepFM [23], and AutoInt [35].
247
+
248
+ We employ the LLaMA-2 7B version as the backbone LLM across all experiments, unless specifically mentioned otherwise. Our primary comparison is with the standard Instruction Fine-Tuning (IFT) method adopted in TALLRec [2] and InstructRec [51]. For the rating prediction task, LLaMA-2 with IFT is used to directly predict the rating. For the top-k recommendation task, the tuned LLM is used to re-rank the list predicted by the backbone model, in accordance with [16], referred to as listwise ranking. Besides, we also adopt LLM for predicting the rating for each item and sort by the predicted scores, referred to as pointwise ranking.
249
+
250
+ 5.1.5 Implementation Details. During training for LLaMA 2 (7B) with full-parameter tuning, we use a uniform learning rate of $2 \times 10^{-5}$ and a context length of 2048, and we set the batch size as 16. Additionally, we use a cosine scheduler for three epochs in total with a 50-step warm-up period. To efficiently train the computationally intensive models, we simultaneously employ DeepSpeed training with ZeRO-3 stage [33] and flash attention [4]. We trained the 7B
251
+
252
+ Table 4: Performance achieved by different methods in rating prediction task.
253
+
254
+ <table><tr><td rowspan="2">Backbone</td><td rowspan="2">Method</td><td colspan="2">ML-100K</td><td colspan="2">ML-1M</td><td colspan="2">BookCrossing</td></tr><tr><td>RMSE ↓</td><td>MAE ↓</td><td>RMSE ↓</td><td>MAE ↓</td><td>RMSE ↓</td><td>MAE ↓</td></tr><tr><td>LLaMA</td><td>IFT</td><td>1.2792</td><td>0.8940</td><td>1.2302</td><td>0.8770</td><td>2.0152</td><td>1.3782</td></tr><tr><td rowspan="3">DeepFM</td><td>Base</td><td>1.0487</td><td>0.8082</td><td>0.9455</td><td>0.7409</td><td>1.7738</td><td>1.3554</td></tr><tr><td>Llama4Rec</td><td>1.0306*</td><td>0.7987*</td><td>0.9360*</td><td>0.7321*</td><td>1.6958*</td><td>1.2843*</td></tr><tr><td>Impro.</td><td>1.73%</td><td>1.18%</td><td>1.00%</td><td>1.19%</td><td>4.40%</td><td>5.25%</td></tr><tr><td rowspan="3">NFM</td><td>Base</td><td>1.0284</td><td>0.8005</td><td>0.9438</td><td>0.7364</td><td>2.121</td><td>1.5984</td></tr><tr><td>Llama4Rec</td><td>1.0189*</td><td>0.7961</td><td>0.9369*</td><td>0.7303*</td><td>1.9253*</td><td>1.4473*</td></tr><tr><td>Impro.</td><td>0.92%</td><td>0.55%</td><td>0.73%</td><td>0.83%</td><td>9.23%</td><td>9.45%</td></tr><tr><td rowspan="3">DCN</td><td>Base</td><td>1.0478</td><td>0.8063</td><td>0.9426</td><td>0.7342</td><td>2.0216</td><td>1.4622</td></tr><tr><td>Llama4Rec</td><td>1.0367*</td><td>0.8033</td><td>0.9345*</td><td>0.7272*</td><td>1.8518*</td><td>1.3566*</td></tr><tr><td>Impro.</td><td>1.06%</td><td>0.37%</td><td>0.86%</td><td>0.95%</td><td>8.40%</td><td>7.22%</td></tr><tr><td rowspan="3">AFM</td><td>Base</td><td>1.0471</td><td>0.8035</td><td>0.9508</td><td>0.7464</td><td>1.6516</td><td>1.2614</td></tr><tr><td>Llama4Rec</td><td>1.0340*</td><td>0.7996</td><td>0.9426*</td><td>0.7394*</td><td>1.6244*</td><td>1.2259*</td></tr><tr><td>Impro.</td><td>1.25%</td><td>0.49%</td><td>0.86%</td><td>0.94%</td><td>1.65%</td><td>2.81%</td></tr><tr><td 
rowspan="3">xDeepFM</td><td>Base</td><td>1.1472</td><td>0.8836</td><td>0.9519</td><td>0.7428</td><td>2.1756</td><td>1.6461</td></tr><tr><td>Llama4Rec</td><td>1.0947*</td><td>0.8483*</td><td>0.9401*</td><td>0.7336*</td><td>1.9610*</td><td>1.4833*</td></tr><tr><td>Impro.</td><td>4.58%</td><td>4.00%</td><td>1.24%</td><td>1.24%</td><td>9.86%</td><td>9.89%</td></tr><tr><td rowspan="3">AutoInt</td><td>Base</td><td>1.0500</td><td>0.8120</td><td>0.9471</td><td>0.7404</td><td>1.9148</td><td>1.4501</td></tr><tr><td>Llama4Rec</td><td>1.0369*</td><td>0.8059*</td><td>0.9382*</td><td>0.7326*</td><td>1.7917*</td><td>1.3492*</td></tr><tr><td>Impro.</td><td>1.25%</td><td>0.75%</td><td>0.94%</td><td>1.05%</td><td>6.43%</td><td>6.96%</td></tr></table>
255
+
256
+ model on 16 NVIDIA A800 80GB GPUs. For the inference stage, we employed the vLLM framework [21] with greedy decoding, setting the temperature to 0. Only one GPU was utilized during the inference phase. We only evaluate the instruction-tuned LLaMA model for the rating prediction task since it is not applicable for directly making top- $k$ recommendations.
257
+
258
+ We implement the models for the rating prediction task using the DeepCTR-Torch $^3$ library. For the top- $k$ recommendation task, we utilize the SELFRec $^4$ library [50] for implementation. As for the hyper-parameter settings, $\alpha_{1}$ and $\alpha_{2}$ are selected from $\{0.1, 0.3, 0.5, 0.7, 0.9\}$ respectively for all experiments. $C$ is fixed to 1. We repeat the experiment five times and calculate the average. We report the best results obtained when the ranking method is selected from pointwise and listwise ranking. For all experiments, the best results are highlighted in **boldface**. * indicates the statistical significance for $p \leq 0.05$ compared to the best baseline method based on the paired t-test. Impro. denotes the improvement of our method over the best baseline method.
259
+
260
+ # 5.2 Main Results (RQ1)
261
+
262
+ We conducted an extensive evaluation of our proposed Llama4Rec and the baseline methods on three datasets to assess the model's performance under diverse recommendation scenarios. The experiment results for direct recommendation, sequential recommendation, and rating prediction are shown in Table 2, Table 3, and Table 4, respectively. We have the following key observations.
263
+
264
+ - Llama4Rec consistently outperforms baseline methods in almost all scenarios, with particularly significant improvements observed in the direct recommendation task. Moreover, our findings reveal that direct instruction fine-tuning LLMs for recommendation tasks does not consistently yield promising performance. These results highlight the effectiveness of integrating LLMs into
265
+
266
+ Table 5: Ablation study on key components of Llama4Rec on the ML-1M and BookCrossing datasets.
267
+
268
+ <table><tr><td rowspan="2">Models</td><td colspan="2">ML-1M</td><td colspan="2">BookCrossing</td></tr><tr><td>H@3↑</td><td>N@3↑</td><td>H@3↑</td><td>N@3↑</td></tr><tr><td>LightGCN</td><td>0.0283 (-)</td><td>0.0203 (-)</td><td>0.0358 (-)</td><td>0.0272 (-)</td></tr><tr><td>IFT</td><td>0.0268 (-5.30%)</td><td>0.0193 (-4.93%)</td><td>0.0287 (-19.84%)</td><td>0.0202 (-25.74%)</td></tr><tr><td>Llama4Rec w/o DA</td><td>0.0294 (+3.89%)</td><td>0.0209 (+2.96%)</td><td>0.0408 (+13.97%)</td><td>0.0319 (+17.28%)</td></tr><tr><td>Llama4Rec w/o PA</td><td>0.0277 (-2.12%)</td><td>0.0199 (-1.97%)</td><td>0.0372 (+3.92%)</td><td>0.0279 (+2.57%)</td></tr><tr><td>Llama4Rec w/o AA</td><td>0.0298 (+5.30%)</td><td>0.0218 (+7.39%)</td><td>0.0429 (+19.83%)</td><td>0.0332 (+22.06%)</td></tr><tr><td>Llama4Rec</td><td>0.0304 (+7.42%)</td><td>0.0222 (+9.36%)</td><td>0.0434 (+21.23%)</td><td>0.0338 (+24.26%)</td></tr></table>
269
+
270
+ conventional recommendation models, underscoring the importance of incorporating the mechanism that utilizes instruction-tuned LLM to mutually augment and adaptively aggregate with conventional recommendation models.
271
+
272
+ - In the scenario of the rating prediction task, while the instruction-tuned LLaMA model significantly underperforms when compared to conventional recommendation models, integrating the LLM yields a marked performance improvement. This suggests that the LLM and conventional recommendation models learn distinct aspects of information. Consequently, integrating the LLM with conventional recommendation models could enhance recommendation performance.
273
+
274
+ - In the context of top- $k$ recommendations, Llama4Rec exhibits a more pronounced improvement for the direct recommendation task. In addition, a more significant enhancement is observed on the BookCrossing dataset, which can be attributed to the more fine-grained and distinguishable ratings of the BookCrossing dataset.
275
+
276
+ # 5.3 Ablation Study (RQ2)
277
+
278
+ We conducted an ablation study to analyze the contributions of different components in our model. Table 5 summarizes the results of the ablation studies across three variants on the ML-1M and BookCrossing datasets. It is evident that the full model performs considerably better than all its variants, indicating that all the main components contribute significantly to overall performance improvement. Moreover, compared to the conventional model, the instruction-tuned LLM does not achieve superior results, underscoring the importance of model aggregation. We further analyze the specific impact of each component, and our observations are as follows:
279
+
280
+ - $w / o$ Data Augmentation ( $w / o$ DA): In this variant, we remove the data augmentation module while maintaining other components the same. Experimental results reveal an obvious decline in performance when this module is excluded. This indicates the module's capacity to mitigate the data sparsity and long-tail problems, consequently enhancing model performance.
281
+
282
+ - $w/o$ Prompt Augmentation ( $w/o$ PA): In this variant, we remove the prompt augmentation component, a crucial element of the proposed framework. Experimental results demonstrate a significant degradation in model performance when this module is excluded, thereby validating its essential role. By employing the instruction-tuned LLM with prompt augmentation from prior knowledge by conventional recommendation models, we achieve an enhanced model performance, attributable to the capture of different aspects of information.
283
+
284
+ - $w/o$ Adaptive Aggregation ( $w/o$ AA): In this variant, we substitute adaptive aggregation with uniform aggregation and keep
285
+
286
+ ![](images/0a8a6abf56a74105e8848717f90e9687459fbd91cc371a98271b152b026fae28.jpg)
287
+ Figure 3: Impact of hyper-parameters $\alpha_{1}$ and $\alpha_{2}$ on ML-1M dataset with backbone model LightGCN.
288
+
289
+ ![](images/4001561dcc267140fbf4dc481f10b2aa28f798a1c22347734a1f464acae11f35.jpg)
290
+
291
+ ![](images/f9f632d8eb9daa74e4340613421e0a7c4da3736cba594e98437a560b253d2acc.jpg)
292
+ Figure 4: Performance comparison w.r.t different LLaMA-2 size for training Llama4Rec on the Bookcrossing dataset.
293
+
294
+ ![](images/7adc2cd6b4160f8809db6f143b58ea8999794db91785dbedaa119bfae59a6dfe.jpg)
295
+
296
+ ![](images/c8573cf2173b91b566ab2da0d99e8bd0ac8b13ae6265dcd126714bf765a2abab.jpg)
297
+ Figure 5: Performance comparison w.r.t different numbers of instructions for training Llama4Rec on the ML-1M dataset.
298
+
299
+ ![](images/91aa79007a22bce74605eecddfc77f988cafa5ed065c1c88cff7aaaf85be2e2e.jpg)
300
+
301
+ other modules unchanged. Experimental results demonstrate a drop in model performance, underscoring the significance of accounting for the user's long-tail coefficient and employing adaptive aggregation.
302
+
303
+ # 5.4 Hyper-parameter Study (RQ3)
304
+
305
+ 5.4.1 Analysis of Hyper-parameters $\alpha_{1}$ and $\alpha_{2}$ . We conducted an analysis of the effects of hyper-parameters $\alpha_{1}$ and $\alpha_{2}$ . These parameters play crucial roles in controlling the weight in adaptive aggregation, as illustrated in Equation (7). Figure 3 presents the results on the ML-1M dataset using LightGCN as the backbone model. As $\alpha_{1}$ increases, we observe an initial surge in the model's performance, followed by a decline. This trend suggests appropriate selection of $\alpha_{1}$ would enhance the model performance. With respect to $\alpha_{2}$ , we observe a similar trend but the decline is more pronounced. This observation is consistent with the principle of adaptive aggregation, which emphasizes the importance of assigning suitable weights to tail users.
306
+ 5.4.2 Analysis of Model Scaling. We further instruction-tuned the LLaMA-2 model with different model size. A comparative analysis was conducted between the 7B and 13B variants of the instruction-tuned models, with performance differences specifically evaluated
307
+
308
+ across various backbone models within the Bookcrossing dataset, as depicted in Figure 4. Our findings suggest that the LLaMA-2 (13B) model generally surpasses the 7B version in performance. This can be attributed to the superior language comprehension and reasoning abilities of the larger model, which contribute to improved recommendation results. However, it's worth noting that the improvements are not substantial, indicating that while larger models may provide some performance benefits, the degree of improvement may not always justify the increased computational resources and training time required. It underscores the importance of considering the trade-off between model size, performance gain, and resource efficiency in the design and application of large language models.
309
+
310
+ 5.4.3 Analysis of Data Scaling. We evaluated the effect of data size on LLM training by varying the number of instructions in the instruction-tuning dataset. In proportion to our original configuration, the model with 2.5K instructions underwent 250 training steps, while the 12.5K instructions version was trained over 1250 steps. As depicted in Figure 5, a clear trend emerges: model performance improves with an increase in the number of instructions, particularly for direct recommendation models. This highlights the importance of utilizing larger and more diverse datasets for instruction tuning LLMs to optimize performance.
311
+
312
+ # 5.5 Further Discussion
313
+
314
+ In this part, we discuss the computational efficiency and future improvements. In the Llama4Rec framework, additional training with augmented data is required, which may present a potential limitation. In the current experimental setup, we train a new model from scratch. However, this process could be optimized by continuing to train a previously tuned model, thereby reducing time costs. Additionally, in our experiment, we observed that training the LLaMA-2 7B model with around 25K instructions on 16 A800 GPUs with 2500 steps took approximately 1.94 hours. The inference throughput averaged about 17 instructions per second, translating to a requirement of around 0.059 seconds per item for computation by a single A800 GPU.
315
+
316
+ This training and inference duration significantly exceeds that of conventional recommendation models, highlighting the limitations of current LLM-based recommender systems. The substantial demand for computational resources also represents a significant challenge. Consequently, employing instruction LLMs for large-scale industrial recommender systems, such as those with millions of users, is presently impractical. However, future advancements in accelerated and parallel computing algorithms for language model inference could potentially reduce inference times and computation resources. This improvement might make the integration of LLMs into large-scale recommender systems feasible, especially by leveraging many GPUs for parallel computation.
317
+
318
+ # 6 CONCLUSION AND FUTURE WORK
319
+
320
+ In this study, we present Llama4Rec, a general and model-agnostic framework tailored to facilitate mutual augmentation between conventional recommendation models and LLMs through data augmentation and prompt augmentation. Data augmentation for conventional recommendation models could alleviate issues of data
321
+
322
+ sparsity and the long-tail problem, thus improving conventional recommendation model performance. Prompt augmentation, on the other hand, allows the LLM to externalize additional collaborative or sequential information and further enhance the model capability. Furthermore, adaptive aggregation is employed to merge the predictions from both kinds of augmented models, resulting in more optimized recommendation performance. Comprehensive experimental results across three diverse recommendation tasks on three real-world datasets demonstrate the effectiveness of Llama4Rec. While our current approach focuses on mutual augmentation within a single step, our future work will explore expanding mutual augmentation in an iterative manner, potentially unlocking further improvements in model performance.
323
+
324
+ # REFERENCES
325
+
326
+ [1] Gediminas Adomavicius and Alexander Tuzhilin. 2005. Toward the next generation of recommender systems: A survey of the state-of-the-art and possible extensions. IEEE transactions on knowledge and data engineering 17, 6 (2005), 734-749.
327
+ [2] Keqin Bao, Jizhi Zhang, Yang Zhang, Wenjie Wang, Fuli Feng, and Xiangnan He. 2023. Tallrec: An effective and efficient tuning framework to align large language model with recommendation. arXiv preprint arXiv:2305.00447 (2023).
328
+ [3] Xiong-Hui Chen, Bowei He, Yang Yu, Qingyang Li, Zhiwei Qin, Wenjie Shang, Jieping Ye, and Chen Ma. 2023. Sim2Rec: A Simulator-based Decision-making Approach to Optimize Real-World Long-term User Engagement in Sequential Recommender Systems. arXiv preprint arXiv:2305.04832 (2023).
329
+ [4] Tri Dao, Dan Fu, Stefano Ermon, Atri Rudra, and Christopher Ré. 2022. Flashattention: Fast and memory-efficient exact attention with io-awareness. Advances in Neural Information Processing Systems 35 (2022), 16344-16359.
330
+ [5] Qingxiu Dong, Lei Li, Damai Dai, Ce Zheng, Zhiyong Wu, Baobao Chang, Xu Sun, Jingjing Xu, and Zhifang Sui. 2022. A survey for in-context learning. arXiv preprint arXiv:2301.00234 (2022).
331
+ [6] Wenqi Fan, Yao Ma, Qing Li, Yuan He, Eric Zhao, Jiliang Tang, and Dawei Yin. 2019. Graph neural networks for social recommendation. In The world wide web conference. 417-426.
332
+ [7] Wenqi Fan, Zihui Zhao, Jiatong Li, Yunqing Liu, Xiaowei Mei, Yiqi Wang, Jiliang Tang, and Qing Li. 2023. Recommender systems in the era of large language models (llms). arXiv preprint arXiv:2307.02046 (2023).
333
+ [8] Shijie Geng, Shuchang Liu, Zuohui Fu, Yingqiang Ge, and Yongfeng Zhang. 2022. Recommendation as language processing (rlp): A unified pretrain, personalized prompt & predict paradigm (p5). In Proceedings of the 16th ACM Conference on Recommender Systems. 299-315.
334
+ [9] Huifeng Guo, Ruiming Tang, Yunming Ye, Zhenguo Li, and Xiuqiang He. 2017. DeepFM: a factorization-machine based neural network for CTR prediction. arXiv preprint arXiv:1703.04247 (2017).
335
+ [10] F Maxwell Harper and Joseph A Konstan. 2015. The movielens datasets: History and context. Acm transactions on interactive intelligent systems (tiis) 5, 4 (2015), 1-19.
336
+ [11] Bowei He, Xu He, Renrui Zhang, Yingxue Zhang, Ruiming Tang, and Chen Ma. 2023. Dynamic Embedding Size Search with Minimum Regret for Streaming Recommender System. In Proceedings of the 32nd ACM International Conference on Information and Knowledge Management. 741-750.
337
+ [12] Bowei He, Xu He, Yingxue Zhang, Ruiming Tang, and Chen Ma. 2023. Dynamically Expandable Graph Convolution for Streaming Recommendation. In Proceedings of the ACM Web Conference 2023. 1457-1467.
338
+ [13] Xiangnan He, Kuan Deng, Xiang Wang, Yan Li, Yongdong Zhang, and Meng Wang. 2020. Lightgcn: Simplifying and powering graph convolution network for recommendation. In Proceedings of the 43rd International ACM SIGIR conference on research and development in Information Retrieval. 639-648.
339
+ [14] Xiangnan He, Lizi Liao, Hanwang Zhang, Liqiang Nie, Xia Hu, and Tat-Seng Chua. 2017. Neural collaborative filtering. In Proceedings of the 26th international conference on world wide web. 173-182.
340
+ [15] Balázs Hidasi, Alexandros Karatzoglou, Linas Baltrunas, and Domonkos Tikk. 2015. Session-based recommendations with recurrent neural networks. arXiv preprint arXiv:1511.06939 (2015).
341
+ [16] Yupeng Hou, Junjie Zhang, Zihan Lin, Hongyu Lu, Ruobing Xie, Julian McAuley, and Wayne Xin Zhao. 2023. Large language models are zero-shot rankers for recommender systems. arXiv preprint arXiv:2305.08845 (2023).
342
+ [17] Tinglin Huang, Yuxiao Dong, Ming Ding, Zhen Yang, Wenzheng Feng, Xinyu Wang, and Jie Tang. 2021. Mixgcf: An improved training method for graph neural network-based recommender systems. In Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery & Data Mining. 665-674.
343
+
344
+ [18] Wang-Cheng Kang and Julian McAuley. 2018. Self-attentive sequential recommendation. In 2018 IEEE international conference on data mining (ICDM). IEEE, 197-206.
345
+ [19] Zahid Younas Khan, Zhendong Niu, Sulis Sandiwarno, and Rukundo Prince. 2021. Deep learning techniques for rating prediction: a survey of the state-of-the-art. Artificial Intelligence Review 54 (2021), 95-135.
346
+ [20] Yehuda Koren, Robert Bell, and Chris Volinsky. 2009. Matrix factorization techniques for recommender systems. Computer 42, 8 (2009), 30-37.
347
+ [21] Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E Gonzalez, Hao Zhang, and Ion Stoica. 2023. Efficient Memory Management for Large Language Model Serving with PagedAttention. arXiv preprint arXiv:2309.06180 (2023).
348
+ [22] Dung D Le and Hady Lauw. 2021. Efficient retrieval of matrix factorization-based top-k recommendations: A survey of recent approaches. Journal of Artificial Intelligence Research 70 (2021), 1441-1479.
349
+ [23] Jianxun Lian, Xiaohuan Zhou, Fuzheng Zhang, Zhongxia Chen, Xing Xie, and Guangzhong Sun. 2018. xdeepfm: Combining explicit and implicit feature interactions for recommender systems. In Proceedings of the 24th ACM SIGKDD international conference on knowledge discovery & data mining. 1754-1763.
350
+ [24] Junling Liu, Chao Liu, Renjie Lv, Kang Zhou, and Yan Zhang. 2023. Is chatgpt a good recommender? a preliminary study. arXiv preprint arXiv:2304.10149 (2023).
351
+ [25] Shayne Longpre, Le Hou, Tu Vu, Albert Webson, Hyung Won Chung, Yi Tay, Denny Zhou, Quoc V Le, Barret Zoph, Jason Wei, et al. 2023. The flan collection: Designing data and methods for effective instruction tuning. arXiv preprint arXiv:2301.13688 (2023).
352
+ [26] Sichun Luo, Chen Ma, Yuanzhang Xiao, and Linqi Song. 2023. Improving Long-Tail Item Recommendation with Graph Augmentation. In Proceedings of the 32nd ACM International Conference on Information and Knowledge Management. 1707-1716.
353
+ [27] Sichun Luo, Yuanzhang Xiao, and Linqi Song. 2022. Personalized federated recommendation via joint representation learning, user clustering, and model adaptation. In Proceedings of the 31st ACM International Conference on Information & Knowledge Management. 4289-4293.
354
+ [28] Sichun Luo, Yuanzhang Xiao, Xinyi Zhang, Yang Liu, Wenbo Ding, and Linqi Song. 2023. PerFedRec++: Enhancing Personalized Federated Recommendation with Self-Supervised Pre-Training. arXiv preprint arXiv:2305.06622 (2023).
355
+ [29] Sichun Luo, Xinyi Zhang, Yuanzhang Xiao, and Linqi Song. 2022. HySAGE: A hybrid static and adaptive graph embedding network for context-drifting recommendations. In Proceedings of the 31st ACM International Conference on Information & Knowledge Management. 1389-1398.
356
+ [30] OpenAI. 2023. GPT-4 Technical Report. arXiv:2303.08774 [cs.CL]
357
+ [31] Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. 2022. Training language models to follow instructions with human feedback. Advances in Neural Information Processing Systems 35 (2022), 27730-27744.
358
+ [32] Yoon-Joo Park and Alexander Tuzhilin. 2008. The long tail of recommender systems and how to leverage it. In Proceedings of the 2008 ACM conference on Recommender systems. 11-18.
359
+ [33] Samyam Rajbhandari, Jeff Rasley, Olatunji Ruwase, and Yuxiong He. 2020. Zero: Memory optimizations toward training trillion parameter models. In SC20: International Conference for High Performance Computing, Networking, Storage and Analysis. IEEE, 1-16.
360
+ [34] Steffen Rendle, Christoph Freudenthaler, Zeno Gantner, and Lars Schmidt-Thieme. 2012. BPR: Bayesian personalized ranking from implicit feedback. arXiv preprint arXiv:1205.2618 (2012).
361
+ [35] Weiping Song, Chence Shi, Zhiping Xiao, Zhijian Duan, Yewen Xu, Ming Zhang, and Jian Tang. 2019. Autoint: Automatic feature interaction learning via self-attentive neural networks. In Proceedings of the 28th ACM international conference on information and knowledge management. 1161–1170.
362
+ [36] Harald Steck. 2013. Evaluation of recommendations: rating-prediction and ranking. In Proceedings of the 7th ACM conference on Recommender systems. 213-220.
363
+ [37] Fei Sun, Jun Liu, Jian Wu, Changhua Pei, Xiao Lin, Wenwu Ou, and Peng Jiang. 2019. BERT4Rec: Sequential recommendation with bidirectional encoder representations from transformer. In Proceedings of the 28th ACM international conference on information and knowledge management. 1441-1450.
364
+ [38] Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. 2023. Stanford Alpaca: An Instruction-following LLaMA model. https://github.com/tatsu-lab/stanford_alpaca.
365
+ [39] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothee Lacroix, Baptiste Roziere, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 2023. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023).
366
+ [40] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. 2023. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288 (2023).
367
+
368
+ [41] Ke Wang, Houxing Ren, Aojun Zhou, Zimu Lu, Sichun Luo, Weikang Shi, Renrui Zhang, Linqi Song, Mingjie Zhan, and Hongsheng Li. 2023. Mathcoder: Seamless code integration in lms for enhanced mathematical reasoning. arXiv preprint arXiv:2310.03731 (2023).
369
+ [42] Lei Wang and Ee-Peng Lim. 2023. Zero-Shot Next-Item Recommendation using Large Pretrained Language Models. arXiv preprint arXiv:2304.03153 (2023).
370
+ [43] Ruoxi Wang, Bin Fu, Gang Fu, and Mingliang Wang. 2017. Deep & cross network for ad click predictions. In Proceedings of the ADKDD'17. 1-7.
371
+ [44] Xiang Wang, Xiangnan He, Meng Wang, Fuli Feng, and Tat-Seng Chua. 2019. Neural graph collaborative filtering. In Proceedings of the 42nd international ACM SIGIR conference on Research and development in Information Retrieval. 165-174.
372
+ [45] Wei Wei, Xubin Ren, Jiabin Tang, Qinyong Wang, Lixin Su, Suqi Cheng, Junfeng Wang, Dawei Yin, and Chao Huang. 2023. Llmrec: Large language models with graph augmentation for recommendation. arXiv preprint arXiv:2311.00423 (2023).
373
+ [46] Jiancan Wu, Xiang Wang, Fuli Feng, Xiangnan He, Liang Chen, Jianxun Lian, and Xing Xie. 2021. Self-supervised graph learning for recommendation. In Proceedings of the 44th international ACM SIGIR conference on research and development in information retrieval. 726-735.
374
+ [47] Yunjia Xi, Weiwen Liu, Jianghao Lin, Jieming Zhu, Bo Chen, Ruiming Tang, Weinan Zhang, Rui Zhang, and Yong Yu. 2023. Towards Open-World Recommendation with Knowledge Augmentation from Large Language Models. arXiv preprint arXiv:2306.10933 (2023).
375
+ [48] Jun Xiao, Hao Ye, Xiangnan He, Hanwang Zhang, Fei Wu, and Tat-Seng Chua. 2017. Attentional factorization machines: Learning the weight of feature interactions via attention networks. arXiv preprint arXiv:1708.04617 (2017).
376
+ [49] Xu Xie, Fei Sun, Zhaoyang Liu, Shiwen Wu, Jinyang Gao, Jiandong Zhang, Bolin Ding, and Bin Cui. 2022. Contrastive learning for sequential recommendation. In
377
+
378
+ 2022 IEEE 38th international conference on data engineering (ICDE). IEEE, 1259-1273.
379
+ [50] Junliang Yu, Hongzhi Yin, Xin Xia, Tong Chen, Jundong Li, and Zi Huang. 2023. Self-supervised learning for recommender systems: A survey. IEEE Transactions on Knowledge and Data Engineering (2023).
380
+ [51] Junjie Zhang, Ruobing Xie, Yupeng Hou, Wayne Xin Zhao, Leyu Lin, and Ji-Rong Wen. 2023. Recommendation as instruction following: A large language model empowered recommendation approach. arXiv preprint arXiv:2305.07001 (2023).
381
+ [52] Shengyu Zhang, Linfeng Dong, Xiaoya Li, Sen Zhang, Xiaofei Sun, Shuhe Wang, Jiwei Li, Runyi Hu, Tianwei Zhang, Fei Wu, et al. 2023. Instruction tuning for large language models: A survey. arXiv preprint arXiv:2308.10792 (2023).
382
+ [53] Shuai Zhang, Lina Yao, Aixin Sun, and Yi Tay. 2019. Deep learning based recommender system: A survey and new perspectives. ACM computing surveys (CSUR) 52, 1 (2019), 1-38.
383
+ [54] Yang Zhang, Fuli Feng, Jizhi Zhang, Keqin Bao, Qifan Wang, and Xiangnan He. 2023. Collm: Integrating collaborative embeddings into large language models for recommendation. arXiv preprint arXiv:2310.19488 (2023).
384
+ [55] Bowen Zheng, Yupeng Hou, Hongyu Lu, Yu Chen, Wayne Xin Zhao, and Ji-Rong Wen. 2023. Adapting large language models by integrating collaborative semantics for recommendation. arXiv preprint arXiv:2311.09049 (2023).
385
+ [56] Aojun Zhou, Ke Wang, Zimu Lu, Weikang Shi, Sichun Luo, Zipeng Qin, Shaoqing Lu, Anya Jia, Linqi Song, Mingjie Zhan, et al. 2023. Solving challenging math word problems using gpt-4 code interpreter with code-based self-verification. arXiv preprint arXiv:2308.07921 (2023).
386
+ [57] Cai-Nicolas Ziegler, Sean M McNee, Joseph A Konstan, and Georg Lausen. 2005. Improving recommendation lists through topic diversification. In Proceedings of the 14th international conference on World Wide Web. 22-32.
2401.13xxx/2401.13870/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5b5bbcf51bff15851883569751fc7aa178f5faa5bc2f5a7a81564e3eb46aaae9
3
+ size 627540
2401.13xxx/2401.13870/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.13xxx/2401.13912/4c1003f2-ebdf-4212-910e-d5113767c624_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.13xxx/2401.13912/4c1003f2-ebdf-4212-910e-d5113767c624_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.13xxx/2401.13912/4c1003f2-ebdf-4212-910e-d5113767c624_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e80af025ad41e105f9d1a67c8b0e9b9322510305b8dbe56de59e020ce7f1e60
3
+ size 976576
2401.13xxx/2401.13912/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2401.13xxx/2401.13912/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d2728d0fbdb688aa287aec939bc49da6c144feb583c0f55325e4a5b37269cdc
3
+ size 1066056
2401.13xxx/2401.13912/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.13xxx/2401.13919/f82c75ef-bcfa-4523-9ab5-fea12b9952a1_content_list.json ADDED
The diff for this file is too large to render. See raw diff