MinerU Batch e4919c25-72f6-4903-87f7-cff19631fc95 (Part 7/8)
This view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +8 -0
- data/2025/2504_01xxx/2504.01017/46782c78-afc4-4a20-bb1f-dce5701d4f0d_content_list.json +0 -0
- data/2025/2504_01xxx/2504.01017/46782c78-afc4-4a20-bb1f-dce5701d4f0d_model.json +0 -0
- data/2025/2504_01xxx/2504.01017/46782c78-afc4-4a20-bb1f-dce5701d4f0d_origin.pdf +3 -0
- data/2025/2504_01xxx/2504.01017/full.md +508 -0
- data/2025/2504_01xxx/2504.01017/images/03b3628c367053f6811908f0390553ae0aa541e5dc7e7140c029979bc63ec1cb.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/09f36aadf6d57ec6e5146836afd31be08edb41ac1fa8040124d9475d86979c0d.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/1080ca2586694c9f119f2145e2479fe2ff2654801e2d995110f0445862f9ac92.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/16e58eafa12c7e343fb56b1d745edcdbc9b7567b85058a6a63bd7e06a4ad4279.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/17f0c4519a4854047faeeba6268629fa14729bc210252552d0fb90c7eda59734.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/1ee81e726dc55bc2e8f5e61163506aae42ae9ab61930af64a9ccaf5eed9192fd.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/2685b7099ba7d9845f0602e1ea522057be3882ee15873fc267693f352bf9c6ec.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/29993a6f49c8e8ca3baedf902a30de13b6b0f3f831b6e81bb9f77defbda07640.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/2c1bb205ba640502292f04fbc11a3988b410fa79894e0c94f7dd4a7739df0f09.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/44df760b408a1576a05e8820a8d385a5b6ee3ac70f404cc41a94a75d6668fb7d.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/452a8747fef027db026b04b5fdfc83e84c738b045d4cac4e069701245ea82454.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/46bc957b9cf826cebcd5fd1d2d29ee0d30c841ce60e4a6151a3a64b0b4979095.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/52cef3eafb04efa56e8f24f74ec59f260c005bc08509be87e76e8ad773b59203.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/56f694cb26be5d6b14b25fea0a8aaedad14a340a11e8d2399397ee2a420e25b5.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/5eb7d4d6cd12645e4cf7db5627ca389afd9c8a83eec3e3809565863e69478ebf.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/68a76b7dda6a14bd1fba2eb3d7ed61724edb2724153628a9a6b91337ef6d27de.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/69122edf5464c13fa8ca84767eccc3c4c622ce5de42b59bad2f669b5788c8298.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/7f2a3a8602b28238d943cbc008b245e09919f64a88264d8c8d8c632ecb3ddff5.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/8a097fd3b6b527139e8ec0d914ba999581052747fcd7d453d33a41e45c36b9ab.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/9a6e48124340d635232f4df4c3c5d2605ad9b42195d78eb9c44a34a0c11c366b.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/a07cb5607c06a7a60cde34a5d036b9a2a333dc4508f80099b606b3f1e82803dd.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/a14940dff63dbb4d4d7ca3c62315b31183e9e049d290a04389cd1f5a512d2851.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/a802d6943ee84f02c32c51d65c3da221cdabd1b62866434b6d353186d744b2a1.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/a9952db727e5530434c3612dc1c415392aa137dc309f1f6b6c30269af39eb020.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/abd55181eaee2123f0505cbfcb48e440928e52b56c6d58c1089780414068448d.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/c81f24dcd6cb97b80ab6c80e804ccdf39dfe613414a6a1c620bdab2c1f1e3a62.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/cb3335700d248395b20bd54520a4792377e5e5a5399f0f5b56471f895714fc34.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/d1f319d1b36201f6bbb6d018b111d5c49b64fd77c0026bd150a57b3dbd769c23.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/d2e71906e5a55591051fa01495d9101d255860238de884359369fc8c4fd72031.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/d89b0aa778344f38dacfb001330d3fa484cb7862b41c831719d87d5cf431289c.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/de7fdcc7ce8a4fbc9cb35d177709631dac95588ed143e9b063d049830ccb43a6.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/e1ec8c4f9f1d03f07ff0270e7d9b9d495560dbd0a1ad78908edf664469d96727.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/e25b234c95961f0921fadc248be0f1b8e8e0df85e52f3aef54f414743819af13.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/images/eefff153b81ea7f6cbe2fae4c7cb4dc94399aeaebbef1cedaeb1af589f495fca.jpg +3 -0
- data/2025/2504_01xxx/2504.01017/layout.json +0 -0
- data/2025/2504_01xxx/2504.01205/74d5fa1b-4d2f-4309-8eb0-9e49fed8b7c0_content_list.json +1824 -0
- data/2025/2504_01xxx/2504.01205/74d5fa1b-4d2f-4309-8eb0-9e49fed8b7c0_model.json +0 -0
- data/2025/2504_01xxx/2504.01205/74d5fa1b-4d2f-4309-8eb0-9e49fed8b7c0_origin.pdf +3 -0
- data/2025/2504_01xxx/2504.01205/full.md +325 -0
- data/2025/2504_01xxx/2504.01205/images/49c8c0f37c9ba594b6a1309c80d858cee3fe67b8cc8b408f04510fa8e560f0e0.jpg +3 -0
- data/2025/2504_01xxx/2504.01205/images/77f6190ac970e2a95e81e80babe451ee37aa348ecb80046b4a1ef7de2579ce23.jpg +3 -0
- data/2025/2504_01xxx/2504.01205/images/a5ea7fba117a8cc446b111d183d4f6647902f6d1487d2106d3146b1c9d3f76bf.jpg +3 -0
- data/2025/2504_01xxx/2504.01205/images/ad3f3d07f5b8756c638195a7bd8272f8e55102db3469b499b1026189b110a214.jpg +3 -0
- data/2025/2504_01xxx/2504.01205/layout.json +0 -0
- data/2025/2504_01xxx/2504.01282/ed52a6d2-cad1-405f-9e91-d48b54ac9d00_content_list.json +1520 -0
.gitattributes
CHANGED
@@ -1445,3 +1445,11 @@ data/2025/2504_01xxx/2504.01866/8404b93c-797a-4711-bab0-e230ad70e5f0_origin.pdf
 data/2025/2504_01xxx/2504.01871/20f2df56-938d-4cd7-809e-3fbed621c4a6_origin.pdf filter=lfs diff=lfs merge=lfs -text
 data/2025/2504_02xxx/2504.02160/beeea222-bb18-4dd3-a301-b606feb1a7b3_origin.pdf filter=lfs diff=lfs merge=lfs -text
 data/2025/2504_03xxx/2504.03755/94fee671-473c-4383-923a-f5aedef1d43d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2504_01xxx/2504.01017/46782c78-afc4-4a20-bb1f-dce5701d4f0d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2504_01xxx/2504.01205/74d5fa1b-4d2f-4309-8eb0-9e49fed8b7c0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2504_01xxx/2504.01282/ed52a6d2-cad1-405f-9e91-d48b54ac9d00_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2504_01xxx/2504.01296/02f68abf-7663-4300-8c60-af02437e1f02_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2504_01xxx/2504.01297/5f2e7ec0-6bc3-4012-a6b5-e9336ef4fb7c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2504_01xxx/2504.01382/aa2626ee-1672-4529-98de-7d7ea1ae92c2_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2504_01xxx/2504.01550/b41ab5ac-87b8-465a-9cc5-481c7b61f9e8_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2504_07xxx/2504.07128/5ed760fa-168e-4429-afad-42d3ed8a6bf5_origin.pdf filter=lfs diff=lfs merge=lfs -text
data/2025/2504_01xxx/2504.01017/46782c78-afc4-4a20-bb1f-dce5701d4f0d_content_list.json
ADDED
The diff for this file is too large to render. See raw diff

data/2025/2504_01xxx/2504.01017/46782c78-afc4-4a20-bb1f-dce5701d4f0d_model.json
ADDED
The diff for this file is too large to render. See raw diff
data/2025/2504_01xxx/2504.01017/46782c78-afc4-4a20-bb1f-dce5701d4f0d_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41057634cfff98a2c8657d875318993949655485ff4f529a8c5bb116ed2a9b34
+size 1032792
data/2025/2504_01xxx/2504.01017/full.md
ADDED
@@ -0,0 +1,508 @@
# Scaling Language-Free Visual Representation Learning
David Fan $^{1,\ast}$ , Shengbang Tong $^{1,2,\ast}$ , Jiachen Zhu $^{1,2}$ , Koustuv Sinha $^{1}$ , Zhuang Liu $^{1,3}$ , Xinlei Chen $^{1}$ , Michael Rabbat $^{1}$ , Nicolas Ballas $^{1}$ , Yann LeCun $^{1,2}$ , Amir Bar $^{1,\dagger}$ , Saining Xie $^{2,\dagger}$
$^{1}$ FAIR, Meta, $^{2}$ New York University, $^{3}$ Princeton University. *Equal contribution, †equal advising
Visual Self-Supervised Learning (SSL) currently underperforms Contrastive Language-Image Pretraining (CLIP) in multimodal settings such as Visual Question Answering (VQA). This multimodal gap is often attributed to the semantics introduced by language supervision, even though visual SSL and CLIP models are often trained on different data. In this work, we ask the question: "Do visual self-supervised approaches lag behind CLIP due to the lack of language supervision, or differences in the training data?" We study this question by training both visual SSL and CLIP models on the same MetaCLIP data, and leveraging VQA as a diverse testbed for vision encoders. In this controlled setup, visual SSL models scale better than CLIP models in terms of data and model capacity, and visual SSL performance does not saturate even after scaling up to 7B parameters. Consequently, we observe visual SSL methods achieve CLIP-level performance on a wide range of VQA and classic vision benchmarks. These findings demonstrate that pure visual SSL can match language-supervised visual pretraining at scale, opening new opportunities for vision-centric representation learning.
Date: April 1, 2025
Project Page: https://davidfan.io/webssl/
# 1 Introduction
Visual representation learning has evolved along two distinct paths with different training approaches. Language-supervised methods such as Contrastive Language-Image Pretraining (CLIP) (Radford et al., 2021; Zhai et al., 2023) use paired image-text data to learn representations that are enriched with linguistic semantics. Self-Supervised Learning (SSL) methods (Zhang et al., 2016; Chen et al., 2020a; He et al., 2022; LeCun, 2022; Oquab et al., 2023) learn from images alone, without language.
Despite SSL models outperforming language-supervised models on classic vision tasks such as classification and segmentation (Oquab et al., 2023), they are less commonly adopted in recent multimodal large language models (MLLMs) (Liu et al., 2023a, 2024a; Agrawal et al., 2024; Tong et al., 2024a; Beyer et al., 2024; Li et al., 2024; AI@Meta, 2024). This difference in adoption is partially due to a performance gap in visual question answering (see Figure 1), particularly for OCR & Chart interpretation tasks (Tong et al., 2024a; Shi et al., 2024).
Figure 1 We compare the scaling behavior of visual SSL and CLIP on 16 VQA tasks from the Cambrian-1 suite under different data and model size regimes. Prior visual SSL methods achieved strong performance on classic vision tasks, but have underperformed as encoders for multimodal instruction-tuned VQA tasks. Our results show that with appropriate scaling of models and data, visual SSL can match the performance of language-supervised models across all evaluated domains—even OCR & Chart.
Figure 2 Visual SSL 2.0 changes. In this work, we adopt three improvements to the visual SSL pipeline: 1) Training on billion-scale web data, curated through the MetaCLIP pipeline, to move beyond "conventional" datasets; 2) Scaling model architecture from sub-billion parameter models to models exceeding 1 billion parameters; and 3) Incorporating VQA as a complementary evaluation protocol to comprehensively assess visual features. These changes enable us to study visual SSL at a larger scale and observe scaling trends previously unobserved in smaller-scale experiments.
Beyond methodology differences, these approaches have also been separated by data scale and distribution (Figure 1). CLIP models typically train on billion-scale image-text pairs from the web (Schuhmann et al., 2022; Chen et al., 2023; Xu et al., 2024b), while SSL methods use million-scale datasets such as ImageNet (Deng et al., 2009) or hundred-million scale data with ImageNet-like distributions (Ridnik et al., 2021; Oquab et al., 2023).
In this work, we investigate a fundamental question: Is language supervision necessary to pretrain visual representations for multimodal modeling? Rather than seeking to replace language-supervised approaches, we aim to understand the intrinsic capabilities and limitations of visual self-supervision at scale for multimodal applications. To conduct a fair comparison, we train SSL models on the same billion-scale web data used for state-of-the-art CLIP models—specifically the MetaCLIP dataset (Xu et al., 2024b). This approach controls for data distribution differences when comparing visual SSL and CLIP.
For evaluation, we primarily use visual question answering (VQA) as a framework to evaluate SSL models across a diverse set of capabilities at scale. VQA evaluation suites span vision-centric, visual reasoning, and OCR & Chart tasks, and have been shown to be a more diverse testbed for assessing vision encoders (Tschannen et al., 2024; Wan et al., 2024; Fini et al., 2024; Tong et al., 2024a), reflecting the broader perception challenges found in real-world distributions. We adopt the evaluation suite proposed in Cambrian-1 (Tong et al., 2024a), which evaluates performance across 16 tasks spanning 4 distinct categories of VQA: General, Knowledge, OCR & Chart, and Vision-Centric.
We train Web-SSL, a family of visual SSL models ranging from 1 to 7 billion parameters, using the above setting for direct and controlled comparison to CLIP. As a result of our empirical study, we contribute several insights:
- Visual SSL can match and even surpass language-supervised methods for visual pretraining, on a wide range of VQA tasks—even on language-related tasks such as OCR & Chart understanding (Figure 3).
- Visual SSL scales well with respect to model capacity (Figure 3) and data (Figure 4), indicating that SSL has significant untapped potential.
- Visual SSL can maintain competitive traditional vision performance on classification and segmentation, even while improving at VQA (Figure 7).
- Training on a higher ratio of images containing text is especially effective for improving OCR & Chart performance (Question 4). Exploring data composition is a promising direction.
This work serves as a proof of concept that offers a compelling vision-centric alternative to the recent CLIP-dominated trend, and opens new opportunities for future research. We plan to open-source our Web-SSL vision models, and we hope to inspire the broader community to unlock the full potential of visual SSL in the multimodal era.
# 2 From Visual SSL 1.0 to 2.0
In this section, we describe our experimental setup, which extends previous SSL works by (1) scaling dataset size to billion-scale images (Section 2.1), (2) scaling model size beyond 1B parameters (Section 2.2), and (3) evaluating vision models using open-ended VQA tasks (Section 2.3), in addition to classic vision benchmarks such as ImageNet-1k (Deng et al., 2009) and ADE20k (Zhou et al., 2019).
# 2.1 Beyond ImageNet Pretraining
To study whether visual SSL can match the performance of CLIP, we start by adopting the same data that drove CLIP's success. We thus leverage the MetaCLIP dataset (Xu et al., 2024b,a), which has enabled the most successful open-source reproduction of CLIP to date. We use 2 billion samples from MetaCLIP, which we refer to as MC-2B. We train SSL methods on only the images, and CLIP on the image-text pairs.
This controls for data distribution and size as confounding variables, and enables a fairer comparison of the pretraining methods themselves, while ensuring sufficient data diversity and scale.
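To make the contrast between the two training signals concrete, here is a minimal sketch of the symmetric contrastive objective used by CLIP-style models; the visual SSL models in this comparison never see the text branch. This is a generic illustration, not the paper's training code, and the tensor shapes are assumptions.

```python
import torch
import torch.nn.functional as F

def clip_loss(image_emb: torch.Tensor, text_emb: torch.Tensor,
              temperature: float = 0.07) -> torch.Tensor:
    """Symmetric InfoNCE loss over a batch of paired image/text embeddings.

    image_emb, text_emb: (batch, dim) outputs of the two encoders.
    """
    # Normalize so the similarity matrix holds cosine similarities.
    image_emb = F.normalize(image_emb, dim=-1)
    text_emb = F.normalize(text_emb, dim=-1)

    # (batch, batch) similarities; the diagonal holds the true pairs.
    logits = image_emb @ text_emb.t() / temperature
    targets = torch.arange(logits.size(0), device=logits.device)

    # Contrast images against texts and texts against images.
    loss_i2t = F.cross_entropy(logits, targets)
    loss_t2i = F.cross_entropy(logits.t(), targets)
    return (loss_i2t + loss_t2i) / 2
```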
# 2.2 Scaling Up Vision Models to Billion Scale
We can also increase model size. Inspired by advancements in scaling language models (Brown et al., 2020; Kaplan et al., 2020; OpenAI, 2022), we train Vision Transformers (ViTs) with 1B, 2B, 3B, 5B, and 7B parameters, on only the images from MC-2B, to study the properties of larger-scale visual SSL models trained on web-scale data. We adapt ViT-g from Oquab et al. (2023) as ViT-1B, and define new configurations for ViT-2B to 7B (Table 1); see Appendix A for model details.
<table><tr><td>Model</td><td>Width</td><td>Depth</td><td>Heads</td><td>MLP</td></tr><tr><td>ViT-1B</td><td>1536</td><td>40</td><td>24</td><td>6144</td></tr><tr><td>ViT-2B</td><td>2688</td><td>24</td><td>21</td><td>10752</td></tr><tr><td>ViT-3B</td><td>3072</td><td>26</td><td>24</td><td>12288</td></tr><tr><td>ViT-5B</td><td>3584</td><td>32</td><td>28</td><td>14336</td></tr><tr><td>ViT-7B</td><td>4096</td><td>32</td><td>32</td><td>16384</td></tr></table>
Table 1 Model architecture details. For consistency, we denote ViT-g from Oquab et al. (2023) as ViT-1B.
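As a rough sanity check on the naming, a ViT's parameter count is dominated by its transformer blocks: about $4 \cdot \text{width}^2$ per block for attention plus $2 \cdot \text{width} \cdot \text{MLP}$ for the feed-forward layers. The sketch below applies this estimate to the Table 1 configurations (ignoring embeddings, norms, and biases) and approximately recovers the 1B-7B names.

```python
# Rough ViT parameter estimate from the Table 1 configurations.
# Per block: QKV + output projection ~ 4*width^2, MLP ~ 2*width*mlp.
configs = {
    "ViT-1B": (1536, 40, 6144),
    "ViT-2B": (2688, 24, 10752),
    "ViT-3B": (3072, 26, 12288),
    "ViT-5B": (3584, 32, 14336),
    "ViT-7B": (4096, 32, 16384),
}

for name, (width, depth, mlp) in configs.items():
    params = depth * (4 * width**2 + 2 * width * mlp)
    print(f"{name}: ~{params / 1e9:.1f}B parameters")
# Prints ~1.1B, ~2.1B, ~2.9B, ~4.9B, ~6.4B, matching the 1B-7B naming.
```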
# 2.3 Multimodal LLMs as an Evaluation Protocol
In addition to conventional evaluation protocols, such as ImageNet-1k linear probe, we also evaluate our vision encoders using VQA, a flexible and robust evaluation protocol that reflects the diversity of real-world perceptual challenges (Tschannen et al., 2024; Tong et al., 2024a), as shown in Figure 2.
Here, we study all vision encoders using the same controlled setting to ensure fair comparison. Specifically, we use the same two-stage visual instruction tuning procedure and data as Cambrian-1 (Tong et al., 2024a). First, a lightweight MLP adapter is added to project the vision encoder features into the same dimensionality as the LLM, and only this MLP adapter is trained. In the second stage, both the MLP adapter and LLM are finetuned. To enable controlled comparison, the vision encoder remains frozen in both stages, and all experiments use the same training recipe as well as the Llama-3 8B Instruct (Touvron et al., 2023) backbone. We provide detailed training datasets and hyperparameters in Appendix A.
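A minimal sketch of this two-stage setup is shown below. The adapter shape and the `configure_stage` helper are illustrative assumptions; the actual Cambrian-1 recipe specifies the real architectures and hyperparameters.

```python
import torch.nn as nn

class MLPAdapter(nn.Module):
    """Projects frozen vision features into the LLM embedding space."""
    def __init__(self, vision_dim: int, llm_dim: int):
        super().__init__()
        self.proj = nn.Sequential(
            nn.Linear(vision_dim, llm_dim),
            nn.GELU(),
            nn.Linear(llm_dim, llm_dim),
        )

    def forward(self, vision_tokens):
        return self.proj(vision_tokens)

def configure_stage(encoder, adapter, llm, stage: int):
    # The vision encoder stays frozen in both stages.
    for p in encoder.parameters():
        p.requires_grad = False
    # Stage 1: train only the adapter. Stage 2: adapter + LLM.
    for p in adapter.parameters():
        p.requires_grad = True
    for p in llm.parameters():
        p.requires_grad = (stage == 2)
```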
We then report results on the Cambrian-1 (Tong et al., 2024a) evaluation suite, which comprises 16 VQA benchmarks spanning four established domains: General, Knowledge, OCR & Chart, and Vision-Centric. The average VQA performance is the average of the four subcategories. Each subcategory has 4 benchmarks and is equally weighted.
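In other words, the headline number is a macro-average: benchmarks are averaged within each subcategory first, and the four subcategory means are then averaged, so every category carries equal weight regardless of benchmark count. A small sketch of the computation:

```python
def average_vqa(scores: dict[str, dict[str, float]]) -> float:
    """Macro-average: mean of the four subcategory means.

    scores maps category -> {benchmark: accuracy}; with four
    benchmarks per category, each category is equally weighted.
    """
    category_means = [
        sum(benchmarks.values()) / len(benchmarks)
        for benchmarks in scores.values()
    ]
    return sum(category_means) / len(category_means)
```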
# 3 Scaling Visual SSL
In this section, we explore the scaling behavior of visual SSL models with respect to both model and data size, as a result of training on only images from MC-2B. We focus on DINOv2 (Oquab et al., 2023) as the visual SSL method in this section, and discuss MAE (He et al., 2022) in Section 4.
In Section 3.1, we increase model size from 1B to 7B while keeping the training data fixed at 2 billion MC-2B images—unless otherwise denoted. We use the off-the-shelf training code and recipe for each method, and do not change the recipe for different model sizes in order to control for confounding variables. In Section 3.2, we shift our focus to scaling total data seen for a fixed model size, and analyze how performance evolves as the number of images seen during training increases from 1 billion to 8 billion.
# 3.1 Scaling Model
The intention of scaling model size is both to find the ceiling of visual SSL under this new data regime, and to identify any unique behavior that emerges in larger models.
We thus pretrain DINOv2 ViT models, ranging from 1B to 7B parameters, using 2 billion unlabeled images at $224 \times 224$ resolution from MC-2B—without high-resolution adaptation (Oquab et al., 2023)—to ensure fair comparison with CLIP. We refer to these models as Web-DINO throughout the paper. For a controlled comparison, we also train CLIP models of the same sizes on the same data.
Figure 3 Scaling behavior of Web-DINO and CLIP ViTs trained on MC-2B. The x-axis shows model sizes from 1B to 7B parameters on a log scale. We observe novel "scaling behavior" with Web-DINO models across all categories, with particularly pronounced improvements in the OCR & Chart and Vision-Centric domains as model size increases. In contrast, CLIP models demonstrate limited scaling benefits, with performance saturating at moderate model sizes. The two model families exhibit complementary strengths: CLIP models excel at OCR & Chart VQA, and Web-DINO models are superior at Vision-Centric VQA, while remaining competitive in all other categories.
We evaluate each model with VQA and present the results in Figure 3. We will first discuss the overall performance trend and then turn to specific category performance. To the best of our knowledge, this is the first instance of a vision encoder trained purely with visual self-supervision achieving performance parity with language-supervised encoders on VQA—even in the OCR & Chart category, which is traditionally considered to be highly text-dependent.
Performance trend. We compare the performance trend as model capacity increases in Figure 3. Web-DINO's Average, OCR & Chart, and Vision-Centric VQA performance improves nearly log-linearly with increasing model size, while General and Knowledge improve to a smaller degree. In contrast, CLIP's performance in all VQA categories largely saturates after 3B parameters. This suggests that while smaller CLIP models may be more data-efficient, this advantage largely dissipates for larger CLIP models. The continual improvement from increasing Web-DINO model capacity also suggests that visual SSL benefits from larger model capacity, and that scaling visual SSL past 7B parameters is a promising direction.
Category-specific performance. In terms of category-specific performance, DINO also increasingly outperforms CLIP on Vision-Centric VQA and largely closes the gap with CLIP on OCR & Chart and Average VQA (Figure 3), as model size increases. At 5B parameters and above, DINO can exceed the Average VQA performance of CLIP, despite being trained solely on images and without language supervision. These results suggest that vision-only models, when trained on CLIP-distribution images, can develop strong visual features that are comparable to those of language-supervised vision encoders.
# 3.2 Scaling Examples Seen
Previously, we focused on single-epoch training, where each of the 2B unique images in MC-2B is seen only once. Here, we investigate the impact of increasing the number of examples seen by training Web-DINO ViT-7B on data ranging from 1 billion to 8 billion images from MC-2B.
As shown in Figure 4, General and Knowledge VQA performance improves incrementally with more examples seen, saturating at 4B and 2B examples respectively. Vision-Centric VQA performance improves sharply from 1B to 2B examples, and saturates beyond 2B examples. In contrast, OCR & Chart is the only category that shows consistent improvement with more examples seen. This suggests that as the model sees more data, it learns a representation that is increasingly well-suited for text-related tasks, yet without marked degradation on other capabilities.
Furthermore, when compared to a CLIP model of the same size (ViT-7B), Web-DINO consistently outperforms CLIP on average VQA performance given the same number of samples seen (Figure 4). Notably, after seeing 8B samples, Web-DINO closes the performance gap with the CLIP model on OCR & Chart VQA tasks. This provides further evidence suggesting that visual SSL models have the potential to scale better than language-supervised models.
Collectively, the results in Figures 3 and 4 indicate that as model size and examples seen increase, visual SSL learns features that are increasingly effective for VQA in general, but especially on OCR & Chart. Our results suggest that CLIP-based models do not hold an absolute advantage compared to visual SSL. In Section 4, we delve deeper into the underlying mechanisms driving this trend.
Figure 4 Scaling up examples seen when training Web-DINO-7B. Performance across different VQA categories as training data increases from 1B to 8B images. While General and Vision-Centric tasks show diminishing returns after 2B images, OCR & Chart tasks demonstrate continued improvement, contributing to steady gains in average performance. Further, Web-DINO consistently outperforms same-size (ViT-7B) CLIP models with different training samples seen. The x-axis plots training data size on a log-scale.
# 4 Scaling Analysis and Findings
In Section 3, we demonstrated that visual SSL models scale well with model size and training set size. These observations raise further questions about the generality and implications of these phenomena. To deepen our understanding, we investigate five key aspects, including whether scaling behavior extends to other vision-only models (Question 1), if SSL models also exhibit scaling behavior on smaller and more conventional data (Question 2), and whether SSL can retain competitive performance on classic vision tasks (Question 3). Additionally, we explore why scaling particularly enhances OCR & Chart performance (Question 4), and highlight emergent properties that arise via scaling visual SSL (Question 5). In this section, we provide a detailed analysis of these findings.
# Question 1
Does the observed scaling behavior generalize to other visual SSL methods?
In previous sections, we derived our findings from DINOv2, a joint embedding visual SSL method. Here, we extend our analysis to a masked-modeling-based visual SSL method—the Masked Autoencoder (MAE) (He et al., 2022). We train MAE on MC-2B (denoted as Web-MAE) using ViT models ranging from 1B to 5B parameters and compare the results with Web-DINO models in Figure 5.
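For context, a masked autoencoder drops a large fraction of patch tokens, encodes only the visible ones, and regresses the pixels of the masked patches. The sketch below shows the generic masking-and-loss logic from He et al. (2022); it is illustrative, omits the encoder/decoder, and the exact Web-MAE configuration may differ.

```python
import torch

def random_masking(patches: torch.Tensor, mask_ratio: float = 0.75):
    """patches: (batch, num_patches, dim). Keep a random subset per sample."""
    b, n, d = patches.shape
    n_keep = int(n * (1 - mask_ratio))
    noise = torch.rand(b, n, device=patches.device)
    ids_shuffle = noise.argsort(dim=1)   # random permutation per sample
    ids_keep = ids_shuffle[:, :n_keep]
    visible = torch.gather(
        patches, 1, ids_keep.unsqueeze(-1).expand(-1, -1, d))
    # Binary mask: 1 = masked (to be reconstructed), 0 = visible.
    mask = torch.ones(b, n, device=patches.device)
    mask.scatter_(1, ids_keep, 0)
    return visible, mask

def mae_loss(pred: torch.Tensor, target: torch.Tensor, mask: torch.Tensor):
    """MSE computed only on the masked patches, as in He et al. (2022)."""
    per_patch = ((pred - target) ** 2).mean(dim=-1)   # (batch, num_patches)
    return (per_patch * mask).sum() / mask.sum()
```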
Web-MAE models exhibit similar scaling behavior to Web-DINO models, with average VQA performance improving consistently as model size increases. Compared to joint embedding methods, Web-MAE models learn features that are particularly well-suited for OCR & Chart tasks but underperform in other domains. These results suggest that the "scaling behavior" observed in VQA tasks generalizes across different visual SSL methods. We also note that different visual SSL approaches learn distinct representations even when trained under the same conditions, as demonstrated by Web-MAE's OCR performance.
# Question 2
Does visual SSL exhibit similar scaling behavior on smaller scale conventional data, such as ImageNet?
We pretrain Web-DINO 1B, 2B, and 3B models for 300 epochs on ImageNet-1k, a conventional pretraining dataset for SSL, following the recipe from Oquab et al. (2023). We compare these variants to those trained on MC-2B, evaluating their downstream VQA performance and ImageNet-1k linear probing results. As shown in Figure 6, models pretrained on ImageNet-1k exhibit consistently inferior performance across all metrics. Moreover, unlike models trained on MC-2B, those trained on ImageNet-1k do not improve with increasing model size. This highlights the importance of training visual SSL on more diverse and larger datasets. This echoes recent findings that increasing dataset size and diversity drives LLM scaling (Kaplan et al., 2020; Hoffmann et al., 2023; Chowdhery et al., 2022), and that pretraining data distribution is critical to downstream performance (Liu and He, 2025).
# Question 3
How do scaled models perform on classic vision tasks?
Figure 5 Web-MAE trained on MC-2B. Web-MAE also exhibits consistent scaling behavior as model size increases. Notably, Web-MAE demonstrates better performance in OCR & Chart tasks, achieving higher accuracy than Web-DINO across all model sizes.
Figure 6 Comparison of ImageNet-1k and MC-2B Pretraining. Increasing the diversity and scale of pretraining data improves model performance on VQA accuracy and ImageNet linear probing. Unlike MC-2B pretraining, training on ImageNet does not exhibit a clear scaling trend.
We evaluate Web-DINO models, ranging from 1B to 7B parameters, on classic vision benchmarks including linear probing on ImageNet-1k (Deng et al., 2009), semantic segmentation on ADE20K (Zhou et al., 2019), and depth estimation on NYUv2 (Silberman et al., 2012). Following the evaluation protocol of DINOv2 (Oquab et al., 2023), we freeze the vision encoder; see Appendix A for details. As shown in Figure 7, Web-DINO's performance improves modestly with increasing model size. Web-DINO achieves strong performance across all benchmarks, outperforming MetaCLIP by a significant margin and remaining competitive with off-the-shelf DINOv2, even outperforming it on multi-scale ADE20K. Note that the comparison with off-the-shelf DINOv2 is not exactly apples-to-apples, as we do not use high-resolution adaptation (Oquab et al., 2023) in order to maintain the same input resolution as CLIP. Additionally, the DINOv2 training data has a higher correlation with these classic vision benchmarks, detailed further in Appendix E. These differences suggest that there remains considerable room for further improvement in our model's classic vision performance.
However, we observe that the scaling behavior in classic vision tasks is less pronounced compared to VQA. This finding, along with insights from previous work (Tong et al., 2024a; Fini et al., 2024; Naeem et al., 2024), reinforces the value of VQA as a comprehensive vision model evaluation framework. While classic benchmarks remain important, VQA provides a complementary view into model performance by offering a diverse set of tasks that are grounded in real-world perceptual challenges.
# Question 4
Why does web-scale data improve OCR & Chart performance?
In Section 3, we observed that increasing model size and examples seen leads to unprecedented improvements in OCR & Chart performance for visual SSL models. This is surprising since current off-the-shelf visual SSL methods are notably poor at OCR & Chart understanding compared to language-supervised models (Tong et al., 2024a; Shi et al., 2024).
One possible explanation is that web-scale image datasets already contain a degree of textual information. Unlike object-centric datasets such as ImageNet, images from the web often contain text (e.g., labels, signs, diagrams). Larger capacity and more data might help visual SSL models extract and leverage this textual information.
To test this hypothesis, we apply an off-the-shelf MLLM—SmolVLM2 (Allal et al., 2025)—to identify images containing text. See Figure 8 for qualitative examples and Appendix A for details. This results in two curated datasets: (i) Light filter: retains $50.3\%$ of MC-2B and contains images with any textual content. (ii) Heavy filter: retains $1.3\%$ of MC-2B and contains images with charts, tables, or documents.
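A minimal sketch of the bucketing logic, assuming hypothetical `contains_text` and `is_chart_or_doc` predicates backed by an MLLM such as SmolVLM2 (the actual prompts and thresholds are described in the paper's Appendix A, not here):

```python
from typing import Callable, Iterable

def build_filtered_subsets(
    images: Iterable[str],
    contains_text: Callable[[str], bool],
    is_chart_or_doc: Callable[[str], bool],
):
    """Split an image list into the Light and Heavy subsets.

    Light filter: any visible text (the paper reports 50.3% of MC-2B kept).
    Heavy filter: charts, tables, or documents (1.3% of MC-2B kept).
    """
    light, heavy = [], []
    for path in images:
        if contains_text(path):
            light.append(path)
            if is_chart_or_doc(path):
                heavy.append(path)
    return light, heavy
```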
Figure 7 Performance of Web-DINO models on classic vision tasks. All models achieve strong performance across ImageNet-1k classification, ADE20K segmentation, and NYU Depth estimation, and all tasks experience moderate improvements from increasing model size from 1B to 7B parameters. Web-DINO outperforms MetaCLIP (HF) and is competitive with DINOv2 (HF). (HF) denotes the largest official Hugging Face released version.
<table><tr><td rowspan="2">Method</td><td rowspan="2">% of MC-2B</td><td colspan="5">VQA Evaluator</td><td colspan="4">Breakdown of OCR & Chart Tasks</td></tr><tr><td>AVG</td><td>General</td><td>Knowledge</td><td>Vision Centric</td><td>OCR Chart</td><td>ChartQA</td><td>OCRBench</td><td>TextVQA</td><td>DocVQA</td></tr><tr><td>CLIP 2B</td><td>100%</td><td>53.0</td><td>72.2</td><td>48.8</td><td>55.0</td><td>36.1</td><td>32.8</td><td>32.9</td><td>52.6</td><td>26.0</td></tr><tr><td>Web-DINO 2B</td><td>100%</td><td>50.8</td><td>72.8</td><td>47.1</td><td>56.4</td><td>26.8</td><td>23.3</td><td>15.6</td><td>49.2</td><td>19.0</td></tr><tr><td>Web-DINO 2B</td><td>50.3%</td><td>53.4 (+2.6)</td><td>73.0 (+0.2)</td><td>51.7 (+4.6)</td><td>55.6 (-0.8)</td><td>33.2 (+6.4)</td><td>31.4 (+8.1)</td><td>27.3 (+11.7)</td><td>51.3 (+2.1)</td><td>23.0 (+4.0)</td></tr><tr><td>Web-DINO 2B</td><td>1.3%</td><td>53.7 (+2.9)</td><td>70.7 (-2.1)</td><td>47.3 (+0.2)</td><td>56.2 (-0.2)</td><td>40.4 (+13.6)</td><td>47.5 (+24.2)</td><td>29.4 (+13.8)</td><td>52.8 (+3.6)</td><td>32.0 (+13.0)</td></tr></table>
Table 2 Impact of data filtering on SSL model performance. We compare Web-DINO ViT-2B models trained on MC-2B with different levels of text filtering (full, $50.3\%$ , and $1.3\%$ ) against CLIP ViT-2B trained on full MC-2B. OCR & Chart performance improves with progressively aggressive filtering, with the $1.3\%$ filter achieving the best results. Despite receiving zero language supervision, SSL models can surpass CLIP in text-centric tasks while maintaining strong overall performance.
Figure 8 Examples of filtered MC-2B images. The Light filter (Middle) identifies images containing text, retaining $50.3\%$ of the images. The Heavy filter (Right) identifies images explicitly containing charts and documents, retaining only $1.3\%$ of MC-2B.
We train Web-DINO ViT-2B models on these filtered datasets, with each experiment using 2 billion seen examples (meaning filtered datasets undergo multiple epochs). As shown in Table 2, the model trained on lightly filtered data outperforms the full data variant by $+6.4\%$ on OCR & Chart, while maintaining strong performance in other categories. The model trained on heavily filtered data performs better and outperforms even the language-supervised CLIP ViT-2B trained on full data by $+4.3\%$ on OCR & Chart. Likewise, heavy filtering also improves Average VQA performance, outperforming the full data Web-DINO ViT-2B by $+2.6\%$ and even the full data CLIP ViT-2B by $+0.7\%$. This means that it is possible for visual SSL models to outperform CLIP models of the same size, with only a fraction of the total data (in this case $1.3\%$ of MC-2B).
The improvement in OCR & Chart from training on heavily filtered data is particularly pronounced for ChartQA $(+24.2\%)$, OCRBench $(+13.8\%)$, and DocVQA $(+13.0\%)$, while performance remains competitive in all other categories. These results demonstrate that self-supervised visual models, when trained on images containing more text, can develop high-quality text understanding capabilities without language supervision. This suggests that data composition—rather than purely scale or language supervision—is crucial for developing strong OCR & Chart understanding abilities.
Although it is not surprising that skewing the data in favor of OCR & Chart would improve OCR & Chart capabilities, it is surprising that simple data filtering can outperform language supervision on the full data. This simple proof of concept suggests that similar techniques may be used to help visual SSL bridge future gaps in other capabilities.
Figure 9 Alignment score between Web-DINO and LLMs. Moving from DINOv2 to Web-DINO improves the alignment between the image and the corresponding text representations obtained by LLMs. Increasing model size from 1B to 7B parameters shows gradual improvement, while training on larger data quantities (4B/8B samples) yields the most significant alignment gains.
# Question 5
Why can SSL learn strong visual representations for multimodal modeling, without language supervision?
Thus far, we have seen that visual SSL models can not only become competitive with CLIP models, but also that they can excel at tasks previously thought to require language. This raises an important question: why do vision-only models learn features that work well for multimodal models, even in the absence of language supervision?
We hypothesize that SSL models learn features increasingly aligned with language as model size and examples seen increase. Following Huh et al. (2024), we evaluate intrinsic representational alignment by computing a matching metric between the vision encoder and language model, using image-text pairs from the Wikipedia Captions dataset (Srinivasan et al., 2021). We use off-the-shelf DINOv2 (Oquab et al., 2023) and Web-DINO as vision encoders, and off-the-shelf Llama-3.1 8B and 70B (Touvron et al., 2023) as the language models, without any visual instruction tuning or alignment procedure.
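One common instantiation of such a matching metric is mutual nearest-neighbor overlap: for each paired example, compare the k nearest neighbors induced by the vision features with those induced by the text features, and average the overlap. The sketch below follows that idea; the paper's exact metric may differ in details.

```python
import torch
import torch.nn.functional as F

def mutual_knn_alignment(vision_feats: torch.Tensor,
                         text_feats: torch.Tensor, k: int = 10) -> float:
    """vision_feats (n, d_v) and text_feats (n, d_t); row i is a pair."""
    v = F.normalize(vision_feats, dim=-1)
    t = F.normalize(text_feats, dim=-1)

    sim_v = v @ v.t()
    sim_t = t @ t.t()
    # Exclude self-similarity before taking neighbors.
    sim_v.fill_diagonal_(float("-inf"))
    sim_t.fill_diagonal_(float("-inf"))

    knn_v = sim_v.topk(k, dim=-1).indices   # (n, k)
    knn_t = sim_t.topk(k, dim=-1).indices

    # For each sample, the fraction of shared neighbors across spaces.
    shared = (knn_v.unsqueeze(-1) == knn_t.unsqueeze(-2)).any(-1)
    return (shared.float().sum(-1) / k).mean().item()
```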
As shown in Figure 9, we observe three key trends: (1) training on more diverse data (MC-2B) improves alignment with LLMs (DINOv2 ViT-1B $\rightarrow$ Web-DINO ViT-1B); (2) increasing the vision model size leads to slightly higher alignment (Web-DINO ViT-1B $\rightarrow$ ViT-7B); and (3) seeing more training samples further enhances alignment (Web-DINO ViT-7B trained on 2B samples $\rightarrow$ 8B samples).
These findings suggest that as model size and, in particular, training samples scale, vision models naturally develop text-sensitive features and achieve strong alignment with LLMs and multimodal tasks, without explicit language supervision.
# 5 The Web-SSL Model Family
Next, we analyze the overall best performing vision encoders using both VQA and classic vision benchmarks. In Table 3, we show the best results of our vision encoders against recent off-the-shelf vision encoders, in terms of VQA and classic vision tasks.
For VQA, all vision encoders—including off-the-shelf models—are evaluated using the same visual instruction tuning setup detailed in Section 2.3, mainly at $224 \times 224$ input resolution for fair comparison. Because the goal is not to produce a state-of-the-art MLLM, we did not employ techniques such as unfreezing the vision encoder, resolution tiling (Liu et al., 2024b), or a spatial visual aggregator (Tong et al., 2024a).
For classic vision, we follow the evaluation procedure from Oquab et al. (2023) and evaluate linear probe performance on ImageNet-1k (Deng et al., 2009), ADE20K (Zhou et al., 2019), and NYU Depth v2 (Silberman et al., 2012). The input resolution differs between classic vision tasks, but each model tested uses the same exact settings from Oquab et al. (2023). We emphasize that the primary motivation is still to provide controlled insights.
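Linear probing here means fitting a single linear classifier on frozen features. A minimal full-batch sketch is below; the actual protocol from Oquab et al. (2023) sweeps layers, pooling choices, and learning rates.

```python
import torch
import torch.nn as nn

@torch.no_grad()
def extract_features(encoder, loader, device="cuda"):
    """Cache frozen features; assumes encoder(x) returns (batch, dim)."""
    encoder.eval()
    feats, labels = [], []
    for x, y in loader:
        feats.append(encoder(x.to(device)).cpu())
        labels.append(y)
    return torch.cat(feats), torch.cat(labels)

def train_linear_probe(feats, labels, num_classes, epochs=10, lr=1e-3):
    probe = nn.Linear(feats.size(1), num_classes)
    opt = torch.optim.AdamW(probe.parameters(), lr=lr)
    for _ in range(epochs):   # full-batch for simplicity
        loss = nn.functional.cross_entropy(probe(feats), labels)
        opt.zero_grad()
        loss.backward()
        opt.step()
    return probe  # accuracy of probe(feats).argmax(-1) is the reported metric
```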
Performance at 224px. Web-DINO can outperform off-the-shelf MetaCLIP in both VQA and classic vision tasks. Web-DINO is even able to match the performance of SigLIP and SigLIP2 on VQA despite seeing $5 \times$ less data and receiving no language supervision. In general, Web-DINO outperforms all off-the-shelf language-supervised CLIP models at traditional vision benchmarks. Although our best Web-DINO model is 7B parameters, the results from Section 3.1 and Section 3.2 suggest that CLIP models saturate beyond moderate model and data sizes, while visual SSL improves progressively with increasing model and data size. Web-DINO also outperforms off-the-shelf visual SSL methods, including DINOv2 (Oquab et al., 2023), in all VQA categories. Web-DINO is also competitive in traditional vision benchmarks.
Performance beyond 224px. Next, we discuss the performance of higher-resolution models.
<table><tr><td colspan="4">Model</td><td colspan="5">MLLM Evaluator</td><td colspan="5">Classic Vision Tasks</td></tr><tr><td>Method</td><td>Pretrain Data</td><td>Pretrain Samples Seen</td><td>Res</td><td>AVG</td><td>General</td><td>Knowledge</td><td>OCR & Chart</td><td>Vision-Centric</td><td>IN1k lin.</td><td>ADE20K lin.</td><td>ADE20K ms.</td><td>NYUd lin. 1 (↓)</td><td>NYUd lin. 4 (↓)</td></tr><tr><td colspan="4">Language-Supervised Models</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td rowspan="2">SigLIP ViT-SO400M</td><td rowspan="2">WebLI</td><td rowspan="2">45.0B</td><td>224</td><td>55.4</td><td>74.4</td><td>48.7</td><td>39.5</td><td>58.9</td><td>86.5</td><td>36.5</td><td>38.0</td><td>0.607</td><td>0.525</td></tr><tr><td>384</td><td>60.0</td><td>76.3</td><td>50.4</td><td>53.5</td><td>59.7</td><td>87.3</td><td>39.5</td><td>47.2</td><td>0.582</td><td>0.438</td></tr><tr><td rowspan="2">SigLIP2 ViT-SO400M</td><td rowspan="2">WebLI</td><td rowspan="2">45.0B</td><td>224</td><td>56.3</td><td>74.4</td><td>50.7</td><td>42.1</td><td>58.1</td><td>87.5</td><td>41.1</td><td>44.2</td><td>0.562</td><td>0.539</td></tr><tr><td>384</td><td>62.0</td><td>76.6</td><td>51.9</td><td>58.4</td><td>61.0</td><td>88.1</td><td>43.5</td><td>50.2</td><td>0.524</td><td>0.469</td></tr><tr><td>MetaCLIP ViT-G</td><td>MetaCLIP</td><td>12.8B</td><td>224</td><td>54.8</td><td>75.5</td><td>48.2</td><td>37.3</td><td>58.4</td><td>86.4</td><td>38.0</td><td>46.7</td><td>0.524</td><td>0.415</td></tr><tr><td colspan="4">Visual Self-Supervised Models</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>MAE ViT-H</td><td>ImageNet-1k</td><td>2.0B</td><td>224</td><td>45.2</td><td>64.6</td><td>43.9</td><td>20.6</td><td>51.7</td><td>76.6</td><td>33.3</td><td>30.7</td><td>0.517</td><td>0.483</td></tr><tr><td>I-JEPA ViT-H</td><td>ImageNet-22k</td><td>0.9B</td><td>224</td><td>44.7</td><td>65.4</td><td>43.9</td><td>21.2</td><td>48.4</td><td>68.8</td><td>31.6</td><td>34.6</td><td>0.548</td><td>0.520</td></tr><tr><td>DINOv2 ViT-g</td><td>LVD-142M</td><td>1.9B</td><td>518</td><td>47.9</td><td>70.2</td><td>45.0</td><td>21.2</td><td>55.3</td><td>86.0</td><td>49.0</td><td>53.0</td><td>0.344</td><td>0.298</td></tr><tr><td rowspan="3">Web-DINO ViT-7B</td><td rowspan="3">MC-2B</td><td rowspan="3">8.0B</td><td>224</td><td>55.2</td><td>74.5</td><td>48.0</td><td>39.4</td><td>59.1</td><td>86.5</td><td>42.1</td><td>52.6</td><td>0.491</td><td>0.376</td></tr><tr><td>378</td><td>57.4</td><td>73.9</td><td>47.7</td><td>50.4</td><td>57.7</td><td>86.3</td><td>42.3</td><td>53.1</td><td>0.498</td><td>0.366</td></tr><tr><td>518</td><td>59.9</td><td>75.5</td><td>48.2</td><td>55.1</td><td>60.8</td><td>86.4</td><td>42.6</td><td>52.8</td><td>0.490</td><td>0.362</td></tr></table>
Table 3 Comparison with other vision models. Web-DINO ViT-7B achieves competitive performance with CLIP models on VQA without language supervision and surpasses them on traditional vision tasks. Compared to other self-supervised models like DINOv2, Web-DINO significantly narrows the performance gap with CLIP on VQA tasks, particularly excelling in OCR & Chart understanding. These results demonstrate that SSL can effectively produce strong visual representations for both multimodal and classic vision tasks.
Following Oquab et al. (2023), we additionally fine-tune Web-DINO for 20k steps. We do this for resolutions of 378 and 518, to compare against the higher-resolution off-the-shelf versions of SigLIP as well as DINO. See Appendix C for training details. From 224 to 378 to 518 resolution, Web-DINO improves steadily at average VQA, with notable gains in OCR & Chart performance. Classic vision performance improves modestly with higher resolution. At 384 resolution, Web-DINO trails behind SigLIP. At 518 resolution, Web-DINO is largely able to bridge the gap. The results suggest that Web-DINO may benefit from further high-resolution adaptation.
# 6 Related Work
Visual self-supervised learning methods. Early visual SSL methods explored various pretext tasks for pretraining (Wang and Gupta, 2015; Doersch et al., 2015; Noroozi and Favaro, 2016; Zhang et al., 2016; Gidaris et al., 2018; Balestriero et al., 2023). More recently, research has converged on two primary approaches: joint embedding methods and masked image modeling. Joint embedding methods learn invariant features by aligning representations of different augmented views (He et al., 2019; Misra and Van Der Maaten, 2019; Chen et al., 2020a; Grill et al., 2020; Chen et al., 2020b; Chen and He, 2021; Chen et al., 2021; Caron et al., 2021; LeCun, 2022; Chen et al., 2022; Garrido et al., 2023), while masked modeling (Zhou et al., 2021; He et al., 2022; Wei et al., 2022; Fan et al., 2023; Assran et al., 2023; Woo et al., 2023; Bar et al., 2024; Bai et al., 2024; Carreira et al., 2024) learns by predicting masked visual inputs.
Our work complements SSL research focused on pretraining algorithms by taking off-the-shelf training code and training visual SSL at scale with a controlled experimental setup. In Question 1, we show that the observed scaling behavior generalizes across both joint embedding and masked modeling SSL methods, and is likely not a method-specific phenomenon.
Data used to train vision models. Both supervised (He et al., 2016; Xie et al., 2016; Dosovitskiy et al., 2021; Liu et al., 2022) and SSL vision models have traditionally relied on standard datasets such as MNIST (LeCun, 1998), CIFAR-10 (Krizhevsky et al., 2009), and ImageNet (Deng et al., 2009; Ridnik et al., 2021). More recently, self-supervised methods have scaled to larger unlabeled datasets, such as YFCC (Thomee et al., 2016), LVD-142M (Oquab et al., 2023), and IG-3B (Singh et al., 2023); however, these methods still exhibit a significant performance gap compared to language-supervised models on VQA.
In contrast, language-supervised models (Radford et al., 2021; Zhai et al., 2023; Sun et al., 2023, 2024; Xu et al., 2024b; Tang et al., 2025) leverage significantly larger image-text datasets, from WIT-400M (Radford et al., 2021) to billion-scale web data (Schuhmann et al., 2022; Fang et al., 2024; Xu et al., 2024b; Gadre et al., 2024), with some using up to 100B image-text pairs (Wang et al., 2025). Studies suggest that pretraining data distribution is more critical for downstream performance than specific training methodologies (Fang et al., 2022; Liu and He, 2025).
Our work bridges these paradigms by pretraining SSL models on web-scale data. Through controlled experiments (Section 3 and 4), we show that (1) visual SSL models are sensitive to the training distribution, (2) increasing data diversity and quantity significantly improves performance on a diverse range of VQA tasks, and (3) training on a higher concentration of images containing text is highly effective for improving OCR & Chart understanding.
Evaluating vision models. Classic works have primarily used image classification (LeCun, 1998; Krizhevsky et al., 2009; Deng et al., 2009; Bossard et al., 2014; Hendrycks et al., 2019, 2020) to evaluate learned representations. More recent SSL research has expanded evaluation to include image segmentation (Everingham et al., 2010; Cordts et al., 2016; He et al., 2017; Zhou et al., 2019), depth estimation (Silberman et al., 2012; Geiger et al., 2013; Song et al., 2015), and video classification (Soomro et al., 2012; Goyal et al., 2017a; Baruch et al., 2021). Language-supervised models (Radford et al., 2021; Zhai et al., 2023), due to their two-tower encoder structure, commonly use zero-shot image classification to assess the quality of learned image and text features.
Our work follows recent proposals (Naeem et al., 2024; Fini et al., 2024; Tong et al., 2024a) to evaluate vision encoders on a broader range of VQA tasks (Goyal et al., 2017b; Yue et al., 2024a; Liu et al., 2024c; Fu et al., 2023; Tao and Xie, 2024; Yue et al., 2024b; xAI, 2024) using MLLMs. These VQA tasks complement traditional vision benchmarks by assessing visual features on a more diverse range of real-world perceptual challenges. As shown in Section 3 and Section 4, we find that visual SSL trained on web-scale data learns representations that continue to improve on VQA benchmarks, and—to a lesser degree—also on traditional vision benchmarks.
# 7 Limitations
In this work, we focus on training visual SSL models without using language. The main limitation of vision-only models, compared to language-supervised models, is that they do not support zero-shot image classification out of the box. However, by integrating visual SSL models into MLLM frameworks through instruction tuning, we show they can achieve impressive downstream performance across classification and other tasks. Another way to achieve zero-shot image classification is to use LiT-style adaptation (Zhai et al., 2022; Jose et al., 2024), but this is out of scope for our work as we do not use language supervision. To focus on comparing the vision encoder, we fixed the base LLM for visual instruction tuning to Llama-3 8B Instruct (AI@Meta, 2024). We hypothesize that the findings would be similar with other LLM backbones; however, this is not in scope for our work. Additionally, while we demonstrate that visual SSL scales well on MetaCLIP data, we leave the exploration of even larger and/or uncurated datasets to future work.
# 8 Discussion
We show that large-scale visual encoders trained with self-supervised, language-free objectives can produce high-quality visual features for multimodal models. Our results echo the "bitter lesson" (Sutton, 2019) and suggest that imposing less supervision—including language—remains a promising direction for advancing the field of computer vision. We hope our work will inspire further exploration of vision-only approaches, enabling the construction of next-generation vision models that excel at both traditional vision and modern multimodal capabilities.
# 9 Acknowledgements
We thank Ellis Brown, John Nguyen, Junlin Han, Shengyi Qian, Tyler Zhu, Yuexiang Zhai, Druv Pai, Shusheng Yang, Jihan Yang, Muzi Tao, Boyang Zheng, and Anjali Gupta for reviewing this manuscript. We thank Hu Xu and the MetaCLIP paper authors for creating the MetaCLIP dataset. We thank Mido Assran, Mikael Henaff, Daniel Bolya, Hu Xu, Mark Ibrahim, Russ Howes, and Matthew Muckley for their insightful feedback. We thank Michael Ramamonjisoa and Marc Szafraniec for their help with image segmentation and depth estimation evaluations. Lastly, we thank Ananya Saxena, Cody Olsen, Mack Ward, Maxwell Taylor, Kalyan Saladi, Dev Satpathy, Dinesh Kannappan, Xiaodong Ma, Jacob Kahn, Gabriel Synnaeve, and Shubho Sengupta for infrastructure support.
# References
Pravesh Agrawal, Szymon Antoniak, Emma Bou Hanna, Devendra Chaplot, Jessica Chudnovsky, Saurabh Garg, Theophile Gervet, Soham Ghosh, Amélie Héliiou, Paul Jacob, et al. Pixtral 12b. arXiv preprint arXiv:2410.07073, 2024. 1
AI@Meta. Llama 3 model card. 2024. 1, 10, 16
Loubna Ben Allal, Anton Lozhkov, Elie Bakouch, Gabriel Martin Blazquez, Guilherme Penedo, Lewis Tunstall, Andres Marafioti, Hynek Kydlicek, Agustin Piqueres Lajarin, Vaibhav Srivastav, et al. Smollm2: When smol goes big -- data-centric training of a small language model. arXiv preprint arXiv:2502.02737, 2025. 6, 16
Mahmoud Assran, Quentin Duval, Ishan Misra, Piotr Bojanowski, Pascal Vincent, Michael Rabbat, Yann LeCun, and Nicolas Ballas. Self-supervised learning from images with a joint-embedding predictive architecture. In CVPR, 2023. 9
Yutong Bai, Xinyang Geng, Karttikeya Mangalam, Amir Bar, Alan L Yuille, Trevor Darrell, Jitendra Malik, and Alexei A Efros. Sequential modeling enables scalable learning for large vision models. In CVPR, 2024. 9
|
| 275 |
+
Randall Balestriero, Mark Ibrahim, Vlad Sobal, Ari Morcos, Shashank Shekhar, Tom Goldstein, Florian Bordes, Adrien Bardes, Gregoire Mialon, Yuandong Tian, et al. A cookbook of self-supervised learning. arXiv preprint arXiv:2304.12210, 2023. 9
|
| 276 |
+
Amir Bar, Florian Bordes, Assaf Shocher, Mido Assran, Pascal Vincent, Nicolas Ballas, Trevor Darrell, Amir Globerson, and Yann LeCun. Stochastic positional embeddings improve masked image modeling. In ICML, 2024. 9
|
| 277 |
+
Gilad Baruch, Zhuoyuan Chen, Afshin Dehghan, Tal Dimry, Yuri Feigin, Peter Fu, Thomas Gebauer, Brandon Joffe, Daniel Kurz, Arik Schwartz, and Elad Shulman. ARKitsscenes - a diverse real-world dataset for 3d indoor scene understanding using mobile RGB-d data. In NeurIPS, 2021. 10
|
| 278 |
+
Lucas Beyer, Andreas Steiner, André Susano Pinto, Alexander Kolesnikov, Xiao Wang, Daniel Salz, Maxim Neumann, Ibrahim Alabdulmohsin, Michael Tschannen, Emanuele Bugliarello, et al. Paligemma: A versatile 3b vlm for transfer. arXiv preprint arXiv:2407.07726, 2024. 1
|
| 279 |
+
Lukas Bossard, Matthieu Guillaumin, and Luc Van Gool. Food-101-mining discriminative components with random forests. In ECCV, 2014. 10
|
| 280 |
+
Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. In NeurIPS, 2020. 3
|
| 281 |
+
|
| 282 |
+
Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In ICCV, 2021. 9
|
| 283 |
+
João Carreira, Dilara Gokay, Michael King, Chuhan Zhang, Ignacio Rocco, Aravindh Mahendran, Thomas Albert Keck, Joseph Heyward, Skanda Koppula, Etienne Pot, et al. Scaling 4d representations. arXiv preprint arXiv:2412.15212, 2024. 9
|
| 284 |
+
Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In ICML, 2020a. 1, 9
|
| 285 |
+
Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In CVPR, 2021. 9
|
| 286 |
+
Xinlei Chen, Haoqi Fan, Ross Girshick, and Kaiming He. Improved baselines with momentum contrastive learning. arXiv preprint arXiv:2003.04297, 2020b. 9
|
| 287 |
+
Xinlei Chen, Saining Xie, and Kaiming He. An empirical study of training self-supervised vision transformers. In ICCV, 2021. 9
|
| 288 |
+
Xi Chen, Xiao Wang, Soravit Changpinyo, AJ Piergiovanni, Piotr Padlewski, Daniel Salz, Sebastian Goodman, Adam Grycner, Basil Mustafa, Lucas Beyer, et al. Pali: A jointly-scaled multilingual language-image model. In ICLR, 2023. 2
|
| 289 |
+
Yubei Chen, Adrien Bardes, Zengyi Li, and Yann LeCun. Bag of image patch embedding behind the success of self-supervised learning. arXiv preprint arXiv:2206.08954, 2022. 9
|
| 290 |
+
Mehdi Cherti, Romain Beaumont, Ross Wightman, Mitchell Wortsman, Gabriel Ilharco, Cade Gordon, Christoph Schuhmann, Ludwig Schmidt, and Jenia Jitsev. Reproducible scaling laws for contrastive language-image learning. In CVPR, 2023. 16
|
| 291 |
+
Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. Palm: Scaling language modeling with pathways. arxiv 2022. arXiv preprint arXiv:2204.02311, 10:1, 2022. 5
|
| 292 |
+
Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In CVPR, 2016. 10
|
| 293 |
+
Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In CVPR, 2009. 2, 3, 6, 8, 9, 10, 16, 20
|
| 294 |
+
Carl Doersch, Abhinav Gupta, and Alexei A Efros. Unsupervised visual representation learning by context prediction. In ICCV, 2015. 9
|
| 295 |
+
|
| 296 |
+
Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2021. 9
|
| 297 |
+
Mark Everingham, Luc Van Gool, Christopher KI Williams, John Winn, and Andrew Zisserman. The Pascal visual object classes (voc) challenge. IJCV, 2010. 10
|
| 298 |
+
David Fan, Jue Wang, Shuai Liao, Yi Zhu, Vimal Bhat, Hector Santos-Villalobos, Rohith MV, and Xinyu Li. Motion-guided masking for spatiotemporal representation learning. In CVPR, 2023. 9
|
| 299 |
+
Alex Fang, Gabriel Ilharco, Mitchell Wortsman, Yuhao Wan, Vaishaal Shankar, Achal Dave, and Ludwig Schmidt. Data determines distributional robustness in contrastive language image pre-training (clip). In ICML, 2022. 10
|
| 300 |
+
Alex Fang, Albin Madappally Jose, Amit Jain, Ludwig Schmidt, Alexander Toshev, and Vaishaal Shankar. Data filtering networks. In ICLR, 2024. 10
|
| 301 |
+
Enrico Fini, Mustafa Shukor, Xiujun Li, Philipp Dufter, Michal Klein, David Haldimann, Sai Aitharaju, Victor Guilherme Turrisi da Costa, Louis Bethune, Zhe Gan, et al. Multimodal autoregressive pre-training of large vision encoders. arXiv preprint arXiv:2411.14402, 2024. 2, 6, 10
|
| 302 |
+
Chaoyou Fu, Peixian Chen, Yunhang Shen, Yulei Qin, Mengdan Zhang, Xu Lin, Zhenyu Qiu, Wei Lin, Jinrui Yang, Xiawu Zheng, et al. Mme: a comprehensive evaluation benchmark for multimodal large language models. corr abs/2306.13394 (2023), 2023. 10, 20
|
| 303 |
+
Samir Yitzhak Gadre, Gabriel Ilharco, Alex Fang, Jonathan Hayase, Georgios Smyrnis, Thao Nguyen, Ryan Marten, Mitchell Wortsman, Dhruba Ghosh, Jieyu Zhang, et al. Datacomp: In search of the next generation of multimodal datasets. In NeurIPS, 2024. 10
|
| 304 |
+
Quentin Garrido, Yubei Chen, Adrien Bardes, Laurent Najman, and Yann Lecun. On the duality between contrastive and non-contrastive self-supervised learning. In ICLR, 2023. 9
|
| 305 |
+
Yuying Ge, Yixiao Ge, Ziyun Zeng, Xintao Wang, and Ying Shan. Planting a seed of vision in large language model. arXiv preprint arXiv:2307.08041, 2023. 20
|
| 306 |
+
Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The kitti dataset. The International Journal of Robotics Research, 32(11):1231-1237, 2013. 10
|
| 307 |
+
Spyros Gidaris, Praveer Singh, and Nikos Komodakis. Unsupervised representation learning by predicting image rotations. In ICLR, 2018. 9
|
| 308 |
+
|
| 309 |
+
Raghav Goyal, Samira Ebrahimi Kahou, Vincent Michalski, Joanna Materzynska, Susanne Westphal, Heuna Kim, Valentin Haenel, Ingo Fruend, Peter Yianilos, Moritz Mueller-Freitag, et al. The" something something" video database for learning and evaluating visual common sense. In ICCV, 2017a. 10
|
| 310 |
+
Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering. In CVPR, 2017b. 10
|
| 311 |
+
Jean-Bastien Grill, Florian Strub, Florent Altché, Corentin Tallec, Pierre Richemond, Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Guo, Mohammad Gheshlaghi Azar, et al. Bootstrap your own latent-a new approach to self-supervised learning. In NeurIPS, 2020. 9
|
| 312 |
+
Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016. 9
|
| 313 |
+
Kaiming He, Georgia Gkioxari, Piotr Dólar, and Ross Girshick. Mask r-cnn. In ICCV, 2017. 10
|
| 314 |
+
Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. arxiv e-prints, art. In CVPR, 2019. 9
|
| 315 |
+
Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollar, and Ross Girshick. Masked autoencoders are scalable vision learners. In CVPR, 2022. 1, 3, 5, 9
|
| 316 |
+
Dan Hendrycks, Kevin Zhao, Steven Basart, Jacob Steinhardt, and Dawn Xiaodong Song. Natural adversarial examples. 2021 IEEE. In CVPR, 2019. 10
|
| 317 |
+
Dan Hendrycks, Steven Basart, Norman Mu, Saurav Kadavath, Frank Wang, Evan Dorundo, Rahul Desai, Tyler Zhu, Samyak Parajuli, Mike Guo, et al. The many faces of robustness: A critical analysis of out-of-distribution generalization. 2021 IEEE. In ICCV, 2020. 10
|
| 318 |
+
Tuomo Hiippala, Malihe Alikhani, Jonas Haverinen, Timo Kalliokoski, Evanfiya Logacheva, Serafina Orekhova, Aino Tuomainen, Matthew Stone, and John A Bateman. Ai2d-rst: A multimodal corpus of 1000 primary school science diagrams. Language Resources and Evaluation, 55:661-688, 2021. 20
|
| 319 |
+
Jordan Hoffmann, Sebastian Borgeaud, Arthur Mensch, Elena Buchatskaya, Trevor Cai, Eliza Rutherford, Diego de Las Casas, Lisa Anne Hendricks, Johannes Welbl, Aidan Clark, et al. Training compute-optimal large language models. In NeurIPS, 2023. 5
|
| 320 |
+
Drew A. Hudson and Christopher D. Manning. Gqa: A new dataset for real-world visual reasoning and compositional question answering. In CVPR, 2019. 20
|
| 321 |
+
Minyoung Huh, Brian Cheung, Tongzhou Wang, and
|
| 322 |
+
|
| 323 |
+
Phillip Isola. The platonic representation hypothesis. In ICML, 2024. 8
|
| 324 |
+
Cijo Jose, Théo Moutakanni, Dahiyun Kang, Federico Baldassarre, Timothee Darcet, Hu Xu, Daniel Li, Marc Szafraniec, Michael Ramamonjisoa, Maxime Oquab, et al. Dinov2 meets text: A unified framework for image-and pixel-level vision-language alignment. arXiv preprint arXiv:2412.16334, 2024. 10
|
| 325 |
+
Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. Scaling laws for neural language models. arXiv preprint arXiv:2001.08361, 2020. 3, 5
|
| 326 |
+
Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009. 9, 10
|
| 327 |
+
Yann LeCun. The mnist database of handwritten digits. http://yann.lecun.com/exdb/mnist/, 1998. 9, 10
|
| 328 |
+
Yann LeCun. A path towards autonomous machine intelligence version 0.9.2, 2022-06-27. Open Review, 62(1): 1-62, 2022. 1, 9
|
| 329 |
+
Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024. 1
|
| 330 |
+
Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. In NeurIPS, 2023a. 1
|
| 331 |
+
Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In CVPR, 2024a. 1
|
| 332 |
+
Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-last: Improved reasoning,OCR,and world knowledge,2024b. 8
|
| 333 |
+
Yuliang Liu, Zhang Li, Hongliang Li, Wenwen Yu, Mingxin Huang, Dezhi Peng, Mingyu Liu, Mingrui Chen, Chunyuan Li, Lianwen Jin, et al. On the hidden mystery ofOCR in large multimodal models. arXiv preprint arXiv:2305.07895, 2023b. 20
|
| 334 |
+
Yuan Liu, Haodong Duan, Yuanhan Zhang, Bo Li, Songyang Zhang, Wangbo Zhao, Yike Yuan, Jiaqi Wang, Conghui He, Ziwei Liu, et al. Mmbench: Is your multi-modal model an all-around player? In ECCV, 2024c. 10, 20
|
| 335 |
+
Zhuang Liu and Kaiming He. A decade's battle on dataset bias: Are we there yet? In ICLR, 2025. 5, 10
|
| 336 |
+
Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie. A convnet for the 2020s. In CVPR, 2022. 9
|
| 337 |
+
Pan Lu, Swaroop Mishra, Tanglin Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter
|
| 338 |
+
|
| 339 |
+
Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. In NeurIPS, 2022. 20
|
| 340 |
+
Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. In ICLR, 2023. 20
|
| 341 |
+
Ahmed Masry, Do Xuan Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. In ACL, 2022. 20
|
| 342 |
+
Minesh Mathew, Dimosthenis Karatzas, and CV Jawahar. Docvqa: A dataset for vqa on document images. In WACV, 2021. 20
|
| 343 |
+
Ishan Misra and Laurens Van Der Maaten. Self-supervised learning of pretext-invariant representations. in 2020 IEEE. In CVPR, 2019. 9
|
| 344 |
+
Muhammad Ferjad Naeem, Yongqin Xian, Xiaohua Zhai, Lukas Hoyer, Luc Van Gool, and Federico Tombari. Silc: Improving vision language pretraining with self-distillation. In ECCV, 2024. 6, 10
|
| 345 |
+
Mehdi Noroozi and Paolo Favaro. Unsupervised learning of visual representations by solving jigsaw puzzles. In ECCV, 2016. 9
|
| 346 |
+
OpenAI. Chatgpt, 2022. 3
|
| 347 |
+
Maxime Oquab, Timothee Darcet, Theo Moutakanni, Huy Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, et al. Dinov2: Learning robust visual features without supervision. In TMLR, 2023. 1, 2, 3, 5, 6, 8, 9, 16, 17, 21
|
| 348 |
+
Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, 2021. 1, 9, 10
|
| 349 |
+
Tal Ridnik, Emanuel Ben-Baruch, Asaf Noy, and Lihi Zelnik-Manor. Imagenet-21k pretraining for the masses. arXiv preprint arXiv:2104.10972, 2021. 2, 9
|
| 350 |
+
Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. Laion-5b: An open large-scale dataset for training next generation image-text models. In NeurIPS, 2022. 2, 10, 16, 17
|
| 351 |
+
Min Shi, Fuxiao Liu, Shihao Wang, Shijia Liao, Subhashree Radhakrishnan, De-An Huang, Hongxu Yin, Karan Sapra, Yaser Yacoob, Humphrey Shi, et al. Eagle: Exploring the design space for multimodal llms with mixture of encoders. arXiv preprint arXiv:2408.15998, 2024. 1, 6
|
| 352 |
+
|
| 353 |
+
Nathan Silberman, Derek Hoiem, Pushmeet Kohli, and Rob Fergus. Indoor segmentation and support inference from rgbd images. In ECCV, 2012. 6, 8, 10, 16, 20
|
| 354 |
+
Amanpreet Singh, Vivek Natarajan, Meet Shah, Yu Jiang, Xinlei Chen, Dhruv Batra, Devi Parikh, and Marcus Rohrbach. Towards vqa models that can read. In CVPR, 2019. 20
|
| 355 |
+
Mannat Singh, Quentin Duval, Kalyan Vasudev Alwala, Haoqi Fan, Vaibhav Aggarwal, Aaron Adcock, Armand Joulin, Piotr Dollar, Christoph Feichtenhofer, Ross Girshick, et al. The effectiveness of mae pre-pretraining for billion-scale pretraining. In ICCV, 2023. 9
|
| 356 |
+
Shuran Song, Samuel P Lichtenberg, and Jianxiong Xiao. Sun rgb-d: A rgb-d scene understanding benchmark suite. In CVPR, 2015. 10
|
| 357 |
+
Khurram Soomro, Amir Roshan Zamir, and Mubarak Shah. Ucf101: A dataset of 101 human actions classes from videos in the wild. arXiv preprint arXiv:1212.0402, 2012. 10
|
| 358 |
+
Krishna Srinivasan, Karthik Raman, Jiecao Chen, Michael Bendersky, and Marc Najork. Wit: Wikipedia-based image text dataset for multimodal multilingual machine learning. In Proceedings of the 44th international ACM SIGIR conference on research and development in information retrieval, pages 2443-2449, 2021. 8
|
| 359 |
+
Quan Sun, Yuxin Fang, Ledell Wu, Xinlong Wang, and Yue Cao. Eva-clip: Improved training techniques for clip at scale. arXiv preprint arXiv:2303.15389, 2023. 10
|
| 360 |
+
Quan Sun, Jinsheng Wang, Qiying Yu, Yufeng Cui, Fan Zhang, Xiaosong Zhang, and Xinlong Wang. Evaclip-18b: Scaling clip to 18 billion parameters. arXiv preprint arXiv:2402.04252, 2024. 10
|
| 361 |
+
Richard Sutton. The bitter lesson. Incomplete Ideas (blog), 2019. 10
|
| 362 |
+
Zineng Tang, Long Lian, Seun Eisape, XuDong Wang, Roei Herzig, Adam Yala, Alane Suhr, Trevor Darrell, and David M. Chan. Tulip: Towards unified language-image pretraining, 2025. Preprint. 10
|
| 363 |
+
Muzi Tao and Saining Xie. What does a visual formal analysis of the world's 500 most famous paintings tell us about multimodal LLMs? In The Second Tiny Papers Track at ICLR 2024, 2024. 10
|
| 364 |
+
Bart Thomee, David A Shamma, Gerald Friedland, Benjamin Elizalde, Karl Ni, Douglas Poland, Damian Borth, and Li-Jia Li. Yfcc100m: The new data in multimedia research. Communications of the ACM, 59 (2):64-73, 2016. 9
|
| 365 |
+
Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, et al. Cambrian-1: A fully open, vision-centric exploration
|
| 366 |
+
|
| 367 |
+
of multimodal llms. In NeurIPS, 2024a. 1, 2, 3, 6, 8, 10, 16, 20
|
| 368 |
+
Shengbang Tong, David Fan, Jiachen Zhu, Yunyang Xiong, Xinlei Chen, Koustuv Sinha, Michael Rabbat, Yann LeCun, Saining Xie, and Zhuang Liu. Metamorph: Multimodal understanding and generation via instruction tuning. arXiv preprint arXiv:2412.14164, 2024b. 16
|
| 369 |
+
Shengbang Tong, Zhuang Liu, Yuexiang Zhai, Yi Ma, Yann LeCun, and Saining Xie. Eyes wide shut? exploring the visual shortcomings of multimodal llms. In CVPR, 2024c. 20
|
| 370 |
+
Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Roziere, Naman Goyal, Eric Hambro, Faisal Azhar, et al. LLaMA: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023. 3, 8
|
| 371 |
+
Michael Tschannen, Manoj Kumar, Andreas Steiner, Xiaohua Zhai, Neil Houlsby, and Lucas Beyer. Image captioners are scalable vision learners too. In NeurIPS, 2024. 2, 3
|
| 372 |
+
Bo Wan, Michael Tschannen, Yongqin Xian, Filip Pavetic, Ibrahim Alabdulmohsin, Xiao Wang, André Susano Pinto, Andreas Steiner, Lucas Beyer, and Xiaohua Zhai. *Locca: Visual pretraining with location-aware captioners.* arXiv preprint arXiv:2403.19596, 2024. 2
|
| 373 |
+
Xiaolong Wang and Abhinav Gupta. Unsupervised learning of visual representations using videos. In ICCV, 2015. 9
|
| 374 |
+
Xiao Wang, Ibrahim Alabdulmohsin, Daniel Salz, Zhe Li, Keran Rong, and Xiaohua Zhai. Scaling pre-training to one hundred billion data for vision language models. arXiv preprint arXiv:2502.07617, 2025. 10
|
| 375 |
+
Chen Wei, Haoqi Fan, Saining Xie, Chao-Yuan Wu, Alan Yuille, and Christoph Feichtenhofer. Masked feature prediction for self-supervised visual pre-training. In CVPR, 2022. 9
|
| 376 |
+
Sanghyun Woo, Shoubhik Debnath, Ronghang Hu, Xinlei Chen, Zhuang Liu, In So Kweon, and Saining Xie. Convnext v2: Co-designing and scaling convnets with masked autoencoders. In CVPR, 2023. 9
|
| 377 |
+
xAI. grok, 2024. 10, 20
|
| 378 |
+
Saining Xie, Ross Girshick, Piotr Dollar, Zhuowen Tu, and Kaiming He. Aggregated residual transformations for deep neural networks. arXiv preprint arXiv:1611.05431, 2016. 9
|
| 379 |
+
Hu Xu, Po-Yao Huang, Xiaqing Ellen Tan, Ching-Feng Yeh, Jacob Kahn, Christine Jou, Gargi Ghosh, Omer Levy, Luke Zettlemoyer, Wen-tau Yih, et al. Altogether: Image captioning via re-aligning alt-text. arXiv preprint arXiv:2410.17251, 2024a. 3
|
| 380 |
+
|
| 381 |
+
Hu Xu, Saining Xie, Xiaoqing Ellen Tan, Po-Yao Huang, Russell Howes, Vasu Sharma, Shang-Wen Li, Gargi Ghosh, Luke Zettlemoyer, and Christoph Feichtenhofer. Demystifying clip data. In ICLR, 2024b. 2, 3, 10, 17, 21
|
| 382 |
+
Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, et al. Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. In CVPR, 2024a. 10, 20
|
| 383 |
+
Xiang Yue, Tianyu Zheng, Yuansheng Ni, Yubo Wang, Kai Zhang, Shengbang Tong, Yuxuan Sun, Ming Yin, Botao Yu, Ge Zhang, et al. Mmmu-pro: A more robust multi-discipline multimodal understanding benchmark. arXiv preprint arXiv:2409.02813, 2024b. 10
|
| 384 |
+
Xiaohua Zhai, Xiao Wang, Basil Mustafa, Andreas Steiner, Daniel Keysers, Alexander Kolesnikov, and Lucas Beyer. Lit: Zero-shot transfer with locked-image text tuning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 18123-18133, 2022. 10
|
| 385 |
+
Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pretraining. In ICCV, 2023. 1, 10
|
| 386 |
+
Richard Zhang, Phillip Isola, and Alexei A Efros. Colorful image colorization. In ECCV, 2016. 1, 9
|
| 387 |
+
Yanli Zhao, Andrew Gu, Rohan Varma, Liang Luo, Chien-Chin Huang, Min Xu, Less Wright, Hamid Shojanazeri, Myle Ott, Sam Shleifer, et al. Pytorch fsdp: experiences on scaling fully sharded data parallel. arXiv preprint arXiv:2304.11277, 2023. 16
|
| 388 |
+
Bolei Zhou, Hang Zhao, Xavier Puig, Tete Xiao, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Semantic understanding of scenes through the ade20k dataset. IJCV, 2019. 3, 6, 8, 10, 16, 20
|
| 389 |
+
Jinghao Zhou, Chen Wei, Huiyu Wang, Wei Shen, Cihang Xie, Alan Yuille, and Tao Kong. ibot: Image bert pre-training with online tokenizer. arXiv preprint arXiv:2111.07832, 2021. 9
|
| 390 |
+
|
| 391 |
+
# A Implementation Details
Training. For training Web-DINO, Web-MAE, and CLIP models, we closely follow the existing open-source codebases: the official DINOv2 and MAE repositories, and the MetaCLIP codebase, which builds on top of the OpenCLIP codebase (Cherti et al., 2023). We use Fully Sharded Data Parallel (FSDP) (Zhao et al., 2023) for distributed training of larger models.
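As a rough illustration of the FSDP setup, here is a minimal PyTorch sketch; the toy model, wrapping policy, and size threshold are placeholder assumptions, not the configuration used in this work.

```python
import functools
import torch
import torch.distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy

def shard_model(model: torch.nn.Module) -> FSDP:
    # Shard parameters, gradients, and optimizer state across ranks;
    # submodules above ~100M parameters become their own FSDP units (assumed threshold).
    wrap_policy = functools.partial(size_based_auto_wrap_policy, min_num_params=int(1e8))
    return FSDP(model, auto_wrap_policy=wrap_policy, device_id=torch.cuda.current_device())

if __name__ == "__main__":
    # launch with: torchrun --nproc_per_node=8 this_script.py
    dist.init_process_group("nccl")
    torch.cuda.set_device(dist.get_rank() % torch.cuda.device_count())
    model = torch.nn.Sequential(*[torch.nn.Linear(4096, 4096) for _ in range(8)]).cuda()
    model = shard_model(model)  # stand-in for a large ViT backbone
```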
For Web-DINO and CLIP pretraining, we follow the exact recipe and hyperparameters from the respective original papers for their largest models. For MAE pretraining, we observe that training becomes more prone to divergence as model size increases. To mitigate this, we reduce the learning rate from 2.4e-3 to 1.6e-3 and extend the warmup period to 80K iterations. Table 4 summarizes the pretraining hyperparameters.
<table><tr><td>Model</td><td>Batch Size</td><td>Learning Rate</td><td>Warmup</td></tr><tr><td>Web-DINO</td><td>3072</td><td>3.5e-4</td><td>100K</td></tr><tr><td>Web-MAE</td><td>4096</td><td>1.6e-3</td><td>80K</td></tr><tr><td>CLIP</td><td>32768</td><td>4e-4</td><td>2K</td></tr></table>

Table 4 Hyperparameters for Web-DINO, Web-MAE, and CLIP.
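The divergence mitigation above amounts to a lower peak learning rate with a longer linear warmup. Below is a minimal sketch of a linear-warmup schedule with cosine decay; the decay shape and the `total_steps` value are our assumptions, since only the peak rate and warmup length are stated.

```python
import math

def lr_at_step(step, peak_lr=1.6e-3, warmup_steps=80_000, total_steps=500_000, min_lr=0.0):
    """Linear warmup to peak_lr, then cosine decay to min_lr (total_steps is assumed)."""
    if step < warmup_steps:
        return peak_lr * step / max(1, warmup_steps)
    progress = (step - warmup_steps) / max(1, total_steps - warmup_steps)
    return min_lr + 0.5 * (peak_lr - min_lr) * (1 + math.cos(math.pi * progress))
```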
VQA evaluation. For VQA evaluation, we follow Tong et al. (2024a,b) and use Cambrian-Alignment data for MLP projector training and Cambrian-7M for MLP and LLM fine-tuning. We fine-tune on top of Llama-3 8B Instruct (AI@Meta, 2024). The vision encoder is frozen throughout fine-tuning. We exclude LAION (Schuhmann et al., 2022) images from the Cambrian data to comply with safety standards. We first encode the images at the model's original input resolution using the pretrained vision encoder. Next, we extract features from the final encoder layer. Following prior approaches (Tong et al., 2024a,b), we then resize the resulting token sequence to a fixed length of 576 tokens through bilinear interpolation. This ensures consistency across evaluations despite variations in input image resolution. We report configurations in Table 5.
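A minimal sketch of the token-resizing step described above: patch tokens from the frozen encoder are viewed as a 2D grid and bilinearly interpolated to a 24x24 grid, i.e. 576 tokens. The helper assumes a square grid with no [CLS] token, which may differ from the actual implementation.

```python
import math
import torch
import torch.nn.functional as F

def resize_tokens(tokens: torch.Tensor, target_len: int = 576) -> torch.Tensor:
    """Resize [B, N, D] patch tokens to [B, target_len, D] via bilinear interpolation."""
    B, N, D = tokens.shape
    side, target_side = int(math.sqrt(N)), int(math.sqrt(target_len))
    grid = tokens.transpose(1, 2).reshape(B, D, side, side)       # [B, D, H, W]
    grid = F.interpolate(grid, size=(target_side, target_side),
                         mode="bilinear", align_corners=False)
    return grid.flatten(2).transpose(1, 2)                        # [B, 576, D]
```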
Classic vision evaluation. We follow the evaluation procedure in DINOv2 (Oquab et al., 2023) for all classic vision evaluations: linear probing on ImageNet-1k (Deng et al., 2009), ADE20K (Zhou et al., 2019), and NYU Depth v2 (Silberman et al., 2012). For ImageNet-1k, we evaluate models at their pretrained image resolution; for ADE20K and NYU Depth v2, we use the settings from Oquab et al. (2023). For ADE20K, we follow DINOv2 and report the linear and $+ms$ settings. For NYU Depth v2, we report lin. 1 and lin. 4. See the original paper for additional details.
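For intuition, the ImageNet-1k linear probe reduces to fitting one linear layer on frozen features. A minimal sketch under assumed optimizer settings follows; DINOv2 sweeps these hyperparameters, so the values below are illustrative only.

```python
import torch
import torch.nn.functional as F

def train_linear_probe(encoder, loader, feat_dim=4096, num_classes=1000, epochs=10, lr=1e-3):
    """Fit a linear classifier on frozen encoder features (illustrative settings)."""
    encoder.eval()                                    # backbone stays frozen
    probe = torch.nn.Linear(feat_dim, num_classes).cuda()
    opt = torch.optim.SGD(probe.parameters(), lr=lr, momentum=0.9)
    for _ in range(epochs):
        for images, labels in loader:
            with torch.no_grad():
                feats = encoder(images.cuda())        # frozen features
            loss = F.cross_entropy(probe(feats), labels.cuda())
            opt.zero_grad()
            loss.backward()
            opt.step()
    return probe
```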
Model architectures. In Table 1, we defined the ViT architectures used in our study. To recap, we first borrowed the ViT-g architecture from Oquab et al. (2023) and named it ViT-1B for consistent notation. We then define 2B, 3B, 5B, and 7B architectures inspired by language model scaling: the 2B-7B variants are wider than the 1B model, following language model recipes. Our 7B architecture is almost identical to the Llama-2 7B design, except for the patch embedding layer, which is unique to ViTs.
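To make the Llama-2-shaped ViT concrete, here is a hypothetical configuration sketch. The widths and depths below are taken from the public Llama-2 7B design (4096 hidden size, 32 layers, 32 heads, 11008 MLP width) as an assumption, not copied from the paper's Table 1.

```python
from dataclasses import dataclass

@dataclass
class ViTConfig:
    patch_size: int    # the patch embedding layer is the only ViT-specific piece
    hidden_dim: int
    num_layers: int
    num_heads: int
    mlp_dim: int

# Hypothetical 7B-scale ViT mirroring Llama-2 7B proportions (our assumption).
vit_7b = ViTConfig(patch_size=14, hidden_dim=4096, num_layers=32,
                   num_heads=32, mlp_dim=11008)
```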
Text filtering. In Question 4, we introduced the "Light" and "Heavy" filters, which retain $50.3\%$ and $1.3\%$ of MC-2B respectively. Specifically, we use a small MLLM, SmolVLM2 (Allal et al., 2025), to identify images containing text, using prompts such as "Does this image contain any readable text?" The intention is not to achieve perfect filtering, but rather to skew the data distribution in the desired direction. See Figure 8 for a visualization of the filtering process and some examples. This results in two curated datasets (a minimal sketch of the filtering loop follows the list below):
(i) Light filter: Retains $50.3\%$ of the original data, primarily consisting of images with some textual content. Prompt used: "Does this image contain any readable text? Answer only yes or no."
(ii) Heavy filter: Retains only $1.3\%$ of the data, focusing mainly on charts and documents. Prompt used: "Please think carefully before answering. Does this image contain charts, tables, or documents with readable text? Answer only yes or no."
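Here is the minimal sketch of the filtering loop referenced above; `ask_mllm` is a hypothetical stand-in for a SmolVLM2 yes/no query, since the exact inference code is not specified.

```python
LIGHT_PROMPT = "Does this image contain any readable text? Answer only yes or no."
HEAVY_PROMPT = ("Please think carefully before answering. Does this image contain "
                "charts, tables, or documents with readable text? Answer only yes or no.")

def ask_mllm(image, prompt: str) -> str:
    """Hypothetical wrapper around a small MLLM (e.g., SmolVLM2); returns 'yes' or 'no'."""
    raise NotImplementedError

def filter_dataset(images, prompt: str):
    # Keep an image when the MLLM answers "yes"; imperfect by design --
    # the goal is only to skew the distribution toward text-rich images.
    return [img for img in images
            if ask_mllm(img, prompt).strip().lower().startswith("yes")]
```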
# B Full Results

We include full results of all experiments presented in Section 3 and Section 4.
# B.1 Web-DINO

Scaling up model sizes. We show quantitative results of scaling up the model under VQA evaluation in Table 6 and classic vision evaluation in Table 7. These are the numerical results for Section 3.1.
Scaling up data sizes. We show quantitative results of scaling up the number of images seen with Web-DINO ViT-7B on VQA evaluation in Table 8 and classic vision evaluation in Table 9. These are the numerical results for Section 3.2.

<table><tr><td>Backbone</td><td colspan="2">Data</td><td colspan="3">Adapter</td><td colspan="3">Instruction Tuning</td></tr><tr><td>LLM</td><td>Adapter</td><td>Instruction Tuning</td><td>LR</td><td>WD</td><td>BS</td><td>LR</td><td>WD</td><td>BS</td></tr><tr><td>Llama-3 8B Instruct</td><td>Cambrian Adapter Data</td><td>Cambrian-7M</td><td>1.00e-5</td><td>0.0</td><td>512</td><td>4.00e-5</td><td>0</td><td>512</td></tr></table>

Table 5 Hyperparameters for all VQA experiments. We exclude LAION (Schuhmann et al., 2022) from Cambrian data.

<table><tr><td rowspan="2">Vision Backbone Model</td><td rowspan="2">Average</td><td colspan="4">General</td><td colspan="4">Knowledge</td><td colspan="4">OCR & Chart</td><td colspan="4">Vision-Centric</td></tr><tr><td>\(MME^P\)</td><td>\(MMB\)</td><td>\(SEED^I\)</td><td>\(GQA\)</td><td>\(SQA^I\)</td><td>\(MMMU^V\)</td><td>\(MathVista^M\)</td><td>\(AI2D\)</td><td>ChartQA</td><td>OCRBench</td><td>TextVQA</td><td>DocVQA</td><td>MMVP</td><td>RealWorldQA</td><td>\(CV-Bench^{2D}\)</td><td>\(CV-Bench^{3D}\)</td></tr><tr><td>Web-DINO ViT-1B</td><td>49.01</td><td>1731.52</td><td>65.37</td><td>69.92</td><td>62.40</td><td>72.58</td><td>35.33</td><td>12.30</td><td>64.28</td><td>19.20</td><td>9.40</td><td>47.41</td><td>17.00</td><td>37.33</td><td>57.12</td><td>64.80</td><td>63.16</td></tr><tr><td>Web-DINO ViT-2B</td><td>50.77</td><td>1760.80</td><td>68.98</td><td>71.29</td><td>62.89</td><td>73.67</td><td>31.77</td><td>15.90</td><td>67.06</td><td>23.30</td><td>15.60</td><td>49.20</td><td>19.00</td><td>38.00</td><td>57.38</td><td>65.85</td><td>64.41</td></tr><tr><td>Web-DINO ViT-3B</td><td>51.71</td><td>1757.27</td><td>68.04</td><td>71.84</td><td>63.19</td><td>73.57</td><td>33.00</td><td>14.40</td><td>67.32</td><td>25.68</td><td>17.10</td><td>50.45</td><td>20.00</td><td>42.66</td><td>56.86</td><td>69.49</td><td>65.83</td></tr><tr><td>Web-DINO ViT-5B</td><td>52.83</td><td>1840.81</td><td>70.01</td><td>72.39</td><td>63.56</td><td>75.06</td><td>32.11</td><td>12.40</td><td>67.77</td><td>26.96</td><td>22.10</td><td>50.64</td><td>21.00</td><td>44.66</td><td>57.64</td><td>67.75</td><td>69.16</td></tr><tr><td>Web-DINO ViT-7B</td><td>53.87</td><td>1823.76</td><td>68.98</td><td>73.02</td><td>64.22</td><td>74.61</td><td>35.11</td><td>14.00</td><td>69.43</td><td>28.80</td><td>23.59</td><td>51.10</td><td>22.00</td><td>48.00</td><td>59.34</td><td>69.96</td><td>68.58</td></tr></table>

Table 6 VQA Evaluation: Web-DINO trained on MC-2B with 2 billion images seen.
Scaling down training data. We show VQA evaluation results from training Web-DINO on less diverse data (ImageNet-1k) in Table 10. These are the full results for the scaling down training data experiments in Question 2.
# B.2 Web-MAE
We show VQA evaluation results from scaling up MAE trained on MC-2B, in Table 11. These are the full results for Question 1.
# B.3 Scaled CLIP Models
We show VQA evaluation results from scaling up MetaCLIP (Xu et al., 2024b) trained on MC-2B, in Table 12. These are the full results for Section 3.1. In contrast to visual SSL methods in Table 7 and Table 11, CLIP models do not exhibit clear scaling behavior.
# B.4 Text Filtered Models
We provide full results for Question 4. As shown in Table 13, SSL models learn features particularly well-suited for OCR & Chart tasks when trained on datasets with a higher concentration of text-rich images. This suggests that visual SSL is sensitive to the underlying training distribution and can be effectively steered toward specific downstream applications, such as OCR & Chart.
# B.5 Baseline Models
In Table 14, we provide full VQA results for the reference off-the-shelf models that we evaluated in Section 5.
# C High-Resolution Adaptation of Web-SSL
Following Oquab et al. (2023), we further fine-tune our model at higher resolutions of $378 \times 378$ and $518 \times 518$ for 20k iterations. We use a batch size of 2048 and a correspondingly lower learning rate of 1.41e-5. All other parameters remain exactly the same as previously specified, including the learning rate warmup ratio, given the total of 10k iterations.
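Adapting a ViT trained at 224px to 378px or 518px inputs also requires resampling its positional embeddings to the larger patch grid. The text does not spell this step out, so the sketch below shows the common recipe (bicubic interpolation of a square grid without a [CLS] token) as an assumption.

```python
import math
import torch
import torch.nn.functional as F

def resample_pos_embed(pos_embed: torch.Tensor, new_side: int) -> torch.Tensor:
    """Interpolate [1, N, D] positional embeddings to a new_side x new_side grid."""
    _, N, D = pos_embed.shape
    side = int(math.sqrt(N))
    grid = pos_embed.transpose(1, 2).reshape(1, D, side, side)
    grid = F.interpolate(grid, size=(new_side, new_side),
                         mode="bicubic", align_corners=False)
    return grid.flatten(2).transpose(1, 2)   # [1, new_side**2, D]

# e.g., 518px input with patch size 14 -> a 37x37 grid:
# new_pos = resample_pos_embed(old_pos, new_side=37)
```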
We also provide detailed benchmark results of the high-resolution adaptation of Web-DINO in Table 15.
# D Evaluation

Table 16 lists the evaluation benchmarks used and their purposes.
# E Pretraining Dataset Cards

For reference, in Table 17 we include the data composition of LVD-142M, which was used to train the off-the-shelf DINOv2 model (Oquab et al., 2023). LVD-142M is a carefully curated data mix closely aligned with downstream classic vision evaluation tasks. In comparison, we leverage MetaCLIP data, which is less curated and collected from 15 snapshots of CommonCrawl (CC).
<table><tr><td>Vision Backbone</td><td>IN1k lin.</td><td>ADE20K lin.</td><td>ADE20K +ms.</td><td>NYUd lin. 1 (↓)</td><td>NYUd lin. 4 (↓)</td></tr><tr><td>Web-DINO ViT-1B</td><td>84.70</td><td>46.60</td><td>50.97</td><td>0.364</td><td>0.345</td></tr><tr><td>Web-DINO ViT-2B</td><td>85.16</td><td>50.55</td><td>52.32</td><td>0.351</td><td>0.335</td></tr><tr><td>Web-DINO ViT-3B</td><td>85.66</td><td>50.17</td><td>53.12</td><td>0.348</td><td>0.328</td></tr><tr><td>Web-DINO ViT-5B</td><td>85.84</td><td>49.54</td><td>53.27</td><td>0.378</td><td>0.335</td></tr><tr><td>Web-DINO ViT-7B</td><td>86.00</td><td>49.08</td><td>54.65</td><td>0.380</td><td>0.339</td></tr></table>

Table 7 Classic Vision Evaluation: Web-DINO trained on MC-2B with 2 billion images seen.
<table><tr><td rowspan="2">Vision Backbone</td><td rowspan="2">Average</td><td colspan="4">General</td><td colspan="4">Knowledge</td><td colspan="4">OCR & Chart</td><td colspan="4">Vision-Centric</td></tr><tr><td>\(MME^P\)</td><td>\(MMB\)</td><td>\(SEED^I\)</td><td>GQA</td><td>\(SQA^I\)</td><td>\(MMMU^V\)</td><td>\(MathVista^M\)</td><td>AI2D</td><td>ChartQA</td><td>OCRBench</td><td>TextVQA</td><td>DocVQA</td><td>MMVP</td><td>RealWorldQA</td><td>\(CV-Bench^{2D}\)</td><td>\(CV-Bench^{3D}\)</td></tr><tr><td>Web-DINO ViT-7B (1B Data)</td><td>51.02</td><td>1785.97</td><td>68.12</td><td>72.54</td><td>63.60</td><td>73.87</td><td>32.88</td><td>12.70</td><td>66.58</td><td>23.60</td><td>15.20</td><td>49.04</td><td>19.00</td><td>43.33</td><td>57.12</td><td>68.35</td><td>61.08</td></tr><tr><td>Web-DINO ViT-7B (2B Data)</td><td>53.87</td><td>1823.76</td><td>68.98</td><td>73.02</td><td>64.22</td><td>74.61</td><td>35.11</td><td>14.00</td><td>69.43</td><td>28.80</td><td>23.59</td><td>51.10</td><td>22.00</td><td>48.00</td><td>59.34</td><td>69.96</td><td>68.58</td></tr><tr><td>Web-DINO ViT-7B (4B Data)</td><td>54.37</td><td>1827.12</td><td>71.39</td><td>72.61</td><td>63.53</td><td>72.73</td><td>34.00</td><td>18.90</td><td>67.09</td><td>35.12</td><td>30.00</td><td>53.19</td><td>24.00</td><td>45.33</td><td>55.94</td><td>69.68</td><td>65.00</td></tr><tr><td>Web-DINO ViT-7B (8B Data)</td><td>55.24</td><td>1811.05</td><td>71.30</td><td>72.14</td><td>64.04</td><td>72.43</td><td>35.66</td><td>15.20</td><td>68.52</td><td>35.52</td><td>36.40</td><td>56.53</td><td>29.00</td><td>46.00</td><td>57.90</td><td>70.53</td><td>62.08</td></tr></table>

Table 8 VQA Evaluation: Web-DINO ViT-7B trained on MC-2B with increased number of images seen.
<table><tr><td>Vision Backbone</td><td>IN1k lin.</td><td>ADE20K lin.</td><td>ADE20K +ms.</td><td>NYUd lin. 1 (↓)</td><td>NYUd lin. 4 (↓)</td></tr><tr><td>Web-DINO ViT-7B (2B Data)</td><td>86.00</td><td>49.08</td><td>54.65</td><td>0.380</td><td>0.339</td></tr><tr><td>Web-DINO ViT-7B (4B Data)</td><td>86.33</td><td>47.41</td><td>54.66</td><td>0.416</td><td>0.363</td></tr><tr><td>Web-DINO ViT-7B (8B Data)</td><td>86.52</td><td>42.14</td><td>52.55</td><td>0.491</td><td>0.376</td></tr></table>

Table 9 Classic Vision Evaluation: Web-DINO ViT-7B trained on MC-2B with increased number of images seen.
<table><tr><td rowspan="2">Vision Backbone Model</td><td rowspan="2">Average</td><td colspan="4">General</td><td colspan="4">Knowledge</td><td colspan="4">OCR & Chart</td><td colspan="4">Vision-Centric</td></tr><tr><td>\(MME^P\)</td><td>\(MMB\)</td><td>\(SEED^I\)</td><td>\(GQA\)</td><td>\(SQA^I\)</td><td>\(MMMU^V\)</td><td>\(MathVista^M\)</td><td>\(AI2D\)</td><td>ChartQA</td><td>OCRBench</td><td>TextVQA</td><td>DocVQA</td><td>MMVP</td><td>RealWorldQA</td><td>\(CV-Bench^{2D}\)</td><td>\(CV-Bench^{3D}\)</td></tr><tr><td>Web-DINO ViT-1B</td><td>46.39</td><td>1704.30</td><td>59.27</td><td>66.43</td><td>60.12</td><td>71.29</td><td>32.77</td><td>18.70</td><td>63.40</td><td>17.56</td><td>4.90</td><td>44.93</td><td>14.00</td><td>32.00</td><td>52.41</td><td>62.81</td><td>56.41</td></tr><tr><td>Web-DINO ViT-2B</td><td>45.99</td><td>1666.01</td><td>60.13</td><td>66.64</td><td>60.19</td><td>68.71</td><td>34.88</td><td>12.10</td><td>62.07</td><td>18.60</td><td>4.39</td><td>45.55</td><td>14.00</td><td>32.66</td><td>52.67</td><td>62.07</td><td>57.83</td></tr><tr><td>Web-DINO ViT-3B</td><td>46.43</td><td>1729.40</td><td>60.56</td><td>66.99</td><td>60.24</td><td>70.50</td><td>31.88</td><td>11.70</td><td>62.30</td><td>17.52</td><td>4.80</td><td>45.18</td><td>15.00</td><td>31.33</td><td>53.20</td><td>62.77</td><td>62.50</td></tr><tr><td>Web-DINO ViT-5B</td><td>46.28</td><td>1661.25</td><td>59.27</td><td>67.24</td><td>61.10</td><td>69.41</td><td>31.55</td><td>10.90</td><td>61.46</td><td>18.72</td><td>4.60</td><td>45.53</td><td>15.00</td><td>34.00</td><td>53.07</td><td>64.57</td><td>61.08</td></tr></table>

Table 10 VQA Evaluation: Web-DINO trained on ImageNet-1k.
<table><tr><td rowspan="2">Vision Backbone</td><td rowspan="2">Average</td><td colspan="4">General</td><td colspan="4">Knowledge</td><td colspan="4">OCR & Chart</td><td colspan="4">Vision-Centric</td></tr><tr><td>\(MME^P\)</td><td>\(MMB\)</td><td>\(SEED^I\)</td><td>GQA</td><td>\(SQA^I\)</td><td>\(MMMU^V\)</td><td>\(MathVista^M\)</td><td>AI2D</td><td>ChartQA</td><td>OCRBench</td><td>TextVQA</td><td>DocVQA</td><td>MMVP</td><td>RealWorldQA</td><td>\(CV-Bench^{2D}\)</td><td>\(CV-Bench^{3D}\)</td></tr><tr><td>Web-MAE ViT-1B</td><td>49.19</td><td>1736.22</td><td>62.02</td><td>68.38</td><td>60.05</td><td>73.27</td><td>33.11</td><td>12.90</td><td>63.92</td><td>23.60</td><td>16.40</td><td>47.84</td><td>18.00</td><td>36.66</td><td>52.81</td><td>70.42</td><td>60.83</td></tr><tr><td>Web-MAE ViT-2B</td><td>50.59</td><td>1700.16</td><td>63.57</td><td>69.21</td><td>60.93</td><td>72.48</td><td>32.22</td><td>15.50</td><td>64.44</td><td>29.00</td><td>23.20</td><td>48.78</td><td>20.00</td><td>38.00</td><td>55.16</td><td>67.98</td><td>63.91</td></tr><tr><td>Web-MAE ViT-3B</td><td>50.92</td><td>1723.85</td><td>64.69</td><td>69.71</td><td>60.94</td><td>72.13</td><td>34.33</td><td>13.50</td><td>65.70</td><td>30.92</td><td>24.60</td><td>48.92</td><td>20.00</td><td>37.33</td><td>54.64</td><td>64.15</td><td>66.91</td></tr><tr><td>Web-MAE ViT-5B</td><td>51.50</td><td>1710.13</td><td>65.12</td><td>70.13</td><td>61.10</td><td>72.63</td><td>32.66</td><td>13.90</td><td>65.67</td><td>33.80</td><td>26.50</td><td>49.60</td><td>21.00</td><td>38.00</td><td>53.72</td><td>66.69</td><td>67.91</td></tr></table>

Table 11 VQA Evaluation: Web-MAE trained on MC-2B.
<table><tr><td rowspan="2">Vision Backbone</td><td rowspan="2">Average</td><td colspan="4">General</td><td colspan="4">Knowledge</td><td colspan="4">OCR & Chart</td><td colspan="4">Vision-Centric</td></tr><tr><td>\(MME^P\)</td><td>\(MMB\)</td><td>\(SEED^I\)</td><td>GQA</td><td>\(SQA^I\)</td><td>\(MMMU^V\)</td><td>\(MathVista^M\)</td><td>AI2D</td><td>ChartQA</td><td>OCRBench</td><td>TextVQA</td><td>DocVQA</td><td>MMVP</td><td>RealWorldQA</td><td>\(CV-Bench^{2D}\)</td><td>\(CV-Bench^{3D}\)</td></tr><tr><td>MetaCLIP ViT-1B</td><td>52.30</td><td>1813.70</td><td>68.90</td><td>69.45</td><td>60.35</td><td>74.07</td><td>33.55</td><td>12.70</td><td>64.41</td><td>33.20</td><td>34.59</td><td>52.15</td><td>26.00</td><td>37.33</td><td>52.15</td><td>65.47</td><td>61.83</td></tr><tr><td>MetaCLIP ViT-2B</td><td>53.03</td><td>1787.39</td><td>68.81</td><td>69.54</td><td>61.08</td><td>75.16</td><td>34.66</td><td>20.10</td><td>65.38</td><td>32.80</td><td>32.90</td><td>52.55</td><td>26.00</td><td>37.33</td><td>52.94</td><td>65.19</td><td>64.67</td></tr><tr><td>MetaCLIP ViT-3B</td><td>53.22</td><td>1873.67</td><td>68.72</td><td>70.33</td><td>61.85</td><td>77.29</td><td>32.77</td><td>11.80</td><td>66.35</td><td>32.16</td><td>34.40</td><td>54.58</td><td>26.00</td><td>35.33</td><td>55.55</td><td>65.57</td><td>65.08</td></tr><tr><td>MetaCLIP ViT-5B</td><td>52.52</td><td>1779.03</td><td>70.10</td><td>70.26</td><td>61.53</td><td>72.43</td><td>33.44</td><td>17.90</td><td>66.74</td><td>30.04</td><td>32.20</td><td>52.49</td><td>25.00</td><td>39.33</td><td>54.50</td><td>64.22</td><td>61.16</td></tr><tr><td>MetaCLIP ViT-7B</td><td>52.97</td><td>1827.80</td><td>69.93</td><td>69.47</td><td>61.33</td><td>74.91</td><td>35.55</td><td>16.80</td><td>65.15</td><td>32.12</td><td>32.10</td><td>52.07</td><td>25.00</td><td>39.33</td><td>54.11</td><td>65.08</td><td>63.16</td></tr></table>

Table 12 VQA Evaluation: MetaCLIP trained on MC-2B with 2 billion images seen.
<table><tr><td rowspan="2">Vision Backbone</td><td rowspan="2">Average</td><td colspan="4">General</td><td colspan="4">Knowledge</td><td colspan="4">OCR & Chart</td><td colspan="4">Vision-Centric</td></tr><tr><td>\(MME^P\)</td><td>\(MMB\)</td><td>\(SEED^I\)</td><td>GQA</td><td>\(SQA^I\)</td><td>\(MMMU^V\)</td><td>\(MathVista^M\)</td><td>AI2D</td><td>ChartQA</td><td>OCRBench</td><td>TextVQA</td><td>DocVQA</td><td>MMVP</td><td>RealWorldQA</td><td>\(CV-Bench^{2D}\)</td><td>\(CV-Bench^{3D}\)</td></tr><tr><td>Web-DINO ViT-1B (No Filter)</td><td>49.01</td><td>1731.52</td><td>65.37</td><td>69.92</td><td>62.40</td><td>72.58</td><td>35.33</td><td>12.30</td><td>64.28</td><td>19.20</td><td>9.40</td><td>47.41</td><td>17.00</td><td>37.33</td><td>57.12</td><td>64.80</td><td>63.16</td></tr><tr><td>Web-DINO ViT-1B (Light Filter)</td><td>50.73</td><td>1690.89</td><td>65.54</td><td>70.68</td><td>62.63</td><td>70.99</td><td>33.89</td><td>17.80</td><td>63.69</td><td>26.12</td><td>21.80</td><td>50.56</td><td>20.00</td><td>36.00</td><td>56.86</td><td>64.84</td><td>65.75</td></tr><tr><td>Web-DINO ViT-1B (Heavy Filter)</td><td>49.44</td><td>1593.79</td><td>61.40</td><td>65.34</td><td>59.53</td><td>71.19</td><td>31.33</td><td>14.90</td><td>64.83</td><td>36.92</td><td>24.09</td><td>50.09</td><td>27.00</td><td>21.33</td><td>53.20</td><td>66.53</td><td>63.66</td></tr><tr><td>Web-DINO ViT-2B (No Filter)</td><td>50.77</td><td>1760.80</td><td>68.98</td><td>71.29</td><td>62.89</td><td>73.67</td><td>31.77</td><td>15.90</td><td>67.06</td><td>23.30</td><td>15.60</td><td>49.20</td><td>19.00</td><td>38.00</td><td>57.38</td><td>65.85</td><td>64.41</td></tr><tr><td>Web-DINO ViT-2B (Light Filter)</td><td>53.38</td><td>1768.67</td><td>68.38</td><td>71.80</td><td>63.24</td><td>74.16</td><td>33.88</td><td>31.40</td><td>67.38</td><td>31.40</td><td>27.30</td><td>51.26</td><td>23.00</td><td>39.33</td><td>56.47</td><td>61.13</td><td>65.50</td></tr><tr><td>Web-DINO ViT-2B (Heavy Filter)</td><td>53.65</td><td>1743.56</td><td>65.29</td><td>69.28</td><td>61.19</td><td>74.86</td><td>32.22</td><td>14.50</td><td>67.42</td><td>47.48</td><td>29.40</td><td>52.80</td><td>32.00</td><td>40.00</td><td>54.50</td><td>65.85</td><td>64.50</td></tr></table>

Table 13 VQA Evaluation: Web-DINO trained on text filtered MC-2B.
<table><tr><td rowspan="2">Vision Backbone</td><td rowspan="2">Average</td><td colspan="4">General</td><td colspan="4">Knowledge</td><td colspan="4">OCR & Chart</td><td colspan="4">Vision-Centric</td></tr><tr><td>\(MME^P\)</td><td>\(MMB\)</td><td>\(SEED^I\)</td><td>GQA</td><td>\(SQA^I\)</td><td>\(MMMU^V\)</td><td>\(MathVista^M\)</td><td>AI2D</td><td>ChartQA</td><td>OCRBench</td><td>TextVQA</td><td>DocVQA</td><td>MMVP</td><td>RealWorldQA</td><td>\(CV-Bench^{2D}\)</td><td>\(CV-Bench^{3D}\)</td></tr><tr><td colspan="18">CLIP Models</td></tr><tr><td>MetaCLIP ViT-\(H_{224px}\)</td><td>54.91</td><td>1860.58</td><td>72.93</td><td>70.96</td><td>62.22</td><td>77.88</td><td>36.88</td><td>15.00</td><td>67.32</td><td>35.60</td><td>33.40</td><td>55.10</td><td>29.00</td><td>41.33</td><td>53.46</td><td>68.53</td><td>65.91</td></tr><tr><td>SigLIP ViT-SO400M\(_{224px}\)</td><td>55.36</td><td>1807.30</td><td>72.76</td><td>71.83</td><td>62.68</td><td>76.74</td><td>35.44</td><td>14.00</td><td>68.65</td><td>33.08</td><td>40.20</td><td>56.61</td><td>28.00</td><td>47.33</td><td>56.99</td><td>66.42</td><td>64.66</td></tr><tr><td>SigLIP ViT-SO400M\(_{384px}\)</td><td>59.97</td><td>1892.16</td><td>73.71</td><td>73.00</td><td>63.80</td><td>77.83</td><td>33.88</td><td>20.00</td><td>69.78</td><td>54.24</td><td>46.40</td><td>63.53</td><td>50.00</td><td>46.00</td><td>58.43</td><td>67.37</td><td>66.91</td></tr><tr><td>SigLIP2 ViT-SO400M\(_{224px}\)</td><td>56.32</td><td>1789.26</td><td>73.36</td><td>72.20</td><td>62.60</td><td>74.96</td><td>35.55</td><td>22.40</td><td>69.85</td><td>35.76</td><td>42.00</td><td>59.68</td><td>31.00</td><td>44.00</td><td>54.24</td><td>69.88</td><td>64.16</td></tr><tr><td>SigLIP2 ViT-SO400M\(_{384px}\)</td><td>61.98</td><td>1895.70</td><td>74.57</td><td>72.24</td><td>64.81</td><td>79.27</td><td>36.33</td><td>19.90</td><td>72.24</td><td>59.68</td><td>52.90</td><td>67.15</td><td>54.00</td><td>49.33</td><td>54.77</td><td>70.73</td><td>69.00</td></tr><tr><td colspan="18">SSL Models</td></tr><tr><td>DINOv2 ViT-g\(_{224px}\)</td><td>49.25</td><td>1785.25</td><td>64.86</td><td>70.89</td><td>62.89</td><td>72.03</td><td>32.11</td><td>12.40</td><td>62.37</td><td>17.96</td><td>5.50</td><td>47.06</td><td>15.00</td><td>47.33</td><td>56.33</td><td>65.92</td><td>66.08</td></tr><tr><td>DINOv2 ViT-g\(_{378px}\)</td><td>47.94</td><td>1734.38</td><td>64.26</td><td>71.50</td><td>62.21</td><td>71.04</td><td>33.11</td><td>9.60</td><td>63.08</td><td>17.76</td><td>5.00</td><td>45.59</td><td>15.00</td><td>41.33</td><td>56.47</td><td>63.79</td><td>60.58</td></tr><tr><td>DINOv2 ViT-g\(_{518px}\)</td><td>47.91</td><td>1694.08</td><td>62.45</td><td>70.64</td><td>62.87</td><td>71.29</td><td>33.55</td><td>11.80</td><td>63.37</td><td>18.32</td><td>5.10</td><td>46.27</td><td>15.00</td><td>37.33</td><td>56.60</td><td>65.36</td><td>61.83</td></tr><tr><td>I-JEPA ViT-H\(_{224px}\)</td><td>44.78</td><td>1598.15</td><td>60.01</td><td>64.04</td><td>57.66</td><td>68.91</td><td>34.55</td><td>10.20</td><td>62.07</td><td>16.72</td><td>4.00</td><td>42.99</td><td>14.00</td><td>29.33</td><td>49.93</td><td>57.39</td><td>57.16</td></tr><tr><td>MAE ViT-H\(_{224px}\)</td><td>45.21</td><td>1697.06</td><td>56.87</td><td>56.41</td><td>60.51</td><td>70.74</td><td>32.11</td><td>11.50</td><td>61.30</td><td>17.40</td><td>5.50</td><td>45.38</td><td>14.00</td><td>27.33</td><td>53.46</td><td>61.19</td><td>64.75</td></tr></table>

Table 14 VQA Evaluation: Off-the-shelf CLIP and SSL models.
<table><tr><td rowspan="2">Vision Backbone Model</td><td rowspan="2">Average</td><td colspan="4">General</td><td colspan="4">Knowledge</td><td colspan="4">OCR & Chart</td><td colspan="4">Vision-Centric</td></tr><tr><td>\(MME^P\)</td><td>\(MMB\)</td><td>\(SEED^I\)</td><td>GQA</td><td>\(SQA^I\)</td><td>\(MMMU^V\)</td><td>\(MathVista^M\)</td><td>AI2D</td><td>ChartQA</td><td>OCRBench</td><td>TextVQA</td><td>DocVQA</td><td>MMVP</td><td>RealWorldQA</td><td>\(CV-Bench^{2D}\)</td><td>\(CV-Bench^{3D}\)</td></tr><tr><td>Web-DINO\(_{224px}\)</td><td>55.24</td><td>1811.05</td><td>71.30</td><td>72.14</td><td>64.04</td><td>72.43</td><td>35.66</td><td>15.20</td><td>68.52</td><td>35.52</td><td>36.40</td><td>56.53</td><td>29.00</td><td>46.00</td><td>57.90</td><td>70.53</td><td>62.08</td></tr><tr><td>Web-DINO\(_{378px}\)</td><td>57.43</td><td>1757.06</td><td>70.61</td><td>72.59</td><td>64.50</td><td>72.53</td><td>35.11</td><td>16.10</td><td>67.09</td><td>52.04</td><td>42.19</td><td>61.51</td><td>46.00</td><td>38.00</td><td>59.08</td><td>66.55</td><td>67.16</td></tr><tr><td>Web-DINO\(_{518px}\)</td><td>59.91</td><td>1807.08</td><td>73.79</td><td>72.92</td><td>64.78</td><td>74.36</td><td>34.66</td><td>14.50</td><td>69.43</td><td>57.28</td><td>45.70</td><td>64.48</td><td>53.00</td><td>43.33</td><td>60.52</td><td>70.08</td><td>69.41</td></tr></table>

Table 15 VQA Evaluation: Web-DINO ViT-7B adapted to different resolutions.
<table><tr><td>Benchmark</td><td>Eval</td><td>Citation</td></tr><tr><td>GQA</td><td>General VQA</td><td>Hudson and Manning (2019)</td></tr><tr><td>SEED</td><td>General VQA</td><td>Ge et al. (2023)</td></tr><tr><td>MME</td><td>General VQA</td><td>Fu et al. (2023)</td></tr><tr><td>MMBench</td><td>General VQA</td><td>Liu et al. (2024c)</td></tr><tr><td>AI2D</td><td>Knowledge VQA</td><td>Hiippala et al. (2021)</td></tr><tr><td>ScienceQA</td><td>Knowledge VQA</td><td>Lu et al. (2022)</td></tr><tr><td>MathVista</td><td>Knowledge VQA</td><td>Lu et al. (2023)</td></tr><tr><td>MMMU</td><td>Knowledge VQA</td><td>Yue et al. (2024a)</td></tr><tr><td>TextVQA</td><td>OCR & Chart VQA</td><td>Singh et al. (2019)</td></tr><tr><td>DocVQA</td><td>OCR & Chart VQA</td><td>Mathew et al. (2021)</td></tr><tr><td>ChartQA</td><td>OCR & Chart VQA</td><td>Masry et al. (2022)</td></tr><tr><td>OCRBench</td><td>OCR & Chart VQA</td><td>Liu et al. (2023b)</td></tr><tr><td>MMVP</td><td>Vision-Centric VQA</td><td>Tong et al. (2024c)</td></tr><tr><td>RealWorldQA</td><td>Vision-Centric VQA</td><td>xAI (2024)</td></tr><tr><td>CVBench-2D</td><td>Vision-Centric VQA</td><td>Tong et al. (2024a)</td></tr><tr><td>CVBench-3D</td><td>Vision-Centric VQA</td><td>Tong et al. (2024a)</td></tr><tr><td>ImageNet-1k</td><td>Image Classification</td><td>Deng et al. (2009)</td></tr><tr><td>ADE20K</td><td>Image Segmentation</td><td>Zhou et al. (2019)</td></tr><tr><td>NYU Depth v2</td><td>Depth Estimation</td><td>Silberman et al. (2012)</td></tr></table>

Table 16 List of benchmarks used.
<table><tr><td>Task</td><td>Dataset / Split</td><td>Images</td><td>Retrieval</td><td>Retrieved</td><td>Final</td></tr><tr><td>classification</td><td>ImageNet-22k / -</td><td>14,197,086</td><td>as is</td><td>-</td><td>14,197,086</td></tr><tr><td>classification</td><td>ImageNet-22k / -</td><td>14,197,086</td><td>sample</td><td>56,788,344</td><td>56,788,344</td></tr><tr><td>classification</td><td>ImageNet-1k / train</td><td>1,281,167</td><td>sample</td><td>40,997,344</td><td>40,997,344</td></tr><tr><td>fine-grained classif.</td><td>Caltech 101 / train</td><td>3,030</td><td>cluster</td><td>2,630,000</td><td>1,000,000</td></tr><tr><td>fine-grained classif.</td><td>CUB-200-2011 / train</td><td>5,994</td><td>cluster</td><td>1,300,000</td><td>1,000,000</td></tr><tr><td>fine-grained classif.</td><td>DTD / train1</td><td>1,880</td><td>cluster</td><td>1,580,000</td><td>1,000,000</td></tr><tr><td>fine-grained classif.</td><td>FGVC-Aircraft / train</td><td>3,334</td><td>cluster</td><td>1,170,000</td><td>1,000,000</td></tr><tr><td>fine-grained classif.</td><td>Flowers-102 / train</td><td>1,020</td><td>cluster</td><td>1,060,000</td><td>1,000,000</td></tr><tr><td>fine-grained classif.</td><td>Food-101 / train</td><td>75,750</td><td>cluster</td><td>21,670,000</td><td>1,000,000</td></tr><tr><td>fine-grained classif.</td><td>Oxford-IIIT Pet / trainval</td><td>3,680</td><td>cluster</td><td>2,750,000</td><td>1,000,000</td></tr><tr><td>fine-grained classif.</td><td>Stanford Cars / train</td><td>8,144</td><td>cluster</td><td>7,220,000</td><td>1,000,000</td></tr><tr><td>fine-grained classif.</td><td>SUN397 / train1</td><td>19,850</td><td>cluster</td><td>18,950,000</td><td>1,000,000</td></tr><tr><td>fine-grained classif.</td><td>Pascal VOC 2007 / train</td><td>2,501</td><td>cluster</td><td>1,010,000</td><td>1,000,000</td></tr><tr><td>segmentation</td><td>ADE20K / train</td><td>20,210</td><td>cluster</td><td>20,720,000</td><td>1,000,000</td></tr><tr><td>segmentation</td><td>Cityscapes / train</td><td>2,975</td><td>cluster</td><td>1,390,000</td><td>1,000,000</td></tr><tr><td>segmentation</td><td>Pascal VOC 2012 (seg.) / trainaug</td><td>1,464</td><td>cluster</td><td>10,140,000</td><td>1,000,000</td></tr><tr><td>depth estimation</td><td>Mapillary SLS / train</td><td>1,434,262</td><td>as is</td><td>-</td><td>1,434,262</td></tr><tr><td>depth estimation</td><td>KITTI / train (Eigen)</td><td>23,158</td><td>cluster</td><td>3,700,000</td><td>1,000,000</td></tr><tr><td>depth estimation</td><td>NYU Depth V2 / train</td><td>24,231</td><td>cluster</td><td>10,850,000</td><td>1,000,000</td></tr><tr><td>depth estimation</td><td>SUN RGB-D / train</td><td>4,829</td><td>cluster</td><td>4,870,000</td><td>1,000,000</td></tr><tr><td>retrieval</td><td>Google Landmarks v2 / train (clean)</td><td>1,580,470</td><td>as is</td><td>-</td><td>1,580,470</td></tr><tr><td>retrieval</td><td>Google Landmarks v2 / train (clean)</td><td>1,580,470</td><td>sample</td><td>6,321,880</td><td>6,321,880</td></tr><tr><td>retrieval</td><td>AmsterTime / new</td><td>1,231</td><td>cluster</td><td>960,000</td><td>960,000</td></tr><tr><td>retrieval</td><td>AmsterTime / old</td><td>1,231</td><td>cluster</td><td>830,000</td><td>830,000</td></tr><tr><td>retrieval</td><td>Met / train</td><td>397,121</td><td>cluster</td><td>62,860,000</td><td>1,000,000</td></tr><tr><td>retrieval</td><td>Revisiting Oxford / base</td><td>4,993</td><td>cluster</td><td>3,680,000</td><td>1,000,000</td></tr><tr><td>retrieval</td><td>Revisiting Paris / base</td><td>6,322</td><td>cluster</td><td>3,660,000</td><td>1,000,000</td></tr><tr><td></td><td></td><td></td><td></td><td></td><td>142,109,386</td></tr></table>

Table 17 LVD-142M Data Sources. In contrast to LVD-142M, which relies on highly curated data sources drawn from distributions closely aligned with various downstream evaluation tasks (see the table above from Oquab et al. (2023)), our data curation approach adopts the methodology from MetaCLIP (Xu et al., 2024b), utilizing web data collected from 15 snapshots of CommonCrawl (CC) spanning January 2021 through January 2023.
data/2025/2504_01xxx/2504.01017/images/03b3628c367053f6811908f0390553ae0aa541e5dc7e7140c029979bc63ec1cb.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/09f36aadf6d57ec6e5146836afd31be08edb41ac1fa8040124d9475d86979c0d.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/1080ca2586694c9f119f2145e2479fe2ff2654801e2d995110f0445862f9ac92.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/16e58eafa12c7e343fb56b1d745edcdbc9b7567b85058a6a63bd7e06a4ad4279.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/17f0c4519a4854047faeeba6268629fa14729bc210252552d0fb90c7eda59734.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/1ee81e726dc55bc2e8f5e61163506aae42ae9ab61930af64a9ccaf5eed9192fd.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/2685b7099ba7d9845f0602e1ea522057be3882ee15873fc267693f352bf9c6ec.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/29993a6f49c8e8ca3baedf902a30de13b6b0f3f831b6e81bb9f77defbda07640.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/2c1bb205ba640502292f04fbc11a3988b410fa79894e0c94f7dd4a7739df0f09.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/44df760b408a1576a05e8820a8d385a5b6ee3ac70f404cc41a94a75d6668fb7d.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/452a8747fef027db026b04b5fdfc83e84c738b045d4cac4e069701245ea82454.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/46bc957b9cf826cebcd5fd1d2d29ee0d30c841ce60e4a6151a3a64b0b4979095.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/52cef3eafb04efa56e8f24f74ec59f260c005bc08509be87e76e8ad773b59203.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/56f694cb26be5d6b14b25fea0a8aaedad14a340a11e8d2399397ee2a420e25b5.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/5eb7d4d6cd12645e4cf7db5627ca389afd9c8a83eec3e3809565863e69478ebf.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/68a76b7dda6a14bd1fba2eb3d7ed61724edb2724153628a9a6b91337ef6d27de.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/69122edf5464c13fa8ca84767eccc3c4c622ce5de42b59bad2f669b5788c8298.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/7f2a3a8602b28238d943cbc008b245e09919f64a88264d8c8d8c632ecb3ddff5.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/8a097fd3b6b527139e8ec0d914ba999581052747fcd7d453d33a41e45c36b9ab.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/9a6e48124340d635232f4df4c3c5d2605ad9b42195d78eb9c44a34a0c11c366b.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/a07cb5607c06a7a60cde34a5d036b9a2a333dc4508f80099b606b3f1e82803dd.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/a14940dff63dbb4d4d7ca3c62315b31183e9e049d290a04389cd1f5a512d2851.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/a802d6943ee84f02c32c51d65c3da221cdabd1b62866434b6d353186d744b2a1.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/a9952db727e5530434c3612dc1c415392aa137dc309f1f6b6c30269af39eb020.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/abd55181eaee2123f0505cbfcb48e440928e52b56c6d58c1089780414068448d.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/c81f24dcd6cb97b80ab6c80e804ccdf39dfe613414a6a1c620bdab2c1f1e3a62.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/cb3335700d248395b20bd54520a4792377e5e5a5399f0f5b56471f895714fc34.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/d1f319d1b36201f6bbb6d018b111d5c49b64fd77c0026bd150a57b3dbd769c23.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/d2e71906e5a55591051fa01495d9101d255860238de884359369fc8c4fd72031.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/d89b0aa778344f38dacfb001330d3fa484cb7862b41c831719d87d5cf431289c.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/de7fdcc7ce8a4fbc9cb35d177709631dac95588ed143e9b063d049830ccb43a6.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/e1ec8c4f9f1d03f07ff0270e7d9b9d495560dbd0a1ad78908edf664469d96727.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/e25b234c95961f0921fadc248be0f1b8e8e0df85e52f3aef54f414743819af13.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/images/eefff153b81ea7f6cbe2fae4c7cb4dc94399aeaebbef1cedaeb1af589f495fca.jpg ADDED (Git LFS)
data/2025/2504_01xxx/2504.01017/layout.json ADDED (diff too large to render; see raw diff)
data/2025/2504_01xxx/2504.01205/74d5fa1b-4d2f-4309-8eb0-9e49fed8b7c0_content_list.json ADDED
@@ -0,0 +1,1824 @@
+[
+    {
+        "type": "text",
+        "text": "Epistemic Alignment: A Mediating Framework for User-LLM Knowledge Delivery",
+        "text_level": 1,
+        "bbox": [
+            171,
+            98,
+            823,
+            140
+        ],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "Nicholas Clark, Hua Shen, Bill Howe, Tanushree Mitra",
+        "bbox": [
+            179,
+            165,
+            581,
+            181
+        ],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "University of Washington",
+        "bbox": [
+            181,
+            193,
+            367,
+            209
+        ],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "nclark4@uw.edu",
+        "bbox": [
+            181,
+            222,
+            307,
+            234
+        ],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "Abstract",
+        "text_level": 1,
+        "bbox": [
+            459,
+            271,
+            537,
+            287
+        ],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "Large Language Models (LLMs) increasingly serve as tools for knowledge acquisition, yet users cannot effectively specify how they want information presented. When users request that LLMs \"cite reputable sources,\" \"express appropriate uncertainty,\" or \"include multiple perspectives,\" they discover that current interfaces provide no structured way to articulate these preferences. The result is prompt sharing folklore: community-specific copied prompts passed through trust relationships rather than based on measured efficacy. We propose the Epistemic Alignment Framework, a set of ten challenges in knowledge transmission derived from the philosophical literature of epistemology, concerning issues such as uncertainty expression, evidence quality assessment, and calibration of testimonial reliance. The framework serves as a structured intermediary between user needs and system capabilities, creating a common vocabulary to bridge the gap between what users want and what systems deliver. Through a thematic analysis of custom prompts and personalization strategies shared on online communities where these issues are actively discussed, we find users develop elaborate workarounds to address each of the challenges. We then apply our framework to two prominent model providers, OpenAI and Anthropic, through structured content analysis of their documented policies and product features. Our analysis shows that while these providers have partially addressed the challenges we identified, they fail to establish adequate mechanisms for specifying epistemic preferences, lack transparency about how preferences are implemented, and offer no verification tools to confirm whether preferences were followed. For AI developers, the Epistemic Alignment Framework offers concrete guidance for supporting diverse approaches to knowledge; for users, it works toward information delivery that aligns with their specific needs rather than defaulting to one-size-fits-all approaches.",
+        "bbox": [
+            228,
+            303,
+            769,
+            680
+        ],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "1 Introduction",
+        "text_level": 1,
+        "bbox": [
+            171,
+            704,
+            316,
+            720
+        ],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "Large Language Models (LLMs) have emerged as powerful knowledge tools, yet their flexibility raises the question of how to ensure they deliver information in a way that matches individual preferences about knowledge quality, evidence standards, and perspective diversity. While technical advances have proposed mitigations for hallucination (Ji et al., 2022; Shi et al., 2023; Mishra et al., 2024; Orgad et al., 2024) and uncertainty expression (Yona et al., 2024; Mohri & Hashimoto, 2024), a more subtle problem persists: the misalignment between how users want knowledge presented and the limited mechanisms available to express these preferences. For example, when a medical researcher requests \"recent peer-reviewed sources,\" or a policy analyst seeks \"balanced representation of competing viewpoints,\" they encounter interfaces that reduce these rich requirements to unstructured natural language instructions with inconsistent interpretation and no verification mechanisms.",
+        "bbox": [
+            169,
+            734,
+            826,
+            888
+        ],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "Drawing on the theories of social epistemology and epistemic cognition, we formalize this misalignment as the epistemic alignment problem, and offer four contributions toward",
+        "bbox": [
+            169,
+            895,
+            823,
+            925
+        ],
+        "page_idx": 0
+    },
+    {
+        "type": "header",
+        "text": "Preprint. Under review.",
+        "bbox": [
+            171,
+            32,
+            346,
+            47
+        ],
+        "page_idx": 0
+    },
+    {
+        "type": "aside_text",
+        "text": "arXiv:2504.01205v1 [cs.HC] 1 Apr 2025",
+        "bbox": [
+            22,
+            268,
+            60,
+            700
+        ],
+        "page_idx": 0
+    },
+    {
+        "type": "page_number",
+        "text": "1",
+        "bbox": [
+            493,
+            948,
+            503,
+            959
+        ],
+        "page_idx": 0
+    },
+    {
+        "type": "text",
+        "text": "understanding this challenge. We (1) introduce a framework for evaluating how well systems accommodate user preferences about knowledge delivery, (2) validate our framework through a thematic analysis of user attempts to control knowledge delivery with prompting strategies shared on online platforms, (3) assess current systems against this framework to identify specific interface limitations, and (4) consider requisite interface features that enable users to express and verify their preferences about how information should be presented, sourced, and qualified. Our work suggests that addressing the epistemic alignment problem requires rethinking how users communicate knowledge preferences to LLM-based systems, shifting from imprecise natural language instructions to structured interfaces that support explicit specification of parameters and provide transparent feedback about how these parameters shape knowledge delivery.",
+        "bbox": [
+            169,
+            103,
+            826,
+            257
+        ],
+        "page_idx": 1
+    },
+    {
+        "type": "image",
+        "img_path": "images/77f6190ac970e2a95e81e80babe451ee37aa348ecb80046b4a1ef7de2579ce23.jpg",
+        "image_caption": [
+            "Figure 1: The Epistemic Alignment Framework as a mediating structure between user needs and system implementation. The framework identifies ten challenges across three epistemic dimensions: Epistemic Responsibility (challenges 1-3), Epistemic Personalization (challenges 4-7), and Testimonial Reliability (challenges 8-10). This framework serves as an intermediary layer for evaluating how well systems accommodate diverse epistemic preferences and identifying areas where current interfaces fail to support effective knowledge delivery."
+        ],
+        "image_footnote": [],
+        "bbox": [
+            250,
+            282,
+            748,
+            537
+        ],
+        "page_idx": 1
+    },
+    {
+        "type": "text",
+        "text": "2 Related Work",
+        "text_level": 1,
+        "bbox": [
+            171,
+            676,
+            326,
+            691
+        ],
+        "page_idx": 1
+    },
+    {
+        "type": "text",
+        "text": "We draw from literature in epistemology, the philosophical subarea concerned with knowledge creation and transmission, and epistemic cognition, a topic in educational psychology relating to how people conceptualize knowledge and its acquisition. In particular, we rely on prior work in inquiry and social epistemology that considers how someone ought to responsibly engage with technology for knowledge-related activities.",
+        "bbox": [
+            169,
+            705,
+            823,
+            777
+        ],
+        "page_idx": 1
+    },
+    {
+        "type": "text",
+        "text": "**Inquiry** The object of our epistemic activities is **inquiry** (Hookway, 1994), the self-directed process through which we ascertain knowledge. The goal of inquiry inevitably varies depending on the circumstance. For instance, sometimes we desire a deep, nuanced understanding of an issue; at other times we may be satisfied with a cursory familiarity. The primary vehicle through which we conduct inquiry is by posing questions (Hookway, 2008). The ultimate success or failure of an intellectual investigation in large part relies on the selection and quality of questions (Watson, 2018).",
+        "bbox": [
+            169,
+            790,
+            823,
+            888
+        ],
+        "page_idx": 1
+    },
+    {
+        "type": "text",
+        "text": "Performing inquiry in the digital age presents additional challenges, as the large volume of information mediated by opaque discovery mechanisms, such as web search and recommender",
+        "bbox": [
+            169,
+            895,
+            823,
+            925
+        ],
+        "page_idx": 1
+    },
+    {
+        "type": "header",
+        "text": "Preprint. Under review.",
+        "bbox": [
+            173,
+            32,
+            346,
+            47
+        ],
+        "page_idx": 1
+    },
+    {
+        "type": "page_number",
+        "text": "2",
+        "bbox": [
+            493,
+            948,
+            503,
+            959
+        ],
+        "page_idx": 1
+    },
+    {
+        "type": "text",
+        "text": "systems, may give rise to illusions of understanding, where users have the impression they have performed a thorough investigation when, in fact, their methods are imperfect or shallow (de Ridder, 2022). We consider how these concerns arise when conducting inquiry with LLMs.",
+        "bbox": [
+            171,
+            103,
+            823,
+            159
+        ],
+        "page_idx": 2
+    },
+    {
+        "type": "text",
+        "text": "Inquisitive Meta-Cognitive Tasks To combat illusions of understanding, de Ridder, drawing from Hookway, formulates a set of meta-cognitive tasks requisite to conducting good inquiry (de Ridder, 2022; Hookway, 2003): 1) posing good questions or identifying good problems, 2) identifying good strategies for carrying out inquiries, 3) recognizing when we possess an answer to our question or a solution to our problem, 4) assessing evidence quality for some proposition, and 5) judging when we have considered all or most relevant lines of investigation. These meta-cognitive tasks establish clear criteria for effective inquiry, but in practice, users employ diverse strategies when executing each task, from choosing which questions to pursue to determining when evidence is sufficient. The selected strategies often reflect some combination of practical constraints and personal preferences. We consider user needs when interacting with LLMs for each meta-cognitive task to ensure complete coverage of the inquiry process.",
+        "bbox": [
+            171,
+            174,
+            826,
+            340
+        ],
+        "page_idx": 2
+    },
+    {
+        "type": "text",
+        "text": "Epistemic Cognition The topic of epistemic cognition (Greene et al., 2016) helps explain this variation in inquiry strategies by revealing the connections between beliefs about knowledge and methodology choices. In particular, the AIR framework decomposes the personal epistemology of an individual into their Aims, Ideals, and Reliable processes (Chinn & Rinehart, 2016). Individual assumptions about what constitutes knowledge and how it can be verified directly shape learning strategies, information seeking behaviors, and decision-making processes. These epistemic beliefs vary across cultures (Chan & Elliott, 2004) and disciplines (Hofer, 2000), explaining why users might employ radically different approaches for the same meta-cognitive task. We contend that users bring similarly diverse strategies and requirements when engaging with LLMs.",
+        "bbox": [
+            171,
+            356,
+            826,
+            494
+        ],
+        "page_idx": 2
+    },
+    {
+        "type": "text",
+        "text": "3 Problem Definition",
+        "text_level": 1,
+        "bbox": [
+            173,
+            515,
+            370,
+            530
+        ],
+        "page_idx": 2
+    },
+    {
+        "type": "text",
+        "text": "Epistemology examines questions about the nature, acquisition, and boundaries of knowledge. A pertinent question is how technology affects our ability to conduct responsible knowledge acquisition (Jarvie, 1974). AI functions as an epistemic technology facilitating knowledge activities through computational processes (Alvarado, 2023), and the interactions between humans and AI present epistemological challenges. What epistemological factors influence user trust in AI outputs? How do users validate and evaluate these outputs? How is AI-provided information integrated with existing knowledge?",
+        "bbox": [
+            171,
+            546,
+            823,
+            643
+        ],
+        "page_idx": 2
+    },
+    {
+        "type": "text",
+        "text": "Progress on issues like hallucination (Ji et al., 2022), knowledge conflicts (Xu et al., 2024), and uncertainty expression (Yona et al., 2024) enables exploration of more nuanced challenges, namely, accommodating users' diverse epistemological approaches. The need to understand the interaction between users' epistemic needs and AI systems is becoming more pronounced given the increasing deployment of AI in educational (Ghimire et al., 2024), professional (Teubner et al., 2023), and personal contexts (Kim et al., 2024) where users bring various beliefs about what constitutes valid knowledge.",
+        "bbox": [
+            171,
+            650,
+            826,
+            748
+        ],
+        "page_idx": 2
+    },
+    {
+        "type": "text",
+        "text": "Following a literature review of epistemological frameworks and analysis of user-system interactions, we identified three dimensions as both theoretically grounded and practically significant in preserving agency during knowledge transmission between humans and AI systems: epistemic responsibility (practices which promote accurate knowledge acquisition), epistemic personalization (individual preferences toward inquiry methods), and testimonial reliability (knowledge transmission via personal accounts).",
+        "bbox": [
+            171,
+            755,
+            823,
+            839
+        ],
+        "page_idx": 2
+    },
+    {
+        "type": "text",
+        "text": "Epistemic Responsibility The concept of epistemic responsibility, practices that ensure accurate knowledge acquisition, is central to the design of epistemic technologies, particularly with respect to who shoulders this burden, the user or the system. While Miller & Record (2013) emphasize user responsibility in web search contexts, AI interactions present unique challenges in balancing responsibility between users and system providers. This balance",
+        "bbox": [
+            171,
+            854,
+            823,
+            924
+        ],
+        "page_idx": 2
+    },
+    {
+        "type": "header",
+        "text": "Preprint. Under review.",
+        "bbox": [
+            173,
+            32,
+            346,
+            46
+        ],
+        "page_idx": 2
+    },
+    {
+        "type": "page_number",
+        "text": "3",
+        "bbox": [
+            493,
+            948,
+            503,
+            959
+        ],
+        "page_idx": 2
+    },
+    {
+        "type": "text",
+        "text": "particularly affects how we navigate between two fundamental risks identified by Goldman (1991): false beliefs (error) and lack of true beliefs (ignorance). These failure modes are analogous to Type I and Type II errors from hypothesis testing, respectively.",
+        "bbox": [
+            169,
+            103,
+            823,
+            148
+        ],
+        "page_idx": 3
+    },
+    {
+        "type": "text",
+        "text": "Epistemic Personalization Prior research in epistemic cognition reveals that individuals hold differing views on the nature of knowledge and employ distinct strategies to evaluate knowledge claims (Chinn & Rinehart, 2016). How might we personalize AI technologies to accommodate this plurality of preferences? Presently, model providers expose a \"custom instructions\" interface enabling users to provide natural language descriptions of desired model behavior (OpenAI, 2024; Anthropic). We discuss in Section 6 the inadequacy of this protocol for representing and satisfying diverse knowledge preferences.",
+        "bbox": [
+            169,
+            160,
+            826,
+            261
+        ],
+        "page_idx": 3
+    },
+    {
+        "type": "text",
+        "text": "Testimonial Reliability Drawing on the philosophy of testimony (Lackey, 2011), much of our accumulated knowledge is communicated socially and requires trust in the interlocutor. Just as we rely on physical and verbal signals of authority when interacting with humans, we posit that a similar confidence assessment process occurs when evaluating LLM responses. Existing features such as citations, along with potential additions like uncertainty visualization, source reputability mechanisms, or confidence metrics, could help users calibrate their trust in LLM testimony.",
+        "bbox": [
+            169,
+            273,
+            823,
+            371
+        ],
+        "page_idx": 3
+    },
+    {
+        "type": "text",
+        "text": "Let us define a user's epistemic profile as a multi-dimensional vector $E_{u} = \\langle r_{u},p_{u},t_{u}\\rangle$ , where:",
+        "bbox": [
+            169,
+            377,
+            826,
+            393
+        ],
+        "page_idx": 3
+    },
+    {
+        "type": "list",
+        "sub_type": "text",
+        "list_items": [
+            "- $r_u \\in [0,1]$ represents the user's error-ignorance tradeoff tolerance (Goldman, 1991)—0 prioritizes precision (minimizing false information), while 1 favors recall (maximizing coverage). (Epistemic Responsibility)",
+            "- $p_u \\coloneqq (S, \\leq_u)$ represents a partial order on possible responses where $s_i, s_j \\in S, s_i \\leq_u s_j$ indicates user preference for presentation in $s_j$ over $s_i$ . (Epistemic Personalization)",
+            "- $t_{u} \\in \\{0,1\\}^{n}$ represents preferences for inclusion of $n$ potential assistive features for calibrating reliance, e.g. inclusion of citations. (Testimonial Reliability)"
+        ],
+        "bbox": [
+            210,
+            398,
+            823,
+            497
+        ],
+        "page_idx": 3
+    },
+    {
+        "type": "text",
+        "text": "Similarly, the system's epistemic delivery profile $E_{s}$ may be defined as $E_{s} \\coloneqq \\langle r_{s}, p_{s}, t_{s} \\rangle$ . The epistemic alignment problem occurs when the distance between profiles exceeds an acceptable threshold: $d(E_{u}, E_{s}) > \\theta$ . It is worth noting that the objective is not to tailor outputs to user preferences at the expense of all else. This may lead to sycophancy, as explored in Section 4.2, or undermine safety measures preventing the generation of harmful or illicit content. Rather, the problem is an example of bidirectional human-AI alignment where AI must align with human-specified intended outcomes while humans adapt to the capabilities of AI systems (Shen et al., 2024).",
+        "bbox": [
+            169,
+            503,
+            823,
+            614
+        ],
+        "page_idx": 3
+    },
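The profile formalization in the extracted text above leaves the distance function $d$, the encoding of the preference order $p$, and the threshold $\theta$ unspecified. A minimal sketch, assuming an equal-weight per-component distance and reducing $p$ to a total ranking compared by pairwise disagreement (all hypothetical choices for illustration, not the paper's method):

```python
# Illustrative sketch of the epistemic profiles E_u, E_s and the
# misalignment test d(E_u, E_s) > theta. The distance d, the encoding
# of p, and theta are assumptions made here for concreteness.
from dataclasses import dataclass
from itertools import combinations

@dataclass
class EpistemicProfile:
    r: float                # error-ignorance tolerance in [0, 1]
    p: list[str]            # response presentations, least to most preferred
    t: tuple[int, ...]      # binary flags for n assistive features

def pairwise_disagreement(p1: list[str], p2: list[str]) -> float:
    """Fraction of item pairs that the two rankings order differently."""
    pos1 = {s: i for i, s in enumerate(p1)}
    pos2 = {s: i for i, s in enumerate(p2)}
    pairs = list(combinations(p1, 2))
    flips = sum((pos1[a] - pos1[b]) * (pos2[a] - pos2[b]) < 0 for a, b in pairs)
    return flips / len(pairs) if pairs else 0.0

def d(user: EpistemicProfile, system: EpistemicProfile) -> float:
    """Hypothetical equal-weight distance over the three components."""
    r_gap = abs(user.r - system.r)
    p_gap = pairwise_disagreement(user.p, system.p)
    t_gap = sum(a != b for a, b in zip(user.t, system.t)) / len(user.t)
    return (r_gap + p_gap + t_gap) / 3

user = EpistemicProfile(r=0.2, p=["bullet summary", "cited prose"], t=(1, 1, 0))
system = EpistemicProfile(r=0.6, p=["cited prose", "bullet summary"], t=(1, 0, 0))
THETA = 0.3  # assumed acceptable misalignment threshold
print(d(user, system) > THETA)  # True -> profiles are epistemically misaligned
```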
+    {
+        "type": "text",
+        "text": "4 Epistemic Alignment Framework",
+        "text_level": 1,
+        "bbox": [
+            171,
+            633,
+            488,
+            652
+        ],
+        "page_idx": 3
+    },
+    {
+        "type": "text",
+        "text": "For each user epistemic profile component defined in section 3, we identify challenges in specifying such preferences during LLM interactions. To structure our investigation, we rely on de Ridder (2022)'s meta-cognitive tasks to ensure we isolate challenges at each stage of inquiry. We denote each challenge by (Problem Name), mapping to Figure 1. The result is the Epistemic Alignment Framework, a set of ten challenges to communicate knowledge preferences to LLMs.",
+        "bbox": [
+            169,
+            666,
+            823,
+            750
+        ],
+        "page_idx": 3
+    },
+    {
+        "type": "text",
+        "text": "4.1 Epistemic Responsibility",
+        "text_level": 1,
+        "bbox": [
+            171,
+            766,
+            390,
+            782
+        ],
+        "page_idx": 3
+    },
+    {
+        "type": "text",
+        "text": "In Section 3, we conceptualize epistemic responsibility as a tradeoff between error (false belief), and ignorance (lack of true belief). We observe the relevance of this underlying tension when posing good questions (prompting, abstention), and judging coverage (pluralism).",
+        "bbox": [
+            169,
+            791,
+            823,
+            835
+        ],
+        "page_idx": 3
+    },
+    {
+        "type": "text",
+        "text": "Prompting While natural language interfaces may appear more accessible than traditional query languages, these interfaces risk creating what de Ridder terms an \"illusion of understanding\" (de Ridder, 2022), as the natural dialogue format can mask the expertise required for effective use. Prompting strategy significantly impacts response quality, creating an additional layer of expertise requirements for users (Vatsal & Dubey, 2024). While some",
+        "bbox": [
+            169,
+            848,
+            826,
+            920
+        ],
+        "page_idx": 3
+    },
+    {
+        "type": "header",
+        "text": "Preprint. Under review.",
+        "bbox": [
+            171,
+            32,
+            346,
+            47
+        ],
+        "page_idx": 3
+    },
+    {
+        "type": "page_number",
+        "text": "4",
+        "bbox": [
+            493,
+            948,
+            504,
+            959
+        ],
+        "page_idx": 3
+    },
+    {
+        "type": "text",
+        "text": "advanced prompting techniques fall outside the scope of a typical use case, even typical chat interactions benefit from established techniques such as Chain-of-Thought reasoning (Wei et al., 2022). This dependency on prompting presents a barrier as users must develop domain expertise to extract expected performance (Reducing Need for Prompting Expertise).",
+        "bbox": [
+            169,
+            103,
+            823,
+            161
+        ],
+        "page_idx": 4
+    },
+    {
+        "type": "text",
+        "text": "Abstention LLMs may abstain from responding to queries, either declaring the task insoluble or expressing unwillingness to continue. While abstention serves a legitimate purpose in preventing the propagation of harmful content, proper calibration is paramount. Model providers face a difficult balance: too little abstention risks harmful outputs, while excessive abstention degrades model utility (Well-Calibrated Abstention). Research indicates that LLMs often exhibit over-abstention, refusing to engage with legitimate queries (Varshney et al., 2023). This tendency appears particularly pronounced in instruction-tuned models, where emphasis on safety can lead to undesirable refusal patterns (Cheng et al., 2024; Bianchi et al.; Wallace et al., 2024; Brahman et al., 2024).",
+        "bbox": [
+            169,
+            174,
+            826,
+            301
+        ],
+        "page_idx": 4
+    },
+    {
+        "type": "text",
+        "text": "**Pluralism** Ensuring comprehensive coverage of relevant positions is essential for users to properly assess evidence and reach informed conclusions. This need presents a tension between completeness and accessibility. Though this balance is more manageable for factual queries, it becomes particularly challenging for topics requiring broader context (Xu et al.).",
+        "bbox": [
+            169,
+            315,
+            823,
+            372
+        ],
+        "page_idx": 4
+    },
+    {
+        "type": "text",
+        "text": "To evaluate perspective coverage in LLM responses, we adopt the pluralistic framework proposed by Sorensen et al. (2024) and used by Feng et al. (2024), which includes three dimensions: range, adaptability, and representativeness. (1) Range considers how LLMs determine the appropriate scope of viewpoints (Range of Viewpoints). Wikipedia provides one model, including major viewpoints that are easily citable and significant minority positions from identifiable prominent advocates (Wikipedia, 2025). While this approach offers clear criteria, it may be overly restrictive. (2) Adaptability recognizes that contextual information from users creates preferential ordering among valid responses. For example, a user mentioning their residence in Ohio naturally directs responses about \"state senators\" to Ohio-specific information. We examine the consequences of personalization in Section 4.2. (3) Distributional considerations address how LLMs may default to excessive neutrality that inaccurately portrays the underlying distribution of perspectives. Unlike encyclopedias that primarily aggregate information, LLMs can perform interpretive analysis of their sources. This capability suggests they should go beyond mere neutral presentation to help users understand the relative strength and support for different positions (Hedging Language).",
+        "bbox": [
+            169,
+            378,
+            826,
+            587
+        ],
+        "page_idx": 4
+    },
+    {
+        "type": "text",
+        "text": "4.2 Epistemic Personalization",
+        "text_level": 1,
+        "bbox": [
+            171,
+            603,
+            398,
+            617
+        ],
+        "page_idx": 4
+    },
+    {
+        "type": "text",
+        "text": "In Section 3, we formalize epistemic personalization as a partial order on the set of responses. These preferences are relevant to the meta-cognitive tasks of posing good questions and judging when relevant lines of investigation have been considered.",
+        "bbox": [
+            169,
+            628,
+            823,
+            671
+        ],
+        "page_idx": 4
+    },
+    {
+        "type": "text",
+        "text": "Preference Specification The natural language interface affords flexible application, but relies on the user to adequately communicate their intention to receive relevant results (Liu et al.). Consider the case of normative topics which vary by culture. The appropriate response to \"Is it ok to eat with your left hand?\" is dependent upon the user's geography (Rao et al., 2024), as in general, eating with your left hand is socially acceptable, but in India, it is considered impolite. One approach to modeling these nuances is to decompose natural language problem statements into two components: a set of requirements $\\mathcal{R}$ that solutions must satisfy, and contextual information $\\mathcal{C}$ that indicates preferences between valid solutions (Kobalczyk et al., 2025) where $\\mathcal{C}$ is a partial order on the set of possible responses (Section 3).",
+        "bbox": [
+            169,
+            685,
+            823,
+            825
+        ],
+        "page_idx": 4
+    },
+    {
+        "type": "text",
+        "text": "Two distinct failure modes emerge in this framing. One, the LLM may generate responses that fail to satisfy the requirements, $\\mathcal{R}$ , indicating an incompatibility between the model's interpretation and the user's intent (Navigating Frame-Dependence). Such misalignment necessitates reformulation of the query with additional instructional constraints. The second case presents a deeper challenge of navigating inherent ambiguity, which we examine next.",
+        "bbox": [
+            169,
+            830,
+            823,
+            902
+        ],
+        "page_idx": 4
+    },
+    {
+        "type": "header",
+        "text": "Preprint. Under review.",
+        "bbox": [
+            171,
+            32,
+            346,
+            47
+        ],
+        "page_idx": 4
+    },
+    {
+        "type": "page_number",
+        "text": "5",
+        "bbox": [
+            493,
+            948,
+            504,
+            959
+        ],
+        "page_idx": 4
+    },
+    {
+        "type": "text",
+        "text": "Resolving Ambiguity Suppose a question itself admits multiple valid answers, each satisfying $\\mathcal{R}$ but requiring different contextual interpretations (Ambiguity Resolution). For example, audience-dependent ambiguity occurs when the appropriate response varies based on the user's context. Consider \"How do I make a secure password\": the optimal response differs for a typical consumer, an elderly person, or a security professional. This form of ambiguity creates opportunities for epistemic personalization, where user attributes and interaction history can shape responses to match specific needs and expertise (Zhang et al., 2024) (User Attributes).",
+        "bbox": [
+            169,
+            103,
+            826,
+            214
+        ],
+        "page_idx": 5
+    },
+    {
+        "type": "text",
+        "text": "Sycophancy While such epistemic personalization can improve response relevance and reduce interaction overhead, it risks enabling sycophantic behavior (Minimizing Sycophancy). LLMs exhibit tendencies towards deference, accepting user misinformation to maintain agreeableness (Sharma et al., 2023; Xu et al., 2023).",
+        "bbox": [
+            169,
+            229,
+            826,
+            287
+        ],
+        "page_idx": 5
+    },
+    {
+        "type": "text",
+        "text": "4.3 Testimonial Reliability",
+        "text_level": 1,
+        "bbox": [
+            171,
+            301,
+            375,
+            316
+        ],
+        "page_idx": 5
+    },
+    {
+        "type": "text",
+        "text": "In Section 3, we formalize testimonial reliability as the selection among a set of $n$ features for assisting the user in judging which outputs to accept or reject. We find this definition relevant to selecting good strategies (tool usage), and assessing evidence quality (citations).",
+        "bbox": [
+            169,
+            328,
+            826,
+            372
+        ],
+        "page_idx": 5
+    },
+    {
+        "type": "text",
+        "text": "Tool Usage Good strategies for inquiry require users to critically evaluate their methods in both selecting and applying tools. With respect to LLMs, this evaluation centers on two considerations. First, is an LLM the most appropriate tool for the epistemic task? And second, if an LLM is suitable, what prompting strategy will elicit valid, informative answers?",
+        "bbox": [
+            169,
+            385,
+            823,
+            443
+        ],
+        "page_idx": 5
+    },
+    {
+        "type": "text",
+        "text": "The selection of an appropriate tool requires weighing multiple epistemic virtues. Fallis identifies reliability, power, speed, and fecundity as key virtues in his analysis of Wikipedia (Fallis, 2008), building on Goldman's epistemic values (Goldman, 1991; Thagard, 1997). Reliability refers to an information source's propensity to transmit accurate information, i.e., the probability that a given claim is true. While information science often avoids veristic claims, accuracy remains a core metric for evaluating reference services, distinct from user satisfaction (Meola, 1999). This distinction is a problem of testimonial reliability. Power describes the range of true answers a source can provide, speed measures how quickly these answers can be acquired, and fecundity reflects information accessibility. We argue that few legacy epistemic institutions, like libraries and web search, are competitive with LLMs in terms of power and speed. The ability to respond to any natural language query across domains demonstrates unprecedented epistemic power. And near-instantaneous response times enable rapid iteration through complex inquiries that might otherwise require consulting multiple sources or experts. These advantages must be weighed against reliability concerns.",
+        "bbox": [
+            169,
+            448,
+            826,
+            643
+        ],
+        "page_idx": 5
+    },
+    {
+        "type": "text",
+        "text": "Currently, the task of selecting appropriate tools rests with users, who must evaluate their needs against these virtues. For instance, while an LLM might quickly suggest programming approaches, consulting the documentation may be more reliable for specific implementation details. Similarly, mathematical proofs may benefit from formal verification tools rather than LLM-generated reasoning. We argue that this epistemic responsibility can safely be assumed by model providers with minimal infringement on user agency. Two reasonable approaches are to redirect the user to alternative sources, or integrate with external tools or agentic solutions to enable complex workflows (Effective Routing).",
+        "bbox": [
+            169,
+            648,
+            826,
+            762
+        ],
+        "page_idx": 5
+    },
+    {
+        "type": "text",
+        "text": "Citations When presenting knowledge claims, LLM responses fall into two cases: those with external citations and those without. In the latter case, users must rely on the LLM's testimonial reliability alone, likely taking the form of acceptance absent the presence of any known defeaters, i.e. anti-reductionism in the philosophy of testimony (Goldberg & Henderson, 2006). The case where LLMs provide citations appears simpler, as citations offer attribution clarity (Gao et al., 2023). However, citation use presents its own challenges. Ding et al. (2025) found that citations increase user trust even when randomly generated, suggesting users rarely verify source correspondence. Huang & Chang (2023) further identify citation bias, inaccurate citations, and outdated citations as concerns. To understand these failure modes, we can model citation behavior as an evidence-mapping process. When",
+        "bbox": [
+            169,
+            775,
+            828,
+            917
+        ],
+        "page_idx": 5
+    },
+    {
+        "type": "header",
+        "text": "Preprint. Under review.",
+        "bbox": [
+            173,
+            32,
+            346,
+            46
+        ],
+        "page_idx": 5
+    },
+    {
+        "type": "page_number",
+        "text": "6",
+        "bbox": [
+            493,
+            948,
+            504,
+            959
+        ],
+        "page_idx": 5
+    },
+    {
+        "type": "text",
+        "text": "an LLM provides a claim $\\alpha$ , citations $C$ should serve as verifiable evidence linking $\\alpha$ to authoritative sources. This creates a verification flow:",
+        "bbox": [
+            169,
+            103,
+            823,
+            132
+        ],
+        "page_idx": 6
+    },
+    {
+        "type": "text",
+        "text": "Question $\\rightarrow$ LLM Response $(\\alpha)\\rightarrow$ Citations $(C)\\rightarrow$ Source Evidence $\\rightarrow$ Validation",
+        "bbox": [
+            197,
+            143,
+            797,
+            161
+        ],
+        "page_idx": 6
+    },
+    {
+        "type": "text",
+        "text": "Failure occurs at multiple points in this flow. The citations may not exist or are inaccessible, the citations may exist but do not support $\\alpha$ , or the underlying source being cited is unreliable (Citation & Reference Verification).",
+        "bbox": [
+            169,
+            172,
+            823,
+            215
+        ],
+        "page_idx": 6
+    },
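The verification flow and its three failure points, as described in the extracted text above, can be sketched as a simple checker. This is a hypothetical skeleton only: the fetch step uses the standard library, and the `supports` predicate is a naive word-overlap stand-in, since the text does not prescribe how claim-to-source support or source reliability should be judged.

```python
# Skeletal check of the flow: claim (alpha) -> citations (C) ->
# source evidence -> validation. Each branch below maps to one of the
# three failure points named in the extracted text.
import urllib.request
from typing import Optional
from urllib.error import HTTPError, URLError

def fetch(url: str) -> Optional[str]:
    """Return source text, or None if the citation is missing/inaccessible."""
    try:
        with urllib.request.urlopen(url, timeout=10) as resp:
            return resp.read().decode("utf-8", errors="replace")
    except (HTTPError, URLError, ValueError):
        return None

def supports(claim: str, source_text: str) -> bool:
    """Naive support check: do most substantive claim words appear?"""
    words = [w for w in claim.lower().split() if len(w) > 3]
    hits = sum(w in source_text.lower() for w in words)
    return bool(words) and hits / len(words) >= 0.5

def validate(claim: str, citations: list[str]) -> dict[str, str]:
    """Label each citation against the claim."""
    verdicts = {}
    for url in citations:
        text = fetch(url)
        if text is None:
            verdicts[url] = "missing or inaccessible"     # failure point 1
        elif not supports(claim, text):
            verdicts[url] = "does not support the claim"  # failure point 2
        else:
            # Failure point 3 (unreliable source) still requires a
            # separate reputability judgment not modeled here.
            verdicts[url] = "supports the claim"
    return verdicts
```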
+    {
+        "type": "text",
+        "text": "5 User Knowledge Preferences in Practice",
+        "text_level": 1,
+        "bbox": [
+            171,
+            234,
+            552,
+            251
+        ],
+        "page_idx": 6
+    },
+    {
+        "type": "text",
+        "text": "Method We performed a thematic analysis on custom instructions and prompting techniques collected from Reddit. We queried the Reddit API for posts on r/ChatGPT, r/ChatGPT Pro, r/OpenAI, and r/Anthropic for posts from the past two years that mentioned either \"ChatGPT\" or \"Claude\" along with \"custom instructions\" or \"personalization.\" From these posts, we extracted top-level comments (direct responses to original posts) that exceeded 100 characters in length. Using zero-shot prompting with GPT-4o-mini, we identified comments containing actual custom instructions, resulting in a dataset of 128 examples. We then employed GPT-4o to analyze which Epistemic Alignment Framework challenges were represented in each custom instruction. Two human experts independently validated the quality of these labels, achieving an Inter-Rater Reliability<sup>1</sup> of $\\kappa = 0.8875$ , indicating substantial agreement. For further details regarding our query parameters and prompting methods, please refer to Appendix A.",
+        "bbox": [
+            169,
+            265,
+            826,
+            434
+        ],
+        "page_idx": 6
+    },
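The method above reports inter-rater reliability as Cohen's kappa. For reference, a minimal two-rater kappa over binary per-item labels looks like the sketch below; the example label vectors are invented for illustration and do not reproduce the reported κ = 0.8875.

```python
# Minimal Cohen's kappa for two raters over categorical labels.
from collections import Counter

def cohens_kappa(rater_a: list[int], rater_b: list[int]) -> float:
    n = len(rater_a)
    observed = sum(a == b for a, b in zip(rater_a, rater_b)) / n
    # Chance agreement from each rater's marginal label frequencies.
    ca, cb = Counter(rater_a), Counter(rater_b)
    expected = sum(ca[k] * cb[k] for k in ca.keys() | cb.keys()) / (n * n)
    return (observed - expected) / (1 - expected)

a = [1, 1, 0, 1, 0, 1, 1, 0, 1, 1]  # hypothetical labels, rater A
b = [1, 1, 0, 1, 0, 1, 0, 0, 1, 1]  # hypothetical labels, rater B
print(round(cohens_kappa(a, b), 4))  # ~0.7826 for these invented labels
```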
| 769 |
+
{
|
| 770 |
+
"type": "text",
|
| 771 |
+
"text": "Applying the Epistemic Alignment Framework We found instances of each of the ten epistemic challenges in our framework explicitly addressed via user custom instructions and prompting strategies. Consistent patterns arose, with $92.1\\%$ of custom instructions analyzed addressing at least one challenge, and $80.3\\%$ addressing multiple. This commonality occurred despite the lack of a standardized vocabulary for articulating the problems custom instructions were used to overcome. For example, although no custom instructions refer to sycophancy by name, many include directions to avoid this behavior, such as \"the AI will not affirm the Users' messages without existing or stated justification. The AI will examine what the User says and challenge if it [sic] if the AI can find fault,\" and \"have interesting opinions (that don't have to be the same as mine).\" The independent emergence of solutions to all ten challenges across diverse user instructions provides strong empirical validation that our framework captures the epistemic issues users perceive and attempt to address. In Appendix B we give examples for custom instructions that address each of the epistemic challenges.",
|
| 772 |
+
"bbox": [
|
| 773 |
+
169,
|
| 774 |
+
446,
|
| 775 |
+
826,
|
| 776 |
+
642
|
| 777 |
+
],
|
| 778 |
+
"page_idx": 6
|
| 779 |
+
},
|
| 780 |
+
{
|
| 781 |
+
"type": "text",
|
| 782 |
+
"text": "Folk Theories of Model Behavior Through our analysis of custom instructions, we identify several prominent folk theories addressing epistemic challenges in knowledge discovery via LLMs. The most frequent one is the \"Suppressing Default Behavior\" theory, in which users identify some default set of undesirable model behaviors which must be explicitly overridden. Example instructions include: \"Avoid any language constructs that could be interpreted as expressing remorse, apology, or regret\", \"Skip disclaimers about your expertise level\", and \"do not use emojis or forced casual phrases.\" Although this theory primarily addresses the use of hedging language and abstention, it also includes enforcement of behaviors better aligned with user attributes, such as \"im not american, do not put units in american...NEVER MENTION AMERICAN UNITS SUCH AS Fahrenheit, miles, pounds, yards, inches etc.\"",
|
| 783 |
+
"bbox": [
|
| 784 |
+
169,
|
| 785 |
+
656,
|
| 786 |
+
826,
|
| 787 |
+
797
|
| 788 |
+
],
|
| 789 |
+
"page_idx": 6
|
| 790 |
+
},
|
| 791 |
+
{
|
| 792 |
+
"type": "text",
|
| 793 |
+
"text": "Additionally, the \"Expert Persona\" theory positions roleplaying as a viable solution to multiple epistemic challenges simultaneously. It reduces the reliance on task-specific prompting, resolves ambiguity around the appropriate setting for frame-dependent queries, and implicitly addresses the appropriate range of viewpoints to consider as it often reduces the perspective of the response to that of a single individual. Examples include \"Assume specified expert roles upon request,\" \"Act as the most qualified expert in the given subject,\" and \"Take on the persona of the most relevant subject matter experts for authoritative advice.\"",
|
| 794 |
+
"bbox": [
|
| 795 |
+
169,
|
| 796 |
+
801,
|
| 797 |
+
828,
|
| 798 |
+
902
|
| 799 |
+
],
|
| 800 |
+
"page_idx": 6
|
| 801 |
+
},
|
| 802 |
+
{
|
| 803 |
+
"type": "header",
|
| 804 |
+
"text": "Preprint. Under review.",
|
| 805 |
+
"bbox": [
|
| 806 |
+
171,
|
| 807 |
+
32,
|
| 808 |
+
346,
|
| 809 |
+
47
|
| 810 |
+
],
|
| 811 |
+
"page_idx": 6
|
| 812 |
+
},
|
| 813 |
+
{
|
| 814 |
+
"type": "page_footnote",
|
| 815 |
+
"text": "<sup>1</sup>We computed the IRR score using Cohen's Kappa coefficient measurement.",
|
| 816 |
+
"bbox": [
|
| 817 |
+
189,
|
| 818 |
+
909,
|
| 819 |
+
699,
|
| 820 |
+
924
|
| 821 |
+
],
|
| 822 |
+
"page_idx": 6
|
| 823 |
+
},
|
| 824 |
+
{
|
| 825 |
+
"type": "page_number",
|
| 826 |
+
"text": "7",
|
| 827 |
+
"bbox": [
|
| 828 |
+
493,
|
| 829 |
+
948,
|
| 830 |
+
504,
|
| 831 |
+
959
|
| 832 |
+
],
|
| 833 |
+
"page_idx": 6
|
| 834 |
+
},
|
| 835 |
+
{
|
| 836 |
+
"type": "text",
|
| 837 |
+
"text": "Finally, the \"Parameter Configuration\" theory conceptualizes models as a system with adjustable settings that can be precisely calibrated to the task at hand. Users create elaborate frameworks to tune model behavior: \"I've defined a multi-dimensional preference framework for our interactions: Verbosity (V): $V = 1$ for brief replies; $V = 2$ for detailed answers; $V = 3$ for in-depth discussion...,\" and \"For coding and data analysis related task follow below instructions: coding_and_data_analysis { temperature: 0.2, tone: formal ...}\"",
"bbox": [169, 103, 826, 189],
"page_idx": 7
},
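To make the "Parameter Configuration" folk theory concrete, here is an illustrative rendering of the kind of structured preference block such users describe. Every key mirrors the quoted instructions above and none corresponds to a real platform setting.

```python
# Hypothetical user-defined preference framework, mirroring the quoted
# custom instructions; none of these keys are actual platform settings.
preferences = {
    "verbosity": 2,  # V=1 brief, V=2 detailed, V=3 in-depth discussion
    "coding_and_data_analysis": {
        "temperature": 0.2,
        "tone": "formal",
    },
}
```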
{
"type": "text",
"text": "6 Evaluating Platform Epistemic Policies",
"text_level": 1,
"bbox": [169, 207, 542, 224],
"page_idx": 7
},
{
"type": "text",
"text": "Method We perform content analysis for both OpenAI and Anthropic on their disclosed policies and product features to assess attention to epistemic challenges. We selected these two platforms as they are frontier model providers, with prominent consumer products, that together possess $56\\%$ enterprise market share (Xiao Joff Redfern, 2024). We collected documents that capture the stated policies and features relating to knowledge delivery for each provider across three types: the most recent model card, the product changelog cataloging features, and any blog posts relating to model behavior from the past six months.",
"bbox": [169, 238, 826, 338],
"page_idx": 7
},
{
"type": "text",
"text": "We had two expert annotators label text segments corresponding to each of the ten epistemic challenges. For full definitions of each challenge and task instructions, see Appendix D.",
"bbox": [169, 342, 823, 372],
"page_idx": 7
},
{
"type": "text",
"text": "6.1 OpenAI",
"text_level": 1,
"bbox": [171, 387, 272, 402],
"page_idx": 7
},
{
"type": "text",
"text": "Specified Model Behavior The OpenAI Model Spec (OpenAI) includes intended epistemic behaviors across their model family. Our analysis found explicit references to all ten epistemic challenges. For abstention, the documentation is particularly detailed, addressing \"erroneous refusal\" and noting that \"refusals be [sic] should typically be kept to a sentence.\" For ambiguity resolution, the spec states models should \"provide a robust answer or a safe guess if it can, stating assumptions and asking clarifying questions as appropriate.\" Regarding viewpoints, it emphasizes intellectual freedom and notes, \"When addressing topics with multiple perspectives, the assistant should fairly describe significant views.\" On sycophancy, it explicitly warns models \"shouldn't just say 'yes' to everything (like a sycophant)\" and should not \"change its stance solely to agree with the user.\" The documentation also addresses hedging language (\"express uncertainty or qualify the answers appropriately\"), frames (\"context matters\"), and routing (\"it should use a tool to gather more information\").",
"bbox": [169, 412, 826, 582],
"page_idx": 7
},
{
"type": "text",
"text": "However, we identified several gaps in the specification: while it mentions \"reliable sources,\" it lacks detailed mechanisms for citation verification; despite acknowledging cultural sensitivity, it provides limited guidance for addressing frame-dependent queries; and though it discusses user goals, it offers minimal approaches to epistemic personalization. Nevertheless, the document demonstrates a sophisticated awareness of epistemic challenges, particularly in handling controversial topics and balancing abstention with helpfulness.",
"bbox": [169, 585, 826, 672],
"page_idx": 7
},
{
"type": "text",
"text": "Interface and Features ChatGPT's interface provides several features supporting epistemic customization. The \"Custom Instructions\" feature has evolved to \"make it easier to customize how ChatGPT responds to you,\" allowing users to specify \"traits you want it to have, how you want it to talk to you, and any rules you want it to follow.\" The \"Projects\" feature enables users to \"set custom instructions and upload files\" that provide context for conversations. Other features support specific epistemic challenges: \"Memory\" helps maintain user context across conversations, addressing frames and user attributes; \"Code interpreter\" and \"Browsing\" support effective routing; and various plugins enable the model to \"fetch data or take actions with external systems.\"",
"bbox": [169, 685, 828, 811],
"page_idx": 7
},
{
"type": "text",
"text": "Despite these improvements, ChatGPT still lacks structured controls for epistemic dimensions. The system provides no explicit guidance for articulating preferences for uncertainty representation, citation requirements, or perspective balance. Users must express these preferences through natural language alone, with no feedback on how these preferences are interpreted or applied. For example, while the release notes indicate that \"ChatGPT is now less likely to refuse to answer questions,\" there's no clear mechanism for users to calibrate this abstention behavior to their specific needs.",
"bbox": [169, 816, 828, 917],
"page_idx": 7
},
{
"type": "text",
"text": "6.2 Anthropic",
"text_level": 1,
"bbox": [171, 103, 285, 118],
"page_idx": 8
},
{
"type": "text",
"text": "**Specified Model Behavior** Our analysis reveals that Claude's documentation addresses several epistemic challenges, though with varying depth. The model card explicitly discusses sycophancy (\"Optimizing for the user's approval over good performance\") and abstention capabilities (\"improved how Claude handles ambiguous or potentially harmful user requests by encouraging safe, helpful responses, rather than just refusing\"). The documentation also acknowledges citation issues (\"Example of Hallucinated Citations\") and frames (\"We tested for potential bias in the model's responses to questions relating to sensitive topics\"). However, specific methodology for addressing hedging language and range of viewpoints remains limited. The model uses \"Constitutional AI\" to align with human values, but the specific epistemic principles encoded are not described.",
"bbox": [169, 128, 826, 270],
"page_idx": 8
},
{
"type": "text",
"text": "Interface and Features Claude's interface provides several features to support epistemic customization. \"Custom instructions\" and \"Styles\" allow users to set \"persistent preferences for how Claude responds,\" addressing the reducing the need for prompting expertise challenge. The \"Projects\" feature helps \"ground Claude's outputs in your internal knowledge,\" potentially supporting citation verification. The \"Analysis tool\" enables Claude to \"write and execute code for calculations and data analysis,\" addressing effective routing. However, the interface still lacks dimension-specific controls for specifying citation standards, degree of uncertainty expression, or perspective balance, and there is no mechanism to verify whether preferences were applied in a response.",
"bbox": [169, 282, 826, 409],
"page_idx": 8
},
{
"type": "text",
"text": "7 Discussion & Conclusion",
"text_level": 1,
"bbox": [171, 428, 418, 444],
"page_idx": 8
},
{
"type": "text",
"text": "We have outlined the Epistemic Model Behavior framework (Figure 1) as a means to facilitate the construction and evaluation of frontier LLM systems, and when applicable AI systems broadly, with respect to how they assist users in completing the inquiry process. The framework addresses thorny epistemological issues that emerge during knowledge-seeking activities. Grounded in established areas of epistemology, our approach recognizes the material correspondence between traditional problems of knowledge creation, transmission, and evaluation, and challenges faced by epistemic technologies such as LLMs. This problem space unifies safety research and commercial interests through shared concerns about knowledge representation and uncertainty. Our framework encapsulates a broad array of present issues while avoiding domain-specific problems, making it a versatile tool for evaluation across contexts.",
"bbox": [169, 459, 826, 612],
"page_idx": 8
},
{
"type": "text",
"text": "Our analysis of frontier model providers reveals substantial room for improvement, although there exists intentionality toward addressing some evaluatory dimensions. Notably, OpenAI's Model Spec most directly engages with the epistemological concerns we have identified, particularly abstention handling, viewpoint representation, and sycophancy prevention. Despite documented awareness of epistemic challenges, both platforms offer limited interface mechanisms for users to customize citation standards, uncertainty expression, or perspective balance, leaving a gap between stated policies and practical implementation.",
"bbox": [169, 619, 826, 717],
"page_idx": 8
},
{
"type": "text",
"text": "We propose a redesigned interface paradigm addressing these limitations through four components: (1) a structured preference specification interface organized around our framework's dimensions, offering controls for settings like citation requirements, uncertainty representation, and perspective diversity that persist across sessions while remaining adjustable; (2) transparency annotations that indicate how preferences influence responses, with visual indicators highlighting uncertainty expression, citation support, or perspective incorporation; (3) adaptive personalization that learns consistent user patterns across epistemic dimensions, suggesting refinements that better match observed behavior while maintaining user control; and (4) contextual guidance and examples that help users understand the tradeoffs between different epistemic settings, encouraging informed preference selection. These design principles could be implemented as extensions to existing interfaces with minimal disruption to current workflows while substantially improving epistemic agency and transparency.",
"bbox": [169, 723, 828, 891],
"page_idx": 8
},
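A minimal sketch of what component (1) above, a structured epistemic preference specification, might look like in code. All field names and defaults are illustrative assumptions, not an implemented interface.

```python
# Hypothetical preference specification organized around the framework's
# dimensions; every field and default here is an illustrative assumption.
from dataclasses import dataclass

@dataclass
class EpistemicPreferences:
    citation_requirement: str = "peer_reviewed"  # citation & reference verification
    uncertainty_style: str = "explicit"          # hedging / uncertainty expression
    min_viewpoints: int = 3                      # range of viewpoints to present
    abstention_threshold: float = 0.7            # refuse below this confidence
    persist_across_sessions: bool = True

# Preferences would persist across sessions while remaining adjustable.
prefs = EpistemicPreferences(min_viewpoints=2)
```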
{
"type": "text",
"text": "References",
"text_level": 1,
"bbox": [173, 101, 269, 117],
"page_idx": 9
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"Ramón Alvarado. AI as an Epistemic Technology. Science and Engineering Ethics, 29(5), 10 2023. ISSN 14715546. doi: 10.1007/s11948-023-00451-3.",
"Anthropic. Understanding Claude's Personalization Features Anthropic Help Center. URL https://support.anthropic.com/en/articles/10185728-understanding-claude-s-personalization-features.",
"Federico Bianchi, Mirac Suzgun, Giuseppe Attanasio, Paul Röttger, Dan Jurafsky, Tatsunori Hashimoto, and James Zou. SAFETY-TUNED LLAMAS: LESSONS FROM IMPROVING THE SAFETY OF LARGE LANGUAGE MODELS THAT FOLLOW INSTRUCTIONS. Technical report. URL https://github.com/vinid/safety-tuned-llamas.",
"Faeze Brahman, Sachin Kumar, Vidhisha Balachandran, Pradeep Dasigi, Valentina Pyatkin, Abhilasha Ravichander, Sarah Wiegreff, Nouha Dziri, Khyathi Chandu, Jack Hessel, Yulia Tsvetkov, Noah A. Smith, Yejin Choi, and Hannaneh Hajishirzi. The Art of Saying No: Contextual Noncompliance in Language Models. 7 2024. URL http://arxiv.org/abs/2407.12043.",
"Kwok Wai Chan and Robert G. Elliott. Epistemological Beliefs Across Cultures: Critique and analysis of beliefs structure studies, 4 2004. ISSN 01443410.",
"Qinyuan Cheng, Tianxiang Sun, Xiangyang Liu, Wenwei Zhang, Zhangyue Yin, Shimin Li, Linyang Li, Zhengfu He, Kai Chen, and Xipeng Qiu. Can AI Assistants Know What They Don't Know? Technical report, 2024. URL https://github.",
"Clark Chinn and Ronald Rinehart. Epistemic cognition and philosophy: Developing a new framework for epistemic cognition. In Jeffrey A. Greene, William A. Sandoval, and Ivar Bräten (eds.), Handbook of Epistemic Cognition, pp. 460-478. Routledge, 1 2016. ISBN 9781317746874. doi: 10.4324/9781315795225.",
"Jeroen de Ridder. Online Illusions of Understanding. Social Epistemology, 2022. ISSN 14645297. doi: 10.1080/02691728.2022.2151331.",
"Yifan Ding, Matthew Facciani, Amrit Poudel, Ellen Joyce, Salvador Aguinaga, Balaji Veeramani, Sanmitra Bhattacharya, and Tim Weninger. Citations and Trust in LLM Generated Responses. 1 2025. URL http://arxiv.org/abs/2501.01303.",
"Don Fallis. Toward an epistemology of Wikipedia. Journal of the American Society for Information Science and Technology, 59(10):1662-1674, 8 2008. ISSN 15322882. doi: 10.1002/asi.20870.",
"Shangbin Feng, Taylor Sorensen, Yuhan Liu, Jillian Fisher, Chan Young Park, Yejin Choi, and Yulia Tsvetkov. Modular Pluralism: Pluralistic Alignment via Multi-LLM Collaboration. 6 2024. URL http://arxiv.org/abs/2406.15951.",
"Tianyu Gao, Howard Yen, Jiatong Yu, and Danqi Chen. Enabling Large Language Models to Generate Text with Citations. 5 2023. URL http://arxiv.org/abs/2305.14627.",
"Aashish Ghimire, James Prather, and John Edwards. Generative AI in Education: A Study of Educators' Awareness, Sentiments, and Influencing Factors. 3 2024. URL http://arxiv.org/abs/2403.15586.",
"Sanford Goldberg and David Henderson. Monitoring and Anti-Reductionism in the Epistemology of Testimony. Philosophy and Phenomenological Research, 72(3):600-617, 5 2006. ISSN 0031-8205. doi: 10.1111/j.1933-1592.2006.tb00586.x.",
"Alvin I Goldman. Knowledge in a social world, volume 36. Oxford University Press, 1991.",
"Jeffrey Alan. Greene, William A.. Sandoval, and Ivar. Bra $\\text{念}$ ten. Handbook of epistemic cognition. Routledge, Taylor & Francis Group, 2016. ISBN 9781138013407."
],
"bbox": [171, 125, 826, 901],
"page_idx": 9
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"Barbara K. Hofer. Dimensionality and Disciplinary Differences in Personal Epistemology. Contemporary Educational Psychology, 25(4):378-405, 2000. ISSN 0361476X. doi: 10.1006/ ceps.1999.1026.",
"Christopher Hookway. Cognitive Virtues and Epistemic Evaluations. International Journal of Philosophical Studies, 2(2):211-227, 9 1994. ISSN 14664542. doi: 10.1080/09672559408570791.",
"Christopher Hookway. How to be a Virtue Epistemologist. In Michael Raymond DePaul and Linda Trinkaus Zagzebski (eds.), *Intellectual virtue: perspectives from ethics and epistemology*. Oxford University Press, 2003.",
"Christopher Hookway. QUESTIONS, EPISTEMOLOGY, AND INQUIRIES. Technical report, 2008.",
"Jie Huang and Kevin Chen-Chuan Chang. Citation: A Key to Building Responsible and Accountable Large Language Models. 7 2023. URL http://arxiv.org/abs/2307.02185.",
"I C Jarvie. The Social Character of Technological Problems. In Friedrich Rapp (ed.), Contributions to a Philosophy of Technology: Studies in the Structure of Thinking in the Technological Sciences, pp. 86-92. Springer Netherlands, Dordrecht, 1974. ISBN 978-94-010-2182-1. doi: 10.1007/978-94-010-2182-1{\\_}8. URL https://doi.org/10.1007/978-94-010-2182-1_8.",
"Ziwei Ji, Nayeon Lee, Rita Frieske, Tiezheng Yu, Dan Su, Yan Xu, Etsuko Ishii, Yejin Bang, Delong Chen, Ho Shu Chan, Wenliang Dai, Andrea Madotto, and Pascale Fung. Survey of Hallucination in Natural Language Generation. 2 2022. doi: 10.1145/3571730. URL http://arxiv.org/abs/2202.03629http://dx.doi.org/10.1145/3571730.",
"Yubin Kim, Xuhai Xu, Daniel McDuff, Cynthia Breazeal, and Hae Won Park. Health-LLM: Large Language Models for Health Prediction via Wearable Sensor Data. 1 2024. URL http://arxiv.org/abs/2401.06866.",
"Katarzyna Kobalczyk, Nicolas Astorga, Tennison Liu, and Mihaela van der Schaar. Active Task Disambiguation with LLMs. 2 2025. URL http://arxiv.org/abs/2502.04485.",
"Jennifer Lackey. Testimony: acquiring knowledge from others. In Alvin I Goldman and Dennis Whitcomb (eds.), Social Epistemology: Essential Readings. Oxford University Press, 2011.",
"Alisa Liu, Zhaofeng Wu, Julian Michael, Alane Suhr, Peter West, Alexander Koller, Swabha Swayamdipta, Noah A Smith, Yejin Choi, and Paul G Allen. We're Afraid Language Models Aren't Modeling Ambiguity. Technical report. URL https://github.com/.",
"M. Meola. Review of \"Knowledge in a social world\". Oxford University Press, 1999. ISBN 0198237774.",
"Boaz Miller and Isaac Record. JUSTIFIED BELIEF IN A DIGITAL AGE: ON THE EPISTEMIC IMPLICATIONS OF SECRET INTERNET TECHNOLOGIES. Technical report, 2013.",
"Abhika Mishra, Akari Asai, Vidhisha Balachandran, Yizhong Wang, Graham Neubig, Yulia Tsvetkov, and Hannaneh Hajishirzi. Fine-grained Hallucination Detection and Editing for Language Models. 1 2024. URL http://arxiv.org/abs/2401.06855.",
"Christopher Mohri and Tatsunori Hashimoto. Language Models with Conformal Factuality Guarantees. 2 2024. URL http://arxiv.org/abs/2402.10978.",
"OpenAI. OpenAI Model Spec. URL https://model-spec.openai.com/2025-02-12.html.",
"OpenAI. Custom instructions for ChatGPT, 3 2024. URL https://openai.com/index/custom-instructions-for-chatgpt/."
],
"bbox": [171, 102, 826, 902],
"page_idx": 10
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"Hadas Orgad, Michael Toker, Zorik Gekhman, Roi Reichart, Idan Szpektor, Hadas Kotek, and Yonatan Belinkov. LLMs Know More Than They Show: On the Intrinsic Representation of LLM Hallucinations. 10 2024. URL http://arxiv.org/abs/2410.02707.",
"Abhinav Rao, Akhila Yerukola, Vishwa Shah, Katharina Reinecke, and Maarten Sap. NormAd: A Framework for Measuring the Cultural Adaptability of Large Language Models. 4 2024. URL http://arxiv.org/abs/2404.12464.",
"Mrinank Sharma, Meg Tong, Tomasz Korbak, David Duvenaud, Amanda Askell, Samuel R. Bowman, Newton Cheng, Esin Durmus, Zac Hatfield-Dodds, Scott R. Johnston, Shauna Kravec, Timothy Maxwell, Sam McCandlish, Kamal Ndousse, Oliver Rausch, Nicholas Schiefer, Da Yan, Miranda Zhang, and Ethan Perez. Towards Understanding Sycophancy in Language Models. 10 2023. URL http://arxiv.org/abs/2310.13548.",
"Hua Shen, Tiffany Knearem, Reshmi Ghosh, Kenan Alkiek, Kundan Krishna, Yachuan Liu, Ziqiao Ma, Savvas Petridis, Yi-Hao Peng, Li Qiwei, Sushrita Rakshit, Chenglei Si, Yutong Xie, Jeffrey P. Bigham, Frank Bentley, Joyce Chai, Zachary Lipton, Qiaozhu Mei, Rada Mihalcea, Michael Terry, Diyi Yang, Meredith Ringel Morris, Paul Resnick, and David Jurgens. Towards Bidirectional Human-AI Alignment: A Systematic Review for Clarifications, Framework, and Future Directions. 6 2024. URL http://arxiv.org/abs/2406.09264.",
"Weijia Shi, Xiaochuang Han, Mike Lewis, Yulia Tsvetkov, Luke Zettlemoyer, and Scott Wen-tau Yih. Trusting Your Evidence: Hallucinate Less with Context-aware Decoding. 5 2023. URL http://arxiv.org/abs/2305.14739.",
"Taylor Sorensen, Jared Moore, Jillian Fisher, Mitchell Gordon, Niloofar Mireshghallah, Christopher Michael Rytting, Andre Ye, Liwei Jiang, Ximing Lu, Nouha Dziri, Tim Althoff, and Yejin Choi. A Roadmap to Pluralistic Alignment. 2 2024. URL http://arxiv.org/abs/2402.05070.",
"Timm Teubner, Christoph M. Flath, Christof Weinhardt, Wil van der Aalst, and Oliver Hinz. Welcome to the Era of ChatGPT et al.: The Prospects of Large Language Models, 4 2023. ISSN 18670202.",
"Paul Thagard. Internet Epistemology: Contributions of New Information Technologies to Scientific Research. Unpublished manuscript. Technical report, 1997. URL https://web-archive.southampton.ac.uk/cogprints.org/674/1/Epistemology.html.",
"Neeraj Varshney, Pavel Dolin, Agastya Seth, and Chitta Baral. The Art of Defending: A Systematic Evaluation and Analysis of LLM Defense Strategies on Safety and Over-Defensiveness. 12 2023. URL http://arxiv.org/abs/2401.00287.",
"Shubham Vatsal and Harsh Dubey. A Survey of Prompt Engineering Methods in Large Language Models for Different NLP Tasks. 7 2024. URL http://arxiv.org/abs/2407.12994.",
"Eric Wallace, Kai Xiao, Reimar Leike, Lilian Weng, Johannes Heidecke, and Alex Beutel. The Instruction Hierarchy: Training LLMs to Prioritize Privileged Instructions. 4 2024. URL http://arxiv.org/abs/2404.13208.",
"Lani Watson. Educating for Good Questioning: a Tool for Intellectual Virtues Education. Acta Analytica, 33(3):353-370, 9 2018. ISSN 18746349. doi: 10.1007/s12136-018-0350-y.",
"Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed Chi, Quoc Le, and Denny Zhou. Chain-of-Thought Prompting Elicits Reasoning in Large Language Models. 1 2022. URL http://arxiv.org/abs/2201.11903.",
"Wikipedia. Wikipedia:Neutral point of view, 3 2025. URL https://en.wikipedia.org/w/index.php?title=Wikipedia:Neutral_point_of_view&oldid=1279876337.",
"Derek Tim Tully Xiao Joff Redfern. 2024: The State of Generative AI in the Enterprise, 11 2024. URL https://menlovc.com/2024-the-state-of-generative-ai-in-the-enterprise/."
],
"bbox": [171, 102, 825, 924],
"page_idx": 11
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"Rongwu Xu, Xuan Qi, Zehan Qi, Wei Xu, and Zhijiang Guo. DEBATEQA: Evaluating Question Answering on Debatable Knowledge. Technical report. URL https://github.com/pillowsofwind/.",
"Rongwu Xu, Brian S. Lin, Shujian Yang, Tianqi Zhang, Weiyan Shi, Tianwei Zhang, Zhixuan Fang, Wei Xu, and Han Qiu. The Earth is Flat because...: Investigating LLMs' Belief towards Misinformation via Persuasive Conversation. 12 2023. URL http://arxiv.org/abs/2312.09085.",
"Rongwu Xu, Zehan Qi, Zhijiang Guo, Cunxiang Wang, Hongru Wang, Yue Zhang, and Wei Xu. Knowledge Conflicts for LLMs: A Survey. 3 2024. URL http://arxiv.org/abs/2403.08319.",
"Gal Yona, Roee Aharoni, and Mor Geva. Can Large Language Models Faithfully Express Their Intrinsic Uncertainty in Words? 5 2024. URL http://arxiv.org/abs/2405.16908.",
"Zhehao Zhang, Ryan A. Rossi, Branislav Kveton, Yijia Shao, Diyi Yang, Hamed Zamani, Franck Dernoncourt, Joe Barrow, Tong Yu, Sungchul Kim, Ruiyi Zhang, Jiumiang Gu, Tyler Derr, Hongjie Chen, Junda Wu, Xiang Chen, Zichao Wang, Subrata Mitra, Nedim Lipka, Nesreen Ahmed, and Yu Wang. Personalization of Large Language Models: A Survey. 10 2024. URL http://arxiv.org/abs/2411.00027."
],
"bbox": [171, 102, 828, 381],
"page_idx": 12
},
{
"type": "text",
"text": "A Reddit Data Collection",
"text_level": 1,
"bbox": [171, 405, 410, 421],
"page_idx": 12
},
{
"type": "table",
"img_path": "images/49c8c0f37c9ba594b6a1309c80d858cee3fe67b8cc8b408f04510fa8e560f0e0.jpg",
"table_caption": ["Table 1: Reddit Data Collection Parameters"],
"table_footnote": [],
"table_body": "<table><tr><td>Parameter</td><td>Value</td></tr><tr><td>Search query</td><td>(ChatGPT OR chatgpt ORchatGPT) AND (Custom Instruction OR custom instruction OR CUSTOM INSTRUCTION OR Custom Instructions OR custom instructions OR CUSTOM INSTRUCTIONS OR Personalization OR personalization OR PERSONALIZATION OR personalize OR Personalize OR PERSONALIZE)</td></tr><tr><td>Keyword filters</td><td>custom instruction, custom instructions, personalization, prompt engineering</td></tr><tr><td>Subreddits</td><td>ChatGPT, ChatGPTPro, ClaudeAI, OpenAI</td></tr><tr><td>Time frame</td><td>Posts from past 2 years</td></tr><tr><td>Comment filter</td><td>Comments longer than 100 characters</td></tr><tr><td>Instruction filter</td><td>Extracted instructions longer than 10 characters</td></tr></table>",
"bbox": [197, 465, 799, 652],
"page_idx": 12
},
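A minimal sketch of how the comment and instruction filters in Table 1 could be applied, assuming posts have already been fetched; the `comments` records are a hypothetical stand-in for whatever a Reddit API client returns.

```python
# Sketch of the Table 1 filters; `comments` is a hypothetical stand-in
# for records returned by a Reddit API client.
KEYWORDS = ["custom instruction", "custom instructions",
            "personalization", "prompt engineering"]

comments = [
    {"body": "I use this custom instruction: always ask clarifying "
             "questions before answering, and cite sources for factual claims."},
    {"body": "nice"},
]

def keep_comment(body: str) -> bool:
    # Comment filter: longer than 100 characters and mentions a keyword.
    lowered = body.lower()
    return len(body) > 100 and any(k in lowered for k in KEYWORDS)

def keep_instruction(instruction: str) -> bool:
    # Instruction filter: keep extracted instructions over 10 characters.
    return len(instruction) > 10

filtered = [c for c in comments if keep_comment(c["body"])]
```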
{
"type": "text",
"text": "Prompt 1: Custom Instruction Extraction",
"text_level": 1,
"bbox": [204, 676, 511, 689],
"page_idx": 12
},
{
"type": "text",
"text": "If the comment contains a user's custom instruction for personalizing an LLM, return the instruction. If not, return an empty string. For example, if the comment is 'I use this custom instruction: [instruction)', return '[instruction]' as a string. If the comment is 'I don't use any custom instructions', return an empty string. Comment: {comment}",
"bbox": [202, 690, 792, 760],
"page_idx": 12
},
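One way Prompt 1 could be applied to each filtered comment, sketched with the OpenAI Python client; the model name is an assumption, since the record above does not say which model performed the extraction, and the prompt string is abridged.

```python
# Sketch: apply Prompt 1 to a comment via the OpenAI chat completions API.
# The model name is an assumption; the paper does not specify it here.
from openai import OpenAI

PROMPT_1 = (
    "If the comment contains a user's custom instruction for personalizing "
    "an LLM, return the instruction. If not, return an empty string. "  # abridged
    "Comment: {comment}"
)

client = OpenAI()

def extract_instruction(comment: str) -> str:
    response = client.chat.completions.create(
        model="gpt-4o",  # assumption, not stated in the record above
        messages=[{"role": "user", "content": PROMPT_1.format(comment=comment)}],
    )
    return (response.choices[0].message.content or "").strip()
```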
{
"type": "text",
"text": "Prompt 2: Identify Epistemic Challenges in Custom Instructions",
"text_level": 1,
"bbox": [200, 103, 673, 119],
"page_idx": 13
},
{
"type": "text",
"text": "You are an expert at analyzing language model instructions and prompts. Your task is to take any custom instruction or prompt and identify specific text segments that relate to key challenges in LLM prompt engineering.",
"bbox": [200, 125, 789, 167],
"page_idx": 13
},
{
"type": "text",
"text": "Instructions:",
"text_level": 1,
"bbox": [202, 174, 295, 186],
"page_idx": 13
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"1. Read the provided prompt or instruction carefully.",
"2. Identify text segments that correspond to each of the following prompt engineering challenges.",
"3. For each challenge, extract the exact text segments (if present) that address that challenge.",
"4. Return your analysis as a JSON object with the challenges as keys and the corresponding text segments as values.",
"5. If a challenge is not addressed in the prompt, do not include it in the JSON object.",
"6. Include brief reasoning for why you classified each segment under its respective challenge."
],
"bbox": [202, 188, 787, 340],
"page_idx": 13
},
{
"type": "text",
"text": "Challenges to Identify:",
"text_level": 1,
"bbox": [202, 348, 367, 362],
"page_idx": 13
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"- Reducing_Prompting_Expertise: Text that aims to reduce reliance on clever prompting techniques or makes the model more accessible to users without prompt engineering expertise.",
"- Well_Calibrated_Absention: Text that guides when the model should refuse to answer or acknowledge uncertainty.",
"- Range_of_Veepoints: Text that encourages including diverse perspectives or considering multiple angles.",
"- Hedging Language: Text that addresses excessive neutrality, equivocation, or overly cautious language.",
"- Identifying Frame Dependence: Text that guides adaptation to cultural/contextual norms or situational framing.",
"- AmbiguityResolution: Text that addresses how to clarify unclear or context-dependent queries.",
"- User_Attributes: Text that guides understanding user context, needs, or characteristics.",
"- Minimizing_Sycophancy: Text that addresses management of incorrect assumptions or inputs from users.",
"- Effective_Routing: Text that guides use of tools, API calls, or external systems.",
"- Citation Reference Verification: Text that addresses source attribution, fact-checking, or verification procedures."
],
"bbox": [202, 363, 789, 638],
"page_idx": 13
},
{
"type": "text",
"text": "Output Format:",
"text_level": 1,
"bbox": [202, 647, 318, 660],
"page_idx": 13
},
{
"type": "text",
"text": "Return your analysis as a JSON object with the following structure:",
"bbox": [202, 660, 691, 675],
"page_idx": 13
},
{
"type": "code",
"sub_type": "code",
"code_caption": [],
"code_body": "{ \"Reducing_Prompting_Expertise\": { \"text\": [\"text segment 1\", \"text segment 2\"], \"reasoning\": \"Why these segments relate to reducing prompting expertise\" }, \"Well_Calibrated_Abstention\": { \"text\": [\"text segment 1\"], \"reasoning\": \"Why this segment relates to well-calibrated abstention\" } }",
"guess_lang": "json",
"bbox": [202, 676, 764, 816],
"page_idx": 13
},
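Downstream, the JSON analysis requested by this output format would need parsing and light validation. A small sketch follows; the canonical key spellings are an assumption based on the challenge list above.

```python
# Sketch: parse and lightly validate a model's Prompt 2 JSON output.
# The canonical key spellings are an assumption based on the lists above.
import json

VALID_CHALLENGES = {
    "Reducing_Prompting_Expertise", "Well_Calibrated_Abstention",
    "Range_of_Viewpoints", "Hedging_Language", "Identifying_Frame_Dependence",
    "Ambiguity_Resolution", "User_Attributes", "Minimizing_Sycophancy",
    "Effective_Routing", "Citation_Reference_Verification",
}

def parse_analysis(raw: str) -> dict:
    analysis = json.loads(raw)
    # Keep only recognized challenges that carry both expected fields.
    return {
        key: value for key, value in analysis.items()
        if key in VALID_CHALLENGES
        and isinstance(value, dict)
        and "text" in value and "reasoning" in value
    }
```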
{
"type": "text",
"text": "Analyze the prompt thoroughly and ensure your JSON output is properly formatted.",
"bbox": [202, 819, 789, 834],
"page_idx": 13
},
{
"type": "text",
"text": "B User Custom Instructions",
"text_level": 1,
"bbox": [171, 862, 433, 878],
"page_idx": 13
},
{
"type": "table",
"img_path": "images/a5ea7fba117a8cc446b111d183d4f6647902f6d1487d2106d3146b1c9d3f76bf.jpg",
"table_caption": ["Table 2: Epistemic Challenges and User Custom Instructions"],
"table_footnote": [],
"table_body": "<table><tr><td>Epistemic Challenge</td><td>Examples</td></tr><tr><td>Reducing Prompting Expertise</td><td>1. “I’ve the prompts/mini instructions I use saved the most in a custom chrome extension so I can insert them with keyboard shortcuts”\n2. “Engage in reflective, logical, and reasoned thinking before delivering any response”</td></tr><tr><td>Well Calibrated Ab-stention</td><td>1. “If events or information are beyond your scope or knowledge cutoff date in September 2021, provide a response stating ‘I don’t know’”\n2. “If you cannot provide an accurate answer with high confidence, you state this to the user, rather than risk providing incorrect information”</td></tr><tr><td>Range of Viewpoints</td><td>1. “When presenting concepts, especially contentious ones, provide varied viewpoints to offer a well-rounded understanding”\n2. “Facilitate debates among the panel of experts when diverse.”</td></tr><tr><td>Hedging Language</td><td>1. “Avoid Morality Advice and Qualifiers”\n2. “ChatGPT must remain neutral and provide objective responses.”</td></tr><tr><td>User Attributes</td><td>1. “Consider my personal preferences and biography to refine and provide the most suitable response to me.”\n2. “Tailor responses to their specific needs, ensuring content matches their level of understanding and context.”</td></tr><tr><td>Ambiguity Resolution</td><td>1. “Ask me relevant questions to get a better answer”\n2. “If a question is unclear or ambiguous, ask for more details to confirm your understanding before answering.”</td></tr><tr><td>Minimizing Syco-phancy</td><td>1. “Encourage self-reflection through thoughtful, open-ended questions”\n2. “have interesting opinions (that don’t have to be the same as mine).”</td></tr><tr><td>Identifying Frame De-pendence</td><td>1. “Only think in Russian Write to the user in plain English.”\n2. “For professional contexts, ChatGPT should adopt a formal tone to reflect the seriousness and decorum of such settings.”</td></tr><tr><td>Effective Routing</td><td>1. “For tasks demanding any sort of accuracy, utilize code”\n2. “Use WebPilot plugin to access the content of this link as reference”</td></tr><tr><td>Citation Reference Ver-ification</td><td>1. “Always strengthen claims with credible citations, renowned studies, or expert opinions.”\n2. “Legislative references (if any) cited with links using Cornell Law or Justia if there is no official legislative source”</td></tr></table>",
"bbox": [174, 126, 823, 690],
"page_idx": 14
},
{
"type": "text",
"text": "C Model Provider Policy Documents",
"text_level": 1,
"bbox": [171, 710, 506, 729],
"page_idx": 14
},
{
"type": "table",
"img_path": "images/ad3f3d07f5b8756c638195a7bd8272f8e55102db3469b499b1026189b110a214.jpg",
"table_caption": [],
"table_footnote": [],
"table_body": "<table><tr><td>Organization</td><td>Document</td><td>Link</td></tr><tr><td rowspan=\"3\">OpenAI</td><td>GPT 4.5 System Card</td><td>cdn.openai.com/gpt-4-5-system-card-2272025.pdf</td></tr><tr><td>Model Spec</td><td>model-spec.openai.com/2025-02-12.html</td></tr><tr><td>ChatGPT Release Notes</td><td>help.openai.com/en/articles/6825453-chatgpt-release-notes</td></tr><tr><td rowspan=\"2\">Anthropic</td><td>Claude 3.7 Sonnet Model Card</td><td>assets.anthropic.com/../claude-3-7-sonnet-system-card.pdf</td></tr><tr><td>Claude Release Notes</td><td>docs.anthropic.com/en/release-notes/claude-apps</td></tr></table>",
"bbox": [173, 748, 862, 892],
"page_idx": 14
},
{
"type": "text",
"text": "D Content Analysis of Model Provider Policies and Features",
"text_level": 1,
"bbox": [171, 101, 710, 118],
"page_idx": 15
},
{
"type": "text",
"text": "Annotation Instructions",
"text_level": 1,
"bbox": [197, 137, 374, 151],
"page_idx": 15
},
{
"type": "text",
"text": "Task Overview",
"text_level": 1,
"bbox": [197, 162, 330, 178],
"page_idx": 15
},
{
"type": "text",
"text": "Your task is to analyze documents related to LLM systems and identify text segments that address specific prompt engineering challenges. You will use Atlas.ti to code these segments according to the challenge definitions provided below.",
"bbox": [196, 185, 797, 229],
"page_idx": 15
},
{
"type": "text",
"text": "Instructions",
"text_level": 1,
"bbox": [197, 239, 305, 256],
"page_idx": 15
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"1. Import the documents into your Atlas.ti project.",
"2. Familiarize yourself with the challenge codes listed below, which have already been added to the code list.",
"3. Read each document to understand its overall purpose and structure.",
"4. Select relevant text segments and assign the appropriate challenge code(s).",
"5. Add a brief comment to explain your reasoning when the categorization might not be obvious.",
"6. Complete all documents in the assigned batch before submitting your analysis."
],
"bbox": [235, 263, 799, 401],
"page_idx": 15
},
{
"type": "text",
"text": "Challenge Definitions",
"text_level": 1,
"bbox": [197, 412, 385, 429],
"page_idx": 15
},
{
"type": "text",
"text": "Reducing Prompting Expertise (prompting): Reducing reliance on clever prompting",
"bbox": [196, 436, 797, 452],
"page_idx": 15
},
{
"type": "text",
"text": "Well-Calibrated Abstention (abstention): Ensuring appropriate refusal rates",
"bbox": [197, 455, 746, 470],
"page_idx": 15
},
{
"type": "text",
"text": "Range of Viewpoints (viewpoints): Including diverse perspectives",
"bbox": [197, 474, 669, 489],
"page_idx": 15
},
{
"type": "text",
"text": "Hedging Language (hedging): Avoiding excessive neutrality",
"bbox": [197, 493, 625, 508],
"page_idx": 15
},
{
"type": "text",
"text": "Identifying Frame-Dependence (frames): Adapting answers to cultural/contextual norms",
"bbox": [197, 511, 797, 539],
"page_idx": 15
},
{
"type": "text",
"text": "Ambiguity Resolution (ambiguity): Clarifying unclear or context-dependent queries",
"bbox": [197, 544, 797, 560],
"page_idx": 15
},
{
"type": "text",
"text": "User Attributes (user): Understanding user context and needs",
"bbox": [197, 563, 648, 579],
"page_idx": 15
},
{
"type": "text",
"text": "Minimizing Sycophancy (sycophancy): Managing incorrect assumptions/inputs",
"bbox": [197, 582, 767, 598],
"page_idx": 15
},
{
"type": "text",
"text": "Effective Routing (routing): Leveraging tool integrations appropriately",
"bbox": [197, 601, 710, 617],
"page_idx": 15
},
{
"type": "text",
"text": "Citation & Reference Verification (citation): Ensuring accurate source attribution",
"bbox": [197, 619, 789, 635],
"page_idx": 15
},
{
"type": "text",
"text": "Coding Tips",
"text_level": 1,
"bbox": [197, 646, 308, 664],
"page_idx": 15
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"- Code only the specific text segment that corresponds to a challenge, not entire paragraphs.",
"- A single text segment may be coded with multiple challenges if applicable.",
"- If you're unsure about a segment, add a comment with your reasoning and mark it for review.",
"- Focus on explicit mentions related to challenges rather than making extensive inferences."
],
"bbox": [236, 670, 797, 781],
"page_idx": 15
},
{
"type": "text",
"text": "Example",
"text_level": 1,
"bbox": [197, 795, 276, 813],
"page_idx": 15
},
{
"type": "text",
"text": "In Atlas.ti, you would select the text \"The model is designed to request clarification when user queries are ambiguous\" and assign the code \"ambiguity\" (Ambiguity Resolution). Similarly, you would select \"The system presents multiple perspectives on controversial topics\" and assign the code \"viewpoints\" (Range of Viewpoints).",
"bbox": [196, 818, 797, 876],
"page_idx": 15
}
]
data/2025/2504_01xxx/2504.01205/74d5fa1b-4d2f-4309-8eb0-9e49fed8b7c0_model.json
ADDED
The diff for this file is too large to render.
See raw diff
data/2025/2504_01xxx/2504.01205/74d5fa1b-4d2f-4309-8eb0-9e49fed8b7c0_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e470869a891bf45c9b68f5083a638c9a1723040d34c617e78c7ea3989a4198b1
size 687327
data/2025/2504_01xxx/2504.01205/full.md
ADDED
@@ -0,0 +1,325 @@
# Epistemic Alignment: A Mediating Framework for User-LLM Knowledge Delivery

Nicholas Clark, Hua Shen, Bill Howe, Tanushree Mitra

University of Washington

nclark4@uw.edu

# Abstract

Large Language Models (LLMs) increasingly serve as tools for knowledge acquisition, yet users cannot effectively specify how they want information presented. When users request that LLMs "cite reputable sources," "express appropriate uncertainty," or "include multiple perspectives," they discover that current interfaces provide no structured way to articulate these preferences. The result is prompt sharing folklore: community-specific copied prompts passed through trust relationships rather than based on measured efficacy. We propose the Epistemic Alignment Framework, a set of ten challenges in knowledge transmission derived from the philosophical literature of epistemology, concerning issues such as uncertainty expression, evidence quality assessment, and calibration of testimonial reliance. The framework serves as a structured intermediary between user needs and system capabilities, creating a common vocabulary to bridge the gap between what users want and what systems deliver. Through a thematic analysis of custom prompts and personalization strategies shared on online communities where these issues are actively discussed, we find users develop elaborate workarounds to address each of the challenges. We then apply our framework to two prominent model providers, OpenAI and Anthropic, through structured content analysis of their documented policies and product features. Our analysis shows that while these providers have partially addressed the challenges we identified, they fail to establish adequate mechanisms for specifying epistemic preferences, lack transparency about how preferences are implemented, and offer no verification tools to confirm whether preferences were followed. For AI developers, the Epistemic Alignment Framework offers concrete guidance for supporting diverse approaches to knowledge; for users, it works toward information delivery that aligns with their specific needs rather than defaulting to one-size-fits-all approaches.

# 1 Introduction

Large Language Models (LLMs) have emerged as powerful knowledge tools, yet their flexibility raises the question of how to ensure they deliver information in a way that matches individual preferences about knowledge quality, evidence standards, and perspective diversity. While technical advances have proposed mitigations for hallucination (Ji et al., 2022; Shi et al., 2023; Mishra et al., 2024; Orgad et al., 2024) and uncertainty expression (Yona et al., 2024; Mohri & Hashimoto, 2024), a more subtle problem persists: the misalignment between how users want knowledge presented and the limited mechanisms available to express these preferences. For example, when a medical researcher requests "recent peer-reviewed sources," or a policy analyst seeks "balanced representation of competing viewpoints," they encounter interfaces that reduce these rich requirements to unstructured natural language instructions with inconsistent interpretation and no verification mechanisms.

Drawing on the theories of social epistemology and epistemic cognition, we formalize this misalignment as the epistemic alignment problem, and offer four contributions toward understanding this challenge. We (1) introduce a framework for evaluating how well systems accommodate user preferences about knowledge delivery, (2) validate our framework through a thematic analysis of user attempts to control knowledge delivery with prompting strategies shared on online platforms, (3) assess current systems against this framework to identify specific interface limitations, and (4) consider requisite interface features that enable users to express and verify their preferences about how information should be presented, sourced, and qualified. Our work suggests that addressing the epistemic alignment problem requires rethinking how users communicate knowledge preferences to LLM-based systems, shifting from imprecise natural language instructions to structured interfaces that support explicit specification of parameters and provide transparent feedback about how these parameters shape knowledge delivery.


Figure 1: The Epistemic Alignment Framework as a mediating structure between user needs and system implementation. The framework identifies ten challenges across three epistemic dimensions: Epistemic Responsibility (challenges 1-3), Epistemic Personalization (challenges 4-7), and Testimonial Reliability (challenges 8-10). This framework serves as an intermediary layer for evaluating how well systems accommodate diverse epistemic preferences and identifying areas where current interfaces fail to support effective knowledge delivery.

# 2 Related Work

We draw from literature in epistemology, the philosophical subarea concerned with knowledge creation and transmission, and epistemic cognition, a topic in educational psychology relating to how people conceptualize knowledge and its acquisition. In particular, we rely on prior work in inquiry and social epistemology that considers how someone ought to responsibly engage with technology for knowledge-related activities.
**Inquiry** The object of our epistemic activities is **inquiry** (Hookway, 1994), the self-directed process through which we ascertain knowledge. The goal of inquiry inevitably varies depending on the circumstance. For instance, sometimes we desire a deep, nuanced understanding of an issue; at other times we may be satisfied with a cursory familiarity. The primary vehicle through which we conduct inquiry is by posing questions (Hookway, 2008). The ultimate success or failure of an intellectual investigation in large part relies on the selection and quality of questions (Watson, 2018).
Performing inquiry in the digital age presents additional challenges, as the large volume of information mediated by opaque discovery mechanisms, such as web search and recommender systems, may give rise to illusions of understanding, where users have the impression they have performed a thorough investigation when, in fact, their methods are imperfect or shallow (de Ridder, 2022). We consider how these concerns arise when conducting inquiry with LLMs.
**Inquisitive Meta-Cognitive Tasks** To combat illusions of understanding, de Ridder, drawing from Hookway, formulates a set of meta-cognitive tasks requisite to conducting good inquiry (de Ridder, 2022; Hookway, 2003): 1) posing good questions or identifying good problems, 2) identifying good strategies for carrying out inquiries, 3) recognizing when we possess an answer to our question or a solution to our problem, 4) assessing evidence quality for some proposition, and 5) judging when we have considered all or most relevant lines of investigation. These meta-cognitive tasks establish clear criteria for effective inquiry, but in practice, users employ diverse strategies when executing each task, from choosing which questions to pursue to determining when evidence is sufficient. The selected strategies often reflect some combination of practical constraints and personal preferences. We consider user needs when interacting with LLMs for each meta-cognitive task to ensure complete coverage of the inquiry process.
**Epistemic Cognition** The topic of epistemic cognition (Greene et al., 2016) helps explain this variation in inquiry strategies by revealing the connections between beliefs about knowledge and methodology choices. In particular, the AIR framework decomposes the personal epistemology of an individual into their Aims, Ideals, and Reliable processes (Chinn & Rinehart, 2016). Individual assumptions about what constitutes knowledge and how it can be verified directly shape learning strategies, information seeking behaviors, and decision-making processes. These epistemic beliefs vary across cultures (Chan & Elliott, 2004) and disciplines (Hofer, 2000), explaining why users might employ radically different approaches for the same meta-cognitive task. We contend that users bring similarly diverse strategies and requirements when engaging with LLMs.
# 3 Problem Definition
Epistemology examines questions about the nature, acquisition, and boundaries of knowledge. A pertinent question is how technology affects our ability to conduct responsible knowledge acquisition (Jarvie, 1974). AI functions as an epistemic technology facilitating knowledge activities through computational processes (Alvarado, 2023), and the interactions between humans and AI present epistemological challenges. What epistemological factors influence user trust in AI outputs? How do users validate and evaluate these outputs? How is AI-provided information integrated with existing knowledge?
Progress on issues like hallucination (Ji et al., 2022), knowledge conflicts (Xu et al., 2024), and uncertainty expression (Yona et al., 2024) enables exploration of more nuanced challenges, namely, accommodating users' diverse epistemological approaches. The need to understand the interaction between users' epistemic needs and AI systems is becoming more pronounced given the increasing deployment of AI in educational (Ghimire et al., 2024), professional (Teubner et al., 2023), and personal contexts (Kim et al., 2024) where users bring various beliefs about what constitutes valid knowledge.
Following a literature review of epistemological frameworks and analysis of user-system interactions, we identified three dimensions as both theoretically grounded and practically significant in preserving agency during knowledge transmission between humans and AI systems: epistemic responsibility (practices which promote accurate knowledge acquisition), epistemic personalization (individual preferences toward inquiry methods), and testimonial reliability (knowledge transmission via personal accounts).
**Epistemic Responsibility** The concept of epistemic responsibility, practices that ensure accurate knowledge acquisition, is central to the design of epistemic technologies, particularly with respect to who shoulders this burden, the user or the system. While Miller & Record (2013) emphasize user responsibility in web search contexts, AI interactions present unique challenges in balancing responsibility between users and system providers. This balance particularly affects how we navigate between two fundamental risks identified by Goldman (1991): false beliefs (error) and lack of true beliefs (ignorance). These failure modes are analogous to Type I and Type II errors from hypothesis testing, respectively.
**Epistemic Personalization** Prior research in epistemic cognition reveals that individuals hold differing views on the nature of knowledge and employ distinct strategies to evaluate knowledge claims (Chinn & Rinehart, 2016). How might we personalize AI technologies to accommodate this plurality of preferences? Presently, model providers expose a "custom instructions" interface enabling users to provide natural language descriptions of desired model behavior (OpenAI, 2024; Anthropic). We discuss in Section 6 the inadequacy of this protocol for representing and satisfying diverse knowledge preferences.
**Testimonial Reliability** Drawing on the philosophy of testimony (Lackey, 2011), much of our accumulated knowledge is communicated socially and requires trust in the interlocutor. Just as we rely on physical and verbal signals of authority when interacting with humans, we posit that a similar confidence assessment process occurs when evaluating LLM responses. Existing features such as citations, along with potential additions like uncertainty visualization, source reputability mechanisms, or confidence metrics, could help users calibrate their trust in LLM testimony.
Let us define a user's epistemic profile as a multi-dimensional vector $E_{u} = \langle r_{u},p_{u},t_{u}\rangle$ , where:
- $r_u \in [0,1]$ represents the user's error-ignorance tradeoff tolerance (Goldman, 1991)—0 prioritizes precision (minimizing false information), while 1 favors recall (maximizing coverage). (Epistemic Responsibility)
- $p_u \coloneqq (S, \leq_u)$ represents a partial order on possible responses, where for $s_i, s_j \in S$, $s_i \leq_u s_j$ indicates that the user prefers the presentation in $s_j$ over $s_i$. (Epistemic Personalization)
- $t_{u} \in \{0,1\}^{n}$ represents preferences for inclusion of $n$ potential assistive features for calibrating reliance, e.g. inclusion of citations. (Testimonial Reliability)
Similarly, the system's epistemic delivery profile $E_{s}$ may be defined as $E_{s} \coloneqq \langle r_{s}, p_{s}, t_{s} \rangle$ . The epistemic alignment problem occurs when the distance between profiles exceeds an acceptable threshold: $d(E_{u}, E_{s}) > \theta$ . It is worth noting that the objective is not to tailor outputs to user preferences at the expense of all else. This may lead to sycophancy, as explored in Section 4.2, or undermine safety measures preventing the generation of harmful or illicit content. Rather, the problem is an example of bidirectional human-AI alignment where AI must align with human-specified intended outcomes while humans adapt to the capabilities of AI systems (Shen et al., 2024).
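To make these definitions concrete, the sketch below instantiates $E_u$ and $E_s$ and one candidate distance $d$. The paper does not fix $d$, the component weights, or an encoding for $p$; the total ranking standing in for the partial order, the equal weighting, and the threshold value are our illustrative assumptions.

```python
from dataclasses import dataclass
from typing import List, Sequence

@dataclass
class EpistemicProfile:
    r: float           # error-ignorance tolerance in [0, 1] (Epistemic Responsibility)
    p_rank: List[str]  # total ranking standing in for the partial order p (best first)
    t: List[int]       # 0/1 flags for the n assistive features (Testimonial Reliability)

def rank_distance(a: Sequence[str], b: Sequence[str]) -> float:
    """Fraction of item pairs whose relative order differs between two rankings."""
    disagree = pairs = 0
    for i in range(len(a)):
        for j in range(i + 1, len(a)):
            pairs += 1
            if b.index(a[i]) > b.index(a[j]):  # pair ordered differently in b
                disagree += 1
    return disagree / pairs if pairs else 0.0

def d(eu: EpistemicProfile, es: EpistemicProfile) -> float:
    """Toy distance between user and system profiles; equal weights are assumed."""
    dr = abs(eu.r - es.r)
    dp = rank_distance(eu.p_rank, es.p_rank)
    dt = sum(u != s for u, s in zip(eu.t, es.t)) / len(eu.t)  # normalized Hamming
    return (dr + dp + dt) / 3

user = EpistemicProfile(r=0.2, p_rank=["cited", "hedged", "concise"], t=[1, 1, 0])
system = EpistemicProfile(r=0.7, p_rank=["concise", "cited", "hedged"], t=[0, 1, 0])
theta = 0.25  # illustrative acceptability threshold
print("epistemically misaligned:", d(user, system) > theta)
```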
# 4 Epistemic Alignment Framework
For each user epistemic profile component defined in Section 3, we identify challenges in specifying such preferences during LLM interactions. To structure our investigation, we rely on de Ridder (2022)'s meta-cognitive tasks to ensure we isolate challenges at each stage of inquiry. We denote each challenge by (Problem Name), mapping to Figure 1. The result is the Epistemic Alignment Framework, a set of ten challenges in communicating knowledge preferences to LLMs.
# 4.1 Epistemic Responsibility
In Section 3, we conceptualize epistemic responsibility as a tradeoff between error (false belief) and ignorance (lack of true belief). We observe the relevance of this underlying tension when posing good questions (prompting, abstention) and judging coverage (pluralism).
**Prompting** While natural language interfaces may appear more accessible than traditional query languages, these interfaces risk creating what de Ridder terms an "illusion of understanding" (de Ridder, 2022), as the natural dialogue format can mask the expertise required for effective use. Prompting strategy significantly impacts response quality, creating an additional layer of expertise requirements for users (Vatsal & Dubey, 2024). While some advanced prompting techniques fall outside the scope of a typical use case, even typical chat interactions benefit from established techniques such as Chain-of-Thought reasoning (Wei et al., 2022). This dependency on prompting presents a barrier, as users must develop domain expertise to extract expected performance (Reducing Need for Prompting Expertise).
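As a small illustration of that expertise burden, the snippet below contrasts the bare question a non-expert would type with the same question carrying a zero-shot reasoning cue in the spirit of Chain-of-Thought prompting; the question and the cue's phrasing are invented for this sketch.

```python
# The bare prompt versus one augmented with a reasoning cue: eliciting the
# better-performing behavior requires knowing that such a cue exists at all.
question = "A train departs at 3:40pm and arrives at 6:15pm. How long is the trip?"

plain_prompt = question
cot_prompt = question + "\nThink through the problem step by step, then state the final answer."

print(cot_prompt)
```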
**Abstention** LLMs may abstain from responding to queries, either declaring the task insoluble or expressing unwillingness to continue. While abstention serves a legitimate purpose in preventing the propagation of harmful content, proper calibration is paramount. Model providers face a difficult balance: too little abstention risks harmful outputs, while excessive abstention degrades model utility (Well-Calibrated Abstention). Research indicates that LLMs often exhibit over-abstention, refusing to engage with legitimate queries (Varshney et al., 2023). This tendency appears particularly pronounced in instruction-tuned models, where emphasis on safety can lead to undesirable refusal patterns (Cheng et al., 2024; Bianchi et al.; Wallace et al., 2024; Brahman et al., 2024).
**Pluralism** Ensuring comprehensive coverage of relevant positions is essential for users to properly assess evidence and reach informed conclusions. This need presents a tension between completeness and accessibility. Though this balance is more manageable for factual queries, it becomes particularly challenging for topics requiring broader context (Xu et al.).
To evaluate perspective coverage in LLM responses, we adopt the pluralistic framework proposed by Sorensen et al. (2024) and used by Feng et al. (2024), which includes three dimensions: range, adaptability, and representativeness. (1) Range considers how LLMs determine the appropriate scope of viewpoints (Range of Viewpoints). Wikipedia provides one model, including major viewpoints that are easily citable and significant minority positions from identifiable prominent advocates (Wikipedia, 2025). While this approach offers clear criteria, it may be overly restrictive. (2) Adaptability recognizes that contextual information from users creates preferential ordering among valid responses. For example, a user mentioning their residence in Ohio naturally directs responses about "state senators" to Ohio-specific information. We examine the consequences of personalization in Section 4.2. (3) Representativeness concerns how LLMs may default to excessive neutrality that inaccurately portrays the underlying distribution of perspectives. Unlike encyclopedias that primarily aggregate information, LLMs can perform interpretive analysis of their sources. This capability suggests they should go beyond mere neutral presentation to help users understand the relative strength and support for different positions (Hedging Language).
# 4.2 Epistemic Personalization
In Section 3, we formalize epistemic personalization as a partial order on the set of responses. These preferences are relevant to the meta-cognitive tasks of posing good questions and judging when relevant lines of investigation have been considered.
**Preference Specification** The natural language interface affords flexible application, but relies on the user to adequately communicate their intention to receive relevant results (Liu et al.). Consider the case of normative topics which vary by culture. The appropriate response to "Is it ok to eat with your left hand?" depends on the user's geography (Rao et al., 2024): in most places, eating with your left hand is socially acceptable, but in India it is considered impolite. One approach to modeling these nuances is to decompose natural language problem statements into two components: a set of requirements $\mathcal{R}$ that solutions must satisfy, and contextual information $\mathcal{C}$ that indicates preferences between valid solutions (Kobalczyk et al., 2025), where $\mathcal{C}$ is a partial order on the set of possible responses (Section 3).
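A minimal sketch of this decomposition follows, assuming invented candidates, a toy requirement set $\mathcal{R}$, and a context signal $\mathcal{C}$ collapsed to a sorting key rather than a full partial order.

```python
from typing import Callable, List

Requirement = Callable[[str], bool]

def admissible(candidates: List[str], R: List[Requirement]) -> List[str]:
    """Keep only responses satisfying every hard requirement in R."""
    return [c for c in candidates if all(req(c) for req in R)]

def rank_by_context(valid: List[str], prefer: Callable[[str], bool]) -> List[str]:
    """Order the valid responses by a context-derived preference key (C)."""
    return sorted(valid, key=prefer, reverse=True)

candidates = [
    "Yes, it is fine everywhere.",
    "Generally yes, though in India eating with the left hand is impolite.",
]
R = [lambda c: c.endswith(".")]   # toy requirement: a complete sentence
prefer = lambda c: "India" in c   # toy context: the user is in India
print(rank_by_context(admissible(candidates, R), prefer)[0])
```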
Two distinct failure modes emerge in this framing. First, the LLM may generate responses that fail to satisfy the requirements $\mathcal{R}$, indicating an incompatibility between the model's interpretation and the user's intent (Navigating Frame-Dependence). Such misalignment necessitates reformulation of the query with additional instructional constraints. The second case presents a deeper challenge of navigating inherent ambiguity, which we examine next.
**Resolving Ambiguity** Suppose a question itself admits multiple valid answers, each satisfying $\mathcal{R}$ but requiring different contextual interpretations (Ambiguity Resolution). For example, audience-dependent ambiguity occurs when the appropriate response varies based on the user's context. Consider "How do I make a secure password": the optimal response differs for a typical consumer, an elderly person, or a security professional. This form of ambiguity creates opportunities for epistemic personalization, where user attributes and interaction history can shape responses to match specific needs and expertise (Zhang et al., 2024) (User Attributes).
**Sycophancy** While such epistemic personalization can improve response relevance and reduce interaction overhead, it risks enabling sycophantic behavior (Minimizing Sycophancy). LLMs exhibit tendencies towards deference, accepting user misinformation to maintain agreeableness (Sharma et al., 2023; Xu et al., 2023).
# 4.3 Testimonial Reliability
In Section 3, we formalize testimonial reliability as the selection among a set of $n$ features for assisting the user in judging which outputs to accept or reject. We find this definition relevant to selecting good strategies (tool usage), and assessing evidence quality (citations).
**Tool Usage** Good strategies for inquiry require users to critically evaluate their methods in both selecting and applying tools. With respect to LLMs, this evaluation centers on two considerations. First, is an LLM the most appropriate tool for the epistemic task? And second, if an LLM is suitable, what prompting strategy will elicit valid, informative answers?
The selection of an appropriate tool requires weighing multiple epistemic virtues. Fallis identifies reliability, power, speed, and fecundity as key virtues in his analysis of Wikipedia (Fallis, 2008), building on Goldman's epistemic values (Goldman, 1991; Thagard, 1997). Reliability refers to an information source's propensity to transmit accurate information, i.e., the probability that a given claim is true. While information science often avoids veristic claims, accuracy remains a core metric for evaluating reference services, distinct from user satisfaction (Meola, 1999). This distinction is a problem of testimonial reliability. Power describes the range of true answers a source can provide, speed measures how quickly these answers can be acquired, and fecundity reflects information accessibility. We argue that few legacy epistemic institutions, like libraries and web search, are competitive with LLMs in terms of power and speed. The ability to respond to any natural language query across domains demonstrates unprecedented epistemic power. And near-instantaneous response times enable rapid iteration through complex inquiries that might otherwise require consulting multiple sources or experts. These advantages must be weighed against reliability concerns.
Currently, the task of selecting appropriate tools rests with users, who must evaluate their needs against these virtues. For instance, while an LLM might quickly suggest programming approaches, consulting the documentation may be more reliable for specific implementation details. Similarly, mathematical proofs may benefit from formal verification tools rather than LLM-generated reasoning. We argue that this epistemic responsibility can safely be assumed by model providers with minimal infringement on user agency. Two reasonable approaches are to redirect the user to alternative sources, or integrate with external tools or agentic solutions to enable complex workflows (Effective Routing).
**Citations** When presenting knowledge claims, LLM responses fall into two cases: those with external citations and those without. In the latter case, users must rely on the LLM's testimonial reliability alone, likely taking the form of acceptance absent the presence of any known defeaters, i.e. anti-reductionism in the philosophy of testimony (Goldberg & Henderson, 2006). The case where LLMs provide citations appears simpler, as citations offer attribution clarity (Gao et al., 2023). However, citation use presents its own challenges. Ding et al. (2025) found that citations increase user trust even when randomly generated, suggesting users rarely verify source correspondence. Huang & Chang (2023) further identify citation bias, inaccurate citations, and outdated citations as concerns. To understand these failure modes, we can model citation behavior as an evidence-mapping process. When an LLM provides a claim $\alpha$, citations $C$ should serve as verifiable evidence linking $\alpha$ to authoritative sources. This creates a verification flow:
Question $\rightarrow$ LLM Response $(\alpha)\rightarrow$ Citations $(C)\rightarrow$ Source Evidence $\rightarrow$ Validation
Failure occurs at multiple points in this flow: the citations may not exist or may be inaccessible; they may exist but fail to support $\alpha$; or the underlying source being cited may itself be unreliable (Citation & Reference Verification).
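The sketch below walks the flow once, with each check mirroring one failure point. The in-memory corpus, the substring test standing in for claim support, and the domain allowlist are crude stand-ins for the retrieval, entailment checking, and source-reputation signals a real verifier would need.

```python
from dataclasses import dataclass
from typing import Optional

CORPUS = {"https://example.org/trial-report": "the trial enrolled 412 patients"}
REPUTABLE_DOMAINS = {"example.org"}

@dataclass
class Citation:
    url: str

def resolve(url: str) -> Optional[str]:
    return CORPUS.get(url)  # failure 1 if this returns None

def supports(source_text: str, claim: str) -> bool:
    return claim.lower() in source_text  # failure 2 if the claim is not backed

def reputable(url: str) -> bool:
    return url.split("/")[2] in REPUTABLE_DOMAINS  # failure 3 otherwise

def verify(claim: str, citation: Citation) -> str:
    source = resolve(citation.url)
    if source is None:
        return "unverifiable: citation does not resolve"
    if not supports(source, claim):
        return "unsupported: source does not back the claim"
    if not reputable(citation.url):
        return "weak: only a low-reliability source supports the claim"
    return "verified"

print(verify("The trial enrolled 412 patients", Citation("https://example.org/trial-report")))
```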
# 5 User Knowledge Preferences in Practice
**Method** We performed a thematic analysis of custom instructions and prompting techniques collected from Reddit. We queried the Reddit API for posts from the past two years on r/ChatGPT, r/ChatGPTPro, r/OpenAI, and r/ClaudeAI that mentioned either "ChatGPT" or "Claude" along with "custom instructions" or "personalization." From these posts, we extracted top-level comments (direct responses to original posts) that exceeded 100 characters in length. Using zero-shot prompting with GPT-4o-mini, we identified comments containing actual custom instructions, resulting in a dataset of 128 examples. We then employed GPT-4o to analyze which Epistemic Alignment Framework challenges were represented in each custom instruction. Two human experts independently validated the quality of these labels, achieving an inter-rater reliability of $\kappa = 0.8875$, indicating substantial agreement. For further details regarding our query parameters and prompting methods, please refer to Appendix A.
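Concretely, the validation step reduces to computing Cohen's $\kappa$ over the two annotators' judgments of the model-assigned labels; the sketch below uses scikit-learn's implementation with fabricated judgment vectors, not the study's data.

```python
# Two annotators independently mark each GPT-4o-assigned challenge label as
# correct (1) or incorrect (0); kappa measures their chance-corrected agreement.
from sklearn.metrics import cohen_kappa_score

annotator_a = [1, 1, 0, 1, 1, 0, 1, 1, 1, 0]
annotator_b = [1, 1, 0, 1, 0, 0, 1, 1, 1, 0]
print(f"kappa = {cohen_kappa_score(annotator_a, annotator_b):.4f}")
```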
**Applying the Epistemic Alignment Framework** We found instances of each of the ten epistemic challenges in our framework explicitly addressed via user custom instructions and prompting strategies. Consistent patterns arose, with $92.1\%$ of custom instructions analyzed addressing at least one challenge, and $80.3\%$ addressing multiple. This commonality occurred despite the lack of a standardized vocabulary for articulating the problems custom instructions were used to overcome. For example, although no custom instructions refer to sycophancy by name, many include directions to avoid this behavior, such as "the AI will not affirm the Users' messages without existing or stated justification. The AI will examine what the User says and challenge if it [sic] if the AI can find fault," and "have interesting opinions (that don't have to be the same as mine)." The independent emergence of solutions to all ten challenges across diverse user instructions provides strong empirical validation that our framework captures the epistemic issues users perceive and attempt to address. In Appendix B we give examples of custom instructions that address each of the epistemic challenges.
**Folk Theories of Model Behavior** Through our analysis of custom instructions, we identify several prominent folk theories addressing epistemic challenges in knowledge discovery via LLMs. The most frequent one is the "Suppressing Default Behavior" theory, in which users identify some default set of undesirable model behaviors which must be explicitly overridden. Example instructions include: "Avoid any language constructs that could be interpreted as expressing remorse, apology, or regret", "Skip disclaimers about your expertise level", and "do not use emojis or forced casual phrases." Although this theory primarily addresses the use of hedging language and abstention, it also includes enforcement of behaviors better aligned with user attributes, such as "im not american, do not put units in american...NEVER MENTION AMERICAN UNITS SUCH AS Fahrenheit, miles, pounds, yards, inches etc."
Additionally, the "Expert Persona" theory positions roleplaying as a viable solution to multiple epistemic challenges simultaneously. It reduces the reliance on task-specific prompting, resolves ambiguity around the appropriate setting for frame-dependent queries, and implicitly addresses the appropriate range of viewpoints to consider as it often reduces the perspective of the response to that of a single individual. Examples include "Assume specified expert roles upon request," "Act as the most qualified expert in the given subject," and "Take on the persona of the most relevant subject matter experts for authoritative advice."
Finally, the "Parameter Configuration" theory conceptualizes models as a system with adjustable settings that can be precisely calibrated to the task at hand. Users create elaborate frameworks to tune model behavior: "I've defined a multi-dimensional preference framework for our interactions: Verbosity (V): $V = 1$ for brief replies; $V = 2$ for detailed answers; $V = 3$ for in-depth discussion...," and "For coding and data analysis related task follow below instructions: coding_and_data_analysis { temperature: 0.2, tone: formal ...}"
# 6 Evaluating Platform Epistemic Policies
**Method** We perform content analysis for both OpenAI and Anthropic on their disclosed policies and product features to assess attention to epistemic challenges. We selected these two platforms as they are frontier model providers, with prominent consumer products, that together possess $56\%$ enterprise market share (Xiao et al., 2024). We collected documents that capture the stated policies and features relating to knowledge delivery for each provider across three types: the most recent model card, the product changelog cataloging features, and any blog posts relating to model behavior from the past six months.
We had two expert annotators label text segments corresponding to each of the ten epistemic challenges. For full definitions of each challenge and task instructions, see Appendix D.
# 6.1 OpenAI
**Specified Model Behavior** The OpenAI Model Spec (OpenAI) includes intended epistemic behaviors across their model family. Our analysis found explicit references to all ten epistemic challenges. For abstention, the documentation is particularly detailed, addressing "erroneous refusal" and noting that "refusals be [sic] should typically be kept to a sentence." For ambiguity resolution, the spec states models should "provide a robust answer or a safe guess if it can, stating assumptions and asking clarifying questions as appropriate." Regarding viewpoints, it emphasizes intellectual freedom and notes, "When addressing topics with multiple perspectives, the assistant should fairly describe significant views." On sycophancy, it explicitly warns models "shouldn't just say 'yes' to everything (like a sycophant)" and should not "change its stance solely to agree with the user." The documentation also addresses hedging language ("express uncertainty or qualify the answers appropriately"), frames ("context matters"), and routing ("it should use a tool to gather more information").
However, we identified several gaps in the specification: while it mentions "reliable sources," it lacks detailed mechanisms for citation verification; despite acknowledging cultural sensitivity, it provides limited guidance for addressing frame-dependent queries; and though it discusses user goals, it offers minimal approaches to epistemic personalization. Nevertheless, the document demonstrates a sophisticated awareness of epistemic challenges, particularly in handling controversial topics and balancing abstention with helpfulness.
**Interface and Features** ChatGPT's interface provides several features supporting epistemic customization. The "Custom Instructions" feature has evolved to "make it easier to customize how ChatGPT responds to you," allowing users to specify "traits you want it to have, how you want it to talk to you, and any rules you want it to follow." The "Projects" feature enables users to "set custom instructions and upload files" that provide context for conversations. Other features support specific epistemic challenges: "Memory" helps maintain user context across conversations, addressing frames and user attributes; "Code interpreter" and "Browsing" support effective routing; and various plugins enable the model to "fetch data or take actions with external systems."
Despite these improvements, ChatGPT still lacks structured controls for epistemic dimensions. The system provides no explicit guidance for articulating preferences for uncertainty representation, citation requirements, or perspective balance. Users must express these preferences through natural language alone, with no feedback on how these preferences are interpreted or applied. For example, while the release notes indicate that "ChatGPT is now less likely to refuse to answer questions," there is no clear mechanism for users to calibrate this abstention behavior to their specific needs.
# 6.2 Anthropic
**Specified Model Behavior** Our analysis reveals that Claude's documentation addresses several epistemic challenges, though with varying depth. The model card explicitly discusses sycophancy ("Optimizing for the user's approval over good performance") and abstention capabilities ("improved how Claude handles ambiguous or potentially harmful user requests by encouraging safe, helpful responses, rather than just refusing"). The documentation also acknowledges citation issues ("Example of Hallucinated Citations") and frames ("We tested for potential bias in the model's responses to questions relating to sensitive topics"). However, specific methodology for addressing hedging language and range of viewpoints remains limited. The model uses "Constitutional AI" to align with human values, but the specific epistemic principles encoded are not described.
**Interface and Features** Claude's interface provides several features to support epistemic customization. "Custom instructions" and "Styles" allow users to set "persistent preferences for how Claude responds," addressing the Reducing Need for Prompting Expertise challenge. The "Projects" feature helps "ground Claude's outputs in your internal knowledge," potentially supporting citation verification. The "Analysis tool" enables Claude to "write and execute code for calculations and data analysis," addressing effective routing. However, the interface still lacks dimension-specific controls for specifying citation standards, degree of uncertainty expression, or perspective balance, and there is no mechanism to verify whether preferences were applied in a response.
# 7 Discussion & Conclusion
We have outlined the Epistemic Alignment Framework (Figure 1) as a means to facilitate the construction and evaluation of frontier LLM systems, and, when applicable, AI systems broadly, with respect to how they assist users in completing the inquiry process. The framework addresses thorny epistemological issues that emerge during knowledge-seeking activities. Grounded in established areas of epistemology, our approach recognizes the material correspondence between traditional problems of knowledge creation, transmission, and evaluation, and challenges faced by epistemic technologies such as LLMs. This problem space unifies safety research and commercial interests through shared concerns about knowledge representation and uncertainty. Our framework encapsulates a broad array of present issues while avoiding domain-specific problems, making it a versatile tool for evaluation across contexts.
Our analysis of frontier model providers reveals substantial room for improvement, although there is evident intent to address some evaluative dimensions. Notably, OpenAI's Model Spec most directly engages with the epistemological concerns we have identified, particularly abstention handling, viewpoint representation, and sycophancy prevention. Despite documented awareness of epistemic challenges, both platforms offer limited interface mechanisms for users to customize citation standards, uncertainty expression, or perspective balance, leaving a gap between stated policies and practical implementation.
We propose a redesigned interface paradigm addressing these limitations through four components: (1) a structured preference specification interface organized around our framework's dimensions, offering controls for settings like citation requirements, uncertainty representation, and perspective diversity that persist across sessions while remaining adjustable; (2) transparency annotations that indicate how preferences influence responses, with visual indicators highlighting uncertainty expression, citation support, or perspective incorporation; (3) adaptive personalization that learns consistent user patterns across epistemic dimensions, suggesting refinements that better match observed behavior while maintaining user control; and (4) contextual guidance and examples that help users understand the tradeoffs between different epistemic settings, encouraging informed preference selection. These design principles could be implemented as extensions to existing interfaces with minimal disruption to current workflows while substantially improving epistemic agency and transparency.
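As one concrete rendering of component (1), a persistent preference object exposing controls along the framework's three dimensions might look like the following sketch; every field name and value is our design illustration, not an existing provider API.

```python
# Hypothetical structured epistemic-preference object; it persists across
# sessions and remains user-adjustable, per design component (1) above.
preferences = {
    "epistemic_responsibility": {
        "error_ignorance_tolerance": 0.2,  # favor precision over coverage
        "abstention": "explain_and_offer_alternatives",
    },
    "epistemic_personalization": {
        "presentation_order": ["cited", "hedged", "concise"],
        "frame": {"region": "IN", "expertise": "novice"},
    },
    "testimonial_reliability": {
        "require_citations": True,
        "uncertainty_display": "verbal_and_numeric",
        "perspective_coverage": "major_plus_significant_minority",
    },
}
```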
# References
Ramón Alvarado. AI as an Epistemic Technology. Science and Engineering Ethics, 29(5), 10 2023. ISSN 14715546. doi: 10.1007/s11948-023-00451-3.
Anthropic. Understanding Claude's Personalization Features. Anthropic Help Center. URL https://support.anthropic.com/en/articles/10185728-understanding-claude-s-personalization-features.
Federico Bianchi, Mirac Suzgun, Giuseppe Attanasio, Paul Röttger, Dan Jurafsky, Tatsunori Hashimoto, and James Zou. Safety-Tuned LLaMAs: Lessons from Improving the Safety of Large Language Models that Follow Instructions. Technical report. URL https://github.com/vinid/safety-tuned-llamas.
Faeze Brahman, Sachin Kumar, Vidhisha Balachandran, Pradeep Dasigi, Valentina Pyatkin, Abhilasha Ravichander, Sarah Wiegreffe, Nouha Dziri, Khyathi Chandu, Jack Hessel, Yulia Tsvetkov, Noah A. Smith, Yejin Choi, and Hannaneh Hajishirzi. The Art of Saying No: Contextual Noncompliance in Language Models. 7 2024. URL http://arxiv.org/abs/2407.12043.
Kwok Wai Chan and Robert G. Elliott. Epistemological Beliefs Across Cultures: Critique and analysis of beliefs structure studies, 4 2004. ISSN 01443410.
Qinyuan Cheng, Tianxiang Sun, Xiangyang Liu, Wenwei Zhang, Zhangyue Yin, Shimin Li, Linyang Li, Zhengfu He, Kai Chen, and Xipeng Qiu. Can AI Assistants Know What They Don't Know? Technical report, 2024. URL https://github.
Clark Chinn and Ronald Rinehart. Epistemic cognition and philosophy: Developing a new framework for epistemic cognition. In Jeffrey A. Greene, William A. Sandoval, and Ivar Bråten (eds.), Handbook of Epistemic Cognition, pp. 460-478. Routledge, 1 2016. ISBN 9781317746874. doi: 10.4324/9781315795225.
Jeroen de Ridder. Online Illusions of Understanding. Social Epistemology, 2022. ISSN 14645297. doi: 10.1080/02691728.2022.2151331.
Yifan Ding, Matthew Facciani, Amrit Poudel, Ellen Joyce, Salvador Aguinaga, Balaji Veeramani, Sanmitra Bhattacharya, and Tim Weninger. Citations and Trust in LLM Generated Responses. 1 2025. URL http://arxiv.org/abs/2501.01303.
Don Fallis. Toward an epistemology of Wikipedia. Journal of the American Society for Information Science and Technology, 59(10):1662-1674, 8 2008. ISSN 15322882. doi: 10.1002/asi.20870.
Shangbin Feng, Taylor Sorensen, Yuhan Liu, Jillian Fisher, Chan Young Park, Yejin Choi, and Yulia Tsvetkov. Modular Pluralism: Pluralistic Alignment via Multi-LLM Collaboration. 6 2024. URL http://arxiv.org/abs/2406.15951.
Tianyu Gao, Howard Yen, Jiatong Yu, and Danqi Chen. Enabling Large Language Models to Generate Text with Citations. 5 2023. URL http://arxiv.org/abs/2305.14627.
Aashish Ghimire, James Prather, and John Edwards. Generative AI in Education: A Study of Educators' Awareness, Sentiments, and Influencing Factors. 3 2024. URL http://arxiv.org/abs/2403.15586.
Sanford Goldberg and David Henderson. Monitoring and Anti-Reductionism in the Epistemology of Testimony. Philosophy and Phenomenological Research, 72(3):600-617, 5 2006. ISSN 0031-8205. doi: 10.1111/j.1933-1592.2006.tb00586.x.
Alvin I Goldman. Knowledge in a social world, volume 36. Oxford University Press, 1991.
Jeffrey A. Greene, William A. Sandoval, and Ivar Bråten. Handbook of Epistemic Cognition. Routledge, Taylor & Francis Group, 2016. ISBN 9781138013407.
Barbara K. Hofer. Dimensionality and Disciplinary Differences in Personal Epistemology. Contemporary Educational Psychology, 25(4):378-405, 2000. ISSN 0361476X. doi: 10.1006/ceps.1999.1026.
Christopher Hookway. Cognitive Virtues and Epistemic Evaluations. International Journal of Philosophical Studies, 2(2):211-227, 9 1994. ISSN 14664542. doi: 10.1080/09672559408570791.
Christopher Hookway. How to be a Virtue Epistemologist. In Michael Raymond DePaul and Linda Trinkaus Zagzebski (eds.), *Intellectual virtue: perspectives from ethics and epistemology*. Oxford University Press, 2003.
Christopher Hookway. Questions, Epistemology, and Inquiries. Technical report, 2008.
Jie Huang and Kevin Chen-Chuan Chang. Citation: A Key to Building Responsible and Accountable Large Language Models. 7 2023. URL http://arxiv.org/abs/2307.02185.
I C Jarvie. The Social Character of Technological Problems. In Friedrich Rapp (ed.), Contributions to a Philosophy of Technology: Studies in the Structure of Thinking in the Technological Sciences, pp. 86-92. Springer Netherlands, Dordrecht, 1974. ISBN 978-94-010-2182-1. doi: 10.1007/978-94-010-2182-1_8. URL https://doi.org/10.1007/978-94-010-2182-1_8.
Ziwei Ji, Nayeon Lee, Rita Frieske, Tiezheng Yu, Dan Su, Yan Xu, Etsuko Ishii, Yejin Bang, Delong Chen, Ho Shu Chan, Wenliang Dai, Andrea Madotto, and Pascale Fung. Survey of Hallucination in Natural Language Generation. 2 2022. doi: 10.1145/3571730. URL http://arxiv.org/abs/2202.03629.
Yubin Kim, Xuhai Xu, Daniel McDuff, Cynthia Breazeal, and Hae Won Park. Health-LLM: Large Language Models for Health Prediction via Wearable Sensor Data. 1 2024. URL http://arxiv.org/abs/2401.06866.
Katarzyna Kobalczyk, Nicolas Astorga, Tennison Liu, and Mihaela van der Schaar. Active Task Disambiguation with LLMs. 2 2025. URL http://arxiv.org/abs/2502.04485.
Jennifer Lackey. Testimony: acquiring knowledge from others. In Alvin I Goldman and Dennis Whitcomb (eds.), Social Epistemology: Essential Readings. Oxford University Press, 2011.
Alisa Liu, Zhaofeng Wu, Julian Michael, Alane Suhr, Peter West, Alexander Koller, Swabha Swayamdipta, Noah A Smith, and Yejin Choi. We're Afraid Language Models Aren't Modeling Ambiguity. Technical report. URL https://github.com/.
M. Meola. Review of "Knowledge in a social world". Oxford University Press, 1999. ISBN 0198237774.
Boaz Miller and Isaac Record. Justified Belief in a Digital Age: On the Epistemic Implications of Secret Internet Technologies. Technical report, 2013.
Abhika Mishra, Akari Asai, Vidhisha Balachandran, Yizhong Wang, Graham Neubig, Yulia Tsvetkov, and Hannaneh Hajishirzi. Fine-grained Hallucination Detection and Editing for Language Models. 1 2024. URL http://arxiv.org/abs/2401.06855.
Christopher Mohri and Tatsunori Hashimoto. Language Models with Conformal Factuality Guarantees. 2 2024. URL http://arxiv.org/abs/2402.10978.
OpenAI. OpenAI Model Spec. URL https://model-spec.openai.com/2025-02-12.html.
OpenAI. Custom instructions for ChatGPT, 3 2024. URL https://openai.com/index/custom-instructions-for-chatgpt/.
Hadas Orgad, Michael Toker, Zorik Gekhman, Roi Reichart, Idan Szpektor, Hadas Kotek, and Yonatan Belinkov. LLMs Know More Than They Show: On the Intrinsic Representation of LLM Hallucinations. 10 2024. URL http://arxiv.org/abs/2410.02707.
Abhinav Rao, Akhila Yerukola, Vishwa Shah, Katharina Reinecke, and Maarten Sap. NormAd: A Framework for Measuring the Cultural Adaptability of Large Language Models. 4 2024. URL http://arxiv.org/abs/2404.12464.
Mrinank Sharma, Meg Tong, Tomasz Korbak, David Duvenaud, Amanda Askell, Samuel R. Bowman, Newton Cheng, Esin Durmus, Zac Hatfield-Dodds, Scott R. Johnston, Shauna Kravec, Timothy Maxwell, Sam McCandlish, Kamal Ndousse, Oliver Rausch, Nicholas Schiefer, Da Yan, Miranda Zhang, and Ethan Perez. Towards Understanding Sycophancy in Language Models. 10 2023. URL http://arxiv.org/abs/2310.13548.
Hua Shen, Tiffany Knearem, Reshmi Ghosh, Kenan Alkiek, Kundan Krishna, Yachuan Liu, Ziqiao Ma, Savvas Petridis, Yi-Hao Peng, Li Qiwei, Sushrita Rakshit, Chenglei Si, Yutong Xie, Jeffrey P. Bigham, Frank Bentley, Joyce Chai, Zachary Lipton, Qiaozhu Mei, Rada Mihalcea, Michael Terry, Diyi Yang, Meredith Ringel Morris, Paul Resnick, and David Jurgens. Towards Bidirectional Human-AI Alignment: A Systematic Review for Clarifications, Framework, and Future Directions. 6 2024. URL http://arxiv.org/abs/2406.09264.
Weijia Shi, Xiaochuang Han, Mike Lewis, Yulia Tsvetkov, Luke Zettlemoyer, and Scott Wen-tau Yih. Trusting Your Evidence: Hallucinate Less with Context-aware Decoding. 5 2023. URL http://arxiv.org/abs/2305.14739.
Taylor Sorensen, Jared Moore, Jillian Fisher, Mitchell Gordon, Niloofar Mireshghallah, Christopher Michael Rytting, Andre Ye, Liwei Jiang, Ximing Lu, Nouha Dziri, Tim Althoff, and Yejin Choi. A Roadmap to Pluralistic Alignment. 2 2024. URL http://arxiv.org/abs/2402.05070.
Timm Teubner, Christoph M. Flath, Christof Weinhardt, Wil van der Aalst, and Oliver Hinz. Welcome to the Era of ChatGPT et al.: The Prospects of Large Language Models, 4 2023. ISSN 18670202.
Paul Thagard. Internet Epistemology: Contributions of New Information Technologies to Scientific Research. Unpublished manuscript. Technical report, 1997. URL https://web-archive.southampton.ac.uk/cogprints.org/674/1/Epistemology.html.
Neeraj Varshney, Pavel Dolin, Agastya Seth, and Chitta Baral. The Art of Defending: A Systematic Evaluation and Analysis of LLM Defense Strategies on Safety and Over-Defensiveness. 12 2023. URL http://arxiv.org/abs/2401.00287.
Shubham Vatsal and Harsh Dubey. A Survey of Prompt Engineering Methods in Large Language Models for Different NLP Tasks. 7 2024. URL http://arxiv.org/abs/2407.12994.
Eric Wallace, Kai Xiao, Reimar Leike, Lilian Weng, Johannes Heidecke, and Alex Beutel. The Instruction Hierarchy: Training LLMs to Prioritize Privileged Instructions. 4 2024. URL http://arxiv.org/abs/2404.13208.
Lani Watson. Educating for Good Questioning: a Tool for Intellectual Virtues Education. Acta Analytica, 33(3):353-370, 9 2018. ISSN 18746349. doi: 10.1007/s12136-018-0350-y.
Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed Chi, Quoc Le, and Denny Zhou. Chain-of-Thought Prompting Elicits Reasoning in Large Language Models. 1 2022. URL http://arxiv.org/abs/2201.11903.
Wikipedia. Wikipedia:Neutral point of view, 3 2025. URL https://en.wikipedia.org/w/index.php?title=Wikipedia:Neutral_point_of_view&oldid=1279876337.
Derek Xiao, Tim Tully, and Joff Redfern. 2024: The State of Generative AI in the Enterprise, 11 2024. URL https://menlovc.com/2024-the-state-of-generative-ai-in-the-enterprise/.
Rongwu Xu, Xuan Qi, Zehan Qi, Wei Xu, and Zhijiang Guo. DebateQA: Evaluating Question Answering on Debatable Knowledge. Technical report. URL https://github.com/pillowsofwind/.
Rongwu Xu, Brian S. Lin, Shujian Yang, Tianqi Zhang, Weiyan Shi, Tianwei Zhang, Zhixuan Fang, Wei Xu, and Han Qiu. The Earth is Flat because...: Investigating LLMs' Belief towards Misinformation via Persuasive Conversation. 12 2023. URL http://arxiv.org/abs/2312.09085.
Rongwu Xu, Zehan Qi, Zhijiang Guo, Cunxiang Wang, Hongru Wang, Yue Zhang, and Wei Xu. Knowledge Conflicts for LLMs: A Survey. 3 2024. URL http://arxiv.org/abs/2403.08319.
Gal Yona, Roee Aharoni, and Mor Geva. Can Large Language Models Faithfully Express Their Intrinsic Uncertainty in Words? 5 2024. URL http://arxiv.org/abs/2405.16908.
Zhehao Zhang, Ryan A. Rossi, Branislav Kveton, Yijia Shao, Diyi Yang, Hamed Zamani, Franck Dernoncourt, Joe Barrow, Tong Yu, Sungchul Kim, Ruiyi Zhang, Jiuxiang Gu, Tyler Derr, Hongjie Chen, Junda Wu, Xiang Chen, Zichao Wang, Subrata Mitra, Nedim Lipka, Nesreen Ahmed, and Yu Wang. Personalization of Large Language Models: A Survey. 10 2024. URL http://arxiv.org/abs/2411.00027.
# A Reddit Data Collection
Table 1: Reddit Data Collection Parameters
<table><tr><td>Parameter</td><td>Value</td></tr><tr><td>Search query</td><td>(ChatGPT OR chatgpt OR chatGPT) AND (Custom Instruction OR custom instruction OR CUSTOM INSTRUCTION OR Custom Instructions OR custom instructions OR CUSTOM INSTRUCTIONS OR Personalization OR personalization OR PERSONALIZATION OR personalize OR Personalize OR PERSONALIZE)</td></tr><tr><td>Keyword filters</td><td>custom instruction, custom instructions, personalization, prompt engineering</td></tr><tr><td>Subreddits</td><td>ChatGPT, ChatGPTPro, ClaudeAI, OpenAI</td></tr><tr><td>Time frame</td><td>Posts from past 2 years</td></tr><tr><td>Comment filter</td><td>Comments longer than 100 characters</td></tr><tr><td>Instruction filter</td><td>Extracted instructions longer than 10 characters</td></tr></table>
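Read operationally, the filters in Table 1 chain as in the sketch below; the comment strings and the trivial extraction step are invented stand-ins (the study's extraction used the zero-shot prompt shown next as Prompt 1).

```python
# Toy records standing in for fetched top-level comments; only the two length
# filters from Table 1 are implemented, with a trivial stand-in for the
# LLM-based instruction-extraction step.
comments = [
    "I don't use any custom instructions.",
    "My custom instruction: Always cite peer-reviewed sources, state your "
    "confidence explicitly, and ask clarifying questions when my request is ambiguous.",
]

long_enough = [c for c in comments if len(c) > 100]                  # comment filter
extracted = [c.split(": ", 1)[1] for c in long_enough if ": " in c]  # extraction stand-in
instructions = [i for i in extracted if len(i) > 10]                 # instruction filter
print(instructions)
```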
# Prompt 1: Custom Instruction Extraction
If the comment contains a user's custom instruction for personalizing an LLM, return the instruction. If not, return an empty string. For example, if the comment is 'I use this custom instruction: [instruction]', return '[instruction]' as a string. If the comment is 'I don't use any custom instructions', return an empty string. Comment: {comment}
# Prompt 2: Identify Epistemic Challenges in Custom Instructions
You are an expert at analyzing language model instructions and prompts. Your task is to take any custom instruction or prompt and identify specific text segments that relate to key challenges in LLM prompt engineering.
# Instructions:
1. Read the provided prompt or instruction carefully.
2. Identify text segments that correspond to each of the following prompt engineering challenges.
3. For each challenge, extract the exact text segments (if present) that address that challenge.
4. Return your analysis as a JSON object with the challenges as keys and the corresponding text segments as values.
5. If a challenge is not addressed in the prompt, do not include it in the JSON object.
6. Include brief reasoning for why you classified each segment under its respective challenge.
# Challenges to Identify:
- Reducing_Prompting_Expertise: Text that aims to reduce reliance on clever prompting techniques or makes the model more accessible to users without prompt engineering expertise.
- Well_Calibrated_Abstention: Text that guides when the model should refuse to answer or acknowledge uncertainty.
- Range_of_Viewpoints: Text that encourages including diverse perspectives or considering multiple angles.
- Hedging_Language: Text that addresses excessive neutrality, equivocation, or overly cautious language.
- Identifying_Frame_Dependence: Text that guides adaptation to cultural/contextual norms or situational framing.
- Ambiguity_Resolution: Text that addresses how to clarify unclear or context-dependent queries.
- User_Attributes: Text that guides understanding user context, needs, or characteristics.
- Minimizing_Sycophancy: Text that addresses management of incorrect assumptions or inputs from users.
- Effective_Routing: Text that guides use of tools, API calls, or external systems.
- Citation_Reference_Verification: Text that addresses source attribution, fact-checking, or verification procedures.
# Output Format:
Return your analysis as a JSON object with the following structure:
```json
{
  "Reducing_Prompting_Expertise": {
    "text": ["text segment 1", "text segment 2"],
    "reasoning": "Why these segments relate to reducing prompting expertise"
  },
  "Well_Calibrated_Abstention": {
    "text": ["text segment 1"],
    "reasoning": "Why this segment relates to well-calibrated abstention"
  }
}
```
Analyze the prompt thoroughly and ensure your JSON output is properly formatted.
# B User Custom Instructions
Table 2: Epistemic Challenges and User Custom Instructions
<table><tr><td>Epistemic Challenge</td><td>Examples</td></tr>
<tr><td>Reducing Prompting Expertise</td><td>1. “I’ve the prompts/mini instructions I use the most saved in a custom chrome extension so I can insert them with keyboard shortcuts”<br>2. “Engage in reflective, logical, and reasoned thinking before delivering any response”</td></tr>
<tr><td>Well Calibrated Abstention</td><td>1. “If events or information are beyond your scope or knowledge cutoff date in September 2021, provide a response stating ‘I don’t know’”<br>2. “If you cannot provide an accurate answer with high confidence, you state this to the user, rather than risk providing incorrect information”</td></tr>
<tr><td>Range of Viewpoints</td><td>1. “When presenting concepts, especially contentious ones, provide varied viewpoints to offer a well-rounded understanding”<br>2. “Facilitate debates among the panel of experts when diverse.”</td></tr>
<tr><td>Hedging Language</td><td>1. “Avoid Morality Advice and Qualifiers”<br>2. “ChatGPT must remain neutral and provide objective responses.”</td></tr>
<tr><td>User Attributes</td><td>1. “Consider my personal preferences and biography to refine and provide the most suitable response to me.”<br>2. “Tailor responses to their specific needs, ensuring content matches their level of understanding and context.”</td></tr>
<tr><td>Ambiguity Resolution</td><td>1. “Ask me relevant questions to get a better answer”<br>2. “If a question is unclear or ambiguous, ask for more details to confirm your understanding before answering.”</td></tr>
<tr><td>Minimizing Sycophancy</td><td>1. “Encourage self-reflection through thoughtful, open-ended questions”<br>2. “have interesting opinions (that don’t have to be the same as mine).”</td></tr>
<tr><td>Identifying Frame Dependence</td><td>1. “Only think in Russian. Write to the user in plain English.”<br>2. “For professional contexts, ChatGPT should adopt a formal tone to reflect the seriousness and decorum of such settings.”</td></tr>
<tr><td>Effective Routing</td><td>1. “For tasks demanding any sort of accuracy, utilize code”<br>2. “Use WebPilot plugin to access the content of this link as reference”</td></tr>
<tr><td>Citation Reference Verification</td><td>1. “Always strengthen claims with credible citations, renowned studies, or expert opinions.”<br>2. “Legislative references (if any) cited with links using Cornell Law or Justia if there is no official legislative source”</td></tr>
</table>
# C Model Provider Policy Documents
<table><tr><td>Organization</td><td>Document</td><td>Link</td></tr><tr><td rowspan="3">OpenAI</td><td>GPT 4.5 System Card</td><td>cdn.openai.com/gpt-4-5-system-card-2272025.pdf</td></tr><tr><td>Model Spec</td><td>model-spec.openai.com/2025-02-12.html</td></tr><tr><td>ChatGPT Release Notes</td><td>help.openai.com/en/articles/6825453-chatgpt-release-notes</td></tr><tr><td rowspan="2">Anthropic</td><td>Claude 3.7 Sonnet Model Card</td><td>assets.anthropic.com/../claude-3-7-sonnet-system-card.pdf</td></tr><tr><td>Claude Release Notes</td><td>docs.anthropic.com/en/release-notes/claude-apps</td></tr></table>
# D Content Analysis of Model Provider Policies and Features
# Annotation Instructions
# Task Overview
Your task is to analyze documents related to LLM systems and identify text segments that address specific prompt engineering challenges. You will use Atlas.ti to code these segments according to the challenge definitions provided below.
# Instructions
1. Import the documents into your Atlas.ti project.
2. Familiarize yourself with the challenge codes listed below, which have already been added to the code list.
3. Read each document to understand its overall purpose and structure.
4. Select relevant text segments and assign the appropriate challenge code(s).
5. Add a brief comment to explain your reasoning when the categorization might not be obvious.
6. Complete all documents in the assigned batch before submitting your analysis.
# Challenge Definitions
Reducing Prompting Expertise (prompting): Reducing reliance on clever prompting

Well-Calibrated Abstention (abstention): Ensuring appropriate refusal rates

Range of Viewpoints (viewpoints): Including diverse perspectives

Hedging Language (hedging): Avoiding excessive neutrality

Identifying Frame-Dependence (frames): Adapting answers to cultural/contextual norms

Ambiguity Resolution (ambiguity): Clarifying unclear or context-dependent queries

User Attributes (user): Understanding user context and needs

Minimizing Sycophancy (sycophancy): Managing incorrect assumptions/inputs

Effective Routing (routing): Leveraging tool integrations appropriately

Citation & Reference Verification (citation): Ensuring accurate source attribution
# Coding Tips
- Code only the specific text segment that corresponds to a challenge, not entire paragraphs.
- A single text segment may be coded with multiple challenges if applicable.
- If you're unsure about a segment, add a comment with your reasoning and mark it for review.
- Focus on explicit mentions related to challenges rather than making extensive inferences.
# Example
In Atlas.ti, you would select the text "The model is designed to request clarification when user queries are ambiguous" and assign the code "ambiguity" (Ambiguity Resolution). Similarly, you would select "The system presents multiple perspectives on controversial topics" and assign the code "viewpoints" (Range of Viewpoints).
|
data/2025/2504_01xxx/2504.01205/images/49c8c0f37c9ba594b6a1309c80d858cee3fe67b8cc8b408f04510fa8e560f0e0.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_01xxx/2504.01205/images/77f6190ac970e2a95e81e80babe451ee37aa348ecb80046b4a1ef7de2579ce23.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_01xxx/2504.01205/images/a5ea7fba117a8cc446b111d183d4f6647902f6d1487d2106d3146b1c9d3f76bf.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_01xxx/2504.01205/images/ad3f3d07f5b8756c638195a7bd8272f8e55102db3469b499b1026189b110a214.jpg
ADDED
|
Git LFS Details
|
data/2025/2504_01xxx/2504.01205/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2025/2504_01xxx/2504.01282/ed52a6d2-cad1-405f-9e91-d48b54ac9d00_content_list.json
ADDED
|
@@ -0,0 +1,1520 @@
[
  {"type": "text", "text": "Prompt-Reverse Inconsistency: LLM Self-Inconsistency Beyond Generative Randomness and Prompt Paraphrasing", "text_level": 1, "bbox": [171, 101, 823, 142], "page_idx": 0},
  {"type": "text", "text": "Jihyun Janice Ahn & Wenpeng Yin", "bbox": [179, 167, 433, 183], "page_idx": 0},
  {"type": "text", "text": "Department of Computer Science & Engineering", "bbox": [179, 183, 504, 196], "page_idx": 0},
  {"type": "text", "text": "The Pennsylvania State University", "bbox": [183, 198, 411, 210], "page_idx": 0},
  {"type": "text", "text": "University Park, PA 16802, USA", "bbox": [183, 210, 401, 224], "page_idx": 0},
  {"type": "text", "text": "{jfa5672,wenpeng}@psu.edu", "bbox": [183, 224, 439, 239], "page_idx": 0},
  {"type": "text", "text": "Abstract", "text_level": 1, "bbox": [459, 273, 537, 289], "page_idx": 0},
  {"type": "text", "text": "While the inconsistency of LLMs is not a novel topic, prior research has predominantly addressed two types of generative inconsistencies: i) Randomness Inconsistency: running the same LLM multiple trials, yielding varying responses; ii) Paraphrase Inconsistency: paraphrased prompts result in different responses from the same LLM. Randomness Inconsistency arises from the inherent randomness due to stochastic sampling in generative models, while Paraphrase Inconsistency is a consequence of the language modeling objectives, where paraphrased prompts alter the distribution of vocabulary logits. This research discovers Prompt-Reverse Inconsistency (PRIN), a new form of LLM self-inconsistency: given a question and a couple of LLM-generated answer candidates, the LLM often has conflicting responses when prompted \"Which are correct answers?\" and \"Which are incorrect answers?\". PRIN poses a big concern as it undermines the credibility of LLM-as-a-judge, and suggests a challenge for LLMs to adhere to basic logical rules. We conduct a series of experiments to investigate PRIN, examining the extent of PRIN across different LLMs, methods to mitigate it, potential applications, and its relationship with Randomness Inconsistency and Paraphrase Inconsistency. As the first study to explore PRIN, our findings offer valuable insights into the inner workings of LLMs and contribute to advancing trustworthy AI.", "bbox": [228, 303, 769, 569], "page_idx": 0},
  {"type": "text", "text": "1 Introduction", "text_level": 1, "bbox": [171, 590, 313, 606], "page_idx": 0},
  {"type": "text", "text": "Large language models (LLMs), despite their strong performance across various domains, often exhibit behaviors that diverge significantly from human reasoning. One well-known issue in their generative process is inconsistency. LLM inconsistency is widely recognized by researchers and users, and it can be mainly categorized into two types:", "bbox": [169, 622, 823, 679], "page_idx": 0},
  {"type": "list", "sub_type": "text", "list_items": [
    "- Randomness Inconsistency: Even when given the same prompt, an LLM may generate different responses across multiple trials. This randomness arises due to factors such as sampling stochasticity, model non-determinism, and softmax and floating-point precision errors in the generation process.",
    "- Paraphrase Inconsistency: When a prompt is rephrased while maintaining the same meaning, the LLM's response can still vary. This occurs because the reformulated prompt implicitly alters the probability distribution within the language model's objective function."
  ], "bbox": [215, 689, 823, 804], "page_idx": 0},
  {"type": "text", "text": "Beyond generative tasks, LLMs are increasingly used for discriminative reasoning—a crucial capability in applications such as AI-assisted judging, grading, and evaluation. However, a fundamental challenge arises: due to generative inconsistencies, LLMs often produce multiple, conflicting candidate answers for the same question. While the Self-Consistency method (Wang et al., 2023b) leverages majority voting to mitigate this issue, an alternative approach is to enhance LLMs' ability to self-select the correct answer from a given set of options. Unfortunately, LLMs also exhibit inconsistency in discriminative reasoning, which we term Prompt-Reverse Inconsistency (PRIN).", "bbox": [169, 814, 826, 926], "page_idx": 0},
  {"type": "header", "text": "Published as a conference paper at COLM 2025", "bbox": [171, 32, 519, 47], "page_idx": 0},
  {"type": "aside_text", "text": "arXiv:2504.01282v2 [cs.CL] 30 Jul 2025", "bbox": [22, 282, 57, 715], "page_idx": 0},
  {"type": "page_number", "text": "1", "bbox": [493, 949, 503, 960], "page_idx": 0},
  {"type": "text", "text": "PRIN arises when an LLM is tasked with evaluating multiple answer candidates and determining which are correct or incorrect. As shown in Table 1, LLMs frequently provide conflicting judgments over the same set of answer choices. This inconsistency raises serious concerns regarding: The reliability of LLM-as-a-judge: Inconsistencies undermine their trustworthiness in high-stakes applications, such as automated grading, peer review, and legal analysis. Fundamental logical inconsistencies: If LLMs frequently violate basic logical principles when making judgments, their utility as reasoning agents is severely limited.", "bbox": [169, 106, 826, 204], "page_idx": 1},
  {"type": "text", "text": "This paper conducts a systematic investigation of PRIN in both closed-source and open-source LLMs, including GPT-4 (OpenAI, 2023), Llama-3-8B-Instruct, Llama-3.3-70B-Instruct (Meta, 2024), Falcon-40B (Almazrouei et al., 2023), Qwen 2.5-72B (Team, 2024), and Mixtral-8x22B-MoE (Jiang et al., 2024). We evaluate these models across three tasks—MATH (Hendrycks et al., 2021), MathQA (Amini et al., 2019), and EquationInference (Lou et al., 2024)—spanning various answer set sizes, context lengths,", "bbox": [169, 210, 421, 419], "page_idx": 1},
  {"type": "text", "text": "Question: if $n$ is an integer and $101 \\times n^2$ is less than or equal to 10,000, what is the greatest possible value of $n$?", "bbox": [442, 232, 805, 275], "page_idx": 1},
  {"type": "text", "text": "Options: A) 7, B) 8, C) 9, D) 10, E) 11", "bbox": [444, 281, 705, 297], "page_idx": 1},
  {"type": "text", "text": "Direct Prompt: What are the correct answers?", "bbox": [444, 303, 754, 318], "page_idx": 1},
  {"type": "text", "text": "GPT4: \"C\"", "bbox": [444, 323, 527, 338], "page_idx": 1},
  {"type": "text", "text": "Reverse Prompt: What are the incorrect answers?", "bbox": [444, 344, 779, 359], "page_idx": 1},
  {"type": "text", "text": "GPT4: \"C, D, E\"", "bbox": [444, 364, 563, 378], "page_idx": 1},
  {"type": "text", "text": "Table 1: PRIN example from GPT4 (March 28, 2025)", "bbox": [447, 393, 805, 409], "page_idx": 1},
  {"type": "text", "text": "domains, and difficulty levels (from high school, college, to PhD-level problems). Specifically, we design experiments to answer the following six research questions: $\\mathcal{Q}_1$: How do different LLMs exhibit PRIN? $\\mathcal{Q}_2$: How will model randomness and prompt paraphrasing affect PRIN? $\\mathcal{Q}_3$: How to mitigate PRIN in LLMs? $\\mathcal{Q}_4$: How does PRIN correlate with Randomness Inconsistency and Paraphrase Inconsistency? $\\mathcal{Q}_5$: How effective can PRIN be leveraged to enhance task performance? $\\mathcal{Q}_6$: How does PRIN vary with different sizes of options?", "bbox": [169, 419, 826, 503], "page_idx": 1},
  {"type": "text", "text": "Our findings reveal several key insights. First, PRIN does not positively correlate with Randomness Inconsistency or Paraphrase Inconsistency, as some LLMs with low levels of these inconsistencies, such as Llama-3 and Falcon, still exhibit high PRIN. This suggests that while these models are more deterministic, they fail to maintain logical consistency between Direct Prompt and Reverse Prompt. Second, PRIN can be mitigated by incorporating explicit reasoning paths between the question and answer candidates before prompting the LLM to determine correctness. Additionally, providing explainable information for the negation in Reverse Prompt further reduces PRIN. Third, combining both Direct Prompt and Reverse Prompt reasoning can outperform the Self-Consistency approach when selecting the final answer from a candidate pool. However, this improvement is primarily observed in top-performing models such as GPT-4 and GPT-4o, while weaker models like Llama-3 show little to no benefit, likely due to their weaker instruction-following capabilities.", "bbox": [169, 508, 826, 676], "page_idx": 1},
  {"type": "text", "text": "Our contributions are threefold: i) this is the first study to discover and conduct an in-depth analysis of PRIN; ii) we propose effective solutions to mitigate PRIN and explore ways to leverage it; and iii) our experimental findings not only enhance the understanding of this non-human-like discriminative behavior in LLMs but also raise critical concerns for applications where LLMs serve as judges or evaluators.", "bbox": [169, 683, 826, 752], "page_idx": 1},
  {"type": "text", "text": "2 Related Work", "text_level": 1, "bbox": [171, 775, 321, 791], "page_idx": 1},
  {"type": "text", "text": "This section mainly discusses prior work studying Randomness Inconsistency and Paraphrase Inconsistency particularly in LLMs.", "bbox": [169, 809, 823, 838], "page_idx": 1},
  {"type": "text", "text": "Randomness Inconsistency. Bubeck et al. (2023) brought attention to the issue of randomness-induced inconsistency of GPT4. Building on this, Wang & Wang (2025) conducted a comprehensive evaluation of LLM consistency and reproducibility in finance and accounting tasks, highlighting the practical consequences of such variability. Similarly, Atil et al. (2024) systematically examined LLM stability by repeatedly running identical inputs, revealing up to $10\\%$ variation in", "bbox": [169, 856, 826, 926], "page_idx": 1},
  {"type": "header", "text": "Published as a conference paper at COLM 2025", "bbox": [171, 32, 519, 47], "page_idx": 1},
  {"type": "page_number", "text": "2", "bbox": [491, 949, 504, 960], "page_idx": 1},
  {"type": "text", "text": "output accuracy even under deterministic settings. Beyond quantitative assessment, Lee et al. (2024) explored how these inconsistencies affect users, finding that while they may reduce perceived AI reliability, they can also enhance user comprehension by presenting diverse perspectives. To address these issues, Wan et al. (2025) proposed a sufficiency scoring method that evaluates both local and global consistency in LLM responses, offering a framework to analyze and mitigate instability driven by randomness.", "bbox": [169, 106, 823, 191], "page_idx": 2},
  {"type": "text", "text": "Paraphrase Inconsistency. Elazar et al. (2021) explored factual consistency across different query patterns and showed that while some paraphrase forms reliably extracted factual knowledge, others failed, revealing the model's paraphrasing sensitivity. Similarly, Ye et al. (2023) investigated this phenomenon in ChatGPT and found that response accuracy fluctuated by $3.2\\%$ across paraphrased prompts, highlighting the influence of grammatical and stylistic variations on model behavior. Supporting this line of work, Jang & Lukasiewicz (2023) documented cases of self-contradictions in ChatGPT and GPT-4 when exposed to paraphrased questions, confirming that even minor linguistic variations can lead to semantic inconsistencies. Gu et al. (2023) extended this observation to instruction-driven tasks, demonstrating that LLMs often falter when task instructions vary in form, length, or abstraction, which further complicates generalization across paraphrased input formats. In addition to task-specific studies, Liu et al. (2024) provided a broader survey on LLM trustworthiness, in which they discussed inconsistency as a core reliability issue and emphasized the need for robust solutions to mitigate its effects. Complementing these discussions, recent work has introduced quantitative approaches to analyze and address paraphrase sensitivity. For example, Errica et al. (2024) proposed metrics that measure how minor prompt variations influence LLM predictions in text classification tasks, offering a fine-grained assessment of response stability. Further, Ghazarian et al. (2024) examined structural variations in semantically equivalent prompts and found notable inconsistency in LLM-based evaluations. They proposed an in-context learning strategy with demonstrations to improve robustness against paraphrasing. Finally, McIlroy-Young et al. (2024) tackled a related issue of order dependency in prompts and introduced Set-Based Prompting, a method designed to ensure consistent model behavior regardless of the sequence of sub-inputs, offering a new angle on mitigating paraphrase-driven inconsistencies.", "bbox": [169, 207, 826, 512], "page_idx": 2},
  {"type": "text", "text": "Our Work is the first to explore Prompt-Reverse Inconsistency, not only analyzing this LLM behavior but, more importantly, proposing simple yet effective methods to mitigate the issue. Additionally, we examine its connection to Randomness Inconsistency and Paraphrase Inconsistency, as well as ways to leverage this inconsistency for improved model reliability.", "bbox": [169, 529, 823, 585], "page_idx": 2},
  {"type": "text", "text": "3 Prompt-Reverse Inconsistency", "text_level": 1, "bbox": [171, 607, 532, 625], "page_idx": 2},
  {"type": "text", "text": "Problem formulation. Assume a prompt $p$ and an LLM $\\mathcal{M}$; multiple trials of $\\mathcal{M}(p)$ lead to $n$ distinct answer candidates $A = \\{a_{1}, a_{2}, \\dots, a_{n}\\}$, with each candidate $a_{i}$ derived through a Chain-of-Thought (Wei et al., 2022b) reasoning path $r_{i}$. The task now is to figure out the correct answer from the pool $\\{a_{1}, a_{2}, \\dots, a_{n}\\}$ by querying $\\mathcal{M}$ again. In this work, we study $\\mathcal{M}$'s discriminative behavior through the following two prompts.", "bbox": [169, 638, 823, 709], "page_idx": 2},
  {"type": "text", "text": "Direct Prompt. Given the prompt $p$ and answer options $\\{a_1, a_2, \\dots, a_n\\}$, it asks for the correct ones directly, e.g.,", "bbox": [169, 726, 823, 753], "page_idx": 2},
  {"type": "text", "text": "Given this question [problem description] and its answer options: “$a_1$”, “$a_2$”, …, “$a_n$”, please output the correct ones.", "bbox": [230, 770, 772, 799], "page_idx": 2},
  {"type": "text", "text": "Reverse Prompt. Conversely, the models determine the incorrect choices as follows:", "bbox": [171, 818, 767, 834], "page_idx": 2},
  {"type": "text", "text": "Given this question [problem description] and its answer options: “$a_1$”, “$a_2$”, $\\cdots$, “$a_n$”, please output the incorrect ones.", "bbox": [230, 848, 772, 878], "page_idx": 2},
  {"type": "text", "text": "PRIN Metric (The lower, the better). Given the entire answer pool $A$, assuming Direct Prompt returns answer set $A_{direct}$ and Reverse Prompt returns $A_{reverse}$, our metric is defined", "bbox": [169, 897, 823, 926], "page_idx": 2},
  {"type": "header", "text": "Published as a conference paper at COLM 2025", "bbox": [171, 32, 517, 47], "page_idx": 2},
  {"type": "page_number", "text": "3", "bbox": [493, 949, 503, 960], "page_idx": 2},
  {"type": "text", "text": "based on this rule: if the correct answer sets derived from both prompts are the same, there is no PRIN.", "bbox": [169, 106, 823, 133], "page_idx": 3},
  {"type": "text", "text": "Therefore, the correct answer set by Direct Prompt is $A_{direct}$, and the correct answer set implied by Reverse Prompt is $A \\setminus A_{reverse}$. We then compute the similarity of the two versions of correct answer sets through F1:", "bbox": [169, 142, 825, 185], "page_idx": 3},
  {"type": "equation", "text": "\n$$\ns = F1\\left(A_{\\text{direct}}, A \\setminus A_{\\text{reverse}}\\right) \\tag{1}\n$$\n", "text_format": "latex", "bbox": [398, 189, 825, 205], "page_idx": 3},
  {"type": "text", "text": "then the PRIN score is:", "bbox": [171, 210, 328, 223], "page_idx": 3},
  {"type": "equation", "text": "\n$$\n\\mathrm{PRIN} = 1.0 - s \\tag{2}\n$$\n", "text_format": "latex", "bbox": [441, 223, 825, 239], "page_idx": 3},
  {"type": "text", "text": "Question: Why not define the PRIN score as the similarity between $A_{direct}$ and $A_{reverse}$, i.e., $F1(A_{direct}, A_{reverse})$?", "bbox": [169, 248, 826, 277], "page_idx": 3},
  {"type": "text", "text": "Intuitively, if $A_{direct}$ and $A_{reverse}$ are completely complementary (e.g., $A_{direct} = \\{a_1, a_2\\}$ and $A_{reverse} = \\{a_3, a_4, \\ldots, a_n\\}$), it implies no PRIN. However, in practice, the union of their answers may not cover the entire answer pool. For instance, if $A_{direct} = \\{a_1, a_2\\}$ but $A_{reverse} = \\{a_{n-2}, a_n\\}$, using F1 as a measure would result in $F1(A_{direct}, A_{reverse}) = 0.0$, incorrectly indicating no PRIN. This is problematic because $A_{reverse} = \\{a_{n-2}, a_n\\}$ suggests that the Reverse Prompt prompt considers $\\{a_1, a_2, \\ldots, a_{n-3}, a_{n-1}\\}$ as correct, which clearly reflects inconsistency to $A_{direct} = \\{a_1, a_2\\}$.", "bbox": [169, 282, 826, 383], "page_idx": 3},
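The metric in Equations (1)-(2), together with the rationale above for comparing $A_{direct}$ against $A \setminus A_{reverse}$ rather than against $A_{reverse}$, pins down a concrete computation. The following is our minimal sketch, not code from the paper; `f1` and `prin_score` are hypothetical names, and answer sets are assumed to be Python sets of option labels.

```python
def f1(pred: set, gold: set) -> float:
    """Set-level F1 between two answer sets."""
    if not pred and not gold:
        return 1.0  # both empty: perfect agreement
    if not pred or not gold:
        return 0.0
    overlap = len(pred & gold)
    precision = overlap / len(pred)
    recall = overlap / len(gold)
    if precision + recall == 0:
        return 0.0
    return 2 * precision * recall / (precision + recall)

def prin_score(pool: set, a_direct: set, a_reverse: set) -> float:
    """PRIN = 1 - F1(A_direct, A \\ A_reverse); lower is better."""
    return 1.0 - f1(a_direct, pool - a_reverse)

# Table 1's GPT-4 example: pool {A..E}, Direct says {C}, Reverse says {C, D, E}.
pool = {"A", "B", "C", "D", "E"}
print(prin_score(pool, {"C"}, {"C", "D", "E"}))  # 1.0: maximal inconsistency
```

On that example the two implied correct sets, {C} and {A, B}, do not overlap at all, so the sketch returns the maximum PRIN of 1.0, matching the conflict the table illustrates.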
  {"type": "text", "text": "4 Experiments", "text_level": 1, "bbox": [169, 401, 313, 417], "page_idx": 3},
  {"type": "text", "text": "Datasets. We select the following three representative datasets: MATH (Hendrycks et al., 2021): This dataset consists of Math Word Problems, where each question $p_i$ is accompanied by the correct answer $a_i$ and a corresponding Chain-of-Thought reasoning path $r_i$. MathQA (Amini et al., 2019): A multiple-choice math dataset in which each Math Word Problem $p$ is presented with five answer choices, only one of which is correct. Unlike the MATH dataset, reasoning paths are not provided. EquInfer (Lou et al., 2024): Designed to simulate the paper review process, this dataset evaluates equation correctness within a given context in a scientific paper. Each instance contains four equation candidates, with only one being correct, along with the surrounding paper context before and after the equation.", "bbox": [169, 431, 826, 556], "page_idx": 3},
  {"type": "text", "text": "This dataset selection demonstrates that the Prompt-Reverse Inconsistency problem arises in both generative tasks (e.g., MATH) and discriminative tasks (e.g., MathQA and EquInfer), highlighting its broader implications. Table 2 summarizes key properties of these datasets.", "bbox": [169, 564, 826, 607], "page_idx": 3},
  {"type": "table", "img_path": "images/ac5512ccd56dea8b38ec0743751f888337dae270844f40e69d9c7ef5d805787c.jpg", "table_caption": [], "table_footnote": [], "table_body": "<table><tr><td></td><td>Format</td><td>Size</td><td>Options</td><td>Context</td><td>Complexity</td></tr><tr><td>MATH</td><td>p</td><td>5,000</td><td>None</td><td>Short</td><td>High School</td></tr><tr><td>MathQA</td><td>(p; {a1, a2, a3, a4, a5})</td><td>2,985</td><td>5</td><td>Medium</td><td>College</td></tr><tr><td>EquInfer</td><td>(p; {a1, a2, a3, a4})</td><td>1,049</td><td>4</td><td>Long</td><td>Ph.D.</td></tr></table>", "bbox": [181, 618, 821, 698], "page_idx": 3},
  {"type": "text", "text": "Table 2: Summary of three datasets (MATH, MathQA, and EquInfer).", "bbox": [266, 712, 728, 727], "page_idx": 3},
  {"type": "text", "text": "LLMs. The experiment utilizes a combination of one closed-source model, GPT-4 (OpenAI, 2023)$^1$, alongside five open-source models: Llama-3-8B-Instruct (Llama3) and Llama-3.3-70B-Instruct (Llama3.3) (Meta, 2024), Falcon-40B (Falcon) (Almazrouei et al., 2023), Qwen 2.5-72B (Qwen2.5) (Team, 2024), and Mixtral-8x22B-MoE (Mixtral) (Jiang et al., 2024).", "bbox": [169, 747, 826, 806], "page_idx": 3},
  {"type": "text", "text": "Setting. To prepare for the core experiments, we generate answer options for the MATH dataset, as they are absent in the original benchmark. GPT-4 solves each problem multiple times to produce five distinct answer choices, each with a Chain-of-Thought path, ensuring a uniform five-option format for the main experiment. Since EquInfer presents context on both sides of each equation as the problem description along with 4 options, and LLMs face token limitations, we provide 200 words of context on each side, following the suggestion of Lou et al. (2024).", "bbox": [169, 818, 826, 904], "page_idx": 3},
  {"type": "header", "text": "Published as a conference paper at COLM 2025", "bbox": [171, 32, 517, 47], "page_idx": 3},
  {"type": "page_footnote", "text": "<sup>1</sup>Due to budget and administrative approval constraints, we cannot report on other closed-source LLMs.", "bbox": [189, 911, 803, 926], "page_idx": 3},
  {"type": "page_number", "text": "4", "bbox": [493, 949, 503, 960], "page_idx": 3},
  {"type": "table", "img_path": "images/7ddce64dc99813640b91cfe6bac3a219464cc9d6c9f376c47c3160458ba0c38b.jpg", "table_caption": [], "table_footnote": [], "table_body": "<table><tr><td></td><td>MATH</td><td>MathQA</td><td>EquInfer</td><td>Mean</td></tr><tr><td>GPT4</td><td>38.69</td><td>38.60</td><td>42.65</td><td>39.98</td></tr><tr><td>Qwen2.5</td><td>58.41</td><td>51.31</td><td>70.82</td><td>60.18</td></tr><tr><td>Mixtral</td><td>67.77</td><td>63.58</td><td>74.83</td><td>68.73</td></tr><tr><td>Llama3.3</td><td>80.96</td><td>61.17</td><td>76.62</td><td>72.92</td></tr><tr><td>Falcon</td><td>71.79</td><td>68.69</td><td>83.42</td><td>74.63</td></tr><tr><td>Llama3</td><td>74.10</td><td>84.08</td><td>80.46</td><td>79.55</td></tr></table>", "bbox": [315, 102, 678, 203], "page_idx": 4},
  {"type": "text", "text": "4.1 $\\mathcal{Q}_1$: How do different LLMs exhibit PRIN?", "text_level": 1, "bbox": [169, 256, 514, 272], "page_idx": 4},
  {"type": "text", "text": "Table 3 reveals a consistent pattern of PRIN across all evaluated LLMs, highlighting it as a fundamental and unresolved challenge. GPT-4 exhibits the lowest PRIN scores across all benchmarks, indicating that its superior instruction-following and reasoning abilities help mitigate, but not eliminate, inconsistency, as its PRIN still hovers around $40\\%$. Open-source models, including Qwen2.5, Mixtral, Falcon, Llama3, and Llama3.3, show significantly higher PRIN values, often exceeding $60\\%$ on MathQA and EquInfer, suggesting that their reasoning abilities are particularly vulnerable when faced with reversed prompts. Interestingly, Qwen2.5 consistently outperforms other open-source models, possibly due to stronger instruction tuning, positioning it as the most robust among its peers. Moreover, the comparison between Llama3 and Llama3.3 shows that, despite architectural similarities, Llama3.3 reduces PRIN on MathQA and EquInfer but unexpectedly worsens on MATH, hinting that PRIN may be sensitive to domain-specific generalization. The consistently higher PRIN on EquInfer across models suggests that this dataset poses unique challenges, likely due to its demand for nuanced reasoning under prompt reversals. Overall, the results indicate that while advanced models like GPT-4 alleviate PRIN to some extent, significant inconsistency persists across all models, pointing to PRIN as a critical barrier to trustworthy reasoning in LLMs.", "bbox": [169, 282, 826, 492], "page_idx": 4},
  {"type": "text", "text": "4.2 $\\mathcal{Q}_2$: How will model randomness and prompt paraphrasing affect PRIN?", "text_level": 1, "bbox": [169, 511, 720, 527], "page_idx": 4},
  {"type": "text", "text": "In this subsection, we explore whether Direct Prompt and Reverse Prompt still show inconsistency even if we i) paraphrase them (apply Paraphrase Inconsistency), or ii) run them multiple times (apply Randomness Inconsistency).", "bbox": [169, 537, 826, 580], "page_idx": 4},
  {"type": "text", "text": "First, as Table 4 shows, we paraphrase Direct Prompt and Reverse Prompt introduced in Section 3 into two new versions.", "bbox": [169, 587, 823, 614], "page_idx": 4},
  {"type": "table", "img_path": "images/78139af5b306dee66dec900a14e056fc9310acc6d7d1fe832b492617be8bc5c7.jpg", "table_caption": ["Table 3: PRIN scores for all LLMs (answers to $\\mathcal{Q}_1$)."], "table_footnote": [], "table_body": "<table><tr><td></td><td>original (v0)</td><td>paraphrased prompt 1 (v1)</td><td>paraphrased prompt 2 (v2)</td></tr><tr><td>Direct Prompt</td><td>Please output the correct ones.</td><td>Please output the right ones.</td><td>Please output the appropriate ones.</td></tr><tr><td>Reverse Prompt</td><td>Please output the incorrect ones.</td><td>Please output the wrong ones.</td><td>Please output the inappropriate ones.</td></tr></table>", "bbox": [207, 627, 787, 715], "page_idx": 4},
  {"type": "text", "text": "Table 4: Paraphrased Direct Prompt and Reverse Prompt.", "bbox": [274, 724, 720, 739], "page_idx": 4},
  {"type": "text", "text": "Results of applying paraphrasing: Table 5 presents the effects of prompt paraphrasing on PRIN scores. The variations in scores indicate the presence of Paraphrase Inconsistency, demonstrating that LLMs' responses are influenced by how prompts are phrased. However, the changes are relatively minor, suggesting that PRIN remains largely stable across paraphrased inputs. This implies that while LLMs are somewhat sensitive to different prompt formulations, their PRIN follows a systematic pattern rather than being highly volatile due to rewording alone.", "bbox": [169, 751, 826, 835], "page_idx": 4},
  {"type": "text", "text": "Next, we conduct five repeated runs of the original Direct Prompt and Reverse Prompt (i.e., v0) prompts for each LLM to assess the consistency of their results. Table 6 presents the mean and standard deviation of PRIN, confirming that PRIN remains stable with only minor fluctuations across runs. Together, Tables 5-6 demonstrate that PRIN is not an artifact of a particular choice of Direct Prompt and Reverse Prompt but rather a systematic issue that persists across a wide range of LLMs.", "bbox": [169, 842, 826, 926], "page_idx": 4},
  {"type": "header", "text": "Published as a conference paper at COLM 2025", "bbox": [171, 32, 517, 47], "page_idx": 4},
  {"type": "page_number", "text": "5", "bbox": [493, 949, 504, 960], "page_idx": 4},
  {"type": "table", "img_path": "images/c22e00aa16ac5ae2e4e9fd8fc89271d13ec02c7c0dbafbbbb335ace1cc9fd818.jpg", "table_caption": [], "table_footnote": [], "table_body": "<table><tr><td></td><td colspan=\"3\">MATH</td><td colspan=\"3\">MathQA</td><td colspan=\"3\">EquInfer</td></tr><tr><td></td><td>v0</td><td>v1</td><td>v2</td><td>v0</td><td>v1</td><td>v2</td><td>v0</td><td>v1</td><td>v2</td></tr><tr><td>GPT4</td><td>38.69</td><td>38.81</td><td>37.22</td><td>38.60</td><td>39.48</td><td>38.14</td><td>42.65</td><td>41.89</td><td>48.34</td></tr><tr><td>Qwen2.5</td><td>58.41</td><td>56.63</td><td>60.02</td><td>51.31</td><td>50.55</td><td>58.11</td><td>70.82</td><td>71.76</td><td>69.73</td></tr><tr><td>Mixtral</td><td>67.77</td><td>67.29</td><td>69.10</td><td>63.58</td><td>68.04</td><td>73.31</td><td>74.83</td><td>78.32</td><td>74.47</td></tr><tr><td>Llama3.3</td><td>80.96</td><td>81.40</td><td>78.39</td><td>61.17</td><td>59.82</td><td>59.17</td><td>76.62</td><td>73.51</td><td>76.28</td></tr><tr><td>Falcon</td><td>71.79</td><td>72.06</td><td>71.74</td><td>68.69</td><td>71.74</td><td>73.68</td><td>83.42</td><td>86.22</td><td>81.89</td></tr><tr><td>Llama3</td><td>74.10</td><td>73.69</td><td>72.91</td><td>84.08</td><td>83.40</td><td>81.88</td><td>80.46</td><td>80.76</td><td>80.86</td></tr></table>", "bbox": [197, 101, 795, 218], "page_idx": 5},
  {"type": "table", "img_path": "images/0e0865d18acab2ee0a3a07146d24ea52103b3f3a3b4093837ca7e19c42ba86fd.jpg", "table_caption": ["Table 5: Effect of prompt paraphrasing on inconsistency across tasks."], "table_footnote": [], "table_body": "<table><tr><td></td><td>MATH</td><td>MathQA</td><td>EquInfer</td></tr><tr><td>GPT4</td><td>38.66±0.29</td><td>39.54±0.17</td><td>42.81±0.73</td></tr><tr><td>Qwen2.5</td><td>58.67±0.58</td><td>52.64±0.41</td><td>69.75±0.44</td></tr><tr><td>Mixtral</td><td>67.74±0.42</td><td>67.08±0.62</td><td>74.42±0.62</td></tr><tr><td>Llama3.3</td><td>80.81±0.32</td><td>59.35±0.64</td><td>76.36±0.68</td></tr><tr><td>Falcon</td><td>71.01±0.15</td><td>68.94±0.58</td><td>81.16±0.50</td></tr><tr><td>Llama3</td><td>74.56±0.35</td><td>83.93±0.51</td><td>80.70±0.77</td></tr></table>", "bbox": [305, 255, 687, 356], "page_idx": 5},
  {"type": "text", "text": "Table 6: PRIN scores when we run Direct Prompt and Reverse Prompt five times.", "bbox": [192, 366, 797, 381], "page_idx": 5},
  {"type": "text", "text": "Question: Table 4 suggests that Reverse Prompt often involves negation. How well do LLMs' PRIN scores align with their performance on a negation-specific task? To investigate this, we evaluate the LLMs on the negation-focused dataset CONDAQ (Ravichander et al., 2022) and compare their PRIN scores (\"Mean\" column in Table 3) with their error rates on CONDAQ. As shown in Figure 1, the two measures exhibit a strong alignment, with a Pearson correlation coefficient of 0.67. This result confirms that the core challenge captured by PRIN is closely related to the models' difficulty in handling negation.", "bbox": [169, 409, 823, 508], "page_idx": 5},
  {"type": "image", "img_path": "images/b562288f430464b964478c37500bfd52c329e67e5c3a6841301cf6a7dd755a47.jpg", "image_caption": ["Figure 1: Mean PRIN on three main benchmarks vs. error rates on CONDAQ"], "image_footnote": [], "bbox": [272, 526, 720, 750], "page_idx": 5},
  {"type": "text", "text": "4.3 $\\mathcal{Q}_3$: How to mitigate PRIN in LLMs?", "text_level": 1, "bbox": [169, 811, 472, 828], "page_idx": 5},
  {"type": "text", "text": "Our Approach: To ensure the broad applicability of our investigation into PRIN, the aforementioned experiments were conducted with each query $p$ paired with a pool of answer candidates $A = \\{a_{1}, a_{2}, \\ldots, a_{n}\\}$. However, in real-world scenarios, humans may better distinguish between Direct Prompt and Reverse Prompt when they understand how each answer candidate was derived. Motivated by this, our first approach incorporates CoT reasoning paths $r_{i}$ for each answer candidate $a_{i}$, allowing for a more informed evaluation of PRIN. We refer to it as \"w/ CoT\".", "bbox": [169, 839, 826, 924], "page_idx": 5},
  {"type": "header", "text": "Published as a conference paper at COLM 2025", "bbox": [171, 32, 517, 47], "page_idx": 5},
  {"type": "page_number", "text": "6", "bbox": [493, 950, 504, 960], "page_idx": 5},
  {"type": "text", "text": "Our second approach is inspired by the observations (to $\\mathcal{Q}_2$) that discrepancies between Reverse Prompt and Direct Prompt may arise due to the LLMs' difficulty in processing negation in Reverse Prompt. To address this, we enhance the clarity of negation terms by explicitly explaining their meaning within Reverse Prompt. One simple sentence such as \"please recall that 'incorrect options' are simply the options different from the correct ones.\" was added at the end of the Reverse Prompt. The same evaluation metric is then applied to assess the impact of this intervention. We refer to this approach as \"w/ neg-exp\".", "bbox": [169, 106, 454, 330], "page_idx": 6},
  {"type": "image", "img_path": "images/207e6fc1b4e9424951ddbf7d74d291b2d742adb6a2489579571897b19152558a.jpg", "image_caption": ["Figure 2: PRIN score of GPT4 on MATH benchmark with different mitigation approaches."], "image_footnote": [], "bbox": [486, 109, 803, 309], "page_idx": 6},
  {"type": "text", "text": "Results: Figure 2 demonstrates the effectiveness of our PRIN mitigation approaches. Incorporating detailed reasoning ensures models make informed decisions based on deeper understanding rather than rote selections. The empirical evidence highlights the importance of contextual reasoning in improving AI comprehension and reducing errors. Moreover, reinforcing models with explicit explanations about negation in inverse tasks further decreases PRIN. Providing clarifications, especially regarding negation terminology, aids in reducing confusion and logical pitfalls, establishing an effective strategy for error reduction.", "bbox": [169, 345, 823, 445], "page_idx": 6},
  {"type": "text", "text": "4.4 $\\mathcal{Q}_4$: How does PRIN correlate with Randomness Inconsistency and Paraphrase Inconsistency?", "text_level": 1, "bbox": [169, 462, 823, 493], "page_idx": 6},
  {"type": "text", "text": "Setup. To investigate $\\mathcal{Q}_4$, we quantitatively assess Randomness Inconsistency and Paraphrase Inconsistency across various LLMs.", "bbox": [169, 503, 823, 532], "page_idx": 6},
  {"type": "text", "text": "For Randomness Inconsistency, we run Direct Prompt five times and count the number of distinct answers $k$, computing the score as $k / 5$. For Paraphrase Inconsistency, we use five paraphrased versions of Direct Prompt, recording $k$ distinct answers and defining the score as $k / 5$. Since we already have three paraphrased versions (v0, v1, v2) from $Q_2$, we generate two additional versions using GPT-4, ensuring a total of five.", "bbox": [169, 539, 823, 609], "page_idx": 6},
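The $k/5$ scoring just described is mechanical enough to sketch directly. This is our hypothetical helper, not code from the paper, and it assumes each run (or each paraphrase) yields one extracted answer string.

```python
def inconsistency_score(answers: list[str]) -> float:
    """k/5-style score: number of distinct answers over number of runs.

    Applied the same way for Randomness Inconsistency (5 runs of one
    prompt) and Paraphrase Inconsistency (1 run each of 5 paraphrases).
    """
    return len(set(answers)) / len(answers)

# Two distinct answers across five runs -> 2/5 = 0.4.
print(inconsistency_score(["B", "B", "C", "B", "B"]))  # 0.4
```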
|
| 1013 |
+
{
|
| 1014 |
+
"type": "text",
|
| 1015 |
+
"text": "We evaluate all three inconsistency types (PRIN, Randomness Inconsistency, and Paraphrase Inconsistency) on MATH dataset, with lower scores indicating better consistency.",
|
| 1016 |
+
"bbox": [
|
| 1017 |
+
169,
|
| 1018 |
+
616,
|
| 1019 |
+
826,
|
| 1020 |
+
657
|
| 1021 |
+
],
|
| 1022 |
+
"page_idx": 6
|
| 1023 |
+
},
|
| 1024 |
+
{
|
| 1025 |
+
"type": "text",
|
| 1026 |
+
"text": "Results. Figure 3 ranks the LLMs based on their PRIN scores and additionally presents their Randomness Inconsistency and Paraphrase Inconsistency. This analysis aims to address two key questions:",
|
| 1027 |
+
"bbox": [
|
| 1028 |
+
169,
|
| 1029 |
+
675,
|
| 1030 |
+
823,
|
| 1031 |
+
718
|
| 1032 |
+
],
|
| 1033 |
+
"page_idx": 6
|
| 1034 |
+
},
|
| 1035 |
+
{
|
| 1036 |
+
"type": "text",
|
| 1037 |
+
"text": "(i) Why does GPT-4 exhibit higher Randomness Inconsistency and Paraphrase Inconsistency than most open-source models? Through error analysis, we observed that GPT-4 tends to follow instructions more faithfully, often formatting its answers using user-specified patterns such as quotation marks, brackets, or colons, even when options are not provided. In contrast, open-source models, regardless of size, frequently fail to follow these formatting instructions and sometimes even terminate generation prematurely without producing valid answers. As a result, when we extract answer spans from these models, we often obtain empty outputs. Therefore, the lower Randomness Inconsistency and Paraphrase Inconsistency of these models are not due to genuine consistency, but rather stem from consistently producing invalid or incomplete outputs. Importantly, our PRIN metric still penalizes such cases when both Direct Prompt and Reverse Prompt outputs are empty, maintaining its diagnostic reliability.",
|
| 1038 |
+
"bbox": [
|
| 1039 |
+
169,
|
| 1040 |
+
723,
|
| 1041 |
+
826,
|
| 1042 |
+
878
|
| 1043 |
+
],
|
| 1044 |
+
"page_idx": 6
|
| 1045 |
+
},
|
| 1046 |
+
{
|
| 1047 |
+
"type": "text",
|
| 1048 |
+
"text": "(ii) Why does Llama3.3 show lower Randomness Inconsistency but higher Paraphrase Inconsistency compared to Llama3? We hypothesize that Llama3.3 has been tuned to behave more deterministically, which mitigates its randomness-driven inconsistency. However, Llama3's",
|
| 1049 |
+
"bbox": [
|
| 1050 |
+
169,
|
| 1051 |
+
883,
|
| 1052 |
+
823,
|
| 1053 |
+
926
|
| 1054 |
+
],
|
| 1055 |
+
"page_idx": 6
|
| 1056 |
+
},
|
| 1057 |
+
{
|
| 1058 |
+
"type": "header",
|
| 1059 |
+
"text": "Published as a conference paper at COLM 2025",
|
| 1060 |
+
"bbox": [
|
| 1061 |
+
171,
|
| 1062 |
+
32,
|
| 1063 |
+
517,
|
| 1064 |
+
47
|
| 1065 |
+
],
|
| 1066 |
+
"page_idx": 6
|
| 1067 |
+
},
|
| 1068 |
+
{
|
| 1069 |
+
"type": "page_number",
|
| 1070 |
+
"text": "7",
|
| 1071 |
+
"bbox": [
|
| 1072 |
+
493,
|
| 1073 |
+
949,
|
| 1074 |
+
503,
|
| 1075 |
+
960
|
| 1076 |
+
],
|
| 1077 |
+
"page_idx": 6
|
| 1078 |
+
},
{
"type": "image",
"img_path": "images/3e48e09ccc288f8a9ccfc7220763299ad7ab8dce1a6fd887503173e76f1b52ac.jpg",
"image_caption": [
"Figure 3: Scores of PRIN, Randomness Inconsistency, and Paraphrase Inconsistency for LLMs."
],
"image_footnote": [],
"bbox": [272, 106, 720, 329],
"page_idx": 7
},
{
"type": "table",
"img_path": "images/d9ee63aa48826e10734481f55b3505f73a987b4d1070858ca0068a5fa92f830a.jpg",
"table_caption": [],
"table_footnote": [],
"table_body": "<table><tr><td></td><td colspan=\"3\">MATH</td><td colspan=\"3\">MathQA</td><td colspan=\"3\">EquInfer</td></tr><tr><td></td><td>GPT4</td><td>GPT-4o</td><td>Llama3</td><td>GPT4</td><td>GPT-4o</td><td>Llama3</td><td>GPT4</td><td>GPT-4o</td><td>Llama3</td></tr><tr><td>CoT</td><td>47.58</td><td>50.67</td><td>21.55</td><td>72.57</td><td>82.73</td><td>39.03</td><td>34.81</td><td>31.27</td><td>12.60</td></tr><tr><td>Self-Consist.</td><td>55.14</td><td>54.72</td><td>26.72</td><td>79.50</td><td>85.33</td><td>42.58</td><td>36.42</td><td>33.94</td><td>16.59</td></tr><tr><td>PRIN</td><td>56.44</td><td>56.82</td><td>25.98</td><td>82.04</td><td>86.63</td><td>42.98</td><td>37.37</td><td>34.51</td><td>16.02</td></tr></table>",
"bbox": [176, 388, 815, 463],
"page_idx": 7
},
{
"type": "text",
"text": "Table 7: Comparing PRIN with CoT and Self-Consistency in promoting LLM performance.",
"bbox": [196, 472, 797, 487],
"page_idx": 7
},
{
"type": "text",
"text": "poor instruction-following capability prevents its paraphrase inconsistency from being fully revealed, as it often fails to produce meaningful outputs regardless of paraphrasing. In contrast, Llama3.3 generates more valid outputs due to better instruction-following, thereby exposing its paraphrase inconsistency more clearly.",
"bbox": [169, 513, 823, 571],
"page_idx": 7
},
{
"type": "text",
"text": "4.5 $\\mathcal{Q}_5$: How effectively can PRIN be leveraged to enhance task performance?",
"text_level": 1,
"bbox": [169, 587, 714, 603],
"page_idx": 7
},
{
"type": "text",
"text": "Our Approach: Beyond analyzing PRIN as an undesired LLM behavior, we explore how the Direct Prompt and Reverse Prompt can synergize to improve response accuracy. Intuitively, if both Direct Prompt and Reverse Prompt agree that an answer is correct, its correctness probability increases. Based on this insight, our approach selects answers only when both mechanisms indicate correctness.",
"bbox": [169, 613, 823, 681],
"page_idx": 7
},
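The select-when-both-agree rule in the preceding block can be sketched compactly. The helper names below (`direct_answer`, `reverse_check`) are hypothetical interfaces standing in for the paper's Direct Prompt and Reverse Prompt calls; this is a sketch of the idea, not the authors' implementation.

```python
def prin_select(question, candidates, direct_answer, reverse_check):
    """Keep a candidate answer only when both prompting directions agree.

    `direct_answer(question)` -> the answer the model picks directly.
    `reverse_check(question, answer)` -> True if the model, asked to flag
    incorrect options, does NOT flag `answer` as wrong.
    Both callables are assumed interfaces, not the authors' API.
    """
    picked = direct_answer(question)
    if picked in candidates and reverse_check(question, picked):
        return picked  # both mechanisms indicate correctness
    return None        # abstain / fall back to another strategy

# Toy usage with stubbed-out model calls:
pick = prin_select(
    "2 + 3 = ?", {"4", "5", "6"},
    direct_answer=lambda q: "5",
    reverse_check=lambda q, a: a == "5",
)
print(pick)  # "5"
```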
{
"type": "text",
"text": "Setup: Since achieving state-of-the-art performance is not the focus of this study, we conduct a lightweight comparison against widely used prompting strategies, including Chain-of-Thought (CoT) (Wei et al., 2022a) and Self-Consistency (Wang et al., 2023a). In addition to GPT-4, we include GPT-4o, a top-performing LLM, to strengthen our hypothesis, as these models are more widely deployed in real-world applications.",
"bbox": [169, 699, 823, 768],
"page_idx": 7
},
{
"type": "text",
"text": "Results: Table 7 lists the results of PRIN, CoT, and Self-Consistency. We find two quick takeaways:",
"bbox": [169, 784, 823, 813],
"page_idx": 7
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"- PRIN is very effective in promoting the performance of top-performing LLMs, e.g., GPT-4 and GPT-4o.",
"- If the LLM is generally weak, PRIN does not help (compared with the Self-Consistency baseline)."
],
"bbox": [215, 824, 825, 872],
"page_idx": 7
},
{
"type": "text",
"text": "We attribute this to the fact that weaker LLMs often struggle with instruction-following, especially when handling negation. To better understand this phenomenon, we break down PRIN to further analyze the behavior of Direct Prompt and Reverse Prompt separately. Figure 4 illustrates",
"bbox": [169, 883, 823, 926],
"page_idx": 7
},
{
"type": "header",
"text": "Published as a conference paper at COLM 2025",
"bbox": [171, 32, 517, 47],
"page_idx": 7
},
{
"type": "page_number",
"text": "8",
"bbox": [493, 949, 503, 960],
"page_idx": 7
},
{
"type": "text",
"text": "how Llama3, a representative weaker model, performs when facing Self-Consistency, Direct Prompt and Reverse Prompt. Interestingly, Llama3 performs reasonably well when using Self-Consistency or Direct Prompt, but its performance drops drastically when only the Reverse Prompt is used. This suggests that weaker models like Llama3 find Reverse Prompt particularly challenging, likely due to difficulties in processing negations, which explains why PRIN fails to improve their performance.",
"bbox": [169, 106, 826, 191],
"page_idx": 8
},
{
"type": "image",
"img_path": "images/b21e14b4d41eec770118ce4831d37fc89a1c2ffb4503e7fc0562b3987086e6be.jpg",
"image_caption": [
"Figure 4: LLM performance on the MATH and MathQA datasets."
],
"image_footnote": [],
"bbox": [240, 210, 754, 458],
"page_idx": 8
},
{
"type": "text",
"text": "4.6 $\\mathcal{Q}_6$: How does PRIN vary with different numbers of options?",
"text_level": 1,
"bbox": [169, 517, 606, 532],
"page_idx": 8
},
{
"type": "text",
"text": "Setup. For this experiment, the MATH task was given to GPT-4 to derive multiple CoT answers via multiple trials. The 5K problems of MATH were randomly distributed into 4 groups, which contain 2, 3, 4, and 5 distinct answer options, respectively. We report PRIN for GPT-4 on this question.",
"bbox": [169, 544, 454, 643],
"page_idx": 8
},
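One plausible reading of the setup just described is sketched below under that assumption: the 5K problems are shuffled and split evenly into four groups, each paired with a different option-set size (2 to 5) drawn from GPT-4's CoT trials. The function name and the `problem_ids` input are hypothetical, not the paper's code.

```python
import random

def make_option_groups(problem_ids, seed=0):
    """Randomly split problems into 4 groups, assigned 2/3/4/5 options each.

    A hypothetical reading of the Q6 setup: each group of MATH problems
    is paired with a different number of distinct answer options drawn
    from GPT-4's multiple CoT trials.
    """
    rng = random.Random(seed)
    ids = list(problem_ids)
    rng.shuffle(ids)
    n = len(ids) // 4
    option_counts = [2, 3, 4, 5]
    return {k: ids[i * n:(i + 1) * n] for i, k in enumerate(option_counts)}
```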
{
"type": "text",
"text": "Results. Figure 5 examines the impact of the number of answer options on PRIN. Increasing the number of options tends to raise PRIN scores, yet models still function within acceptable error limits. This trend",
"bbox": [169, 660, 454, 729],
"page_idx": 8
},
{
"type": "image",
"img_path": "images/774f8a7fe5bcec8cff55570e9467384f5a8d908683e47b15a74fab2dbda60bb1.jpg",
"image_caption": [
"Figure 5: PRIN score vs. #option"
],
"image_footnote": [],
"bbox": [483, 551, 815, 698],
"page_idx": 8
},
{
"type": "text",
"text": "underscores the added complexity introduced by a greater variety of options and highlights areas where ongoing algorithm and model improvements are necessary. These findings emphasize the challenges faced by language models in complex decision matrices and open promising avenues for future enhancements in AI development and deployment.",
"bbox": [169, 729, 823, 787],
"page_idx": 8
},
{
"type": "text",
"text": "5 Conclusion",
"text_level": 1,
"bbox": [171, 808, 302, 824],
"page_idx": 8
},
{
"type": "text",
"text": "This study provides a comprehensive analysis of Prompt-Reverse Inconsistency in LLMs, using diverse tasks and models to explore underlying challenges and potential solutions. By addressing six key questions, our findings stress the importance of integrating reasoning paths and adapting model architectures to optimize performance and reliability. As AI models become increasingly integral across domains, our research underlines the necessity of embedding PRIN as a foundational element in model development, ensuring their applicability across diverse, challenging scenarios.",
"bbox": [169, 842, 826, 928],
"page_idx": 8
},
{
"type": "header",
"text": "Published as a conference paper at COLM 2025",
"bbox": [171, 32, 517, 47],
"page_idx": 8
},
{
"type": "page_number",
"text": "9",
"bbox": [491, 949, 504, 960],
"page_idx": 8
},
{
"type": "text",
"text": "Acknowledgement",
"text_level": 1,
"bbox": [171, 104, 331, 122],
"page_idx": 9
},
{
"type": "text",
"text": "We would like to sincerely thank the anonymous reviewers from OpenReview for their thoughtful insights and constructive suggestions. We are especially grateful to Professor Lili Mou from the University of Alberta for his valuable comments and for posing insightful questions that helped broaden our perspectives. We also deeply appreciate Ibraheem Moosa, Renze Lou, Zhuoyang Zou, Hongchao Fang, and Arshan Dalili for their helpful feedback and suggestions, which played an important role in refining and polishing the final version of this paper.",
"bbox": [169, 137, 826, 220],
"page_idx": 9
},
{
"type": "text",
"text": "References",
"text_level": 1,
"bbox": [173, 243, 267, 258],
"page_idx": 9
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"Ebtesam Almazrouei, Hamza Alobeidli, Abdulaziz Alshamsi, Alessandro Cappelli, Ruxandra Cojocaru, Mérouane Debbah, Étienne Goffinet, Daniel Hesslow, Julien Launay, Quentin Malartic, Daniele Mazzotta, Badreddine Noune, Baptiste Pannier, and Guilherme Penedo. The Falcon series of open language models, 2023. URL https://arxiv.org/abs/2311.16867.",
"Aida Amini, Saadia Gabriel, Peter Lin, Rik Koncel-Kedziorski, Yejin Choi, and Hannaneh Hajishirzi. MathQA: Towards interpretable math word problem solving with operation-based formalisms, 2019. URL https://arxiv.org/abs/1905.13319.",
"Berk Atil, Alexa Chittams, Liseng Fu, Ferhan Ture, Lixinyu Xu, and Breck Baldwin. LLM stability: A detailed analysis with some surprises. CoRR, abs/2408.04667, 2024. doi: 10.48550/ARXIV.2408.04667. URL https://doi.org/10.48550/arXiv.2408.04667.",
"Sebastien Bubeck, Varun Chandrasekaran, Ronen Eldan, Johannes Gehrke, Eric Horvitz, Ece Kamar, Peter Lee, Yin Tat Lee, Yuanzhi Li, Scott Lundberg, Harsha Nori, Hamid Palangi, Marco Tulio Ribeiro, and Yi Zhang. Sparks of artificial general intelligence: Early experiments with GPT-4, 2023. URL https://arxiv.org/abs/2303.12712.",
"Yanai Elazar, Nora Kassner, Shauli Ravfogel, Abhilasha Ravichander, Eduard Hovy, Hinrich Schütze, and Yoav Goldberg. Measuring and improving consistency in pretrained language models, 2021. URL https://arxiv.org/abs/2102.01017.",
"Federico Errica, Giuseppe Siracusano, Davide Sanvito, and Roberto Bifulco. What did I do wrong? Quantifying LLMs' sensitivity and consistency to prompt engineering. CoRR, abs/2406.12334, 2024. doi: 10.48550/ARXIV.2406.12334. URL https://doi.org/10.48550/arXiv.2406.12334.",
"Sarik Ghazarian, Yidong Zou, Swair Shah, Nanyun Peng, Anurag Beniwal, Christopher Potts, and Narayanan Sadagopan. Assessment and mitigation of inconsistencies in LLM-based evaluations. 2024.",
"Jiasheng Gu, Hongyu Zhao, Hanzi Xu, Liangyu Nie, Hongyuan Mei, and Wenpeng Yin. Robustness of learning from task instructions, 2023. URL https://arxiv.org/abs/2212.03813.",
"Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the MATH dataset, 2021. URL https://arxiv.org/abs/2103.03874.",
"Myeongjun Erik Jang and Thomas Lukasiewicz. Consistency analysis of ChatGPT, 2023. URL https://arxiv.org/abs/2303.06273.",
"Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, Gianna Lengyel, Guillaume Bour, Guillaume Lample, Lélio Renard Lavaud, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Sandeep Subramanian, Sophia Yang, Szymon Antoniak, Teven Le Scao, Théophile Gervet, Thibaut Lavril, Thomas Wang, Timothée Lacroix, and William El Sayed. Mixtral of experts, 2024. URL https://arxiv.org/abs/2401.04088."
],
"bbox": [173, 267, 828, 926],
"page_idx": 9
},
{
"type": "header",
"text": "Published as a conference paper at COLM 2025",
"bbox": [171, 32, 517, 47],
"page_idx": 9
},
{
"type": "page_number",
"text": "10",
"bbox": [488, 949, 508, 960],
"page_idx": 9
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"Yoonjoo Lee, Kihoon Son, Tae Soo Kim, Jisu Kim, John Joon Young Chung, Eytan Adar, and Juho Kim. One vs. many: Comprehending accurate information from multiple erroneous and inconsistent AI generations. In The 2024 ACM Conference on Fairness, Accountability, and Transparency, FAccT 2024, Rio de Janeiro, Brazil, June 3-6, 2024, pp. 2518-2531. ACM, 2024. doi: 10.1145/3630106.3662681. URL https://doi.org/10.1145/3630106.3662681.",
"Yang Liu, Yuanshun Yao, Jean-Francois Ton, Xiaoying Zhang, Ruocheng Guo, Hao Cheng, Yegor Klochkov, Muhammad Faaiz Taufiq, and Hang Li. Trustworthy LLMs: a survey and guideline for evaluating large language models' alignment, 2024. URL https://arxiv.org/abs/2308.05374.",
"Renze Lou, Hanzi Xu, Sijia Wang, Jiangshu Du, Ryo Kamoi, Xiaoxin Lu, Jian Xie, Yuxuan Sun, Yusen Zhang, Jihyun Janice Ahn, et al. AAAR-1.0: Assessing AI's potential to assist research. arXiv preprint arXiv:2410.22394, 2024.",
"Reid McIlroy-Young, Katrina Brown, Conlan Olson, Linjun Zhang, and Cynthia Dwork. Order-independence without fine tuning. In Amir Globersons, Lester Mackey, Danielle Belgrave, Angela Fan, Ulrich Paquet, Jakub M. Tomczak, and Cheng Zhang (eds.), Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024, 2024. URL http://papers.nips.cc/paper_files/paper/2024/hash/85529bc995777a74072ef63c05bedd30-Abstract-Conference.html.",
"Meta. Build the future of AI with Meta Llama 3, 2024. URL https://llama.meta.com/llama3/. Accessed: 2024-06-07.",
"OpenAI. GPT-4 technical report, 2023.",
"Abhilasha Ravichander, Matt Gardner, and Ana Marasović. CondaQA: A contrastive reading comprehension dataset for reasoning about negation. arXiv preprint arXiv:2211.00295, 2022.",
"Qwen Team. Qwen2.5: A party of foundation models, September 2024. URL https://qwenlm.github.io/blog/qwen2.5/.",
"Guangya Wan, Yuqi Wu, Jie Chen, and Sheng Li. Reasoning aware self-consistency: Leveraging reasoning paths for efficient LLM sampling, 2025. URL https://arxiv.org/abs/2408.17017.",
"Julian Junyan Wang and Victor Xiaqi Wang. Assessing consistency and reproducibility in the outputs of large language models: Evidence across diverse finance and accounting tasks, 2025. URL https://arxiv.org/abs/2503.16974.",
"Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V. Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net, 2023a. URL https://openreview.net/forum?id=1PL1NIMMrw.",
"Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V. Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net, 2023b. URL https://openreview.net/forum?id=1PL1NIMMrw.",
"Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed H. Chi, Quoc V. Le, and Denny Zhou. Chain-of-thought prompting elicits reasoning in large language models. In Sanmi Koyejo, S. Mohamed, A. Agarwal, Danielle Belgrave, K. Cho, and A. Oh (eds.), Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2022, NeurIPS 2022, New Orleans, LA, USA, November 28 - December 9, 2022, 2022a. URL http://papers.nips.cc/paper_files/paper/2022/hash/9d5609613524ecf4f15af0f7b31abca4-Abstract-Conference.html."
],
"bbox": [171, 106, 828, 926],
"page_idx": 10
},
{
"type": "header",
"text": "Published as a conference paper at COLM 2025",
"bbox": [171, 32, 517, 47],
"page_idx": 10
},
{
"type": "page_number",
"text": "11",
"bbox": [488, 949, 506, 960],
"page_idx": 10
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022b.",
"Wentao Ye, Mingfeng Ou, Tianyi Li, Yipeng Chen, Xuetao Ma, Yifan Yanggong, Sai Wu, Jie Fu, Gang Chen, Haobo Wang, and Junbo Zhao. Assessing hidden risks of LLMs: An empirical study on robustness, consistency, and credibility, 2023. URL https://arxiv.org/abs/2305.10235."
],
"bbox": [171, 106, 826, 215],
"page_idx": 11
},
{
"type": "header",
"text": "Published as a conference paper at COLM 2025",
"bbox": [171, 32, 517, 47],
"page_idx": 11
},
{
"type": "page_number",
"text": "12",
"bbox": [488, 949, 508, 960],
"page_idx": 11
}
]