diff --git a/.gitattributes b/.gitattributes index 6c2be77a4b73c5e6df2e711d53c966080064d05d..df8c311c1e1fd41d7e196d87a0473c28b10aeee6 100644 --- a/.gitattributes +++ b/.gitattributes @@ -3078,3 +3078,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text 2025/WiCkeD_[[:space:]]A[[:space:]]Simple[[:space:]]Method[[:space:]]to[[:space:]]Make[[:space:]]Multiple[[:space:]]Choice[[:space:]]Benchmarks[[:space:]]More[[:space:]]Challenging/bf7da2a6-e19c-4145-af83-91a11e265552_origin.pdf filter=lfs diff=lfs merge=lfs -text 2025/WinSpot_[[:space:]]GUI[[:space:]]Grounding[[:space:]]Benchmark[[:space:]]with[[:space:]]Multimodal[[:space:]]Large[[:space:]]Language[[:space:]]Models/1bdd66fd-4b90-40c0-b56c-7487b424be1b_origin.pdf filter=lfs diff=lfs merge=lfs -text 2025/Zero-Shot[[:space:]]Text-to-Speech[[:space:]]for[[:space:]]Vietnamese/836fe461-bc2f-496a-902d-624f10ca9fa9_origin.pdf filter=lfs diff=lfs merge=lfs -text +2025/Chumor[[:space:]]2.0_[[:space:]]Towards[[:space:]]Better[[:space:]]Benchmarking[[:space:]]Chinese[[:space:]]Humor[[:space:]]Understanding[[:space:]]from[[:space:]](Ruo[[:space:]]Zhi[[:space:]]Ba)/5abdc7b1-9bd7-4584-b650-ffb46a145cb6_origin.pdf filter=lfs diff=lfs merge=lfs -text +2025/CipherBank_[[:space:]]Exploring[[:space:]]the[[:space:]]Boundary[[:space:]]of[[:space:]]LLM[[:space:]]Reasoning[[:space:]]Capabilities[[:space:]]through[[:space:]]Cryptography[[:space:]]Challenge/f6a4ee7e-a5b2-4a68-bf9b-6d717be3f8fe_origin.pdf filter=lfs diff=lfs merge=lfs -text +2025/CitaLaw_[[:space:]]Enhancing[[:space:]]LLM[[:space:]]with[[:space:]]Citations[[:space:]]in[[:space:]]Legal[[:space:]]Domain/81aca763-e861-40de-ad3f-640af6cf3d30_origin.pdf filter=lfs diff=lfs merge=lfs -text +2025/ClaimPKG_[[:space:]]Enhancing[[:space:]]Claim[[:space:]]Verification[[:space:]]via[[:space:]]Pseudo-Subgraph[[:space:]]Generation[[:space:]]with[[:space:]]Lightweight[[:space:]]Specialized[[:space:]]LLM/08384e56-2d5c-4ecb-b64c-ecb74bcfc53b_origin.pdf filter=lfs diff=lfs merge=lfs 
-text diff --git a/2025/Chumor 2.0_ Towards Better Benchmarking Chinese Humor Understanding from (Ruo Zhi Ba)/5abdc7b1-9bd7-4584-b650-ffb46a145cb6_content_list.json b/2025/Chumor 2.0_ Towards Better Benchmarking Chinese Humor Understanding from (Ruo Zhi Ba)/5abdc7b1-9bd7-4584-b650-ffb46a145cb6_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..edb043b4f82763551a78ad19b9a9c665f3e6d132 --- /dev/null +++ b/2025/Chumor 2.0_ Towards Better Benchmarking Chinese Humor Understanding from (Ruo Zhi Ba)/5abdc7b1-9bd7-4584-b650-ffb46a145cb6_content_list.json @@ -0,0 +1,2494 @@ +[ + { + "type": "text", + "text": "Chumor 2.0: Towards Better Benchmarking Chinese Humor Understanding from 弱智吧 (Ruo Zhi Ba)", + "text_level": 1, + "bbox": [ + 184, + 89, + 816, + 130 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ruiqi He Yushu He Longju Bai Jiarui Liu Zhenjie Sun Zenghao Tang He Wang Hanchen Xia Rada Mihalcea Naihao Deng", + "bbox": [ + 105, + 149, + 892, + 187 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{\\text{©}}$ University of Michigan Carnegie Mellon University Shanghai Jiaotong University {ruiqih, dnaiahao}@umich.edu", + "bbox": [ + 132, + 189, + 865, + 223 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 260, + 260, + 342, + 275 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Existing humor datasets and evaluations predominantly focus on English, leaving limited resources for culturally nuanced humor in non-English languages like Chinese. To address this gap, we construct Chumor, the first and the largest Chinese humor explanation dataset. Chumor is sourced from Ruo Zhi Ba (RZB, 弱智吧), a Chinese Reddit-like platform known for sharing intellectually challenging and culturally specific jokes. 
We test ten LLMs through direct and chain-of-thought prompting, revealing that Chumor poses significant challenges to existing LLMs, with their accuracy slightly above random and far below human. In addition, our analysis highlights that human-annotated humor explanations are significantly better than those generated by GPT-4o and ERNIE $_{4\\text{-turbo}}$ . We release Chumor at https://huggingface.co/datasets/MichiganNLP/Chumor, our project page is at https://github.com/MichiganNLP/Chumor-2.0, our leaderboard is at https://huggingface.co/spaces/MichiganNLP/Chumor-leaderboard, and our codebase is at https://github.com/MichiganNLP/Chumor-2.0.", + "bbox": [ + 141, + 286, + 460, + 657 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 114, + 665, + 260, + 681 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Humor is an intrinsic human trait that touches the core of our social and emotional lives, making it a rich field of study across various disciplines (Lefcourt, 2001; Mihalcea and Strapparava, 2005; Gelkopf et al., 2011; Hessel et al., 2023). With the advent of Large Language Models (LLMs), researchers have evaluated LLMs' performance on diverse tasks (Liu et al., 2023a; Deng et al., 2024; Wu et al., 2023) and observed LLMs' extraordinary performance on many (Zhang et al., 2024b). In contrast, researchers have observed that LLMs still fail to understand humor (Ghanadian et al., 2023). However, with all these studies on humor,", + "bbox": [ + 112, + 690, + 490, + 898 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "most evaluations remain in English (Radev et al., 2016; Hasan et al., 2019). 
This presents a significant gap, particularly for non-English languages like Chinese, where culturally nuanced humor understanding is unexamined.", + "bbox": [ + 507, + 261, + 884, + 341 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this paper, we try to bridge this gap by constructing Chumor, a funny and challenging Chinese humor understanding dataset sourced from Ruo Zhi Ba (RZB, \"弱智吧\" in Chinese), a Chinese version of Reddit platform known for sharing intellectually challenging and culturally specific jokes. This platform provides a set of unique Chinese jokes that incorporate the subtleties and intricacies of Chinese humor. Table 1 provides examples of the jokes from RZB. In addition, Bai et al. (2024) reveal that tuning LLMs on RZB data yields the best performance on Chinese reasoning tasks compared to other data sources, highlighting the significant value of jokes from RZB.", + "bbox": [ + 507, + 342, + 884, + 565 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Unlike existing datasets that focus on tasks such as humor detection, punchline identification, or humor generation, Chumor addresses the challenge of humor explanation. This involves not just identifying humor but understanding the reasoning behind it, a task that requires both linguistic and cultural knowledge. Specifically, Chumor tasks the LLMs with determining whether an explanation fully explains the joke. We source the explanations from GPT-4o and ERNIE $_{4\\text{-turb}}$ , and have the entire dataset manually annotated by five native Chinese speakers. We evaluate ten LLMs from various model families, and reveal that all models perform poorly, lagging significantly behind humans on Chumor. We observe that chain-of-thought prompting does not necessarily improve models performance and can sometimes confuse their reasoning process. 
In addition, we conduct a case study in which one of the authors annotates the entire dataset, followed by A/B testing conducted by six native Chinese speakers to compare explanations from GPT-4o versus human, and", + "bbox": [ + 507, + 568, + 884, + 921 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "†Corresponding author of this work.", + "bbox": [ + 134, + 906, + 357, + 920 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "21799", + "bbox": [ + 473, + 927, + 527, + 940 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "Findings of the Association for Computational Linguistics: ACL 2025, pages 21799-21818 July 27 - August 1, 2025 ©2025 Association for Computational Linguistics", + "bbox": [ + 220, + 945, + 778, + 973 + ], + "page_idx": 0 + }, + { + "type": "table", + "img_path": "images/06e7b6d86f185e1a78ecc5847f0e155e0b76ce83047bf818ac8e09a4b96a9106.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Cultural
Desc.Require knowledge of specific historical, social, or linguistic contexts.
Ex.(zh)小明在正月接发竟导致舅舅复活。 (en) Xiaoming got hair extensions during the first lunar month, which astonishingly brought his uncle back to life.
Situational
Desc.Involve humor derived from specific contexts, irony, or narrative setups.
Ex.(zh)真可怕, 犯罪嫌疑人就在我们之中,被告席上一名法警对另一名法警说。 (en)“Terrifying, the criminal suspect is right between the two of us,” said one bailiff to another in the defendant's dock.
Pun-based
Desc.Build on linguistic ambiguity and wordplay, require models to identify dual meanings.
Ex.(zh)你可以在steam上找到GTA,所以水是DNA。 (en) You can find GTA on Steam, so water is DNA.
Homophobic
Desc.Rely on phonetic similarities between words or phrases to create humor.
Ex.(zh)家里的猪油没了,小明只能把植物油倒快点当猪油用了。 (en) With the lard gone, Xiaoming had to pour the vegetable oil quickly to use it like lard.
Glyph-based
Desc.Exploit the structural or visual elements of Chinese characters to create humor.
Ex.(zh)我把電串難題简化了,现在是电车难题。 (en) I simplified the trolley problem (in traditional Chinese), now it's the trolley problem (in simplified Chinese).
Cross-lingual
Desc.Involve humor derived from linguistic or phonetic interplay across multiple languages.
Ex.(zh)曹操于城楼上问夏侯惇:“你瞧到了什么。”夏侯惇说:“瞧到马岱。” (en) Cao Cao, from atop the city tower, asked Xia Houdun, “What did you see?” Xia Houdun replied, “I saw Ma Dai.”
", + "bbox": [ + 117, + 80, + 880, + 370 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Table 1: Different types of jokes. Descriptions (Desc.) explain humor mechanisms. Examples (Ex.) illustrate each category. The corresponding explanations can be found in the referenced figures from the rightmost column.", + "bbox": [ + 112, + 378, + 882, + 409 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "ERNIE $_{4}$ -turbo versus human. Our results indicate that human-annotated joke explanations are significantly better than those produced by GPT-4o or ERNIE $_{4}$ -turbo (Figure 4), with LLMs yielding winning rates of only $6.2\\%$ for GPT-4o and $5.3\\%$ for ERNIE $_{4}$ -turbo compared to humans.", + "bbox": [ + 112, + 432, + 487, + 529 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, our contributions are threefold:", + "bbox": [ + 131, + 531, + 460, + 545 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. We construct Chumor, a funny and challenging Chinese humor understanding dataset, which is the largest Chinese humor explanation dataset.", + "2. We evaluate ten LLMs on Chumor and reveal the significant challenges Chumor possesses. We highlight that the best accuracy achieved by LLMs is $60.3\\%$ , significantly lower than human's score of $78.3\\%$ .", + "3. We demonstrate that chain-of-thought prompting can hurt LLM's performance in humor reasoning, and that human-annotated joke explanations are significantly better than those produced by GPT-4o and ERNIE $_{4\\text{-turbo}}$ , urging future research on culturally specific humor understanding." + ], + "bbox": [ + 114, + 561, + 489, + 814 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Related Works", + "text_level": 1, + "bbox": [ + 112, + 829, + 278, + 845 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Humor Datasets. 
Humor analysis in natural language processing (NLP) encompasses a wide range of tasks, each focused on different aspects of humor. For instance, researchers have proposed datasets", + "bbox": [ + 112, + 857, + 489, + 921 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "such as “16000 One-Liners” (Mihalcea and Strapparava, 2005), “Pun of the Day” (Yang et al., 2015), and “Ted Laughter” (Chen and Lee, 2017) focused on humor detection to determine whether a given text is humorous or not. Datasets such as “Big Bang Theory” (Bertero and Fung, 2016) aim at pinpointing the punchline in a joke. Tasks for assessing humor intensity include humor level rating, comparison, and ranking. For example, datasets like HumorNorm (Engelthaler and Hills, 2018) and #Hashtag Wars (Potash et al., 2017) quantify humor scores and compare comedic elements, while UR-Funny ranks punchlines based on their perceived impact. Datasets such as “Humicroedit” (Hossain et al., 2019), “ $C^3$ ” (Wang et al., 2022), and “Talk-Funny” (Chen et al., 2024) focus on humor generation, the task of generating or rewriting humorous texts. In addition, we present a comprehensive overview of the existing datasets related to humor in Table 2. We highlight that most existing datasets are in English. Chinese humor, on the other hand, is less explored. Our dataset, Chumor is the first humor explanation dataset in Chinese.", + "bbox": [ + 507, + 432, + 884, + 803 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Culturally Specific Datasets. Recent works underscore the challenges of culturally specific reasoning in LLMs (Shen et al., 2024; AlKhamissi et al., 2024; Pawar et al., 2024; Vayani et al., 2024). 
These challenges stem from the overrepresentation of Western-centric knowledge and translation ar", + "bbox": [ + 507, + 825, + 884, + 921 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "21800", + "bbox": [ + 475, + 927, + 524, + 940 + ], + "page_idx": 1 + }, + { + "type": "table", + "img_path": "images/311d87fe6c6608fd64bf6fb5b29a7b6a5c1a0c90713e0bb0c6f3d448674e59e7.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetSourcesLan.#(k)Tasks
One Liners (2005)Weben16HR
Pun of the Day (2015)Weben4.8HR PD
Big Bang Theory (2016)TVen44PD
Ted Laughter (2017)TEDen9.4HR PD
#HashtagWars (2017)TVen13HC
HumorNorm (2018)\\( CS^† \\)en5HC
UR-FUNNY (2019)TEDen17PD
Humicroedit (2019)Redditen15HG
rJokes (2020)Redditen57HC
Memotion (2020)Memesen9.8HC
MUMOR (2021)TVen zh30HR
NYT-Captions (2023)NYTen0.7 2.6HE HC
\\( C^3 \\) (2022)Bookszh9.3HG
TalkFunny (2024)Appszh4.1HG
TCHD (2023)-zh26HR HC PD
TTWS (2019)Bookszh9.1PD
CHM (2020)Apps Webzh3.3HC
Memeplate (2022)Apps Webzh5.2HC
Chumor (us)Webzh3.3HE
", + "bbox": [ + 115, + 80, + 482, + 420 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Table 2: Existing datasets related to humor. For the shorthands in the table, abbreviations represent the following tasks, HR: humor recognition; PD: punchline detection; HC: humor comparison; HG: humor generation; HE: humor explanation †: Crowd-source.", + "bbox": [ + 112, + 429, + 489, + 502 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "tifacts, which limit the fairness and effectiveness of multilingual evaluations (Mihalcea et al., 2024). Researchers have proposed various culturally specific datasets such as Global-MMLU (Singh et al., 2024) to evaluate LLMs' cultural knowledge. Chumor adds to this line of effort as it involves rich knowledge specific to Chinese culture.", + "bbox": [ + 112, + 527, + 489, + 642 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Chumor Dataset", + "text_level": 1, + "bbox": [ + 112, + 657, + 292, + 671 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Data Collection. We construct our dataset by including RZB jokes from \"Best Annual Threads\" between 2018 and 2021 that have been previously crawled†. In addition, we directly collect all threads in the \"Moderator's Recommendation\" section from RZB. Each thread in RZB consists of \"标题\"(title),\"一楼\"(content), and several \"跟帖\"(follow-up posts). For threads from Best Annual Threads, the jokes are listed in the follow-up posts, which are selected by the forum moderator. For threads from Moderator's Recommendation, the jokes consist of the title and the content of each thread. We remove the content if it repeats the title.", + "bbox": [ + 110, + 684, + 489, + 894 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Data Cleaning. We store both the title and the content of the raw data. 
However, due to the posting restrictions of the platform requiring non-empty content, many posts contain meaningless placeholder texts such as “:”, “!”, “0”, “RT”, and others. We automatically identify and remove these patterns, and only keep the title which is the joke itself. Due to the length limitations on the original platform, many post titles are truncated from the beginning parts of the content. We identify these instances and replace the truncated title with the complete content to get the joke. We also remove duplicates that appear both in the “Moderator’s Recommendation” and the “Best Annual Posts”.", + "bbox": [ + 507, + 84, + 884, + 309 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We manually remove the threads related to forum management and rules, threads that include excessively offensive content, threads with incomplete content, and threads that focus more on philosophical insight rather than humor.", + "bbox": [ + 507, + 310, + 884, + 388 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Humor Explanation Classification. We design a humor explanation classification task that can be easily used to test LLMs' capabilities in humor understanding. Specifically, we use two LLMs, GPT-4o and ERNIE $_{4}$ -turbo to generae explanations for our collected jokes. We manually annotate the generated explanations as either \"fully explain the joke\" (good) or \"partially explain or not explain the joke\" (bad) based on a majority vote among five of the authors who are native Chinese speakers. Each joke, along with its explanation, forms an individual instance in Chumor, leading to a total of 3,339 instances. Among these, 1,454 items are labeled as good and 1,887 as bad explanations.", + "bbox": [ + 507, + 399, + 884, + 625 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Data Examples from Chumor. 
We present examples from Chumor in Table 3.", + "bbox": [ + 507, + 633, + 882, + 665 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Humor Categorization in Chumor. We categorize the jokes in RZB into six types in Table 1, with an example provided for each type.", + "bbox": [ + 507, + 674, + 882, + 722 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 507, + 734, + 655, + 751 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Models. We test ten LLMs, five from the open-source LLM families and five from the closed-source LLM families, all capable of handling Chinese. Specifically, we include the open-source LLMs of $\\mathbf{Yi}_{34\\mathbf{B}}$ (01.ai, 2024) from 01.AI, Nemotron70B (NVIDIA, 2024) from NVIDIA, Athene70B (Nexusflow, 2024) from Nexusflow, Qwen2.572B (Qwen, 2024) from Alibaba, Mistral123B (Mistral.ai, 2024) from Mistral AI, alongside the closed-source LLMs of", + "bbox": [ + 507, + 760, + 884, + 920 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "$\\dagger$ https://github.com/Leymore/ruozhiba", + "bbox": [ + 134, + 904, + 371, + 920 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "21801", + "bbox": [ + 475, + 927, + 522, + 940 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/2c3b843aeb95b336940e223eafe1b6d59325b6430b91c765731cba7115b7b969.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
JokeExplanationLabelSource
(zh) “你自己掰了一千吨玉米?”“哈哈我瞎掰的”(zh) 这个笑话的幽默之处在于对话中的双关语和出人意料的回答。“掰了一千吨玉米”看似是一个惊人的壮举,而“哈哈我瞎掰的”则瞬间揭露了这其实是一个夸张的玩笑,这种巧妙的反转和语言的戏谑带来了幽默感。goodERNIE4-turbo
(en) “You shucked a thousand tons of corn all by yourself?”“Haha, let's just say there's been plenty of shuckin' going on.”(en) The humor in this joke lies in the pun and the unexpected response in the conversation. “Shucked a thousand tons of corn” seems like an extraordinary achievement, but the reply, ‘haha, let's just say there's been plenty of shuckin' going on,” instantly reveals it to be an exaggerated joke. This clever twist and playful use of language create the comedic effect.
(zh) 小明突然出现在我面前以至于我突然出现在小明面前。(zh) 这个笑话的幽默之处在于它利用了语言的对称性和意外性。小明突然出现导致叙述者的惊讶反应,而叙述者的惊讶反应又反过来让小明感到意外,形成了一个有趣的循环。badGPT-4o
(en) Xiaoming suddenly appeared in front of me, causing me to suddenly appear in front of him.(en) The humor in this joke lies in its use of linguistic symmetry and unexpectedness. Xiao Ming's sudden appearance triggers a surprised reaction from the narrator, which in turn surprises Xiao Ming, creating an amusing loop.
", + "bbox": [ + 117, + 80, + 878, + 262 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Table 3: Examples from Chumor. The second example's explanation is bad because the joke does not \"creating an amusing loop\". Instead, it relies on linguistic symmetry and the use of a straightforward fact to subvert expectations. The audience anticipates an unexpected outcome due to the setup, but the latter part \"suddenly appear in front of him\" flips the perspective by stating the straightforward fact that because Xiao Ming is in front of the person so the person is in front of Xiao Ming too.", + "bbox": [ + 112, + 280, + 884, + 353 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Gemini $_{1.5-pro}$ (Google, 2024) from Google, GLM $_{4\\text{plus}}$ (BigModel, 2024) from Tsinghua University, GPT-4 $_{\\text{turbo}}$ , GPT-4o (OpenAI, 2023, 2024) from OpenAI, ERNIE $_{4\\text{turbo}}$ (Baidu, 2024) from Baidu. For all the open-source LLMs, we use the instruction-tuned version in our evaluation.", + "bbox": [ + 112, + 376, + 487, + 474 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Evaluation Methods. 
We evaluate these LLMs using two prompting methods: direct prompting (DP) by", + "bbox": [ + 112, + 485, + 485, + 533 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Direct Prompting (DP)", + "text_level": 1, + "bbox": [ + 124, + 546, + 305, + 562 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "你将看到一个笑话以及对这个笑话的解释。请判断这个解释是否完全解释了笑话。根据判断,选择“完全解释”或“部分/没有解释”,不需要解释为什么对或者不对。", + "bbox": [ + 122, + 577, + 477, + 642 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "笑话:[joke]", + "bbox": [ + 124, + 643, + 226, + 659 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "笑话解释:[explanation]", + "bbox": [ + 124, + 659, + 317, + 675 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Translation", + "text_level": 1, + "bbox": [ + 253, + 677, + 347, + 689 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "You will see a joke and an explanation of the joke. Please determine whether this explanation fully explains the joke. Based on your judgment, choose either \"fully explain\" or \"partially/does not explain.\" You do not need to explain why it is correct or incorrect.", + "bbox": [ + 122, + 690, + 478, + 785 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Joke: [joke]", + "bbox": [ + 124, + 789, + 215, + 804 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Explanation: [explanation]", + "bbox": [ + 124, + 804, + 322, + 820 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "and chain-of-thought (CoT) prompting (Wei et al., 2022) by adding the phrase “请逐步思考,写下过程”“Please think step by step, write down your reasoning process” before determining the label. Appendix F provides the complete prompts. We cal", + "bbox": [ + 112, + 841, + 489, + 921 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "culate accuracy scores as part of our evaluation. In addition, we provide the false positive rate (FPR), false negative rate (FNR), and Matthews Correlation Coefficient (MCC) in Appendix H in Table 4. 
The MCC score considers true positives, true negatives, false positives, and false negatives, providing a score between -1 and +1. A score of +1 indicates perfect predictions, 0 reflects random guessing, and -1 means complete disagreement. The best MCC score achieved by LLMs is 0.29, which is close to random guessing, and is significantly lower than the human average of 0.60.", + "bbox": [ + 505, + 378, + 884, + 570 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "5 Results and Discussions", + "text_level": 1, + "bbox": [ + 507, + 582, + 747, + 596 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Overall Model Performance. Figure 1 presents the accuracy of different LLMs on Chumor in DP and CoT settings. Appendix H presents additional results and analysis.", + "bbox": [ + 507, + 607, + 882, + 670 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Overall, we observe that all models perform poorly on Chinese humor comprehension, with accuracy scores ranging between $44.6\\%$ and $60.3\\%$ . ERNIE $_{4\\text{-}\\text{turbo}}$ and Gemini $_{1.5\\text{-}\\text{pro}}$ achieve the highest accuracy of $60.3\\%$ , and are just 10 points above the random baseline and far below human performance of $78.3\\%$ , highlighting the difficulty of Chumor and the limitations of these LLMs in understanding Chinese humor.", + "bbox": [ + 507, + 671, + 882, + 815 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Error Analysis by Joke Type. To better understand how LLMs perform on each joke type listed in Table 1, we sample 200 jokes for error analysis. Figure 2 and Figure 17 in Appendix H present the results. 
The distribution of joke types can be found in Appendix G Figure 15.", + "bbox": [ + 507, + 825, + 882, + 921 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "21802", + "bbox": [ + 475, + 927, + 524, + 940 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/c82242bc9d064b5b1e48e0527762f6fc5982d614add9b8428f192d0426397d89.jpg", + "image_caption": [ + "Figure 1: The accuracy of different models' test results in the DP and CoT settings. ERNIE $_{4}$ -turbo and Gemini $_{1.5\\text{-pro}}$ achieve the highest accuracy of $60.3\\%$ ." + ], + "image_footnote": [], + "bbox": [ + 115, + 80, + 485, + 407 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We highlight that model performance varies significantly across different joke types. While models generally perform well on Situational jokes, achieving $60.0\\%$ to $70.0\\%$ accuracy in both DP and CoT settings, their performance difference on other joke types is more pronounced. For instance, GLM-4plus achieves $65.0\\%$ accuracy on Homophonic jokes in the DP setting, whereas $\\mathrm{Yi}_{34\\mathrm{B}}$ only reaches $30.0\\%$ . Nematron70\\mathrm{B}\\ performs well on Cultural jokes in the CoT setting with $72.0\\%$ accuracy, but Athene70\\mathrm{B}\\ and ERNIE4-turbo achieve with only $43.0\\%$ and $42.0\\%$ , respectively. Such performance variance highlights LLMs' varied capabilities in specific domains such as cultural reasoning and situational reasoning, revealing the respective limitations of these LLMs.", + "bbox": [ + 112, + 485, + 489, + 741 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.1 Have LLMs achieved human-level understanding of humor?", + "text_level": 1, + "bbox": [ + 112, + 755, + 428, + 785 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Answer: No. 
To compare the performance of LLMs with humans, we conduct a human study involving three Chinese native speakers unfamiliar with this work to annotate a randomly chosen subset of 200 examples. Human annotators demonstrate significantly better performance, with an average accuracy of $78.3\\%$ and an MCC score of 0.60, significantly better than the LLMs' best per", + "bbox": [ + 112, + 791, + 489, + 921 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/1751194a5476ee5570e3e726c0ef4c756014c6ce34f4a9fb82ab296870d8687f.jpg", + "image_caption": [ + "Figure 2: DP accuracy on different joke types $(\\%)$ . Here, we sample 200 jokes for error analysis. We highlight that model performance varies significantly across different joke types." + ], + "image_footnote": [], + "bbox": [ + 512, + 80, + 880, + 677 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "formance of $60.3\\%$ accuracy and an MCC score of 0.29 (Figure 16 in Appendix H). Our results indicate that there is a large room of performance improvement for LLMs on Chinese humor understanding.", + "bbox": [ + 507, + 777, + 884, + 857 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.2 Does chain-of-thought (CoT) help LLMs' humor understanding?", + "text_level": 1, + "bbox": [ + 507, + 885, + 878, + 917 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "21803", + "bbox": [ + 475, + 927, + 524, + 940 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/058cd55e0d496028949dec0f92033e6cc636eeefdccebf3034d38befd6668549.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Example(zh)“大人有大量,小孩倒是不多了。”食人族厨师清点食材说。 \n(en)“Adults are plenty, but children are quite scarce,” said the cannibal chef while counting ingredients.
Joke Explanation(zh)这个笑话的幽默之处在于其运用了惊悚与诙谐的巧妙结合,以及意料之外的语言转折。食人族厨师清点食材的场景已经足够诡异,但他却以“大人有大量,小孩倒是不多了”这样似乎很平常的口吻来评论,这种对比和反差产生了荒诞的幽默感,同时也在探讨一种颠覆性的、黑暗式的玩笑。 \n(en)The humor in this joke lies in its clever combination of horror and wit, as well as the unexpected linguistic twist. The scene of a cannibal chef counting ingredients is already eerie, but the casual remark, "Adults are plenty, but children are quite scarce," delivered in an ordinary tone, creates a stark contrast. This juxtaposition generates absurd humor while exploring a subversive, dark style of comedy.
Model's Answer DP✓(zh)部分/没有解释 \n(en)Partially/Not ExplainedModel's Answer CoT X (zh) ...这句话使用了一个常用的谚语“大人有大量”来双关...比较您的理解和解释:笑话解释涵盖了我的分析要点,指出了对比和反差产生的荒诞幽默感,并提到了黑暗式幽默。解释完全涵盖了笑话的所有幽默点,因此选择“完全解释”。 \n(en) ...This sentence uses a common proverb, "adults are plenty," as a pun... Comparison of your understanding and the explanation: The joke explanation covers the key points of my analysis, highlighting the absurd humor created by contrast and reversal, and mentioning the element of dark humor. The explanation fully addresses all aspects of the joke's humor, so “Fully Explained” is selected.
CoT Failure ReasonsObserving model's explanation shows it recognizes the pun in the joke. However, the joke explanation being evaluated misses the pun. Model correctly identifies this in DP but, in CoT, despite noticing the pun, attempts to justify the flawed explanation, leading to an incorrect judgment.
", + "bbox": [ + 112, + 80, + 884, + 318 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Figure 3: Over-analyzing example by GPT-4o. The GPT-4o model chooses the correct answer in the DP prompting, but chooses the incorrect answer due to over-analyzing in the CoT prompting.", + "bbox": [ + 112, + 325, + 882, + 356 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Answer: No. We observe that CoT does not necessarily improve model performance and, in most cases, even leads to performance decay. For instance, as shown in Figure 1, the accuracy of $\\mathrm{ERNIE}_{4}$ -turbo decreases from $60.3\\%$ to $45.2\\%$ when we switch to CoT prompting, Mistral $_{123B}$ 's performance drops from $55.6\\%$ to $51.2\\%$ , GPT-4o's performance drops from $51.9\\%$ to $50.6\\%$ , GPT-4turbo's performance falls from $52.3\\%$ to $51.3\\%$ . Moreover, the MCC scores present a clearer trend of performance decline under CoT prompting. As shown in Figure 16 in Appendix H, eight of the ten LLMs' MCC scores decrease under CoT prompting. We hypothesize that CoT prompts may not help the model's reasoning when the model lacks a fundamental grasp of humor understanding.", + "bbox": [ + 112, + 373, + 487, + 630 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We observe that under CoT prompting, models like GPT-4o tend to justify incorrect explanations as \"correct\", leading to an increase in false-positive rate from $80.0\\%$ for DP prompting to $85.0\\%$ for CoT prompting (Table 4 in Appendix H). $\\mathrm{ERNIE}_{4}$ -turbo exhibits the largest false-positive rate, rising from $59.8\\%$ to $96.9\\%$ (Table 4 in Appendix H). Figure 3 provides an example where CoT confuses the GPT-4o model. Under the DP prompting, the GPT-4o model chooses the answer correctly. 
However, CoT prompting causes the model to over-analyze and justify an incorrect explanation.", + "bbox": [ + 112, + 631, + 489, + 839 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "On the other hand, models like Nematron $_{70}$ may be overly critical of explanations under CoT prompting, resulting in a false-negative rate from $20.9\\%$ for DP prompting to $46.1\\%$ for CoT prompting (Table 4 in Appendix H). We highlight that a", + "bbox": [ + 112, + 841, + 489, + 921 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "recent work demonstrates that CoT can degrade performance in tasks requiring subtle comprehension (Sprague et al., 2024), which aligns with our findings on its limitations in humor interpretation. Figure 14 in Appendix E discusses an example corresponding to the model being overly critical.", + "bbox": [ + 507, + 373, + 884, + 470 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.3 Case study: can GPT-4o and ERNIE $_{4}$ -turbo explain jokes as well as humans?", + "text_level": 1, + "bbox": [ + 507, + 486, + 880, + 517 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Answer: No. Apart from testing multiple LLMs on Chumor, we conduct case studies on GPT-4o and ERNIE $_{4}$ -turbo to assess the quality of their joke explanations compared to humans. We prompt them to explain the humor in two sentences, consistent with the format of human explanations. Here is the prompt we feed to both LLMs:", + "bbox": [ + 507, + 525, + 882, + 637 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Prompt", + "text_level": 1, + "bbox": [ + 517, + 653, + 584, + 669 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "请用两句话解释这个笑话的幽默之处: [joke]", + "bbox": [ + 519, + 684, + 872, + 715 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Please explain the joke in two sentences: [joke]", + "bbox": [ + 519, + 718, + 872, + 734 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Data Annotation. 
As demonstrated by Hessel et al. (2023), crowd-sourcing typically cannot produce high-quality explanations. Following Hessel et al. (2023), one of the authors annotates all the explanations to ensure the quality and consistency.",
The preference annotations achieve a $61.4\\%$ agreement rate among annotators (Appendix B).",
+ ], + "image_footnote": [], + "bbox": [ + 531, + 80, + 863, + 269 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "is more prone to errors categorized as \"cultural unawareness\" (29.5% of all its explanations) compared to $\\mathrm{ERNIE}_{4\\text{-}\\text{turbo}}$ (10.5%). We suspect that $\\mathrm{ERNIE}_{4\\text{-}\\text{turbo}}$ is more familiar with Chinese culture as it is likely trained on a larger Chinese corpus than GPT-4o. However, GPT-4o performs better on cases requiring an understanding of contexts or puns, suggesting its strong reasoning ability. We provide three error cases for GPT-4o here and additional cases for both GPT-4o and $\\mathrm{ERNIE}_{4\\text{-}\\text{turbo}}$ in Appendix E. In the following examples in Figure 6, Figure 7 and Figure 8, we highlight key phrases that induce humor in green, and underscore the errors in red.", + "bbox": [ + 507, + 399, + 884, + 623 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Error Type I: Cultural Unawareness. LLMs may fail to explain a joke due to a lack of awareness of certain cultural knowledge. For instance, the example in Figure 6 requires knowledge of a superstitious belief in Chinese culture, getting a haircut in the first lunar month brings death to your uncle, and the explanation from GPT-4o fails to connect to this Chinese cultural belief. We hypothesize that while LLMs are pre-trained on Internet-scale corpora, such culturally specific knowledge can still be challenging for them to grasp. Moreover, even when they have acquired such cultural knowledge, they may fail to relate to it as we humans do during the reasoning process.", + "bbox": [ + 507, + 636, + 882, + 860 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Error Type II: Pun-based Error. 
LLMs may fail to identify multiple meanings of a single word within a joke, causing them to fail on pun-based", + "bbox": [ + 507, + 873, + 882, + 921 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "†The total length of our explanations surpasses the Chinese version of The Great Gatsby (100k Chinese characters), and is about half the length of the Chinese version of Wuthering Heights (325k Chinese characters).", + "bbox": [ + 112, + 846, + 487, + 894 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "This is about the same length of the Chinese version of Wuthering Heights (325k Chinese characters).", + "bbox": [ + 112, + 894, + 487, + 921 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "21805", + "bbox": [ + 475, + 927, + 524, + 940 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/fd22924f367bbc40b6bc117f3e8567c7a93fee0bc5f6c0abaee35136a3313460.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Example(zh) 小明在正月接发竟导致舅舅复活。 (en) Xiaoming got hair extensions during the first lunar month, which astonishingly brought his uncle back to life.
Correct Humor Explanation“Getting a haircut in the first lunar month will bring death to your uncle.” is a popular superstitious saying in Chinese culture. In this joke, Xiao Ming gets hair extensions in the first month, which reverses the original logic and absurdly results in “bringing his uncle back to life.”
GPT-4o's Answer(zh) ...它利用了“接发”和“接福”的双关语:一方面是指理发店的接发服务,另一方面是指正月里“接福”的传统习俗。 (en) ...the pun on “接发”: it refers to both hair extension at a salon and the traditional practice of “receiving blessings” during the first lunar month.
Failure ReasonsFail to grasp expressions, slang, or specific usages unique to a particular cultural context.
", + "bbox": [ + 114, + 83, + 485, + 343 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/1759ba788f746021f1cf6f5f2ae3d9f39cebde49087d16cf36f2a1bc41a2179e.jpg", + "table_caption": [ + "Figure 6: Culture unawareness example." + ], + "table_footnote": [], + "table_body": "
Example(zh) 你可以在steam上找到GTA,所以水是DNA。
(en) You can find GTA on Steam,so water is DNA.
Correct \nHumor \nExplanationNormally,“Steam”refers to a gaming platform and “GTA”refers to the “Grand Theft Auto” game series. The joke uses a pun, where “steam”literally means the gaseous form of water, and “G”,“T”,and “A” represent Guanine, Thymine, and Adenine, respectively—three of the four nucleotides (AGTC) that make up DNA.
GPT-4o's \nAnswer(zh)...这种明显错误的类比制造了荒诞的效果,引人发笑。
(en)...This clearly wrong analogy creates an absurd effect.
Failure \nReasonsFail to discover multiple meanings of a word in the jokes
", + "bbox": [ + 115, + 384, + 485, + 627 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "jokes where humor lies in inverting the conventional usage of words. In Figure 7, GPT-4o fails to grasp the transition from the video game terms \"Steam\", \"GTA\" to the scientific terminologies \"G\", \"T\", \"A\" that form DNA. Typically, \"Steam\" refers to a game platform, and \"GTA\" refers to the game series \"Grand Theft Auto\". The joke employs a pun on words where \"steam\" in its literal sense means water vapor, and \"GTA\" can represent not only the video game, but guanine, thymine, and adenine, which are nucleotides involved in the structure of DNA. Such jokes require LLMs to identify puns and the reason for the association of the multiple meanings. Furthermore, the process requires LLMs to bridge the logic gap between these terms, such as", + "bbox": [ + 110, + 680, + 489, + 921 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/8ceb2e56d703fdfeff32a52c4d61cd642d04b0aedff70427b09f0f3851329a20.jpg", + "table_caption": [ + "Figure 7: Pun-based error example." + ], + "table_footnote": [], + "table_body": "
Example(zh) 家里的猪油没了,小明只能把 植物油倒快点 当猪油用了。 (en) With the lard gone, Xiaoming had to pour the vegetable oil quickly to use it like lard.
Correct \nHumor \nExplanationWhen pronounced quickly,“vegetable” (pronounced as chih woo) sounds very similar to “pig” (pronounced as choo) in Chinese, leading Xiaoming to comically try using vegetable oil as lard by pouring it quickly.
GPT-4o's \nAnswer(zh) ...小明误解了猪油和植物油的区别,认为只要倒得快,植物油就能变成猪油。笑点在于他天真的逻辑和对常识的搞笑误解。 (en) ... Xiaoming misunderstood that if vegetable oil is poured quickly, it can turn into lard, highlighting his naive logic and misinterpretation of common sense.
Failure \nReasonsFail to exploit homophones or near-homophones in the jokes.
", + "bbox": [ + 509, + 83, + 882, + 338 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Figure 8: Homophonic error example.", + "bbox": [ + 564, + 348, + 823, + 363 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "\"steam\" and \"GTA\", and an unusual context, like \"water is DNA\". The overall process requires both scientific knowledge and creative thinking, which are challenging for LLMs.", + "bbox": [ + 505, + 382, + 882, + 447 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Error Type III: Homophonic Error. The example in Figure 8 requires LLMs to reason over the pronunciation as “植物” (pronounced as chih woo, meaning “vegetable”) sounds very similar to “猪” (pronounced as choo, meaning “pig”) in Chinese when we speak it fast enough. The humor arises from the contrast between the similarity in pronunciation and the disparity in meaning between the two terms. Such contrasts may be sparse in the training corpus of LLMs, and also demand a deep connection across different modalities to link pronunciation with the meaning behind these terms, which poses significant challenges to LLMs.", + "bbox": [ + 507, + 460, + 884, + 670 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6 Conclusion", + "text_level": 1, + "bbox": [ + 507, + 684, + 640, + 699 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We introduce Chumor, a Chinese humor understanding dataset that captures intellectually challenging and culturally specific humor in Chinese. Our analysis reveals that Chumor remains difficult even for advanced LLMs, with a significant performance gap between LLMs and humans. Furthermore, we find that chain-of-thought reasoning does not improve LLMs' humor comprehension and, in some cases, leads to over-analysis and incorrect interpretations. Additionally, models such as GPT-4o and ERNIE $_{4}$ -turbo struggle to explain jokes as effectively as humans, highlighting fundamental challenges in humor reasoning. 
These findings un", + "bbox": [ + 505, + 712, + 884, + 921 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "21806", + "bbox": [ + 475, + 927, + 524, + 940 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "derscore the unique difficulties that Chinese humor presents to LLMs. We hope that Chumor can advance non-English humor research and contribute to evaluating LLMs' reasoning abilities across diverse cultural backgrounds.", + "bbox": [ + 112, + 84, + 492, + 167 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Limitations", + "text_level": 1, + "bbox": [ + 114, + 177, + 220, + 192 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We try our best to test the Chinese humor understanding ability of different LLMs. However, due to the limited budget and API access, we cannot evaluate all possible LLMs in this paper. We encourage future research to conduct further evaluations of humor understanding abilities in LLMs. In the meantime, we emphasize that our research focuses primarily on demonstrating how humor understanding remains a significant challenge, even for SOTA LLMs. Our work shows that along with many other problems (Ignat et al., 2024), humor understanding, especially non-English and culturally specific humor understanding, remains an unsolved problem in the era of LLMs. We hope Chumor can contribute to non-English humor understanding evaluations for future multilingual LLMs.", + "bbox": [ + 115, + 202, + 490, + 460 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Ethics Statement", + "text_level": 1, + "bbox": [ + 114, + 472, + 265, + 487 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We have made every effort to filter out excessively offensive content in RZB. However, due to the subjective nature of humor, some of our jokes may still be perceived as offensive by individuals with different cultural or personal standards. 
To address these concerns, we strongly recommend that researchers use Chumor with cultural sensitivity, recognizing that the jokes in the dataset reflect the sociocultural context in which they were created. We encourage users of Chumor to approach the dataset with caution, remaining mindful of its potential to cause offense or harm, particularly when applying it in research or applications that involve diverse audiences or address sensitive topics. We wish to foster an ethical and responsible approach to data collection and usage, and we welcome constructive feedback from the research community and stakeholders to continually improve Chumor and mitigate potential harm.", + "bbox": [ + 115, + 497, + 489, + 803 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Acknowledgement", + "text_level": 1, + "bbox": [ + 114, + 815, + 278, + 832 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The GPT experiments are supported by credit from OpenAI through OpenAI Researcher Access assigned to Naihao Deng. We appreciate Qiang Liu, and Xiaoyue Shi for helping with the human study.", + "bbox": [ + 112, + 841, + 490, + 906 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 510, + 83, + 608, + 98 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "01.ai. 2024. Yi-34b model card. https://huggingface.co/01-ai/Yi-34B. Accessed: 2024-12-10.", + "Marah Abdin, Jyoti Aneja, Harkirat Behl, Sébastien Bubeck, Ronen Eldan, Suriya Gunasekar, Michael Harrison, Russell J. Hewett, Mojan Javaheripi, Piero Kauffmann, James R. Lee, Yin Tat Lee, Yuanzhi Li, Weishung Liu, Caio C. T. Mendes, Anh Nguyen, Eric Price, Gustavo de Rosa, Olli Saarikivi, and 8 others. 2024. Phi-4 technical report. Preprint, arXiv:2412.08905.", + "Badr AlKhamissi, Muhammad ElNokrashy, Mai Alkhamissi, and Mona Diab. 2024. Investigating cultural alignment of large language models. 
In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 12404-12422, Bangkok, Thailand. Association for Computational Linguistics.", + "Jinze Bai, Shuai Bai, Yunfei Chu, Zeyu Cui, Kai Dang, Xiaodong Deng, Yang Fan, Wenbin Ge, Yu Han, Fei Huang, Binyuan Hui, Luo Ji, Mei Li, Junyang Lin, Runji Lin, Dayiheng Liu, Gao Liu, Chengqiang Lu, Keming Lu, and 29 others. 2023. Qwen technical report. Preprint, arXiv:2309.16609.", + "Yuelin Bai, Xinrun Du, Yiming Liang, Yonggang Jin, Ziqiang Liu, Junting Zhou, Tianyu Zheng, Xincheng Zhang, Nuo Ma, Zekun Wang, and 1 others. 2024. Coig-cqia: Quality is all you need for chinese instruction fine-tuning. arXiv preprint arXiv:2403.18058.", + "Baidu. 2024. Ernie-4.0-turbo. https://cloud.baidu. com/doc/WENXINWORKSHOP/s/71xwwtafj. Accessed: 2024-12-10.", + "Dario Bertero and Pascale Fung. 2016. Deep learning of audio and language features for humor prediction. In Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16), pages 496-501, Porto-rož, Slovenia. European Language Resources Association (ELRA).", + "BigModel. 2024. Glm-4 model documentation. https://bigmodel.cn/dev/howuse/glm-4. Accessed: 2024-12-10.", + "Lei Chen and Chong Min Lee. 2017. Predicting audience's laughter during presentations using convolutional neural network. In Proceedings of the 12th Workshop on Innovative Use of NLP for Building Educational Applications, pages 86-90, Copenhagen, Denmark. Association for Computational Linguistics.", + "Yuyan Chen, Zhixu Li, Jiaqing Liang, Yanghua Xiao, Bang Liu, and Yunwen Chen. 2023. Can pre-trained language models understand chinese humor? In Proceedings of the Sixteenth ACM International Conference on Web Search and Data Mining, WSDM '23, page 465-480, New York, NY, USA. Association for Computing Machinery." 
+ ], + "bbox": [ + 509, + 105, + 885, + 921 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "21807", + "bbox": [ + 475, + 927, + 524, + 940 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yuyan Chen, Yichen Yuan, Panjun Liu, Dayiheng Liu, Qinghao Guan, Mengfei Guo, Haiming Peng, Bang Liu, Zhixu Li, and Yanghua Xiao. 2024. Talk funny! a large-scale humor response dataset with chain-of-humor interpretation. Proceedings of the AAAI Conference on Artificial Intelligence, 38(16):17826-17834.", + "Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. 2018. Think you have solved question answering? try arc, the ai2 reasoning challenge. Preprint, arXiv:1803.05457.", + "Peter T. Daniels and William Bright. 1996. The world's writing systems. Oxford University Press.", + "Naihao Deng, Zhenjie Sun, Ruiqi He, Aman Sikka, Yu-long Chen, Lin Ma, Yue Zhang, and Rada Mihalcea. 2024. Tables as images? exploring the strengths and limitations of llms on multimodal representations of tabular data. arXiv preprint arXiv:2402.12424.", + "Naihao Deng, Xinliang Zhang, Siyang Liu, Winston Wu, Lu Wang, and Rada Mihalcea. 2023. You are what you annotate: Towards better models through annotator representations. In *Findings of the Association for Computational Linguistics: EMNLP* 2023, pages 12475–12498, Singapore. Association for Computational Linguistics.", + "Xinrun Du, Zhouliang Yu, Songyang Gao, Ding Pan, Yuyang Cheng, Ziyang Ma, Ruibin Yuan, Xingwei Qu, Jiaheng Liu, Tianyu Zheng, and 1 others. 2024. Chinese tiny llm: Pretraining a chinese-centric large language model. arXiv preprint arXiv:2404.04167.", + "Tomas Engelthaler and Thomas T Hills. 2018. Humor norms for 4,997 english words. Behavior research methods, 50:1116-1124.", + "WILLIAM F. FRY. 1994. The biology of humor. HUMOR, 7(2):111-126.", + "Marc Gelkopf and 1 others. 2011. 
The use of humor in serious mental illness: A review. Evidence-Based Complementary and Alternative Medicine, 2011.", + "Matthew Gervais and David Sloan Wilson. 2005. The evolution and functions of laughter and humor: A synthetic approach. *The Quarterly review of biology*, 80(4):395-430.", + "Hamideh Ghanaian, Isar Nejadgholi, and Hussein Al Osman. 2023. ChatGPT for suicide risk assessment on social media: Quantitative evaluation of model performance, potentials and limitations. In Proceedings of the 13th Workshop on Computational Approaches to Subjectivity, Sentiment, & Social Media Analysis, pages 172-183, Toronto, Canada. Association for Computational Linguistics.", + "Google. 2024. Gemini 1.5 pro model documentation. https://ai.google.dev/gemini-api/docs/ models/gemini#gemini-1.5-pro. Accessed: 2024-12-10." + ], + "bbox": [ + 115, + 85, + 489, + 917 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Md Kamrul Hasan, Wasifur Rahman, AmirAli Bagher Zadeh, Jianyuan Zhong, Md Iftekhar Tanveer, Louis-Philippe Morency, and Mohammed (Ehsan) Hoque. 2019. UR-FUNNY: A multimodal language dataset for understanding humor. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 2046-2056, Hong Kong, China. Association for Computational Linguistics.", + "Jack Hessel, Ana Marasovic, Jena D. Hwang, Lillian Lee, Jeff Da, Rowan Zellers, Robert Mankoff, and Yejin Choi. 2023. Do androids laugh at electric sheep? humor \"understanding\" benchmarks from the new yorker caption contest. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 688-714, Toronto, Canada. Association for Computational Linguistics.", + "Nabil Hossain, John Krumm, and Michael Gamon. 2019. 
\"president vows to cut hair\": Dataset and analysis of creative text editing for humorous headlines. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 133-142, Minneapolis, Minnesota. Association for Computational Linguistics.", + "Oana Ignat, Zhijing Jin, Artem Abzaliev, Laura Biester, Santiago Castro, Naihao Deng, Xinyi Gao, Aylin Ece Gunal, Jacky He, Ashkan Kazemi, Muhammad Khalifa, Namho Koh, Andrew Lee, Siyang Liu, Do June Min, Shinka Mori, Joan C. Nwatu, Veronica Perez-Rosas, Siqi Shen, and 3 others. 2024. Has it all been solved? open NLP research questions not solved by large language models. In Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pages 8050-8094, Torino, Italia. ELRA and ICCL.", + "Herbert M Lefcourt. 2001. *Humor: The psychology of living buoyantly*. Springer Science & Business Media.", + "Zefeng Li, Hongfei Lin, Liang Yang, Bo Xu, and Shaowu Zhang. 2022. Memeplate: A chinese multimodal dataset for humor understanding in meme templates. In *Natural Language Processing and Chinese Computing*, pages 527-538, Cham. Springer International Publishing.", + "Hanmeng Liu, Ruoxi Ning, Zhiyang Teng, Jian Liu, Qiji Zhou, and Yue Zhang. 2023a. Evaluating the logical reasoning ability of chatgpt and gpt-4. arXiv preprint arXiv:2304.03439.", + "Siyang Liu, Naihao Deng, Sahand Sabour, Yilin Jia, Minlie Huang, and Rada Mihalcea. 2023b. Task-adaptive tokenization: Enhancing long-form text generation efficacy in mental health and beyond. 
In Proceedings of the 2023 Conference on Empirical Meth" + ], + "bbox": [ + 510, + 85, + 880, + 919 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "21808", + "bbox": [ + 475, + 928, + 524, + 940 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "ods in Natural Language Processing, pages 15264-15281, Singapore. Association for Computational Linguistics.", + "Xin Liu, Baosong Yang, Dayiheng Liu, Haibo Zhang, Weihua Luo, Min Zhang, Haiying Zhang, and Jinsong Su. 2021. Bridging subword gaps in pretrainfinetune paradigm for natural language generation. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 6001-6011, Online. Association for Computational Linguistics.", + "Paul E McGhee. 1971. Development of the humor response: A review of the literature. Psychological Bulletin, 76(5):328.", + "Rada Mihalcea, Oana Ignat, Longju Bai, Angana Borah, Luis Chiruzzo, Zhijing Jin, Claude Kwizera, Joan Nwatu, Soujanya Poria, and Thamar Solorio. 2024. Why ai is weird and should not be this way: Towards ai for everyone, with everyone, by everyone. arXiv preprint arXiv:2410.16315.", + "Rada Mihalcea and Carlo Strapparava. 2005. Making computers laugh: Investigations in automatic humor recognition. In Proceedings of Human Language Technology Conference and Conference on Empirical Methods in Natural Language Processing, pages 531-538, Vancouver, British Columbia, Canada. Association for Computational Linguistics.", + "Mistral.ai. 2024. Mistral-large-instruct-2407 model card. https://huggingface.co/mistralai/Mistral-Large-Instruct-2407. Accessed: 2024-12-10.", + "Nexusflow. 2024. Athene-70b model card. https://huggingface.co/Nexusflow/Athene-70B. Accessed: 2024-12-10.", + "NVIDIA. 2024. Llama-3.1-nemotron-70b-instruct-hf model card. 
https://huggingface.co/nvidia/Llama-3.1-Nemotron-70B-Instruct-HF. Accessed: 2024-12-10.", + "OpenAI. 2023. Gpt-4 technical report. arXiv preprint arXiv:2303.08774.", + "OpenAI. 2024. Gpt-4o system card. arXiv preprint arXiv:2410.21276.", + "Siddhesh Pawar, Junyeong Park, Jiho Jin, Arnav Arora, Junho Myung, Srishti Yadav, Faiz Ghifari Haznitrama, Inhwa Song, Alice Oh, and Isabelle Augenstein. 2024. Survey of cultural awareness in language models: Text and beyond. arXiv preprint arXiv:2411.00860.", + "Peter Potash, Alexey Romanov, and Anna Rumshisky. 2017. SemEval-2017 task 6: #HashtagWars: Learning a sense of humor. In Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017), pages 49-57, Vancouver, Canada. Association for Computational Linguistics." + ], + "bbox": [ + 115, + 85, + 487, + 919 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Qwen. 2024. Qwen2.5-72b-instruct model card. https://huggingface.co/Qwen/Qwen2.5-72B-Instruct. Accessed: 2024-12-10.", + "Dragomir Radev, Amanda Stent, Joel Tetreault, Aasish Pappu, Aikaterini Iliakopoulou, Agustin Chanfreau, Paloma de Juan, Jordi Vallmitjana, Alejandro Jaimes, Rahul Jha, and Robert Mankoff. 2016. Humor in collective discourse: Unsupervised funniness detection in the new yorker cartoon caption contest. In Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16), pages 475-479, Porto Roz, Slovenia. European Language Resources Association (ELRA).", + "David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. 2023. Gpqa: A graduate-level google-proof q&a benchmark. Preprint, arXiv:2311.12022.", + "Chhavi Sharma, Deepesh Bhageria, William Scott, Srinivas PYKL, Amitava Das, Tanmoy Chakraborty, Viswanath Pulabaigari, and Björn Gambäck. 2020. SemEval-2020 task 8: Memotion analysis-the visuolinguial metaphor! 
In Proceedings of the Fourteenth Workshop on Semantic Evaluation, pages 759-773, Barcelona (online). International Committee for Computational Linguistics.", + "Siqi Shen, Lajanugen Logeswaran, Moontae Lee, Honglak Lee, Soujanya Poria, and Rada Mihalcea. 2024. Understanding the capabilities and limitations of large language models for cultural commonsense. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 5668-5680, Mexico City, Mexico. Association for Computational Linguistics.", + "Shivalika Singh, Angelika Romanou, Clémentine Fourrier, David I. Adelani, Jian Gang Ngui, Daniel Vila-Suero, Peerat Limkonchotiwat, Kelly Marchisio, Wei Qi Leong, Yosephine Susanto, Raymond Ng, Shayne Longpre, Wei-Yin Ko, Madeline Smith, Antoine Bosselut, Alice Oh, Andre F. T. Martins, Leshem Choshen, Daphne Ippolito, and 4 others. 2024. Global mmlu: Understanding and addressing cultural and linguistic biases in multilingual evaluation. Preprint, arXiv:2412.03304.", + "Zayne Sprague, Fangcong Yin, Juan Diego Rodriguez, Dongwei Jiang, Manya Wadhwa, Prasann Singhal, Xinyu Zhao, Xi Ye, Kyle Mahowald, and Greg Durrett. 2024. To cot or not to cot? chain-of-thought helps mainly on math and symbolic reasoning. Preprint, arXiv:2409.12183.", + "Honglin Sun and Daniel Jurafsky. 2004. Shallow semantic parsing of Chinese. In Proceedings of the Human Language Technology Conference of the North American Chapter of the Association for Computational Linguistics: HLT-NAACL 2004, pages 249-256, Boston, Massachusetts, USA. Association for Computational Linguistics." + ], + "bbox": [ + 510, + 85, + 882, + 919 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "21809", + "bbox": [ + 475, + 928, + 524, + 940 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Weiwei Sun, Zhifang Sui, Meng Wang, and Xin Wang. 2009. 
Chinese semantic role labeling with shallow parsing. In Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing, pages 1475-1483, Singapore. Association for Computational Linguistics.", + "Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, and Guillaume Lample. 2023a. Llama: Open and efficient foundation language models. Preprint, arXiv:2302.13971.", + "Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, and 49 others. 2023b. Llama 2: Open foundation and fine-tuned chat models. Preprint, arXiv:2307.09288.", + "Yuen-Hsien Tseng, Wun-Syuan Wu, Chia-Yueh Chang, Hsueh-Chih Chen, and Wei-Lun Hsu. 2020. Development and validation of a corpus for machine humor comprehension. In Proceedings of the Twelfth Language Resources and Evaluation Conference, pages 1346-1352, Marseille, France. European Language Resources Association.", + "Ashmal Vayani, Dinura Dissanayake, Hasindri Watawana, Noor Ahsan, Nevasini Sasikumar, Omkar Thawakar, Henok Biadglin Ademtew, Yahya Hmaiti, Amandeep Kumar, Kartik Kuckreja, and 1 others. 2024. All languages matter: Evaluating Imms on culturally diverse 100 languages. arXiv preprint arXiv:2411.16508.", + "Benyou Wang, Xiang Wu, Xiaokang Liu, Jianquan Li, Prayag Tiwari, and Qianqian Xie. 2022. Can language models make fun? a case study in chinese comical crosstalk. In Annual Meeting of the Association for Computational Linguistics.", + "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, and 1 others. 2022. Chain-of-thought prompting elicits reasoning in large language models. 
Advances in neural information processing systems, 35:24824-24837.", + "Orion Weller and Kevin Seppi. 2020. The rJokes dataset: a large scale humor collection. In Proceedings of the Twelfth Language Resources and Evaluation Conference, pages 6136-6141, Marseille, France. European Language Resources Association.", + "Jiaming Wu, Hongfei Lin, Liang Yang, and Bo Xu. 2021. Mumor: A multimodal dataset for humor detection in conversations. In *Natural Language Processing and Chinese Computing: 10th CCF International Conference*, NLPCC 2021, Qingdao, China, October 13–17, 2021, Proceedings, Part I, page 619–627, Berlin, Heidelberg. Springer-Verlag." + ], + "bbox": [ + 115, + 85, + 489, + 920 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yufan Wu, Yinghui He, Yilin Jia, Rada Mihalcea, Yu-long Chen, and Naihao Deng. 2023. Hi-ToM: A benchmark for evaluating higher-order theory of mind reasoning in large language models. In *Findings of the Association for Computational Linguistics: EMNLP* 2023, pages 10691-10706, Singapore. Association for Computational Linguistics.", + "Diyi Yang, Alon Lavie, Chris Dyer, and Eduard Hovy. 2015. Humor recognition and humor anchor extraction. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 2367-2376, Lisbon, Portugal. Association for Computational Linguistics.", + "Dongyu Zhang, Heting Zhang, Xikai Liu, Hongfei Lin, and Feng Xia. 2019. Telling the whole story: A manually annotated chinese dataset for the analysis of humor in jokes. In Conference on Empirical Methods in Natural Language Processing.", + "Min Zhang, Jianfeng He, Taoran Ji, and Chang-Tien Lu. 2024a. Don't go to extremes: Revealing the excessive sensitivity and calibration limitations of llms in implicit hate speech detection. Preprint, arXiv:2402.11406.", + "Tianyi Zhang, Faisal Ladhak, Esin Durmus, Percy Liang, Kathleen McKeown, and Tatsunori B. Hashimoto. 2024b. 
Benchmarking Large Language Models for News Summarization. Transactions of the Association for Computational Linguistics, 12:39-57.", + "Jun Zhao, Zhihao Zhang, Qi Zhang, Tao Gui, and Xuanjing Huang. 2024. Llama beyond english: An empirical study on language capability transfer. arXiv preprint arXiv:2401.01055.", + "Jeffrey Zhou, Tianjian Lu, Swaroop Mishra, Siddhartha Brahma, Sujoy Basu, Yi Luan, Denny Zhou, and Le Hou. 2023. Instruction-following evaluation for large language models. Preprint, arXiv:2311.07911." + ], + "bbox": [ + 510, + 85, + 884, + 619 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "21810", + "bbox": [ + 475, + 928, + 524, + 940 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A Contributions", + "text_level": 1, + "bbox": [ + 114, + 84, + 275, + 99 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Idea Proposal. Naihao Deng proposed the high-level idea of constructing a humor understanding benchmark sourced from RZB data.", + "bbox": [ + 112, + 139, + 489, + 186 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Background Survey. Ruiqi He surveyed the humor-related tasks.", + "bbox": [ + 112, + 199, + 485, + 230 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Data Processing. Ruiqi He crawled and processed the jokes from RZB.", + "bbox": [ + 112, + 243, + 485, + 274 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Annotation. Ruiqi He annotated the explanations for the RZB jokes. Yushu He, Longju Bai, Jiarui Liu, Zhenjie Sun, Zhenghao Tang, He Wang, Nai-hao Deng conducted the preference annotations.", + "bbox": [ + 112, + 287, + 487, + 351 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Experiments. Ruiqi He, Hanchen Xia, and Naihao Deng conducted the experiments.", + "bbox": [ + 112, + 363, + 485, + 395 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Result Aggregation. 
Ruiqi He, Naihao Deng, Yushu He aggregated the results.", + "bbox": [ + 112, + 407, + 485, + 439 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Paper Writing. Ruiqi He and Naihao Deng drafted the paper. Other authors provided revisions and feedback on the paper.", + "bbox": [ + 112, + 451, + 485, + 500 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Naihao Deng organized the research.", + "bbox": [ + 114, + 512, + 389, + 527 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B Agreement Rate Calculation", + "text_level": 1, + "bbox": [ + 114, + 580, + 396, + 596 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We calculate the percentage agreement rate among annotators who annotate their preferences between explanations from LLMs and humans. The results show an average inter-annotator agreement of $61.9\\%$ for GPT-4o and $60.9\\%$ for $\\mathrm{ERNIE}_{4}$ -turbo. Given the inherent subjectivity of humor interpretation tasks (Deng et al., 2023), the combined average agreement percentage of $61.4\\%$ is decent.", + "bbox": [ + 112, + 636, + 489, + 764 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "C Annotation Instructions for Preference Annotation", + "text_level": 1, + "bbox": [ + 114, + 816, + 485, + 848 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We include the following instructions for the preference annotations of the joke explanations:", + "bbox": [ + 112, + 889, + 489, + 921 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Instruction", + "text_level": 1, + "bbox": [ + 519, + 86, + 611, + 99 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "“在这个标注中,你将会看到一个笑话和对这个笑话的幽默之处的两个解释,请你比较哪个解释更好的解释了这个笑话的幽默之处,并从以下三个标签中选择:", + "bbox": [ + 515, + 115, + 870, + 179 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. 解释1", + "2. 解释2", + "3. 
一样好”" + ], + "bbox": [ + 519, + 180, + 605, + 227 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Translation", + "text_level": 1, + "bbox": [ + 648, + 230, + 742, + 242 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "\"In this annotation task, you will see a joke along with two explanations of its humor. Please compare which explanation better explains the reason why this joke is funny and choose from the following three labels:", + "bbox": [ + 515, + 244, + 873, + 325 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Explanation 1", + "2. Explanation 2", + "3. Both are equally good.\"" + ], + "bbox": [ + 519, + 326, + 715, + 374 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "For each example, we randomly assign the explanations from the LLMs and the human as Explanation 1 and Explanation 2 to ensure a fair comparison.", + "bbox": [ + 507, + 395, + 884, + 458 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "D Discussion on Evaluation Setting", + "text_level": 1, + "bbox": [ + 509, + 474, + 830, + 491 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Why Zero-Shot w.o. SFT? The primary research objective of this paper is to determine how well foundational LLMs can understand Chinese humor without relying on supervised fine-tuning for this binary classification task. The focus is on investigating the innate humor-understanding ability of these models through zero-shot and zero-shot CoT prompting. This aligns with the overarching goal of examining their general capabilities without additional task-specific training. From a human behavior perspective, individuals generally acquire a sense of humor through exposure and experience rather than explicit classroom instruction (McGhee, 1971; FRY, 1994; Gervais and Wilson, 2005). 
Analogously, our purpose lies in evaluating the models' intrinsic ability to recognize and interpret humor without deliberate, task-specific fine-tuning. Moreover, the experimental design follows practices from other benchmarks, such as GPQA (Rein et al., 2023), AI2ARC (Clark et al., 2018), and IFEVAL (Zhou et al., 2023), which do not provide predefined train/dev/test splits. Under these conditions, it is common to assess models in a zero-shot manner to directly evaluate their capabilities on each respective task (Touvron et al., 2023a,b; Bai et al., 2023; Abdin et al., 2024).", + "bbox": [ + 507, + 502, + 884, + 920 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "21811", + "bbox": [ + 475, + 927, + 522, + 940 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/46535b0b36dd29ef0e96b1dab5b19ca84f0d5d1cf30a0cd676e0654f53fcb999.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Example(zh)真可怕,犯罪嫌疑人就在我们之中,被告席上一名法警对另一名法警说。
(en)“Terrifying, the criminal suspect is right between the two of us,” said one bailiff to another in the defendant's dock.
Correct Humor Explanation“between us” can refer to “either one of us” or literally means the actual physical position.
GPT-4o's Answer(zh)…暗示他们自己可能是犯罪嫌疑人…
(en)…it suggests the bailiffs themselves might be criminal suspects…
Failure ReasonsFail to address the literal meaning.
", + "bbox": [ + 114, + 84, + 489, + 293 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "E More Error Cases", + "text_level": 1, + "bbox": [ + 114, + 359, + 309, + 375 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We note that many examples here encompass multiple error types, highlighting the complexity of Chumor.", + "bbox": [ + 112, + 386, + 489, + 434 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Insufficient Contextual Understanding. LLMs may fail to ground their responses to the context when they explain the joke. For instance, in the example in Figure 9, \"between us\" typically means \"either you or me\", but it also has the literal meaning to indicate the person standing \"between us\", which is the right interpretation given that the two bailiffs are talking about the criminal. However, GPT-4o only reasons that \"the criminal is either you or me\" but fails to capture the literal meaning from the context. We hypothesize that in the pretraining corpus, \"between us\" most likely acquires the meaning of \"either you or me\" rather than the literal meaning in a scenario like this, which creates a bias that prevents the model from reasoning about the literal interpretation required for this specific explanation.", + "bbox": [ + 112, + 444, + 489, + 718 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Glyph-based Error. LLMs may fail to interpret the visual puns based on the shape or form of Chinese characters. Glyph-based humor in Chinese leverages its logographic writing system, where characters integrate both semantic and visual elements. Unlike the phonemic alphabet used in English, Chinese characters' pictorial and ideographic nature allows for visual puns in jokes (Daniels and Bright, 1996). 
In the example in Figure 10, \"simplify\" does not refer to simplifying the tally problem conceptually, but to simplifying the traditional Chinese characters to simplified Chinese", + "bbox": [ + 112, + 728, + 489, + 921 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/8e9b7c6a740fafcc6643850905b76cb94233b6ddef0b447eaa715f0bae3b01b0.jpg", + "table_caption": [ + "Figure 9: Insufficient contextual understanding example." + ], + "table_footnote": [], + "table_body": "
Example(zh)我把電車難題簡化了,現在是电车难题。
(en)I simplified the trolley problem (in traditional Chinese), now it's the trolley problem (in simplified Chinese).
Correct Humor ExplanationThe joke does not actually simplify the scenario of the trolley problem; instead, it converts the traditional Chinese characters “電車難題” into the simplified Chinese characters “电车难题”.
GPT-4o's Answer(zh)...这个笑话的幽默之处在于通过将“電車難題”中的“難”字去掉,变成了“电车題”。
(en)...The humor of this joke lies in removing the character “hard” from “the hard tally problem”, turning it into “the tally problem”.
Failure ReasonsFail to interpret the visual puns in the shape or form of the characters.
", + "bbox": [ + 510, + 83, + 884, + 305 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/d467f8decb9e010f942f43b21e3fb0ea4d93dce6a3dfa366ded7f9e6b50eca2f.jpg", + "table_caption": [ + "Figure 10: Glyph-based error example." + ], + "table_footnote": [], + "table_body": "
Example(zh) 小王订了张飞去北京的机票,给张飞省了一大笔钱。
(en) Xiaowang booked an airline ticket to Beijing, saving Zhang Fei a lot of money.
Correct \nHumor \nExplanationThere are two ways to parse the first half of the sentence: \n(1) 小王/订了/张/飞去/北京的/机票 XiaoWang/ booked/ a/ ticket to fly to Beijing. \n(2) 小王/订了/张飞/去北京的/机票 XiaoWang / booked/ Zhangfei/ a ticket/ to Beijing.
Typically, people would interpret in the first way as Zhangfei is a fictional figure and do not appear in the daily conversation. However, the second half of the sentence confirms that the second way of parsing turns out to be correct. Such contrast is hilarious.
GPT-4o's \nAnswer(zh) ...由于历史人物张飞已经不在世,所以给他省了一大笔钱。
(en) ... since the historical figure Zhang Fei is dead and cannot take a flight, the joke humorously suggests that he saved a lot of money.
Failure \nReasonsFail to parse textual elements in a sentence dynamically.
", + "bbox": [ + 510, + 347, + 884, + 678 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Figure 11: Parsing error example.", + "bbox": [ + 579, + 689, + 810, + 703 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "characters as the traditional Chinese characters are also termed as \"complicated characters\". However, LLMs struggle to reason such graphemic differences as there are no explicit connections between the textual meaning and visual representations of the glyphs.", + "bbox": [ + 507, + 731, + 884, + 829 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Parsing Error. LLMs often fail to parse sentences in multiple ways simultaneously, leading to difficulties in explaining jokes that require different parsing for the same sentence. In the example in Figure 11, the humor hinges on the ambiguity of", + "bbox": [ + 507, + 841, + 884, + 921 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "21812", + "bbox": [ + 475, + 927, + 524, + 940 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "the phrase \"张飞\", which can be interpreted either as part of a verb phrase implying \"a ticket flying to Beijing\" or as a proper noun, referring to the historical figure Zhang Fei. This ambiguity stems from the flexibility of the Chinese language, where each character can function independently as a word or combine with others to form new words or phrases. There are decades of research studying the problem of parsing Chinese (Sun and Jurafsky, 2004; Sun et al., 2009). Recently, researchers have proposed task-specific tokenization approaches that adapt the parsing process to better align with downstream tasks (Liu et al., 2021, 2023b). 
However, how to incorporate different ways of parsing at one time still remains challenging.", + "bbox": [ + 112, + 84, + 492, + 326 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/4d1e041edbe6852fb969e635d43982bfe36d895fdd82a978fd714c831e6677e5.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Example(zh) 五步蛇的危险性是李白的两倍。 (en) The danger of a five-step snake is twice that of Li Bai.
Correct Humor ExplanationKill one in ten steps is a line from a poem by the ancient Chinese poet Li Bai, suggesting that while it takes Li Bai ten steps to kill, a person can die within five steps of being bitten by the “Five-Step Snake” hence it is twice as dangerous as Li Bai.
(zh) …李白的名字听起来像“二步”,所以“五步蛇”的危险性被戏称为是“二步”的两倍。
GPT-4o's Answer(en) …Li Bai's (pronounced as lee bye) name sounds similar to “two steps” (pronounced as r boo) in Chinese, so the danger of “Five-Step Snake” is humorously claimed to be twice that of “two steps”
Failure ReasonsFabricate ungrounded details or facts.
", + "bbox": [ + 115, + 337, + 487, + 596 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Figure 12: Hallucinations example.", + "bbox": [ + 179, + 609, + 421, + 625 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Hallucinations. LLMs may fabricate ungrounded details or facts in joke explanations. For instance, in the explanation in Figure 12, GPT-4o claims that \"Li Bai's name sounds similar to two steps\", while \"Li Bai\" (pronounced as lee bye) does not sound like \"two steps\" (pronounced as $r$ boo).", + "bbox": [ + 112, + 646, + 489, + 758 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "On the other hand, the correct explanation requires an understanding of a Chinese poem from Li Bai, \"十步杀一人\" (The warrior kills a person for every ten steps). This line praises the courage of the soldiers, but the joke deliberately portrays this as a characteristic of Li Bai. Therefore, compared to Li Bai who can kill a person in ten steps, a five-step snake, which can kill a person in five steps, is twice as dangerous as Li Bai. Such explanation requires LLMs to have a deep understanding of Chinese", + "bbox": [ + 112, + 760, + 489, + 921 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "culture and reason over cultural references, posing a great challenge to current LLMs. Although recent works have made progress towards building LLMs beyond English (Du et al., 2024; Zhao et al., 2024), building an LLM that can comprehend such nuanced Chinese cultural terms can be extremely hard.", + "bbox": [ + 507, + 84, + 884, + 197 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "21813", + "bbox": [ + 475, + 927, + 524, + 940 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/604942534d6cac5e355ee3bfb93d957a29a9cc0c270157a5be6db68d00b0046b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Example(zh)曹操于城楼上问夏侯惇:“你瞧到了什么。”夏侯惇说:“瞧到马岱。”(en) Cao Cao, from atop the city tower, asked Xia Houdun, “What did you see?” Xia Houdun replied, “I saw Ma Dai.”
Correct Humor ExplanationCao Cao, Xiahou Dun, and Ma Dai were historical figures from the Three Kingdoms period in ancient China. The phrase “saw Ma Dai” (pronounced as chuh dow ma die) not only literally refers to spotting Ma Dai but also play on similar pronunciation with the Japanese phrase “まじと待て” (chotto matte), which means to “wait a moment.”
GPT-4o's Answer(zh) ... 这个笑话的幽默之处在于利用了“马岱”这个名字的谐音梗,听起来像“妈的”,让人误以为夏侯惇在骂人。(en) ... a pun involving the name "Ma Dai," which sounds like "damn" in Chinese. It sounds like Xiahou Dun is swearing at Cao Cao.
Failure ReasonsFail to recognize information/elements across different languages.
", + "bbox": [ + 114, + 84, + 489, + 385 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Cross-lingual Error. LLMs may fail to recognize elements or information across different languages. In the explanation in Figure 13, GPT-4o attempts to link the pronunciation of \"Ma Dai\" to other Chinese terms but fails to identify the similar pronunciations across the Chinese term \"瞧到马岱\" (pronounced as chuh dow ma die, meaning \"saw Ma Dai\") and the Japanese term \"ちよと待て\" (chotto matte, meaning \"wait a moment\"). Such cases require LLMs to connect pronunciations across languages, which may be rare in the LLMs' pre-training corpus and poses significant challenges to current LLMs.", + "bbox": [ + 110, + 437, + 489, + 645 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Overcritical. Figure 14 shows when the model overly criticizes an explanation, it focuses excessively on minor details, neglecting the major point and ultimately misjudging the explanation. The joke relies on common sense about life, death, and testing rather than cultural knowledge. Under the CoT prompting, the model recognizes that the explanation identifies the main punchlines but overly criticizes the lack of cultural context.", + "bbox": [ + 110, + 653, + 489, + 799 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Excessive Sensitivity. For $\\mathrm{ERNIE}_{4}$ -turbo, in addition to errors across all the aforementioned categories, it also demonstrates excessive sensitivity to certain examples. Specifically, when content includes languages related to hate speech but used in non-harmful contexts, $\\mathrm{ERNIE}_{4}$ -turbo refuses to provide an explanation. During our evalua", + "bbox": [ + 112, + 808, + 490, + 921 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/c01566a1c5ddc72f0d260e31ba078b1f5aa93658afc3313cf4e76de7871f4a51.jpg", + "table_caption": [ + "Figure 13: Cross-lingual error example." + ], + "table_footnote": [], + "table_body": "
Example(zh)网上传疯了的超准测试题!20道题轻松判断出你有没有死!(en)The viral online quiz! 20 questions to easily determine if you're alive or dead!
Joke Explanation(zh)这个笑话的幽默之处在于它用夸张的方式提出了一个显而易见的问题——判断自己是否还活着,这本身就不需要测试。其次,标题中“超准测试题”和“轻松判断”这样的描述让人觉得荒谬可笑,因为活着与否是最基本的感知,不需要任何测试。
Model's Answer DP(en)The humor in this joke lies in its exaggerated premise of asking an obvious question—determining whether one is still alive, which inherently doesn't require any test. Additionally, the descriptions in the title, such as "super accurate quiz" and "easily determine," come across as absurdly funny because being alive is the most basic perception, requiring no testing at all.
Model's Answer CoT(zh)完全解释(en)Fully Explained
(zh)...最终判断:部分/没有解释理由:虽然解释准确地指出了笑话的主要笑点(夸张性和标题的矛盾),但它没有深入探讨这些元素为什么会产生幽默效果,也没有讨论潜在的文化或认知背景...
(en)...Final judgment: Partially/Not explained Reason: While the explanation accurately identifies the main punchlines of the joke (exaggeration and the contradiction in the title), it does not delve into why these elements create a humorous effect or discuss the potential cultural or cognitive context behind it...
", + "bbox": [ + 509, + 83, + 884, + 508 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Figure 14: Overcritical example by Nematron $_{70\\mathrm{B}}$ . The Nematron $_{70\\mathrm{B}}$ model selects the correct answer in the DP prompting, but selects the incorrect answer due to being overly critical in the CoT prompting.", + "bbox": [ + 507, + 520, + 882, + 577 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "tion, we observe this excessive sensitivity in the $\\mathrm{ERNIE}_{4}$ -turbo's responses to humor related to medical ethics and political discussions. This suggests that correctly understanding the context and the language toxicity remains an open challenge (Zhang et al., 2024a). Such issues are particularly critical for humor explanation, as misclassifying non-toxic context can cause the responses to deviate from the intended humor.", + "bbox": [ + 507, + 615, + 884, + 760 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "F Prompts for DP and CoT in Chumor", + "text_level": 1, + "bbox": [ + 507, + 797, + 858, + 814 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "This section outlines the prompts used in Chumor to evaluate whether an explanation fully explains a joke. Two prompting strategies are adopted: Direct Prompting (DP) and Chain of Thought (CoT). 
Below are the details of each approach:", + "bbox": [ + 507, + 841, + 884, + 921 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "21814", + "bbox": [ + 475, + 927, + 524, + 940 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Direct Prompting (DP)", + "text_level": 1, + "bbox": [ + 124, + 86, + 307, + 102 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "你将看到一个笑话以及对这个笑话的解释。请判断这个解释是否完全解释了笑话。根据判断,选择“完全解释”或“部分/没有解释”,不需要解释为什么对或者不对。", + "bbox": [ + 122, + 117, + 477, + 181 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "笑话:[joke]", + "bbox": [ + 124, + 183, + 226, + 199 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "笑话解释:[explanation]", + "bbox": [ + 124, + 199, + 315, + 215 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Translation", + "text_level": 1, + "bbox": [ + 253, + 216, + 347, + 229 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "You will see a joke and an explanation of the joke. Please determine whether this explanation fully explains the joke. Based on your judgment, choose either \"fully explain\" or \"partially/does not explain.\" You do not need to explain why it is correct or incorrect.", + "bbox": [ + 122, + 231, + 478, + 326 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Joke: [joke]", + "bbox": [ + 124, + 328, + 215, + 344 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Explanation: [explanation]", + "bbox": [ + 124, + 344, + 324, + 360 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The DP prompt is designed to encourage concise decision-making. 
It directly asks the model to evaluate the completeness of the explanation without requiring reasoning or justification.", + "bbox": [ + 112, + 380, + 487, + 445 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Chain of Thought (CoT)", + "text_level": 1, + "bbox": [ + 124, + 458, + 317, + 472 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "你将看到一个笑话以及对这个笑话的解释。请逐步思考,写下过程并最终判断这个解释是否完全解释了笑话。根据判断,选择“完全解释”或“部分/没有解释”。", + "bbox": [ + 122, + 488, + 477, + 552 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "笑话:[joke]", + "bbox": [ + 124, + 555, + 226, + 570 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "笑话解释:[explanation]", + "bbox": [ + 124, + 571, + 315, + 586 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Translation", + "text_level": 1, + "bbox": [ + 253, + 588, + 347, + 600 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "You will see a joke and an explanation of the joke. Please think step by step, write down your reasoning process, and finally determine whether this explanation fully explains the joke. Based on your judgment, choose either \"fully explains\" or \"partially/does not explain.\"", + "bbox": [ + 122, + 602, + 477, + 699 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Joke: [joke]", + "bbox": [ + 124, + 700, + 215, + 715 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Explanation: [explanation]", + "bbox": [ + 124, + 717, + 324, + 731 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The CoT prompt, in contrast, requires the model to reason step by step before reaching a conclusion. 
This approach aims to improve transparency by explicitly documenting the thought process behind the evaluation.", + "bbox": [ + 112, + 752, + 487, + 831 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "G Joke Type Distribution in Chumor", + "text_level": 1, + "bbox": [ + 112, + 846, + 448, + 863 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We sampled 200 datapoints from Chumorto analyze the distribution of joke types, as shown in Figure 15. Note that a single joke may belong to", + "bbox": [ + 112, + 873, + 487, + 921 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/326993549316df513bb9806d9d31515b487772d05a316df9b370565647db6178.jpg", + "image_caption": [ + "Figure 15: Distribution of Joke Types in 200 Sampled Datapoints." + ], + "image_footnote": [], + "bbox": [ + 529, + 80, + 862, + 195 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "multiple categories, as it can exhibit features of more than one joke type.", + "bbox": [ + 507, + 253, + 882, + 286 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/6df1b3c527229283c9d7de199eea2d6285b3eca5b0cdc18e78ecc11bf3f068e7.jpg", + "image_caption": [ + "H Detailed Results of Experiments", + "Figure 16: The Matthew's correlation coefficient of different models' test results in DP and CoT." + ], + "image_footnote": [], + "bbox": [ + 510, + 335, + 880, + 771 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "For evaluation, we input each prompt into the model and collect its responses, comparing them to the labels in Chumor. A model's response is considered correct if it matches the reference label. 
If the model provides an incorrect answer or doesn't generate a response at all (due to safety protocols or", + "bbox": [ + 507, + 825, + 882, + 921 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "21815", + "bbox": [ + 475, + 927, + 524, + 940 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "filtering sensitive terms), it is marked as incorrect. Such scenario is rare, occurring only 21 times in our experiments, and exclusively with GLM-4plus.", + "bbox": [ + 112, + 84, + 487, + 133 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We highlight that CoT prompting at most cases degrade the models' performance on Chumor. As shown in Figure 16, only $\\mathrm{Athene}_{70\\mathrm{B}}$ achieves a significant improvement. However, this is offset by its poorest performance under DP prompting among the models. GPT-4o shows a slight improvement, with its MCC score increasing from 0.19 to 0.20. And all other eight models exhibit different degrees of performance decline.", + "bbox": [ + 112, + 133, + 487, + 275 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/6141e183d684833465b3a4c899603522d9953ad32c94768a7106eaf7f5d4f1c1.jpg", + "image_caption": [ + "Figure 17: CoT accuracy on different joke types $(\\%)$ ." + ], + "image_footnote": [], + "bbox": [ + 117, + 287, + 485, + 881 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "21816", + "bbox": [ + 475, + 928, + 524, + 940 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/a2350fa2fb3236f14324d34d9047f95e61b4fa92dc3097fd9d164c5d29fee80a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelDPCoT
MCCACC (%)FPR (%)FNR (%)MCCACC (%)FPR (%)FNR (%)
Yi34B0.1044.9597.240.210.0947.1789.305.44
Nemotron70B0.1956.3061.2620.870.1457.1740.2846.14
Athene70B0.0844.5997.830.280.1247.2691.102.89
ERNIE4-turbo0.2960.2959.8313.570.1145.1696.930.14
QWen2.572B0.1948.4690.670.690.1749.4586.913.31
Mistral123B0.2255.5669.2612.190.1651.1879.928.40
Gemini1.5-pro0.2454.0077.425.170.1960.3233.8147.31
GLM-4plus0.2455.5672.288.260.1458.1332.9653.44
GPT-4o0.1951.8780.026.680.2050.6485.003.03
GPT-4turbo0.2052.3279.286.610.1751.2780.876.96
", + "bbox": [ + 216, + 102, + 781, + 309 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/b4755b453fe86ec9166c55a4a81fe89079260d39914b7d616f67a02f63b1c2f2.jpg", + "table_caption": [ + "Table 4: Performance metrics for explanation evaluation including Matthew's correlation coefficient (MCC), accuracy (ACC), false positive rate (FPR), and false negative rate (FNR)." + ], + "table_footnote": [], + "table_body": "
ModelSourceDPCoT
MCCACC(%)FPR(%)FNR(%)MCCACC(%)FPR(%)FNR(%)
Athene70BOverall0.0844.5997.830.280.1247.2691.102.89
ERNIE Bot0.1252.3897.150.000.1554.2491.132.13
GPT-4o0.0333.9098.510.860.0837.6791.064.50
ERNIE-turboOverall0.2960.2959.8313.570.1145.1696.930.14
ERNIE Bot0.2358.6478.145.990.1653.4794.830.10
GPT-4o0.2762.5441.3829.550.0433.7699.040.21
Gemini1.5-proOverall0.2454.0077.425.170.1960.3233.8147.31
ERNIE Bot0.2760.6674.135.890.2360.8728.6249.24
GPT-4o0.2144.8580.743.640.1759.5639.0443.25
GLM-4plusOverall0.2455.5672.288.260.1458.1332.9653.44
ERNIE Bot0.2559.8374.976.700.1557.5637.0647.61
GPT-4o0.2149.6869.5711.560.0658.9228.8365.74
GPT-4turboOverall0.2052.3279.286.610.1751.2780.876.96
ERNIE Bot0.2057.2580.995.990.2258.7576.147.72
GPT-4o0.1845.5677.557.920.1341.0185.645.35
GPT-4oOverall0.1951.8780.026.680.2050.6485.003.03
ERNIE Bot0.2157.8279.416.400.2458.0782.472.94
GPT-4o0.1643.7180.647.280.1540.4487.553.21
Nemotron70BOverall0.1956.3061.2620.870.1457.1740.2846.14
ERNIE Bot0.2260.6656.8122.540.1457.0439.1846.60
GPT-4o0.1850.3265.7417.340.1357.3641.3845.18
Mistral123BOverall0.2255.5669.2612.190.1651.1879.928.40
ERNIE Bot0.2561.1365.1513.600.1857.0479.737.61
GPT-4o0.2047.9073.409.210.1243.1480.1110.06
Qwen2.572BOverall0.1948.4690.670.690.1749.4586.913.31
ERNIE Bot0.1954.4592.610.300.1855.5488.072.54
GPT-4o0.1740.2388.721.500.1441.0885.744.93
Yi34BOverall0.1044.9597.240.210.0947.1789.305.44
ERNIE Bot0.1553.4294.720.300.1153.9988.385.28
GPT-4o0.0333.3399.790.000.0737.8190.215.78
", + "bbox": [ + 142, + 395, + 853, + 858 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Table 5: Detailed performance metrics with source for explanation evaluation of Matthew's correlation coefficient (MCC), accuracy (ACC), false positive rate (FPR), and false negative rate (FNR).", + "bbox": [ + 112, + 866, + 882, + 897 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "21817", + "bbox": [ + 475, + 927, + 524, + 940 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/1eb9aa8f833d0b31f3f4e2b3363e87ef9f41484df688e9a2dffef02e4ccdab4a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelPromptingCross-lingualGlyph-basedHomophonemicPun-basedSituationalCultural
Athene70BDP0.000.0030.0044.0061.0042.00
CoT0.0025.0030.0044.0059.0043.00
ERNIE4-turboDP50.0050.0060.0061.0070.0063.00
CoT0.000.0030.0043.0059.0042.00
Gemini1.5-proDP50.0050.0055.0063.0067.0061.00
CoT50.0075.0070.0061.0066.0069.00
GLM-4plusDP50.0025.0065.0060.0069.0060.00
CoT50.00100.0075.0064.0060.0061.00
GPT-4turboDP50.0025.0040.0057.0067.0055.00
CoT50.0025.0045.0054.0062.0056.00
GPT-4oDP0.0050.0035.0049.0063.0054.00
CoT0.0050.0035.0050.0062.0053.00
Nemotron70BDP50.0050.0065.0063.0062.0060.00
CoT100.00100.0065.0066.0060.0072.00
Mistral123BDP50.0050.0055.0061.0065.0061.00
CoT50.000.0040.0053.0066.0055.00
Qwen2.572BDP0.0050.0035.0047.0064.0051.00
CoT0.0050.0040.0053.0063.0053.00
Yi34BDP0.000.0030.0043.0060.0044.00
CoT0.0025.0040.0049.0063.0052.00
", + "bbox": [ + 146, + 324, + 850, + 649 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Table 6: Performance metrics by joke type for explanation evaluation accuracy(%)", + "bbox": [ + 216, + 658, + 778, + 673 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "21818", + "bbox": [ + 475, + 928, + 524, + 940 + ], + "page_idx": 19 + } +] \ No newline at end of file diff --git a/2025/Chumor 2.0_ Towards Better Benchmarking Chinese Humor Understanding from (Ruo Zhi Ba)/5abdc7b1-9bd7-4584-b650-ffb46a145cb6_model.json b/2025/Chumor 2.0_ Towards Better Benchmarking Chinese Humor Understanding from (Ruo Zhi Ba)/5abdc7b1-9bd7-4584-b650-ffb46a145cb6_model.json new file mode 100644 index 0000000000000000000000000000000000000000..5b1c9ac013b367b2daa69d88086983f64344806c --- /dev/null +++ b/2025/Chumor 2.0_ Towards Better Benchmarking Chinese Humor Understanding from (Ruo Zhi Ba)/5abdc7b1-9bd7-4584-b650-ffb46a145cb6_model.json @@ -0,0 +1,3309 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.186, + 0.09, + 0.817, + 0.131 + ], + "angle": 0, + "content": "Chumor 2.0: Towards Better Benchmarking Chinese Humor Understanding from 弱智吧 (Ruo Zhi Ba)" + }, + { + "type": "text", + "bbox": [ + 0.106, + 0.15, + 0.894, + 0.189 + ], + "angle": 0, + "content": "Ruiqi He Yushu He Longju Bai Jiarui Liu Zhenjie Sun Zenghao Tang He Wang Hanchen Xia Rada Mihalcea Naihao Deng" + }, + { + "type": "text", + "bbox": [ + 0.134, + 0.19, + 0.867, + 0.224 + ], + "angle": 0, + "content": "\\(^{\\text{©}}\\)University of Michigan Carnegie Mellon University Shanghai Jiaotong University {ruiqih, dnaiahao}@umich.edu" + }, + { + "type": "title", + "bbox": [ + 0.261, + 0.261, + 0.343, + 0.277 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.287, + 0.461, + 0.658 + ], + "angle": 0, + "content": "Existing humor datasets and evaluations predominantly focus on English, leaving limited resources for culturally nuanced humor in non-English 
languages like Chinese. To address this gap, we construct Chumor, the first and the largest Chinese humor explanation dataset. Chumor is sourced from Ruo Zhi Ba (RZB, 弱智吧), a Chinese Reddit-like platform known for sharing intellectually challenging and culturally specific jokes. We test ten LLMs through direct and chain-of-thought prompting, revealing that Chumor poses significant challenges to existing LLMs, with their accuracy slightly above random and far below human. In addition, our analysis highlights that human-annotated humor explanations are significantly better than those generated by GPT-4o and ERNIE\\(_{4\\text{-turbo}}\\). We release Chumor at https://huggingface.co/datasets/MichiganNLP/Chumor, our project page is at https://github.com/MichiganNLP/Chumor-2.0, our leaderboard is at https://huggingface.co/spaces/MichiganNLP/Chumor-leaderboard, and our codebase is at https://github.com/MichiganNLP/Chumor-2.0." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.666, + 0.262, + 0.682 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.691, + 0.491, + 0.9 + ], + "angle": 0, + "content": "Humor is an intrinsic human trait that touches the core of our social and emotional lives, making it a rich field of study across various disciplines (Lefcourt, 2001; Mihalcea and Strapparava, 2005; Gelkopf et al., 2011; Hessel et al., 2023). With the advent of Large Language Models (LLMs), researchers have evaluated LLMs' performance on diverse tasks (Liu et al., 2023a; Deng et al., 2024; Wu et al., 2023) and observed LLMs' extraordinary performance on many (Zhang et al., 2024b). In contrast, researchers have observed that LLMs still fail to understand humor (Ghanadian et al., 2023). However, with all these studies on humor," + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.262, + 0.885, + 0.342 + ], + "angle": 0, + "content": "most evaluations remain in English (Radev et al., 2016; Hasan et al., 2019). 
This presents a significant gap, particularly for non-English languages like Chinese, where culturally nuanced humor understanding is unexamined." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.343, + 0.885, + 0.567 + ], + "angle": 0, + "content": "In this paper, we try to bridge this gap by constructing Chumor, a funny and challenging Chinese humor understanding dataset sourced from Ruo Zhi Ba (RZB, \"弱智吧\" in Chinese), a Chinese version of Reddit platform known for sharing intellectually challenging and culturally specific jokes. This platform provides a set of unique Chinese jokes that incorporate the subtleties and intricacies of Chinese humor. Table 1 provides examples of the jokes from RZB. In addition, Bai et al. (2024) reveal that tuning LLMs on RZB data yields the best performance on Chinese reasoning tasks compared to other data sources, highlighting the significant value of jokes from RZB." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.569, + 0.885, + 0.922 + ], + "angle": 0, + "content": "Unlike existing datasets that focus on tasks such as humor detection, punchline identification, or humor generation, Chumor addresses the challenge of humor explanation. This involves not just identifying humor but understanding the reasoning behind it, a task that requires both linguistic and cultural knowledge. Specifically, Chumor tasks the LLMs with determining whether an explanation fully explains the joke. We source the explanations from GPT-4o and ERNIE\\(_{4\\text{-turb}}\\), and have the entire dataset manually annotated by five native Chinese speakers. We evaluate ten LLMs from various model families, and reveal that all models perform poorly, lagging significantly behind humans on Chumor. We observe that chain-of-thought prompting does not necessarily improve models performance and can sometimes confuse their reasoning process. 
In addition, we conduct a case study in which one of the authors annotates the entire dataset, followed by A/B testing conducted by six native Chinese speakers to compare explanations from GPT-4o versus human, and" + }, + { + "type": "page_footnote", + "bbox": [ + 0.136, + 0.907, + 0.359, + 0.921 + ], + "angle": 0, + "content": "†Corresponding author of this work." + }, + { + "type": "page_number", + "bbox": [ + 0.475, + 0.928, + 0.528, + 0.941 + ], + "angle": 0, + "content": "21799" + }, + { + "type": "footer", + "bbox": [ + 0.221, + 0.946, + 0.779, + 0.974 + ], + "angle": 0, + "content": "Findings of the Association for Computational Linguistics: ACL 2025, pages 21799-21818 July 27 - August 1, 2025 ©2025 Association for Computational Linguistics" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.118, + 0.082, + 0.881, + 0.371 + ], + "angle": 0, + "content": "
Cultural
Desc.Require knowledge of specific historical, social, or linguistic contexts.
Ex.(zh)小明在正月接发竟导致舅舅复活。 (en) Xiaoming got hair extensions during the first lunar month, which astonishingly brought his uncle back to life.
Situational
Desc.Involve humor derived from specific contexts, irony, or narrative setups.
Ex.(zh)真可怕, 犯罪嫌疑人就在我们之中,被告席上一名法警对另一名法警说。 (en)“Terrifying, the criminal suspect is right between the two of us,” said one bailiff to another in the defendant's dock.
Pun-based
Desc.Build on linguistic ambiguity and wordplay, require models to identify dual meanings.
Ex.(zh)你可以在steam上找到GTA,所以水是DNA。 (en) You can find GTA on Steam, so water is DNA.
Homophobic
Desc.Rely on phonetic similarities between words or phrases to create humor.
Ex.(zh)家里的猪油没了,小明只能把植物油倒快点当猪油用了。 (en) With the lard gone, Xiaoming had to pour the vegetable oil quickly to use it like lard.
Glyph-based
Desc.Exploit the structural or visual elements of Chinese characters to create humor.
Ex.(zh)我把電串難題简化了,现在是电车难题。 (en) I simplified the trolley problem (in traditional Chinese), now it's the trolley problem (in simplified Chinese).
Cross-lingual
Desc.Involve humor derived from linguistic or phonetic interplay across multiple languages.
Ex.(zh)曹操于城楼上问夏侯惇:“你瞧到了什么。”夏侯惇说:“瞧到马岱。” (en) Cao Cao, from atop the city tower, asked Xia Houdun, “What did you see?” Xia Houdun replied, “I saw Ma Dai.”
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.379, + 0.884, + 0.41 + ], + "angle": 0, + "content": "Table 1: Different types of jokes. Descriptions (Desc.) explain humor mechanisms. Examples (Ex.) illustrate each category. The corresponding explanations can be found in the referenced figures from the rightmost column." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.434, + 0.489, + 0.53 + ], + "angle": 0, + "content": "ERNIE\\(_{4}\\)-turbo versus human. Our results indicate that human-annotated joke explanations are significantly better than those produced by GPT-4o or ERNIE\\(_{4}\\)-turbo (Figure 4), with LLMs yielding winning rates of only \\(6.2\\%\\) for GPT-4o and \\(5.3\\%\\) for ERNIE\\(_{4}\\)-turbo compared to humans." + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.532, + 0.462, + 0.546 + ], + "angle": 0, + "content": "In summary, our contributions are threefold:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.562, + 0.486, + 0.611 + ], + "angle": 0, + "content": "1. We construct Chumor, a funny and challenging Chinese humor understanding dataset, which is the largest Chinese humor explanation dataset." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.624, + 0.489, + 0.704 + ], + "angle": 0, + "content": "2. We evaluate ten LLMs on Chumor and reveal the significant challenges Chumor possesses. We highlight that the best accuracy achieved by LLMs is \\(60.3\\%\\), significantly lower than human's score of \\(78.3\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.718, + 0.49, + 0.815 + ], + "angle": 0, + "content": "3. We demonstrate that chain-of-thought prompting can hurt LLM's performance in humor reasoning, and that human-annotated joke explanations are significantly better than those produced by GPT-4o and ERNIE\\(_{4\\text{-turbo}}\\), urging future research on culturally specific humor understanding." 
+ }, + { + "type": "list", + "bbox": [ + 0.115, + 0.562, + 0.49, + 0.815 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.83, + 0.279, + 0.846 + ], + "angle": 0, + "content": "2 Related Works" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.858, + 0.49, + 0.922 + ], + "angle": 0, + "content": "Humor Datasets. Humor analysis in natural language processing (NLP) encompasses a wide range of tasks, each focused on different aspects of humor. For instance, researchers have proposed datasets" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.434, + 0.885, + 0.804 + ], + "angle": 0, + "content": "such as “16000 One-Liners” (Mihalcea and Strapparava, 2005), “Pun of the Day” (Yang et al., 2015), and “Ted Laughter” (Chen and Lee, 2017) focused on humor detection to determine whether a given text is humorous or not. Datasets such as “Big Bang Theory” (Bertero and Fung, 2016) aim at pinpointing the punchline in a joke. Tasks for assessing humor intensity include humor level rating, comparison, and ranking. For example, datasets like HumorNorm (Engelthaler and Hills, 2018) and #Hashtag Wars (Potash et al., 2017) quantify humor scores and compare comedic elements, while UR-Funny ranks punchlines based on their perceived impact. Datasets such as “Humicroedit” (Hossain et al., 2019), “\\(C^3\\)” (Wang et al., 2022), and “Talk-Funny” (Chen et al., 2024) focus on humor generation, the task of generating or rewriting humorous texts. In addition, we present a comprehensive overview of the existing datasets related to humor in Table 2. We highlight that most existing datasets are in English. Chinese humor, on the other hand, is less explored. Our dataset, Chumor is the first humor explanation dataset in Chinese." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.826, + 0.885, + 0.922 + ], + "angle": 0, + "content": "Culturally Specific Datasets. 
Recent works underscore the challenges of culturally specific reasoning in LLMs (Shen et al., 2024; AlKhamissi et al., 2024; Pawar et al., 2024; Vayani et al., 2024). These challenges stem from the overrepresentation of Western-centric knowledge and translation ar" + }, + { + "type": "page_number", + "bbox": [ + 0.476, + 0.928, + 0.526, + 0.941 + ], + "angle": 0, + "content": "21800" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.116, + 0.082, + 0.483, + 0.421 + ], + "angle": 0, + "content": "
DatasetSourcesLan.#(k)Tasks
One Liners (2005)Weben16HR
Pun of the Day (2015)Weben4.8HR PD
Big Bang Theory (2016)TVen44PD
Ted Laughter (2017)TEDen9.4HR PD
#HashtagWars (2017)TVen13HC
HumorNorm (2018)\\( CS^† \\)en5HC
UR-FUNNY (2019)TEDen17PD
Humicroedit (2019)Redditen15HG
rJokes (2020)Redditen57HC
Memotion (2020)Memesen9.8HC
MUMOR (2021)TVen zh30HR
NYT-Captions (2023)NYTen0.7 2.6HE HC
\\( C^3 \\) (2022)Bookszh9.3HG
TalkFunny (2024)Appszh4.1HG
TCHD (2023)-zh26HR HC PD
TTWS (2019)Bookszh9.1PD
CHM (2020)Apps Webzh3.3HC
Memeplate (2022)Apps Webzh5.2HC
Chumor (us)Webzh3.3HE
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.43, + 0.49, + 0.504 + ], + "angle": 0, + "content": "Table 2: Existing datasets related to humor. For the shorthands in the table, abbreviations represent the following tasks, HR: humor recognition; PD: punchline detection; HC: humor comparison; HG: humor generation; HE: humor explanation †: Crowd-source." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.529, + 0.49, + 0.643 + ], + "angle": 0, + "content": "tifacts, which limit the fairness and effectiveness of multilingual evaluations (Mihalcea et al., 2024). Researchers have proposed various culturally specific datasets such as Global-MMLU (Singh et al., 2024) to evaluate LLMs' cultural knowledge. Chumor adds to this line of effort as it involves rich knowledge specific to Chinese culture." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.658, + 0.293, + 0.673 + ], + "angle": 0, + "content": "3 Chumor Dataset" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.685, + 0.49, + 0.895 + ], + "angle": 0, + "content": "Data Collection. We construct our dataset by including RZB jokes from \"Best Annual Threads\" between 2018 and 2021 that have been previously crawled†. In addition, we directly collect all threads in the \"Moderator's Recommendation\" section from RZB. Each thread in RZB consists of \"标题\"(title),\"一楼\"(content), and several \"跟帖\"(follow-up posts). For threads from Best Annual Threads, the jokes are listed in the follow-up posts, which are selected by the forum moderator. For threads from Moderator's Recommendation, the jokes consist of the title and the content of each thread. We remove the content if it repeats the title." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.885, + 0.31 + ], + "angle": 0, + "content": "Data Cleaning. We store both the title and the content of the raw data. 
However, due to the posting restrictions of the platform requiring non-empty content, many posts contain meaningless placeholder texts such as “:”, “!”, “0”, “RT”, and others. We automatically identify and remove these patterns, and only keep the title which is the joke itself. Due to the length limitations on the original platform, many post titles are truncated from the beginning parts of the content. We identify these instances and replace the truncated title with the complete content to get the joke. We also remove duplicates that appear both in the “Moderator’s Recommendation” and the “Best Annual Posts”." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.311, + 0.885, + 0.39 + ], + "angle": 0, + "content": "We manually remove the threads related to forum management and rules, threads that include excessively offensive content, threads with incomplete content, and threads that focus more on philosophical insight rather than humor." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.4, + 0.885, + 0.626 + ], + "angle": 0, + "content": "Humor Explanation Classification. We design a humor explanation classification task that can be easily used to test LLMs' capabilities in humor understanding. Specifically, we use two LLMs, GPT-4o and ERNIE\\(_{4}\\)-turbo to generae explanations for our collected jokes. We manually annotate the generated explanations as either \"fully explain the joke\" (good) or \"partially explain or not explain the joke\" (bad) based on a majority vote among five of the authors who are native Chinese speakers. Each joke, along with its explanation, forms an individual instance in Chumor, leading to a total of 3,339 instances. Among these, 1,454 items are labeled as good and 1,887 as bad explanations." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.634, + 0.884, + 0.666 + ], + "angle": 0, + "content": "Data Examples from Chumor. We present examples from Chumor in Table 3." 
+ }, + { + "type": "text", + "bbox": [ + 0.508, + 0.675, + 0.884, + 0.724 + ], + "angle": 0, + "content": "Humor Categorization in Chumor. We categorize the jokes in RZB into six types in Table 1, with an example provided for each type." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.736, + 0.656, + 0.752 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.761, + 0.885, + 0.921 + ], + "angle": 0, + "content": "Models. We test ten LLMs, five from the open-source LLM families and five from the closed-source LLM families, all capable of handling Chinese. Specifically, we include the open-source LLMs of \\(\\mathbf{Yi}_{34\\mathbf{B}}\\) (01.ai, 2024) from 01.AI, Nemotron70B (NVIDIA, 2024) from NVIDIA, Athene70B (Nexusflow, 2024) from Nexusflow, Qwen2.572B (Qwen, 2024) from Alibaba, Mistral123B (Mistral.ai, 2024) from Mistral AI, alongside the closed-source LLMs of" + }, + { + "type": "page_footnote", + "bbox": [ + 0.136, + 0.906, + 0.372, + 0.921 + ], + "angle": 0, + "content": "\\(\\dagger\\) https://github.com/Leymore/ruozhiba" + }, + { + "type": "page_number", + "bbox": [ + 0.476, + 0.928, + 0.524, + 0.941 + ], + "angle": 0, + "content": "21801" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.119, + 0.082, + 0.88, + 0.263 + ], + "angle": 0, + "content": "
JokeExplanationLabelSource
(zh) “你自己掰了一千吨玉米?”“哈哈我瞎掰的”(zh) 这个笑话的幽默之处在于对话中的双关语和出人意料的回答。“掰了一千吨玉米”看似是一个惊人的壮举,而“哈哈我瞎掰的”则瞬间揭露了这其实是一个夸张的玩笑,这种巧妙的反转和语言的戏谑带来了幽默感。goodERNIE4-turbo
(en) “You shucked a thousand tons of corn all by yourself?”“Haha, let's just say there's been plenty of shuckin' going on.”(en) The humor in this joke lies in the pun and the unexpected response in the conversation. “Shucked a thousand tons of corn” seems like an extraordinary achievement, but the reply, ‘haha, let's just say there's been plenty of shuckin' going on,” instantly reveals it to be an exaggerated joke. This clever twist and playful use of language create the comedic effect.
(zh) 小明突然出现在我面前以至于我突然出现在小明面前。(zh) 这个笑话的幽默之处在于它利用了语言的对称性和意外性。小明突然出现导致叙述者的惊讶反应,而叙述者的惊讶反应又反过来让小明感到意外,形成了一个有趣的循环。badGPT-4o
(en) Xiaoming suddenly appeared in front of me, causing me to suddenly appear in front of him.(en) The humor in this joke lies in its use of linguistic symmetry and unexpectedness. Xiao Ming's sudden appearance triggers a surprised reaction from the narrator, which in turn surprises Xiao Ming, creating an amusing loop.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.281, + 0.885, + 0.354 + ], + "angle": 0, + "content": "Table 3: Examples from Chumor. The second example's explanation is bad because the joke does not \"creating an amusing loop\". Instead, it relies on linguistic symmetry and the use of a straightforward fact to subvert expectations. The audience anticipates an unexpected outcome due to the setup, but the latter part \"suddenly appear in front of him\" flips the perspective by stating the straightforward fact that because Xiao Ming is in front of the person so the person is in front of Xiao Ming too." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.378, + 0.489, + 0.475 + ], + "angle": 0, + "content": "Gemini\\(_{1.5-pro}\\) (Google, 2024) from Google, GLM\\(_{4\\text{plus}}\\) (BigModel, 2024) from Tsinghua University, GPT-4\\(_{\\text{turbo}}\\), GPT-4o (OpenAI, 2023, 2024) from OpenAI, ERNIE\\(_{4\\text{turbo}}\\) (Baidu, 2024) from Baidu. For all the open-source LLMs, we use the instruction-tuned version in our evaluation." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.486, + 0.487, + 0.534 + ], + "angle": 0, + "content": "Evaluation Methods. 
We evaluate these LLMs using two prompting methods: direct prompting (DP) by" + }, + { + "type": "title", + "bbox": [ + 0.125, + 0.547, + 0.307, + 0.563 + ], + "angle": 0, + "content": "Direct Prompting (DP)" + }, + { + "type": "text", + "bbox": [ + 0.124, + 0.578, + 0.478, + 0.643 + ], + "angle": 0, + "content": "你将看到一个笑话以及对这个笑话的解释。请判断这个解释是否完全解释了笑话。根据判断,选择“完全解释”或“部分/没有解释”,不需要解释为什么对或者不对。" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.644, + 0.227, + 0.66 + ], + "angle": 0, + "content": "笑话:[joke]" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.66, + 0.318, + 0.676 + ], + "angle": 0, + "content": "笑话解释:[explanation]" + }, + { + "type": "title", + "bbox": [ + 0.255, + 0.678, + 0.348, + 0.69 + ], + "angle": 0, + "content": "Translation" + }, + { + "type": "text", + "bbox": [ + 0.123, + 0.692, + 0.48, + 0.787 + ], + "angle": 0, + "content": "You will see a joke and an explanation of the joke. Please determine whether this explanation fully explains the joke. Based on your judgment, choose either \"fully explain\" or \"partially/does not explain.\" You do not need to explain why it is correct or incorrect." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.79, + 0.216, + 0.805 + ], + "angle": 0, + "content": "Joke: [joke]" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.806, + 0.324, + 0.821 + ], + "angle": 0, + "content": "Explanation: [explanation]" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.842, + 0.49, + 0.922 + ], + "angle": 0, + "content": "and chain-of-thought (CoT) prompting (Wei et al., 2022) by adding the phrase “请逐步思考,写下过程”“Please think step by step, write down your reasoning process” before determining the label. Appendix F provides the complete prompts. We cal" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.379, + 0.885, + 0.571 + ], + "angle": 0, + "content": "culate accuracy scores as part of our evaluation. 
In addition, we provide the false positive rate (FPR), false negative rate (FNR), and Matthews Correlation Coefficient (MCC) in Appendix H in Table 4. The MCC score considers true positives, true negatives, false positives, and false negatives, providing a score between -1 and +1. A score of +1 indicates perfect predictions, 0 reflects random guessing, and -1 means complete disagreement. The best MCC score achieved by LLMs is 0.29, which is close to random guessing, and is significantly lower than the human average of 0.60." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.583, + 0.749, + 0.598 + ], + "angle": 0, + "content": "5 Results and Discussions" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.608, + 0.883, + 0.671 + ], + "angle": 0, + "content": "Overall Model Performance. Figure 1 presents the accuracy of different LLMs on Chumor in DP and CoT settings. Appendix H presents additional results and analysis." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.673, + 0.884, + 0.816 + ], + "angle": 0, + "content": "Overall, we observe that all models perform poorly on Chinese humor comprehension, with accuracy scores ranging between \\(44.6\\%\\) and \\(60.3\\%\\). ERNIE\\(_{4\\text{-}\\text{turbo}}\\) and Gemini\\(_{1.5\\text{-}\\text{pro}}\\) achieve the highest accuracy of \\(60.3\\%\\), and are just 10 points above the random baseline and far below human performance of \\(78.3\\%\\), highlighting the difficulty of Chumor and the limitations of these LLMs in understanding Chinese humor." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.826, + 0.884, + 0.922 + ], + "angle": 0, + "content": "Error Analysis by Joke Type. To better understand how LLMs perform on each joke type listed in Table 1, we sample 200 jokes for error analysis. Figure 2 and Figure 17 in Appendix H present the results. The distribution of joke types can be found in Appendix G Figure 15." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.476, + 0.928, + 0.526, + 0.941 + ], + "angle": 0, + "content": "21802" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.116, + 0.082, + 0.486, + 0.408 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.114, + 0.417, + 0.49, + 0.462 + ], + "angle": 0, + "content": "Figure 1: The accuracy of different models' test results in the DP and CoT settings. ERNIE\\(_{4}\\)-turbo and Gemini\\(_{1.5\\text{-pro}}\\) achieve the highest accuracy of \\(60.3\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.486, + 0.49, + 0.742 + ], + "angle": 0, + "content": "We highlight that model performance varies significantly across different joke types. While models generally perform well on Situational jokes, achieving \\(60.0\\%\\) to \\(70.0\\%\\) accuracy in both DP and CoT settings, their performance difference on other joke types is more pronounced. For instance, GLM-4plus achieves \\(65.0\\%\\) accuracy on Homophonic jokes in the DP setting, whereas \\(\\mathrm{Yi}_{34\\mathrm{B}}\\) only reaches \\(30.0\\%\\). Nematron70\\mathrm{B}\\ performs well on Cultural jokes in the CoT setting with \\(72.0\\%\\) accuracy, but Athene70\\mathrm{B}\\ and ERNIE4-turbo achieve with only \\(43.0\\%\\) and \\(42.0\\%\\), respectively. Such performance variance highlights LLMs' varied capabilities in specific domains such as cultural reasoning and situational reasoning, revealing the respective limitations of these LLMs." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.756, + 0.429, + 0.787 + ], + "angle": 0, + "content": "5.1 Have LLMs achieved human-level understanding of humor?" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.793, + 0.49, + 0.922 + ], + "angle": 0, + "content": "Answer: No. To compare the performance of LLMs with humans, we conduct a human study involving three Chinese native speakers unfamiliar with this work to annotate a randomly chosen subset of 200 examples. 
Human annotators demonstrate significantly better performance, with an average accuracy of \\(78.3\\%\\) and an MCC score of 0.60, significantly better than the LLMs' best per" + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.082, + 0.882, + 0.678 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.685, + 0.885, + 0.743 + ], + "angle": 0, + "content": "Figure 2: DP accuracy on different joke types \\((\\%)\\). Here, we sample 200 jokes for error analysis. We highlight that model performance varies significantly across different joke types." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.778, + 0.885, + 0.858 + ], + "angle": 0, + "content": "formance of \\(60.3\\%\\) accuracy and an MCC score of 0.29 (Figure 16 in Appendix H). Our results indicate that there is a large room of performance improvement for LLMs on Chinese humor understanding." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.887, + 0.88, + 0.918 + ], + "angle": 0, + "content": "5.2 Does chain-of-thought (CoT) help LLMs' humor understanding?" + }, + { + "type": "page_number", + "bbox": [ + 0.476, + 0.928, + 0.526, + 0.941 + ], + "angle": 0, + "content": "21803" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.113, + 0.081, + 0.885, + 0.319 + ], + "angle": 0, + "content": "
Example(zh)“大人有大量,小孩倒是不多了。”食人族厨师清点食材说。 \n(en)“Adults are plenty, but children are quite scarce,” said the cannibal chef while counting ingredients.
Joke Explanation(zh)这个笑话的幽默之处在于其运用了惊悚与诙谐的巧妙结合,以及意料之外的语言转折。食人族厨师清点食材的场景已经足够诡异,但他却以“大人有大量,小孩倒是不多了”这样似乎很平常的口吻来评论,这种对比和反差产生了荒诞的幽默感,同时也在探讨一种颠覆性的、黑暗式的玩笑。 \n(en)The humor in this joke lies in its clever combination of horror and wit, as well as the unexpected linguistic twist. The scene of a cannibal chef counting ingredients is already eerie, but the casual remark, "Adults are plenty, but children are quite scarce," delivered in an ordinary tone, creates a stark contrast. This juxtaposition generates absurd humor while exploring a subversive, dark style of comedy.
Model's Answer DP✓(zh)部分/没有解释 \n(en)Partially/Not ExplainedModel's Answer CoT X (zh) ...这句话使用了一个常用的谚语“大人有大量”来双关...比较您的理解和解释:笑话解释涵盖了我的分析要点,指出了对比和反差产生的荒诞幽默感,并提到了黑暗式幽默。解释完全涵盖了笑话的所有幽默点,因此选择“完全解释”。 \n(en) ...This sentence uses a common proverb, "adults are plenty," as a pun... Comparison of your understanding and the explanation: The joke explanation covers the key points of my analysis, highlighting the absurd humor created by contrast and reversal, and mentioning the element of dark humor. The explanation fully addresses all aspects of the joke's humor, so “Fully Explained” is selected.
CoT Failure ReasonsObserving model's explanation shows it recognizes the pun in the joke. However, the joke explanation being evaluated misses the pun. Model correctly identifies this in DP but, in CoT, despite noticing the pun, attempts to justify the flawed explanation, leading to an incorrect judgment.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.326, + 0.884, + 0.357 + ], + "angle": 0, + "content": "Figure 3: Over-analyzing example by GPT-4o. The GPT-4o model chooses the correct answer in the DP prompting, but chooses the incorrect answer due to over-analyzing in the CoT prompting." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.374, + 0.489, + 0.631 + ], + "angle": 0, + "content": "Answer: No. We observe that CoT does not necessarily improve model performance and, in most cases, even leads to performance decay. For instance, as shown in Figure 1, the accuracy of \\(\\mathrm{ERNIE}_{4}\\)-turbo decreases from \\(60.3\\%\\) to \\(45.2\\%\\) when we switch to CoT prompting, Mistral\\(_{123B}\\)'s performance drops from \\(55.6\\%\\) to \\(51.2\\%\\), GPT-4o's performance drops from \\(51.9\\%\\) to \\(50.6\\%\\), GPT-4turbo's performance falls from \\(52.3\\%\\) to \\(51.3\\%\\). Moreover, the MCC scores present a clearer trend of performance decline under CoT prompting. As shown in Figure 16 in Appendix H, eight of the ten LLMs' MCC scores decrease under CoT prompting. We hypothesize that CoT prompts may not help the model's reasoning when the model lacks a fundamental grasp of humor understanding." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.632, + 0.49, + 0.84 + ], + "angle": 0, + "content": "We observe that under CoT prompting, models like GPT-4o tend to justify incorrect explanations as \"correct\", leading to an increase in false-positive rate from \\(80.0\\%\\) for DP prompting to \\(85.0\\%\\) for CoT prompting (Table 4 in Appendix H). \\(\\mathrm{ERNIE}_{4}\\)-turbo exhibits the largest false-positive rate, rising from \\(59.8\\%\\) to \\(96.9\\%\\) (Table 4 in Appendix H). Figure 3 provides an example where CoT confuses the GPT-4o model. Under the DP prompting, the GPT-4o model chooses the answer correctly. However, CoT prompting causes the model to over-analyze and justify an incorrect explanation." 
+ }, + { + "type": "text", + "bbox": [ + 0.508, + 0.842, + 0.884, + 0.922 + ], + "angle": 0, + "content": "On the other hand, models like Nemotron\\(_{70}\\) may be overly critical of explanations under CoT prompting, resulting in a false-negative rate from \\(20.9\\%\\) for DP prompting to \\(46.1\\%\\) for CoT prompting (Table 4 in Appendix H). We highlight that a" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.374, + 0.885, + 0.472 + ], + "angle": 0, + "content": "recent work demonstrates that CoT can degrade performance in tasks requiring subtle comprehension (Sprague et al., 2024), which aligns with our findings on its limitations in humor interpretation. Figure 14 in Appendix E discusses an example corresponding to the model being overly critical." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.487, + 0.882, + 0.518 + ], + "angle": 0, + "content": "5.3 Case study: can GPT-4o and ERNIE\\(_{4}\\)-turbo explain jokes as well as humans?" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.526, + 0.884, + 0.638 + ], + "angle": 0, + "content": "Answer: No. Apart from testing multiple LLMs on Chumor, we conduct case studies on GPT-4o and ERNIE\\(_{4}\\)-turbo to assess the quality of their joke explanations compared to humans. We prompt them to explain the humor in two sentences, consistent with the format of human explanations. Here is the prompt we feed to both LLMs:" + }, + { + "type": "title", + "bbox": [ + 0.519, + 0.654, + 0.586, + 0.67 + ], + "angle": 0, + "content": "Prompt" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.685, + 0.873, + 0.716 + ], + "angle": 0, + "content": "请用两句话解释这个笑话的幽默之处: [joke]" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.719, + 0.873, + 0.735 + ], + "angle": 0, + "content": "Please explain the joke in two sentences: [joke]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.76, + 0.885, + 0.84 + ], + "angle": 0, + "content": "Data Annotation. As demonstrated by Hessel et al. 
(2023), crowd-sourcing typically cannot produce high-quality explanations, following Hessel et al. (2023), one of the authors annotates all the explanations to ensure the quality and consistency." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.842, + 0.884, + 0.922 + ], + "angle": 0, + "content": "This is a substantial effort: the author ended up annotating the explanations for 1,951 jokes. The resulting corpus has a mean of 78 Chinese characters of explanation per joke, and the total length, 151,730 Chinese characters, is comparable" + }, + { + "type": "page_number", + "bbox": [ + 0.476, + 0.928, + 0.526, + 0.941 + ], + "angle": 0, + "content": "21804" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.116, + 0.081, + 0.488, + 0.165 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.173, + 0.49, + 0.232 + ], + "angle": 0, + "content": "Figure 4: Annotated preference for whether human explanation is preferred (\"Human wins\") or the explanation from LLMs is preferred (\"LLM wins\"). Humans' explanation is significantly preferred over LLMs'." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.248, + 0.286, + 0.265 + ], + "angle": 0, + "content": "in length to a novella†." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.274, + 0.489, + 0.514 + ], + "angle": 0, + "content": "Evaluation Setup. To fairly evaluate which explanation is better, we conduct A/B testing by presenting the humor explanation from one LLM and from human to six college students, asking them to annotate their preference of the explanation for each joke. These college students are native Chinese speakers who grew up in China, therefore they have a deep understanding of the cultural terms and trending terms in China. We note that the preference annotation requires a substantial effort as each annotator reads through a total length of around 300k Chinese characters†. We end up with three preference annotations for each joke. 
The preference annotation achieves a \\(61.4\\%\\) agreement rate among annotators (Appendix B)." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.515, + 0.49, + 0.627 + ], + "angle": 0, + "content": "We use the winning rate as our measure to compare LLMs' explanation versus human explanation, taking the majority vote among all annotators for each example. In addition, if all annotators disagree, we assign an \"Undecided\" label. Appendix C provides the annotation instructions we present to the annotators." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.637, + 0.49, + 0.734 + ], + "angle": 0, + "content": "Overall Results. Figure 4 reports the winning rate of explanations from human versus GPT-4o and ERNIE\\(_{4\\text{-turbo}}\\). We can see that human explanations are significantly better than those from both LLMs, with humans winning over \\(50\\%\\) of the time, while LLMs win in only \\(2 - 3\\%\\) of cases." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.742, + 0.49, + 0.839 + ], + "angle": 0, + "content": "Error Analysis. Figure 5 shows the overall distribution of error types for GPT-4o and ERNIE\\(_{4\\text{-}\\text{turbo}}\\) on Chumor in terms of their humor explanations. This error analysis is conducted by an individual who is not involved in writing the original explanations, ensuring an unbiased evaluation. GPT-4o" + }, + { + "type": "image", + "bbox": [ + 0.532, + 0.082, + 0.865, + 0.271 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.279, + 0.885, + 0.381 + ], + "angle": 0, + "content": "Figure 5: Distribution of error types for GPT-4o and ERNIE\\(_{4\\text{-}\\text{turbo}}\\). We sample 200 examples to calculate the distribution of these error types. We note that each example may correspond to multiple error types. 
We highlight that ERNIE\\(_{4\\text{-}\\text{turbo}}\\) demonstrates a lower error rate on cultural jokes, while GPT-4o demonstrates a lower error rate on contextual or pun-based jokes." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.4, + 0.885, + 0.624 + ], + "angle": 0, + "content": "is more prone to errors categorized as \"cultural unawareness\" (29.5% of all its explanations) compared to \\(\\mathrm{ERNIE}_{4\\text{-}\\text{turbo}}\\) (10.5%). We suspect that \\(\\mathrm{ERNIE}_{4\\text{-}\\text{turbo}}\\) is more familiar with Chinese culture as it is likely trained on a larger Chinese corpus than GPT-4o. However, GPT-4o performs better on cases requiring an understanding of contexts or puns, suggesting its strong reasoning ability. We provide three error cases for GPT-4o here and additional cases for both GPT-4o and \\(\\mathrm{ERNIE}_{4\\text{-}\\text{turbo}}\\) in Appendix E. In the following examples in Figure 6, Figure 7 and Figure 8, we highlight key phrases that induce humor in green, and underscore the errors in red." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.637, + 0.884, + 0.862 + ], + "angle": 0, + "content": "Error Type I: Cultural Unawareness. LLMs may fail to explain a joke due to a lack of awareness of certain cultural knowledge. For instance, the example in Figure 6 requires knowledge of a superstitious belief in Chinese culture, getting a haircut in the first lunar month brings death to your uncle, and the explanation from GPT-4o fails to connect to this Chinese cultural belief. We hypothesize that while LLMs are pre-trained on Internet-scale corpora, such culturally specific knowledge can still be challenging for them to grasp. Moreover, even when they have acquired such cultural knowledge, they may fail to relate to it as we humans do during the reasoning process." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.874, + 0.884, + 0.922 + ], + "angle": 0, + "content": "Error Type II: Pun-based Error. 
LLMs may fail to identify multiple meanings of a single word within a joke, causing them to fail on pun-based" + }, + { + "type": "page_footnote", + "bbox": [ + 0.113, + 0.847, + 0.488, + 0.895 + ], + "angle": 0, + "content": "†The total length of our explanations surpasses the Chinese version of The Great Gatsby (100k Chinese characters), and is about half the length of the Chinese version of Wuthering Heights (325k Chinese characters)." + }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.895, + 0.488, + 0.922 + ], + "angle": 0, + "content": "This is about the same length of the Chinese version of Wuthering Heights (325k Chinese characters)." + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.847, + 0.488, + 0.922 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.476, + 0.928, + 0.526, + 0.941 + ], + "angle": 0, + "content": "21805" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.115, + 0.084, + 0.487, + 0.344 + ], + "angle": 0, + "content": "
Example(zh) 小明在正月接发竟导致舅舅复活。 (en) Xiaoming got hair extensions during the first lunar month, which astonishingly brought his uncle back to life.
Correct Humor Explanation“Getting a haircut in the first lunar month will bring death to your uncle.” is a popular superstitious saying in Chinese culture. In this joke, Xiao Ming gets hair extensions in the first month, which reverses the original logic and absurdly results in “bringing his uncle back to life.”
GPT-4o's Answer(zh) ...它利用了“接发”和“接发”的双关语:一方面是指理发店的接发服务,另一方面是指正月里“接福”的传统习俗。 (en) ...the pun on “接发”: it refers to both hair extension at a salon and the traditional practice of “receiving blessings” during the first lunar month.
Failure ReasonsFail to grasp expressions, slang, or specific usages unique to a particular cultural context.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.163, + 0.356, + 0.438, + 0.371 + ], + "angle": 0, + "content": "Figure 6: Culture unawareness example." + }, + { + "type": "table", + "bbox": [ + 0.117, + 0.385, + 0.487, + 0.629 + ], + "angle": 0, + "content": "
Example(zh) 你可以在steam上找到GTA,所以水是DNA。
(en) You can find GTA on Steam,so water is DNA.
Correct \nHumor \nExplanationNormally,“Steam”refers to a gaming platform and “GTA”refers to the “Grand Theft Auto” game series. The joke uses a pun, where “steam”literally means the gaseous form of water, and “G”,“T”,and “A” represent Guanine, Thymine, and Adenine, respectively—three of the four nucleotides (AGTC) that make up DNA.
GPT-4o's \nAnswer(zh)...这种明显错误的类比制造了荒诞的效果,引人发笑。
(en)...This clearly wrong analogy creates an absurd effect.
Failure \nReasonsFail to discover multiple meanings of a word in the jokes
" + }, + { + "type": "table_caption", + "bbox": [ + 0.179, + 0.64, + 0.421, + 0.654 + ], + "angle": 0, + "content": "Figure 7: Pun-based error example." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.681, + 0.49, + 0.922 + ], + "angle": 0, + "content": "jokes where humor lies in inverting the conventional usage of words. In Figure 7, GPT-4o fails to grasp the transition from the video game terms \"Steam\", \"GTA\" to the scientific terminologies \"G\", \"T\", \"A\" that form DNA. Typically, \"Steam\" refers to a game platform, and \"GTA\" refers to the game series \"Grand Theft Auto\". The joke employs a pun on words where \"steam\" in its literal sense means water vapor, and \"GTA\" can represent not only the video game, but guanine, thymine, and adenine, which are nucleotides involved in the structure of DNA. Such jokes require LLMs to identify puns and the reason for the association of the multiple meanings. Furthermore, the process requires LLMs to bridge the logic gap between these terms, such as" + }, + { + "type": "table", + "bbox": [ + 0.51, + 0.084, + 0.883, + 0.339 + ], + "angle": 0, + "content": "
Example(zh) 家里的猪油没了,小明只能把 植物油倒快点 当猪油用了。 (en) With the lard gone, Xiaoming had to pour the vegetable oil quickly to use it like lard.
Correct \nHumor \nExplanationWhen pronounced quickly,“vegetable” (pronounced as chih woo) sounds very similar to “pig” (pronounced as choo) in Chinese, leading Xiaoming to comically try using vegetable oil as lard by pouring it quickly.
GPT-4o's \nAnswer(zh) ...小明误解了猪油和植物油的区别,认为只要倒得快,植物油就能变成猪油。笑点在于他天真的逻辑和对常识的搞笑误解。 (en) ... Xiaoming misunderstood that if vegetable oil is poured quickly, it can turn into lard, highlighting his naive logic and misinterpretation of common sense.
Failure \nReasonsFail to exploit homophones or near-homophones in the jokes.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.565, + 0.349, + 0.825, + 0.364 + ], + "angle": 0, + "content": "Figure 8: Homophonic error example." + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.384, + 0.883, + 0.448 + ], + "angle": 0, + "content": "\"steam\" and \"GTA\", and an unusual context, like \"water is DNA\". The overall process requires both scientific knowledge and creative thinking, which are challenging for LLMs." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.461, + 0.885, + 0.671 + ], + "angle": 0, + "content": "Error Type III: Homophonic Error. The example in Figure 8 requires LLMs to reason over the pronunciation as “植物” (pronounced as chih woo, meaning “vegetable”) sounds very similar to “猪” (pronounced as choo, meaning “pig”) in Chinese when we speak it fast enough. The humor arises from the contrast between the similarity in pronunciation and the disparity in meaning between the two terms. Such contrasts may be sparse in the training corpus of LLMs, and also demand a deep connection across different modalities to link pronunciation with the meaning behind these terms, which poses significant challenges to LLMs." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.685, + 0.642, + 0.7 + ], + "angle": 0, + "content": "6 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.713, + 0.885, + 0.922 + ], + "angle": 0, + "content": "We introduce Chumor, a Chinese humor understanding dataset that captures intellectually challenging and culturally specific humor in Chinese. Our analysis reveals that Chumor remains difficult even for advanced LLMs, with a significant performance gap between LLMs and humans. Furthermore, we find that chain-of-thought reasoning does not improve LLMs' humor comprehension and, in some cases, leads to over-analysis and incorrect interpretations. 
Additionally, models such as GPT-4o and ERNIE\\(_{4}\\)-turbo struggle to explain jokes as effectively as humans, highlighting fundamental challenges in humor reasoning. These findings un" + }, + { + "type": "page_number", + "bbox": [ + 0.476, + 0.928, + 0.526, + 0.941 + ], + "angle": 0, + "content": "21806" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.493, + 0.168 + ], + "angle": 0, + "content": "derscore the unique difficulties that Chinese humor presents to LLMs. We hope that Chumor can advance non-English humor research and contribute to evaluating LLMs' reasoning abilities across diverse cultural backgrounds." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.178, + 0.221, + 0.193 + ], + "angle": 0, + "content": "Limitations" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.203, + 0.492, + 0.461 + ], + "angle": 0, + "content": "We try our best to test the Chinese humor understanding ability of different LLMs. However, due to the limited budget and API access, we cannot evaluate all possible LLMs in this paper. We encourage future research to conduct further evaluations of humor understanding abilities in LLMs. In the meantime, we emphasize that our research focuses primarily on demonstrating how humor understanding remains a significant challenge, even for SOTA LLMs. Our work shows that along with many other problems (Ignat et al., 2024), humor understanding, especially non-English and culturally specific humor understanding, remains an unsolved problem in the era of LLMs. We hope Chumor can contribute to non-English humor understanding evaluations for future multilingual LLMs." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.473, + 0.266, + 0.488 + ], + "angle": 0, + "content": "Ethics Statement" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.498, + 0.49, + 0.804 + ], + "angle": 0, + "content": "We have made every effort to filter out excessively offensive content in RZB. 
However, due to the subjective nature of humor, some of our jokes may still be perceived as offensive by individuals with different cultural or personal standards. To address these concerns, we strongly recommend that researchers use Chumor with cultural sensitivity, recognizing that the jokes in the dataset reflect the sociocultural context in which they were created. We encourage users of Chumor to approach the dataset with caution, remaining mindful of its potential to cause offense or harm, particularly when applying it in research or applications that involve diverse audiences or address sensitive topics. We wish to foster an ethical and responsible approach to data collection and usage, and we welcome constructive feedback from the research community and stakeholders to continually improve Chumor and mitigate potential harm." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.816, + 0.279, + 0.833 + ], + "angle": 0, + "content": "Acknowledgement" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.842, + 0.491, + 0.907 + ], + "angle": 0, + "content": "The GPT experiments are supported by credit from OpenAI through OpenAI Researcher Access assigned to Naihao Deng. We appreciate Qiang Liu, and Xiaoyue Shi for helping with the human study." + }, + { + "type": "title", + "bbox": [ + 0.511, + 0.084, + 0.61, + 0.099 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.107, + 0.886, + 0.147 + ], + "angle": 0, + "content": "01.ai. 2024. Yi-34b model card. https://huggingface.co/01-ai/Yi-34B. Accessed: 2024-12-10." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.156, + 0.886, + 0.262 + ], + "angle": 0, + "content": "Marah Abdin, Jyoti Aneja, Harkirat Behl, Sébastien Bubeck, Ronen Eldan, Suriya Gunasekar, Michael Harrison, Russell J. Hewett, Mojan Javaheripi, Piero Kauffmann, James R. Lee, Yin Tat Lee, Yuanzhi Li, Weishung Liu, Caio C. T. 
Mendes, Anh Nguyen, Eric Price, Gustavo de Rosa, Olli Saarikivi, and 8 others. 2024. Phi-4 technical report. Preprint, arXiv:2412.08905." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.271, + 0.886, + 0.365 + ], + "angle": 0, + "content": "Badr AlKhamissi, Muhammad ElNokrashy, Mai Alkhamissi, and Mona Diab. 2024. Investigating cultural alignment of large language models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 12404-12422, Bangkok, Thailand. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.373, + 0.886, + 0.453 + ], + "angle": 0, + "content": "Jinze Bai, Shuai Bai, Yunfei Chu, Zeyu Cui, Kai Dang, Xiaodong Deng, Yang Fan, Wenbin Ge, Yu Han, Fei Huang, Binyuan Hui, Luo Ji, Mei Li, Junyang Lin, Runji Lin, Dayiheng Liu, Gao Liu, Chengqiang Lu, Keming Lu, and 29 others. 2023. Qwen technical report. Preprint, arXiv:2309.16609." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.462, + 0.886, + 0.53 + ], + "angle": 0, + "content": "Yuelin Bai, Xinrun Du, Yiming Liang, Yonggang Jin, Ziqiang Liu, Junting Zhou, Tianyu Zheng, Xincheng Zhang, Nuo Ma, Zekun Wang, and 1 others. 2024. Coig-cqia: Quality is all you need for chinese instruction fine-tuning. arXiv preprint arXiv:2403.18058." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.538, + 0.886, + 0.579 + ], + "angle": 0, + "content": "Baidu. 2024. Ernie-4.0-turbo. https://cloud.baidu. com/doc/WENXINWORKSHOP/s/71xwwtafj. Accessed: 2024-12-10." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.588, + 0.886, + 0.668 + ], + "angle": 0, + "content": "Dario Bertero and Pascale Fung. 2016. Deep learning of audio and language features for humor prediction. In Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16), pages 496-501, Porto-rož, Slovenia. European Language Resources Association (ELRA)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.677, + 0.886, + 0.717 + ], + "angle": 0, + "content": "BigModel. 2024. Glm-4 model documentation. https://bigmodel.cn/dev/howuse/glm-4. Accessed: 2024-12-10." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.726, + 0.886, + 0.819 + ], + "angle": 0, + "content": "Lei Chen and Chong Min Lee. 2017. Predicting audience's laughter during presentations using convolutional neural network. In Proceedings of the 12th Workshop on Innovative Use of NLP for Building Educational Applications, pages 86-90, Copenhagen, Denmark. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.828, + 0.886, + 0.922 + ], + "angle": 0, + "content": "Yuyan Chen, Zhixu Li, Jiaqing Liang, Yanghua Xiao, Bang Liu, and Yunwen Chen. 2023. Can pre-trained language models understand chinese humor? In Proceedings of the Sixteenth ACM International Conference on Web Search and Data Mining, WSDM '23, page 465-480, New York, NY, USA. Association for Computing Machinery." + }, + { + "type": "list", + "bbox": [ + 0.51, + 0.107, + 0.886, + 0.922 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.928, + 0.526, + 0.941 + ], + "angle": 0, + "content": "21807" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.086, + 0.49, + 0.177 + ], + "angle": 0, + "content": "Yuyan Chen, Yichen Yuan, Panjun Liu, Dayiheng Liu, Qinghao Guan, Mengfei Guo, Haiming Peng, Bang Liu, Zhixu Li, and Yanghua Xiao. 2024. Talk funny! a large-scale humor response dataset with chain-of-humor interpretation. Proceedings of the AAAI Conference on Artificial Intelligence, 38(16):17826-17834." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.188, + 0.489, + 0.254 + ], + "angle": 0, + "content": "Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. 2018. Think you have solved question answering? 
try arc, the ai2 reasoning challenge. Preprint, arXiv:1803.05457." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.264, + 0.486, + 0.291 + ], + "angle": 0, + "content": "Peter T. Daniels and William Bright. 1996. The world's writing systems. Oxford University Press." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.3, + 0.487, + 0.366 + ], + "angle": 0, + "content": "Naihao Deng, Zhenjie Sun, Ruiqi He, Aman Sikka, Yu-long Chen, Lin Ma, Yue Zhang, and Rada Mihalcea. 2024. Tables as images? exploring the strengths and limitations of llms on multimodal representations of tabular data. arXiv preprint arXiv:2402.12424." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.376, + 0.487, + 0.468 + ], + "angle": 0, + "content": "Naihao Deng, Xinliang Zhang, Siyang Liu, Winston Wu, Lu Wang, and Rada Mihalcea. 2023. You are what you annotate: Towards better models through annotator representations. In *Findings of the Association for Computational Linguistics: EMNLP* 2023, pages 12475–12498, Singapore. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.478, + 0.487, + 0.544 + ], + "angle": 0, + "content": "Xinrun Du, Zhouliang Yu, Songyang Gao, Ding Pan, Yuyang Cheng, Ziyang Ma, Ruibin Yuan, Xingwei Qu, Jiaheng Liu, Tianyu Zheng, and 1 others. 2024. Chinese tiny llm: Pretraining a chinese-centric large language model. arXiv preprint arXiv:2404.04167." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.554, + 0.486, + 0.593 + ], + "angle": 0, + "content": "Tomas Engelthaler and Thomas T Hills. 2018. Humor norms for 4,997 english words. Behavior research methods, 50:1116-1124." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.604, + 0.487, + 0.629 + ], + "angle": 0, + "content": "WILLIAM F. FRY. 1994. The biology of humor. HUMOR, 7(2):111-126." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.64, + 0.486, + 0.68 + ], + "angle": 0, + "content": "Marc Gelkopf and 1 others. 2011. 
The use of humor in serious mental illness: A review. Evidence-Based Complementary and Alternative Medicine, 2011." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.69, + 0.487, + 0.742 + ], + "angle": 0, + "content": "Matthew Gervais and David Sloan Wilson. 2005. The evolution and functions of laughter and humor: A synthetic approach. *The Quarterly review of biology*, 80(4):395-430." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.753, + 0.487, + 0.857 + ], + "angle": 0, + "content": "Hamideh Ghanaian, Isar Nejadgholi, and Hussein Al Osman. 2023. ChatGPT for suicide risk assessment on social media: Quantitative evaluation of model performance, potentials and limitations. In Proceedings of the 13th Workshop on Computational Approaches to Subjectivity, Sentiment, & Social Media Analysis, pages 172-183, Toronto, Canada. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.868, + 0.487, + 0.919 + ], + "angle": 0, + "content": "Google. 2024. Gemini 1.5 pro model documentation. https://ai.google.dev/gemini-api/docs/ models/gemini#gemini-1.5-pro. Accessed: 2024-12-10." + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.086, + 0.49, + 0.919 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.086, + 0.882, + 0.217 + ], + "angle": 0, + "content": "Md Kamrul Hasan, Wasifur Rahman, AmirAli Bagher Zadeh, Jianyuan Zhong, Md Iftekhar Tanveer, Louis-Philippe Morency, and Mohammed (Ehsan) Hoque. 2019. UR-FUNNY: A multimodal language dataset for understanding humor. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 2046-2056, Hong Kong, China. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.228, + 0.882, + 0.345 + ], + "angle": 0, + "content": "Jack Hessel, Ana Marasovic, Jena D. 
Hwang, Lillian Lee, Jeff Da, Rowan Zellers, Robert Mankoff, and Yejin Choi. 2023. Do androids laugh at electric sheep? humor \"understanding\" benchmarks from the new yorker caption contest. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 688-714, Toronto, Canada. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.356, + 0.882, + 0.473 + ], + "angle": 0, + "content": "Nabil Hossain, John Krumm, and Michael Gamon. 2019. \"president vows to cut hair\": Dataset and analysis of creative text editing for humorous headlines. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 133-142, Minneapolis, Minnesota. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.485, + 0.882, + 0.64 + ], + "angle": 0, + "content": "Oana Ignat, Zhijing Jin, Artem Abzaliev, Laura Biester, Santiago Castro, Naihao Deng, Xinyi Gao, Aylin Ece Gunal, Jacky He, Ashkan Kazemi, Muhammad Khalifa, Namho Koh, Andrew Lee, Siyang Liu, Do June Min, Shinka Mori, Joan C. Nwatu, Veronica Perez-Rosas, Siqi Shen, and 3 others. 2024. Has it all been solved? open NLP research questions not solved by large language models. In Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pages 8050-8094, Torino, Italia. ELRA and ICCL." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.652, + 0.881, + 0.69 + ], + "angle": 0, + "content": "Herbert M Lefcourt. 2001. *Humor: The psychology of living buoyantly*. Springer Science & Business Media." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.702, + 0.882, + 0.781 + ], + "angle": 0, + "content": "Zefeng Li, Hongfei Lin, Liang Yang, Bo Xu, and Shaowu Zhang. 2022. 
Memeplate: A chinese multimodal dataset for humor understanding in meme templates. In *Natural Language Processing and Chinese Computing*, pages 527-538, Cham. Springer International Publishing." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.791, + 0.881, + 0.843 + ], + "angle": 0, + "content": "Hanmeng Liu, Ruoxi Ning, Zhiyang Teng, Jian Liu, Qiji Zhou, and Yue Zhang. 2023a. Evaluating the logical reasoning ability of chatgpt and gpt-4. arXiv preprint arXiv:2304.03439." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.855, + 0.882, + 0.92 + ], + "angle": 0, + "content": "Siyang Liu, Naihao Deng, Sahand Sabour, Yilin Jia, Minlie Huang, and Rada Mihalcea. 2023b. Task-adaptive tokenization: Enhancing long-form text generation efficacy in mental health and beyond. In Proceedings of the 2023 Conference on Empirical Meth" + }, + { + "type": "list", + "bbox": [ + 0.512, + 0.086, + 0.882, + 0.92 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.929, + 0.525, + 0.941 + ], + "angle": 0, + "content": "21808" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.135, + 0.086, + 0.489, + 0.126 + ], + "angle": 0, + "content": "ods in Natural Language Processing, pages 15264-15281, Singapore. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.135, + 0.488, + 0.265 + ], + "angle": 0, + "content": "Xin Liu, Baosong Yang, Dayiheng Liu, Haibo Zhang, Weihua Luo, Min Zhang, Haiying Zhang, and Jinsong Su. 2021. Bridging subword gaps in pretrainfinetune paradigm for natural language generation. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 6001-6011, Online. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.275, + 0.487, + 0.314 + ], + "angle": 0, + "content": "Paul E McGhee. 
1971. Development of the humor response: A review of the literature. Psychological Bulletin, 76(5):328." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.323, + 0.487, + 0.402 + ], + "angle": 0, + "content": "Rada Mihalcea, Oana Ignat, Longju Bai, Angana Borah, Luis Chiruzzo, Zhijing Jin, Claude Kwizera, Joan Nwatu, Soujanya Poria, and Thamar Solorio. 2024. Why ai is weird and should not be this way: Towards ai for everyone, with everyone, by everyone. arXiv preprint arXiv:2410.16315." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.41, + 0.487, + 0.502 + ], + "angle": 0, + "content": "Rada Mihalcea and Carlo Strapparava. 2005. Making computers laugh: Investigations in automatic humor recognition. In Proceedings of Human Language Technology Conference and Conference on Empirical Methods in Natural Language Processing, pages 531-538, Vancouver, British Columbia, Canada. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.511, + 0.487, + 0.563 + ], + "angle": 0, + "content": "Mistral.ai. 2024. Mistral-large-instruct-2407 model card. https://huggingface.co/mistralai/Mistral-Large-Instruct-2407. Accessed: 2024-12-10." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.573, + 0.487, + 0.612 + ], + "angle": 0, + "content": "Nexusflow. 2024. Athene-70b model card. https://huggingface.co/Nexusflow/Athene-70B. Accessed: 2024-12-10." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.621, + 0.487, + 0.673 + ], + "angle": 0, + "content": "NVIDIA. 2024. Llama-3.1-nemotron-70b-instruct-hf model card. https://huggingface.co/nvidia/Llama-3.1-Nemotron-70B-Instruct-HF. Accessed: 2024-12-10." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.683, + 0.486, + 0.709 + ], + "angle": 0, + "content": "OpenAI. 2023. Gpt-4 technical report. arXiv preprint arXiv:2303.08774." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.719, + 0.486, + 0.744 + ], + "angle": 0, + "content": "OpenAI. 2024. 
Gpt-4o system card. arXiv preprint arXiv:2410.21276." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.754, + 0.487, + 0.831 + ], + "angle": 0, + "content": "Siddhesh Pawar, Junyeong Park, Jiho Jin, Arnav Arora, Junho Myung, Srishti Yadav, Faiz Ghifari Haznitrama, Inhwa Song, Alice Oh, and Isabelle Augenstein. 2024. Survey of cultural awareness in language models: Text and beyond. arXiv preprint arXiv:2411.00860." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.842, + 0.487, + 0.92 + ], + "angle": 0, + "content": "Peter Potash, Alexey Romanov, and Anna Rumshisky. 2017. SemEval-2017 task 6: #HashtagWars: Learning a sense of humor. In Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017), pages 49-57, Vancouver, Canada. Association for Computational Linguistics." + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.086, + 0.489, + 0.92 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.086, + 0.883, + 0.125 + ], + "angle": 0, + "content": "Qwen. 2024. Qwen2.5-72b-instruct model card. https://huggingface.co/Qwen/Qwen2.5-72B-Instruct. Accessed: 2024-12-10." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.135, + 0.883, + 0.265 + ], + "angle": 0, + "content": "Dragomir Radev, Amanda Stent, Joel Tetreault, Aasish Pappu, Aikaterini Iliakopoulou, Agustin Chanfreau, Paloma de Juan, Jordi Vallmitjana, Alejandro Jaimes, Rahul Jha, and Robert Mankoff. 2016. Humor in collective discourse: Unsupervised funniness detection in the new yorker cartoon caption contest. In Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16), pages 475-479, Porto Roz, Slovenia. European Language Resources Association (ELRA)." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.274, + 0.883, + 0.34 + ], + "angle": 0, + "content": "David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. 
Bowman. 2023. Gpqa: A graduate-level google-proof q&a benchmark. Preprint, arXiv:2311.12022." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.348, + 0.883, + 0.454 + ], + "angle": 0, + "content": "Chhavi Sharma, Deepesh Bhageria, William Scott, Srinivas PYKL, Amitava Das, Tanmoy Chakraborty, Viswanath Pulabaigari, and Björn Gambäck. 2020. SemEval-2020 task 8: Memotion analysis-the visuolinguial metaphor! In Proceedings of the Fourteenth Workshop on Semantic Evaluation, pages 759-773, Barcelona (online). International Committee for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.462, + 0.883, + 0.593 + ], + "angle": 0, + "content": "Siqi Shen, Lajanugen Logeswaran, Moontae Lee, Honglak Lee, Soujanya Poria, and Rada Mihalcea. 2024. Understanding the capabilities and limitations of large language models for cultural commonsense. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 5668-5680, Mexico City, Mexico. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.602, + 0.883, + 0.732 + ], + "angle": 0, + "content": "Shivalika Singh, Angelika Romanou, Clémentine Fourrier, David I. Adelani, Jian Gang Ngui, Daniel Vila-Suero, Peerat Limkonchotiwat, Kelly Marchisio, Wei Qi Leong, Yosephine Susanto, Raymond Ng, Shayne Longpre, Wei-Yin Ko, Madeline Smith, Antoine Bosselut, Alice Oh, Andre F. T. Martins, Leshem Choshen, Daphne Ippolito, and 4 others. 2024. Global mmlu: Understanding and addressing cultural and linguistic biases in multilingual evaluation. Preprint, arXiv:2412.03304." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.741, + 0.883, + 0.82 + ], + "angle": 0, + "content": "Zayne Sprague, Fangcong Yin, Juan Diego Rodriguez, Dongwei Jiang, Manya Wadhwa, Prasann Singhal, Xinyu Zhao, Xi Ye, Kyle Mahowald, and Greg Durrett. 2024. 
To cot or not to cot? chain-of-thought helps mainly on math and symbolic reasoning. Preprint, arXiv:2409.12183." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.828, + 0.883, + 0.92 + ], + "angle": 0, + "content": "Honglin Sun and Daniel Jurafsky. 2004. Shallow semantic parsing of Chinese. In Proceedings of the Human Language Technology Conference of the North American Chapter of the Association for Computational Linguistics: HLT-NAACL 2004, pages 249-256, Boston, Massachusetts, USA. Association for Computational Linguistics." + }, + { + "type": "list", + "bbox": [ + 0.512, + 0.086, + 0.883, + 0.92 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.929, + 0.525, + 0.941 + ], + "angle": 0, + "content": "21809" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.086, + 0.49, + 0.166 + ], + "angle": 0, + "content": "Weiwei Sun, Zhifang Sui, Meng Wang, and Xin Wang. 2009. Chinese semantic role labeling with shallow parsing. In Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing, pages 1475-1483, Singapore. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.174, + 0.49, + 0.265 + ], + "angle": 0, + "content": "Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, and Guillaume Lample. 2023a. Llama: Open and efficient foundation language models. Preprint, arXiv:2302.13971." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.275, + 0.49, + 0.38 + ], + "angle": 0, + "content": "Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, and 49 others. 2023b. Llama 2: Open foundation and fine-tuned chat models. Preprint, arXiv:2307.09288." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.389, + 0.49, + 0.48 + ], + "angle": 0, + "content": "Yuen-Hsien Tseng, Wun-Syuan Wu, Chia-Yueh Chang, Hsueh-Chih Chen, and Wei-Lun Hsu. 2020. Development and validation of a corpus for machine humor comprehension. In Proceedings of the Twelfth Language Resources and Evaluation Conference, pages 1346-1352, Marseille, France. European Language Resources Association." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.49, + 0.49, + 0.581 + ], + "angle": 0, + "content": "Ashmal Vayani, Dinura Dissanayake, Hasindri Watawana, Noor Ahsan, Nevasini Sasikumar, Omkar Thawakar, Henok Biadglin Ademtew, Yahya Hmaiti, Amandeep Kumar, Kartik Kuckreja, and 1 others. 2024. All languages matter: Evaluating Imms on culturally diverse 100 languages. arXiv preprint arXiv:2411.16508." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.591, + 0.49, + 0.657 + ], + "angle": 0, + "content": "Benyou Wang, Xiang Wu, Xiaokang Liu, Jianquan Li, Prayag Tiwari, and Qianqian Xie. 2022. Can language models make fun? a case study in chinese comical crosstalk. In Annual Meeting of the Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.666, + 0.49, + 0.744 + ], + "angle": 0, + "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, and 1 others. 2022. Chain-of-thought prompting elicits reasoning in large language models. 
Advances in neural information processing systems, 35:24824-24837." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.753, + 0.49, + 0.82 + ], + "angle": 0, + "content": "Orion Weller and Kevin Seppi. 2020. The rJokes dataset: a large scale humor collection. In Proceedings of the Twelfth Language Resources and Evaluation Conference, pages 6136-6141, Marseille, France. European Language Resources Association." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.828, + 0.49, + 0.921 + ], + "angle": 0, + "content": "Jiaming Wu, Hongfei Lin, Liang Yang, and Bo Xu. 2021. Mumor: A multimodal dataset for humor detection in conversations. In *Natural Language Processing and Chinese Computing: 10th CCF International Conference*, NLPCC 2021, Qingdao, China, October 13–17, 2021, Proceedings, Part I, page 619–627, Berlin, Heidelberg. Springer-Verlag." + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.086, + 0.49, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.086, + 0.885, + 0.179 + ], + "angle": 0, + "content": "Yufan Wu, Yinghui He, Yilin Jia, Rada Mihalcea, Yu-long Chen, and Naihao Deng. 2023. Hi-ToM: A benchmark for evaluating higher-order theory of mind reasoning in large language models. In *Findings of the Association for Computational Linguistics: EMNLP* 2023, pages 10691-10706, Singapore. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.188, + 0.885, + 0.268 + ], + "angle": 0, + "content": "Diyi Yang, Alon Lavie, Chris Dyer, and Eduard Hovy. 2015. Humor recognition and humor anchor extraction. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 2367-2376, Lisbon, Portugal. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.277, + 0.885, + 0.343 + ], + "angle": 0, + "content": "Dongyu Zhang, Heting Zhang, Xikai Liu, Hongfei Lin, and Feng Xia. 2019. 
Telling the whole story: A manually annotated chinese dataset for the analysis of humor in jokes. In Conference on Empirical Methods in Natural Language Processing." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.352, + 0.885, + 0.417 + ], + "angle": 0, + "content": "Min Zhang, Jianfeng He, Taoran Ji, and Chang-Tien Lu. 2024a. Don't go to extremes: Revealing the excessive sensitivity and calibration limitations of llms in implicit hate speech detection. Preprint, arXiv:2402.11406." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.428, + 0.885, + 0.495 + ], + "angle": 0, + "content": "Tianyi Zhang, Faisal Ladhak, Esin Durmus, Percy Liang, Kathleen McKeown, and Tatsunori B. Hashimoto. 2024b. Benchmarking Large Language Models for News Summarization. Transactions of the Association for Computational Linguistics, 12:39-57." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.503, + 0.885, + 0.557 + ], + "angle": 0, + "content": "Jun Zhao, Zhihao Zhang, Qi Zhang, Tao Gui, and Xuanjing Huang. 2024. Llama beyond english: An empirical study on language capability transfer. arXiv preprint arXiv:2401.01055." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.566, + 0.885, + 0.62 + ], + "angle": 0, + "content": "Jeffrey Zhou, Tianjian Lu, Swaroop Mishra, Siddhartha Brahma, Sujoy Basu, Yi Luan, Denny Zhou, and Le Hou. 2023. Instruction-following evaluation for large language models. Preprint, arXiv:2311.07911." + }, + { + "type": "list", + "bbox": [ + 0.512, + 0.086, + 0.885, + 0.62 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.929, + 0.526, + 0.941 + ], + "angle": 0, + "content": "21810" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.115, + 0.085, + 0.276, + 0.1 + ], + "angle": 0, + "content": "A Contributions" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.14, + 0.49, + 0.187 + ], + "angle": 0, + "content": "Idea Proposal. 
Naihao Deng proposed the high-level idea of constructing a humor understanding benchmark sourced from RZB data." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.2, + 0.487, + 0.231 + ], + "angle": 0, + "content": "Background Survey. Ruiqi He surveyed the humor-related tasks." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.244, + 0.487, + 0.275 + ], + "angle": 0, + "content": "Data Processing. Ruiqi He crawled and processed the jokes from RZB." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.288, + 0.489, + 0.352 + ], + "angle": 0, + "content": "Annotation. Ruiqi He annotated the explanations for the RZB jokes. Yushu He, Longju Bai, Jiarui Liu, Zhenjie Sun, Zhenghao Tang, He Wang, Nai-hao Deng conducted the preference annotations." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.364, + 0.487, + 0.396 + ], + "angle": 0, + "content": "Experiments. Ruiqi He, Hanchen Xia, and Naihao Deng conducted the experiments." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.408, + 0.487, + 0.44 + ], + "angle": 0, + "content": "Result Aggregation. Ruiqi He, Naihao Deng, Yushu He aggregated the results." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.453, + 0.487, + 0.501 + ], + "angle": 0, + "content": "Paper Writing. Ruiqi He and Naihao Deng drafted the paper. Other authors provided revisions and feedback on the paper." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.513, + 0.39, + 0.528 + ], + "angle": 0, + "content": "Naihao Deng organized the research." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.581, + 0.397, + 0.597 + ], + "angle": 0, + "content": "B Agreement Rate Calculation" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.637, + 0.49, + 0.765 + ], + "angle": 0, + "content": "We calculate the percentage agreement rate among annotators who annotate their preferences between explanations from LLMs and humans. 
The results show an average inter-annotator agreement of \\(61.9\\%\\) for GPT-4o and \\(60.9\\%\\) for \\(\\mathrm{ERNIE}_{4}\\) -turbo. Given the inherent subjectivity of humor interpretation tasks (Deng et al., 2023), the combined average agreement percentage of \\(61.4\\%\\) is decent." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.818, + 0.487, + 0.85 + ], + "angle": 0, + "content": "C Annotation Instructions for Preference Annotation" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.89, + 0.49, + 0.922 + ], + "angle": 0, + "content": "We include the following instructions for the preference annotations of the joke explanations:" + }, + { + "type": "title", + "bbox": [ + 0.52, + 0.087, + 0.612, + 0.101 + ], + "angle": 0, + "content": "Instruction" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.116, + 0.872, + 0.18 + ], + "angle": 0, + "content": "“在这个标注中,你将会看到一个笑话和对这个笑话的幽默之处的两个解释,请你比较哪个解释更好的解释了这个笑话的幽默之处,并从以下三个标签中选择:" + }, + { + "type": "text", + "bbox": [ + 0.521, + 0.181, + 0.586, + 0.196 + ], + "angle": 0, + "content": "1. 解释1" + }, + { + "type": "text", + "bbox": [ + 0.521, + 0.198, + 0.589, + 0.212 + ], + "angle": 0, + "content": "2. 解释2" + }, + { + "type": "text", + "bbox": [ + 0.521, + 0.213, + 0.606, + 0.228 + ], + "angle": 0, + "content": "3. 一样好”" + }, + { + "type": "list", + "bbox": [ + 0.521, + 0.181, + 0.606, + 0.228 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.65, + 0.231, + 0.744, + 0.243 + ], + "angle": 0, + "content": "Translation" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.246, + 0.875, + 0.326 + ], + "angle": 0, + "content": "\"In this annotation task, you will see a joke along with two explanations of its humor. Please compare which explanation better explains the reason why this joke is funny and choose from the following three labels:" + }, + { + "type": "text", + "bbox": [ + 0.521, + 0.327, + 0.643, + 0.342 + ], + "angle": 0, + "content": "1. 
Explanation 1" + }, + { + "type": "text", + "bbox": [ + 0.521, + 0.344, + 0.645, + 0.358 + ], + "angle": 0, + "content": "2. Explanation 2" + }, + { + "type": "text", + "bbox": [ + 0.521, + 0.359, + 0.717, + 0.375 + ], + "angle": 0, + "content": "3. Both are equally good.\"" + }, + { + "type": "list", + "bbox": [ + 0.521, + 0.327, + 0.717, + 0.375 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.396, + 0.885, + 0.459 + ], + "angle": 0, + "content": "For each example, we randomly assign the explanations from the LLMs and the human as Explanation 1 and Explanation 2 to ensure a fair comparison." + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.475, + 0.831, + 0.492 + ], + "angle": 0, + "content": "D Discussion on Evaluation Setting" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.503, + 0.885, + 0.921 + ], + "angle": 0, + "content": "Why Zero-Shot w.o. SFT? The primary research objective of this paper is to determine how well foundational LLMs can understand Chinese humor without relying on supervised fine-tuning for this binary classification task. The focus is on investigating the innate humor-understanding ability of these models through zero-shot and zero-shot CoT prompting. This aligns with the overarching goal of examining their general capabilities without additional task-specific training. From a human behavior perspective, individuals generally acquire a sense of humor through exposure and experience rather than explicit classroom instruction (McGhee, 1971; FRY, 1994; Gervais and Wilson, 2005). Analogously, our purpose lies in evaluating the models' intrinsic ability to recognize and interpret humor without deliberate, task-specific fine-tuning. Moreover, the experimental design follows practices from other benchmarks, such as GPQA (Rein et al., 2023), AI2ARC (Clark et al., 2018), and IFEVAL (Zhou et al., 2023), which do not provide predefined train/dev/test splits. 
Under these conditions, it is common to assess models in a zero-shot manner to directly evaluate their capabilities on each respective task (Touvron et al., 2023a,b; Bai et al., 2023; Abdin et al., 2024)." + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.928, + 0.524, + 0.941 + ], + "angle": 0, + "content": "21811" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.115, + 0.085, + 0.49, + 0.294 + ], + "angle": 0, + "content": "
Example(zh)真可怕,犯罪嫌疑人就在我们之中,被告席上一名法警对另一名法警说。
(en)“Terrifying, the criminal suspect is right between the two of us,” said one bailiff to another in the defendant's dock.
Correct Humor Explanation“between us” can refer to “either one of us” or literally means the actual physical position.
GPT-4o's Answer(zh)…暗示他们自己可能是犯罪嫌疑人…
(en)…it suggests the bailiffs themselves might be criminal suspects…
Failure ReasonsFail to address the literal meaning.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.114, + 0.306, + 0.49, + 0.336 + ], + "angle": 0, + "content": "Figure 9: Insufficient contextual understanding example." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.36, + 0.31, + 0.376 + ], + "angle": 0, + "content": "E More Error Cases" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.387, + 0.49, + 0.435 + ], + "angle": 0, + "content": "We note that many examples here encompass multiple error types, highlighting the complexity of Chumor." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.445, + 0.49, + 0.719 + ], + "angle": 0, + "content": "Insufficient Contextual Understanding. LLMs may fail to ground their responses to the context when they explain the joke. For instance, in the example in Figure 9, \"between us\" typically means \"either you or me\", but it also has the literal meaning to indicate the person standing \"between us\", which is the right interpretation given that the two bailiffs are talking about the criminal. However, GPT-4o only reasons that \"the criminal is either you or me\" but fails to capture the literal meaning from the context. We hypothesize that in the pretraining corpus, \"between us\" most likely acquires the meaning of \"either you or me\" rather than the literal meaning in a scenario like this, which creates a bias that prevents the model from reasoning about the literal interpretation required for this specific explanation." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.729, + 0.49, + 0.922 + ], + "angle": 0, + "content": "Glyph-based Error. LLMs may fail to interpret the visual puns based on the shape or form of Chinese characters. Glyph-based humor in Chinese leverages its logographic writing system, where characters integrate both semantic and visual elements. Unlike the phonemic alphabet used in English, Chinese characters' pictorial and ideographic nature allows for visual puns in jokes (Daniels and Bright, 1996). 
In the example in Figure 10, \"simplify\" does not refer to simplifying the tally problem conceptually, but to simplifying the traditional Chinese characters to simplified Chinese" + }, + { + "type": "table", + "bbox": [ + 0.512, + 0.084, + 0.885, + 0.306 + ], + "angle": 0, + "content": "
Example(zh)我把電車難題簡化了,現在是电车难题。
(en)I simplified the trolley problem (in traditional Chinese), now it's the trolley problem (in simplified Chinese).
Correct Humor ExplanationThe joke does not actually simplify the scenario of the trolley problem; instead, it converts the traditional Chinese characters “電車難題” into the simplified Chinese characters “电车难题”.
GPT-4o's Answer(zh)...这个笑话的幽默之处在于通过将“電車難題”中的“難”字去掉,变成了“电车題”。
(en)...The humor of this joke lies in removing the character “hard” from “the hard tally problem”, turning it into “the tally problem”.
Failure ReasonsFail to interpret the visual puns in the shape or form of the characters.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.563, + 0.317, + 0.829, + 0.332 + ], + "angle": 0, + "content": "Figure 10: Glyph-based error example." + }, + { + "type": "table", + "bbox": [ + 0.511, + 0.348, + 0.885, + 0.68 + ], + "angle": 0, + "content": "
Example(zh) 小王订了张飞去北京的机票,给张飞省了一大笔钱。
(en) Xiaowang booked an airline ticket to Beijing, saving Zhang Fei a lot of money.
Correct \nHumor \nExplanationThere are two ways to parse the first half of the sentence: \n(1) 小王/订了/张/飞去/北京的/机票 XiaoWang/ booked/ a/ ticket to fly to Beijing. \n(2) 小王/订了/张飞/去北京的/机票 XiaoWang / booked/ Zhangfei/ a ticket/ to Beijing.
Typically, people would interpret in the first way as Zhangfei is a fictional figure and do not appear in the daily conversation. However, the second half of the sentence confirms that the second way of parsing turns out to be correct. Such contrast is hilarious.
GPT-4o's \nAnswer(zh) ...由于历史人物张飞已经不在世,所以给他省了一大笔钱。
(en) ... since the historical figure Zhang Fei is dead and cannot take a flight, the joke humorously suggests that he saved a lot of money.
Failure \nReasonsFail to parse textual elements in a sentence dynamically.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.58, + 0.69, + 0.811, + 0.705 + ], + "angle": 0, + "content": "Figure 11: Parsing error example." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.732, + 0.885, + 0.83 + ], + "angle": 0, + "content": "characters as the traditional Chinese characters are also termed as \"complicated characters\". However, LLMs struggle to reason such graphemic differences as there are no explicit connections between the textual meaning and visual representations of the glyphs." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.842, + 0.885, + 0.922 + ], + "angle": 0, + "content": "Parsing Error. LLMs often fail to parse sentences in multiple ways simultaneously, leading to difficulties in explaining jokes that require different parsing for the same sentence. In the example in Figure 11, the humor hinges on the ambiguity of" + }, + { + "type": "page_number", + "bbox": [ + 0.476, + 0.928, + 0.526, + 0.941 + ], + "angle": 0, + "content": "21812" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.493, + 0.327 + ], + "angle": 0, + "content": "the phrase \"张飞\", which can be interpreted either as part of a verb phrase implying \"a ticket flying to Beijing\" or as a proper noun, referring to the historical figure Zhang Fei. This ambiguity stems from the flexibility of the Chinese language, where each character can function independently as a word or combine with others to form new words or phrases. There are decades of research studying the problem of parsing Chinese (Sun and Jurafsky, 2004; Sun et al., 2009). Recently, researchers have proposed task-specific tokenization approaches that adapt the parsing process to better align with downstream tasks (Liu et al., 2021, 2023b). However, how to incorporate different ways of parsing at one time still remains challenging." + }, + { + "type": "table", + "bbox": [ + 0.116, + 0.338, + 0.489, + 0.598 + ], + "angle": 0, + "content": "
Example(zh) 五步蛇的危险性是李白的两倍。 (en) The danger of a five-step snake is twice that of Li Bai.
Correct Humor ExplanationKill one in ten steps is a line from a poem by the ancient Chinese poet Li Bai, suggesting that while it takes Li Bai ten steps to kill, a person can die within five steps of being bitten by the “Five-Step Snake” hence it is twice as dangerous as Li Bai.
(zh) …李白的名字听起来像“二步”,所以“五步蛇”的危险性被戏称为是“二步”的两倍。
GPT-4o's Answer(en) …Li Bai's (pronounced as lee bye) name sounds similar to “two steps” (pronounced as r boo) in Chinese, so the danger of “Five-Step Snake” is humorously claimed to be twice that of “two steps”
Failure ReasonsFabricate ungrounded details or facts.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.18, + 0.611, + 0.423, + 0.626 + ], + "angle": 0, + "content": "Figure 12: Hallucinations example." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.648, + 0.49, + 0.759 + ], + "angle": 0, + "content": "Hallucinations. LLMs may fabricate ungrounded details or facts in joke explanations. For instance, in the explanation in Figure 12, GPT-4o claims that \"Li Bai's name sounds similar to two steps\", while \"Li Bai\" (pronounced as lee bye) does not sound like \"two steps\" (pronounced as \\( r \\) boo)." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.762, + 0.49, + 0.922 + ], + "angle": 0, + "content": "On the other hand, the correct explanation requires an understanding of a Chinese poem from Li Bai, \"十步杀一人\" (The warrior kills a person for every ten steps). This line praises the courage of the soldiers, but the joke deliberately portrays this as a characteristic of Li Bai. Therefore, compared to Li Bai who can kill a person in ten steps, a five-step snake, which can kill a person in five steps, is twice as dangerous as Li Bai. Such explanation requires LLMs to have a deep understanding of Chinese" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.885, + 0.198 + ], + "angle": 0, + "content": "culture and reason over cultural references, posing a great challenge to current LLMs. Although recent works have made progress towards building LLMs beyond English (Du et al., 2024; Zhao et al., 2024), building an LLM that can comprehend such nuanced Chinese cultural terms can be extremely hard." + }, + { + "type": "page_number", + "bbox": [ + 0.476, + 0.928, + 0.526, + 0.941 + ], + "angle": 0, + "content": "21813" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.115, + 0.085, + 0.49, + 0.386 + ], + "angle": 0, + "content": "
Example(zh)曹操于城楼上问夏侯惇:“你瞧到了什么。”夏侯惇说:“瞧到马岱。”(en) Cao Cao, from atop the city tower, asked Xia Houdun, “What did you see?” Xia Houdun replied, “I saw Ma Dai.”
Correct Humor ExplanationCao Cao, Xiahou Dun, and Ma Dai were historical figures from the Three Kingdoms period in ancient China. The phrase “saw Ma Dai” (pronounced as chuh dow ma die) not only literally refers to spotting Ma Dai but also play on similar pronunciation with the Japanese phrase “まじと待て” (chotto matte), which means to “wait a moment.”
GPT-4o's Answer(zh) ... 这个笑话的幽默之处在于利用了“马岱”这个名字的谐音梗,听起来像“妈的”,让人误以为夏侯惇在骂人。(en) ... a pun involving the name "Ma Dai," which sounds like "damn" in Chinese. It sounds like Xiahou Dun is swearing at Cao Cao.
Failure ReasonsFail to recognize information/elements across different languages.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.165, + 0.398, + 0.437, + 0.413 + ], + "angle": 0, + "content": "Figure 13: Cross-lingual error example." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.438, + 0.49, + 0.646 + ], + "angle": 0, + "content": "Cross-lingual Error. LLMs may fail to recognize elements or information across different languages. In the explanation in Figure 13, GPT-4o attempts to link the pronunciation of \"Ma Dai\" to other Chinese terms but fails to identify the similar pronunciations across the Chinese term \"瞧到马岱\" (pronounced as chuh dow ma die, meaning \"saw Ma Dai\") and the Japanese term \"ちよと待て\" (chotto matte, meaning \"wait a moment\"). Such cases require LLMs to connect pronunciations across languages, which may be rare in the LLMs' pre-training corpus and poses significant challenges to current LLMs." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.655, + 0.49, + 0.8 + ], + "angle": 0, + "content": "Overcritical. Figure 14 shows when the model overly criticizes an explanation, it focuses excessively on minor details, neglecting the major point and ultimately misjudging the explanation. The joke relies on common sense about life, death, and testing rather than cultural knowledge. Under the CoT prompting, the model recognizes that the explanation identifies the main punchlines but overly criticizes the lack of cultural context." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.809, + 0.491, + 0.922 + ], + "angle": 0, + "content": "Excessive Sensitivity. For \\(\\mathrm{ERNIE}_{4}\\)-turbo, in addition to errors across all the aforementioned categories, it also demonstrates excessive sensitivity to certain examples. Specifically, when content includes languages related to hate speech but used in non-harmful contexts, \\(\\mathrm{ERNIE}_{4}\\)-turbo refuses to provide an explanation. During our evalua" + }, + { + "type": "table", + "bbox": [ + 0.51, + 0.084, + 0.885, + 0.509 + ], + "angle": 0, + "content": "
Example(zh)网上传疯了的超准测试题!20道题轻松判断出你有没有死!(en)The viral online quiz! 20 questions to easily determine if you're alive or dead!
Joke Explanation(zh)这个笑话的幽默之处在于它用夸张的方式提出了一个显而易见的问题——判断自己是否还活着,这本身就不需要测试。其次,标题中“超准测试题”和“轻松判断”这样的描述让人觉得荒谬可笑,因为活着与否是最基本的感知,不需要任何测试。
Model's Answer DP(en)The humor in this joke lies in its exaggerated premise of asking an obvious question—determining whether one is still alive, which inherently doesn't require any test. Additionally, the descriptions in the title, such as "super accurate quiz" and "easily determine," come across as absurdly funny because being alive is the most basic perception, requiring no testing at all.
Model's Answer CoT(zh)完全解释(en)Fully Explained
(zh)...最终判断:部分/没有解释理由:虽然解释准确地指出了笑话的主要笑点(夸张性和标题的矛盾),但它没有深入探讨这些元素为什么会产生幽默效果,也没有讨论潜在的文化或认知背景...
(en)...Final judgment: Partially/Not explained Reason: While the explanation accurately identifies the main punchlines of the joke (exaggeration and the contradiction in the title), it does not delve into why these elements create a humorous effect or discuss the potential cultural or cognitive context behind it...
" + }, + { + "type": "table_caption", + "bbox": [ + 0.508, + 0.521, + 0.884, + 0.579 + ], + "angle": 0, + "content": "Figure 14: Overcritical example by Nematron\\(_{70\\mathrm{B}}\\). The Nematron\\(_{70\\mathrm{B}}\\) model selects the correct answer in the DP prompting, but selects the incorrect answer due to being overly critical in the CoT prompting." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.617, + 0.885, + 0.761 + ], + "angle": 0, + "content": "tion, we observe this excessive sensitivity in the \\(\\mathrm{ERNIE}_{4}\\)-turbo's responses to humor related to medical ethics and political discussions. This suggests that correctly understanding the context and the language toxicity remains an open challenge (Zhang et al., 2024a). Such issues are particularly critical for humor explanation, as misclassifying non-toxic context can cause the responses to deviate from the intended humor." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.798, + 0.86, + 0.815 + ], + "angle": 0, + "content": "F Prompts for DP and CoT in Chumor" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.842, + 0.885, + 0.922 + ], + "angle": 0, + "content": "This section outlines the prompts used in Chumor to evaluate whether an explanation fully explains a joke. Two prompting strategies are adopted: Direct Prompting (DP) and Chain of Thought (CoT). 
Below are the details of each approach:" + }, + { + "type": "page_number", + "bbox": [ + 0.476, + 0.928, + 0.526, + 0.941 + ], + "angle": 0, + "content": "21814" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.125, + 0.087, + 0.308, + 0.103 + ], + "angle": 0, + "content": "Direct Prompting (DP)" + }, + { + "type": "text", + "bbox": [ + 0.123, + 0.118, + 0.478, + 0.183 + ], + "angle": 0, + "content": "你将看到一个笑话以及对这个笑话的解释。请判断这个解释是否完全解释了笑话。根据判断,选择“完全解释”或“部分/没有解释”,不需要解释为什么对或者不对。" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.184, + 0.227, + 0.2 + ], + "angle": 0, + "content": "笑话:[joke]" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.2, + 0.317, + 0.216 + ], + "angle": 0, + "content": "笑话解释:[explanation]" + }, + { + "type": "title", + "bbox": [ + 0.254, + 0.217, + 0.348, + 0.23 + ], + "angle": 0, + "content": "Translation" + }, + { + "type": "text", + "bbox": [ + 0.123, + 0.233, + 0.48, + 0.327 + ], + "angle": 0, + "content": "You will see a joke and an explanation of the joke. Please determine whether this explanation fully explains the joke. Based on your judgment, choose either \"fully explain\" or \"partially/does not explain.\" You do not need to explain why it is correct or incorrect." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.329, + 0.216, + 0.345 + ], + "angle": 0, + "content": "Joke: [joke]" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.346, + 0.325, + 0.361 + ], + "angle": 0, + "content": "Explanation: [explanation]" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.381, + 0.489, + 0.446 + ], + "angle": 0, + "content": "The DP prompt is designed to encourage concise decision-making. It directly asks the model to evaluate the completeness of the explanation without requiring reasoning or justification." 
+ }, + { + "type": "title", + "bbox": [ + 0.125, + 0.459, + 0.319, + 0.473 + ], + "angle": 0, + "content": "Chain of Thought (CoT)" + }, + { + "type": "text", + "bbox": [ + 0.124, + 0.489, + 0.478, + 0.554 + ], + "angle": 0, + "content": "你将看到一个笑话以及对这个笑话的解释。请逐步思考,写下过程并最终判断这个解释是否完全解释了笑话。根据判断,选择“完全解释”或“部分/没有解释”。" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.556, + 0.227, + 0.571 + ], + "angle": 0, + "content": "笑话:[joke]" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.572, + 0.317, + 0.587 + ], + "angle": 0, + "content": "笑话解释:[explanation]" + }, + { + "type": "title", + "bbox": [ + 0.255, + 0.589, + 0.348, + 0.601 + ], + "angle": 0, + "content": "Translation" + }, + { + "type": "text", + "bbox": [ + 0.123, + 0.604, + 0.478, + 0.7 + ], + "angle": 0, + "content": "You will see a joke and an explanation of the joke. Please think step by step, write down your reasoning process, and finally determine whether this explanation fully explains the joke. Based on your judgment, choose either \"fully explains\" or \"partially/does not explain.\"" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.701, + 0.216, + 0.716 + ], + "angle": 0, + "content": "Joke: [joke]" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.718, + 0.325, + 0.732 + ], + "angle": 0, + "content": "Explanation: [explanation]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.753, + 0.489, + 0.832 + ], + "angle": 0, + "content": "The CoT prompt, in contrast, requires the model to reason step by step before reaching a conclusion. This approach aims to improve transparency by explicitly documenting the thought process behind the evaluation." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.847, + 0.449, + 0.864 + ], + "angle": 0, + "content": "G Joke Type Distribution in Chumor" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.874, + 0.489, + 0.922 + ], + "angle": 0, + "content": "We sampled 200 datapoints from Chumorto analyze the distribution of joke types, as shown in Figure 15. 
Note that a single joke may belong to" + }, + { + "type": "image", + "bbox": [ + 0.53, + 0.082, + 0.863, + 0.196 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.509, + 0.205, + 0.882, + 0.234 + ], + "angle": 0, + "content": "Figure 15: Distribution of Joke Types in 200 Sampled Datapoints." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.254, + 0.883, + 0.287 + ], + "angle": 0, + "content": "multiple categories, as it can exhibit features of more than one joke type." + }, + { + "type": "image_caption", + "bbox": [ + 0.51, + 0.299, + 0.826, + 0.316 + ], + "angle": 0, + "content": "H Detailed Results of Experiments" + }, + { + "type": "image", + "bbox": [ + 0.512, + 0.336, + 0.882, + 0.772 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.509, + 0.78, + 0.883, + 0.808 + ], + "angle": 0, + "content": "Figure 16: The Matthew's correlation coefficient of different models' test results in DP and CoT." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.826, + 0.884, + 0.922 + ], + "angle": 0, + "content": "For evaluation, we input each prompt into the model and collect its responses, comparing them to the labels in Chumor. A model's response is considered correct if it matches the reference label. If the model provides an incorrect answer or doesn't generate a response at all (due to safety protocols or" + }, + { + "type": "page_number", + "bbox": [ + 0.476, + 0.928, + 0.526, + 0.941 + ], + "angle": 0, + "content": "21815" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.114, + 0.085, + 0.488, + 0.134 + ], + "angle": 0, + "content": "filtering sensitive terms), it is marked as incorrect. Such scenario is rare, occurring only 21 times in our experiments, and exclusively with GLM-4plus." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.134, + 0.489, + 0.277 + ], + "angle": 0, + "content": "We highlight that CoT prompting at most cases degrade the models' performance on Chumor. 
As shown in Figure 16, only \\(\\mathrm{Athene}_{70\\mathrm{B}}\\) achieves a significant improvement. However, this is offset by its poorest performance under DP prompting among the models. GPT-4o shows a slight improvement, with its MCC score increasing from 0.19 to 0.20. And all other eight models exhibit different degrees of performance decline." + }, + { + "type": "image", + "bbox": [ + 0.118, + 0.288, + 0.486, + 0.882 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.122, + 0.892, + 0.479, + 0.907 + ], + "angle": 0, + "content": "Figure 17: CoT accuracy on different joke types \\((\\%)\\)." + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.929, + 0.526, + 0.941 + ], + "angle": 0, + "content": "21816" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.217, + 0.103, + 0.782, + 0.31 + ], + "angle": 0, + "content": "
ModelDPCoT
MCCACC (%)FPR (%)FNR (%)MCCACC (%)FPR (%)FNR (%)
Yi34B0.1044.9597.240.210.0947.1789.305.44
Nemotron70B0.1956.3061.2620.870.1457.1740.2846.14
Athene70B0.0844.5997.830.280.1247.2691.102.89
ERNIE4-turbo0.2960.2959.8313.570.1145.1696.930.14
QWen2.572B0.1948.4690.670.690.1749.4586.913.31
Mistral123B0.2255.5669.2612.190.1651.1879.928.40
Gemini1.5-pro0.2454.0077.425.170.1960.3233.8147.31
GLM-4plus0.2455.5672.288.260.1458.1332.9653.44
GPT-4o0.1951.8780.026.680.2050.6485.003.03
GPT-4turbo0.2052.3279.286.610.1751.2780.876.96
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.32, + 0.884, + 0.351 + ], + "angle": 0, + "content": "Table 4: Performance metrics for explanation evaluation including Matthew's correlation coefficient (MCC), accuracy (ACC), false positive rate (FPR), and false negative rate (FNR)." + }, + { + "type": "table", + "bbox": [ + 0.143, + 0.397, + 0.855, + 0.859 + ], + "angle": 0, + "content": "
ModelSourceDPCoT
MCCACC(%)FPR(%)FNR(%)MCCACC(%)FPR(%)FNR(%)
Athene70BOverall0.0844.5997.830.280.1247.2691.102.89
ERNIE Bot0.1252.3897.150.000.1554.2491.132.13
GPT-4o0.0333.9098.510.860.0837.6791.064.50
ERNIE-turboOverall0.2960.2959.8313.570.1145.1696.930.14
ERNIE Bot0.2358.6478.145.990.1653.4794.830.10
GPT-4o0.2762.5441.3829.550.0433.7699.040.21
Gemini1.5-proOverall0.2454.0077.425.170.1960.3233.8147.31
ERNIE Bot0.2760.6674.135.890.2360.8728.6249.24
GPT-4o0.2144.8580.743.640.1759.5639.0443.25
GLM-4plusOverall0.2455.5672.288.260.1458.1332.9653.44
ERNIE Bot0.2559.8374.976.700.1557.5637.0647.61
GPT-4o0.2149.6869.5711.560.0658.9228.8365.74
GPT-4turboOverall0.2052.3279.286.610.1751.2780.876.96
ERNIE Bot0.2057.2580.995.990.2258.7576.147.72
GPT-4o0.1845.5677.557.920.1341.0185.645.35
GPT-4oOverall0.1951.8780.026.680.2050.6485.003.03
ERNIE Bot0.2157.8279.416.400.2458.0782.472.94
GPT-4o0.1643.7180.647.280.1540.4487.553.21
Nemotron70BOverall0.1956.3061.2620.870.1457.1740.2846.14
ERNIE Bot0.2260.6656.8122.540.1457.0439.1846.60
GPT-4o0.1850.3265.7417.340.1357.3641.3845.18
Mistral123BOverall0.2255.5669.2612.190.1651.1879.928.40
ERNIE Bot0.2561.1365.1513.600.1857.0479.737.61
GPT-4o0.2047.9073.409.210.1243.1480.1110.06
Qwen2.572BOverall0.1948.4690.670.690.1749.4586.913.31
ERNIE Bot0.1954.4592.610.300.1855.5488.072.54
GPT-4o0.1740.2388.721.500.1441.0885.744.93
Yi34BOverall0.1044.9597.240.210.0947.1789.305.44
ERNIE Bot0.1553.4294.720.300.1153.9988.385.28
GPT-4o0.0333.3399.790.000.0737.8190.215.78
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.868, + 0.884, + 0.898 + ], + "angle": 0, + "content": "Table 5: Detailed performance metrics with source for explanation evaluation of Matthew's correlation coefficient (MCC), accuracy (ACC), false positive rate (FPR), and false negative rate (FNR)." + }, + { + "type": "page_number", + "bbox": [ + 0.476, + 0.928, + 0.526, + 0.941 + ], + "angle": 0, + "content": "21817" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.147, + 0.325, + 0.851, + 0.65 + ], + "angle": 0, + "content": "
ModelPromptingCross-lingualGlyph-basedHomophonemicPun-basedSituationalCultural
Athene70BDP0.000.0030.0044.0061.0042.00
CoT0.0025.0030.0044.0059.0043.00
ERNIE4-turboDP50.0050.0060.0061.0070.0063.00
CoT0.000.0030.0043.0059.0042.00
Gemini1.5-proDP50.0050.0055.0063.0067.0061.00
CoT50.0075.0070.0061.0066.0069.00
GLM-4plusDP50.0025.0065.0060.0069.0060.00
CoT50.00100.0075.0064.0060.0061.00
GPT-4turboDP50.0025.0040.0057.0067.0055.00
CoT50.0025.0045.0054.0062.0056.00
GPT-4oDP0.0050.0035.0049.0063.0054.00
CoT0.0050.0035.0050.0062.0053.00
Nemotron70BDP50.0050.0065.0063.0062.0060.00
CoT100.00100.0065.0066.0060.0072.00
Mistral123BDP50.0050.0055.0061.0065.0061.00
CoT50.000.0040.0053.0066.0055.00
Qwen2.572BDP0.0050.0035.0047.0064.0051.00
CoT0.0050.0040.0053.0063.0053.00
Yi34BDP0.000.0030.0043.0060.0044.00
CoT0.0025.0040.0049.0063.0052.00
" + }, + { + "type": "table_caption", + "bbox": [ + 0.218, + 0.659, + 0.779, + 0.674 + ], + "angle": 0, + "content": "Table 6: Performance metrics by joke type for explanation evaluation accuracy(%)" + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.929, + 0.525, + 0.941 + ], + "angle": 0, + "content": "21818" + } + ] +] \ No newline at end of file diff --git a/2025/Chumor 2.0_ Towards Better Benchmarking Chinese Humor Understanding from (Ruo Zhi Ba)/5abdc7b1-9bd7-4584-b650-ffb46a145cb6_origin.pdf b/2025/Chumor 2.0_ Towards Better Benchmarking Chinese Humor Understanding from (Ruo Zhi Ba)/5abdc7b1-9bd7-4584-b650-ffb46a145cb6_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..74f32207d9cd0a5bd63ab0fe6ee2d860bd804294 --- /dev/null +++ b/2025/Chumor 2.0_ Towards Better Benchmarking Chinese Humor Understanding from (Ruo Zhi Ba)/5abdc7b1-9bd7-4584-b650-ffb46a145cb6_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b33a9ae872a841a764ea57f4ee011b6f0cad588715303957ebb7ffbe7a428750 +size 2441661 diff --git a/2025/Chumor 2.0_ Towards Better Benchmarking Chinese Humor Understanding from (Ruo Zhi Ba)/full.md b/2025/Chumor 2.0_ Towards Better Benchmarking Chinese Humor Understanding from (Ruo Zhi Ba)/full.md new file mode 100644 index 0000000000000000000000000000000000000000..e81e803622cf09b7444e7469923ae68492e40f79 --- /dev/null +++ b/2025/Chumor 2.0_ Towards Better Benchmarking Chinese Humor Understanding from (Ruo Zhi Ba)/full.md @@ -0,0 +1,466 @@ +# Chumor 2.0: Towards Better Benchmarking Chinese Humor Understanding from 弱智吧 (Ruo Zhi Ba) + +Ruiqi He Yushu He Longju Bai Jiarui Liu Zhenjie Sun Zenghao Tang He Wang Hanchen Xia Rada Mihalcea Naihao Deng + +$^{\text{©}}$ University of Michigan Carnegie Mellon University Shanghai Jiaotong University {ruiqih, dnaiahao}@umich.edu + +# Abstract + +Existing humor datasets and evaluations predominantly focus on English, leaving limited resources for culturally 
nuanced humor in non-English languages like Chinese. To address this gap, we construct Chumor, the first and the largest Chinese humor explanation dataset. Chumor is sourced from Ruo Zhi Ba (RZB, 弱智吧), a Chinese Reddit-like platform known for sharing intellectually challenging and culturally specific jokes. We test ten LLMs through direct and chain-of-thought prompting, revealing that Chumor poses significant challenges to existing LLMs, with their accuracy slightly above random and far below human. In addition, our analysis highlights that human-annotated humor explanations are significantly better than those generated by GPT-4o and ERNIE $_{4\text{-turbo}}$ . We release Chumor at https://huggingface.co/datasets/MichiganNLP/Chumor, our project page is at https://github.com/MichiganNLP/Chumor-2.0, our leaderboard is at https://huggingface.co/spaces/MichiganNLP/Chumor-leaderboard, and our codebase is at https://github.com/MichiganNLP/Chumor-2.0. + +# 1 Introduction + +Humor is an intrinsic human trait that touches the core of our social and emotional lives, making it a rich field of study across various disciplines (Lefcourt, 2001; Mihalcea and Strapparava, 2005; Gelkopf et al., 2011; Hessel et al., 2023). With the advent of Large Language Models (LLMs), researchers have evaluated LLMs' performance on diverse tasks (Liu et al., 2023a; Deng et al., 2024; Wu et al., 2023) and observed LLMs' extraordinary performance on many (Zhang et al., 2024b). In contrast, researchers have observed that LLMs still fail to understand humor (Ghanadian et al., 2023). However, with all these studies on humor, + +most evaluations remain in English (Radev et al., 2016; Hasan et al., 2019). This presents a significant gap, particularly for non-English languages like Chinese, where culturally nuanced humor understanding is unexamined. 
+ +In this paper, we try to bridge this gap by constructing Chumor, a funny and challenging Chinese humor understanding dataset sourced from Ruo Zhi Ba (RZB, "弱智吧" in Chinese), a Chinese version of Reddit platform known for sharing intellectually challenging and culturally specific jokes. This platform provides a set of unique Chinese jokes that incorporate the subtleties and intricacies of Chinese humor. Table 1 provides examples of the jokes from RZB. In addition, Bai et al. (2024) reveal that tuning LLMs on RZB data yields the best performance on Chinese reasoning tasks compared to other data sources, highlighting the significant value of jokes from RZB. + +Unlike existing datasets that focus on tasks such as humor detection, punchline identification, or humor generation, Chumor addresses the challenge of humor explanation. This involves not just identifying humor but understanding the reasoning behind it, a task that requires both linguistic and cultural knowledge. Specifically, Chumor tasks the LLMs with determining whether an explanation fully explains the joke. We source the explanations from GPT-4o and ERNIE $_{4\text{-turb}}$ , and have the entire dataset manually annotated by five native Chinese speakers. We evaluate ten LLMs from various model families, and reveal that all models perform poorly, lagging significantly behind humans on Chumor. We observe that chain-of-thought prompting does not necessarily improve models performance and can sometimes confuse their reasoning process. In addition, we conduct a case study in which one of the authors annotates the entire dataset, followed by A/B testing conducted by six native Chinese speakers to compare explanations from GPT-4o versus human, and + +
Cultural
Desc.Require knowledge of specific historical, social, or linguistic contexts.
Ex.(zh)小明在正月接发竟导致舅舅复活。 (en) Xiaoming got hair extensions during the first lunar month, which astonishingly brought his uncle back to life.
Situational
Desc.Involve humor derived from specific contexts, irony, or narrative setups.
Ex.(zh)真可怕, 犯罪嫌疑人就在我们之中,被告席上一名法警对另一名法警说。 (en)“Terrifying, the criminal suspect is right between the two of us,” said one bailiff to another in the defendant's dock.
Pun-based
Desc.Build on linguistic ambiguity and wordplay, require models to identify dual meanings.
Ex.(zh)你可以在steam上找到GTA,所以水是DNA。 (en) You can find GTA on Steam, so water is DNA.
Homophobic
Desc.Rely on phonetic similarities between words or phrases to create humor.
Ex.(zh)家里的猪油没了,小明只能把植物油倒快点当猪油用了。 (en) With the lard gone, Xiaoming had to pour the vegetable oil quickly to use it like lard.
Glyph-based
Desc.Exploit the structural or visual elements of Chinese characters to create humor.
Ex.(zh)我把電串難題简化了,现在是电车难题。 (en) I simplified the trolley problem (in traditional Chinese), now it's the trolley problem (in simplified Chinese).
Cross-lingual
Desc.Involve humor derived from linguistic or phonetic interplay across multiple languages.
Ex.(zh)曹操于城楼上问夏侯惇:“你瞧到了什么。”夏侯惇说:“瞧到马岱。” (en) Cao Cao, from atop the city tower, asked Xia Houdun, “What did you see?” Xia Houdun replied, “I saw Ma Dai.”
+ +Table 1: Different types of jokes. Descriptions (Desc.) explain humor mechanisms. Examples (Ex.) illustrate each category. The corresponding explanations can be found in the referenced figures from the rightmost column. + +ERNIE $_{4}$ -turbo versus human. Our results indicate that human-annotated joke explanations are significantly better than those produced by GPT-4o or ERNIE $_{4}$ -turbo (Figure 4), with LLMs yielding winning rates of only $6.2\%$ for GPT-4o and $5.3\%$ for ERNIE $_{4}$ -turbo compared to humans. + +In summary, our contributions are threefold: + +1. We construct Chumor, a funny and challenging Chinese humor understanding dataset, which is the largest Chinese humor explanation dataset. +2. We evaluate ten LLMs on Chumor and reveal the significant challenges Chumor possesses. We highlight that the best accuracy achieved by LLMs is $60.3\%$ , significantly lower than human's score of $78.3\%$ . +3. We demonstrate that chain-of-thought prompting can hurt LLM's performance in humor reasoning, and that human-annotated joke explanations are significantly better than those produced by GPT-4o and ERNIE $_{4\text{-turbo}}$ , urging future research on culturally specific humor understanding. + +# 2 Related Works + +Humor Datasets. Humor analysis in natural language processing (NLP) encompasses a wide range of tasks, each focused on different aspects of humor. For instance, researchers have proposed datasets + +such as “16000 One-Liners” (Mihalcea and Strapparava, 2005), “Pun of the Day” (Yang et al., 2015), and “Ted Laughter” (Chen and Lee, 2017) focused on humor detection to determine whether a given text is humorous or not. Datasets such as “Big Bang Theory” (Bertero and Fung, 2016) aim at pinpointing the punchline in a joke. Tasks for assessing humor intensity include humor level rating, comparison, and ranking. 
For example, datasets like HumorNorm (Engelthaler and Hills, 2018) and #Hashtag Wars (Potash et al., 2017) quantify humor scores and compare comedic elements, while UR-Funny ranks punchlines based on their perceived impact. Datasets such as “Humicroedit” (Hossain et al., 2019), “ $C^3$ ” (Wang et al., 2022), and “Talk-Funny” (Chen et al., 2024) focus on humor generation, the task of generating or rewriting humorous texts. In addition, we present a comprehensive overview of the existing datasets related to humor in Table 2. We highlight that most existing datasets are in English. Chinese humor, on the other hand, is less explored. Our dataset, Chumor is the first humor explanation dataset in Chinese. + +Culturally Specific Datasets. Recent works underscore the challenges of culturally specific reasoning in LLMs (Shen et al., 2024; AlKhamissi et al., 2024; Pawar et al., 2024; Vayani et al., 2024). These challenges stem from the overrepresentation of Western-centric knowledge and translation ar + +
DatasetSourcesLan.#(k)Tasks
One Liners (2005)Weben16HR
Pun of the Day (2015)Weben4.8HR PD
Big Bang Theory (2016)TVen44PD
Ted Laughter (2017)TEDen9.4HR PD
#HashtagWars (2017)TVen13HC
HumorNorm (2018)\( CS^† \)en5HC
UR-FUNNY (2019)TEDen17PD
Humicroedit (2019)Redditen15HG
rJokes (2020)Redditen57HC
Memotion (2020)Memesen9.8HC
MUMOR (2021)TVen zh30HR
NYT-Captions (2023)NYTen0.7 2.6HE HC
\( C^3 \) (2022)Bookszh9.3HG
TalkFunny (2024)Appszh4.1HG
TCHD (2023)-zh26HR HC PD
TTWS (2019)Bookszh9.1PD
CHM (2020)Apps Webzh3.3HC
Memeplate (2022)Apps Webzh5.2HC
Chumor (us)Webzh3.3HE
+ +Table 2: Existing datasets related to humor. For the shorthands in the table, abbreviations represent the following tasks, HR: humor recognition; PD: punchline detection; HC: humor comparison; HG: humor generation; HE: humor explanation †: Crowd-source. + +tifacts, which limit the fairness and effectiveness of multilingual evaluations (Mihalcea et al., 2024). Researchers have proposed various culturally specific datasets such as Global-MMLU (Singh et al., 2024) to evaluate LLMs' cultural knowledge. Chumor adds to this line of effort as it involves rich knowledge specific to Chinese culture. + +# 3 Chumor Dataset + +Data Collection. We construct our dataset by including RZB jokes from "Best Annual Threads" between 2018 and 2021 that have been previously crawled†. In addition, we directly collect all threads in the "Moderator's Recommendation" section from RZB. Each thread in RZB consists of "标题"(title),"一楼"(content), and several "跟帖"(follow-up posts). For threads from Best Annual Threads, the jokes are listed in the follow-up posts, which are selected by the forum moderator. For threads from Moderator's Recommendation, the jokes consist of the title and the content of each thread. We remove the content if it repeats the title. + +Data Cleaning. We store both the title and the content of the raw data. However, due to the posting restrictions of the platform requiring non-empty content, many posts contain meaningless placeholder texts such as “:”, “!”, “0”, “RT”, and others. We automatically identify and remove these patterns, and only keep the title which is the joke itself. Due to the length limitations on the original platform, many post titles are truncated from the beginning parts of the content. We identify these instances and replace the truncated title with the complete content to get the joke. We also remove duplicates that appear both in the “Moderator’s Recommendation” and the “Best Annual Posts”. 
+ +We manually remove the threads related to forum management and rules, threads that include excessively offensive content, threads with incomplete content, and threads that focus more on philosophical insight rather than humor. + +Humor Explanation Classification. We design a humor explanation classification task that can be easily used to test LLMs' capabilities in humor understanding. Specifically, we use two LLMs, GPT-4o and ERNIE $_{4}$ -turbo to generae explanations for our collected jokes. We manually annotate the generated explanations as either "fully explain the joke" (good) or "partially explain or not explain the joke" (bad) based on a majority vote among five of the authors who are native Chinese speakers. Each joke, along with its explanation, forms an individual instance in Chumor, leading to a total of 3,339 instances. Among these, 1,454 items are labeled as good and 1,887 as bad explanations. + +Data Examples from Chumor. We present examples from Chumor in Table 3. + +Humor Categorization in Chumor. We categorize the jokes in RZB into six types in Table 1, with an example provided for each type. + +# 4 Experiments + +Models. We test ten LLMs, five from the open-source LLM families and five from the closed-source LLM families, all capable of handling Chinese. Specifically, we include the open-source LLMs of $\mathbf{Yi}_{34\mathbf{B}}$ (01.ai, 2024) from 01.AI, Nemotron70B (NVIDIA, 2024) from NVIDIA, Athene70B (Nexusflow, 2024) from Nexusflow, Qwen2.572B (Qwen, 2024) from Alibaba, Mistral123B (Mistral.ai, 2024) from Mistral AI, alongside the closed-source LLMs of + +
JokeExplanationLabelSource
(zh) “你自己掰了一千吨玉米?”“哈哈我瞎掰的”(zh) 这个笑话的幽默之处在于对话中的双关语和出人意料的回答。“掰了一千吨玉米”看似是一个惊人的壮举,而“哈哈我瞎掰的”则瞬间揭露了这其实是一个夸张的玩笑,这种巧妙的反转和语言的戏谑带来了幽默感。goodERNIE4-turbo
(en) “You shucked a thousand tons of corn all by yourself?”“Haha, let's just say there's been plenty of shuckin' going on.”(en) The humor in this joke lies in the pun and the unexpected response in the conversation. “Shucked a thousand tons of corn” seems like an extraordinary achievement, but the reply, ‘haha, let's just say there's been plenty of shuckin' going on,” instantly reveals it to be an exaggerated joke. This clever twist and playful use of language create the comedic effect.
(zh) 小明突然出现在我面前以至于我突然出现在小明面前。(zh) 这个笑话的幽默之处在于它利用了语言的对称性和意外性。小明突然出现导致叙述者的惊讶反应,而叙述者的惊讶反应又反过来让小明感到意外,形成了一个有趣的循环。badGPT-4o
(en) Xiaoming suddenly appeared in front of me, causing me to suddenly appear in front of him.(en) The humor in this joke lies in its use of linguistic symmetry and unexpectedness. Xiao Ming's sudden appearance triggers a surprised reaction from the narrator, which in turn surprises Xiao Ming, creating an amusing loop.
+ +Table 3: Examples from Chumor. The second example's explanation is bad because the joke does not "creating an amusing loop". Instead, it relies on linguistic symmetry and the use of a straightforward fact to subvert expectations. The audience anticipates an unexpected outcome due to the setup, but the latter part "suddenly appear in front of him" flips the perspective by stating the straightforward fact that because Xiao Ming is in front of the person so the person is in front of Xiao Ming too. + +Gemini $_{1.5-pro}$ (Google, 2024) from Google, GLM $_{4\text{plus}}$ (BigModel, 2024) from Tsinghua University, GPT-4 $_{\text{turbo}}$ , GPT-4o (OpenAI, 2023, 2024) from OpenAI, ERNIE $_{4\text{turbo}}$ (Baidu, 2024) from Baidu. For all the open-source LLMs, we use the instruction-tuned version in our evaluation. + +Evaluation Methods. We evaluate these LLMs using two prompting methods: direct prompting (DP) by + +# Direct Prompting (DP) + +你将看到一个笑话以及对这个笑话的解释。请判断这个解释是否完全解释了笑话。根据判断,选择“完全解释”或“部分/没有解释”,不需要解释为什么对或者不对。 + +笑话:[joke] + +笑话解释:[explanation] + +# Translation + +You will see a joke and an explanation of the joke. Please determine whether this explanation fully explains the joke. Based on your judgment, choose either "fully explain" or "partially/does not explain." You do not need to explain why it is correct or incorrect. + +Joke: [joke] + +Explanation: [explanation] + +and chain-of-thought (CoT) prompting (Wei et al., 2022) by adding the phrase “请逐步思考,写下过程”“Please think step by step, write down your reasoning process” before determining the label. Appendix F provides the complete prompts. We cal + +culate accuracy scores as part of our evaluation. In addition, we provide the false positive rate (FPR), false negative rate (FNR), and Matthews Correlation Coefficient (MCC) in Appendix H in Table 4. The MCC score considers true positives, true negatives, false positives, and false negatives, providing a score between -1 and +1. 
A score of +1 indicates perfect predictions, 0 reflects random guessing, and -1 means complete disagreement. The best MCC score achieved by LLMs is 0.29, which is close to random guessing, and is significantly lower than the human average of 0.60. + +# 5 Results and Discussions + +Overall Model Performance. Figure 1 presents the accuracy of different LLMs on Chumor in DP and CoT settings. Appendix H presents additional results and analysis. + +Overall, we observe that all models perform poorly on Chinese humor comprehension, with accuracy scores ranging between $44.6\%$ and $60.3\%$ . ERNIE $_{4\text{-}\text{turbo}}$ and Gemini $_{1.5\text{-}\text{pro}}$ achieve the highest accuracy of $60.3\%$ , and are just 10 points above the random baseline and far below human performance of $78.3\%$ , highlighting the difficulty of Chumor and the limitations of these LLMs in understanding Chinese humor. + +Error Analysis by Joke Type. To better understand how LLMs perform on each joke type listed in Table 1, we sample 200 jokes for error analysis. Figure 2 and Figure 17 in Appendix H present the results. The distribution of joke types can be found in Appendix G Figure 15. + +![](images/c82242bc9d064b5b1e48e0527762f6fc5982d614add9b8428f192d0426397d89.jpg) +Figure 1: The accuracy of different models' test results in the DP and CoT settings. ERNIE $_{4}$ -turbo and Gemini $_{1.5\text{-pro}}$ achieve the highest accuracy of $60.3\%$ . + +We highlight that model performance varies significantly across different joke types. While models generally perform well on Situational jokes, achieving $60.0\%$ to $70.0\%$ accuracy in both DP and CoT settings, their performance difference on other joke types is more pronounced. For instance, GLM-4plus achieves $65.0\%$ accuracy on Homophonic jokes in the DP setting, whereas $\mathrm{Yi}_{34\mathrm{B}}$ only reaches $30.0\%$ . 
Nematron70\mathrm{B}\ performs well on Cultural jokes in the CoT setting with $72.0\%$ accuracy, but Athene70\mathrm{B}\ and ERNIE4-turbo achieve with only $43.0\%$ and $42.0\%$ , respectively. Such performance variance highlights LLMs' varied capabilities in specific domains such as cultural reasoning and situational reasoning, revealing the respective limitations of these LLMs. + +# 5.1 Have LLMs achieved human-level understanding of humor? + +Answer: No. To compare the performance of LLMs with humans, we conduct a human study involving three Chinese native speakers unfamiliar with this work to annotate a randomly chosen subset of 200 examples. Human annotators demonstrate significantly better performance, with an average accuracy of $78.3\%$ and an MCC score of 0.60, significantly better than the LLMs' best per + +![](images/1751194a5476ee5570e3e726c0ef4c756014c6ce34f4a9fb82ab296870d8687f.jpg) +Figure 2: DP accuracy on different joke types $(\%)$ . Here, we sample 200 jokes for error analysis. We highlight that model performance varies significantly across different joke types. + +formance of $60.3\%$ accuracy and an MCC score of 0.29 (Figure 16 in Appendix H). Our results indicate that there is a large room of performance improvement for LLMs on Chinese humor understanding. + +# 5.2 Does chain-of-thought (CoT) help LLMs' humor understanding? + +
Example(zh)“大人有大量,小孩倒是不多了。”食人族厨师清点食材说。 +(en)“Adults are plenty, but children are quite scarce,” said the cannibal chef while counting ingredients.
Joke Explanation(zh)这个笑话的幽默之处在于其运用了惊悚与诙谐的巧妙结合,以及意料之外的语言转折。食人族厨师清点食材的场景已经足够诡异,但他却以“大人有大量,小孩倒是不多了”这样似乎很平常的口吻来评论,这种对比和反差产生了荒诞的幽默感,同时也在探讨一种颠覆性的、黑暗式的玩笑。 +(en)The humor in this joke lies in its clever combination of horror and wit, as well as the unexpected linguistic twist. The scene of a cannibal chef counting ingredients is already eerie, but the casual remark, "Adults are plenty, but children are quite scarce," delivered in an ordinary tone, creates a stark contrast. This juxtaposition generates absurd humor while exploring a subversive, dark style of comedy.
Model's Answer DP✓(zh)部分/没有解释 +(en)Partially/Not ExplainedModel's Answer CoT X (zh) ...这句话使用了一个常用的谚语“大人有大量”来双关...比较您的理解和解释:笑话解释涵盖了我的分析要点,指出了对比和反差产生的荒诞幽默感,并提到了黑暗式幽默。解释完全涵盖了笑话的所有幽默点,因此选择“完全解释”。 +(en) ...This sentence uses a common proverb, "adults are plenty," as a pun... Comparison of your understanding and the explanation: The joke explanation covers the key points of my analysis, highlighting the absurd humor created by contrast and reversal, and mentioning the element of dark humor. The explanation fully addresses all aspects of the joke's humor, so “Fully Explained” is selected.
CoT Failure ReasonsObserving model's explanation shows it recognizes the pun in the joke. However, the joke explanation being evaluated misses the pun. Model correctly identifies this in DP but, in CoT, despite noticing the pun, attempts to justify the flawed explanation, leading to an incorrect judgment.
+ +Figure 3: Over-analyzing example by GPT-4o. The GPT-4o model chooses the correct answer in the DP prompting, but chooses the incorrect answer due to over-analyzing in the CoT prompting. + +Answer: No. We observe that CoT does not necessarily improve model performance and, in most cases, even leads to performance decay. For instance, as shown in Figure 1, the accuracy of $\mathrm{ERNIE}_{4}$ -turbo decreases from $60.3\%$ to $45.2\%$ when we switch to CoT prompting, Mistral $_{123B}$ 's performance drops from $55.6\%$ to $51.2\%$ , GPT-4o's performance drops from $51.9\%$ to $50.6\%$ , GPT-4turbo's performance falls from $52.3\%$ to $51.3\%$ . Moreover, the MCC scores present a clearer trend of performance decline under CoT prompting. As shown in Figure 16 in Appendix H, eight of the ten LLMs' MCC scores decrease under CoT prompting. We hypothesize that CoT prompts may not help the model's reasoning when the model lacks a fundamental grasp of humor understanding. + +We observe that under CoT prompting, models like GPT-4o tend to justify incorrect explanations as "correct", leading to an increase in false-positive rate from $80.0\%$ for DP prompting to $85.0\%$ for CoT prompting (Table 4 in Appendix H). $\mathrm{ERNIE}_{4}$ -turbo exhibits the largest false-positive rate, rising from $59.8\%$ to $96.9\%$ (Table 4 in Appendix H). Figure 3 provides an example where CoT confuses the GPT-4o model. Under the DP prompting, the GPT-4o model chooses the answer correctly. However, CoT prompting causes the model to over-analyze and justify an incorrect explanation. + +On the other hand, models like Nematron $_{70}$ may be overly critical of explanations under CoT prompting, resulting in a false-negative rate from $20.9\%$ for DP prompting to $46.1\%$ for CoT prompting (Table 4 in Appendix H). 
We highlight that a + +recent work demonstrates that CoT can degrade performance in tasks requiring subtle comprehension (Sprague et al., 2024), which aligns with our findings on its limitations in humor interpretation. Figure 14 in Appendix E discusses an example corresponding to the model being overly critical. + +# 5.3 Case study: can GPT-4o and ERNIE $_{4}$ -turbo explain jokes as well as humans? + +Answer: No. Apart from testing multiple LLMs on Chumor, we conduct case studies on GPT-4o and ERNIE $_{4}$ -turbo to assess the quality of their joke explanations compared to humans. We prompt them to explain the humor in two sentences, consistent with the format of human explanations. Here is the prompt we feed to both LLMs: + +# Prompt + +请用两句话解释这个笑话的幽默之处: [joke] + +Please explain the joke in two sentences: [joke] + +Data Annotation. As demonstrated by Hessel et al. (2023), crowd-sourcing typically cannot produce high-quality explanations, following Hessel et al. (2023), one of the authors annotates all the explanations to ensure the quality and consistency. + +This is a substantial effort: the author ended up annotating the explanations for 1,951 jokes. The resulting corpus has a mean of 78 Chinese characters of explanation per joke, and the total length, 151,730 Chinese characters, is comparable + +![](images/f29ecc8a0d970beaa864265793e8e2763bf420cfe75aaaa9c975bdd6a3d526dc.jpg) +Figure 4: Annotated preference for whether human explanation is preferred ("Human wins") or the explanation from LLMs is preferred ("LLM wins"). Humans' explanation is significantly preferred over LLMs'. + +in length to a novella†. + +Evaluation Setup. To fairly evaluate which explanation is better, we conduct A/B testing by presenting the humor explanation from one LLM and from human to six college students, asking them to annotate their preference of the explanation for each joke. 
These college students are native Chinese speakers who grew up in China, therefore they have a deep understanding of the cultural terms and trending terms in China. We note that the preference annotation requires a substantial effort as each annotator reads through a total length of around 300k Chinese characters†. We end up with three preference annotations for each joke. The preference annotation achieves a $61.4\%$ agreement rate among annotators (Appendix B). + +We use the winning rate as our measure to compare LLMs' explanation versus human explanation, taking the majority vote among all annotators for each example. In addition, if all annotators disagree, we assign an "Undecided" label. Appendix C provides the annotation instructions we present to the annotators. + +Overall Results. Figure 4 reports the winning rate of explanations from human versus GPT-4o and ERNIE $_{4\text{-turbo}}$ . We can see that human explanations are significantly better than those from both LLMs, with humans winning over $50\%$ of the time, while LLMs win in only $2 - 3\%$ of cases. + +Error Analysis. Figure 5 shows the overall distribution of error types for GPT-4o and ERNIE $_{4\text{-}\text{turbo}}$ on Chumor in terms of their humor explanations. This error analysis is conducted by an individual who is not involved in writing the original explanations, ensuring an unbiased evaluation. GPT-4o + +![](images/a8d2f4eda47cac68bcfb4deb23736b0a2064f14ab806d5e36881a22d801cd584.jpg) +Figure 5: Distribution of error types for GPT-4o and ERNIE $_{4\text{-}\text{turbo}}$ . We sample 200 examples to calculate the distribution of these error types. We note that each example may correspond to multiple error types. We highlight that ERNIE $_{4\text{-}\text{turbo}}$ demonstrates a lower error rate on cultural jokes, while GPT-4o demonstrates a lower error rate on contextual or pun-based jokes.
+ +is more prone to errors categorized as "cultural unawareness" (29.5% of all its explanations) compared to $\mathrm{ERNIE}_{4\text{-}\text{turbo}}$ (10.5%). We suspect that $\mathrm{ERNIE}_{4\text{-}\text{turbo}}$ is more familiar with Chinese culture as it is likely trained on a larger Chinese corpus than GPT-4o. However, GPT-4o performs better on cases requiring an understanding of contexts or puns, suggesting its strong reasoning ability. We provide three error cases for GPT-4o here and additional cases for both GPT-4o and $\mathrm{ERNIE}_{4\text{-}\text{turbo}}$ in Appendix E. In the following examples in Figure 6, Figure 7 and Figure 8, we highlight key phrases that induce humor in green, and underscore the errors in red. + +Error Type I: Cultural Unawareness. LLMs may fail to explain a joke due to a lack of awareness of certain cultural knowledge. For instance, the example in Figure 6 requires knowledge of a superstitious belief in Chinese culture, getting a haircut in the first lunar month brings death to your uncle, and the explanation from GPT-4o fails to connect to this Chinese cultural belief. We hypothesize that while LLMs are pre-trained on Internet-scale corpora, such culturally specific knowledge can still be challenging for them to grasp. Moreover, even when they have acquired such cultural knowledge, they may fail to relate to it as we humans do during the reasoning process. + +Error Type II: Pun-based Error. LLMs may fail to identify multiple meanings of a single word within a joke, causing them to fail on pun-based + +
Example(zh) 小明在正月接发竟导致舅舅复活。 (en) Xiaoming got hair extensions during the first lunar month, which astonishingly brought his uncle back to life.
Correct Humor Explanation“Getting a haircut in the first lunar month will bring death to your uncle.” is a popular superstitious saying in Chinese culture. In this joke, Xiao Ming gets hair extensions in the first month, which reverses the original logic and absurdly results in “bringing his uncle back to life.”
GPT-4o's Answer(zh) ...它利用了“接发”和“接福”的双关语:一方面是指理发店的接发服务,另一方面是指正月里“接福”的传统习俗。 (en) ...the pun on “接发”: it refers to both hair extension at a salon and the traditional practice of “receiving blessings” during the first lunar month.
Failure ReasonsFail to grasp expressions, slang, or specific usages unique to a particular cultural context.
+ +Figure 6: Culture unawareness example. + +
Example(zh) 你可以在steam上找到GTA,所以水是DNA。
(en) You can find GTA on Steam,so water is DNA.
Correct +Humor +ExplanationNormally,“Steam”refers to a gaming platform and “GTA”refers to the “Grand Theft Auto” game series. The joke uses a pun, where “steam”literally means the gaseous form of water, and “G”,“T”,and “A” represent Guanine, Thymine, and Adenine, respectively—three of the four nucleotides (AGTC) that make up DNA.
GPT-4o's +Answer(zh)...这种明显错误的类比制造了荒诞的效果,引人发笑。
(en)...This clearly wrong analogy creates an absurd effect.
Failure +ReasonsFail to discover multiple meanings of a word in the jokes
+ +jokes where humor lies in inverting the conventional usage of words. In Figure 7, GPT-4o fails to grasp the transition from the video game terms "Steam", "GTA" to the scientific terminologies "G", "T", "A" that form DNA. Typically, "Steam" refers to a game platform, and "GTA" refers to the game series "Grand Theft Auto". The joke employs a pun on words where "steam" in its literal sense means water vapor, and "GTA" can represent not only the video game, but guanine, thymine, and adenine, which are nucleotides involved in the structure of DNA. Such jokes require LLMs to identify puns and the reason for the association of the multiple meanings. Furthermore, the process requires LLMs to bridge the logic gap between these terms, such as + +Figure 7: Pun-based error example. + +
Example(zh) 家里的猪油没了,小明只能把 植物油倒快点 当猪油用了。 (en) With the lard gone, Xiaoming had to pour the vegetable oil quickly to use it like lard.
Correct +Humor +ExplanationWhen pronounced quickly,“vegetable” (pronounced as chih woo) sounds very similar to “pig” (pronounced as choo) in Chinese, leading Xiaoming to comically try using vegetable oil as lard by pouring it quickly.
GPT-4o's +Answer(zh) ...小明误解了猪油和植物油的区别,认为只要倒得快,植物油就能变成猪油。笑点在于他天真的逻辑和对常识的搞笑误解。 (en) ... Xiaoming misunderstood that if vegetable oil is poured quickly, it can turn into lard, highlighting his naive logic and misinterpretation of common sense.
Failure +ReasonsFail to exploit homophones or near-homophones in the jokes.
+ +Figure 8: Homophonic error example. + +"steam" and "GTA", and an unusual context, like "water is DNA". The overall process requires both scientific knowledge and creative thinking, which are challenging for LLMs. + +Error Type III: Homophonic Error. The example in Figure 8 requires LLMs to reason over the pronunciation as “植物” (pronounced as chih woo, meaning “vegetable”) sounds very similar to “猪” (pronounced as choo, meaning “pig”) in Chinese when we speak it fast enough. The humor arises from the contrast between the similarity in pronunciation and the disparity in meaning between the two terms. Such contrasts may be sparse in the training corpus of LLMs, and also demand a deep connection across different modalities to link pronunciation with the meaning behind these terms, which poses significant challenges to LLMs. + +# 6 Conclusion + +We introduce Chumor, a Chinese humor understanding dataset that captures intellectually challenging and culturally specific humor in Chinese. Our analysis reveals that Chumor remains difficult even for advanced LLMs, with a significant performance gap between LLMs and humans. Furthermore, we find that chain-of-thought reasoning does not improve LLMs' humor comprehension and, in some cases, leads to over-analysis and incorrect interpretations. Additionally, models such as GPT-4o and ERNIE $_{4}$ -turbo struggle to explain jokes as effectively as humans, highlighting fundamental challenges in humor reasoning. These findings un + +derscore the unique difficulties that Chinese humor presents to LLMs. We hope that Chumor can advance non-English humor research and contribute to evaluating LLMs' reasoning abilities across diverse cultural backgrounds. + +# Limitations + +We try our best to test the Chinese humor understanding ability of different LLMs. However, due to the limited budget and API access, we cannot evaluate all possible LLMs in this paper. 
We encourage future research to conduct further evaluations of humor understanding abilities in LLMs. In the meantime, we emphasize that our research focuses primarily on demonstrating how humor understanding remains a significant challenge, even for SOTA LLMs. Our work shows that along with many other problems (Ignat et al., 2024), humor understanding, especially non-English and culturally specific humor understanding, remains an unsolved problem in the era of LLMs. We hope Chumor can contribute to non-English humor understanding evaluations for future multilingual LLMs. + +# Ethics Statement + +We have made every effort to filter out excessively offensive content in RZB. However, due to the subjective nature of humor, some of our jokes may still be perceived as offensive by individuals with different cultural or personal standards. To address these concerns, we strongly recommend that researchers use Chumor with cultural sensitivity, recognizing that the jokes in the dataset reflect the sociocultural context in which they were created. We encourage users of Chumor to approach the dataset with caution, remaining mindful of its potential to cause offense or harm, particularly when applying it in research or applications that involve diverse audiences or address sensitive topics. We wish to foster an ethical and responsible approach to data collection and usage, and we welcome constructive feedback from the research community and stakeholders to continually improve Chumor and mitigate potential harm. + +# Acknowledgement + +The GPT experiments are supported by credit from OpenAI through OpenAI Researcher Access assigned to Naihao Deng. We appreciate Qiang Liu, and Xiaoyue Shi for helping with the human study. + +# References + +01.ai. 2024. Yi-34b model card. https://huggingface.co/01-ai/Yi-34B. Accessed: 2024-12-10. +Marah Abdin, Jyoti Aneja, Harkirat Behl, Sébastien Bubeck, Ronen Eldan, Suriya Gunasekar, Michael Harrison, Russell J. 
Hewett, Mojan Javaheripi, Piero Kauffmann, James R. Lee, Yin Tat Lee, Yuanzhi Li, Weishung Liu, Caio C. T. Mendes, Anh Nguyen, Eric Price, Gustavo de Rosa, Olli Saarikivi, and 8 others. 2024. Phi-4 technical report. Preprint, arXiv:2412.08905. +Badr AlKhamissi, Muhammad ElNokrashy, Mai Alkhamissi, and Mona Diab. 2024. Investigating cultural alignment of large language models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 12404-12422, Bangkok, Thailand. Association for Computational Linguistics. +Jinze Bai, Shuai Bai, Yunfei Chu, Zeyu Cui, Kai Dang, Xiaodong Deng, Yang Fan, Wenbin Ge, Yu Han, Fei Huang, Binyuan Hui, Luo Ji, Mei Li, Junyang Lin, Runji Lin, Dayiheng Liu, Gao Liu, Chengqiang Lu, Keming Lu, and 29 others. 2023. Qwen technical report. Preprint, arXiv:2309.16609. +Yuelin Bai, Xinrun Du, Yiming Liang, Yonggang Jin, Ziqiang Liu, Junting Zhou, Tianyu Zheng, Xincheng Zhang, Nuo Ma, Zekun Wang, and 1 others. 2024. Coig-cqia: Quality is all you need for chinese instruction fine-tuning. arXiv preprint arXiv:2403.18058. +Baidu. 2024. Ernie-4.0-turbo. https://cloud.baidu. com/doc/WENXINWORKSHOP/s/71xwwtafj. Accessed: 2024-12-10. +Dario Bertero and Pascale Fung. 2016. Deep learning of audio and language features for humor prediction. In Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16), pages 496-501, Porto-rož, Slovenia. European Language Resources Association (ELRA). +BigModel. 2024. Glm-4 model documentation. https://bigmodel.cn/dev/howuse/glm-4. Accessed: 2024-12-10. +Lei Chen and Chong Min Lee. 2017. Predicting audience's laughter during presentations using convolutional neural network. In Proceedings of the 12th Workshop on Innovative Use of NLP for Building Educational Applications, pages 86-90, Copenhagen, Denmark. Association for Computational Linguistics. +Yuyan Chen, Zhixu Li, Jiaqing Liang, Yanghua Xiao, Bang Liu, and Yunwen Chen. 
2023. Can pre-trained language models understand chinese humor? In Proceedings of the Sixteenth ACM International Conference on Web Search and Data Mining, WSDM '23, page 465-480, New York, NY, USA. Association for Computing Machinery. + +Yuyan Chen, Yichen Yuan, Panjun Liu, Dayiheng Liu, Qinghao Guan, Mengfei Guo, Haiming Peng, Bang Liu, Zhixu Li, and Yanghua Xiao. 2024. Talk funny! a large-scale humor response dataset with chain-of-humor interpretation. Proceedings of the AAAI Conference on Artificial Intelligence, 38(16):17826-17834. +Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. 2018. Think you have solved question answering? try arc, the ai2 reasoning challenge. Preprint, arXiv:1803.05457. +Peter T. Daniels and William Bright. 1996. The world's writing systems. Oxford University Press. +Naihao Deng, Zhenjie Sun, Ruiqi He, Aman Sikka, Yu-long Chen, Lin Ma, Yue Zhang, and Rada Mihalcea. 2024. Tables as images? exploring the strengths and limitations of llms on multimodal representations of tabular data. arXiv preprint arXiv:2402.12424. +Naihao Deng, Xinliang Zhang, Siyang Liu, Winston Wu, Lu Wang, and Rada Mihalcea. 2023. You are what you annotate: Towards better models through annotator representations. In *Findings of the Association for Computational Linguistics: EMNLP* 2023, pages 12475–12498, Singapore. Association for Computational Linguistics. +Xinrun Du, Zhouliang Yu, Songyang Gao, Ding Pan, Yuyang Cheng, Ziyang Ma, Ruibin Yuan, Xingwei Qu, Jiaheng Liu, Tianyu Zheng, and 1 others. 2024. Chinese tiny llm: Pretraining a chinese-centric large language model. arXiv preprint arXiv:2404.04167. +Tomas Engelthaler and Thomas T Hills. 2018. Humor norms for 4,997 english words. Behavior research methods, 50:1116-1124. +WILLIAM F. FRY. 1994. The biology of humor. HUMOR, 7(2):111-126. +Marc Gelkopf and 1 others. 2011. The use of humor in serious mental illness: A review. 
Evidence-Based Complementary and Alternative Medicine, 2011. +Matthew Gervais and David Sloan Wilson. 2005. The evolution and functions of laughter and humor: A synthetic approach. *The Quarterly review of biology*, 80(4):395-430. +Hamideh Ghanaian, Isar Nejadgholi, and Hussein Al Osman. 2023. ChatGPT for suicide risk assessment on social media: Quantitative evaluation of model performance, potentials and limitations. In Proceedings of the 13th Workshop on Computational Approaches to Subjectivity, Sentiment, & Social Media Analysis, pages 172-183, Toronto, Canada. Association for Computational Linguistics. +Google. 2024. Gemini 1.5 pro model documentation. https://ai.google.dev/gemini-api/docs/ models/gemini#gemini-1.5-pro. Accessed: 2024-12-10. + +Md Kamrul Hasan, Wasifur Rahman, AmirAli Bagher Zadeh, Jianyuan Zhong, Md Iftekhar Tanveer, Louis-Philippe Morency, and Mohammed (Ehsan) Hoque. 2019. UR-FUNNY: A multimodal language dataset for understanding humor. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 2046-2056, Hong Kong, China. Association for Computational Linguistics. +Jack Hessel, Ana Marasovic, Jena D. Hwang, Lillian Lee, Jeff Da, Rowan Zellers, Robert Mankoff, and Yejin Choi. 2023. Do androids laugh at electric sheep? humor "understanding" benchmarks from the new yorker caption contest. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 688-714, Toronto, Canada. Association for Computational Linguistics. +Nabil Hossain, John Krumm, and Michael Gamon. 2019. "president vows to cut hair": Dataset and analysis of creative text editing for humorous headlines. 
In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 133-142, Minneapolis, Minnesota. Association for Computational Linguistics. +Oana Ignat, Zhijing Jin, Artem Abzaliev, Laura Biester, Santiago Castro, Naihao Deng, Xinyi Gao, Aylin Ece Gunal, Jacky He, Ashkan Kazemi, Muhammad Khalifa, Namho Koh, Andrew Lee, Siyang Liu, Do June Min, Shinka Mori, Joan C. Nwatu, Veronica Perez-Rosas, Siqi Shen, and 3 others. 2024. Has it all been solved? open NLP research questions not solved by large language models. In Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pages 8050-8094, Torino, Italia. ELRA and ICCL. +Herbert M Lefcourt. 2001. *Humor: The psychology of living buoyantly*. Springer Science & Business Media. +Zefeng Li, Hongfei Lin, Liang Yang, Bo Xu, and Shaowu Zhang. 2022. Memeplate: A chinese multimodal dataset for humor understanding in meme templates. In *Natural Language Processing and Chinese Computing*, pages 527-538, Cham. Springer International Publishing. +Hanmeng Liu, Ruoxi Ning, Zhiyang Teng, Jian Liu, Qiji Zhou, and Yue Zhang. 2023a. Evaluating the logical reasoning ability of chatgpt and gpt-4. arXiv preprint arXiv:2304.03439. +Siyang Liu, Naihao Deng, Sahand Sabour, Yilin Jia, Minlie Huang, and Rada Mihalcea. 2023b. Task-adaptive tokenization: Enhancing long-form text generation efficacy in mental health and beyond. In Proceedings of the 2023 Conference on Empirical Meth + +ods in Natural Language Processing, pages 15264-15281, Singapore. Association for Computational Linguistics. +Xin Liu, Baosong Yang, Dayiheng Liu, Haibo Zhang, Weihua Luo, Min Zhang, Haiying Zhang, and Jinsong Su. 2021. Bridging subword gaps in pretrainfinetune paradigm for natural language generation. 
In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 6001-6011, Online. Association for Computational Linguistics. +Paul E McGhee. 1971. Development of the humor response: A review of the literature. Psychological Bulletin, 76(5):328. +Rada Mihalcea, Oana Ignat, Longju Bai, Angana Borah, Luis Chiruzzo, Zhijing Jin, Claude Kwizera, Joan Nwatu, Soujanya Poria, and Thamar Solorio. 2024. Why ai is weird and should not be this way: Towards ai for everyone, with everyone, by everyone. arXiv preprint arXiv:2410.16315. +Rada Mihalcea and Carlo Strapparava. 2005. Making computers laugh: Investigations in automatic humor recognition. In Proceedings of Human Language Technology Conference and Conference on Empirical Methods in Natural Language Processing, pages 531-538, Vancouver, British Columbia, Canada. Association for Computational Linguistics. +Mistral.ai. 2024. Mistral-large-instruct-2407 model card. https://huggingface.co/mistralai/Mistral-Large-Instruct-2407. Accessed: 2024-12-10. +Nexusflow. 2024. Athene-70b model card. https://huggingface.co/Nexusflow/Athene-70B. Accessed: 2024-12-10. +NVIDIA. 2024. Llama-3.1-nemotron-70b-instruct-hf model card. https://huggingface.co/nvidia/Llama-3.1-Nemotron-70B-Instruct-HF. Accessed: 2024-12-10. +OpenAI. 2023. Gpt-4 technical report. arXiv preprint arXiv:2303.08774. +OpenAI. 2024. Gpt-4o system card. arXiv preprint arXiv:2410.21276. +Siddhesh Pawar, Junyeong Park, Jiho Jin, Arnav Arora, Junho Myung, Srishti Yadav, Faiz Ghifari Haznitrama, Inhwa Song, Alice Oh, and Isabelle Augenstein. 2024. Survey of cultural awareness in language models: Text and beyond. arXiv preprint arXiv:2411.00860. +Peter Potash, Alexey Romanov, and Anna Rumshisky. 2017. SemEval-2017 task 6: #HashtagWars: Learning a sense of humor. 
In Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017), pages 49-57, Vancouver, Canada. Association for Computational Linguistics. + +Qwen. 2024. Qwen2.5-72b-instruct model card. https://huggingface.co/Qwen/Qwen2.5-72B-Instruct. Accessed: 2024-12-10. +Dragomir Radev, Amanda Stent, Joel Tetreault, Aasish Pappu, Aikaterini Iliakopoulou, Agustin Chanfreau, Paloma de Juan, Jordi Vallmitjana, Alejandro Jaimes, Rahul Jha, and Robert Mankoff. 2016. Humor in collective discourse: Unsupervised funniness detection in the new yorker cartoon caption contest. In Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16), pages 475-479, Porto Roz, Slovenia. European Language Resources Association (ELRA). +David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. 2023. Gpqa: A graduate-level google-proof q&a benchmark. Preprint, arXiv:2311.12022. +Chhavi Sharma, Deepesh Bhageria, William Scott, Srinivas PYKL, Amitava Das, Tanmoy Chakraborty, Viswanath Pulabaigari, and Björn Gambäck. 2020. SemEval-2020 task 8: Memotion analysis-the visuolinguial metaphor! In Proceedings of the Fourteenth Workshop on Semantic Evaluation, pages 759-773, Barcelona (online). International Committee for Computational Linguistics. +Siqi Shen, Lajanugen Logeswaran, Moontae Lee, Honglak Lee, Soujanya Poria, and Rada Mihalcea. 2024. Understanding the capabilities and limitations of large language models for cultural commonsense. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 5668-5680, Mexico City, Mexico. Association for Computational Linguistics. +Shivalika Singh, Angelika Romanou, Clémentine Fourrier, David I. 
Adelani, Jian Gang Ngui, Daniel Vila-Suero, Peerat Limkonchotiwat, Kelly Marchisio, Wei Qi Leong, Yosephine Susanto, Raymond Ng, Shayne Longpre, Wei-Yin Ko, Madeline Smith, Antoine Bosselut, Alice Oh, Andre F. T. Martins, Leshem Choshen, Daphne Ippolito, and 4 others. 2024. Global mmlu: Understanding and addressing cultural and linguistic biases in multilingual evaluation. Preprint, arXiv:2412.03304. +Zayne Sprague, Fangcong Yin, Juan Diego Rodriguez, Dongwei Jiang, Manya Wadhwa, Prasann Singhal, Xinyu Zhao, Xi Ye, Kyle Mahowald, and Greg Durrett. 2024. To cot or not to cot? chain-of-thought helps mainly on math and symbolic reasoning. Preprint, arXiv:2409.12183. +Honglin Sun and Daniel Jurafsky. 2004. Shallow semantic parsing of Chinese. In Proceedings of the Human Language Technology Conference of the North American Chapter of the Association for Computational Linguistics: HLT-NAACL 2004, pages 249-256, Boston, Massachusetts, USA. Association for Computational Linguistics. + +Weiwei Sun, Zhifang Sui, Meng Wang, and Xin Wang. 2009. Chinese semantic role labeling with shallow parsing. In Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing, pages 1475-1483, Singapore. Association for Computational Linguistics. +Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, and Guillaume Lample. 2023a. Llama: Open and efficient foundation language models. Preprint, arXiv:2302.13971. +Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, and 49 others. 2023b. Llama 2: Open foundation and fine-tuned chat models. Preprint, arXiv:2307.09288. 
+Yuen-Hsien Tseng, Wun-Syuan Wu, Chia-Yueh Chang, Hsueh-Chih Chen, and Wei-Lun Hsu. 2020. Development and validation of a corpus for machine humor comprehension. In Proceedings of the Twelfth Language Resources and Evaluation Conference, pages 1346-1352, Marseille, France. European Language Resources Association. +Ashmal Vayani, Dinura Dissanayake, Hasindri Watawana, Noor Ahsan, Nevasini Sasikumar, Omkar Thawakar, Henok Biadglin Ademtew, Yahya Hmaiti, Amandeep Kumar, Kartik Kuckreja, and 1 others. 2024. All languages matter: Evaluating Imms on culturally diverse 100 languages. arXiv preprint arXiv:2411.16508. +Benyou Wang, Xiang Wu, Xiaokang Liu, Jianquan Li, Prayag Tiwari, and Qianqian Xie. 2022. Can language models make fun? a case study in chinese comical crosstalk. In Annual Meeting of the Association for Computational Linguistics. +Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, and 1 others. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837. +Orion Weller and Kevin Seppi. 2020. The rJokes dataset: a large scale humor collection. In Proceedings of the Twelfth Language Resources and Evaluation Conference, pages 6136-6141, Marseille, France. European Language Resources Association. +Jiaming Wu, Hongfei Lin, Liang Yang, and Bo Xu. 2021. Mumor: A multimodal dataset for humor detection in conversations. In *Natural Language Processing and Chinese Computing: 10th CCF International Conference*, NLPCC 2021, Qingdao, China, October 13–17, 2021, Proceedings, Part I, page 619–627, Berlin, Heidelberg. Springer-Verlag. + +Yufan Wu, Yinghui He, Yilin Jia, Rada Mihalcea, Yu-long Chen, and Naihao Deng. 2023. Hi-ToM: A benchmark for evaluating higher-order theory of mind reasoning in large language models. In *Findings of the Association for Computational Linguistics: EMNLP* 2023, pages 10691-10706, Singapore. 
Association for Computational Linguistics. +Diyi Yang, Alon Lavie, Chris Dyer, and Eduard Hovy. 2015. Humor recognition and humor anchor extraction. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 2367-2376, Lisbon, Portugal. Association for Computational Linguistics. +Dongyu Zhang, Heting Zhang, Xikai Liu, Hongfei Lin, and Feng Xia. 2019. Telling the whole story: A manually annotated chinese dataset for the analysis of humor in jokes. In Conference on Empirical Methods in Natural Language Processing. +Min Zhang, Jianfeng He, Taoran Ji, and Chang-Tien Lu. 2024a. Don't go to extremes: Revealing the excessive sensitivity and calibration limitations of llms in implicit hate speech detection. Preprint, arXiv:2402.11406. +Tianyi Zhang, Faisal Ladhak, Esin Durmus, Percy Liang, Kathleen McKeown, and Tatsunori B. Hashimoto. 2024b. Benchmarking Large Language Models for News Summarization. Transactions of the Association for Computational Linguistics, 12:39-57. +Jun Zhao, Zhihao Zhang, Qi Zhang, Tao Gui, and Xuanjing Huang. 2024. Llama beyond english: An empirical study on language capability transfer. arXiv preprint arXiv:2401.01055. +Jeffrey Zhou, Tianjian Lu, Swaroop Mishra, Siddhartha Brahma, Sujoy Basu, Yi Luan, Denny Zhou, and Le Hou. 2023. Instruction-following evaluation for large language models. Preprint, arXiv:2311.07911. + +# A Contributions + +Idea Proposal. Naihao Deng proposed the high-level idea of constructing a humor understanding benchmark sourced from RZB data. + +Background Survey. Ruiqi He surveyed the humor-related tasks. + +Data Processing. Ruiqi He crawled and processed the jokes from RZB. + +Annotation. Ruiqi He annotated the explanations for the RZB jokes. Yushu He, Longju Bai, Jiarui Liu, Zhenjie Sun, Zhenghao Tang, He Wang, Nai-hao Deng conducted the preference annotations. + +Experiments. Ruiqi He, Hanchen Xia, and Naihao Deng conducted the experiments. + +Result Aggregation. 
Ruiqi He, Naihao Deng, Yushu He aggregated the results. + +Paper Writing. Ruiqi He and Naihao Deng drafted the paper. Other authors provided revisions and feedback on the paper. + +Naihao Deng organized the research. + +# B Agreement Rate Calculation + +We calculate the percentage agreement rate among annotators who annotate their preferences between explanations from LLMs and humans. The results show an average inter-annotator agreement of $61.9\%$ for GPT-4o and $60.9\%$ for $\mathrm{ERNIE}_{4}$ -turbo. Given the inherent subjectivity of humor interpretation tasks (Deng et al., 2023), the combined average agreement percentage of $61.4\%$ is decent. + +# C Annotation Instructions for Preference Annotation + +We include the following instructions for the preference annotations of the joke explanations: + +# Instruction + +“在这个标注中,你将会看到一个笑话和对这个笑话的幽默之处的两个解释,请你比较哪个解释更好的解释了这个笑话的幽默之处,并从以下三个标签中选择: + +1. 解释1 +2. 解释2 +3. 一样好” + +# Translation + +"In this annotation task, you will see a joke along with two explanations of its humor. Please compare which explanation better explains the reason why this joke is funny and choose from the following three labels: + +1. Explanation 1 +2. Explanation 2 +3. Both are equally good." + +For each example, we randomly assign the explanations from the LLMs and the human as Explanation 1 and Explanation 2 to ensure a fair comparison. + +# D Discussion on Evaluation Setting + +Why Zero-Shot w.o. SFT? The primary research objective of this paper is to determine how well foundational LLMs can understand Chinese humor without relying on supervised fine-tuning for this binary classification task. The focus is on investigating the innate humor-understanding ability of these models through zero-shot and zero-shot CoT prompting. This aligns with the overarching goal of examining their general capabilities without additional task-specific training. 
From a human behavior perspective, individuals generally acquire a sense of humor through exposure and experience rather than explicit classroom instruction (McGhee, 1971; FRY, 1994; Gervais and Wilson, 2005). Analogously, our purpose lies in evaluating the models' intrinsic ability to recognize and interpret humor without deliberate, task-specific fine-tuning. Moreover, the experimental design follows practices from other benchmarks, such as GPQA (Rein et al., 2023), AI2ARC (Clark et al., 2018), and IFEVAL (Zhou et al., 2023), which do not provide predefined train/dev/test splits. Under these conditions, it is common to assess models in a zero-shot manner to directly evaluate their capabilities on each respective task (Touvron et al., 2023a,b; Bai et al., 2023; Abdin et al., 2024). + +
Example(zh)真可怕,犯罪嫌疑人就在我们之中,被告席上一名法警对另一名法警说。
(en)“Terrifying, the criminal suspect is right between the two of us,” said one bailiff to another in the defendant's dock.
Correct Humor Explanation“between us” can refer to “either one of us” or literally mean the actual physical position.
GPT-4o's Answer(zh)…暗示他们自己可能是犯罪嫌疑人…
(en)…it suggests the bailiffs themselves might be criminal suspects…
Failure ReasonsFail to address the literal meaning.
+ +# E More Error Cases + +We note that many examples here encompass multiple error types, highlighting the complexity of Chumor. + +Insufficient Contextual Understanding. LLMs may fail to ground their responses to the context when they explain the joke. For instance, in the example in Figure 9, "between us" typically means "either you or me", but it also has the literal meaning to indicate the person standing "between us", which is the right interpretation given that the two bailiffs are talking about the criminal. However, GPT-4o only reasons that "the criminal is either you or me" but fails to capture the literal meaning from the context. We hypothesize that in the pretraining corpus, "between us" most likely acquires the meaning of "either you or me" rather than the literal meaning in a scenario like this, which creates a bias that prevents the model from reasoning about the literal interpretation required for this specific explanation. + +Glyph-based Error. LLMs may fail to interpret the visual puns based on the shape or form of Chinese characters. Glyph-based humor in Chinese leverages its logographic writing system, where characters integrate both semantic and visual elements. Unlike the phonemic alphabet used in English, Chinese characters' pictorial and ideographic nature allows for visual puns in jokes (Daniels and Bright, 1996). In the example in Figure 10, "simplify" does not refer to simplifying the tally problem conceptually, but to simplifying the traditional Chinese characters to simplified Chinese + +Figure 9: Insufficient contextual understanding example. + +
Example(zh)我把電車難題簡化了,現在是电车难题。
(en)I simplified the trolley problem (in traditional Chinese), now it's the trolley problem (in simplified Chinese).
Correct Humor ExplanationThe joke does not actually simplify the scenario of the trolley problem; instead, it converts the traditional Chinese characters “電車難題” into the simplified Chinese characters “电车难题”.
GPT-4o's Answer(zh)...这个笑话的幽默之处在于通过将“電車難題”中的“難”字去掉,变成了“电车題”。
(en)...The humor of this joke lies in removing the character “hard” from “the hard trolley problem”, turning it into “the trolley problem”.
Failure ReasonsFail to interpret the visual puns in the shape or form of the characters.
+ +Figure 10: Glyph-based error example. + +
Example(zh) 小王订了张飞去北京的机票,给张飞省了一大笔钱。
(en) Xiaowang booked an airline ticket to Beijing, saving Zhang Fei a lot of money.
Correct +Humor +ExplanationThere are two ways to parse the first half of the sentence: +(1) 小王/订了/张/飞去/北京的/机票 XiaoWang/ booked/ a/ ticket to fly to Beijing. +(2) 小王/订了/张飞/去北京的/机票 XiaoWang / booked/ Zhangfei/ a ticket/ to Beijing.
Typically, people would interpret it in the first way, as Zhang Fei is a historical figure and does not appear in daily conversation. However, the second half of the sentence confirms that the second way of parsing turns out to be correct. Such contrast is hilarious.
GPT-4o's +Answer(zh) ...由于历史人物张飞已经不在世,所以给他省了一大笔钱。
(en) ... since the historical figure Zhang Fei is dead and cannot take a flight, the joke humorously suggests that he saved a lot of money.
Failure +ReasonsFail to parse textual elements in a sentence dynamically.
+ +Figure 11: Parsing error example. + +characters as the traditional Chinese characters are also termed as "complicated characters". However, LLMs struggle to reason such graphemic differences as there are no explicit connections between the textual meaning and visual representations of the glyphs. + +Parsing Error. LLMs often fail to parse sentences in multiple ways simultaneously, leading to difficulties in explaining jokes that require different parsing for the same sentence. In the example in Figure 11, the humor hinges on the ambiguity of + +the phrase "张飞", which can be interpreted either as part of a verb phrase implying "a ticket flying to Beijing" or as a proper noun, referring to the historical figure Zhang Fei. This ambiguity stems from the flexibility of the Chinese language, where each character can function independently as a word or combine with others to form new words or phrases. There are decades of research studying the problem of parsing Chinese (Sun and Jurafsky, 2004; Sun et al., 2009). Recently, researchers have proposed task-specific tokenization approaches that adapt the parsing process to better align with downstream tasks (Liu et al., 2021, 2023b). However, how to incorporate different ways of parsing at one time still remains challenging. + +
Example(zh) 五步蛇的危险性是李白的两倍。 (en) The danger of a five-step snake is twice that of Li Bai.
Correct Humor Explanation“Kill one in ten steps” is a line from a poem by the ancient Chinese poet Li Bai, suggesting that while it takes Li Bai ten steps to kill, a person can die within five steps of being bitten by the “Five-Step Snake”, hence it is twice as dangerous as Li Bai.
(zh) …李白的名字听起来像“二步”,所以“五步蛇”的危险性被戏称为是“二步”的两倍。
GPT-4o's Answer(en) …Li Bai's (pronounced as lee bye) name sounds similar to “two steps” (pronounced as r boo) in Chinese, so the danger of “Five-Step Snake” is humorously claimed to be twice that of “two steps”
Failure ReasonsFabricate ungrounded details or facts.
+ +Figure 12: Hallucinations example. + +Hallucinations. LLMs may fabricate ungrounded details or facts in joke explanations. For instance, in the explanation in Figure 12, GPT-4o claims that "Li Bai's name sounds similar to two steps", while "Li Bai" (pronounced as lee bye) does not sound like "two steps" (pronounced as $r$ boo). + +On the other hand, the correct explanation requires an understanding of a Chinese poem from Li Bai, "十步杀一人" (The warrior kills a person for every ten steps). This line praises the courage of the soldiers, but the joke deliberately portrays this as a characteristic of Li Bai. Therefore, compared to Li Bai who can kill a person in ten steps, a five-step snake, which can kill a person in five steps, is twice as dangerous as Li Bai. Such explanation requires LLMs to have a deep understanding of Chinese + +culture and reason over cultural references, posing a great challenge to current LLMs. Although recent works have made progress towards building LLMs beyond English (Du et al., 2024; Zhao et al., 2024), building an LLM that can comprehend such nuanced Chinese cultural terms can be extremely hard. + +
Example(zh)曹操于城楼上问夏侯惇:“你瞧到了什么。”夏侯惇说:“瞧到马岱。”(en) Cao Cao, from atop the city tower, asked Xia Houdun, “What did you see?” Xia Houdun replied, “I saw Ma Dai.”
Correct Humor ExplanationCao Cao, Xiahou Dun, and Ma Dai were historical figures from the Three Kingdoms period in ancient China. The phrase “saw Ma Dai” (pronounced as chuh dow ma die) not only literally refers to spotting Ma Dai but also plays on a similar pronunciation with the Japanese phrase “ちょっと待て” (chotto matte), which means to “wait a moment.”
GPT-4o's Answer(zh) ... 这个笑话的幽默之处在于利用了“马岱”这个名字的谐音梗,听起来像“妈的”,让人误以为夏侯惇在骂人。(en) ... a pun involving the name "Ma Dai," which sounds like "damn" in Chinese. It sounds like Xiahou Dun is swearing at Cao Cao.
Failure ReasonsFail to recognize information/elements across different languages.
+ +Cross-lingual Error. LLMs may fail to recognize elements or information across different languages. In the explanation in Figure 13, GPT-4o attempts to link the pronunciation of "Ma Dai" to other Chinese terms but fails to identify the similar pronunciations across the Chinese term "瞧到马岱" (pronounced as chuh dow ma die, meaning "saw Ma Dai") and the Japanese term "ちよと待て" (chotto matte, meaning "wait a moment"). Such cases require LLMs to connect pronunciations across languages, which may be rare in the LLMs' pre-training corpus and poses significant challenges to current LLMs. + +Overcritical. Figure 14 shows when the model overly criticizes an explanation, it focuses excessively on minor details, neglecting the major point and ultimately misjudging the explanation. The joke relies on common sense about life, death, and testing rather than cultural knowledge. Under the CoT prompting, the model recognizes that the explanation identifies the main punchlines but overly criticizes the lack of cultural context. + +Excessive Sensitivity. For $\mathrm{ERNIE}_{4}$ -turbo, in addition to errors across all the aforementioned categories, it also demonstrates excessive sensitivity to certain examples. Specifically, when content includes languages related to hate speech but used in non-harmful contexts, $\mathrm{ERNIE}_{4}$ -turbo refuses to provide an explanation. During our evalua + +Figure 13: Cross-lingual error example. + +
Example(zh)网上传疯了的超准测试题!20道题轻松判断出你有没有死!(en)The viral online quiz! 20 questions to easily determine if you're alive or dead!
Joke Explanation(zh)这个笑话的幽默之处在于它用夸张的方式提出了一个显而易见的问题——判断自己是否还活着,这本身就不需要测试。其次,标题中“超准测试题”和“轻松判断”这样的描述让人觉得荒谬可笑,因为活着与否是最基本的感知,不需要任何测试。
Model's Answer DP(en)The humor in this joke lies in its exaggerated premise of asking an obvious question—determining whether one is still alive, which inherently doesn't require any test. Additionally, the descriptions in the title, such as "super accurate quiz" and "easily determine," come across as absurdly funny because being alive is the most basic perception, requiring no testing at all.
Model's Answer CoT(zh)完全解释(en)Fully Explained
(zh)...最终判断:部分/没有解释理由:虽然解释准确地指出了笑话的主要笑点(夸张性和标题的矛盾),但它没有深入探讨这些元素为什么会产生幽默效果,也没有讨论潜在的文化或认知背景...
(en)...Final judgment: Partially/Not explained Reason: While the explanation accurately identifies the main punchlines of the joke (exaggeration and the contradiction in the title), it does not delve into why these elements create a humorous effect or discuss the potential cultural or cognitive context behind it...
+ +Figure 14: Overcritical example by Nematron $_{70\mathrm{B}}$ . The Nematron $_{70\mathrm{B}}$ model selects the correct answer in the DP prompting, but selects the incorrect answer due to being overly critical in the CoT prompting. + +tion, we observe this excessive sensitivity in the $\mathrm{ERNIE}_{4}$ -turbo's responses to humor related to medical ethics and political discussions. This suggests that correctly understanding the context and the language toxicity remains an open challenge (Zhang et al., 2024a). Such issues are particularly critical for humor explanation, as misclassifying non-toxic context can cause the responses to deviate from the intended humor. + +# F Prompts for DP and CoT in Chumor + +This section outlines the prompts used in Chumor to evaluate whether an explanation fully explains a joke. Two prompting strategies are adopted: Direct Prompting (DP) and Chain of Thought (CoT). Below are the details of each approach: + +# Direct Prompting (DP) + +你将看到一个笑话以及对这个笑话的解释。请判断这个解释是否完全解释了笑话。根据判断,选择“完全解释”或“部分/没有解释”,不需要解释为什么对或者不对。 + +笑话:[joke] + +笑话解释:[explanation] + +# Translation + +You will see a joke and an explanation of the joke. Please determine whether this explanation fully explains the joke. Based on your judgment, choose either "fully explain" or "partially/does not explain." You do not need to explain why it is correct or incorrect. + +Joke: [joke] + +Explanation: [explanation] + +The DP prompt is designed to encourage concise decision-making. It directly asks the model to evaluate the completeness of the explanation without requiring reasoning or justification. + +# Chain of Thought (CoT) + +你将看到一个笑话以及对这个笑话的解释。请逐步思考,写下过程并最终判断这个解释是否完全解释了笑话。根据判断,选择“完全解释”或“部分/没有解释”。 + +笑话:[joke] + +笑话解释:[explanation] + +# Translation + +You will see a joke and an explanation of the joke. Please think step by step, write down your reasoning process, and finally determine whether this explanation fully explains the joke. 
Based on your judgment, choose either "fully explains" or "partially/does not explain." + +Joke: [joke] + +Explanation: [explanation] + +The CoT prompt, in contrast, requires the model to reason step by step before reaching a conclusion. This approach aims to improve transparency by explicitly documenting the thought process behind the evaluation. + +# G Joke Type Distribution in Chumor + +We sampled 200 datapoints from Chumorto analyze the distribution of joke types, as shown in Figure 15. Note that a single joke may belong to + +![](images/326993549316df513bb9806d9d31515b487772d05a316df9b370565647db6178.jpg) +Figure 15: Distribution of Joke Types in 200 Sampled Datapoints. + +multiple categories, as it can exhibit features of more than one joke type. + +![](images/6df1b3c527229283c9d7de199eea2d6285b3eca5b0cdc18e78ecc11bf3f068e7.jpg) +H Detailed Results of Experiments +Figure 16: The Matthew's correlation coefficient of different models' test results in DP and CoT. + +For evaluation, we input each prompt into the model and collect its responses, comparing them to the labels in Chumor. A model's response is considered correct if it matches the reference label. If the model provides an incorrect answer or doesn't generate a response at all (due to safety protocols or + +filtering sensitive terms), it is marked as incorrect. Such scenario is rare, occurring only 21 times in our experiments, and exclusively with GLM-4plus. + +We highlight that CoT prompting at most cases degrade the models' performance on Chumor. As shown in Figure 16, only $\mathrm{Athene}_{70\mathrm{B}}$ achieves a significant improvement. However, this is offset by its poorest performance under DP prompting among the models. GPT-4o shows a slight improvement, with its MCC score increasing from 0.19 to 0.20. And all other eight models exhibit different degrees of performance decline. 
+ +![](images/6141e183d684833465b3a4c899603522d9953ad32c94768a7106eaf7f5d4f1c1.jpg) +Figure 17: CoT accuracy on different joke types $(\%)$ . + +
ModelDPCoT
MCCACC (%)FPR (%)FNR (%)MCCACC (%)FPR (%)FNR (%)
Yi34B0.1044.9597.240.210.0947.1789.305.44
Nemotron70B0.1956.3061.2620.870.1457.1740.2846.14
Athene70B0.0844.5997.830.280.1247.2691.102.89
ERNIE4-turbo0.2960.2959.8313.570.1145.1696.930.14
QWen2.572B0.1948.4690.670.690.1749.4586.913.31
Mistral123B0.2255.5669.2612.190.1651.1879.928.40
Gemini1.5-pro0.2454.0077.425.170.1960.3233.8147.31
GLM-4plus0.2455.5672.288.260.1458.1332.9653.44
GPT-4o0.1951.8780.026.680.2050.6485.003.03
GPT-4turbo0.2052.3279.286.610.1751.2780.876.96
+ +Table 4: Performance metrics for explanation evaluation including Matthew's correlation coefficient (MCC), accuracy (ACC), false positive rate (FPR), and false negative rate (FNR). + +
ModelSourceDPCoT
MCCACC(%)FPR(%)FNR(%)MCCACC(%)FPR(%)FNR(%)
Athene70BOverall0.0844.5997.830.280.1247.2691.102.89
ERNIE Bot0.1252.3897.150.000.1554.2491.132.13
GPT-4o0.0333.9098.510.860.0837.6791.064.50
ERNIE-turboOverall0.2960.2959.8313.570.1145.1696.930.14
ERNIE Bot0.2358.6478.145.990.1653.4794.830.10
GPT-4o0.2762.5441.3829.550.0433.7699.040.21
Gemini1.5-proOverall0.2454.0077.425.170.1960.3233.8147.31
ERNIE Bot0.2760.6674.135.890.2360.8728.6249.24
GPT-4o0.2144.8580.743.640.1759.5639.0443.25
GLM-4plusOverall0.2455.5672.288.260.1458.1332.9653.44
ERNIE Bot0.2559.8374.976.700.1557.5637.0647.61
GPT-4o0.2149.6869.5711.560.0658.9228.8365.74
GPT-4turboOverall0.2052.3279.286.610.1751.2780.876.96
ERNIE Bot0.2057.2580.995.990.2258.7576.147.72
GPT-4o0.1845.5677.557.920.1341.0185.645.35
GPT-4oOverall0.1951.8780.026.680.2050.6485.003.03
ERNIE Bot0.2157.8279.416.400.2458.0782.472.94
GPT-4o0.1643.7180.647.280.1540.4487.553.21
Nemotron70BOverall0.1956.3061.2620.870.1457.1740.2846.14
ERNIE Bot0.2260.6656.8122.540.1457.0439.1846.60
GPT-4o0.1850.3265.7417.340.1357.3641.3845.18
Mistral123BOverall0.2255.5669.2612.190.1651.1879.928.40
ERNIE Bot0.2561.1365.1513.600.1857.0479.737.61
GPT-4o0.2047.9073.409.210.1243.1480.1110.06
Qwen2.572BOverall0.1948.4690.670.690.1749.4586.913.31
ERNIE Bot0.1954.4592.610.300.1855.5488.072.54
GPT-4o0.1740.2388.721.500.1441.0885.744.93
Yi34BOverall0.1044.9597.240.210.0947.1789.305.44
ERNIE Bot0.1553.4294.720.300.1153.9988.385.28
GPT-4o0.0333.3399.790.000.0737.8190.215.78
+ +Table 5: Detailed performance metrics with source for explanation evaluation of Matthew's correlation coefficient (MCC), accuracy (ACC), false positive rate (FPR), and false negative rate (FNR). + +
ModelPromptingCross-lingualGlyph-basedHomophonemicPun-basedSituationalCultural
Athene70BDP0.000.0030.0044.0061.0042.00
CoT0.0025.0030.0044.0059.0043.00
ERNIE4-turboDP50.0050.0060.0061.0070.0063.00
CoT0.000.0030.0043.0059.0042.00
Gemini1.5-proDP50.0050.0055.0063.0067.0061.00
CoT50.0075.0070.0061.0066.0069.00
GLM-4plusDP50.0025.0065.0060.0069.0060.00
CoT50.00100.0075.0064.0060.0061.00
GPT-4turboDP50.0025.0040.0057.0067.0055.00
CoT50.0025.0045.0054.0062.0056.00
GPT-4oDP0.0050.0035.0049.0063.0054.00
CoT0.0050.0035.0050.0062.0053.00
Nemotron70BDP50.0050.0065.0063.0062.0060.00
CoT100.00100.0065.0066.0060.0072.00
Mistral123BDP50.0050.0055.0061.0065.0061.00
CoT50.000.0040.0053.0066.0055.00
Qwen2.572BDP0.0050.0035.0047.0064.0051.00
CoT0.0050.0040.0053.0063.0053.00
Yi34BDP0.000.0030.0043.0060.0044.00
CoT0.0025.0040.0049.0063.0052.00
+ +Table 6: Performance metrics by joke type for explanation evaluation accuracy(%) \ No newline at end of file diff --git a/2025/Chumor 2.0_ Towards Better Benchmarking Chinese Humor Understanding from (Ruo Zhi Ba)/images.zip b/2025/Chumor 2.0_ Towards Better Benchmarking Chinese Humor Understanding from (Ruo Zhi Ba)/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..d105d3f819e95290063d7a249282a5c9cab320c9 --- /dev/null +++ b/2025/Chumor 2.0_ Towards Better Benchmarking Chinese Humor Understanding from (Ruo Zhi Ba)/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79cbd84c0370412138750dee0a2c00e06d928e1aeee9b375d3354567859c8110 +size 1748890 diff --git a/2025/Chumor 2.0_ Towards Better Benchmarking Chinese Humor Understanding from (Ruo Zhi Ba)/layout.json b/2025/Chumor 2.0_ Towards Better Benchmarking Chinese Humor Understanding from (Ruo Zhi Ba)/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..1fabd8a5771c62bd1262844cc8e4397bb380bddc --- /dev/null +++ b/2025/Chumor 2.0_ Towards Better Benchmarking Chinese Humor Understanding from (Ruo Zhi Ba)/layout.json @@ -0,0 +1,11716 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 110, + 75, + 486, + 110 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 75, + 486, + 110 + ], + "spans": [ + { + "bbox": [ + 110, + 75, + 486, + 110 + ], + "type": "text", + "content": "Chumor 2.0: Towards Better Benchmarking Chinese Humor Understanding from 弱智吧 (Ruo Zhi Ba)" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 63, + 126, + 531, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 126, + 531, + 158 + ], + "spans": [ + { + "bbox": [ + 63, + 126, + 531, + 158 + ], + "type": "text", + "content": "Ruiqi He Yushu He Longju Bai Jiarui Liu Zhenjie Sun Zenghao Tang He Wang Hanchen Xia Rada Mihalcea Naihao Deng" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 79, + 159, + 
515, + 188 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 159, + 515, + 188 + ], + "spans": [ + { + "bbox": [ + 79, + 159, + 515, + 188 + ], + "type": "inline_equation", + "content": "^{\\text{©}}" + }, + { + "bbox": [ + 79, + 159, + 515, + 188 + ], + "type": "text", + "content": "University of Michigan Carnegie Mellon University Shanghai Jiaotong University {ruiqih, dnaiahao}@umich.edu" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 155, + 219, + 204, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 219, + 204, + 232 + ], + "spans": [ + { + "bbox": [ + 155, + 219, + 204, + 232 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 84, + 241, + 274, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 241, + 274, + 553 + ], + "spans": [ + { + "bbox": [ + 84, + 241, + 274, + 553 + ], + "type": "text", + "content": "Existing humor datasets and evaluations predominantly focus on English, leaving limited resources for culturally nuanced humor in non-English languages like Chinese. To address this gap, we construct Chumor, the first and the largest Chinese humor explanation dataset. Chumor is sourced from Ruo Zhi Ba (RZB, 弱智吧), a Chinese Reddit-like platform known for sharing intellectually challenging and culturally specific jokes. We test ten LLMs through direct and chain-of-thought prompting, revealing that Chumor poses significant challenges to existing LLMs, with their accuracy slightly above random and far below human. In addition, our analysis highlights that human-annotated humor explanations are significantly better than those generated by GPT-4o and ERNIE" + }, + { + "bbox": [ + 84, + 241, + 274, + 553 + ], + "type": "inline_equation", + "content": "_{4\\text{-turbo}}" + }, + { + "bbox": [ + 84, + 241, + 274, + 553 + ], + "type": "text", + "content": ". 
We release Chumor at https://huggingface.co/datasets/MichiganNLP/Chumor, our project page is at https://github.com/MichiganNLP/Chumor-2.0, our leaderboard is at https://huggingface.co/spaces/MichiganNLP/Chumor-leaderboard, and our codebase is at https://github.com/MichiganNLP/Chumor-2.0." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 560, + 155, + 573 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 560, + 155, + 573 + ], + "spans": [ + { + "bbox": [ + 68, + 560, + 155, + 573 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 581, + 292, + 756 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 581, + 292, + 756 + ], + "spans": [ + { + "bbox": [ + 67, + 581, + 292, + 756 + ], + "type": "text", + "content": "Humor is an intrinsic human trait that touches the core of our social and emotional lives, making it a rich field of study across various disciplines (Lefcourt, 2001; Mihalcea and Strapparava, 2005; Gelkopf et al., 2011; Hessel et al., 2023). With the advent of Large Language Models (LLMs), researchers have evaluated LLMs' performance on diverse tasks (Liu et al., 2023a; Deng et al., 2024; Wu et al., 2023) and observed LLMs' extraordinary performance on many (Zhang et al., 2024b). In contrast, researchers have observed that LLMs still fail to understand humor (Ghanadian et al., 2023). However, with all these studies on humor," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 220, + 526, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 220, + 526, + 287 + ], + "spans": [ + { + "bbox": [ + 302, + 220, + 526, + 287 + ], + "type": "text", + "content": "most evaluations remain in English (Radev et al., 2016; Hasan et al., 2019). This presents a significant gap, particularly for non-English languages like Chinese, where culturally nuanced humor understanding is unexamined." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 288, + 526, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 288, + 526, + 476 + ], + "spans": [ + { + "bbox": [ + 302, + 288, + 526, + 476 + ], + "type": "text", + "content": "In this paper, we try to bridge this gap by constructing Chumor, a funny and challenging Chinese humor understanding dataset sourced from Ruo Zhi Ba (RZB, \"弱智吧\" in Chinese), a Chinese version of Reddit platform known for sharing intellectually challenging and culturally specific jokes. This platform provides a set of unique Chinese jokes that incorporate the subtleties and intricacies of Chinese humor. Table 1 provides examples of the jokes from RZB. In addition, Bai et al. (2024) reveal that tuning LLMs on RZB data yields the best performance on Chinese reasoning tasks compared to other data sources, highlighting the significant value of jokes from RZB." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 478, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 478, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 478, + 526, + 775 + ], + "type": "text", + "content": "Unlike existing datasets that focus on tasks such as humor detection, punchline identification, or humor generation, Chumor addresses the challenge of humor explanation. This involves not just identifying humor but understanding the reasoning behind it, a task that requires both linguistic and cultural knowledge. Specifically, Chumor tasks the LLMs with determining whether an explanation fully explains the joke. We source the explanations from GPT-4o and ERNIE" + }, + { + "bbox": [ + 302, + 478, + 526, + 775 + ], + "type": "inline_equation", + "content": "_{4\\text{-turb}}" + }, + { + "bbox": [ + 302, + 478, + 526, + 775 + ], + "type": "text", + "content": ", and have the entire dataset manually annotated by five native Chinese speakers. 
We evaluate ten LLMs from various model families, and reveal that all models perform poorly, lagging significantly behind humans on Chumor. We observe that chain-of-thought prompting does not necessarily improve models performance and can sometimes confuse their reasoning process. In addition, we conduct a case study in which one of the authors annotates the entire dataset, followed by A/B testing conducted by six native Chinese speakers to compare explanations from GPT-4o versus human, and" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 80, + 762, + 213, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 762, + 213, + 774 + ], + "spans": [ + { + "bbox": [ + 80, + 762, + 213, + 774 + ], + "type": "text", + "content": "†Corresponding author of this work." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 282, + 780, + 314, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 282, + 780, + 314, + 791 + ], + "spans": [ + { + "bbox": [ + 282, + 780, + 314, + 791 + ], + "type": "text", + "content": "21799" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 131, + 795, + 463, + 819 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 795, + 463, + 819 + ], + "spans": [ + { + "bbox": [ + 131, + 795, + 463, + 819 + ], + "type": "text", + "content": "Findings of the Association for Computational Linguistics: ACL 2025, pages 21799-21818 July 27 - August 1, 2025 ©2025 Association for Computational Linguistics" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 70, + 68, + 524, + 312 + ], + "blocks": [ + { + "bbox": [ + 70, + 68, + 524, + 312 + ], + "lines": [ + { + "bbox": [ + 70, + 68, + 524, + 312 + ], + "spans": [ + { + "bbox": [ + 70, + 68, + 524, + 312 + ], + "type": "table", + "html": "
Cultural
Desc.Require knowledge of specific historical, social, or linguistic contexts.
Ex.(zh)小明在正月接发竟导致舅舅复活。 (en) Xiaoming got hair extensions during the first lunar month, which astonishingly brought his uncle back to life.
Situational
Desc.Involve humor derived from specific contexts, irony, or narrative setups.
Ex.(zh)真可怕, 犯罪嫌疑人就在我们之中,被告席上一名法警对另一名法警说。 (en)“Terrifying, the criminal suspect is right between the two of us,” said one bailiff to another in the defendant's dock.
Pun-based
Desc.Build on linguistic ambiguity and wordplay, require models to identify dual meanings.
Ex.(zh)你可以在steam上找到GTA,所以水是DNA。 (en) You can find GTA on Steam, so water is DNA.
Homophobic
Desc.Rely on phonetic similarities between words or phrases to create humor.
Ex.(zh)家里的猪油没了,小明只能把植物油倒快点当猪油用了。 (en) With the lard gone, Xiaoming had to pour the vegetable oil quickly to use it like lard.
Glyph-based
Desc.Exploit the structural or visual elements of Chinese characters to create humor.
Ex.(zh)我把電串難題简化了,现在是电车难题。 (en) I simplified the trolley problem (in traditional Chinese), now it's the trolley problem (in simplified Chinese).
Cross-lingual
Desc.Involve humor derived from linguistic or phonetic interplay across multiple languages.
Ex.(zh)曹操于城楼上问夏侯惇:“你瞧到了什么。”夏侯惇说:“瞧到马岱。” (en) Cao Cao, from atop the city tower, asked Xia Houdun, “What did you see?” Xia Houdun replied, “I saw Ma Dai.”
", + "image_path": "06e7b6d86f185e1a78ecc5847f0e155e0b76ce83047bf818ac8e09a4b96a9106.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 318, + 525, + 344 + ], + "lines": [ + { + "bbox": [ + 67, + 318, + 525, + 344 + ], + "spans": [ + { + "bbox": [ + 67, + 318, + 525, + 344 + ], + "type": "text", + "content": "Table 1: Different types of jokes. Descriptions (Desc.) explain humor mechanisms. Examples (Ex.) illustrate each category. The corresponding explanations can be found in the referenced figures from the rightmost column." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 364, + 290, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 364, + 290, + 445 + ], + "spans": [ + { + "bbox": [ + 67, + 364, + 290, + 445 + ], + "type": "text", + "content": "ERNIE" + }, + { + "bbox": [ + 67, + 364, + 290, + 445 + ], + "type": "inline_equation", + "content": "_{4}" + }, + { + "bbox": [ + 67, + 364, + 290, + 445 + ], + "type": "text", + "content": "-turbo versus human. 
Our results indicate that human-annotated joke explanations are significantly better than those produced by GPT-4o or ERNIE" + }, + { + "bbox": [ + 67, + 364, + 290, + 445 + ], + "type": "inline_equation", + "content": "_{4}" + }, + { + "bbox": [ + 67, + 364, + 290, + 445 + ], + "type": "text", + "content": "-turbo (Figure 4), with LLMs yielding winning rates of only " + }, + { + "bbox": [ + 67, + 364, + 290, + 445 + ], + "type": "inline_equation", + "content": "6.2\\%" + }, + { + "bbox": [ + 67, + 364, + 290, + 445 + ], + "type": "text", + "content": " for GPT-4o and " + }, + { + "bbox": [ + 67, + 364, + 290, + 445 + ], + "type": "inline_equation", + "content": "5.3\\%" + }, + { + "bbox": [ + 67, + 364, + 290, + 445 + ], + "type": "text", + "content": " for ERNIE" + }, + { + "bbox": [ + 67, + 364, + 290, + 445 + ], + "type": "inline_equation", + "content": "_{4}" + }, + { + "bbox": [ + 67, + 364, + 290, + 445 + ], + "type": "text", + "content": "-turbo compared to humans." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 78, + 447, + 274, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 447, + 274, + 459 + ], + "spans": [ + { + "bbox": [ + 78, + 447, + 274, + 459 + ], + "type": "text", + "content": "In summary, our contributions are threefold:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 472, + 291, + 685 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 69, + 472, + 289, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 472, + 289, + 513 + ], + "spans": [ + { + "bbox": [ + 69, + 472, + 289, + 513 + ], + "type": "text", + "content": "1. We construct Chumor, a funny and challenging Chinese humor understanding dataset, which is the largest Chinese humor explanation dataset." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 524, + 290, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 524, + 290, + 592 + ], + "spans": [ + { + "bbox": [ + 68, + 524, + 290, + 592 + ], + "type": "text", + "content": "2. We evaluate ten LLMs on Chumor and reveal the significant challenges Chumor possesses. We highlight that the best accuracy achieved by LLMs is " + }, + { + "bbox": [ + 68, + 524, + 290, + 592 + ], + "type": "inline_equation", + "content": "60.3\\%" + }, + { + "bbox": [ + 68, + 524, + 290, + 592 + ], + "type": "text", + "content": ", significantly lower than human's score of " + }, + { + "bbox": [ + 68, + 524, + 290, + 592 + ], + "type": "inline_equation", + "content": "78.3\\%" + }, + { + "bbox": [ + 68, + 524, + 290, + 592 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 603, + 291, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 603, + 291, + 685 + ], + "spans": [ + { + "bbox": [ + 68, + 603, + 291, + 685 + ], + "type": "text", + "content": "3. We demonstrate that chain-of-thought prompting can hurt LLM's performance in humor reasoning, and that human-annotated joke explanations are significantly better than those produced by GPT-4o and ERNIE" + }, + { + "bbox": [ + 68, + 603, + 291, + 685 + ], + "type": "inline_equation", + "content": "_{4\\text{-turbo}}" + }, + { + "bbox": [ + 68, + 603, + 291, + 685 + ], + "type": "text", + "content": ", urging future research on culturally specific humor understanding." 
+ } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 698, + 166, + 711 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 698, + 166, + 711 + ], + "spans": [ + { + "bbox": [ + 67, + 698, + 166, + 711 + ], + "type": "text", + "content": "2 Related Works" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 721, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 721, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 721, + 291, + 775 + ], + "type": "text", + "content": "Humor Datasets. Humor analysis in natural language processing (NLP) encompasses a wide range of tasks, each focused on different aspects of humor. For instance, researchers have proposed datasets" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 364, + 526, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 364, + 526, + 676 + ], + "spans": [ + { + "bbox": [ + 302, + 364, + 526, + 676 + ], + "type": "text", + "content": "such as “16000 One-Liners” (Mihalcea and Strapparava, 2005), “Pun of the Day” (Yang et al., 2015), and “Ted Laughter” (Chen and Lee, 2017) focused on humor detection to determine whether a given text is humorous or not. Datasets such as “Big Bang Theory” (Bertero and Fung, 2016) aim at pinpointing the punchline in a joke. Tasks for assessing humor intensity include humor level rating, comparison, and ranking. For example, datasets like HumorNorm (Engelthaler and Hills, 2018) and #Hashtag Wars (Potash et al., 2017) quantify humor scores and compare comedic elements, while UR-Funny ranks punchlines based on their perceived impact. 
Datasets such as “Humicroedit” (Hossain et al., 2019), “" + }, + { + "bbox": [ + 302, + 364, + 526, + 676 + ], + "type": "inline_equation", + "content": "C^3" + }, + { + "bbox": [ + 302, + 364, + 526, + 676 + ], + "type": "text", + "content": "” (Wang et al., 2022), and “Talk-Funny” (Chen et al., 2024) focus on humor generation, the task of generating or rewriting humorous texts. In addition, we present a comprehensive overview of the existing datasets related to humor in Table 2. We highlight that most existing datasets are in English. Chinese humor, on the other hand, is less explored. Our dataset, Chumor is the first humor explanation dataset in Chinese." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 694, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 694, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 694, + 526, + 775 + ], + "type": "text", + "content": "Culturally Specific Datasets. Recent works underscore the challenges of culturally specific reasoning in LLMs (Shen et al., 2024; AlKhamissi et al., 2024; Pawar et al., 2024; Vayani et al., 2024). These challenges stem from the overrepresentation of Western-centric knowledge and translation ar" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "text", + "content": "21800" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 69, + 68, + 287, + 354 + ], + "blocks": [ + { + "bbox": [ + 69, + 68, + 287, + 354 + ], + "lines": [ + { + "bbox": [ + 69, + 68, + 287, + 354 + ], + "spans": [ + { + "bbox": [ + 69, + 68, + 287, + 354 + ], + "type": "table", + "html": "
DatasetSourcesLan.#(k)Tasks
One Liners (2005)Weben16HR
Pun of the Day (2015)Weben4.8HR PD
Big Bang Theory (2016)TVen44PD
Ted Laughter (2017)TEDen9.4HR PD
#HashtagWars (2017)TVen13HC
HumorNorm (2018)\\( CS^† \\)en5HC
UR-FUNNY (2019)TEDen17PD
Humicroedit (2019)Redditen15HG
rJokes (2020)Redditen57HC
Memotion (2020)Memesen9.8HC
MUMOR (2021)TVen zh30HR
NYT-Captions (2023)NYTen0.7 2.6HE HC
\\( C^3 \\) (2022)Bookszh9.3HG
TalkFunny (2024)Appszh4.1HG
TCHD (2023)-zh26HR HC PD
TTWS (2019)Bookszh9.1PD
CHM (2020)Apps Webzh3.3HC
Memeplate (2022)Apps Webzh5.2HC
Chumor (us)Webzh3.3HE
", + "image_path": "311d87fe6c6608fd64bf6fb5b29a7b6a5c1a0c90713e0bb0c6f3d448674e59e7.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 361, + 291, + 423 + ], + "lines": [ + { + "bbox": [ + 67, + 361, + 291, + 423 + ], + "spans": [ + { + "bbox": [ + 67, + 361, + 291, + 423 + ], + "type": "text", + "content": "Table 2: Existing datasets related to humor. For the shorthands in the table, abbreviations represent the following tasks, HR: humor recognition; PD: punchline detection; HC: humor comparison; HG: humor generation; HE: humor explanation †: Crowd-source." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 444, + 291, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 444, + 291, + 540 + ], + "spans": [ + { + "bbox": [ + 67, + 444, + 291, + 540 + ], + "type": "text", + "content": "tifacts, which limit the fairness and effectiveness of multilingual evaluations (Mihalcea et al., 2024). Researchers have proposed various culturally specific datasets such as Global-MMLU (Singh et al., 2024) to evaluate LLMs' cultural knowledge. Chumor adds to this line of effort as it involves rich knowledge specific to Chinese culture." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 553, + 174, + 565 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 553, + 174, + 565 + ], + "spans": [ + { + "bbox": [ + 67, + 553, + 174, + 565 + ], + "type": "text", + "content": "3 Chumor Dataset" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 66, + 576, + 291, + 752 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 576, + 291, + 752 + ], + "spans": [ + { + "bbox": [ + 66, + 576, + 291, + 752 + ], + "type": "text", + "content": "Data Collection. We construct our dataset by including RZB jokes from \"Best Annual Threads\" between 2018 and 2021 that have been previously crawled†. 
In addition, we directly collect all threads in the \"Moderator's Recommendation\" section from RZB. Each thread in RZB consists of \"标题\"(title),\"一楼\"(content), and several \"跟帖\"(follow-up posts). For threads from Best Annual Threads, the jokes are listed in the follow-up posts, which are selected by the forum moderator. For threads from Moderator's Recommendation, the jokes consist of the title and the content of each thread. We remove the content if it repeats the title." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 302, + 71, + 526, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 526, + 260 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 526, + 260 + ], + "type": "text", + "content": "Data Cleaning. We store both the title and the content of the raw data. However, due to the posting restrictions of the platform requiring non-empty content, many posts contain meaningless placeholder texts such as “:”, “!”, “0”, “RT”, and others. We automatically identify and remove these patterns, and only keep the title which is the joke itself. Due to the length limitations on the original platform, many post titles are truncated from the beginning parts of the content. We identify these instances and replace the truncated title with the complete content to get the joke. We also remove duplicates that appear both in the “Moderator’s Recommendation” and the “Best Annual Posts”." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 261, + 526, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 261, + 526, + 327 + ], + "spans": [ + { + "bbox": [ + 302, + 261, + 526, + 327 + ], + "type": "text", + "content": "We manually remove the threads related to forum management and rules, threads that include excessively offensive content, threads with incomplete content, and threads that focus more on philosophical insight rather than humor." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 336, + 526, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 336, + 526, + 526 + ], + "spans": [ + { + "bbox": [ + 302, + 336, + 526, + 526 + ], + "type": "text", + "content": "Humor Explanation Classification. We design a humor explanation classification task that can be easily used to test LLMs' capabilities in humor understanding. Specifically, we use two LLMs, GPT-4o and ERNIE" + }, + { + "bbox": [ + 302, + 336, + 526, + 526 + ], + "type": "inline_equation", + "content": "_{4}" + }, + { + "bbox": [ + 302, + 336, + 526, + 526 + ], + "type": "text", + "content": "-turbo to generate explanations for our collected jokes. We manually annotate the generated explanations as either \"fully explain the joke\" (good) or \"partially explain or not explain the joke\" (bad) based on a majority vote among five of the authors who are native Chinese speakers. Each joke, along with its explanation, forms an individual instance in Chumor, leading to a total of 3,339 instances. Among these, 1,454 items are labeled as good and 1,887 as bad explanations." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 533, + 525, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 533, + 525, + 560 + ], + "spans": [ + { + "bbox": [ + 302, + 533, + 525, + 560 + ], + "type": "text", + "content": "Data Examples from Chumor. We present examples from Chumor in Table 3." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 567, + 525, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 567, + 525, + 608 + ], + "spans": [ + { + "bbox": [ + 302, + 567, + 525, + 608 + ], + "type": "text", + "content": "Humor Categorization in Chumor. We categorize the jokes in RZB into six types in Table 1, with an example provided for each type."
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 618, + 390, + 632 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 618, + 390, + 632 + ], + "spans": [ + { + "bbox": [ + 302, + 618, + 390, + 632 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 640, + 526, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 640, + 526, + 774 + ], + "spans": [ + { + "bbox": [ + 302, + 640, + 526, + 774 + ], + "type": "text", + "content": "Models. We test ten LLMs, five from the open-source LLM families and five from the closed-source LLM families, all capable of handling Chinese. Specifically, we include the open-source LLMs of " + }, + { + "bbox": [ + 302, + 640, + 526, + 774 + ], + "type": "inline_equation", + "content": "\\mathbf{Yi}_{34\\mathbf{B}}" + }, + { + "bbox": [ + 302, + 640, + 526, + 774 + ], + "type": "text", + "content": " (01.ai, 2024) from 01.AI, Nemotron70B (NVIDIA, 2024) from NVIDIA, Athene70B (Nexusflow, 2024) from Nexusflow, Qwen2.572B (Qwen, 2024) from Alibaba, Mistral123B (Mistral.ai, 2024) from Mistral AI, alongside the closed-source LLMs of" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 80, + 761, + 221, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 761, + 221, + 774 + ], + "spans": [ + { + "bbox": [ + 80, + 761, + 221, + 774 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 80, + 761, + 221, + 774 + ], + "type": "text", + "content": " https://github.com/Leymore/ruozhiba" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 283, + 780, + 311, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 780, + 311, + 791 + ], + "spans": [ + { + "bbox": [ + 283, + 780, + 311, + 791 + ], + "type": "text", + "content": "21801" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 595, + 841 + 
], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 70, + 68, + 523, + 221 + ], + "blocks": [ + { + "bbox": [ + 70, + 68, + 523, + 221 + ], + "lines": [ + { + "bbox": [ + 70, + 68, + 523, + 221 + ], + "spans": [ + { + "bbox": [ + 70, + 68, + 523, + 221 + ], + "type": "table", + "html": "
JokeExplanationLabelSource
(zh) “你自己掰了一千吨玉米?”“哈哈我瞎掰的”(zh) 这个笑话的幽默之处在于对话中的双关语和出人意料的回答。“掰了一千吨玉米”看似是一个惊人的壮举,而“哈哈我瞎掰的”则瞬间揭露了这其实是一个夸张的玩笑,这种巧妙的反转和语言的戏谑带来了幽默感。goodERNIE4-turbo
(en) “You shucked a thousand tons of corn all by yourself?”“Haha, let's just say there's been plenty of shuckin' going on.”(en) The humor in this joke lies in the pun and the unexpected response in the conversation. “Shucked a thousand tons of corn” seems like an extraordinary achievement, but the reply, ‘haha, let's just say there's been plenty of shuckin' going on,” instantly reveals it to be an exaggerated joke. This clever twist and playful use of language create the comedic effect.
(zh) 小明突然出现在我面前以至于我突然出现在小明面前。(zh) 这个笑话的幽默之处在于它利用了语言的对称性和意外性。小明突然出现导致叙述者的惊讶反应,而叙述者的惊讶反应又反过来让小明感到意外,形成了一个有趣的循环。badGPT-4o
(en) Xiaoming suddenly appeared in front of me, causing me to suddenly appear in front of him.(en) The humor in this joke lies in its use of linguistic symmetry and unexpectedness. Xiao Ming's sudden appearance triggers a surprised reaction from the narrator, which in turn surprises Xiao Ming, creating an amusing loop.
", + "image_path": "2c3b843aeb95b336940e223eafe1b6d59325b6430b91c765731cba7115b7b969.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 236, + 526, + 297 + ], + "lines": [ + { + "bbox": [ + 67, + 236, + 526, + 297 + ], + "spans": [ + { + "bbox": [ + 67, + 236, + 526, + 297 + ], + "type": "text", + "content": "Table 3: Examples from Chumor. The second example's explanation is bad because the joke does not \"creating an amusing loop\". Instead, it relies on linguistic symmetry and the use of a straightforward fact to subvert expectations. The audience anticipates an unexpected outcome due to the setup, but the latter part \"suddenly appear in front of him\" flips the perspective by stating the straightforward fact that because Xiao Ming is in front of the person so the person is in front of Xiao Ming too." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 317, + 290, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 317, + 290, + 399 + ], + "spans": [ + { + "bbox": [ + 67, + 317, + 290, + 399 + ], + "type": "text", + "content": "Gemini" + }, + { + "bbox": [ + 67, + 317, + 290, + 399 + ], + "type": "inline_equation", + "content": "_{1.5-pro}" + }, + { + "bbox": [ + 67, + 317, + 290, + 399 + ], + "type": "text", + "content": " (Google, 2024) from Google, GLM" + }, + { + "bbox": [ + 67, + 317, + 290, + 399 + ], + "type": "inline_equation", + "content": "_{4\\text{plus}}" + }, + { + "bbox": [ + 67, + 317, + 290, + 399 + ], + "type": "text", + "content": " (BigModel, 2024) from Tsinghua University, GPT-4" + }, + { + "bbox": [ + 67, + 317, + 290, + 399 + ], + "type": "inline_equation", + "content": "_{\\text{turbo}}" + }, + { + "bbox": [ + 67, + 317, + 290, + 399 + ], + "type": "text", + "content": ", GPT-4o (OpenAI, 2023, 2024) from OpenAI, ERNIE" + }, + { + "bbox": [ + 67, + 317, + 290, + 399 + ], + "type": "inline_equation", + 
"content": "_{4\\text{turbo}}" + }, + { + "bbox": [ + 67, + 317, + 290, + 399 + ], + "type": "text", + "content": " (Baidu, 2024) from Baidu. For all the open-source LLMs, we use the instruction-tuned version in our evaluation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 408, + 289, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 408, + 289, + 449 + ], + "spans": [ + { + "bbox": [ + 67, + 408, + 289, + 449 + ], + "type": "text", + "content": "Evaluation Methods. We evaluate these LLMs using two prompting methods: direct prompting (DP) by" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 74, + 460, + 182, + 473 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 460, + 182, + 473 + ], + "spans": [ + { + "bbox": [ + 74, + 460, + 182, + 473 + ], + "type": "text", + "content": "Direct Prompting (DP)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 73, + 486, + 284, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 486, + 284, + 540 + ], + "spans": [ + { + "bbox": [ + 73, + 486, + 284, + 540 + ], + "type": "text", + "content": "你将看到一个笑话以及对这个笑话的解释。请判断这个解释是否完全解释了笑话。根据判断,选择“完全解释”或“部分/没有解释”,不需要解释为什么对或者不对。" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 74, + 541, + 135, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 541, + 135, + 555 + ], + "spans": [ + { + "bbox": [ + 74, + 541, + 135, + 555 + ], + "type": "text", + "content": "笑话:[joke]" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 74, + 555, + 189, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 555, + 189, + 568 + ], + "spans": [ + { + "bbox": [ + 74, + 555, + 189, + 568 + ], + "type": "text", + "content": "笑话解释:[explanation]" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 151, + 570, + 207, + 580 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 570, + 207, + 580 + ], + "spans": [ + { + "bbox": [ + 
151, + 570, + 207, + 580 + ], + "type": "text", + "content": "Translation" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 73, + 581, + 285, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 581, + 285, + 661 + ], + "spans": [ + { + "bbox": [ + 73, + 581, + 285, + 661 + ], + "type": "text", + "content": "You will see a joke and an explanation of the joke. Please determine whether this explanation fully explains the joke. Based on your judgment, choose either \"fully explain\" or \"partially/does not explain.\" You do not need to explain why it is correct or incorrect." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 74, + 664, + 128, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 664, + 128, + 677 + ], + "spans": [ + { + "bbox": [ + 74, + 664, + 128, + 677 + ], + "type": "text", + "content": "Joke: [joke]" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 74, + 677, + 192, + 690 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 677, + 192, + 690 + ], + "spans": [ + { + "bbox": [ + 74, + 677, + 192, + 690 + ], + "type": "text", + "content": "Explanation: [explanation]" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 708, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 708, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 708, + 291, + 775 + ], + "type": "text", + "content": "and chain-of-thought (CoT) prompting (Wei et al., 2022) by adding the phrase “请逐步思考,写下过程”“Please think step by step, write down your reasoning process” before determining the label. Appendix F provides the complete prompts. 
We cal" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 301, + 318, + 526, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 318, + 526, + 480 + ], + "spans": [ + { + "bbox": [ + 301, + 318, + 526, + 480 + ], + "type": "text", + "content": "culate accuracy scores as part of our evaluation. In addition, we provide the false positive rate (FPR), false negative rate (FNR), and Matthews Correlation Coefficient (MCC) in Appendix H in Table 4. The MCC score considers true positives, true negatives, false positives, and false negatives, providing a score between -1 and +1. A score of +1 indicates perfect predictions, 0 reflects random guessing, and -1 means complete disagreement. The best MCC score achieved by LLMs is 0.29, which is close to random guessing, and is significantly lower than the human average of 0.60." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 490, + 445, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 490, + 445, + 502 + ], + "spans": [ + { + "bbox": [ + 302, + 490, + 445, + 502 + ], + "type": "text", + "content": "5 Results and Discussions" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 511, + 525, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 511, + 525, + 564 + ], + "spans": [ + { + "bbox": [ + 302, + 511, + 525, + 564 + ], + "type": "text", + "content": "Overall Model Performance. Figure 1 presents the accuracy of different LLMs on Chumor in DP and CoT settings. Appendix H presents additional results and analysis." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 565, + 525, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 565, + 525, + 686 + ], + "spans": [ + { + "bbox": [ + 302, + 565, + 525, + 686 + ], + "type": "text", + "content": "Overall, we observe that all models perform poorly on Chinese humor comprehension, with accuracy scores ranging between " + }, + { + "bbox": [ + 302, + 565, + 525, + 686 + ], + "type": "inline_equation", + "content": "44.6\\%" + }, + { + "bbox": [ + 302, + 565, + 525, + 686 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 302, + 565, + 525, + 686 + ], + "type": "inline_equation", + "content": "60.3\\%" + }, + { + "bbox": [ + 302, + 565, + 525, + 686 + ], + "type": "text", + "content": ". ERNIE" + }, + { + "bbox": [ + 302, + 565, + 525, + 686 + ], + "type": "inline_equation", + "content": "_{4\\text{-}\\text{turbo}}" + }, + { + "bbox": [ + 302, + 565, + 525, + 686 + ], + "type": "text", + "content": " and Gemini" + }, + { + "bbox": [ + 302, + 565, + 525, + 686 + ], + "type": "inline_equation", + "content": "_{1.5\\text{-}\\text{pro}}" + }, + { + "bbox": [ + 302, + 565, + 525, + 686 + ], + "type": "text", + "content": " achieve the highest accuracy of " + }, + { + "bbox": [ + 302, + 565, + 525, + 686 + ], + "type": "inline_equation", + "content": "60.3\\%" + }, + { + "bbox": [ + 302, + 565, + 525, + 686 + ], + "type": "text", + "content": ", and are just 10 points above the random baseline and far below human performance of " + }, + { + "bbox": [ + 302, + 565, + 525, + 686 + ], + "type": "inline_equation", + "content": "78.3\\%" + }, + { + "bbox": [ + 302, + 565, + 525, + 686 + ], + "type": "text", + "content": ", highlighting the difficulty of Chumor and the limitations of these LLMs in understanding Chinese humor." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 302, + 694, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 694, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 694, + 525, + 775 + ], + "type": "text", + "content": "Error Analysis by Joke Type. To better understand how LLMs perform on each joke type listed in Table 1, we sample 200 jokes for error analysis. Figure 2 and Figure 17 in Appendix H present the results. The distribution of joke types can be found in Appendix G Figure 15." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "text", + "content": "21802" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 69, + 68, + 289, + 343 + ], + "blocks": [ + { + "bbox": [ + 69, + 68, + 289, + 343 + ], + "lines": [ + { + "bbox": [ + 69, + 68, + 289, + 343 + ], + "spans": [ + { + "bbox": [ + 69, + 68, + 289, + 343 + ], + "type": "image", + "image_path": "c82242bc9d064b5b1e48e0527762f6fc5982d614add9b8428f192d0426397d89.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 350, + 291, + 388 + ], + "lines": [ + { + "bbox": [ + 67, + 350, + 291, + 388 + ], + "spans": [ + { + "bbox": [ + 67, + 350, + 291, + 388 + ], + "type": "text", + "content": "Figure 1: The accuracy of different models' test results in the DP and CoT settings. 
ERNIE" + }, + { + "bbox": [ + 67, + 350, + 291, + 388 + ], + "type": "inline_equation", + "content": "_{4}" + }, + { + "bbox": [ + 67, + 350, + 291, + 388 + ], + "type": "text", + "content": "-turbo and Gemini" + }, + { + "bbox": [ + 67, + 350, + 291, + 388 + ], + "type": "inline_equation", + "content": "_{1.5\\text{-pro}}" + }, + { + "bbox": [ + 67, + 350, + 291, + 388 + ], + "type": "text", + "content": " achieve the highest accuracy of " + }, + { + "bbox": [ + 67, + 350, + 291, + 388 + ], + "type": "inline_equation", + "content": "60.3\\%" + }, + { + "bbox": [ + 67, + 350, + 291, + 388 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 408, + 291, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 408, + 291, + 624 + ], + "spans": [ + { + "bbox": [ + 67, + 408, + 291, + 624 + ], + "type": "text", + "content": "We highlight that model performance varies significantly across different joke types. While models generally perform well on Situational jokes, achieving " + }, + { + "bbox": [ + 67, + 408, + 291, + 624 + ], + "type": "inline_equation", + "content": "60.0\\%" + }, + { + "bbox": [ + 67, + 408, + 291, + 624 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 408, + 291, + 624 + ], + "type": "inline_equation", + "content": "70.0\\%" + }, + { + "bbox": [ + 67, + 408, + 291, + 624 + ], + "type": "text", + "content": " accuracy in both DP and CoT settings, their performance difference on other joke types is more pronounced. 
For instance, GLM-4plus achieves " + }, + { + "bbox": [ + 67, + 408, + 291, + 624 + ], + "type": "inline_equation", + "content": "65.0\\%" + }, + { + "bbox": [ + 67, + 408, + 291, + 624 + ], + "type": "text", + "content": " accuracy on Homophonic jokes in the DP setting, whereas " + }, + { + "bbox": [ + 67, + 408, + 291, + 624 + ], + "type": "inline_equation", + "content": "\\mathrm{Yi}_{34\\mathrm{B}}" + }, + { + "bbox": [ + 67, + 408, + 291, + 624 + ], + "type": "text", + "content": " only reaches " + }, + { + "bbox": [ + 67, + 408, + 291, + 624 + ], + "type": "inline_equation", + "content": "30.0\\%" + }, + { + "bbox": [ + 67, + 408, + 291, + 624 + ], + "type": "text", + "content": ". Nemotron70\\mathrm{B}\\ performs well on Cultural jokes in the CoT setting with " + }, + { + "bbox": [ + 67, + 408, + 291, + 624 + ], + "type": "inline_equation", + "content": "72.0\\%" + }, + { + "bbox": [ + 67, + 408, + 291, + 624 + ], + "type": "text", + "content": " accuracy, but Athene70\\mathrm{B}\\ and ERNIE4-turbo achieve only " + }, + { + "bbox": [ + 67, + 408, + 291, + 624 + ], + "type": "inline_equation", + "content": "43.0\\%" + }, + { + "bbox": [ + 67, + 408, + 291, + 624 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 408, + 291, + 624 + ], + "type": "inline_equation", + "content": "42.0\\%" + }, + { + "bbox": [ + 67, + 408, + 291, + 624 + ], + "type": "text", + "content": ", respectively. Such performance variance highlights LLMs' varied capabilities in specific domains such as cultural reasoning and situational reasoning, revealing the respective limitations of these LLMs." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 635, + 255, + 661 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 635, + 255, + 661 + ], + "spans": [ + { + "bbox": [ + 67, + 635, + 255, + 661 + ], + "type": "text", + "content": "5.1 Have LLMs achieved human-level understanding of humor?"
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 666, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 666, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 666, + 291, + 775 + ], + "type": "text", + "content": "Answer: No. To compare the performance of LLMs with humans, we conduct a human study involving three Chinese native speakers unfamiliar with this work to annotate a randomly chosen subset of 200 examples. Human annotators demonstrate significantly better performance, with an average accuracy of " + }, + { + "bbox": [ + 67, + 666, + 291, + 775 + ], + "type": "inline_equation", + "content": "78.3\\%" + }, + { + "bbox": [ + 67, + 666, + 291, + 775 + ], + "type": "text", + "content": " and an MCC score of 0.60, significantly better than the LLMs' best per" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 305, + 68, + 524, + 570 + ], + "blocks": [ + { + "bbox": [ + 305, + 68, + 524, + 570 + ], + "lines": [ + { + "bbox": [ + 305, + 68, + 524, + 570 + ], + "spans": [ + { + "bbox": [ + 305, + 68, + 524, + 570 + ], + "type": "image", + "image_path": "1751194a5476ee5570e3e726c0ef4c756014c6ce34f4a9fb82ab296870d8687f.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 576, + 526, + 624 + ], + "lines": [ + { + "bbox": [ + 302, + 576, + 526, + 624 + ], + "spans": [ + { + "bbox": [ + 302, + 576, + 526, + 624 + ], + "type": "text", + "content": "Figure 2: DP accuracy on different joke types " + }, + { + "bbox": [ + 302, + 576, + 526, + 624 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 302, + 576, + 526, + 624 + ], + "type": "text", + "content": ". Here, we sample 200 jokes for error analysis. We highlight that model performance varies significantly across different joke types." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 654, + 526, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 654, + 526, + 721 + ], + "spans": [ + { + "bbox": [ + 302, + 654, + 526, + 721 + ], + "type": "text", + "content": "formance of " + }, + { + "bbox": [ + 302, + 654, + 526, + 721 + ], + "type": "inline_equation", + "content": "60.3\\%" + }, + { + "bbox": [ + 302, + 654, + 526, + 721 + ], + "type": "text", + "content": " accuracy and an MCC score of 0.29 (Figure 16 in Appendix H). Our results indicate that there is a large room of performance improvement for LLMs on Chinese humor understanding." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 745, + 523, + 772 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 745, + 523, + 772 + ], + "spans": [ + { + "bbox": [ + 302, + 745, + 523, + 772 + ], + "type": "text", + "content": "5.2 Does chain-of-thought (CoT) help LLMs' humor understanding?" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "text", + "content": "21803" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 67, + 68, + 526, + 268 + ], + "blocks": [ + { + "bbox": [ + 67, + 68, + 526, + 268 + ], + "lines": [ + { + "bbox": [ + 67, + 68, + 526, + 268 + ], + "spans": [ + { + "bbox": [ + 67, + 68, + 526, + 268 + ], + "type": "table", + "html": "
Example(zh)“大人有大量,小孩倒是不多了。”食人族厨师清点食材说。 \n(en)“Adults are plenty, but children are quite scarce,” said the cannibal chef while counting ingredients.
Joke Explanation(zh)这个笑话的幽默之处在于其运用了惊悚与诙谐的巧妙结合,以及意料之外的语言转折。食人族厨师清点食材的场景已经足够诡异,但他却以“大人有大量,小孩倒是不多了”这样似乎很平常的口吻来评论,这种对比和反差产生了荒诞的幽默感,同时也在探讨一种颠覆性的、黑暗式的玩笑。 \n(en)The humor in this joke lies in its clever combination of horror and wit, as well as the unexpected linguistic twist. The scene of a cannibal chef counting ingredients is already eerie, but the casual remark, "Adults are plenty, but children are quite scarce," delivered in an ordinary tone, creates a stark contrast. This juxtaposition generates absurd humor while exploring a subversive, dark style of comedy.
Model's Answer DP✓(zh)部分/没有解释 \n(en)Partially/Not ExplainedModel's Answer CoT X (zh) ...这句话使用了一个常用的谚语“大人有大量”来双关...比较您的理解和解释:笑话解释涵盖了我的分析要点,指出了对比和反差产生的荒诞幽默感,并提到了黑暗式幽默。解释完全涵盖了笑话的所有幽默点,因此选择“完全解释”。 \n(en) ...This sentence uses a common proverb, "adults are plenty," as a pun... Comparison of your understanding and the explanation: The joke explanation covers the key points of my analysis, highlighting the absurd humor created by contrast and reversal, and mentioning the element of dark humor. The explanation fully addresses all aspects of the joke's humor, so “Fully Explained” is selected.
CoT Failure ReasonsObserving model's explanation shows it recognizes the pun in the joke. However, the joke explanation being evaluated misses the pun. Model correctly identifies this in DP but, in CoT, despite noticing the pun, attempts to justify the flawed explanation, leading to an incorrect judgment.
", + "image_path": "058cd55e0d496028949dec0f92033e6cc636eeefdccebf3034d38befd6668549.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 274, + 525, + 300 + ], + "lines": [ + { + "bbox": [ + 67, + 274, + 525, + 300 + ], + "spans": [ + { + "bbox": [ + 67, + 274, + 525, + 300 + ], + "type": "text", + "content": "Figure 3: Over-analyzing example by GPT-4o. The GPT-4o model chooses the correct answer in the DP prompting, but chooses the incorrect answer due to over-analyzing in the CoT prompting." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 314, + 290, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 314, + 290, + 530 + ], + "spans": [ + { + "bbox": [ + 67, + 314, + 290, + 530 + ], + "type": "text", + "content": "Answer: No. We observe that CoT does not necessarily improve model performance and, in most cases, even leads to performance decay. For instance, as shown in Figure 1, the accuracy of " + }, + { + "bbox": [ + 67, + 314, + 290, + 530 + ], + "type": "inline_equation", + "content": "\\mathrm{ERNIE}_{4}" + }, + { + "bbox": [ + 67, + 314, + 290, + 530 + ], + "type": "text", + "content": "-turbo decreases from " + }, + { + "bbox": [ + 67, + 314, + 290, + 530 + ], + "type": "inline_equation", + "content": "60.3\\%" + }, + { + "bbox": [ + 67, + 314, + 290, + 530 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 314, + 290, + 530 + ], + "type": "inline_equation", + "content": "45.2\\%" + }, + { + "bbox": [ + 67, + 314, + 290, + 530 + ], + "type": "text", + "content": " when we switch to CoT prompting, Mistral" + }, + { + "bbox": [ + 67, + 314, + 290, + 530 + ], + "type": "inline_equation", + "content": "_{123B}" + }, + { + "bbox": [ + 67, + 314, + 290, + 530 + ], + "type": "text", + "content": "'s performance drops from " + }, + { + "bbox": [ + 67, + 314, + 290, + 530 + ], + "type": 
"inline_equation", + "content": "55.6\\%" + }, + { + "bbox": [ + 67, + 314, + 290, + 530 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 314, + 290, + 530 + ], + "type": "inline_equation", + "content": "51.2\\%" + }, + { + "bbox": [ + 67, + 314, + 290, + 530 + ], + "type": "text", + "content": ", GPT-4o's performance drops from " + }, + { + "bbox": [ + 67, + 314, + 290, + 530 + ], + "type": "inline_equation", + "content": "51.9\\%" + }, + { + "bbox": [ + 67, + 314, + 290, + 530 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 314, + 290, + 530 + ], + "type": "inline_equation", + "content": "50.6\\%" + }, + { + "bbox": [ + 67, + 314, + 290, + 530 + ], + "type": "text", + "content": ", GPT-4turbo's performance falls from " + }, + { + "bbox": [ + 67, + 314, + 290, + 530 + ], + "type": "inline_equation", + "content": "52.3\\%" + }, + { + "bbox": [ + 67, + 314, + 290, + 530 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 314, + 290, + 530 + ], + "type": "inline_equation", + "content": "51.3\\%" + }, + { + "bbox": [ + 67, + 314, + 290, + 530 + ], + "type": "text", + "content": ". Moreover, the MCC scores present a clearer trend of performance decline under CoT prompting. As shown in Figure 16 in Appendix H, eight of the ten LLMs' MCC scores decrease under CoT prompting. We hypothesize that CoT prompts may not help the model's reasoning when the model lacks a fundamental grasp of humor understanding." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 531, + 291, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 531, + 291, + 706 + ], + "spans": [ + { + "bbox": [ + 67, + 531, + 291, + 706 + ], + "type": "text", + "content": "We observe that under CoT prompting, models like GPT-4o tend to justify incorrect explanations as \"correct\", leading to an increase in false-positive rate from " + }, + { + "bbox": [ + 67, + 531, + 291, + 706 + ], + "type": "inline_equation", + "content": "80.0\\%" + }, + { + "bbox": [ + 67, + 531, + 291, + 706 + ], + "type": "text", + "content": " for DP prompting to " + }, + { + "bbox": [ + 67, + 531, + 291, + 706 + ], + "type": "inline_equation", + "content": "85.0\\%" + }, + { + "bbox": [ + 67, + 531, + 291, + 706 + ], + "type": "text", + "content": " for CoT prompting (Table 4 in Appendix H). " + }, + { + "bbox": [ + 67, + 531, + 291, + 706 + ], + "type": "inline_equation", + "content": "\\mathrm{ERNIE}_{4}" + }, + { + "bbox": [ + 67, + 531, + 291, + 706 + ], + "type": "text", + "content": "-turbo exhibits the largest false-positive rate, rising from " + }, + { + "bbox": [ + 67, + 531, + 291, + 706 + ], + "type": "inline_equation", + "content": "59.8\\%" + }, + { + "bbox": [ + 67, + 531, + 291, + 706 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 531, + 291, + 706 + ], + "type": "inline_equation", + "content": "96.9\\%" + }, + { + "bbox": [ + 67, + 531, + 291, + 706 + ], + "type": "text", + "content": " (Table 4 in Appendix H). Figure 3 provides an example where CoT confuses the GPT-4o model. Under the DP prompting, the GPT-4o model chooses the answer correctly. However, CoT prompting causes the model to over-analyze and justify an incorrect explanation." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 708, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 708, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 708, + 291, + 775 + ], + "type": "text", + "content": "On the other hand, models like Nematron" + }, + { + "bbox": [ + 67, + 708, + 291, + 775 + ], + "type": "inline_equation", + "content": "_{70}" + }, + { + "bbox": [ + 67, + 708, + 291, + 775 + ], + "type": "text", + "content": " may be overly critical of explanations under CoT prompting, resulting in a false-negative rate from " + }, + { + "bbox": [ + 67, + 708, + 291, + 775 + ], + "type": "inline_equation", + "content": "20.9\\%" + }, + { + "bbox": [ + 67, + 708, + 291, + 775 + ], + "type": "text", + "content": " for DP prompting to " + }, + { + "bbox": [ + 67, + 708, + 291, + 775 + ], + "type": "inline_equation", + "content": "46.1\\%" + }, + { + "bbox": [ + 67, + 708, + 291, + 775 + ], + "type": "text", + "content": " for CoT prompting (Table 4 in Appendix H). We highlight that a" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 302, + 314, + 526, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 314, + 526, + 396 + ], + "spans": [ + { + "bbox": [ + 302, + 314, + 526, + 396 + ], + "type": "text", + "content": "recent work demonstrates that CoT can degrade performance in tasks requiring subtle comprehension (Sprague et al., 2024), which aligns with our findings on its limitations in humor interpretation. Figure 14 in Appendix E discusses an example corresponding to the model being overly critical." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 409, + 524, + 435 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 409, + 524, + 435 + ], + "spans": [ + { + "bbox": [ + 302, + 409, + 524, + 435 + ], + "type": "text", + "content": "5.3 Case study: can GPT-4o and ERNIE" + }, + { + "bbox": [ + 302, + 409, + 524, + 435 + ], + "type": "inline_equation", + "content": "_{4}" + }, + { + "bbox": [ + 302, + 409, + 524, + 435 + ], + "type": "text", + "content": "-turbo explain jokes as well as humans?" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 442, + 525, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 442, + 525, + 536 + ], + "spans": [ + { + "bbox": [ + 302, + 442, + 525, + 536 + ], + "type": "text", + "content": "Answer: No. Apart from testing multiple LLMs on Chumor, we conduct case studies on GPT-4o and ERNIE" + }, + { + "bbox": [ + 302, + 442, + 525, + 536 + ], + "type": "inline_equation", + "content": "_{4}" + }, + { + "bbox": [ + 302, + 442, + 525, + 536 + ], + "type": "text", + "content": "-turbo to assess the quality of their joke explanations compared to humans. We prompt them to explain the humor in two sentences, consistent with the format of human explanations. 
Here is the prompt we feed to both LLMs:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 308, + 550, + 348, + 563 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 550, + 348, + 563 + ], + "spans": [ + { + "bbox": [ + 308, + 550, + 348, + 563 + ], + "type": "text", + "content": "Prompt" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 309, + 576, + 519, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 576, + 519, + 602 + ], + "spans": [ + { + "bbox": [ + 309, + 576, + 519, + 602 + ], + "type": "text", + "content": "请用两句话解释这个笑话的幽默之处: [joke]" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 309, + 604, + 519, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 604, + 519, + 618 + ], + "spans": [ + { + "bbox": [ + 309, + 604, + 519, + 618 + ], + "type": "text", + "content": "Please explain the joke in two sentences: [joke]" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 639, + 526, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 639, + 526, + 706 + ], + "spans": [ + { + "bbox": [ + 302, + 639, + 526, + 706 + ], + "type": "text", + "content": "Data Annotation. As demonstrated by Hessel et al. (2023), crowd-sourcing typically cannot produce high-quality explanations, following Hessel et al. (2023), one of the authors annotates all the explanations to ensure the quality and consistency." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "text", + "content": "This is a substantial effort: the author ended up annotating the explanations for 1,951 jokes. 
The resulting corpus has a mean of 78 Chinese characters of explanation per joke, and the total length, 151,730 Chinese characters, is comparable" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "text", + "content": "21804" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 69, + 68, + 290, + 138 + ], + "blocks": [ + { + "bbox": [ + 69, + 68, + 290, + 138 + ], + "lines": [ + { + "bbox": [ + 69, + 68, + 290, + 138 + ], + "spans": [ + { + "bbox": [ + 69, + 68, + 290, + 138 + ], + "type": "image", + "image_path": "f29ecc8a0d970beaa864265793e8e2763bf420cfe75aaaa9c975bdd6a3d526dc.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 145, + 291, + 195 + ], + "lines": [ + { + "bbox": [ + 67, + 145, + 291, + 195 + ], + "spans": [ + { + "bbox": [ + 67, + 145, + 291, + 195 + ], + "type": "text", + "content": "Figure 4: Annotated preference for whether human explanation is preferred (\"Human wins\") or the explanation from LLMs is preferred (\"LLM wins\"). Humans' explanation is significantly preferred over LLMs'." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 208, + 170, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 208, + 170, + 222 + ], + "spans": [ + { + "bbox": [ + 67, + 208, + 170, + 222 + ], + "type": "text", + "content": "in length to a novella†." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 230, + 290, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 230, + 290, + 432 + ], + "spans": [ + { + "bbox": [ + 67, + 230, + 290, + 432 + ], + "type": "text", + "content": "Evaluation Setup. To fairly evaluate which explanation is better, we conduct A/B testing by presenting the humor explanation from one LLM and from human to six college students, asking them to annotate their preference of the explanation for each joke. These college students are native Chinese speakers who grew up in China, therefore they have a deep understanding of the cultural terms and trending terms in China. We note that the preference annotation requires a substantial effort as each annotator reads through a total length of around 300k Chinese characters†. We end up with three preference annotations for each joke. The preference annotation achieve a " + }, + { + "bbox": [ + 67, + 230, + 290, + 432 + ], + "type": "inline_equation", + "content": "61.4\\%" + }, + { + "bbox": [ + 67, + 230, + 290, + 432 + ], + "type": "text", + "content": " agreement rate among annotators (Appendix B)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 433, + 291, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 433, + 291, + 527 + ], + "spans": [ + { + "bbox": [ + 67, + 433, + 291, + 527 + ], + "type": "text", + "content": "We use the winning rate as our measure to compare LLMs' explanation versus human explanation, taking the majority vote among all annotators for each example. In addition, if all annotators disagree, we assign an \"Undecided\" label. Appendix C provides the annotation instructions we present to the annotators." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 535, + 291, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 535, + 291, + 617 + ], + "spans": [ + { + "bbox": [ + 67, + 535, + 291, + 617 + ], + "type": "text", + "content": "Overall Results. Figure 4 reports the wining rate of explanations from human versus GPT-4o and ERNIE" + }, + { + "bbox": [ + 67, + 535, + 291, + 617 + ], + "type": "inline_equation", + "content": "_{4\\text{-turb}}" + }, + { + "bbox": [ + 67, + 535, + 291, + 617 + ], + "type": "text", + "content": ". We can see that human explanations are significantly better than those from both LLMs, with humans winning over " + }, + { + "bbox": [ + 67, + 535, + 291, + 617 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 67, + 535, + 291, + 617 + ], + "type": "text", + "content": " of the time, while LLMs win in only " + }, + { + "bbox": [ + 67, + 535, + 291, + 617 + ], + "type": "inline_equation", + "content": "2 - 3\\%" + }, + { + "bbox": [ + 67, + 535, + 291, + 617 + ], + "type": "text", + "content": " of cases." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 624, + 291, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 624, + 291, + 705 + ], + "spans": [ + { + "bbox": [ + 67, + 624, + 291, + 705 + ], + "type": "text", + "content": "Error Analysis. Figure 5 shows the overall distribution of error types for GPT-4o and ERNIE" + }, + { + "bbox": [ + 67, + 624, + 291, + 705 + ], + "type": "inline_equation", + "content": "_{4\\text{-}\\text{turbo}}" + }, + { + "bbox": [ + 67, + 624, + 291, + 705 + ], + "type": "text", + "content": " on Chumor in terms of their humor explanations. This error analysis is conducted by an individual who is not involved in writing the original explanations, ensuring an unbiased evaluation. 
GPT-4o" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 316, + 68, + 514, + 227 + ], + "blocks": [ + { + "bbox": [ + 316, + 68, + 514, + 227 + ], + "lines": [ + { + "bbox": [ + 316, + 68, + 514, + 227 + ], + "spans": [ + { + "bbox": [ + 316, + 68, + 514, + 227 + ], + "type": "image", + "image_path": "a8d2f4eda47cac68bcfb4deb23736b0a2064f14ab806d5e36881a22d801cd584.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 234, + 526, + 320 + ], + "lines": [ + { + "bbox": [ + 302, + 234, + 526, + 320 + ], + "spans": [ + { + "bbox": [ + 302, + 234, + 526, + 320 + ], + "type": "text", + "content": "Figure 5: Distribution of error types for GPT-4o and ERNIE" + }, + { + "bbox": [ + 302, + 234, + 526, + 320 + ], + "type": "inline_equation", + "content": "_{4\\text{-}\\text{turbo}}" + }, + { + "bbox": [ + 302, + 234, + 526, + 320 + ], + "type": "text", + "content": ". We sample 200 examples to calculate the distribution of these error types. We note that each example may correspond to multiple error types. We highlight that ERNIE" + }, + { + "bbox": [ + 302, + 234, + 526, + 320 + ], + "type": "inline_equation", + "content": "_{4\\text{-}\\text{turbo}}" + }, + { + "bbox": [ + 302, + 234, + 526, + 320 + ], + "type": "text", + "content": " demonstrates a lower error rate on cultural jokes, while GPT-4o demonstrates a lower error rate on contextual or pun-based jokes." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 336, + 526, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 336, + 526, + 524 + ], + "spans": [ + { + "bbox": [ + 302, + 336, + 526, + 524 + ], + "type": "text", + "content": "is more prone to errors categorized as \"cultural unawareness\" (29.5% of all its explanations) compared to " + }, + { + "bbox": [ + 302, + 336, + 526, + 524 + ], + "type": "inline_equation", + "content": "\\mathrm{ERNIE}_{4\\text{-}\\text{turbo}}" + }, + { + "bbox": [ + 302, + 336, + 526, + 524 + ], + "type": "text", + "content": " (10.5%). We suspect that " + }, + { + "bbox": [ + 302, + 336, + 526, + 524 + ], + "type": "inline_equation", + "content": "\\mathrm{ERNIE}_{4\\text{-}\\text{turbo}}" + }, + { + "bbox": [ + 302, + 336, + 526, + 524 + ], + "type": "text", + "content": " is more familiar with Chinese culture as it is likely trained on a larger Chinese corpus than GPT-4o. However, GPT-4o performs better on cases requiring an understanding of contexts or puns, suggesting its strong reasoning ability. We provide three error cases for GPT-4o here and additional cases for both GPT-4o and " + }, + { + "bbox": [ + 302, + 336, + 526, + 524 + ], + "type": "inline_equation", + "content": "\\mathrm{ERNIE}_{4\\text{-}\\text{turbo}}" + }, + { + "bbox": [ + 302, + 336, + 526, + 524 + ], + "type": "text", + "content": " in Appendix E. In the following examples in Figure 6, Figure 7 and Figure 8, we highlight key phrases that induce humor in green, and underscore the errors in red." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 535, + 525, + 724 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 535, + 525, + 724 + ], + "spans": [ + { + "bbox": [ + 302, + 535, + 525, + 724 + ], + "type": "text", + "content": "Error Type I: Cultural Unawareness. 
LLMs may fail to explain a joke due to a lack of awareness of certain cultural knowledge. For instance, the example in Figure 6 requires knowledge of a superstitious belief in Chinese culture, getting a haircut in the first lunar month brings death to your uncle, and the explanation from GPT-4o fails to connect to this Chinese cultural belief. We hypothesize that while LLMs are pre-trained on Internet-scale corpora, such culturally specific knowledge can still be challenging for them to grasp. Moreover, even when they have acquired such cultural knowledge, they may fail to relate to it as we humans do during the reasoning process." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 735, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 735, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 735, + 525, + 775 + ], + "type": "text", + "content": "Error Type II: Pun-based Error. LLMs may fail to identify multiple meanings of a single word within a joke, causing them to fail on pun-based" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 67, + 712, + 290, + 752 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 712, + 290, + 752 + ], + "spans": [ + { + "bbox": [ + 67, + 712, + 290, + 752 + ], + "type": "text", + "content": "†The total length of our explanations surpasses the Chinese version of The Great Gatsby (100k Chinese characters), and is about half the length of the Chinese version of Wuthering Heights (325k Chinese characters)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 752, + 290, + 775 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 752, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 752, + 290, + 775 + ], + "type": "text", + "content": "This is about the same length of the Chinese version of Wuthering Heights (325k Chinese characters)." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "text", + "content": "21805" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 68, + 70, + 289, + 289 + ], + "blocks": [ + { + "bbox": [ + 68, + 70, + 289, + 289 + ], + "lines": [ + { + "bbox": [ + 68, + 70, + 289, + 289 + ], + "spans": [ + { + "bbox": [ + 68, + 70, + 289, + 289 + ], + "type": "table", + "html": "
Example(zh) 小明在正月接发竟导致舅舅复活。 (en) Xiaoming got hair extensions during the first lunar month, which astonishingly brought his uncle back to life.
Correct Humor Explanation“Getting a haircut in the first lunar month will brings death to your uncle.” is a popular superstitious saying in Chinese culture. In this joke, Xiao Ming gets hair extensions in the first month, which reverses the original logic and absurdly results in “bringing his uncle back to life.”
GPT-4o's Answer(zh) ...它利用了“接发”和“接发”的双关语:一方面是指理发店的接发服务,另一方面是指正月里“接福”的传统习俗。 (en) ...the pun on “接发”: it refers to both hair extension at a salon and the traditional practice of “receiving blessings” during the first lunar month.
Failure ReasonsFail to grasp expressions, slang, or specific usages unique to a particular cultural context.
", + "image_path": "fd22924f367bbc40b6bc117f3e8567c7a93fee0bc5f6c0abaee35136a3313460.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 69, + 323, + 289, + 528 + ], + "blocks": [ + { + "bbox": [ + 96, + 299, + 260, + 312 + ], + "lines": [ + { + "bbox": [ + 96, + 299, + 260, + 312 + ], + "spans": [ + { + "bbox": [ + 96, + 299, + 260, + 312 + ], + "type": "text", + "content": "Figure 6: Culture unawareness example." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 69, + 323, + 289, + 528 + ], + "lines": [ + { + "bbox": [ + 69, + 323, + 289, + 528 + ], + "spans": [ + { + "bbox": [ + 69, + 323, + 289, + 528 + ], + "type": "table", + "html": "
Example(zh) 你可以在steam上找到GTA,所以水是DNA。
(en) You can find GTA on Steam,so water is DNA.
Correct \nHumor \nExplanationNormally,“Steam”refers to a gaming platform and “GTA”refers to the “Grand Theft Auto” game series. The joke uses a pun, where “steam”literally means the gaseous form of water, and “G”,“T”,and “A” represent Guanine, Thymine, and Adenine, respectively—three of the four nucleotides (AGTC) that make up DNA.
GPT-4o's \nAnswer(zh)...这种明显错误的类比制造了荒诞的效果,引人发笑。
(en)...This clearly wrong analogy creates an absurd effect.
Failure \nReasonsFail to discover multiple meanings of a word in the jokes
", + "image_path": "1759ba788f746021f1cf6f5f2ae3d9f39cebde49087d16cf36f2a1bc41a2179e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 66, + 572, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 572, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 66, + 572, + 291, + 775 + ], + "type": "text", + "content": "jokes where humor lies in inverting the conventional usage of words. In Figure 7, GPT-4o fails to grasp the transition from the video game terms \"Steam\", \"GTA\" to the scientific terminologies \"G\", \"T\", \"A\" that form DNA. Typically, \"Steam\" refers to a game platform, and \"GTA\" refers to the game series \"Grand Theft Auto\". The joke employs a pun on words where \"steam\" in its literal sense means water vapor, and \"GTA\" can represent not only the video game, but guanine, thymine, and adenine, which are nucleotides involved in the structure of DNA. Such jokes require LLMs to identify puns and the reason for the association of the multiple meanings. Furthermore, the process requires LLMs to bridge the logic gap between these terms, such as" + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 303, + 70, + 525, + 285 + ], + "blocks": [ + { + "bbox": [ + 106, + 538, + 250, + 550 + ], + "lines": [ + { + "bbox": [ + 106, + 538, + 250, + 550 + ], + "spans": [ + { + "bbox": [ + 106, + 538, + 250, + 550 + ], + "type": "text", + "content": "Figure 7: Pun-based error example." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 303, + 70, + 525, + 285 + ], + "lines": [ + { + "bbox": [ + 303, + 70, + 525, + 285 + ], + "spans": [ + { + "bbox": [ + 303, + 70, + 525, + 285 + ], + "type": "table", + "html": "
Example(zh) 家里的猪油没了,小明只能把 植物油倒快点 当猪油用了。 (en) With the lard gone, Xiaoming had to pour the vegetable oil quickly to use it like lard.
Correct \nHumor \nExplanationWhen pronounced quickly,“vegetable” (pronounced as chih woo) sounds very similar to “pig” (pronounced as choo) in Chinese, leading Xiaoming to comically try using vegetable oil as lard by pouring it quickly.
GPT-4o's \nAnswer(zh) ...小明误解了猪油和植物油的区别,认为只要倒得快,植物油就能变成猪油。笑点在于他天真的逻辑和对常识的搞笑误解。 (en) ... Xiaoming misunderstood that if vegetable oil is poured quickly, it can turn into lard, highlighting his naive logic and misinterpretation of common sense.
Failure \nReasonsFail to exploit homophones or near-homophones in the jokes.
", + "image_path": "8ceb2e56d703fdfeff32a52c4d61cd642d04b0aedff70427b09f0f3851329a20.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 336, + 293, + 490, + 306 + ], + "lines": [ + { + "bbox": [ + 336, + 293, + 490, + 306 + ], + "spans": [ + { + "bbox": [ + 336, + 293, + 490, + 306 + ], + "type": "text", + "content": "Figure 8: Homophonic error example." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 301, + 322, + 525, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 322, + 525, + 376 + ], + "spans": [ + { + "bbox": [ + 301, + 322, + 525, + 376 + ], + "type": "text", + "content": "\"steam\" and \"GTA\", and an unusual context, like \"water is DNA\". The overall process requires both scientific knowledge and creative thinking, which are challenging for LLMs." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 387, + 526, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 387, + 526, + 564 + ], + "spans": [ + { + "bbox": [ + 302, + 387, + 526, + 564 + ], + "type": "text", + "content": "Error Type III: Homophonic Error. The example in Figure 8 requires LLMs to reason over the pronunciation as “植物” (pronounced as chih woo, meaning “vegetable”) sounds very similar to “猪” (pronounced as choo, meaning “pig”) in Chinese when we speak it fast enough. The humor arises from the contrast between the similarity in pronunciation and the disparity in meaning between the two terms. Such contrasts may be sparse in the training corpus of LLMs, and also demand a deep connection across different modalities to link pronunciation with the meaning behind these terms, which poses significant challenges to LLMs." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 576, + 381, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 576, + 381, + 588 + ], + "spans": [ + { + "bbox": [ + 302, + 576, + 381, + 588 + ], + "type": "text", + "content": "6 Conclusion" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 301, + 599, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 599, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 301, + 599, + 526, + 775 + ], + "type": "text", + "content": "We introduce Chumor, a Chinese humor understanding dataset that captures intellectually challenging and culturally specific humor in Chinese. Our analysis reveals that Chumor remains difficult even for advanced LLMs, with a significant performance gap between LLMs and humans. Furthermore, we find that chain-of-thought reasoning does not improve LLMs' humor comprehension and, in some cases, leads to over-analysis and incorrect interpretations. Additionally, models such as GPT-4o and ERNIE" + }, + { + "bbox": [ + 301, + 599, + 526, + 775 + ], + "type": "inline_equation", + "content": "_{4}" + }, + { + "bbox": [ + 301, + 599, + 526, + 775 + ], + "type": "text", + "content": "-turbo struggle to explain jokes as effectively as humans, highlighting fundamental challenges in humor reasoning. 
These findings un" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "text", + "content": "21806" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 293, + 141 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 293, + 141 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 293, + 141 + ], + "type": "text", + "content": "derscore the unique difficulties that Chinese humor presents to LLMs. We hope that Chumor can advance non-English humor research and contribute to evaluating LLMs' reasoning abilities across diverse cultural backgrounds." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 149, + 131, + 162 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 149, + 131, + 162 + ], + "spans": [ + { + "bbox": [ + 68, + 149, + 131, + 162 + ], + "type": "text", + "content": "Limitations" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 170, + 292, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 170, + 292, + 387 + ], + "spans": [ + { + "bbox": [ + 69, + 170, + 292, + 387 + ], + "type": "text", + "content": "We try our best to test the Chinese humor understanding ability of different LLMs. However, due to the limited budget and API access, we cannot evaluate all possible LLMs in this paper. We encourage future research to conduct further evaluations of humor understanding abilities in LLMs. In the meantime, we emphasize that our research focuses primarily on demonstrating how humor understanding remains a significant challenge, even for SOTA LLMs. 
Our work shows that along with many other problems (Ignat et al., 2024), humor understanding, especially non-English and culturally specific humor understanding, remains an unsolved problem in the era of LLMs. We hope Chumor can contribute to non-English humor understanding evaluations for future multilingual LLMs." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 397, + 158, + 410 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 397, + 158, + 410 + ], + "spans": [ + { + "bbox": [ + 68, + 397, + 158, + 410 + ], + "type": "text", + "content": "Ethics Statement" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 418, + 291, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 418, + 291, + 676 + ], + "spans": [ + { + "bbox": [ + 69, + 418, + 291, + 676 + ], + "type": "text", + "content": "We have made every effort to filter out excessively offensive content in RZB. However, due to the subjective nature of humor, some of our jokes may still be perceived as offensive by individuals with different cultural or personal standards. To address these concerns, we strongly recommend that researchers use Chumor with cultural sensitivity, recognizing that the jokes in the dataset reflect the sociocultural context in which they were created. We encourage users of Chumor to approach the dataset with caution, remaining mindful of its potential to cause offense or harm, particularly when applying it in research or applications that involve diverse audiences or address sensitive topics. We wish to foster an ethical and responsible approach to data collection and usage, and we welcome constructive feedback from the research community and stakeholders to continually improve Chumor and mitigate potential harm." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 686, + 166, + 700 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 686, + 166, + 700 + ], + "spans": [ + { + "bbox": [ + 68, + 686, + 166, + 700 + ], + "type": "text", + "content": "Acknowledgement" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 708, + 292, + 762 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 708, + 292, + 762 + ], + "spans": [ + { + "bbox": [ + 67, + 708, + 292, + 762 + ], + "type": "text", + "content": "The GPT experiments are supported by credit from OpenAI through OpenAI Researcher Access assigned to Naihao Deng. We appreciate Qiang Liu, and Xiaoyue Shi for helping with the human study." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 304, + 70, + 362, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 70, + 362, + 83 + ], + "spans": [ + { + "bbox": [ + 304, + 70, + 362, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 303, + 89, + 527, + 775 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 303, + 89, + 527, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 89, + 527, + 123 + ], + "spans": [ + { + "bbox": [ + 303, + 89, + 527, + 123 + ], + "type": "text", + "content": "01.ai. 2024. Yi-34b model card. https://huggingface.co/01-ai/Yi-34B. Accessed: 2024-12-10." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 304, + 131, + 527, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 131, + 527, + 220 + ], + "spans": [ + { + "bbox": [ + 304, + 131, + 527, + 220 + ], + "type": "text", + "content": "Marah Abdin, Jyoti Aneja, Harkirat Behl, Sébastien Bubeck, Ronen Eldan, Suriya Gunasekar, Michael Harrison, Russell J. Hewett, Mojan Javaheripi, Piero Kauffmann, James R. Lee, Yin Tat Lee, Yuanzhi Li, Weishung Liu, Caio C. T. 
Mendes, Anh Nguyen, Eric Price, Gustavo de Rosa, Olli Saarikivi, and 8 others. 2024. Phi-4 technical report. Preprint, arXiv:2412.08905." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 227, + 527, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 227, + 527, + 306 + ], + "spans": [ + { + "bbox": [ + 304, + 227, + 527, + 306 + ], + "type": "text", + "content": "Badr AlKhamissi, Muhammad ElNokrashy, Mai Alkhamissi, and Mona Diab. 2024. Investigating cultural alignment of large language models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 12404-12422, Bangkok, Thailand. Association for Computational Linguistics." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 313, + 527, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 313, + 527, + 380 + ], + "spans": [ + { + "bbox": [ + 304, + 313, + 527, + 380 + ], + "type": "text", + "content": "Jinze Bai, Shuai Bai, Yunfei Chu, Zeyu Cui, Kai Dang, Xiaodong Deng, Yang Fan, Wenbin Ge, Yu Han, Fei Huang, Binyuan Hui, Luo Ji, Mei Li, Junyang Lin, Runji Lin, Dayiheng Liu, Gao Liu, Chengqiang Lu, Keming Lu, and 29 others. 2023. Qwen technical report. Preprint, arXiv:2309.16609." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 388, + 527, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 388, + 527, + 445 + ], + "spans": [ + { + "bbox": [ + 304, + 388, + 527, + 445 + ], + "type": "text", + "content": "Yuelin Bai, Xinrun Du, Yiming Liang, Yonggang Jin, Ziqiang Liu, Junting Zhou, Tianyu Zheng, Xincheng Zhang, Nuo Ma, Zekun Wang, and 1 others. 2024. Coig-cqia: Quality is all you need for chinese instruction fine-tuning. arXiv preprint arXiv:2403.18058." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 452, + 527, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 452, + 527, + 486 + ], + "spans": [ + { + "bbox": [ + 304, + 452, + 527, + 486 + ], + "type": "text", + "content": "Baidu. 2024. Ernie-4.0-turbo. https://cloud.baidu. com/doc/WENXINWORKSHOP/s/71xwwtafj. Accessed: 2024-12-10." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 494, + 527, + 561 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 494, + 527, + 561 + ], + "spans": [ + { + "bbox": [ + 304, + 494, + 527, + 561 + ], + "type": "text", + "content": "Dario Bertero and Pascale Fung. 2016. Deep learning of audio and language features for humor prediction. In Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16), pages 496-501, Porto-rož, Slovenia. European Language Resources Association (ELRA)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 569, + 527, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 569, + 527, + 602 + ], + "spans": [ + { + "bbox": [ + 304, + 569, + 527, + 602 + ], + "type": "text", + "content": "BigModel. 2024. Glm-4 model documentation. https://bigmodel.cn/dev/howuse/glm-4. Accessed: 2024-12-10." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 610, + 527, + 688 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 610, + 527, + 688 + ], + "spans": [ + { + "bbox": [ + 304, + 610, + 527, + 688 + ], + "type": "text", + "content": "Lei Chen and Chong Min Lee. 2017. Predicting audience's laughter during presentations using convolutional neural network. In Proceedings of the 12th Workshop on Innovative Use of NLP for Building Educational Applications, pages 86-90, Copenhagen, Denmark. Association for Computational Linguistics." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 696, + 527, + 775 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 696, + 527, + 775 + ], + "spans": [ + { + "bbox": [ + 304, + 696, + 527, + 775 + ], + "type": "text", + "content": "Yuyan Chen, Zhixu Li, Jiaqing Liang, Yanghua Xiao, Bang Liu, and Yunwen Chen. 2023. Can pre-trained language models understand chinese humor? In Proceedings of the Sixteenth ACM International Conference on Web Search and Data Mining, WSDM '23, page 465-480, New York, NY, USA. Association for Computing Machinery." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "text", + "content": "21807" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 772 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 70, + 72, + 291, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 72, + 291, + 148 + ], + "spans": [ + { + "bbox": [ + 70, + 72, + 291, + 148 + ], + "type": "text", + "content": "Yuyan Chen, Yichen Yuan, Panjun Liu, Dayiheng Liu, Qinghao Guan, Mengfei Guo, Haiming Peng, Bang Liu, Zhixu Li, and Yanghua Xiao. 2024. Talk funny! a large-scale humor response dataset with chain-of-humor interpretation. Proceedings of the AAAI Conference on Artificial Intelligence, 38(16):17826-17834." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 158, + 290, + 213 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 158, + 290, + 213 + ], + "spans": [ + { + "bbox": [ + 69, + 158, + 290, + 213 + ], + "type": "text", + "content": "Peter Clark, Isaac Cowhey, Oren Etzioni, Tushar Khot, Ashish Sabharwal, Carissa Schoenick, and Oyvind Tafjord. 2018. Think you have solved question answering? try arc, the ai2 reasoning challenge. Preprint, arXiv:1803.05457." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 222, + 289, + 244 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 222, + 289, + 244 + ], + "spans": [ + { + "bbox": [ + 69, + 222, + 289, + 244 + ], + "type": "text", + "content": "Peter T. Daniels and William Bright. 1996. The world's writing systems. Oxford University Press." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 252, + 289, + 307 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 252, + 289, + 307 + ], + "spans": [ + { + "bbox": [ + 69, + 252, + 289, + 307 + ], + "type": "text", + "content": "Naihao Deng, Zhenjie Sun, Ruiqi He, Aman Sikka, Yu-long Chen, Lin Ma, Yue Zhang, and Rada Mihalcea. 2024. Tables as images? exploring the strengths and limitations of llms on multimodal representations of tabular data. arXiv preprint arXiv:2402.12424." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 316, + 289, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 316, + 289, + 393 + ], + "spans": [ + { + "bbox": [ + 69, + 316, + 289, + 393 + ], + "type": "text", + "content": "Naihao Deng, Xinliang Zhang, Siyang Liu, Winston Wu, Lu Wang, and Rada Mihalcea. 2023. You are what you annotate: Towards better models through annotator representations. In *Findings of the Association for Computational Linguistics: EMNLP* 2023, pages 12475–12498, Singapore. Association for Computational Linguistics." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 401, + 289, + 457 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 401, + 289, + 457 + ], + "spans": [ + { + "bbox": [ + 69, + 401, + 289, + 457 + ], + "type": "text", + "content": "Xinrun Du, Zhouliang Yu, Songyang Gao, Ding Pan, Yuyang Cheng, Ziyang Ma, Ruibin Yuan, Xingwei Qu, Jiaheng Liu, Tianyu Zheng, and 1 others. 2024. Chinese tiny llm: Pretraining a chinese-centric large language model. arXiv preprint arXiv:2404.04167." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 465, + 289, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 465, + 289, + 498 + ], + "spans": [ + { + "bbox": [ + 69, + 465, + 289, + 498 + ], + "type": "text", + "content": "Tomas Engelthaler and Thomas T Hills. 2018. Humor norms for 4,997 english words. Behavior research methods, 50:1116-1124." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 507, + 289, + 528 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 507, + 289, + 528 + ], + "spans": [ + { + "bbox": [ + 69, + 507, + 289, + 528 + ], + "type": "text", + "content": "WILLIAM F. FRY. 1994. The biology of humor. HUMOR, 7(2):111-126." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 538, + 289, + 571 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 538, + 289, + 571 + ], + "spans": [ + { + "bbox": [ + 69, + 538, + 289, + 571 + ], + "type": "text", + "content": "Marc Gelkopf and 1 others. 2011. The use of humor in serious mental illness: A review. Evidence-Based Complementary and Alternative Medicine, 2011." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 580, + 289, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 580, + 289, + 624 + ], + "spans": [ + { + "bbox": [ + 69, + 580, + 289, + 624 + ], + "type": "text", + "content": "Matthew Gervais and David Sloan Wilson. 2005. 
The evolution and functions of laughter and humor: A synthetic approach. *The Quarterly review of biology*, 80(4):395-430." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 633, + 289, + 720 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 633, + 289, + 720 + ], + "spans": [ + { + "bbox": [ + 69, + 633, + 289, + 720 + ], + "type": "text", + "content": "Hamideh Ghanaian, Isar Nejadgholi, and Hussein Al Osman. 2023. ChatGPT for suicide risk assessment on social media: Quantitative evaluation of model performance, potentials and limitations. In Proceedings of the 13th Workshop on Computational Approaches to Subjectivity, Sentiment, & Social Media Analysis, pages 172-183, Toronto, Canada. Association for Computational Linguistics." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 729, + 289, + 772 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 729, + 289, + 772 + ], + "spans": [ + { + "bbox": [ + 69, + 729, + 289, + 772 + ], + "type": "text", + "content": "Google. 2024. Gemini 1.5 pro model documentation. https://ai.google.dev/gemini-api/docs/ models/gemini#gemini-1.5-pro. Accessed: 2024-12-10." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 524, + 773 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 304, + 72, + 524, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 524, + 182 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 524, + 182 + ], + "type": "text", + "content": "Md Kamrul Hasan, Wasifur Rahman, AmirAli Bagher Zadeh, Jianyuan Zhong, Md Iftekhar Tanveer, Louis-Philippe Morency, and Mohammed (Ehsan) Hoque. 2019. UR-FUNNY: A multimodal language dataset for understanding humor. 
In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 2046-2056, Hong Kong, China. Association for Computational Linguistics." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 191, + 524, + 290 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 191, + 524, + 290 + ], + "spans": [ + { + "bbox": [ + 304, + 191, + 524, + 290 + ], + "type": "text", + "content": "Jack Hessel, Ana Marasovic, Jena D. Hwang, Lillian Lee, Jeff Da, Rowan Zellers, Robert Mankoff, and Yejin Choi. 2023. Do androids laugh at electric sheep? humor \"understanding\" benchmarks from the new yorker caption contest. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 688-714, Toronto, Canada. Association for Computational Linguistics." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 299, + 524, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 299, + 524, + 397 + ], + "spans": [ + { + "bbox": [ + 304, + 299, + 524, + 397 + ], + "type": "text", + "content": "Nabil Hossain, John Krumm, and Michael Gamon. 2019. \"president vows to cut hair\": Dataset and analysis of creative text editing for humorous headlines. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 133-142, Minneapolis, Minnesota. Association for Computational Linguistics." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 407, + 524, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 407, + 524, + 538 + ], + "spans": [ + { + "bbox": [ + 304, + 407, + 524, + 538 + ], + "type": "text", + "content": "Oana Ignat, Zhijing Jin, Artem Abzaliev, Laura Biester, Santiago Castro, Naihao Deng, Xinyi Gao, Aylin Ece Gunal, Jacky He, Ashkan Kazemi, Muhammad Khalifa, Namho Koh, Andrew Lee, Siyang Liu, Do June Min, Shinka Mori, Joan C. Nwatu, Veronica Perez-Rosas, Siqi Shen, and 3 others. 2024. Has it all been solved? open NLP research questions not solved by large language models. In Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pages 8050-8094, Torino, Italia. ELRA and ICCL." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 548, + 524, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 548, + 524, + 580 + ], + "spans": [ + { + "bbox": [ + 304, + 548, + 524, + 580 + ], + "type": "text", + "content": "Herbert M Lefcourt. 2001. *Humor: The psychology of living buoyantly*. Springer Science & Business Media." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 590, + 524, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 590, + 524, + 656 + ], + "spans": [ + { + "bbox": [ + 304, + 590, + 524, + 656 + ], + "type": "text", + "content": "Zefeng Li, Hongfei Lin, Liang Yang, Bo Xu, and Shaowu Zhang. 2022. Memeplate: A chinese multimodal dataset for humor understanding in meme templates. In *Natural Language Processing and Chinese Computing*, pages 527-538, Cham. Springer International Publishing." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 665, + 524, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 665, + 524, + 708 + ], + "spans": [ + { + "bbox": [ + 304, + 665, + 524, + 708 + ], + "type": "text", + "content": "Hanmeng Liu, Ruoxi Ning, Zhiyang Teng, Jian Liu, Qiji Zhou, and Yue Zhang. 2023a. Evaluating the logical reasoning ability of chatgpt and gpt-4. arXiv preprint arXiv:2304.03439." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 719, + 524, + 773 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 719, + 524, + 773 + ], + "spans": [ + { + "bbox": [ + 304, + 719, + 524, + 773 + ], + "type": "text", + "content": "Siyang Liu, Naihao Deng, Sahand Sabour, Yilin Jia, Minlie Huang, and Rada Mihalcea. 2023b. Task-adaptive tokenization: Enhancing long-form text generation efficacy in mental health and beyond. In Proceedings of the 2023 Conference on Empirical Meth" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 283, + 781, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 781, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 283, + 781, + 312, + 791 + ], + "type": "text", + "content": "21808" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 290, + 773 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 80, + 72, + 290, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 72, + 290, + 105 + ], + "spans": [ + { + "bbox": [ + 80, + 72, + 290, + 105 + ], + "type": "text", + "content": "ods in Natural Language Processing, pages 15264-15281, Singapore. Association for Computational Linguistics." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 113, + 290, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 113, + 290, + 222 + ], + "spans": [ + { + "bbox": [ + 69, + 113, + 290, + 222 + ], + "type": "text", + "content": "Xin Liu, Baosong Yang, Dayiheng Liu, Haibo Zhang, Weihua Luo, Min Zhang, Haiying Zhang, and Jinsong Su. 2021. Bridging subword gaps in pretrainfinetune paradigm for natural language generation. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 6001-6011, Online. Association for Computational Linguistics." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 231, + 289, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 231, + 289, + 264 + ], + "spans": [ + { + "bbox": [ + 69, + 231, + 289, + 264 + ], + "type": "text", + "content": "Paul E McGhee. 1971. Development of the humor response: A review of the literature. Psychological Bulletin, 76(5):328." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 271, + 289, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 271, + 289, + 338 + ], + "spans": [ + { + "bbox": [ + 69, + 271, + 289, + 338 + ], + "type": "text", + "content": "Rada Mihalcea, Oana Ignat, Longju Bai, Angana Borah, Luis Chiruzzo, Zhijing Jin, Claude Kwizera, Joan Nwatu, Soujanya Poria, and Thamar Solorio. 2024. Why ai is weird and should not be this way: Towards ai for everyone, with everyone, by everyone. arXiv preprint arXiv:2410.16315." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 344, + 289, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 344, + 289, + 422 + ], + "spans": [ + { + "bbox": [ + 69, + 344, + 289, + 422 + ], + "type": "text", + "content": "Rada Mihalcea and Carlo Strapparava. 2005. 
Making computers laugh: Investigations in automatic humor recognition. In Proceedings of Human Language Technology Conference and Conference on Empirical Methods in Natural Language Processing, pages 531-538, Vancouver, British Columbia, Canada. Association for Computational Linguistics." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 429, + 289, + 473 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 429, + 289, + 473 + ], + "spans": [ + { + "bbox": [ + 69, + 429, + 289, + 473 + ], + "type": "text", + "content": "Mistral.ai. 2024. Mistral-large-instruct-2407 model card. https://huggingface.co/mistralai/Mistral-Large-Instruct-2407. Accessed: 2024-12-10." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 481, + 289, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 481, + 289, + 514 + ], + "spans": [ + { + "bbox": [ + 69, + 481, + 289, + 514 + ], + "type": "text", + "content": "Nexusflow. 2024. Athene-70b model card. https://huggingface.co/Nexusflow/Athene-70B. Accessed: 2024-12-10." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 522, + 289, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 522, + 289, + 565 + ], + "spans": [ + { + "bbox": [ + 69, + 522, + 289, + 565 + ], + "type": "text", + "content": "NVIDIA. 2024. Llama-3.1-nemotron-70b-instruct-hf model card. https://huggingface.co/nvidia/Llama-3.1-Nemotron-70B-Instruct-HF. Accessed: 2024-12-10." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 574, + 289, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 574, + 289, + 596 + ], + "spans": [ + { + "bbox": [ + 69, + 574, + 289, + 596 + ], + "type": "text", + "content": "OpenAI. 2023. Gpt-4 technical report. arXiv preprint arXiv:2303.08774." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 604, + 289, + 625 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 604, + 289, + 625 + ], + "spans": [ + { + "bbox": [ + 69, + 604, + 289, + 625 + ], + "type": "text", + "content": "OpenAI. 2024. Gpt-4o system card. arXiv preprint arXiv:2410.21276." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 634, + 289, + 698 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 634, + 289, + 698 + ], + "spans": [ + { + "bbox": [ + 69, + 634, + 289, + 698 + ], + "type": "text", + "content": "Siddhesh Pawar, Junyeong Park, Jiho Jin, Arnav Arora, Junho Myung, Srishti Yadav, Faiz Ghifari Haznitrama, Inhwa Song, Alice Oh, and Isabelle Augenstein. 2024. Survey of cultural awareness in language models: Text and beyond. arXiv preprint arXiv:2411.00860." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 708, + 289, + 773 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 708, + 289, + 773 + ], + "spans": [ + { + "bbox": [ + 69, + 708, + 289, + 773 + ], + "type": "text", + "content": "Peter Potash, Alexey Romanov, and Anna Rumshisky. 2017. SemEval-2017 task 6: #HashtagWars: Learning a sense of humor. In Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017), pages 49-57, Vancouver, Canada. Association for Computational Linguistics." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 525, + 773 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 304, + 72, + 525, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 525, + 105 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 525, + 105 + ], + "type": "text", + "content": "Qwen. 2024. Qwen2.5-72b-instruct model card. https://huggingface.co/Qwen/Qwen2.5-72B-Instruct. Accessed: 2024-12-10." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 113, + 525, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 113, + 525, + 222 + ], + "spans": [ + { + "bbox": [ + 304, + 113, + 525, + 222 + ], + "type": "text", + "content": "Dragomir Radev, Amanda Stent, Joel Tetreault, Aasish Pappu, Aikaterini Iliakopoulou, Agustin Chanfreau, Paloma de Juan, Jordi Vallmitjana, Alejandro Jaimes, Rahul Jha, and Robert Mankoff. 2016. Humor in collective discourse: Unsupervised funniness detection in the new yorker cartoon caption contest. In Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16), pages 475-479, Porto Roz, Slovenia. European Language Resources Association (ELRA)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 230, + 525, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 230, + 525, + 285 + ], + "spans": [ + { + "bbox": [ + 304, + 230, + 525, + 285 + ], + "type": "text", + "content": "David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. 2023. Gpqa: A graduate-level google-proof q&a benchmark. Preprint, arXiv:2311.12022." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 292, + 525, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 292, + 525, + 381 + ], + "spans": [ + { + "bbox": [ + 304, + 292, + 525, + 381 + ], + "type": "text", + "content": "Chhavi Sharma, Deepesh Bhageria, William Scott, Srinivas PYKL, Amitava Das, Tanmoy Chakraborty, Viswanath Pulabaigari, and Björn Gambäck. 2020. SemEval-2020 task 8: Memotion analysis-the visuolinguial metaphor! In Proceedings of the Fourteenth Workshop on Semantic Evaluation, pages 759-773, Barcelona (online). International Committee for Computational Linguistics." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 388, + 525, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 388, + 525, + 498 + ], + "spans": [ + { + "bbox": [ + 304, + 388, + 525, + 498 + ], + "type": "text", + "content": "Siqi Shen, Lajanugen Logeswaran, Moontae Lee, Honglak Lee, Soujanya Poria, and Rada Mihalcea. 2024. Understanding the capabilities and limitations of large language models for cultural commonsense. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 5668-5680, Mexico City, Mexico. Association for Computational Linguistics." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 506, + 525, + 615 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 506, + 525, + 615 + ], + "spans": [ + { + "bbox": [ + 304, + 506, + 525, + 615 + ], + "type": "text", + "content": "Shivalika Singh, Angelika Romanou, Clémentine Fourrier, David I. Adelani, Jian Gang Ngui, Daniel Vila-Suero, Peerat Limkonchotiwat, Kelly Marchisio, Wei Qi Leong, Yosephine Susanto, Raymond Ng, Shayne Longpre, Wei-Yin Ko, Madeline Smith, Antoine Bosselut, Alice Oh, Andre F. T. Martins, Leshem Choshen, Daphne Ippolito, and 4 others. 2024. Global mmlu: Understanding and addressing cultural and linguistic biases in multilingual evaluation. Preprint, arXiv:2412.03304." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 623, + 525, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 623, + 525, + 689 + ], + "spans": [ + { + "bbox": [ + 304, + 623, + 525, + 689 + ], + "type": "text", + "content": "Zayne Sprague, Fangcong Yin, Juan Diego Rodriguez, Dongwei Jiang, Manya Wadhwa, Prasann Singhal, Xinyu Zhao, Xi Ye, Kyle Mahowald, and Greg Durrett. 2024. To cot or not to cot? chain-of-thought helps mainly on math and symbolic reasoning. 
Preprint, arXiv:2409.12183." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 696, + 525, + 773 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 696, + 525, + 773 + ], + "spans": [ + { + "bbox": [ + 304, + 696, + 525, + 773 + ], + "type": "text", + "content": "Honglin Sun and Daniel Jurafsky. 2004. Shallow semantic parsing of Chinese. In Proceedings of the Human Language Technology Conference of the North American Chapter of the Association for Computational Linguistics: HLT-NAACL 2004, pages 249-256, Boston, Massachusetts, USA. Association for Computational Linguistics." + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 283, + 781, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 781, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 283, + 781, + 312, + 791 + ], + "type": "text", + "content": "21809" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 774 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 291, + 139 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 291, + 139 + ], + "type": "text", + "content": "Weiwei Sun, Zhifang Sui, Meng Wang, and Xin Wang. 2009. Chinese semantic role labeling with shallow parsing. In Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing, pages 1475-1483, Singapore. Association for Computational Linguistics." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 146, + 291, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 146, + 291, + 222 + ], + "spans": [ + { + "bbox": [ + 69, + 146, + 291, + 222 + ], + "type": "text", + "content": "Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, and Guillaume Lample. 2023a. Llama: Open and efficient foundation language models. Preprint, arXiv:2302.13971." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 231, + 291, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 231, + 291, + 319 + ], + "spans": [ + { + "bbox": [ + 69, + 231, + 291, + 319 + ], + "type": "text", + "content": "Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, and 49 others. 2023b. Llama 2: Open foundation and fine-tuned chat models. Preprint, arXiv:2307.09288." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 327, + 291, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 327, + 291, + 403 + ], + "spans": [ + { + "bbox": [ + 69, + 327, + 291, + 403 + ], + "type": "text", + "content": "Yuen-Hsien Tseng, Wun-Syuan Wu, Chia-Yueh Chang, Hsueh-Chih Chen, and Wei-Lun Hsu. 2020. Development and validation of a corpus for machine humor comprehension. In Proceedings of the Twelfth Language Resources and Evaluation Conference, pages 1346-1352, Marseille, France. European Language Resources Association." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 412, + 291, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 412, + 291, + 488 + ], + "spans": [ + { + "bbox": [ + 69, + 412, + 291, + 488 + ], + "type": "text", + "content": "Ashmal Vayani, Dinura Dissanayake, Hasindri Watawana, Noor Ahsan, Nevasini Sasikumar, Omkar Thawakar, Henok Biadglin Ademtew, Yahya Hmaiti, Amandeep Kumar, Kartik Kuckreja, and 1 others. 2024. All languages matter: Evaluating Imms on culturally diverse 100 languages. arXiv preprint arXiv:2411.16508." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 497, + 291, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 497, + 291, + 552 + ], + "spans": [ + { + "bbox": [ + 69, + 497, + 291, + 552 + ], + "type": "text", + "content": "Benyou Wang, Xiang Wu, Xiaokang Liu, Jianquan Li, Prayag Tiwari, and Qianqian Xie. 2022. Can language models make fun? a case study in chinese comical crosstalk. In Annual Meeting of the Association for Computational Linguistics." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 560, + 291, + 625 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 560, + 291, + 625 + ], + "spans": [ + { + "bbox": [ + 69, + 560, + 291, + 625 + ], + "type": "text", + "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, and 1 others. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 633, + 291, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 633, + 291, + 689 + ], + "spans": [ + { + "bbox": [ + 69, + 633, + 291, + 689 + ], + "type": "text", + "content": "Orion Weller and Kevin Seppi. 2020. The rJokes dataset: a large scale humor collection. 
In Proceedings of the Twelfth Language Resources and Evaluation Conference, pages 6136-6141, Marseille, France. European Language Resources Association." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 696, + 291, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 696, + 291, + 774 + ], + "spans": [ + { + "bbox": [ + 69, + 696, + 291, + 774 + ], + "type": "text", + "content": "Jiaming Wu, Hongfei Lin, Liang Yang, and Bo Xu. 2021. Mumor: A multimodal dataset for humor detection in conversations. In *Natural Language Processing and Chinese Computing: 10th CCF International Conference*, NLPCC 2021, Qingdao, China, October 13–17, 2021, Proceedings, Part I, page 619–627, Berlin, Heidelberg. Springer-Verlag." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 526, + 521 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 305, + 72, + 526, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 526, + 150 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 526, + 150 + ], + "type": "text", + "content": "Yufan Wu, Yinghui He, Yilin Jia, Rada Mihalcea, Yu-long Chen, and Naihao Deng. 2023. Hi-ToM: A benchmark for evaluating higher-order theory of mind reasoning in large language models. In *Findings of the Association for Computational Linguistics: EMNLP* 2023, pages 10691-10706, Singapore. Association for Computational Linguistics." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 158, + 526, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 158, + 526, + 225 + ], + "spans": [ + { + "bbox": [ + 304, + 158, + 526, + 225 + ], + "type": "text", + "content": "Diyi Yang, Alon Lavie, Chris Dyer, and Eduard Hovy. 2015. Humor recognition and humor anchor extraction. 
In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 2367-2376, Lisbon, Portugal. Association for Computational Linguistics." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 232, + 526, + 288 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 232, + 526, + 288 + ], + "spans": [ + { + "bbox": [ + 304, + 232, + 526, + 288 + ], + "type": "text", + "content": "Dongyu Zhang, Heting Zhang, Xikai Liu, Hongfei Lin, and Feng Xia. 2019. Telling the whole story: A manually annotated chinese dataset for the analysis of humor in jokes. In Conference on Empirical Methods in Natural Language Processing." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 296, + 526, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 296, + 526, + 350 + ], + "spans": [ + { + "bbox": [ + 304, + 296, + 526, + 350 + ], + "type": "text", + "content": "Min Zhang, Jianfeng He, Taoran Ji, and Chang-Tien Lu. 2024a. Don't go to extremes: Revealing the excessive sensitivity and calibration limitations of llms in implicit hate speech detection. Preprint, arXiv:2402.11406." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 359, + 526, + 416 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 359, + 526, + 416 + ], + "spans": [ + { + "bbox": [ + 304, + 359, + 526, + 416 + ], + "type": "text", + "content": "Tianyi Zhang, Faisal Ladhak, Esin Durmus, Percy Liang, Kathleen McKeown, and Tatsunori B. Hashimoto. 2024b. Benchmarking Large Language Models for News Summarization. Transactions of the Association for Computational Linguistics, 12:39-57." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 423, + 526, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 423, + 526, + 468 + ], + "spans": [ + { + "bbox": [ + 304, + 423, + 526, + 468 + ], + "type": "text", + "content": "Jun Zhao, Zhihao Zhang, Qi Zhang, Tao Gui, and Xuanjing Huang. 2024. Llama beyond english: An empirical study on language capability transfer. arXiv preprint arXiv:2401.01055." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 476, + 526, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 476, + 526, + 521 + ], + "spans": [ + { + "bbox": [ + 304, + 476, + 526, + 521 + ], + "type": "text", + "content": "Jeffrey Zhou, Tianjian Lu, Swaroop Mishra, Siddhartha Brahma, Sujoy Basu, Yi Luan, Denny Zhou, and Le Hou. 2023. Instruction-following evaluation for large language models. Preprint, arXiv:2311.07911." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 283, + 781, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 781, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 283, + 781, + 312, + 791 + ], + "type": "text", + "content": "21810" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 71, + 164, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 71, + 164, + 84 + ], + "spans": [ + { + "bbox": [ + 68, + 71, + 164, + 84 + ], + "type": "text", + "content": "A Contributions" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 117, + 291, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 117, + 291, + 157 + ], + "spans": [ + { + "bbox": [ + 67, + 117, + 291, + 157 + ], + "type": "text", + "content": "Idea Proposal. 
Naihao Deng proposed the high-level idea of constructing a humor understanding benchmark sourced from RZB data." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 168, + 289, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 168, + 289, + 194 + ], + "spans": [ + { + "bbox": [ + 67, + 168, + 289, + 194 + ], + "type": "text", + "content": "Background Survey. Ruiqi He surveyed the humor-related tasks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 205, + 289, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 205, + 289, + 231 + ], + "spans": [ + { + "bbox": [ + 67, + 205, + 289, + 231 + ], + "type": "text", + "content": "Data Processing. Ruiqi He crawled and processed the jokes from RZB." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 242, + 290, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 242, + 290, + 296 + ], + "spans": [ + { + "bbox": [ + 67, + 242, + 290, + 296 + ], + "type": "text", + "content": "Annotation. Ruiqi He annotated the explanations for the RZB jokes. Yushu He, Longju Bai, Jiarui Liu, Zhenjie Sun, Zhenghao Tang, He Wang, Nai-hao Deng conducted the preference annotations." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 306, + 289, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 306, + 289, + 333 + ], + "spans": [ + { + "bbox": [ + 67, + 306, + 289, + 333 + ], + "type": "text", + "content": "Experiments. Ruiqi He, Hanchen Xia, and Naihao Deng conducted the experiments." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 343, + 289, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 343, + 289, + 370 + ], + "spans": [ + { + "bbox": [ + 67, + 343, + 289, + 370 + ], + "type": "text", + "content": "Result Aggregation. Ruiqi He, Naihao Deng, Yushu He aggregated the results." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 380, + 289, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 380, + 289, + 421 + ], + "spans": [ + { + "bbox": [ + 67, + 380, + 289, + 421 + ], + "type": "text", + "content": "Paper Writing. Ruiqi He and Naihao Deng drafted the paper. Other authors provided revisions and feedback on the paper." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 431, + 232, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 431, + 232, + 444 + ], + "spans": [ + { + "bbox": [ + 68, + 431, + 232, + 444 + ], + "type": "text", + "content": "Naihao Deng organized the research." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 488, + 236, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 488, + 236, + 502 + ], + "spans": [ + { + "bbox": [ + 68, + 488, + 236, + 502 + ], + "type": "text", + "content": "B Agreement Rate Calculation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 535, + 291, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 535, + 291, + 643 + ], + "spans": [ + { + "bbox": [ + 67, + 535, + 291, + 643 + ], + "type": "text", + "content": "We calculate the percentage agreement rate among annotators who annotate their preferences between explanations from LLMs and humans. 
The results show an average inter-annotator agreement of " + }, + { + "bbox": [ + 67, + 535, + 291, + 643 + ], + "type": "inline_equation", + "content": "61.9\\%" + }, + { + "bbox": [ + 67, + 535, + 291, + 643 + ], + "type": "text", + "content": " for GPT-4o and " + }, + { + "bbox": [ + 67, + 535, + 291, + 643 + ], + "type": "inline_equation", + "content": "60.9\\%" + }, + { + "bbox": [ + 67, + 535, + 291, + 643 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 67, + 535, + 291, + 643 + ], + "type": "inline_equation", + "content": "\\mathrm{ERNIE}_{4}" + }, + { + "bbox": [ + 67, + 535, + 291, + 643 + ], + "type": "text", + "content": " -turbo. Given the inherent subjectivity of humor interpretation tasks (Deng et al., 2023), the combined average agreement percentage of " + }, + { + "bbox": [ + 67, + 535, + 291, + 643 + ], + "type": "inline_equation", + "content": "61.4\\%" + }, + { + "bbox": [ + 67, + 535, + 291, + 643 + ], + "type": "text", + "content": " is decent." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 68, + 687, + 289, + 714 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 687, + 289, + 714 + ], + "spans": [ + { + "bbox": [ + 68, + 687, + 289, + 714 + ], + "type": "text", + "content": "C Annotation Instructions for Preference Annotation" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "type": "text", + "content": "We include the following instructions for the preference annotations of the joke explanations:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 309, + 73, + 364, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 73, + 364, + 84 + ], + "spans": [ + { + "bbox": [ + 309, + 73, + 364, + 84 + ], + "type": "text", + "content": "Instruction" + } + ] + } + ], + "index": 13 + }, + 
{ + "bbox": [ + 307, + 97, + 518, + 151 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 97, + 518, + 151 + ], + "spans": [ + { + "bbox": [ + 307, + 97, + 518, + 151 + ], + "type": "text", + "content": "“在这个标注中,你将会看到一个笑话和对这个笑话的幽默之处的两个解释,请你比较哪个解释更好的解释了这个笑话的幽默之处,并从以下三个标签中选择:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 309, + 152, + 360, + 191 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 309, + 152, + 348, + 164 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 152, + 348, + 164 + ], + "spans": [ + { + "bbox": [ + 309, + 152, + 348, + 164 + ], + "type": "text", + "content": "1. 解释1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 309, + 166, + 350, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 166, + 350, + 178 + ], + "spans": [ + { + "bbox": [ + 309, + 166, + 350, + 178 + ], + "type": "text", + "content": "2. 解释2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 309, + 179, + 360, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 179, + 360, + 191 + ], + "spans": [ + { + "bbox": [ + 309, + 179, + 360, + 191 + ], + "type": "text", + "content": "3. 一样好”" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 386, + 194, + 442, + 204 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 386, + 194, + 442, + 204 + ], + "spans": [ + { + "bbox": [ + 386, + 194, + 442, + 204 + ], + "type": "text", + "content": "Translation" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 206, + 520, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 206, + 520, + 274 + ], + "spans": [ + { + "bbox": [ + 307, + 206, + 520, + 274 + ], + "type": "text", + "content": "\"In this annotation task, you will see a joke along with two explanations of its humor. 
Please compare which explanation better explains the reason why this joke is funny and choose from the following three labels:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 309, + 275, + 426, + 315 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 309, + 275, + 382, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 275, + 382, + 287 + ], + "spans": [ + { + "bbox": [ + 309, + 275, + 382, + 287 + ], + "type": "text", + "content": "1. Explanation 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 309, + 289, + 383, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 289, + 383, + 301 + ], + "spans": [ + { + "bbox": [ + 309, + 289, + 383, + 301 + ], + "type": "text", + "content": "2. Explanation 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 309, + 301, + 426, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 301, + 426, + 315 + ], + "spans": [ + { + "bbox": [ + 309, + 301, + 426, + 315 + ], + "type": "text", + "content": "3. Both are equally good.\"" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 302, + 333, + 526, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 333, + 526, + 386 + ], + "spans": [ + { + "bbox": [ + 302, + 333, + 526, + 386 + ], + "type": "text", + "content": "For each example, we randomly assign the explanations from the LLMs and the human as Explanation 1 and Explanation 2 to ensure a fair comparison." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 303, + 399, + 494, + 413 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 399, + 494, + 413 + ], + "spans": [ + { + "bbox": [ + 303, + 399, + 494, + 413 + ], + "type": "text", + "content": "D Discussion on Evaluation Setting" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 302, + 423, + 526, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 423, + 526, + 774 + ], + "spans": [ + { + "bbox": [ + 302, + 423, + 526, + 774 + ], + "type": "text", + "content": "Why Zero-Shot w.o. SFT? The primary research objective of this paper is to determine how well foundational LLMs can understand Chinese humor without relying on supervised fine-tuning for this binary classification task. The focus is on investigating the innate humor-understanding ability of these models through zero-shot and zero-shot CoT prompting. This aligns with the overarching goal of examining their general capabilities without additional task-specific training. From a human behavior perspective, individuals generally acquire a sense of humor through exposure and experience rather than explicit classroom instruction (McGhee, 1971; FRY, 1994; Gervais and Wilson, 2005). Analogously, our purpose lies in evaluating the models' intrinsic ability to recognize and interpret humor without deliberate, task-specific fine-tuning. Moreover, the experimental design follows practices from other benchmarks, such as GPQA (Rein et al., 2023), AI2ARC (Clark et al., 2018), and IFEVAL (Zhou et al., 2023), which do not provide predefined train/dev/test splits. Under these conditions, it is common to assess models in a zero-shot manner to directly evaluate their capabilities on each respective task (Touvron et al., 2023a,b; Bai et al., 2023; Abdin et al., 2024)." 
+ } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 283, + 780, + 311, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 780, + 311, + 791 + ], + "spans": [ + { + "bbox": [ + 283, + 780, + 311, + 791 + ], + "type": "text", + "content": "21811" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 68, + 71, + 291, + 247 + ], + "blocks": [ + { + "bbox": [ + 68, + 71, + 291, + 247 + ], + "lines": [ + { + "bbox": [ + 68, + 71, + 291, + 247 + ], + "spans": [ + { + "bbox": [ + 68, + 71, + 291, + 247 + ], + "type": "table", + "html": "
Example(zh)真可怕,犯罪嫌疑人就在我们之中,被告席上一名法警对另一名法警说。
(en)“Terrifying, the criminal suspect is right between the two of us,” said one bailiff to another in the defendant's dock.
Correct Humor Explanation“between us” can refer to “either one of us” or literally means the actual physical position.
GPT-4o's Answer(zh)…暗示他们自己可能是犯罪嫌疑人…
(en)…it suggests the bailiffs themselves might be criminal suspects…
Failure ReasonsFail to address the literal meaning.
", + "image_path": "46535b0b36dd29ef0e96b1dab5b19ca84f0d5d1cf30a0cd676e0654f53fcb999.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 302, + 184, + 316 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 302, + 184, + 316 + ], + "spans": [ + { + "bbox": [ + 68, + 302, + 184, + 316 + ], + "type": "text", + "content": "E More Error Cases" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 325, + 291, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 325, + 291, + 365 + ], + "spans": [ + { + "bbox": [ + 67, + 325, + 291, + 365 + ], + "type": "text", + "content": "We note that many examples here encompass multiple error types, highlighting the complexity of Chumor." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 374, + 291, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 374, + 291, + 604 + ], + "spans": [ + { + "bbox": [ + 67, + 374, + 291, + 604 + ], + "type": "text", + "content": "Insufficient Contextual Understanding. LLMs may fail to ground their responses to the context when they explain the joke. For instance, in the example in Figure 9, \"between us\" typically means \"either you or me\", but it also has the literal meaning to indicate the person standing \"between us\", which is the right interpretation given that the two bailiffs are talking about the criminal. However, GPT-4o only reasons that \"the criminal is either you or me\" but fails to capture the literal meaning from the context. We hypothesize that in the pretraining corpus, \"between us\" most likely acquires the meaning of \"either you or me\" rather than the literal meaning in a scenario like this, which creates a bias that prevents the model from reasoning about the literal interpretation required for this specific explanation." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 613, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 613, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 613, + 291, + 775 + ], + "type": "text", + "content": "Glyph-based Error. LLMs may fail to interpret the visual puns based on the shape or form of Chinese characters. Glyph-based humor in Chinese leverages its logographic writing system, where characters integrate both semantic and visual elements. Unlike the phonemic alphabet used in English, Chinese characters' pictorial and ideographic nature allows for visual puns in jokes (Daniels and Bright, 1996). In the example in Figure 10, \"simplify\" does not refer to simplifying the tally problem conceptually, but to simplifying the traditional Chinese characters to simplified Chinese" + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 304, + 70, + 526, + 257 + ], + "blocks": [ + { + "bbox": [ + 67, + 257, + 291, + 282 + ], + "lines": [ + { + "bbox": [ + 67, + 257, + 291, + 282 + ], + "spans": [ + { + "bbox": [ + 67, + 257, + 291, + 282 + ], + "type": "text", + "content": "Figure 9: Insufficient contextual understanding example." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 304, + 70, + 526, + 257 + ], + "lines": [ + { + "bbox": [ + 304, + 70, + 526, + 257 + ], + "spans": [ + { + "bbox": [ + 304, + 70, + 526, + 257 + ], + "type": "table", + "html": "
Example(zh)我把電車難題簡化了,現在是电车难题。
(en)I simplified the trolley problem (in traditional Chinese), now it's the trolley problem (in simplified Chinese).
Correct Humor ExplanationThe joke does not actually simplify the scenario of the trolley problem; instead, it converts the traditional Chinese characters “電車難題” into the simplified Chinese characters “电车难题”.
GPT-4o's Answer(zh)...这个笑话的幽默之处在于通过将“電車難題”中的“難”字去掉,变成了“电车題”。
(en)...The humor of this joke lies in removing the character “hard” from “the hard tally problem”, turning it into “the tally problem”.
Failure ReasonsFail to interpret the visual puns in the shape or form of the characters.
", + "image_path": "8e9b7c6a740fafcc6643850905b76cb94233b6ddef0b447eaa715f0bae3b01b0.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 304, + 292, + 526, + 571 + ], + "blocks": [ + { + "bbox": [ + 334, + 266, + 493, + 279 + ], + "lines": [ + { + "bbox": [ + 334, + 266, + 493, + 279 + ], + "spans": [ + { + "bbox": [ + 334, + 266, + 493, + 279 + ], + "type": "text", + "content": "Figure 10: Glyph-based error example." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 304, + 292, + 526, + 571 + ], + "lines": [ + { + "bbox": [ + 304, + 292, + 526, + 571 + ], + "spans": [ + { + "bbox": [ + 304, + 292, + 526, + 571 + ], + "type": "table", + "html": "
Example(zh) 小王订了张飞去北京的机票,给张飞省了一大笔钱。
(en) Xiaowang booked an airline ticket to Beijing, saving Zhang Fei a lot of money.
Correct \nHumor \nExplanationThere are two ways to parse the first half of the sentence: \n(1) 小王/订了/张/飞去/北京的/机票 XiaoWang/ booked/ a/ ticket to fly to Beijing. \n(2) 小王/订了/张飞/去北京的/机票 XiaoWang / booked/ Zhangfei/ a ticket/ to Beijing.
Typically, people would interpret in the first way as Zhangfei is a fictional figure and do not appear in the daily conversation. However, the second half of the sentence confirms that the second way of parsing turns out to be correct. Such contrast is hilarious.
GPT-4o's \nAnswer(zh) ...由于历史人物张飞已经不在世,所以给他省了一大笔钱。
(en) ... since the historical figure Zhang Fei is dead and cannot take a flight, the joke humorously suggests that he saved a lot of money.
Failure \nReasonsFail to parse textual elements in a sentence dynamically.
", + "image_path": "d467f8decb9e010f942f43b21e3fb0ea4d93dce6a3dfa366ded7f9e6b50eca2f.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 345, + 580, + 482, + 592 + ], + "lines": [ + { + "bbox": [ + 345, + 580, + 482, + 592 + ], + "spans": [ + { + "bbox": [ + 345, + 580, + 482, + 592 + ], + "type": "text", + "content": "Figure 11: Parsing error example." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 302, + 615, + 526, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 615, + 526, + 698 + ], + "spans": [ + { + "bbox": [ + 302, + 615, + 526, + 698 + ], + "type": "text", + "content": "characters as the traditional Chinese characters are also termed as \"complicated characters\". However, LLMs struggle to reason such graphemic differences as there are no explicit connections between the textual meaning and visual representations of the glyphs." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 708, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 708, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 708, + 526, + 775 + ], + "type": "text", + "content": "Parsing Error. LLMs often fail to parse sentences in multiple ways simultaneously, leading to difficulties in explaining jokes that require different parsing for the same sentence. 
In the example in Figure 11, the humor hinges on the ambiguity of" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "text", + "content": "21812" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 293, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 293, + 275 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 293, + 275 + ], + "type": "text", + "content": "the phrase \"张飞\", which can be interpreted either as part of a verb phrase implying \"a ticket flying to Beijing\" or as a proper noun, referring to the historical figure Zhang Fei. This ambiguity stems from the flexibility of the Chinese language, where each character can function independently as a word or combine with others to form new words or phrases. There are decades of research studying the problem of parsing Chinese (Sun and Jurafsky, 2004; Sun et al., 2009). Recently, researchers have proposed task-specific tokenization approaches that adapt the parsing process to better align with downstream tasks (Liu et al., 2021, 2023b). However, how to incorporate different ways of parsing at one time still remains challenging." + } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 69, + 284, + 290, + 502 + ], + "blocks": [ + { + "bbox": [ + 69, + 284, + 290, + 502 + ], + "lines": [ + { + "bbox": [ + 69, + 284, + 290, + 502 + ], + "spans": [ + { + "bbox": [ + 69, + 284, + 290, + 502 + ], + "type": "table", + "html": "
Example(zh) 五步蛇的危险性是李白的两倍。 (en) The danger of a five-step snake is twice that of Li Bai.
Correct Humor ExplanationKill one in ten steps is a line from a poem by the ancient Chinese poet Li Bai, suggesting that while it takes Li Bai ten steps to kill, a person can die within five steps of being bitten by the “Five-Step Snake” hence it is twice as dangerous as Li Bai.
(zh) …李白的名字听起来像“二步”,所以“五步蛇”的危险性被戏称为是“二步”的两倍。
GPT-4o's Answer(en) …Li Bai's (pronounced as lee bye) name sounds similar to “two steps” (pronounced as r boo) in Chinese, so the danger of “Five-Step Snake” is humorously claimed to be twice that of “two steps”
Failure ReasonsFabricate ungrounded details or facts.
", + "image_path": "4d1e041edbe6852fb969e635d43982bfe36d895fdd82a978fd714c831e6677e5.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 513, + 251, + 526 + ], + "lines": [ + { + "bbox": [ + 107, + 513, + 251, + 526 + ], + "spans": [ + { + "bbox": [ + 107, + 513, + 251, + 526 + ], + "type": "text", + "content": "Figure 12: Hallucinations example." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 544, + 291, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 544, + 291, + 638 + ], + "spans": [ + { + "bbox": [ + 67, + 544, + 291, + 638 + ], + "type": "text", + "content": "Hallucinations. LLMs may fabricate ungrounded details or facts in joke explanations. For instance, in the explanation in Figure 12, GPT-4o claims that \"Li Bai's name sounds similar to two steps\", while \"Li Bai\" (pronounced as lee bye) does not sound like \"two steps\" (pronounced as " + }, + { + "bbox": [ + 67, + 544, + 291, + 638 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 67, + 544, + 291, + 638 + ], + "type": "text", + "content": " boo)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 640, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 640, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 640, + 291, + 775 + ], + "type": "text", + "content": "On the other hand, the correct explanation requires an understanding of a Chinese poem from Li Bai, \"十步杀一人\" (The warrior kills a person for every ten steps). This line praises the courage of the soldiers, but the joke deliberately portrays this as a characteristic of Li Bai. Therefore, compared to Li Bai who can kill a person in ten steps, a five-step snake, which can kill a person in five steps, is twice as dangerous as Li Bai. 
Such explanation requires LLMs to have a deep understanding of Chinese" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 302, + 71, + 526, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 526, + 166 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 526, + 166 + ], + "type": "text", + "content": "culture and reason over cultural references, posing a great challenge to current LLMs. Although recent works have made progress towards building LLMs beyond English (Du et al., 2024; Zhao et al., 2024), building an LLM that can comprehend such nuanced Chinese cultural terms can be extremely hard." + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "text", + "content": "21813" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 68, + 71, + 291, + 324 + ], + "blocks": [ + { + "bbox": [ + 68, + 71, + 291, + 324 + ], + "lines": [ + { + "bbox": [ + 68, + 71, + 291, + 324 + ], + "spans": [ + { + "bbox": [ + 68, + 71, + 291, + 324 + ], + "type": "table", + "html": "
Example(zh)曹操于城楼上问夏侯惇:“你瞧到了什么。”夏侯惇说:“瞧到马岱。”(en) Cao Cao, from atop the city tower, asked Xia Houdun, “What did you see?” Xia Houdun replied, “I saw Ma Dai.”
Correct Humor ExplanationCao Cao, Xiahou Dun, and Ma Dai were historical figures from the Three Kingdoms period in ancient China. The phrase “saw Ma Dai” (pronounced as chuh dow ma die) not only literally refers to spotting Ma Dai but also play on similar pronunciation with the Japanese phrase “まじと待て” (chotto matte), which means to “wait a moment.”
GPT-4o's Answer(zh) ... 这个笑话的幽默之处在于利用了“马岱”这个名字的谐音梗,听起来像“妈的”,让人误以为夏侯惇在骂人。(en) ... a pun involving the name "Ma Dai," which sounds like "damn" in Chinese. It sounds like Xiahou Dun is swearing at Cao Cao.
Failure ReasonsFail to recognize information/elements across different languages.
", + "image_path": "604942534d6cac5e355ee3bfb93d957a29a9cc0c270157a5be6db68d00b0046b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 368, + 291, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 368, + 291, + 543 + ], + "spans": [ + { + "bbox": [ + 66, + 368, + 291, + 543 + ], + "type": "text", + "content": "Cross-lingual Error. LLMs may fail to recognize elements or information across different languages. In the explanation in Figure 13, GPT-4o attempts to link the pronunciation of \"Ma Dai\" to other Chinese terms but fails to identify the similar pronunciations across the Chinese term \"瞧到马岱\" (pronounced as chuh dow ma die, meaning \"saw Ma Dai\") and the Japanese term \"ちよと待て\" (chotto matte, meaning \"wait a moment\"). Such cases require LLMs to connect pronunciations across languages, which may be rare in the LLMs' pre-training corpus and poses significant challenges to current LLMs." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 66, + 550, + 291, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 550, + 291, + 672 + ], + "spans": [ + { + "bbox": [ + 66, + 550, + 291, + 672 + ], + "type": "text", + "content": "Overcritical. Figure 14 shows when the model overly criticizes an explanation, it focuses excessively on minor details, neglecting the major point and ultimately misjudging the explanation. The joke relies on common sense about life, death, and testing rather than cultural knowledge. Under the CoT prompting, the model recognizes that the explanation identifies the main punchlines but overly criticizes the lack of cultural context." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 680, + 292, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 680, + 292, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 680, + 292, + 775 + ], + "type": "text", + "content": "Excessive Sensitivity. For " + }, + { + "bbox": [ + 67, + 680, + 292, + 775 + ], + "type": "inline_equation", + "content": "\\mathrm{ERNIE}_{4}" + }, + { + "bbox": [ + 67, + 680, + 292, + 775 + ], + "type": "text", + "content": "-turbo, in addition to errors across all the aforementioned categories, it also demonstrates excessive sensitivity to certain examples. Specifically, when content includes languages related to hate speech but used in non-harmful contexts, " + }, + { + "bbox": [ + 67, + 680, + 292, + 775 + ], + "type": "inline_equation", + "content": "\\mathrm{ERNIE}_{4}" + }, + { + "bbox": [ + 67, + 680, + 292, + 775 + ], + "type": "text", + "content": "-turbo refuses to provide an explanation. During our evalua" + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 303, + 70, + 526, + 428 + ], + "blocks": [ + { + "bbox": [ + 98, + 334, + 260, + 347 + ], + "lines": [ + { + "bbox": [ + 98, + 334, + 260, + 347 + ], + "spans": [ + { + "bbox": [ + 98, + 334, + 260, + 347 + ], + "type": "text", + "content": "Figure 13: Cross-lingual error example." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 303, + 70, + 526, + 428 + ], + "lines": [ + { + "bbox": [ + 303, + 70, + 526, + 428 + ], + "spans": [ + { + "bbox": [ + 303, + 70, + 526, + 428 + ], + "type": "table", + "html": "
Example(zh)网上传疯了的超准测试题!20道题轻松判断出你有没有死!(en)The viral online quiz! 20 questions to easily determine if you're alive or dead!
Joke Explanation(zh)这个笑话的幽默之处在于它用夸张的方式提出了一个显而易见的问题——判断自己是否还活着,这本身就不需要测试。其次,标题中“超准测试题”和“轻松判断”这样的描述让人觉得荒谬可笑,因为活着与否是最基本的感知,不需要任何测试。
Model's Answer DP(en)The humor in this joke lies in its exaggerated premise of asking an obvious question—determining whether one is still alive, which inherently doesn't require any test. Additionally, the descriptions in the title, such as "super accurate quiz" and "easily determine," come across as absurdly funny because being alive is the most basic perception, requiring no testing at all.
Model's Answer CoT(zh)完全解释(en)Fully Explained
(zh)...最终判断:部分/没有解释理由:虽然解释准确地指出了笑话的主要笑点(夸张性和标题的矛盾),但它没有深入探讨这些元素为什么会产生幽默效果,也没有讨论潜在的文化或认知背景...
(en)...Final judgment: Partially/Not explained Reason: While the explanation accurately identifies the main punchlines of the joke (exaggeration and the contradiction in the title), it does not delve into why these elements create a humorous effect or discuss the potential cultural or cognitive context behind it...
", + "image_path": "c01566a1c5ddc72f0d260e31ba078b1f5aa93658afc3313cf4e76de7871f4a51.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 438, + 525, + 486 + ], + "lines": [ + { + "bbox": [ + 302, + 438, + 525, + 486 + ], + "spans": [ + { + "bbox": [ + 302, + 438, + 525, + 486 + ], + "type": "text", + "content": "Figure 14: Overcritical example by Nematron" + }, + { + "bbox": [ + 302, + 438, + 525, + 486 + ], + "type": "inline_equation", + "content": "_{70\\mathrm{B}}" + }, + { + "bbox": [ + 302, + 438, + 525, + 486 + ], + "type": "text", + "content": ". The Nematron" + }, + { + "bbox": [ + 302, + 438, + 525, + 486 + ], + "type": "inline_equation", + "content": "_{70\\mathrm{B}}" + }, + { + "bbox": [ + 302, + 438, + 525, + 486 + ], + "type": "text", + "content": " model selects the correct answer in the DP prompting, but selects the incorrect answer due to being overly critical in the CoT prompting." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 302, + 518, + 526, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 518, + 526, + 640 + ], + "spans": [ + { + "bbox": [ + 302, + 518, + 526, + 640 + ], + "type": "text", + "content": "tion, we observe this excessive sensitivity in the " + }, + { + "bbox": [ + 302, + 518, + 526, + 640 + ], + "type": "inline_equation", + "content": "\\mathrm{ERNIE}_{4}" + }, + { + "bbox": [ + 302, + 518, + 526, + 640 + ], + "type": "text", + "content": "-turbo's responses to humor related to medical ethics and political discussions. This suggests that correctly understanding the context and the language toxicity remains an open challenge (Zhang et al., 2024a). Such issues are particularly critical for humor explanation, as misclassifying non-toxic context can cause the responses to deviate from the intended humor." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 671, + 511, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 671, + 511, + 685 + ], + "spans": [ + { + "bbox": [ + 302, + 671, + 511, + 685 + ], + "type": "text", + "content": "F Prompts for DP and CoT in Chumor" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 708, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 708, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 708, + 526, + 775 + ], + "type": "text", + "content": "This section outlines the prompts used in Chumor to evaluate whether an explanation fully explains a joke. Two prompting strategies are adopted: Direct Prompting (DP) and Chain of Thought (CoT). Below are the details of each approach:" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "text", + "content": "21814" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 74, + 73, + 183, + 86 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 73, + 183, + 86 + ], + "spans": [ + { + "bbox": [ + 74, + 73, + 183, + 86 + ], + "type": "text", + "content": "Direct Prompting (DP)" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 73, + 99, + 284, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 99, + 284, + 153 + ], + "spans": [ + { + "bbox": [ + 73, + 99, + 284, + 153 + ], + "type": "text", + "content": "你将看到一个笑话以及对这个笑话的解释。请判断这个解释是否完全解释了笑话。根据判断,选择“完全解释”或“部分/没有解释”,不需要解释为什么对或者不对。" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 74, + 154, + 135, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 154, + 135, + 168 
+ ], + "spans": [ + { + "bbox": [ + 74, + 154, + 135, + 168 + ], + "type": "text", + "content": "笑话:[joke]" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 74, + 168, + 188, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 168, + 188, + 181 + ], + "spans": [ + { + "bbox": [ + 74, + 168, + 188, + 181 + ], + "type": "text", + "content": "笑话解释:[explanation]" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 151, + 182, + 207, + 193 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 182, + 207, + 193 + ], + "spans": [ + { + "bbox": [ + 151, + 182, + 207, + 193 + ], + "type": "text", + "content": "Translation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 73, + 195, + 285, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 195, + 285, + 275 + ], + "spans": [ + { + "bbox": [ + 73, + 195, + 285, + 275 + ], + "type": "text", + "content": "You will see a joke and an explanation of the joke. Please determine whether this explanation fully explains the joke. Based on your judgment, choose either \"fully explain\" or \"partially/does not explain.\" You do not need to explain why it is correct or incorrect." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 74, + 276, + 128, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 276, + 128, + 290 + ], + "spans": [ + { + "bbox": [ + 74, + 276, + 128, + 290 + ], + "type": "text", + "content": "Joke: [joke]" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 74, + 290, + 193, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 290, + 193, + 303 + ], + "spans": [ + { + "bbox": [ + 74, + 290, + 193, + 303 + ], + "type": "text", + "content": "Explanation: [explanation]" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 320, + 290, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 320, + 290, + 375 + ], + "spans": [ + { + "bbox": [ + 67, + 320, + 290, + 375 + ], + "type": "text", + "content": "The DP prompt is designed to encourage concise decision-making. It directly asks the model to evaluate the completeness of the explanation without requiring reasoning or justification." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 74, + 386, + 189, + 397 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 386, + 189, + 397 + ], + "spans": [ + { + "bbox": [ + 74, + 386, + 189, + 397 + ], + "type": "text", + "content": "Chain of Thought (CoT)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 73, + 411, + 284, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 411, + 284, + 465 + ], + "spans": [ + { + "bbox": [ + 73, + 411, + 284, + 465 + ], + "type": "text", + "content": "你将看到一个笑话以及对这个笑话的解释。请逐步思考,写下过程并最终判断这个解释是否完全解释了笑话。根据判断,选择“完全解释”或“部分/没有解释”。" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 74, + 467, + 135, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 467, + 135, + 480 + ], + "spans": [ + { + "bbox": [ + 74, + 467, + 135, + 480 + ], + "type": "text", + "content": "笑话:[joke]" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 74, + 481, + 188, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 481, + 188, + 493 + ], + "spans": [ + { + "bbox": [ + 74, + 481, + 188, + 493 + ], + "type": "text", + "content": "笑话解释:[explanation]" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 151, + 495, + 207, + 505 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 495, + 207, + 505 + ], + "spans": [ + { + "bbox": [ + 151, + 495, + 207, + 505 + ], + "type": "text", + "content": "Translation" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 73, + 507, + 284, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 507, + 284, + 588 + ], + "spans": [ + { + "bbox": [ + 73, + 507, + 284, + 588 + ], + "type": "text", + "content": "You will see a joke and an explanation of the joke. Please think step by step, write down your reasoning process, and finally determine whether this explanation fully explains the joke. 
Based on your judgment, choose either \"fully explains\" or \"partially/does not explain.\"" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 74, + 589, + 128, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 589, + 128, + 602 + ], + "spans": [ + { + "bbox": [ + 74, + 589, + 128, + 602 + ], + "type": "text", + "content": "Joke: [joke]" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 74, + 603, + 193, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 603, + 193, + 615 + ], + "spans": [ + { + "bbox": [ + 74, + 603, + 193, + 615 + ], + "type": "text", + "content": "Explanation: [explanation]" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 67, + 633, + 290, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 633, + 290, + 699 + ], + "spans": [ + { + "bbox": [ + 67, + 633, + 290, + 699 + ], + "type": "text", + "content": "The CoT prompt, in contrast, requires the model to reason step by step before reaching a conclusion. This approach aims to improve transparency by explicitly documenting the thought process behind the evaluation." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 67, + 712, + 267, + 726 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 712, + 267, + 726 + ], + "spans": [ + { + "bbox": [ + 67, + 712, + 267, + 726 + ], + "type": "text", + "content": "G Joke Type Distribution in Chumor" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 67, + 735, + 290, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 735, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 735, + 290, + 775 + ], + "type": "text", + "content": "We sampled 200 datapoints from Chumorto analyze the distribution of joke types, as shown in Figure 15. 
Note that a single joke may belong to" + } + ] + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 315, + 68, + 513, + 164 + ], + "blocks": [ + { + "bbox": [ + 315, + 68, + 513, + 164 + ], + "lines": [ + { + "bbox": [ + 315, + 68, + 513, + 164 + ], + "spans": [ + { + "bbox": [ + 315, + 68, + 513, + 164 + ], + "type": "image", + "image_path": "326993549316df513bb9806d9d31515b487772d05a316df9b370565647db6178.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 172, + 524, + 196 + ], + "lines": [ + { + "bbox": [ + 302, + 172, + 524, + 196 + ], + "spans": [ + { + "bbox": [ + 302, + 172, + 524, + 196 + ], + "type": "text", + "content": "Figure 15: Distribution of Joke Types in 200 Sampled Datapoints." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "bbox": [ + 302, + 213, + 525, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 213, + 525, + 241 + ], + "spans": [ + { + "bbox": [ + 302, + 213, + 525, + 241 + ], + "type": "text", + "content": "multiple categories, as it can exhibit features of more than one joke type." 
+ } + ] + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 304, + 282, + 524, + 649 + ], + "blocks": [ + { + "bbox": [ + 303, + 251, + 491, + 265 + ], + "lines": [ + { + "bbox": [ + 303, + 251, + 491, + 265 + ], + "spans": [ + { + "bbox": [ + 303, + 251, + 491, + 265 + ], + "type": "text", + "content": "H Detailed Results of Experiments" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 304, + 282, + 524, + 649 + ], + "lines": [ + { + "bbox": [ + 304, + 282, + 524, + 649 + ], + "spans": [ + { + "bbox": [ + 304, + 282, + 524, + 649 + ], + "type": "image", + "image_path": "6df1b3c527229283c9d7de199eea2d6285b3eca5b0cdc18e78ecc11bf3f068e7.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 655, + 525, + 679 + ], + "lines": [ + { + "bbox": [ + 302, + 655, + 525, + 679 + ], + "spans": [ + { + "bbox": [ + 302, + 655, + 525, + 679 + ], + "type": "text", + "content": "Figure 16: The Matthew's correlation coefficient of different models' test results in DP and CoT." + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + }, + { + "bbox": [ + 302, + 694, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 694, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 694, + 525, + 775 + ], + "type": "text", + "content": "For evaluation, we input each prompt into the model and collect its responses, comparing them to the labels in Chumor. A model's response is considered correct if it matches the reference label. 
If the model provides an incorrect answer or doesn't generate a response at all (due to safety protocols or" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "text", + "content": "21815" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 290, + 112 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 290, + 112 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 290, + 112 + ], + "type": "text", + "content": "filtering sensitive terms), it is marked as incorrect. Such scenario is rare, occurring only 21 times in our experiments, and exclusively with GLM-4plus." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 112, + 290, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 112, + 290, + 232 + ], + "spans": [ + { + "bbox": [ + 67, + 112, + 290, + 232 + ], + "type": "text", + "content": "We highlight that CoT prompting at most cases degrade the models' performance on Chumor. As shown in Figure 16, only " + }, + { + "bbox": [ + 67, + 112, + 290, + 232 + ], + "type": "inline_equation", + "content": "\\mathrm{Athene}_{70\\mathrm{B}}" + }, + { + "bbox": [ + 67, + 112, + 290, + 232 + ], + "type": "text", + "content": " achieves a significant improvement. However, this is offset by its poorest performance under DP prompting among the models. GPT-4o shows a slight improvement, with its MCC score increasing from 0.19 to 0.20. And all other eight models exhibit different degrees of performance decline." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 70, + 242, + 289, + 741 + ], + "blocks": [ + { + "bbox": [ + 70, + 242, + 289, + 741 + ], + "lines": [ + { + "bbox": [ + 70, + 242, + 289, + 741 + ], + "spans": [ + { + "bbox": [ + 70, + 242, + 289, + 741 + ], + "type": "image", + "image_path": "6141e183d684833465b3a4c899603522d9953ad32c94768a7106eaf7f5d4f1c1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 72, + 750, + 285, + 762 + ], + "lines": [ + { + "bbox": [ + 72, + 750, + 285, + 762 + ], + "spans": [ + { + "bbox": [ + 72, + 750, + 285, + 762 + ], + "type": "text", + "content": "Figure 17: CoT accuracy on different joke types " + }, + { + "bbox": [ + 72, + 750, + 285, + 762 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 72, + 750, + 285, + 762 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 283, + 781, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 781, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 283, + 781, + 312, + 791 + ], + "type": "text", + "content": "21816" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 129, + 86, + 465, + 260 + ], + "blocks": [ + { + "bbox": [ + 129, + 86, + 465, + 260 + ], + "lines": [ + { + "bbox": [ + 129, + 86, + 465, + 260 + ], + "spans": [ + { + "bbox": [ + 129, + 86, + 465, + 260 + ], + "type": "table", + "html": "
ModelDPCoT
MCCACC (%)FPR (%)FNR (%)MCCACC (%)FPR (%)FNR (%)
Yi34B0.1044.9597.240.210.0947.1789.305.44
Nemotron70B0.1956.3061.2620.870.1457.1740.2846.14
Athene70B0.0844.5997.830.280.1247.2691.102.89
ERNIE4-turbo0.2960.2959.8313.570.1145.1696.930.14
QWen2.572B0.1948.4690.670.690.1749.4586.913.31
Mistral123B0.2255.5669.2612.190.1651.1879.928.40
Gemini1.5-pro0.2454.0077.425.170.1960.3233.8147.31
GLM-4plus0.2455.5672.288.260.1458.1332.9653.44
GPT-4o0.1951.8780.026.680.2050.6485.003.03
GPT-4turbo0.2052.3279.286.610.1751.2780.876.96
", + "image_path": "a2350fa2fb3236f14324d34d9047f95e61b4fa92dc3097fd9d164c5d29fee80a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 85, + 333, + 508, + 722 + ], + "blocks": [ + { + "bbox": [ + 67, + 269, + 525, + 295 + ], + "lines": [ + { + "bbox": [ + 67, + 269, + 525, + 295 + ], + "spans": [ + { + "bbox": [ + 67, + 269, + 525, + 295 + ], + "type": "text", + "content": "Table 4: Performance metrics for explanation evaluation including Matthew's correlation coefficient (MCC), accuracy (ACC), false positive rate (FPR), and false negative rate (FNR)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 85, + 333, + 508, + 722 + ], + "lines": [ + { + "bbox": [ + 85, + 333, + 508, + 722 + ], + "spans": [ + { + "bbox": [ + 85, + 333, + 508, + 722 + ], + "type": "table", + "html": "
ModelSourceDPCoT
MCCACC(%)FPR(%)FNR(%)MCCACC(%)FPR(%)FNR(%)
Athene70BOverall0.0844.5997.830.280.1247.2691.102.89
ERNIE Bot0.1252.3897.150.000.1554.2491.132.13
GPT-4o0.0333.9098.510.860.0837.6791.064.50
ERNIE-turboOverall0.2960.2959.8313.570.1145.1696.930.14
ERNIE Bot0.2358.6478.145.990.1653.4794.830.10
GPT-4o0.2762.5441.3829.550.0433.7699.040.21
Gemini1.5-proOverall0.2454.0077.425.170.1960.3233.8147.31
ERNIE Bot0.2760.6674.135.890.2360.8728.6249.24
GPT-4o0.2144.8580.743.640.1759.5639.0443.25
GLM-4plusOverall0.2455.5672.288.260.1458.1332.9653.44
ERNIE Bot0.2559.8374.976.700.1557.5637.0647.61
GPT-4o0.2149.6869.5711.560.0658.9228.8365.74
GPT-4turboOverall0.2052.3279.286.610.1751.2780.876.96
ERNIE Bot0.2057.2580.995.990.2258.7576.147.72
GPT-4o0.1845.5677.557.920.1341.0185.645.35
GPT-4oOverall0.1951.8780.026.680.2050.6485.003.03
ERNIE Bot0.2157.8279.416.400.2458.0782.472.94
GPT-4o0.1643.7180.647.280.1540.4487.553.21
Nemotron70BOverall0.1956.3061.2620.870.1457.1740.2846.14
ERNIE Bot0.2260.6656.8122.540.1457.0439.1846.60
GPT-4o0.1850.3265.7417.340.1357.3641.3845.18
Mistral123BOverall0.2255.5669.2612.190.1651.1879.928.40
ERNIE Bot0.2561.1365.1513.600.1857.0479.737.61
GPT-4o0.2047.9073.409.210.1243.1480.1110.06
Qwen2.572BOverall0.1948.4690.670.690.1749.4586.913.31
ERNIE Bot0.1954.4592.610.300.1855.5488.072.54
GPT-4o0.1740.2388.721.500.1441.0885.744.93
Yi34BOverall0.1044.9597.240.210.0947.1789.305.44
ERNIE Bot0.1553.4294.720.300.1153.9988.385.28
GPT-4o0.0333.3399.790.000.0737.8190.215.78
", + "image_path": "b4755b453fe86ec9166c55a4a81fe89079260d39914b7d616f67a02f63b1c2f2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 729, + 525, + 755 + ], + "lines": [ + { + "bbox": [ + 67, + 729, + 525, + 755 + ], + "spans": [ + { + "bbox": [ + 67, + 729, + 525, + 755 + ], + "type": "text", + "content": "Table 5: Detailed performance metrics with source for explanation evaluation of Matthew's correlation coefficient (MCC), accuracy (ACC), false positive rate (FPR), and false negative rate (FNR)." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "text", + "content": "21817" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 87, + 273, + 506, + 546 + ], + "blocks": [ + { + "bbox": [ + 87, + 273, + 506, + 546 + ], + "lines": [ + { + "bbox": [ + 87, + 273, + 506, + 546 + ], + "spans": [ + { + "bbox": [ + 87, + 273, + 506, + 546 + ], + "type": "table", + "html": "
ModelPromptingCross-lingualGlyph-basedHomophonemicPun-basedSituationalCultural
Athene70BDP0.000.0030.0044.0061.0042.00
CoT0.0025.0030.0044.0059.0043.00
ERNIE4-turboDP50.0050.0060.0061.0070.0063.00
CoT0.000.0030.0043.0059.0042.00
Gemini1.5-proDP50.0050.0055.0063.0067.0061.00
CoT50.0075.0070.0061.0066.0069.00
GLM-4plusDP50.0025.0065.0060.0069.0060.00
CoT50.00100.0075.0064.0060.0061.00
GPT-4turboDP50.0025.0040.0057.0067.0055.00
CoT50.0025.0045.0054.0062.0056.00
GPT-4oDP0.0050.0035.0049.0063.0054.00
CoT0.0050.0035.0050.0062.0053.00
Nemotron70BDP50.0050.0065.0063.0062.0060.00
CoT100.00100.0065.0066.0060.0072.00
Mistral123BDP50.0050.0055.0061.0065.0061.00
CoT50.000.0040.0053.0066.0055.00
Qwen2.572BDP0.0050.0035.0047.0064.0051.00
CoT0.0050.0040.0053.0063.0053.00
Yi34BDP0.000.0030.0043.0060.0044.00
CoT0.0025.0040.0049.0063.0052.00
", + "image_path": "1eb9aa8f833d0b31f3f4e2b3363e87ef9f41484df688e9a2dffef02e4ccdab4a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 129, + 554, + 463, + 566 + ], + "lines": [ + { + "bbox": [ + 129, + 554, + 463, + 566 + ], + "spans": [ + { + "bbox": [ + 129, + 554, + 463, + 566 + ], + "type": "text", + "content": "Table 6: Performance metrics by joke type for explanation evaluation accuracy(%)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 283, + 781, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 781, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 283, + 781, + 312, + 791 + ], + "type": "text", + "content": "21818" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 19 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2025/CipherBank_ Exploring the Boundary of LLM Reasoning Capabilities through Cryptography Challenge/f6a4ee7e-a5b2-4a68-bf9b-6d717be3f8fe_content_list.json b/2025/CipherBank_ Exploring the Boundary of LLM Reasoning Capabilities through Cryptography Challenge/f6a4ee7e-a5b2-4a68-bf9b-6d717be3f8fe_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..fdf5ea0f9d09c0bb862868db995556974eb6731f --- /dev/null +++ b/2025/CipherBank_ Exploring the Boundary of LLM Reasoning Capabilities through Cryptography Challenge/f6a4ee7e-a5b2-4a68-bf9b-6d717be3f8fe_content_list.json @@ -0,0 +1,7737 @@ +[ + { + "type": "text", + "text": "Yu Li $^{1}$ , Qizhi Pei $^{1,2}$ , Mengyuan Sun $^{1}$ , Honglin Lin $^{1}$ , Chenlin Ming $^{1,3}$ , Xin Gao $^{1}$ , Jiang Wu $^{1}$ , Conghui He $^{1}$ , Lijun Wu $^{1*}$", + "bbox": [ + 218, + 137, + 776, + 172 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Shanghai Artificial Intelligence Laboratory", + "bbox": [ 
+ 319, + 173, + 677, + 189 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ Renmin University of China $^{3}$ Shanghai Jiao Tong University", + "bbox": [ + 238, + 189, + 757, + 206 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{liyu1, heconghui, wulijun}@pjlab.org.cn", + "bbox": [ + 307, + 206, + 690, + 222 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://cipherbankeva.github.io", + "bbox": [ + 327, + 222, + 668, + 240 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 260, + 260, + 339, + 275 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Large language models (LLMs) have demonstrated remarkable capabilities, especially the recent advancements in reasoning, such as o1 and o3, pushing the boundaries of AI. Despite these impressive achievements in mathematics and coding, the reasoning abilities of LLMs in domains requiring cryptographic expertise remain underexplored. In this paper, we introduce CipherBank, a comprehensive benchmark designed to evaluate the reasoning capabilities of LLMs in cryptographic decryption tasks. CipherBank comprises 2,358 meticulously crafted problems, covering 262 unique plaintexts across 5 domains and 14 subdomains, with a focus on privacy-sensitive and real-world scenarios that necessitate encryption. From a cryptographic perspective, CipherBank incorporates 3 major categories of encryption methods, spanning 9 distinct algorithms, ranging from classical ciphers to custom cryptographic techniques. We evaluate state-of-the-art LLMs on CipherBank, e.g., GPT-4o, DeepSeek-V3, and cutting-edge reasoning-focused models such as o1 and DeepSeek-R1. 
Our results reveal significant gaps in reasoning abilities not only between general-purpose chat LLMs and reasoning-focused LLMs but also in the performance of current reasoning-focused models when applied to classical cryptographic decryption tasks, highlighting the challenges these models face in understanding and manipulating encrypted data. Through detailed analysis and error investigations, we provide several key observations that shed light on the limitations and potential improvement areas for LLMs in cryptographic reasoning. These findings underscore the need for continuous advancements in LLM reasoning capabilities.", + "bbox": [ + 141, + 284, + 460, + 826 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 114, + 844, + 258, + 859 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Large Language Models (LLMs) have revolutionized artificial intelligence by achieving state-of", + "bbox": [ + 112, + 869, + 489, + 901 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/7e5d59106d41d9b0c2418dc813e35174fbce5dbcdc831356661e7a6d45f3346a.jpg", + "image_caption": [ + "Figure 1: Comprehensive Performance of SOTA Chat and Reasoning Models on CipherBank." + ], + "image_footnote": [], + "bbox": [ + 515, + 263, + 875, + 409 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "the-art (SOTA) performance across diverse domains, from Natural Language Understanding (NLP) (Dong et al., 2019; Karanikolas et al., 2023; Sasaki et al., 2024) to complex problem-solving (Yao et al., 2024; Ge et al., 2023). Recent models, such as GPT-4o (Hurst et al., 2024) and Claude 3.5 (Anthropic, 2024), have demonstrated unprecedented versatility, excelling in tasks ranging from creative writing to technical analysis. 
A particularly notable advancement lies in the reasoning-enhanced LLMs, which have emerged as a critical benchmark for evaluating LLMs' intelligence and now can solve mathematical problems (Wu et al., 2024; Ahn et al., 2024; Liu et al., 2024c), debug intricate code (Lee et al., 2024; Zhong et al., 2024), and even engage in multi-step logical deduction (Sun et al., 2024; Wang et al., 2023) with human-like proficiency. For instance, specialized architectures like o1 (Jaech et al., 2024) and DeepSeek-R1 (Guo et al., 2025) have pushed the boundaries of AI reasoning, achieving breakthroughs in domains such as theorem proving (Yang et al., 2024b) and algorithmic optimization (Liu et al., 2024b). These achievements underscore the transformative potential of LLMs as general-purpose reasoning engines, capable of adapting to both broad and specialized challenges.", + "bbox": [ + 507, + 486, + 884, + 921 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "C", + "bbox": [ + 127, + 83, + 163, + 110 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "cipherBank: Exploring the Boundary of LLM Reasoning Capabilities through Cryptography Challenges", + "bbox": [ + 163, + 87, + 868, + 127 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Corresponding author", + "bbox": [ + 134, + 906, + 285, + 920 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "5929", + "bbox": [ + 480, + 927, + 519, + 940 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "Findings of the Association for Computational Linguistics: ACL 2025, pages 5929-5965", + "bbox": [ + 228, + 945, + 766, + 958 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "July 27 - August 1, 2025 ©2025 Association for Computational Linguistics", + "bbox": [ + 268, + 959, + 727, + 971 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To quantify progress, the community has proposed numerous benchmarks targeting mathematical reasoning (e.g., MATH (Hendrycks 
et al., 2021a), AIME1, coding proficiency (e.g., HumanEval (Chen et al., 2021a), MBPP (Austin et al., 2021)), and general logical deduction (e.g., FOLO (Han et al., 2024), MMBench (Yuan Liu, 2023), CaLM (Chen et al., 2024). These testbeds have become indispensable tools for assessing model capabilities.", + "bbox": [ + 112, + 84, + 492, + 244 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Despite extensive evaluations in mathematics and coding, one critical domain remains underexplored: cryptographic decryption. Cryptographic reasoning (Shree et al., 2017) demands unique capabilities, including pattern recognition, algorithmic Reverse-engineering, and contextual understanding of security constraints (Schneier, 2002)—skills distinct from those tested in conventional benchmarks. This gap is particularly consequential, as cryptography lies at the heart of modern digital security (Konheim, 2007), with applications spanning privacy-preserving communication (Soomro et al., 2019), secure authentication (Rani et al., 2022), and data integrity (Sarkar et al., 2021). The absence of a rigorous benchmark for cryptographic reasoning not only limits the true understanding of LLM's reasoning ability but also hinders progress toward AI systems capable of contributing to security-critical contexts (e.g., jailbreaking (Wei et al., 2024)). OpenAI has scratched the surface of this challenge and put a demo2 when releasing their strong reasoning model o1, but no serious efforts have been made to reveal this challenge in the committee.", + "bbox": [ + 115, + 247, + 490, + 615 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address this gap, we introduce CipherBank, the first comprehensive benchmark specially designed to evaluate LLMs' reasoning capabilities in cryptographic decryption tasks. 
CipherBank is meticulously constructed to reflect real-world scenarios requiring encryption, instead of general texts that may serve as a toy testbed, with 2,358 problems derived from 262 unique plaintexts across 5 domains (e.g., Personal Privacy, Financial Information) and 14 subdomains (e.g., Identity Information, Personal Income). As for cipher algorithms, it spans 3 major cryptographic categories—Substitution Ciphers (e.g., Rot13, Vigenère), Transposition Ciphers (e.g., Reverse, SwapPairs), and custom hybrid algo", + "bbox": [ + 112, + 617, + 490, + 860 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "rithms—encompassing 9 distinct encryption methods, covering 5 difficulty levels (from Basic to Expert) to ensure a diverse range of challenges. By integrating privacy-sensitive contexts and multilayered cryptographic challenges, CipherBank provides a nuanced evaluation framework that captures both the complexity and practicality of real-world decryption tasks.", + "bbox": [ + 507, + 84, + 885, + 212 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We evaluate CipherBank on SOTA LLMs, including general-purpose models (GPT-4o (Hurst et al., 2024), DeepSeek-V3 (Liu et al., 2024a)) and reasoning-optimized models (o1 (Jaech et al., 2024), DeepSeek-R1 (Guo et al., 2025)). Results reveal striking limitations: even advanced models struggle with classical ciphers, achieving only 45.14 score on tasks solvable by human cryptanalysts. Notably, we observe a significant performance gap between general chat LLMs and specialized reasoning models, suggesting that current reasoning optimizations inadequately address cryptographic challenges. Besides, we also provide studies on different aspects for deep understandings, such as evaluate on noised plaintexts and different length of plaintexts. 
Observations show the limitations of current models in decryption reasoning, with chat and reasoning models each exhibiting distinct strengths and weaknesses in cryptographic tasks. These findings highlight the need for targeted improvements in LLMs' cryptographic reasoning, with implications for both AI safety (e.g., adversarial robustness) and applications in cybersecurity.", + "bbox": [ + 507, + 214, + 885, + 585 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 CipherBank Construction", + "text_level": 1, + "bbox": [ + 507, + 602, + 769, + 619 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "CipherBank is a purpose-built benchmark designed to rigorously evaluate the reasoning capabilities of LLMs in cryptographic decryption tasks. It integrates three core components to ensure comprehensive coverage of real-world scenarios and cryptographic complexity: (1) diverse plaintexts meticulously constructed from multiple dimensions of real-world privacy-sensitive data, ensuring the decryption process aligns with practical requirements; (2) a comprehensive suite of encryption algorithms, including both traditional cryptographic methods and custom-designed algorithms, to thoroughly assess the model's reasoning, inductive, and computational capabilities from multiple perspectives; and (3) a structured problem set with rich metadata, enabling granular performance analysis and detailed error analysis based on the diverse properties of the plaintexts.", + "bbox": [ + 507, + 631, + 887, + 921 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "1https://huggingface.co/datasets/AI-M0/ aimo-validation-aime", + "bbox": [ + 112, + 869, + 433, + 895 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "2https://openai.com/index/ learning-to-reason-with-11ms/", + "bbox": [ + 112, + 896, + 341, + 920 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "5930", + "bbox": [ + 480, + 928, + 521, + 940 + ], + "page_idx": 1 + 
}, + { + "type": "image", + "img_path": "images/55a6502770f6ba4f313d235f238ab47edc38299f957c41a17c063b28e5bad7bc.jpg", + "image_caption": [ + "Figure 2: Overview of CipherBank. CipherBank consists of simulated privacy data encrypted using various algorithms. The left side of the figure shows five domains, 14 subdomains, and selected tags. The right side displays three encryption categories, nine specific algorithms, and their corresponding difficulty levels." + ], + "image_footnote": [], + "bbox": [ + 122, + 80, + 880, + 392 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Plaintiff Data: Design, Sources, and Real-World Alignment", + "text_level": 1, + "bbox": [ + 112, + 470, + 448, + 502 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To construct CipherBank, we meticulously analyze real-world encryption scenarios and categorize the corresponding data types into five primary domains: Personal Privacy Data, Enterprise Sensitive Data, Public Safety Data, Financial Asset Data and Internet Records. These domains are further refined into 14 subdomains (e.g., Health Information, Policy Data) to ensure comprehensive coverage of encryption needs. Inspired by UltraChat (Ding et al., 2023), we adopt a tag-based approach to systematically structure encryption-relevant data, ensuring semantic consistency and domain relevance. Below, we detail the 3-step process for generating high-quality plaintext data.", + "bbox": [ + 112, + 514, + 489, + 740 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Step 1: Tag Definition and Curation. We leverage GPT-4o to generate candidate tags for each subdomain, capturing diverse real-world encryption scenarios. Human experts then curate these tags, eliminating redundancies, irrelevancies, and ambiguous entries, resulting in 89 distinct tags (see Appendix A.1). 
This structured approach ensures that the generated plaintext data remains realistic, contextually meaningful, and representative of actual encryption use cases. The tags are designed to align with the Variable Length property, enabling", + "bbox": [ + 112, + 744, + 489, + 921 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "the generation of inputs of varying sizes to assess model robustness.", + "bbox": [ + 507, + 470, + 880, + 500 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Step 2: Controlled Text Generation. Our plaintext generation process employs tag combinations to control text granularity: entries with more tags contain richer contextual details and greater length, while those with fewer tags remain concise and specific. To ensure semantic validity, all generated data are filtered to eliminate generic or redundant descriptions, creating a dataset that reflects diverse encryption scenarios with varying complexity. Additionally, we introduce the Noise Perturbation property through controlled noise injection, which serves two key objectives: (1) testing the model's anti-interference capabilities and (2) reducing its reliance on contextual semantics to enhance robustness. Furthermore, we incorporate Sensitive Numerical Data by designing scenarios with complex alphanumeric combinations, including critical identifiers such as ID card and passport number. This multifaceted approach enables a comprehensive evaluation of the model's ability to address sophisticated decryption challenges.", + "bbox": [ + 507, + 502, + 884, + 840 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Step 3: Expert Validation and Refinement. After generation, we conduct expert validation to ensure data quality, correctness, and relevance. 
Noninformative content, excessively long or short samples, and entries lacking clear privacy attributes are", + "bbox": [ + 507, + 841, + 884, + 921 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "5931", + "bbox": [ + 480, + 927, + 517, + 940 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "filtered out. Through this rigorous refinement process, we retain 262 high-quality plaintext samples. This approach enables a practical and application-driven benchmark for evaluating LLMs' decryption capabilities in cryptographic reasoning tasks.", + "bbox": [ + 112, + 84, + 489, + 165 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2 Encryption Algorithms", + "text_level": 1, + "bbox": [ + 112, + 175, + 344, + 190 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "CipherBank incorporates 3 major categories of encryption methods: Substitution Ciphers, Transposition Ciphers, and Custom Ciphers. (1) Substitution-based techniques, including Rot13, Atbash, Polybius and Vigenère, test a model's ability to decode character-level transformations. These ciphers involve monoalphabetic or polyalphabetic substitutions, where each character is replaced by another based on a fixed rule or key. These methods evaluate the model's capacity to decode symbolic mappings and generalize across substitution rules. (2) Transposition-based techniques, such as Reverse and SwapPair, focus on positional rearrangements rather than symbol substitutions. These ciphers challenge the model to recognize structural patterns, such as reversed sequences or pairwise swaps. Unlike substitution ciphers, which alter character identities but preserve their order, transposition ciphers preserve characters but disrupt their sequence. 
This tests the model's ability to analyze sequential dependencies and reconstruct the original symbol order.", + "bbox": [ + 112, + 195, + 489, + 548 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To further assess LLMs' ability to decrypt uncommon encryption methods, we introduce (3) Custom-designed ciphers that deviate from standard cryptographic schemes. (a) DualAvgCode is inspired by OpenAI's o1 model showcase3, where iterative transformations require models to infer multi-step encryption patterns. (b) ParityShift draws from LSB steganography (Mielikainen, 2006), a common technique in information hiding, incorporating bitwise manipulations based on character parity. (c) WordShift Cipher is designed to evaluate LLMs' ability to decrypt ciphers that combine substitution and transposition encryption, performing Caesar-style letter shifts within each word individually, blending character-level substitution with structural reordering.", + "bbox": [ + 112, + 550, + 489, + 806 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Meanwhile, We categorize the nine algorithms into five difficulty tiers based on key necessity and computational complexity. T1 (Basic) includes simple ciphers like ROT13 and Reverse. T2 (Intermediate) introduces Atbash and WordShift with", + "bbox": [ + 112, + 807, + 489, + 887 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "slightly more complex rules. T3 (Moderate) covers DualAvgCode and Polybius, requiring structured encoding. T4 (Advanced) involves ParityShift and SwapPairs with intricate data manipulation. T5 (Expert) features the Vigenère cipher, a polyalphabetic substitution cipher known for its keyword-based complexity. 
This framework organizes encryption techniques from basic to expert.", + "bbox": [ + 507, + 84, + 884, + 212 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.3 CipherBank Statistics", + "text_level": 1, + "bbox": [ + 507, + 223, + 729, + 239 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As shown in Figure 2, we provides an overview of CipherBank structure. The encryption algorithm in Section 2.2 applies to the expert-curated dataset from Section 2.1, yielding 2,358 test data points.", + "bbox": [ + 507, + 244, + 882, + 307 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/535635db629f7b86f2a3c946cf62f939e891dda9bfa0fa0bf18d24756cc16ba4.jpg", + "table_caption": [ + "Table 1: Statistics of CipherBank." + ], + "table_footnote": [], + "table_body": "
Domains#Tag#Plaintext#TestAvg(len)
Personal Privacy Data2350450107.88
Enterprise Sensitive Data1652468103.10
Public Safety Data1763567110.89
Financial Asset Data1344396163.68
Internet Records2053477191.92
Summary892622358134.03
", + "bbox": [ + 510, + 344, + 882, + 445 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Table 1 summarizes the distribution of plaintexts across 5 domains, each with varying numbers of tags, samples, and test cases. Notably, Internet Records has the longest plaintexts (191.92), while Enterprise Sensitive Data has shorter samples (103.10). This diversity ensures a comprehensive evaluation of model performance across different encryption contexts.", + "bbox": [ + 507, + 461, + 882, + 589 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 Evaluations", + "text_level": 1, + "bbox": [ + 507, + 601, + 648, + 615 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 Evaluation Setup", + "text_level": 1, + "bbox": [ + 507, + 627, + 690, + 642 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Evaluation Protocols. In terms of testing methodology, CipherBank's evaluation follows the Known-Plaintext Attack framework (Zulkifli and Mohd, 2008), employing a 3-shot testing approach. We prompt the model with three plaintext-ciphertext pairs as demonstrations to infer encryption rules, identify potential keys, and apply the learned patterns to decrypt a new ciphertext. The detailed prompt can be found in Appendix B.1.", + "bbox": [ + 507, + 646, + 882, + 791 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For evaluation metrics, we primarily employ accuracy to measure overall decryption success, which is the ratio of correctly decrypted cases to total test cases, where correctness requires an exact character match with the plaintext. 
Additionally, to capture finer-grained differences between the decrypted output and the original plaintext, we incorporate Levenshtein similarity (Yujian and Bo,", + "bbox": [ + 507, + 793, + 882, + 921 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "3https://openai.com/index/ learning-to-reason-with-llms/", + "bbox": [ + 112, + 894, + 339, + 920 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "5932", + "bbox": [ + 480, + 927, + 519, + 940 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Example 2.1: Plain-Ciphertext Pair", + "text_level": 1, + "bbox": [ + 126, + 85, + 359, + 99 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Domain: Personal Privacy Data", + "bbox": [ + 126, + 105, + 332, + 118 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Subdomain: Identity Information", + "bbox": [ + 126, + 121, + 349, + 134 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Tag Combination: [\"Name\", \"Date of Birth\", \"Passport Number\"]", + "bbox": [ + 126, + 137, + 549, + 151 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Plaintext:", + "bbox": [ + 126, + 155, + 191, + 166 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Peter was born on April 23, 1985, and carries a passport with the number X123456789.", + "bbox": [ + 124, + 171, + 658, + 184 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Encryption results:", + "text_level": 1, + "bbox": [ + 126, + 187, + 253, + 200 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1) Rot13: Crgre jnf obea ba Ncevy 23, 1985, naq pneevrf n cnffcbeg jvgu gur ahzore K123456789.", + "(2) SwapPairs: ePet raw sobnro npAir l32,9158,na dacrei s aapssoptrw ti hht eunbmreX 21436587.9", + "(3) WordShift : erPet was nbor no ilApr 23, 5,198 and riescar a sportpas hwit the bernum 3456789.X12" + ], + "bbox": [ + 126, + 204, + 761, + 250 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "(4) 
...", + "bbox": [ + 127, + 255, + 161, + 266 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "More results can be found in the appendix.", + "bbox": [ + 126, + 271, + 386, + 284 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2007). We compute the Levenshtein distance for each sentence individually and report the average Levenshtein similarity across all test cases, providing a more nuanced assessment of model performance beyond binary correctness.", + "bbox": [ + 112, + 315, + 487, + 395 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "LLM Candidates. For a comprehensive evaluation, we carefully selected 18 SOTA LLMs for evaluation, ensuring a diverse representation of open-source, closed-source, and reasoning-specialized models. Below, we outline the tested models:", + "bbox": [ + 112, + 401, + 489, + 482 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$\\star$ Open-Source Chat Models: We evaluate leading open-source LLMs, including Mistral AI's Mixtral-8x22B (Jiang et al., 2024a), Alibaba's Qwen2.5-72B-Instruct (Yang et al., 2024a), Meta's Llama-3.1-70B-Instruct and Llama-3.3-70B-Instruct (Dubey et al., 2024), as well as the rising star - DeepSeek-V3 (Liu et al., 2024a).", + "$\\star$ Closed-Source Models: For proprietary models, evaluation is conducted via API access. The tested models include OpenAI's 4o-mini and GPT-4o series (0806, 1120) (Hurst et al., 2024), DeepMind's Gemini-1.5-Pro (Team, 2024a) and Gemini-2.0-Flash-Exp $^{4}$ , along with Anthropic's Claude-Sonnet-3.5 $(1022)^{5}$ .", + "$\\star$ Reasoning Models: We further investigate models optimized for reasoning tasks, including QwQ-32B-Preview (Team, 2024b), DeepSeek-R1 (Guo et al., 2025), Gemini-2.0-Flash-Thinking $(1219)^{6}$ o1-mini (0912) and o1 (1217) (Jaech et al., 2024)." 
+ ], + "bbox": [ + 112, + 489, + 489, + 810 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2 Benchmark Results", + "text_level": 1, + "bbox": [ + 509, + 315, + 709, + 330 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 2 presents the evaluation results of all candidate LLMs (Levenshtein similarity results are in Appendix C.1). Below, we distill the experimental findings into several observations:", + "bbox": [ + 507, + 338, + 882, + 401 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Limitations of Current Models in Cryptographic Reasoning. Despite advancements in LLMs, Table 2 highlights their limitations in structured cryptographic reasoning. The overall performance remains low, with most SOTA models struggling to achieve meaningful accuracy. In Cipher Score, common models like Qwen and LLaMA perform particularly poorly, with some scoring in the single digits or near zero. Even the best-performing models, Claude-3.5 and o1, achieve less than 50 in accuracy, underscoring the significant difficulty of CipherBank and the challenges LLMs face in systematic decryption.", + "bbox": [ + 507, + 404, + 884, + 613 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Reasoning Models Generally Outperform Chat Models. When comparing reasoning models to chat models, generally we can find that the reasoning models do outperform chat models on all cipher algorithms and achieve better overall performance. The only expectation is the superior performance of Claude-3.5 (45.14) even better than o1, and also the bad performance of QwQ-32B-Preview (only 0.76 accuracy). This clearly demonstrate the advantages of the reasoning-specialized models.", + "bbox": [ + 507, + 614, + 882, + 774 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Closed-Source Models Retain an Edge Over Open-Source Models. Overall, closed-source models outperform open-source models in cryptographic decryption. 
Claude-3.5 (45.14) and o1 (40.59) achieve the highest performance across all cipher categories. However, DeepSeek-V3 (9.86) and DeepSeek-R1 (25.91) surpass most models in the GPT and Gemini families, indicating that advanced open-source models are closing the gap.", + "bbox": [ + 507, + 776, + 884, + 921 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "$^{4}$ https://deepmind.google/technologies/gemini/ flash/", + "bbox": [ + 112, + 844, + 478, + 870 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "5https://www.anthropic.com/news/claude-3-5-sonnet", + "bbox": [ + 112, + 871, + 379, + 894 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "$^{6}$ https://deepmind.google/technologies/gemini/flash-thinking/", + "bbox": [ + 112, + 895, + 477, + 920 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5933", + "bbox": [ + 480, + 927, + 519, + 940 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/6732dc1022bb797fbe15fc97d49a045df8e242d5120aa86484b0ff909337e93e.jpg", + "table_caption": [ + "Table 2: 3-shot scores (\\%) of LLMs across three major encryption paradigms and nine specific encryption algorithms on CipherBank. The highest scores in each category are highlighted with a blue background, while the second-best results are underlined for emphasis." + ], + "table_footnote": [], + "table_body": "
ModelSubstitution CiphersTransposition CiphersCustom CiphersCipher Score
RotAtbashPolybiusVigenèreReverseSwapPairsDualAvgCodeParityShiftWordShiftAccuracyavg
Open-source Chat Models
Mixtral-8x22B-v0.10.380000.7600.3801.150.30
Qwen2.5-72B-Instruct1.1500000.381.1502.290.55
Llama-3.1-70B-Instruct1.150.3800.38000.380.380.760.38
Llama-3.3-70B-Instruct2.670.38000000.7600.42
DeepSeek-V332.4414.882.290.7628.470.380.381.148.029.86
Closed-source Models
GPT-4o-mini-2024-07-183.692.0300.512.1600.3800.251.00
GPT-4o-2024-08-0638.173.050.380.7625.192.2901.148.408.82
GPT-4o-2024-11-2026.466.990.130.7615.270.760.250.896.116.40
gemini-1.5-pro55.340.760.380.7610.310.760.380.7616.419.54
gemini-2.0-flash-exp35.883.051.530.3829.391.5300.765.348.65
Claude-Sonnet-3.5-102283.2175.1972.901.9163.936.874.9658.2139.1245.14
Reasoning Models
QwQ-32B-Preview1.530.381.910000.380.382.290.76
DeepSeek-R173.2858.7844.270.3810.690.3824.0512.988.4025.91
gemini-2.0-flash-thinking40.4617.1821.761.1522.901.1507.639.1613.49
o1-mini-2024-09-1246.1868.3246.951.535.150.382.937.631.5320.07
o1-2024-12-1759.9279.0179.397.2514.8932.1450.3812.3929.9040.59
", + "bbox": [ + 117, + 136, + 884, + 401 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Nevertheless, both still lag behind Claude-3.5 and o1, suggesting that while open-source models are improving, there is significant potential for open-source models to achieve even better performance in the future.", + "bbox": [ + 112, + 423, + 487, + 502 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The performance variance among models of the same category is remarkably significant. Within the Open-source Chat Models category, the top-performing model, deepseek-v3 (9.86), outperforms the weakest model, Mixtral-8x22B (0.30), by a factor of 33. Similarly, in the Closed-source Models category, Claude-Sonnet-3.5 (45.14) demonstrates a performance 45 times greater than that of GPT-4o-mini (1.00). The disparity is even more pronounced in the Reasoning Models category, where o1 (40.59) surpasses QwQ-32B-Preview (0.76) by a factor of 53. Such substantial performance variations are rarely observed in other benchmarks, highlighting the challenging nature of CipherBank. 
This benchmark effectively distinguishes the reasoning capabilities of different models through its decryption dimension, providing a robust framework for evaluating model performance.", + "bbox": [ + 112, + 506, + 489, + 810 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 Detailed Analysis", + "text_level": 1, + "bbox": [ + 112, + 828, + 299, + 844 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section, we conduct a detailed analysis from the perspectives of plaintext characteristics, noise levels, testing methodologies, finer-grained evaluation metrics, and error analysis to gain deeper", + "bbox": [ + 112, + 857, + 487, + 921 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "insights into the strengths and limitations of different LLMs in cryptographic decryption.", + "bbox": [ + 507, + 423, + 882, + 456 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/7ea28d201e6469f62838e2bad93547904fd243771f5e36cb024ae1a036c08bb2.jpg", + "table_caption": [ + "Table 3: Model Performance on Short and Long Plaintiff Setting (Lower Difference and Decrease Ratio Are Better). We highlight the most stable and sensitive results in blue and green respectively." + ], + "table_footnote": [], + "table_body": "
ModelShortLongDiffDecrease Ratio(%)
GPT-4o-2024-11-209.474.465.0152.60
gemini-2.0-flash-exp11.506.425.0844.35
DeepSeek-V313.245.228.0260.60
gemini-2.0-flash-thinking19.908.4711.4342.61
DeepSeek-R132.2720.9411.3333.16
ol-mini-2024-09-1233.7717.3516.4248.57
ol-2024-12-1747.6134.3813.2327.78
Claude-Sonnet-3.548.7047.850.851.74
", + "bbox": [ + 510, + 542, + 878, + 650 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 Impact of Plaintext Length", + "text_level": 1, + "bbox": [ + 507, + 686, + 766, + 702 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To test models' sensitivity to text length, we categorize plaintexts into short (fewer than three tags) and long groups, averaging 70.29 and 181.61 characters, respectively. As shown in Table 3 (full results and plaintext examples can be found in Appendix C.2), longer plaintexts lead to a significant performance decline in most models. Most models exhibit a significant decline in decryption performance as text length increases. Among them, Claude-3.5 (-0.85) shows the most stable performance, while o1-mini (-16.42) is the most sensitive. This contrasts with human performance, highlighting LLMs' length bias in decryption reasoning.", + "bbox": [ + 505, + 712, + 884, + 921 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "5934", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/4b0ec48e93805d8249bfdf5a2d13a4735aefef2c3348233ea635395b851fa389.jpg", + "image_caption": [ + "(a) Model Robustness to Noisy Inputs: Performance Comparison." + ], + "image_footnote": [], + "bbox": [ + 119, + 85, + 361, + 187 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/90ad513b31d0dfabfe9b6d16424b1e84a8c0a245a47240b779b8ffa98718a275.jpg", + "image_caption": [ + "Figure 3: Evaluation of LLM Performance Under Different Encryption and Prompting Conditions." + ], + "image_footnote": [], + "bbox": [ + 378, + 86, + 618, + 187 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/15a51b917cad39ba4fb7a86e640ecdb18cc5aad829229441318beab38dbe68c5.jpg", + "image_caption": [ + "(b) Effect of Encryption Scope: Letters Only vs. Letters & Numbers.", + "(c) Evaluating the Benefit of Explicit Algorithm Hints in 3-Shot Prompting." 
+ ], + "image_footnote": [], + "bbox": [ + 638, + 84, + 878, + 187 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2 Effect of Noise on Model Robustness", + "text_level": 1, + "bbox": [ + 112, + 271, + 445, + 285 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We observe that models frequently substituted synonyms instead of strictly applying decryption rules to each character (examples in Appendix C.2), indicating the presence of shortcut reasoning, where models partially decrypt the text and infer the remainder based on semantic context rather than adhering to the encryption pattern.", + "bbox": [ + 112, + 300, + 489, + 413 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To evaluate robustness and mitigate reliance on semantic inference, we select the 40 plaintexts with the lowest perplexity (PPL) scores, computed using Llama-3.1-8B-Instruct, for noise injection. Figure 3a shows a substantial performance drop across all models, including Claude-3.5 (from 59.17 to 25.08) and o1-mini (from 24.25 to 5.83), highlighting their vulnerability to structural perturbations and further exposing the limitations of current models in systematic reasoning and precise decryption.", + "bbox": [ + 112, + 417, + 489, + 580 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3 Effect of Encryption Scope", + "text_level": 1, + "bbox": [ + 112, + 602, + 371, + 619 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In previous evaluations, only letters are encrypted. To better reflect real-world scenarios, here we select plaintexts with sensitive numerical data and apply encryption to both letters and numbers, focusing on algorithms that directly affect numbers (test prompt in Appendix C.2). As shown in Table 3b, model performance drops significantly in this more complex setting. This suggests difficulty in adapting decryption strategies to numerical transformations. 
Even under the same encryption principles, encrypting both letters and numbers greatly increases task complexity, posing a significant challenge for current reasoning models. This highlights a critical limitation in LLMs' ability to generalize across diverse data types, particularly when numerical transformations are involved. Future work should focus on enhancing models' capacity to handle mixed data encryption.", + "bbox": [ + 112, + 631, + 489, + 921 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.4 Effect of Explicit Algorithm Hints on Decryption Performance", + "text_level": 1, + "bbox": [ + 507, + 271, + 845, + 303 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Previous evaluations highlight the significant challenges posed by CipherBank. To evaluate the models' decryption capabilities when provided with algorithm details, we enhance the 3-shot setting by explicitly informing the models of the specific algorithm during testing. Under the revised setting, models are no longer required to independently deduce encryption logic but instead focus on identifying the necessary key and applying the specified decryption rules. The enhanced prompt is provided in Appendix C.2. Table 3c reveals distinct performance patterns. Most chat models show minimal improvement even with algorithm details, struggling with key inference and decryption—highlighting persistent limitations, especially in models like Claude (+5.30) and Gemini (+1.97).", + "bbox": [ + 505, + 307, + 884, + 565 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In contrast, reasoning models show marked performance gains, with R1 (+31.81) and o1-mini (+14.49) achieving significant improvements. 
The observed contrast underscores a fundamental distinction: chat models primarily rely on surface-level pattern recognition, while reasoning models excel in structured inference when provided with appropriate guidance.", + "bbox": [ + 507, + 567, + 882, + 695 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.5 Error Analysis", + "text_level": 1, + "bbox": [ + 507, + 707, + 672, + 722 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We conduct a comprehensive error analysis based on the test results in Table 2, identifying six distinct error types. To gain deeper insights, we examine the three best-performing chat models and three best-performing reasoning models, summarizing their error distributions. Detailed error definitions and examples are provided in Appendix D.1 and D.2.", + "bbox": [ + 505, + 728, + 882, + 853 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As shown in Figure 4, the distribution of error types reveals key differences between reasoning and chat models. Surprisingly, (1) reasoning models exhibit a higher rate of reasoning failures than", + "bbox": [ + 507, + 857, + 882, + 921 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "5935", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/978a939a205b5d785c3dfb5009bc4e72e0e7940913449dc416cc66f0c8385835.jpg", + "image_caption": [ + "Figure 4: Decryption Error Distribution. The left represents chat models, while the right corresponds to reasoning models." + ], + "image_footnote": [], + "bbox": [ + 131, + 84, + 473, + 225 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "chat models. A deeper examination of Appendix D.3 reveals that many of these failures occur on simpler tasks, suggesting that reasoning models may overanalyze problems, leading to incorrect conclusions. 
This indicates that their complex inference processes can sometimes hinder performance on straightforward decryption cases. Conversely, (2) chat models show a higher frequency of omission-insertion and reorganization errors, indicating that while they are stronger in semantic understanding, this often results in excessive auto-completion and sentence restructuring rather than strict rule adherence. This tendency suggests that chat models prioritize fluency over exact decryption, leading to unintended modifications. Additionally, (3) both model types frequently make errors in name decryption, highlighting a broader challenge in handling structured entity transformations. This suggests that current LLMs struggle to consistently apply encryption rules to proper nouns, potentially due to memorization biases or difficulties in preserving entity-level consistency during decryption.", + "bbox": [ + 115, + 288, + 489, + 644 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5 Related Work", + "text_level": 1, + "bbox": [ + 112, + 653, + 270, + 669 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Benchmarks for Reasoning Evaluating reasoning abilities in LLMs has been a key focus in AI research, with various benchmarks assessing models across mathematical, logical, and inferential tasks. MATH (Hendrycks et al., 2021b), MathBench (Liu et al., 2024c), and LiveMath-Bench (Liu et al., 2024d) test arithmetic and algebraic reasoning, while HumanEval (Chen et al., 2021b), DebugBench (Tian et al., 2024) and Big-CodeBench (Zhuo et al., 2024) evaluates code generation that require programming logic. Additionally, BIG-Bench (Srivastava et al., 2022), BBH (Suzgun et al., 2022), and LiveBench (White et al., 2024) measure broader cognitive abilities, such as abstract reasoning and analogical problem", + "bbox": [ + 112, + 680, + 490, + 921 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "solving. 
KOR-Bench (Ma et al., 2024) is new benchmark that examines strong reasoning by introducing Knowledge-Orthogonal Reasoning (KOR) tasks, assessing models' ability to apply newly introduced rules independent of pretrained knowledge. Specially, it also contains a cipher reasoning task, which provides explicit encryption rules and keys, guiding models through step-by-step decryption rather than requiring pattern inference. In contrast, CipherBank presents a more realistic challenge, requiring models to identify encryption patterns from examples without prior knowledge, better reflecting real-world scenarios where encryption schemes are unknown.", + "bbox": [ + 507, + 84, + 884, + 309 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Jailbreaking via Cipher Characters Recent work demonstrates that encoding adversarial prompts via encryption (Yuan et al., 2023; Wei et al., 2024) or obfuscation (Yong et al., 2023; Jiang et al., 2024b; Kang et al., 2024) can bypass LLM safety filters by exploiting models' ability to process encoded inputs. While CipherBench (Handa et al., 2024) evaluates cipher-based jailbreaking, its reliance on 40 curated plaintexts and explicit algorithm hints limits practical relevance. Our CipherBank removes prior guidance, requiring autonomous pattern inference from plaintext-ciphertext pairs to simulate privacy-sensitive decryption scenarios, establishing a robust benchmark for LLM security evaluation.", + "bbox": [ + 507, + 317, + 884, + 558 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6 Conclusion", + "text_level": 1, + "bbox": [ + 507, + 570, + 640, + 586 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this work, we introduce CipherBank, a comprehensive benchmark for evaluating reasoning capabilities through cryptographic decryption. CipherBank includes 5 domains, 14 subdomains of plaintext data, 9 encryption algorithms, and 2,358 decryption tasks. 
By testing SOTA LLMs on CipherBank, we uncover significant limitations in their decryption abilities, revealing distinct strengths and weaknesses between reasoning and chat models. Our analysis identifies key deficiencies in current reasoning approaches and suggests directions for improvement, positioning CipherBank as a novel benchmark for advancing structured inference and cryptographic reasoning in developing future LLMs.", + "bbox": [ + 507, + 595, + 882, + 835 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limitations", + "text_level": 1, + "bbox": [ + 507, + 848, + 613, + 863 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Our evaluation is constrained by the reliance on closed-source models, which are accessible only via API calls. This introduces potential variability", + "bbox": [ + 507, + 873, + 882, + 921 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "5936", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "due to API updates and version changes, though we mitigate this by documenting the specific versions and dates used. Additionally, access restrictions prevent us from evaluating more advanced models such as o1 Pro and o3 series, limiting the scope of our benchmark. From a design perspective, CipherBank primarily focuses on classical encryption algorithms, as modern cryptographic schemes introduce complexities beyond current model capabilities. While this choice ensures feasibility in evaluation, it also restricts the benchmark's applicability to real-world cryptographic challenges. 
As models improve, expanding CipherBank to modern encryption techniques will provide a more comprehensive assessment of reasoning in cryptographic tasks.", + "bbox": [ + 112, + 84, + 492, + 341 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 114, + 355, + 285, + 370 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "This work is supported by National Key R&D Program of China (2022ZD0160201).", + "bbox": [ + 112, + 381, + 489, + 413 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 114, + 441, + 213, + 456 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Janice Ahn, Rishu Verma, Renze Lou, Di Liu, Rui Zhang, and Wenpeng Yin. 2024. Large language models for mathematical reasoning: Progresses and challenges. arXiv preprint arXiv:2402.00157.", + "Anthropic. 2024. Claude 3.5 sonnet. https://www.anthropic.com/news/claude-3-5-sonnet. Accessed: 2025-02-09.", + "Jacob Austin, Augustus Odena, Maxwell Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie Cai, Michael Terry, Quoc Le, and Charles Sutton. 2021. Program synthesis with large language models. Preprint, arXiv:2108.07732.", + "Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, Alex Ray, Raul Puri, Gretchen Krueger, Michael Petrov, Heidi Khlaaf, Girish Sastry, Pamela Mishkin, Brooke Chan, Scott Gray, Nick Ryder, Mikhail Pavlov, Alethea Power, Lukasz Kaiser, Mohammad Bavarian, Clemens Winter, Philippe Tillet, Felipe Petroski Such, Dave Cummings, Matthias Plappert, Fotios Chantzis, Elizabeth Barnes, Ariel Herbert-Voss, William Hebgen Guss, Alex Nichol, Alex Paino, Nikolas Tezak, Jie Tang, Igor Babuschkin, Suchir Balaji, Shantanu Jain, William Saunders, Christopher Hesse, Andrew N. 
Carr, Jan Leike, Josh Achiam, Vedant Misra, Evan Morikawa, Alec Radford, Matthew Knight, Miles Brundage, Mira Murati, Katie Mayer, Peter Welinder, Bob McGrew, Dario Amodei, Sam McCandlish, Ilya Sutskever, and Wojciech Zaremba. 2021a. Evaluating large language models trained on code." + ], + "bbox": [ + 114, + 464, + 489, + 920 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde De Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, et al. 2021b. Evaluating large language models trained on code. arXiv preprint arXiv:2107.03374.", + "Sirui Chen, Bo Peng, Meiqi Chen, Ruiqi Wang, Mengying Xu, Xingyu Zeng, Rui Zhao, Shengjie Zhao, Yu Qiao, and Chaochao Lu. 2024. Causal evaluation of language models. Preprint, arXiv:2405.00622.", + "Ning Ding, Yulin Chen, Bokai Xu, Yujia Qin, Zhi Zheng, Shengding Hu, Zhiyuan Liu, Maosong Sun, and Bowen Zhou. 2023. Enhancing chat language models by scaling high-quality instructional conversations. Preprint, arXiv:2305.14233.", + "Li Dong, Nan Yang, Wenhui Wang, Furu Wei, Xiaodong Liu, Yu Wang, Jianfeng Gao, Ming Zhou, and Hsiao-Wuen Hon. 2019. Unified language model pretraining for natural language understanding and generation. Advances in neural information processing systems, 32.", + "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783.", + "Yingqiang Ge, Wenyue Hua, Kai Mei, Juntao Tan, Shuyuan Xu, Zelong Li, Yongfeng Zhang, et al. 2023. Openagi: When llm meets domain experts. Advances in Neural Information Processing Systems, 36:5539-5568.", + "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. 
Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948.", + "Simeng Han, Hailey Schoelkopf, Yilun Zhao, Zhenting Qi, Martin Riddell, Wenfei Zhou, James Coady, David Peng, Yujie Qiao, Luke Benson, Lucy Sun, Alex Wardle-Solano, Hannah Szabo, Ekaterina Zubova, Matthew Burtell, Jonathan Fan, Yixin Liu, Brian Wong, Malcolm Sailor, Ansong Ni, Linyong Nan, Jungo Kasai, Tao Yu, Rui Zhang, Alexander R. Fabbri, Wojciech Kryscinski, Semih Yavuz, Ye Liu, Xi Victoria Lin, Shafiq Joty, Yingbo Zhou, Caiming Xiong, Rex Ying, Arman Cohen, and Dragomir Radev. 2024. Folio: Natural language reasoning with first-order logic. Preprint, arXiv:2209.00840.", + "Divij Handa, Zehua Zhang, Amir Saeidi, and Chitta Baral. 2024. When \"competency\" in reasoning opens the door to vulnerability: Jailbreaking llms via novel complex ciphers. Preprint, arXiv:2402.10601.", + "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. 2021a. Measuring mathematical problem solving with the math dataset. NeurIPS." + ], + "bbox": [ + 510, + 85, + 884, + 920 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "5937", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. 2021b. Measuring mathematical problem solving with the math dataset. arXiv preprint arXiv:2103.03874.", + "Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. 2024. Gpt-4o system card. arXiv preprint arXiv:2410.21276.", + "Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. 2024. Openai o1 system card. 
arXiv preprint arXiv:2412.16720.", + "Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. 2024a. Mixtral of experts. arXiv preprint arXiv:2401.04088.", + "Fengqing Jiang, Zhangchen Xu, Luyao Niu, Zhen Xiang, Bhaskar Ramasubramanian, Bo Li, and Radha Poovendran. 2024b. Artprompt: Ascii art-based jailbreak attacks against aligned llms. arXiv preprint arXiv:2402.11753.", + "Daniel Kang, Xuechen Li, Ion Stoica, Carlos Guestrin, Matei Zaharia, and Tatsunori Hashimoto. 2024. Exploiting programmatic behavior of llms: Dual-use through standard security attacks. In 2024 IEEE Security and Privacy Workshops (SPW), pages 132-143. IEEE.", + "Nikitas Karanikolas, Eirini Manga, Nikoletta Samaridi, Eleni Tousidou, and Michael Vassilakopoulos. 2023. Large language models versus natural language understanding and generation. In Proceedings of the 27th Pan-Hellenic Conference on Progress in Computing and Informatics, pages 278-290.", + "Alan G. Konheim. 2007. Computer Security and Cryptography. John Wiley & Sons.", + "Cheryl Lee, Chunqiu Steven Xia, Longji Yang, Jentse Huang, Zhouruixin Zhu, Lingming Zhang, and Michael R Lyu. 2024. A unified debugging approach via llm-based multi-agent synergy. arXiv preprint arXiv:2404.17153.", + "Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. 2024a. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437.", + "Fei Liu, Yiming Yao, Ping Guo, Zhiyuan Yang, Zhe Zhao, Xi Lin, Xialiang Tong, Mingxuan Yuan, Zhichao Lu, Zhenkun Wang, et al. 2024b. A systematic survey on large language models for algorithm design. arXiv preprint arXiv:2410.14716." 
+ ], + "bbox": [ + 115, + 85, + 485, + 920 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Hongwei Liu, Zilong Zheng, Yuxuan Qiao, Haodong Duan, Zhiwei Fei, Fengzhe Zhou, Wenwei Zhang, Songyang Zhang, Dahua Lin, and Kai Chen. 2024c Mathbench: Evaluating the theory and application proficiency of llms with a hierarchical mathematics benchmark. arXiv preprint arXiv:2405.12209.", + "Junnan Liu, Hongwei Liu, Linchen Xiao, Ziyi Wang, Kuikun Liu, Songyang Gao, Wenwei Zhang, Songyang Zhang, and Kai Chen. 2024d. Are your llms capable of stable reasoning? arXiv preprint arXiv:2412.13147.", + "Kaijing Ma, Xinrun Du, Yunran Wang, Haoran Zhang, Zhoufutu Wen, Xingwei Qu, Jian Yang, Jiaheng Liu, Minghao Liu, Xiang Yue, et al 2024. Kor-bench: Benchmarking language models on knowledge-orthogonal reasoning tasks. arXiv preprint arXiv:2410.06526.", + "Jarno Mielikainen. 2006. Lsb matching revisited. IEEE signal processing letters, 13(5):285-287.", + "S. Rani, A. Kataria, and M. Chauhan. 2022. Cyber security techniques, architectures, and design In Holistic Approach to Quantum Cryptography in Cyber Security, pages 41-66. CRC Press.", + "A. Sarkar, S. R. Chatterjee, and M. Chakraborty. 2021 Role of cryptography in network security. The \"Essence\" of Network Security: An End-to-End Panorama, pages 103-143.", + "Miyu Sasaki, Natsumi Watanabe, and Tsukihito Komanaka. 2024. Enhancing contextual understanding of mistral llm with external knowledge bases.", + "Bruce Schneier. 2002. Cryptographic design vulnerabilities. Computer, 31(9):29-33.", + "Divya Shree, Seema Ahlawat, et al. 2017. A review on cryptography, attacks and cyber security. International Journal of Advanced Research in Computer Science, 8(5).", + "S. Soomro, M. R. Belgaum, Z. Alansari, et al. 2019 Review and open issues of cryptographic algorithms in cyber security. In 2019 International Conference on Computing, Electronics & Communications Engineering (iCCECE), pages 158-162. 
IEEE.", + "Aarohi Srivastava, Abhinav Rastogi, Abhishek Rao, Abu Awal Md Shoeb, Abubakar Abid, Adam Fisch, Adam R Brown, Adam Santoro, Aditya Gupta, Adrià Garriga-Alonso, et al. 2022. Beyond the imitation game: Quantifying and extrapolating the capabilities of language models. arXiv preprint arXiv:2206.04615.", + "Hongda Sun, Weikai Xu, Wei Liu, Jian Luan, Bin Wang, Shuo Shang, Ji-Rong Wen, and Rui Yan 2024. Determinlr: Augmenting llm-based logical reasoning from indeterminacy to determinacy. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 9828-9862." + ], + "bbox": [ + 510, + 85, + 880, + 920 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "5938", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Mirac Suzgun, Nathan Scales, Nathanael Scharli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc V Le, Ed H Chi, Denny Zhou, et al. 2022. Challenging big-bench tasks and whether chain-of-thought can solve them. arXiv preprint arXiv:2210.09261.", + "Gemini Team. 2024a. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. Preprint, arXiv:2403.05530.", + "Qwen Team. 2024b. Qwq: Reflect deeply on the boundaries of the unknown.", + "Runchu Tian, Yining Ye, Yujia Qin, Xin Cong, Yankai Lin, Zhiyuan Liu, and Maosong Sun. 2024. Debugbench: Evaluating debugging capability of large language models. Preprint, arXiv:2401.04621.", + "Boshi Wang, Xiang Yue, and Huan Sun. 2023. Can chatgpt defend its belief in truth? evaluating llm reasoning via debate. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 11865-11881.", + "Alexander Wei, Nika Haghtalab, and Jacob Steinhardt. 2024. Jailbroken: How does llm safety training fail? 
Advances in Neural Information Processing Systems, 36.", + "Colin White, Samuel Dooley, Manley Roberts, Arka Pal, Ben Feuer, Siddhartha Jain, Ravid Shwartz-Ziv, Neel Jain, Khalid Saifullah, Siddartha Naidu, et al. 2024. Livebench: A challenging, contamination-free llm benchmark. arXiv preprint arXiv:2406.19314.", + "Yiran Wu, Feiran Jia, Shaokun Zhang, Hangyu Li, Erkang Zhu, Yue Wang, Yin Tat Lee, Richard Peng, Qingyun Wu, and Chi Wang. 2024. Mathchat: Converse to tackle challenging math problems with llm agents. In ICLR 2024 Workshop on Large Language Model (LLM) Agents.", + "An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. 2024a. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115.", + "Kaiyu Yang, Aidan Swope, Alex Gu, Rahul Chalamala, Peiyang Song, Shixing Yu, Saad Godil, Ryan J Prenger, and Animashree Anandkumar. 2024b. Leandrojo: Theorem proving with retrieval-augmented language models. Advances in Neural Information Processing Systems, 36.", + "Wenlin Yao, Haitao Mi, and Dong Yu. 2024. Hdflow: Enhancing llm complex problem-solving with hybrid thinking and dynamic workflows. arXiv preprint arXiv:2409.17433.", + "Zheng-Xin Yong, Cristina Menghini, and Stephen H Bach. 2023. Low-resource languages jailbreak gpt-4. arXiv preprint arXiv:2310.02446." + ], + "bbox": [ + 115, + 85, + 485, + 920 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Youliang Yuan, Wenxiang Jiao, Wenxuan Wang, Jen-tse Huang, Pinjia He, Shuming Shi, and Zhaopeng Tu. 2023. Gpt-4 is too smart to be safe: Stealthy chat with llms via cipher. arXiv preprint arXiv:2308.06463.", + "Haodong Duan Yuan Liu. 2023. Mmbench: Is your multi-modal model an all-around player? arXiv:2307.06281.", + "Li Yujiang and Liu Bo. 2007. A normalized levenshtein distance metric. 
IEEE transactions on pattern analysis and machine intelligence, 29(6):1091-1095.", + "Li Zhong, Zilong Wang, and Jingbo Shang. 2024. Ldb: A large language model debugger via verifying runtime execution step-by-step. arXiv preprint arXiv:2402.16906.", + "Terry Yue Zhuo, Minh Chien Vu, Jenny Chim, Han Hu, Wenhao Yu, Ratnadira Widyasari, Imam Nur Bani Yusuf, Haolan Zhan, Junda He, Indraneil Paul, et al. 2024. Bigcodebench: Benchmarking code generation with diverse function calls and complex instructions. arXiv preprint arXiv:2406.15877.", + "MZWM Zulkifli and Zaid W Mohd. 2008. Attack on cryptography. Comput. Secur, 12(5):33-45." + ], + "bbox": [ + 510, + 85, + 880, + 439 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "5939", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "A Detailed Benchmark Description", + "text_level": 1, + "bbox": [ + 112, + 84, + 435, + 99 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In this chapter, we provide additional details on CipherBank that were not extensively covered in the main text. This includes a detailed breakdown of plaintext tags and their distribution across subdomains, as well as a more comprehensive description of the encryption algorithms used. These details offer deeper insights into the dataset construction and the encryption schemes evaluated in this benchmark.", + "bbox": [ + 112, + 110, + 489, + 254 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A.1 Tags and Plaintext Distribution Across Subdomains", + "text_level": 1, + "bbox": [ + 112, + 266, + 468, + 296 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Table 4 provides an overview of the specific tags associated with each subdomain within CipherBank. 
The dataset spans five primary domains and 14 subdomains, ensuring diverse and realistic plaintext scenarios for cryptographic evaluation.", + "bbox": [ + 112, + 303, + 489, + 384 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A.2 Detailed Descriptions of Encryption Algorithms", + "text_level": 1, + "bbox": [ + 112, + 395, + 445, + 426 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "This section provides detailed descriptions of the nine encryption algorithms used in CipherBank. These algorithms span substitution, transposition, and custom-designed ciphers, covering a range of complexity levels. Notably, Rot13, Atbash, Polybius, DualAvgCode, and ParityShift also support numeric encryption, further enhancing the diversity of decryption challenges. Table 5 outlines each algorithm and its transformation rules. Some detailed encryption examples are provided below, illustrating how different ciphers transform plaintext into ciphertext.", + "bbox": [ + 112, + 432, + 489, + 624 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "For each encryption algorithm, we have implemented a corresponding decryption algorithm to ensure that ciphertext can be fully restored to its original plaintext. This guarantees the reversibility and integrity of the encryption schemes used in CipherBank, allowing for a rigorous evaluation of model decryption capabilities. The decryption process follows the exact inverse of the encryption transformations, ensuring consistency across all test cases.", + "bbox": [ + 112, + 626, + 489, + 785 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "B Experimental Setup Details", + "text_level": 1, + "bbox": [ + 112, + 799, + 389, + 815 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In our evaluation, we adopt a 3-shot approach. 
A more natural Ciphertext-Only Attack (zero-shot) setting was not adopted, as it would reduce the task to brute-force decryption, where the model blindly applies all known encryption algorithms in search of a coherent output. This contradicts the goal", + "bbox": [ + 112, + 825, + 489, + 921 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "of reasoning-based inference, where the model is expected to deduce encryption rules from provided examples rather than rely on exhaustive trial and error.", + "bbox": [ + 507, + 84, + 884, + 148 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "To ensure a balanced evaluation of decryption difficulty, substitution ciphers exclude numbers to prevent inconsistencies arising from differing cyclic structures. In contrast, ciphers that do not involve direct substitution, such as Reverse, Word-Shift, and similar methods, process numbers normally, preserving structural integrity within the encrypted text.", + "bbox": [ + 507, + 154, + 885, + 282 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "For all open-source models, we conduct evaluations using the OpenCompass framework with default temperature to ensure consistent outputs. For models evaluated via API, we perform 5 independent test runs per model and report the average result to enhance stability and reliability.", + "bbox": [ + 507, + 288, + 885, + 385 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "B.1 Prompts Used for Querying", + "text_level": 1, + "bbox": [ + 507, + 414, + 778, + 430 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "This section outlines the prompts used to query models during evaluation. To ensure consistency, all models were tested under a 3-shot setting, where they were provided with three plaintext-ciphertext pairs before attempting to decrypt a new ciphertext. 
The prompts were designed to encourage logical inference rather than relying on prior knowledge, guiding models to extract encryption patterns and apply the learned rules systematically. Below, Figure 5 provides the system prompt (some reasoning models may not support system prompts), while Figure 6 present the detailed user prompts.", + "bbox": [ + 507, + 445, + 885, + 640 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "B.2 Post-processing Methods", + "text_level": 1, + "bbox": [ + 507, + 668, + 754, + 684 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "During querying, we instruct the model to think step by step and enclose the final decrypted output within ... tags. To extract the decoded plaintext, we apply the regular expression ' result $\\text{串}$ $(\\text{串} ?)$ /result>, capturing the content between these tags. The matching process is case-insensitive, aligning with algorithms like Polybius, which inherently do not differentiate between uppercase and lowercase letters when restoring plaintext. This ensures consistency across different decryption schemes.", + "bbox": [ + 507, + 700, + 884, + 876 + ], + "page_idx": 11 + }, + { + "type": "page_footnote", + "text": "7https://github.com/open-compass/opencompass", + "bbox": [ + 529, + 906, + 867, + 920 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "5940", + "bbox": [ + 480, + 927, + 519, + 940 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/f3e59a11922e149ec1e17250999d1d78fdb40700af0eee247df6a8342de9aab0.jpg", + "table_caption": [ + "Table 4: Tag Distribution Across Subdomains in CipherBank" + ], + "table_footnote": [], + "table_body": "
DomainSubdomainTags
Personal Privacy DataIdentity InformationName, ID Card Number, Passport Number, Date of Birth, Gender, Nationality, Marital Status, Mobile Number, Family Member Information (e.g., immediate family names, contact information), Residential Address
Health InformationMedical Record Number (Patient ID), Diagnosis Records, Surgery Records, Examination Reports (e.g., X-ray, CT scan results, heart rate, blood pressure, blood sugar level, blood type), Disease History, Allergy History, Vaccination Records, Family Medical History
Educational DataStudent ID (Student Number), School Records (Enrollment Date, Graduation Date), Academic Records (Subjects, Grades, GPA, Ranking), Degree Information (Bachelor, Master, Doctorate), Awards and Penalties Records (Disciplinary Records)
Enterprise Sensitive DataBusiness InformationBusiness Plans (e.g., Annual Plan, Five-Year Plan), Marketing Strategy (e.g., Marketing Promotion Plan, Advertising Budget), Customer Lists (e.g., Customer Contacts, Preferences), Supplier Information (Supplier List, Cooperation Agreements), Internal Financial Budgets (Cost Structure, Profit Forecasts)
Intellectual PropertyProduct Design Plans (e.g., Prototype Drawings, Design Documents), Internal Technical Documents (e.g., Technical Manuals, Specifications), Test Data (e.g., Product Performance Test Results, Quality Control Records), Copyright Data, Patent Data
Employee InformationContact Information (e.g., Phone Numbers, Email Addresses), Work Experience, Position and Department Information, Salary and Benefits Information (e.g., Salary Amount, Bonuses, Allowances), Performance Evaluation (e.g., Performance Scores, Promotion Records), Contract Information (e.g., Employment Contract, Non-Disclosure Agreement)
Public Safety DataPolice DataCase Information (Case Number, Case Type, Filing Date), Criminal Records (Suspect Information, Crime Time, Crime Location), Alarm Records (Informer Information, Alarm Time, Alarm Content), Investigation Reports (Investigation Results, Investigation Progress), Arrest Records (Arrest Time, Location, Action Description), Traffic Enforcement Data (Violation Records, Penalty Information), Police Officer Information (Officer Number, Name, Position, Department), Police Resource Allocation (Vehicle, Equipment, Weapon Usage Records)
National Security DataBorder Crossing Records (Entry and Exit Personnel Information, Vehicle Registration), Customs Inspection Data (Cargo List, Contraband Records), Territorial Patrol Data (Patrol Reports, Anomalies Records), Cyber Security Monitoring Data (Cyber Attack Records, Threat Intelligence)
Military DataOperation Plans, Target Location, Troop Deployment, Military Base Distribution, Defense Works Location
Financial Confidential DataBanking InformationAccount Number, Bank Card Number, Payment Method, Payment Platform ID, Transaction Details, Loan Amount, Interest Rate, Repayment Plan, Investment Records (Stocks, Funds, Bonds)
Personal IncomeSalary Amount, Pay Date, Tax Number, Tax Return Records
Internet RecordsBrowsing RecordsPage Interaction, Search Behavior, Click Activity, Device Information, Geolocation, Checkout Process, Multimedia Interaction, Download Records
Cookie DataSession Management, User Identification, Ad Targeting, Behavior Tracking, Authentication Tokens, Login Status
User PreferencesPreferred Genres, Device Usage Habits, Notification Preferences, Shopping Preferences, Video Preferences, Reading Habits
", + "bbox": [ + 115, + 171, + 878, + 854 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "5941", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Example A.1: Plain-Ciphertext Pair (Identity Information) - Only Letter", + "text_level": 1, + "bbox": [ + 126, + 93, + 596, + 105 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Domain: Personal Privacy Data", + "bbox": [ + 126, + 112, + 332, + 124 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Subdomain: Identity Information", + "bbox": [ + 126, + 129, + 347, + 142 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Tag Combination: [\"Name\", \"Date of Birth\", \"Passport Number\"]", + "bbox": [ + 126, + 145, + 549, + 159 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Plaintext:", + "text_level": 1, + "bbox": [ + 127, + 162, + 191, + 174 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Peter was born on April 23, 1985, and carries a passport with the number X123456789.", + "bbox": [ + 126, + 178, + 658, + 192 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Encryption results:", + "text_level": 1, + "bbox": [ + 127, + 195, + 253, + 208 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "(1) Rot13: Crgre jnf obea ba Ncevy 23, 1985, naq pneevrf n cnffcbeg jvgu gur ahzore K123456789.", + "(2) Atbash: Kvgvi dzh ylim lm Zkiro 23, 1985, zmw xziirvh z kzhhklig drgs gsv mfnyvi C123456789.", + "(3) Polybius: 34 15 42 15 36 45 11 41 12 33 36 32 33 32 11 34 36 23 26 2 3, 1985, 11 32 14 13 11 36 36 23 15 41 11 34 11 41 41 34 33 36 42 45 23 42 22 42 22 15 32 43 31 12 15 36 46 12 3 4 5 6 7 89.", + "(4) Vigenère: Pgeet wcd dzrp op Arcin 23, 1985, cyd natcigd pcdszrv wkeh eh nwxbgc Z123456789.", + "(5) Reverse: .987654321X rebmun eht htiw tropssap a seirrac dna ,5891 ,32 lirpA no nrob saw reteP", + "(6) SwapPairs: ePet raw sobnro npAir l32,9158,na dacrei s 
aapssoptrw ti hht eunbmreX 21436587.9", + "(7) DualAvgCode: OQdfsudfqs vxaart acnpqsmo npmo AAoqqshjkm 23, 1985, aamoce bdaaqsqshjdfrt aa oqaartroqnpssu vxhjsugi sugidf motvlnacdfqs WY123456789.", + "(8) ParityShift: Qduds vzr cnso no Zqshm 23, 1985, zoe bzsshrd z qzrrqnsu vuhui uid otlcds Y123456789.", + "(9) WordShift: erPet was nbor no ilApr 23, 5,198 and riescar a sportpas hwt the bernum 3456789.X12" + ], + "bbox": [ + 126, + 211, + 870, + 390 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Example A.2: Plain-Ciphertext Pair (Police Data) - Only Letter", + "text_level": 1, + "bbox": [ + 126, + 423, + 534, + 437 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "# Domain: Public Safety Data", + "bbox": [ + 126, + 443, + 310, + 455 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Subdomain: Police Data", + "bbox": [ + 127, + 458, + 295, + 470 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Tag Combination: [\"Suspect Information\", \"Crime Time\", \"Crime Location\", \"Police Officer Information\"]", + "bbox": [ + 126, + 475, + 800, + 489 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Plaintext:", + "text_level": 1, + "bbox": [ + 127, + 493, + 191, + 504 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Suspect: Jonathan, Crime: Burglary, Time: 2022-03-12 14:30, Location: 123 Elm Street, Officer Smith observed suspicious activity near 5th Ave on 2022-03-13.", + "bbox": [ + 126, + 508, + 870, + 538 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Encryption results:", + "text_level": 1, + "bbox": [ + 127, + 542, + 253, + 555 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "(1) Rot13: Fhcrpg: Wbanguna, Pevzr: Ohetynel, Gvzr: 2022-03-12 14:30, Ybpngvba: 123 Ryz Fgerrg, Bssvpre Fzygu bofreirq fhcvpbhf npgvivgl arne 5gu Nir ba 2022-03-13.", + "(2) Atbash: Hfhkvyg: Qlmzgszm, Xirnv: Yfitozib, Grnv: 2022-03-12 14:30, Olxzgrlm: 123 Von Hgivvg, 
Luurxvi Hnrgs lyhvieww hfhkrxrlfh zxgrergb mvzi 5gs Zev lm 2022-03-13.", + "(3) Polybius: 41 43 41 34 15 13 42 : 24 33 32 11 42 22 11 32 , 13 36 23 31 15 : 12 43 36 21 26 11 36 51 , 42 23 31 15 : 20 22 - 03 - 1214 : 30 , 2633131142233332 : 123152631414236151542 , 331616231315364131234222 3312411536441514414341342313233343411113422344234513215113654222114415332022 -03 - 13.", + "(4) Vigenère: Swdpgnt: Jqyavsap, Eciop: Mutrlccy, Tkxe: 2022-03-12 14:30, Lqnavtop: 123 Plo Svcege, Zfhtcgc Uxivs qmsgcvgo ufsrtckzuu aeeixta nglr 5tj Axp qy 2022-03-13.", + "(5) Reverse: 31-30-2202 no evA ht5 raen ytivitca suoicipsus devresbo htimS reciffO ,teertS mlE 321 :noitacoL ,03:41 21-30-2202 :emiT ,yralgruB :emirC ,nahtanoJ :tcepsuS.", + "(6) SwapPairs: uSpsc:tJ notaah,nC irem :uBgralyr ,iTem :02220-3211 :403 ,oLacitno :21 3lE mtSerte ,fOifec rmStihboesvrdes suipicuo scaitivyn ae rt5 hvA eno2 20-2301-3.", + "(7) DualAvgCode: RTvtrqdfbdu: IKnpmoaasugiaamo, BDqshlndf: ACtvqsfkmaaqsz, SUhjndf: 2022-03-12 14:30, KMnpbdaasuhjnpmo: 123 DFkmln RTsuqsdfdu, NPegeghjbddfq RSInhjsugi npacrtdfquuwdfce rttvrtoqhjbdhjnpvtrt aabb-suhjuwhjsuxz modfaaqs 5sugi AAuwdf npmo 2022-03-13.", + "(8) ParityShift: Rtrqduu: Knozuizo, Bshld: Ctsfznxsx, Uhld: 2022-03-12 14:30, Mnbzuhno: 123 Dml Rusddu, Ngghbds Rlhuicnrdswde rtrqhbntr zbuwhux odzs 5ui Zwd no 2022-03-13.", + "(9) **WordShift:** pect:Sus athan,Jon me:Cri glary,Bur e:Tim 2-03-12202 30,14: ation:Loc 123 Elm eet,Str icerOff thSmi ervedobs picioussus ivityact rnea 5th Ave no 2-03-13202." 
+ ], + "bbox": [ + 126, + 558, + 870, + 903 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "5942", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Example A.3: Plain-Ciphertext Pair (Health Information) - Letter&Number", + "text_level": 1, + "bbox": [ + 126, + 99, + 620, + 114 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "# Domain: Personal Privacy Data", + "bbox": [ + 126, + 120, + 332, + 134 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Subdomain: Health Information", + "bbox": [ + 126, + 137, + 342, + 149 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Tag Combination: [\"Patient ID\", \"Diagnosis Records\"]", + "bbox": [ + 126, + 153, + 485, + 167 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Plaintext:", + "text_level": 1, + "bbox": [ + 126, + 170, + 191, + 181 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Patient ID: R094713; Name: Jamie Lee; Age: 45; Gender: Female; EMR: EHR-234987.", + "bbox": [ + 126, + 186, + 665, + 200 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Encryption results:", + "text_level": 1, + "bbox": [ + 126, + 203, + 253, + 216 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1) Rot13: Cngvrag VQ: E327046; Anzr: Wznvr Yrr; Ntr: 78; Traqe: Srznyr; RZE: RUE-567210.", + "(2) Atbash: Kzgrvmg RW: I905286; Mznv: Qznrv Ovv; Ztv: 54; Tvmwvi: Uvnzov; VNI: VSI-765012.", + "(3) Polybius: 34 11 42 23 15 32 42 23 14 : 36 66 65 56 63 53 55 ; 32 11 31 15 : 24 11 31 23 15 26 15 15 ; 11 21 15 : 56 61 ; 21 15 32 14 15 36 : 16 15 31 11 26 15 ; 15 31 36 : 15 22 36 - 54 55 56 65 64 63.", + "(4) Reverse: .789432-R HRE ;elameF :redneG ;54 :egA ;eeL eimaJ :emaN ;317490 R :DI tneitaP", + "(5) SwapPairs: aPteti DI: 0R94713; aNme: aJmei eLe; gAe: 45; eGndre: eFmale; MRE: HRE-239487.", + "(6) **WordShift:** atientP ID: R94713; ameN: Jamie eLe; geA: 45; enderG: emaleF; REM: 
EHR-234987.", + "(7) DualAvgCode: OQaaushjdmosu HJCE: QS009935680224; MOaalndf: IKaalnhjdf KMdfd; AAfhdf: 3546; FHdfmoced-fqs: EGdfnaakmdf; DFLNQS: DFGIQS-132435997968.", + "(8) ParityShift: Qzuhdou HE: S185602; Ozld: Kzlhd Mdd; Zfd: 54; Fdoeds: Gdlzmd; DLS: DIS-325896." + ], + "bbox": [ + 126, + 219, + 873, + 382 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Example A.4: Plain-Ciphertext Pair (Banking Information) - Letter&Number", + "text_level": 1, + "bbox": [ + 126, + 431, + 631, + 445 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "# Domain: Financial Confidential Data", + "bbox": [ + 126, + 451, + 366, + 463 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "## Subdomain: Banking Information", + "bbox": [ + 126, + 467, + 352, + 480 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Tag Combination: [\"Account Number\", \"Bank Card Number\", \"Payment Platform ID\"]", + "bbox": [ + 126, + 483, + 678, + 497 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Plaintext:", + "text_level": 1, + "bbox": [ + 126, + 500, + 191, + 512 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Account Number: 123456789, Bank: LA Bank, Card Number: 9876-5432-1098-7654, Payment Method: Virtual Credit Card, Payment Platform ID: ABC123XYZ, Timestamp: 2023-09-15 14:35, Amount: $250.00.", + "bbox": [ + 126, + 517, + 873, + 546 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Encryption results:", + "text_level": 1, + "bbox": [ + 126, + 550, + 253, + 563 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "(1) Rot13: Nppbhag Ahzore: 456789012, Onax: YN Onax, Pneq Ahzore: 2109-8765-4321-0987, CnIzrag Zrgubq: Iveghny PerqvG Pneq, CnIzrag CyngsbEZ VQ: NOP456KLM, Gvzrfgnzc: 5356-32-48 47:68, Nzbhag: $583.33.", + "(2) Atbash: Zxlfmg Mfnyvi: 876543210, Yzmp: OZ Yzmp, Xziw Mfnyvi: 0123-4567-8901-2345, Kzbnvmg Nvgslw: Erigfzo Xivwr Xziw, Kzbnvmg Kozgulin RW: ZYX876CBA, 
Grnvhgznk: 7976-90-84 85:64, Znlfmg: $749.99.", + "(3) Polybius: 11 13 13 33 43 32 42 32 43 31 12 15 36 : 53 54 55 56 61 62 63 64 65 , 12 11 32 25 : 26 11 12 11 32 25 , 13 11\n36 14 32 43 31 12 15 36 : 65 64 63 62 - 61 56 55 54 - 53 66 65 64 - 63 62 61 56 , 34 11 51 31 15 32 42 31 15 42 22 33 14 :\n44 23 36 42 43 11 26 13 36 15 14 23 42 13 11 36 14 , 34 11 51 31 15 32 42 34 26 11 42 16 33 36 31 23 14 : 11 12 13 53 54\n55 46 51 52 , 42 23 31 15 41 42 11 31 34 : 54 66 54 55 - 66 65 - 53 61 53 56 : 55 61 , 11 31 33 43 32 42 : $546166 .6666 .", + "(4) Vigenère: Swdpgnt: Jqyavsap, Eciop: Mutrlccy, Tkxe: 2022-03-12 14:30, Lqnavtop: 123 Plo Svcege, Zfhtcgc Uxivs qmsgcvgo ufsrtckzuu aeeixta nglr 5tj Axp qy 2022-03-13.", + "(5) Reverse: .00.052$ :tnuomA ,53:41 51-90-3202 :pmatsemit ,ZYX321CBA :DI mroftalP tnemyap ,draC tiderC lautriV :dohtem tnemyap ,4567-8901-2345-6789 :rebnuN draC ,knaB AL :knaB ,987654321 :rebnuN tnuoccA", + "(6) SwapPairs: cAotcnu mNuber: 214365879, aBnk: A Lank, aCrd Nmu:bre 8967-5423-1980-7564, aPymnet Mtohed: Vritaul Cerdti aCdr, aPymnet Ptaforml DI: BAC321YXZ, iTmsetamp: 3202-90-51 53:41, aAmount: $250.00.", + "(7) DualAvgCode: AAbbddnptvmosu MOtvlnacdfqs: 021324354657687999, ACAamojl: KMAA ACAamojl, BDaaqsc MEtvlnacdfqs: 99796857-46352413-02009979-68574635, OQaaxzlndfmosu LNdfsuginpce: UWhjssutvaakm BDqsdfcehjsu BDaaqsc, OQaaxzlndfmosu OQkmaasuegnpqsln HJCE: AAACBD021324WYXZZZ, SUhjlndfrtsuaalnoq: 13001324-0099-0246 0235:2446, AAlnnptvmosu: $134600.0000.", + "(8) ParityShift: Zbbntou Otlcds: 032547698, Czoj: MZ Czoj, Bzse Otlcds: 8967-4523-0189-6745, Qzxldou Lduine: Whsutzm Bsdehu Bzse, Qzxldou Qmzugsnl HE: ZCB032YXA, Uhldruzlj: 3132-18-04 05:24, Zlntou: $341.11." 
+ ], + "bbox": [ + 126, + 567, + 873, + 895 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "5943", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/d6bb30e1962e77b8709915e18c6d880330246ae23981f9ae39cd774eda9323ed.jpg", + "table_caption": [ + "Table 5: Descriptions of Encryption Algorithms in CipherBank" + ], + "table_footnote": [], + "table_body": "
AlgorithmDescription
Rot13A simple substitution cipher that shifts each letter 13 places forward in the alphabet. Encryption and decryption are identical, as applying the transformation twice restores the original text. Non-alphabetic characters remain unchanged.Additionally, Rot13 in CipherBank supports number encryption by shifting digits cyclically within the range 0-9.
AtbashA monoalphabetic substitution cipher where each letter is replaced with its counterpart from the reversed alphabet (e.g., A→Z, B→Y). Since the transformation is symmetric, encryption and decryption follow the same process CipherBank's Atbash implementation extends this to digits, where each number is replaced with its complement relative to 9 (e.g., 0→9, 1→8, ..., 9→0).
PolybiusA fractionating substitution cipher that replaces each letter with a two-digit coordinate from a 6×6 grid, mapping characters to numerical positions. Traditional Polybius squares typically use a 5×5 grid, supporting only letter encryption while merging I and J into the same cell, leading to ambiguity during decryption. To address this limitation and enable number encryption, CipherBank extends the Polybius square to a 6×6 grid, allowing both letters and numbers to be uniquely represented as coordinate pairs, increasing the cipher's complexity.
VigenèreA polyalphabetic substitution cipher that employs multiple shifting alphabets determined by a repeating key. Unlike monoalphabetic ciphers that use a single mapping, Vigenère utilizes multiple substitution tables, where each plaintext letter is shifted based on the corresponding key character's position in the alphabet. By default, the key is set to "ACL".This multi-table approach enhances security by distributing letter frequencies across different shifts, making it more resistant to frequency analysis. Decryption reverses this process by applying the inverse shifts dictated by the key. Unlike Rot13, it requires a key for both encryption and decryption.
ReverseA transposition cipher that reverses the order of all characters in the plaintext. Since it does not substitute characters, it preserves all information but alters the sequence, making it effective against naive attacks.
SwapPairsA transposition cipher that swaps adjacent characters in the plaintext. If the text length is odd, the final character remains unchanged. Decryption follows the same swapping process.
DualAvgCodeA custom transformation where each letter expands into two adjacent characters, shifting one position forward and one position backward in the ASCII table. Special cases (e.g., 'a', 'z', 'A', 'Z') are duplicated instead CipherBank extends this method to digits, where each number expands into two adjacent values (e.g., 2 → "13", 5 → "46"), increasing redundancy in the encrypted text.
ParityShiftA custom encryption method that shifts each letter one position forward or backward based on its ASCII parity. Even-ASCII characters shift forward, while odd-ASCII characters shift backward. For digits, ParityShift follows a similar rule, shifting numbers based on their parity (e.g., even numbers shift up, odd numbers shift down within 0-9).
WordShiftA transformation applied at the word level rather than the character level. Each word undergoes a left shift by a fixed number of positions, cycling characters within the word while preserving word spacing. Decryption reverses this shift, ensuring character order is restored within each word. By default, the shift is set to 3 positions.
", + "bbox": [ + 114, + 164, + 878, + 860 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "5944", + "bbox": [ + 480, + 928, + 521, + 940 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Example B.1", + "text_level": 1, + "bbox": [ + 127, + 137, + 211, + 149 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "## Role:", + "text_level": 1, + "bbox": [ + 127, + 158, + 183, + 168 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Cryptography Analysis Expert.", + "bbox": [ + 156, + 174, + 347, + 187 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "## Goals:", + "text_level": 1, + "bbox": [ + 127, + 191, + 189, + 202 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Utilize the provided ciphertext and plaintext examples to analyze encryption patterns and decrypt new ciphertext.", + "bbox": [ + 154, + 206, + 840, + 221 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "## Workflow:", + "text_level": 1, + "bbox": [ + 127, + 224, + 216, + 235 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Analyze the provided ciphertext and plaintext examples to identify possible encryption patterns and rules.", + "2. Apply the decryption algorithm to the new ciphertext, attempt to decrypt, and verify the results." 
+ ], + "bbox": [ + 154, + 240, + 811, + 269 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Figure 5: System Prompt", + "bbox": [ + 410, + 288, + 584, + 302 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Example B.2", + "text_level": 1, + "bbox": [ + 127, + 420, + 211, + 432 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Background:", + "text_level": 1, + "bbox": [ + 127, + 439, + 231, + 451 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given examples.", + "bbox": [ + 154, + 456, + 773, + 469 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Think step by step.", + "bbox": [ + 156, + 473, + 273, + 486 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Provide the Plaintext result in the format text, where text is the decrypted text.", + "bbox": [ + 154, + 489, + 734, + 502 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Examples:", + "text_level": 1, + "bbox": [ + 127, + 506, + 216, + 518 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "* Example 1:", + "bbox": [ + 157, + 523, + 240, + 535 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 539, + 265, + 552 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Anzr: Rzvyl Wbuafb; Qngr bs Ovegu: Whyl 15, 1990; Cnffcbeg Ahzore: L987654321", + "bbox": [ + 216, + 556, + 747, + 569 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 573, + 255, + 583 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321", + "bbox": [ + 216, + 588, + 731, + 601 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "* Example 2:", + "bbox": [ + 157, + 606, + 240, + 618 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 623, + 265, + 
634 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Pnfr Ahzone: 2024-CF-001234; Pnfr Glcr: Gursg/Oernx-Va; Svyat Qngr: Bpgbore 19, 2024", + "bbox": [ + 216, + 638, + 781, + 651 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 656, + 255, + 667 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024", + "bbox": [ + 216, + 671, + 773, + 684 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "* Example 3:", + "bbox": [ + 157, + 688, + 240, + 701 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 706, + 265, + 718 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "gnk_vqragvsvre: GKA-2023-NOP456, gnk_erpbeqf: Irne: 2023, fgnghf: Cebprffrq, ershaq_vffhrq: 620.00", + "bbox": [ + 216, + 721, + 863, + 734 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 739, + 255, + 750 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00", + "bbox": [ + 216, + 753, + 838, + 766 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Input:", + "text_level": 1, + "bbox": [ + 127, + 771, + 189, + 784 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 157, + 788, + 235, + 801 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Yrqvn, na Nzrevpna, erfvqrva Ybf Natryrf.", + "bbox": [ + 186, + 804, + 457, + 818 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 157, + 821, + 226, + 832 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Figure 6: User Prompt (Rot13 - 3shot - Only Letter)", + "bbox": [ + 319, + 848, + 675, + 864 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "5945", + "bbox": [ + 480, + 928, + 519, + 940 + ], + 
"page_idx": 16 + }, + { + "type": "text", + "text": "C Extended Experimental Results", + "text_level": 1, + "bbox": [ + 112, + 83, + 426, + 99 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "C.1 Levenshtein Distance Evaluation from Main Results", + "text_level": 1, + "bbox": [ + 112, + 109, + 463, + 140 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In the main text, most reported results are based on accuracy, which provides a binary assessment of decryption success. However, accuracy does not account for cases where decrypted outputs closely resemble the ground truth but contain minor errors. To provide a more fine-grained evaluation, we also compute Levenshtein similarity, which measures the edit distance between the model output and the correct plaintext.", + "bbox": [ + 112, + 146, + 487, + 290 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We define the Levenshtein similarity score as follows:", + "bbox": [ + 112, + 291, + 485, + 321 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nS _ {\\mathrm {l e v}} = 1 - \\frac {d _ {\\mathrm {l e v}} \\left(P _ {\\mathrm {p r e d}} , P _ {\\mathrm {r e f}}\\right)}{\\max \\left(\\left| P _ {\\mathrm {p r e d}} \\right| , \\left| P _ {\\mathrm {r e f}} \\right|\\right)} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 332, + 487, + 368 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "where:", + "bbox": [ + 132, + 375, + 188, + 387 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- $d_{\\mathrm{lev}}(P_{\\mathrm{pred}}, P_{\\mathrm{ref}})$ is the Levenshtein distance between the predicted and reference plaintexts.", + "- $|P_{\\mathrm{pred}}|$ and $|P_{\\mathrm{ref}}|$ denote the lengths of the predicted and reference plaintexts, respectively." 
+ ], + "bbox": [ + 134, + 400, + 487, + 475 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "This metric normalizes the edit distance by the length of the longer string, ensuring that similarity is measured on a scale from 0 to 1, where 1 represents an exact match and lower values indicate increasing deviations from the ground truth.", + "bbox": [ + 112, + 486, + 487, + 565 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The corresponding Levenshtein-based evaluation results for Table 2 are presented in Table 6 and Figure 7, offering deeper insights into models' decryption performance beyond strict accuracy metrics.", + "bbox": [ + 112, + 567, + 487, + 645 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "One key observation is that most models achieve significantly higher Levenshtein similarity scores than their accuracy scores, indicating that even when decryption is incorrect, outputs often retain structural similarities to the original plaintext. This suggests that models capture some encryption patterns but struggle with full decryption, failing to consistently apply correct transformations. Notably, Claude-Sonnet-3.5 achieves near-perfect scores ( $>0.99$ for most ciphers), demonstrating its ability to minimize decryption errors while maintaining structural accuracy, making it the most reliable model overall.", + "bbox": [ + 112, + 648, + 487, + 854 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Interestingly, reasoning models such as DeepSeek-R1 and o1 exhibit a large gap between accuracy and Levenshtein similarity. Despite their moderate accuracy, their similarity scores", + "bbox": [ + 112, + 857, + 487, + 920 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "often exceed 0.80, indicating that they frequently produce outputs that preserve much of the original structure but contain systematic errors. 
This suggests that reasoning models are better at capturing encryption logic but may struggle with precise execution, sometimes overcomplicating simpler tasks.", + "bbox": [ + 507, + 84, + 882, + 196 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Conversely, chat models such as DeepSeek-V3 and Llama-based models exhibit high variability, showing relatively low accuracy but moderate Levenshtein similarity (0.40 - 0.70). This indicates a tendency toward semantic approximation rather than strict decryption, where models generate linguistically plausible outputs that fail to adhere to precise encryption rules.", + "bbox": [ + 507, + 197, + 882, + 325 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Another notable trend is that transposition ciphers (e.g., Reverse, SwapPairs) yield lower Levenshtein similarity scores across all models, confirming that character reordering remains a major challenge. Unlike substitution ciphers, where models can rely on token-level mappings, transposition ciphers require strict positional tracking, which even the strongest models struggle to handle effectively.", + "bbox": [ + 507, + 326, + 882, + 454 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Overall, Levenshtein similarity results highlight fundamental differences in how chat and reasoning models approach decryption. Chat models rely more on semantic fluency, leading to structurally incorrect but coherent outputs, whereas reasoning models exhibit stronger pattern retention but occasionally fail due to overgeneralization or overthinking. 
These findings suggest that while LLMs can approximate decryption rules, achieving precise symbolic transformations remains a significant challenge, especially for positional-based ciphers.", + "bbox": [ + 507, + 455, + 882, + 631 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "C.2 Additional Analysis and Insights", + "text_level": 1, + "bbox": [ + 507, + 642, + 815, + 658 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In this section, we present more detailed experimental results that complement the findings in the main text. These additional analyses provide further insights into model performance across different encryption schemes, highlighting trends, challenges, and specific cases where models excel or struggle.", + "bbox": [ + 507, + 663, + 882, + 759 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In the analysis of length sensitivity, plaintexts of different lengths can be seen in Figure 8. The impact of plaintext length on decryption performance is shown in Table 7 and Table 8, where we compare model accuracy on short vs. long texts. These results illustrate how increasing text length affects model performance, revealing notable differences in decryption robustness across various architectures", + "bbox": [ + 507, + 760, + 882, + 903 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The dataset used for the noise interference experi", + "bbox": [ + 526, + 904, + 882, + 920 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "5946", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/2a89c3c3ad84718ae761e02302e066bb4cbab67961539f3b4c074c9f45bed335.jpg", + "table_caption": [ + "Table 6: Results on CipherBank(3-shot) Levenshtein similarity" + ], + "table_footnote": [], + "table_body": "
ModelSubstitution CiphersTransposition CiphersCustom CiphersCipher Score
Rot13At ba shPolybiusVigenèreReverseSwapPairsDualAvgCodeParityShiftWordShiftLevenshtein Similarity
Open-source Chat Models
Mixtral-8x22B-v0.10.45420.37440.26940.40320.38100.47450.33300.38710.64010.4130
Qwen2.5-72B-Instruct0.55560.42880.30420.40160.40220.53080.37180.47860.84270.4796
Llama-3.1-70B-Instruct0.57760.43780.31320.44310.37750.55420.39900.45050.72880.4758
Llama-3.3-70B-Instruct0.57540.40540.13170.43970.24820.53750.38330.40960.45800.3988
DeepSeek-V30.91950.75940.45620.48440.90880.69750.42050.57310.88870.6787
Closed-source Models
GPT-4o-mini-2024-07-180.64590.49350.24630.44990.56640.60050.34180.41880.72580.4988
GPT-4o-2024-08-060.96030.58760.34450.53460.81700.79680.43040.58500.89400.6612
GPT-4o-2024-11-200.93400.60540.35110.53380.72770.67800.42350.55300.87150.6309
gemini-1.5-pro0.93090.50430.49690.52010.75360.73170.47840.57200.88190.6522
gemini-2.0-flash-exp0.96160.65670.48130.50640.89010.75690.44760.53080.86050.6769
Claude-Sonnet-3.5-10220.99840.99610.99550.71430.98930.92620.78740.98830.97120.9296
Reasoning Models
QwQ-32B-Preview0.24770.15910.12310.16600.14440.16660.15640.16450.30570.1815
DeepSeek-R10.99200.97610.93440.52270.73680.72130.83160.69280.84910.8063
gemini-2.0-flash-thinking0.96640.85710.90740.55110.85080.77880.42610.73530.87770.7723
o1-mini-2024-09-120.97570.98600.95630.54120.59590.52670.39540.69350.72360.7105
o1-2024-12-170.83200.99280.96400.56420.77250.92080.86530.65620.93350.8335
", + "bbox": [ + 117, + 124, + 882, + 373 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/683e3b08ad6cbbf27df8bf47c11d0fbb8fd2e73a78a8436de133e5b3ea35e2c1.jpg", + "image_caption": [ + "Figure 7: Model Performance - Accuracy vs. Levenshtein Similarity." + ], + "image_footnote": [], + "bbox": [ + 151, + 418, + 845, + 678 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/5ab78ee30cbe0da825efc4ff3ffd9d7169db76c1c41e28261fc84db42c1fdd0d.jpg", + "table_caption": [ + "Table 7: Decryption Performance on Short Texts" + ], + "table_footnote": [], + "table_body": "
ModelSubstitution CiphersTransposition CiphersCustom CiphersCipher Score
Rot13AtbashPolybiusVigenèreReverseSwapPairsDualAvgCodeParityShiftWordShiftAccuracyavg
DeepSeek-V340.0027.834.351.7429.570.870.872.6111.313.24
DeepSeek-R180.0071.3053.040.8718.260.8735.6518.2612.1732.27
GPT-4o-2024-11-2034.7813.040.87021.741.740.871.7410.439.47
gemini-2.0-flash-exp42.614.351.740.8740.872.6101.748.7011.50
Claude-Sonnet-3.5-102286.0977.3969.573.4877.398.709.5763.4842.6148.70
gemini-2.0-flash-thinking52.1726.9633.912.6133.910.87013.9114.7819.90
o1-mini-2024-09-1264.3582.6165.22015.6506.6713.912.6133.77
o1-2024-12-1761.7489.5784.550.8723.4846.6761.7417.1735.8047.61
", + "bbox": [ + 119, + 775, + 882, + 902 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "5947", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/6d39235f9e2f33dd38174e9bd779196a688af66ce1dc0121c6b682e99f413446.jpg", + "table_caption": [ + "Table 8: Decryption Performance on Long Texts" + ], + "table_footnote": [], + "table_body": "
ModelSubstitution CiphersTransposition CiphersCustom CiphersCipher Score
Rot13AtbashPolybiusVigenèreReverseSwapPairsDualAvgCodeParityShiftWordShiftAccuracyavg
DeepSeek-V326.534.760.6809.520005.445.22
DeepSeek-R168.0348.9837.4104.76014.978.845.4420.94
GPT-4o-2024-11-2020.414.080012.240003.404.46
gemini-2.0-flash-exp30.612.041.36020.410.68002.726.42
Claude-Sonnet-3.5-102292.5278.9182.311.3663.955.442.7263.2740.1447.85
gemini-2.0-flash-thinking31.299.5212.24014.291.3602.724.768.47
o1-mini-2024-09-1231.9757.1432.6500002.72017.35
o1-2024-12-1758.5070.7561.110.688.1615.3841.58.6625.6634.38
", + "bbox": [ + 119, + 196, + 882, + 321 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Example C.1: Plaintiff Examples", + "text_level": 1, + "bbox": [ + 127, + 510, + 346, + 523 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Short: James, American, is married to Susan.", + "bbox": [ + 127, + 530, + 405, + 542 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Long: John Smith, born on January 15, 1990, holds American nationality and resides at 123 Elm Street, Springfield, Illinois. His mobile number is +1-312-555-6789, and his ID card number is IDURITY1234567. He is married to Jane Smith, who can be reached at +1-312-555-6789. They have two children: Emily (16, high school) and Michael (12, middle school). Their address and contact information are the same.", + "bbox": [ + 126, + 546, + 872, + 608 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Short:Jimmy,GPA:3.71.", + "bbox": [ + 127, + 619, + 287, + 632 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Long: David Wilson, Masters in Data Science, GPA: 3.95, Expected Graduation: 2023, Courses: Big Data Analytics, Machine Learning, Data Visualization.", + "bbox": [ + 127, + 636, + 872, + 665 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Short: Medical Record Number: 987-654-321; Patient Name: James.", + "bbox": [ + 127, + 675, + 549, + 688 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Long: David Wilson, Masters in Data Science, GPA: 3.95, Expected Graduation: 2023, Courses: Big Data Analytics, Machine Learning, Data Visualization.", + "bbox": [ + 127, + 692, + 872, + 722 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Short: Lucas, lucas@ucc.company.com", + "bbox": [ + 127, + 732, + 371, + 745 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Long: Hank, Senior Developer, IT Department, Salary: \\(95,000, Bonuses: \\)5,000, Allowances: $2,000 (Remote Work), Performance Rating: A, Full-time, Start Date: 
2020-03-15, Last Promotion: 2021-08-10, Benefits: Health Insurance, Retirement 5%, Training: \\)1,500/year, Projects: Nexus, Zeta, Feedback: 4.5/5", + "bbox": [ + 127, + 749, + 872, + 795 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Figure 8: Samples used for length sensitivity analysis", + "bbox": [ + 317, + 813, + 678, + 827 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "5948", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Example C.2: Noise Example", + "text_level": 1, + "bbox": [ + 126, + 85, + 319, + 99 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Example 1:", + "bbox": [ + 126, + 105, + 201, + 118 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Origin: Card Number: 9876 5432 1098 7654", + "bbox": [ + 126, + 121, + 405, + 134 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Noise: Card Numbr: 9876 54-32 1O98 765 four", + "bbox": [ + 126, + 137, + 418, + 149 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Example 2:", + "bbox": [ + 126, + 161, + 201, + 174 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Origin: Pay Date: 2023-05-15, Income: $75,000, Currency: USD, Bonus:$ 5,000", + "bbox": [ + 126, + 177, + 626, + 191 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": " Noise: Pay Date (scheduled): 2023-05-15! Income approx: $75,000. 
Currency spec: USD, and Bonus = $5,000.", + "bbox": [ + 126, + 193, + 811, + 208 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Example 3:", + "bbox": [ + 126, + 218, + 201, + 231 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Predictions: Officer ID: P12345, Name: John, Position: Sergeant, Department: Homicide", + "bbox": [ + 126, + 234, + 673, + 247 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "References: Officer Identification-No.: P12345, Full-Name: John (J.), Job-Title: Sergeant, Dept.: Homicide Squad.", + "bbox": [ + 126, + 250, + 828, + 265 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Figure 9: The samples used for the noise comparison experiments.", + "bbox": [ + 270, + 281, + 722, + 297 + ], + "page_idx": 20 + }, + { + "type": "table", + "img_path": "images/4b1f854a6290bc56ccef374753099879756833e56bcdb3542a341b5e8def1ebf.jpg", + "table_caption": [ + "Table 9: Decryption Performance without Noise" + ], + "table_footnote": [], + "table_body": "
ModelRot13AtbashReverseSwapPairsParityShiftWordShiftAccuracyavg
Open-source Models
DeepSeek-V350.0031.5018.506.509.0017.0022.08
DeepSeek-R183.5077.5042.002.5020.005.5038.50
Closed-source Models
GPT-4o-2024-11-2049.5010.5013.5003.505.5013.75
Gemini-2.0-flash-exp45.007.5042.502.505.0015.5019.67
Claude-Sonnet-3.5-102292.5085.0062.5010.0070.0035.0059.17
Gemini-2.0-flash-thinking62.5033.5022.50017.501.5022.92
o1-mini-2024-09-1255.5067.505.00017.50024.25
", + "bbox": [ + 115, + 335, + 882, + 520 + ], + "page_idx": 20 + }, + { + "type": "table", + "img_path": "images/b071647c4c6be2e71114225ed2cb060b6ef0d56925b587016ca315a9f3408aa2.jpg", + "table_caption": [ + "Table 10: Decryption Performance with Noise" + ], + "table_footnote": [], + "table_body": "
ModelRot13AtbashReverseSwapPairsParityShiftWordShiftAccuracyavg
Open-source Models
DeepSeek-V38.5010.507.5000.501.504.75
DeepSeek-R133.5023.004.5001.50010.42
Closed-source Models
GPT-4o-2024-11-205.5004.500001.67
Gemini-2.0-flash-exp2.50002.50000.83
Claude-Sonnet-3.5-102250.5040.0020.002.5030.007.5025.08
Gemini-2.0-flash-thinking30.5019.003.5002.5009.25
o1-mini-2024-09-1215.0020.000005.83
", + "bbox": [ + 115, + 557, + 882, + 741 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "iments can be found in Figure 9. Detailed results on the impact of noise on decryption performance are presented in Table 9 and Table 10, comparing model performance on short and long plaintexts under noisy conditions. These findings highlight the varying degrees of resilience across models, with some maintaining reasonable performance under noise while others degrade significantly.", + "bbox": [ + 112, + 766, + 487, + 894 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "In the analysis of the impact of encryption scope", + "bbox": [ + 131, + 904, + 485, + 921 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "on decryption performance, the test prompts used are shown in Figure 10. Detailed results are presented in Table 11. This analysis compares model performance when encrypting only letters versus encrypting both letters and numbers. The results highlight how different models handle the increased complexity introduced by number encryption, showing varying degrees of adaptability. 
While some models maintain relatively stable per", + "bbox": [ + 507, + 766, + 884, + 910 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "5949", + "bbox": [ + 480, + 927, + 519, + 940 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Example C.3", + "text_level": 1, + "bbox": [ + 126, + 85, + 213, + 99 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Background:", + "text_level": 1, + "bbox": [ + 126, + 105, + 231, + 118 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given examples.", + "bbox": [ + 154, + 121, + 774, + 136 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Think step by step.", + "bbox": [ + 156, + 137, + 273, + 152 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Provide the Plaintext result in the format text, where text is the decrypted text.", + "bbox": [ + 154, + 154, + 734, + 168 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Examples:", + "text_level": 1, + "bbox": [ + 126, + 171, + 216, + 184 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "* Example 1:", + "bbox": [ + 157, + 187, + 240, + 200 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 205, + 265, + 217 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Mznv: Vnrob Qlsmhlm; Wzgv lu Yrigs: Qfob 84, 8009; Kzhhklig Mfnyvi: B012345678", + "bbox": [ + 216, + 221, + 754, + 234 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 237, + 255, + 250 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321", + "bbox": [ + 216, + 254, + 731, + 267 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "* Example 2:", + "bbox": [ + 157, + 271, + 240, + 282 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "- Ciphertext:", + 
"bbox": [ + 186, + 288, + 265, + 300 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Xzhv Mfnyvi: 7975-KH-998765; Xzhv Gbkv: Gsvug/Yivzp-Rm; Urormt Wzgv: Lxglyvi 80, 7975", + "bbox": [ + 216, + 304, + 815, + 317 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 321, + 255, + 332 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024", + "bbox": [ + 216, + 336, + 774, + 350 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "* Example 3:", + "bbox": [ + 157, + 354, + 240, + 366 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 370, + 265, + 382 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "gzc_rwvmgrurvi: GCM-7976-ZYX543, gzc_ivxliwh: bvzi: 7976, hgzgfh: Kilxvhhvw, ivufmw_rhhfvw:", + "bbox": [ + 216, + 387, + 872, + 401 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "379.99", + "bbox": [ + 126, + 404, + 171, + 414 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 420, + 255, + 432 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00", + "bbox": [ + 216, + 436, + 838, + 450 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Input:", + "text_level": 1, + "bbox": [ + 126, + 453, + 189, + 466 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 156, + 469, + 236, + 483 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Wvzm slowh gsv kzhhklig mfnyvi Z87654321.", + "bbox": [ + 186, + 486, + 473, + 499 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 157, + 504, + 226, + 514 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Figure 10: User Prompt (Atbash - 3shot - Letter & Number)", + "bbox": [ + 292, 
+ 531, + 702, + 546 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/cae536e8c44905a6bbfa91268e2bf8afe91cbd3e25937d2387a1e8cf921c8509.jpg", + "table_caption": [ + "Table 11: Impact of Encryption Scope on Decryption Performance" + ], + "table_footnote": [ + "Note: Values before the $\\prime /{}^{\\prime }$ indicate performance when encrypting letters only, while values after the ${}^{\\prime }/{}^{\\prime }$ represent performance when encrypting both letters and numbers." + ], + "table_body": "
ModelRot13AtbashPolybiusDualAvgCodeParityShiftAccuracyavg
Open-source Models
DeepSeek-V368.94/23.3224.02/14.6419.35/6.013.51/011.31/025.23 / 8.79
DeepSeek-R159.10/43.0563.19/23.0239.21/43.2337.36/013.05/0.7642.38 / 22.01
Closed-source Models
GPT-4o-2024-11-2027.53/010.08/00/02.54/02.67/08.56 / 0
gemini-2.0-flash-exp47.54/07.50/2.507.50/5.050/02.67/013.04 / 1.51
Claude-Sonnet-3.5-102292.50/50.0087.56/27.5365.00/32.2515.00/062.54/17.3564.52 / 25.43
gemini-2.0-flash-thinking35.00/2.650/2.540/10.000/02.50/07.50 / 3.04
o1-mini-2024-09-1250.00/32.5972.57/35.0040.00/42.530/07.50/0.7634.01 / 22.18
", + "bbox": [ + 115, + 583, + 882, + 760 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "formance, others exhibit significant drops when required to decrypt mixed alphanumeric ciphertexts.", + "bbox": [ + 112, + 828, + 489, + 860 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "For the enhanced prompt template, please refer to Figures 11-19, while more detailed experimental results can be found in Table 12.", + "bbox": [ + 112, + 873, + 487, + 920 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "C.3 Impact of Plaintext Source on Decryption Performance", + "text_level": 1, + "bbox": [ + 509, + 828, + 880, + 859 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "To assess how plaintext characteristics influence decryption performance, we compare results on synthetically generated privacy-sensitive data ver", + "bbox": [ + 507, + 873, + 884, + 921 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "5950", + "bbox": [ + 480, + 927, + 519, + 940 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Example C.4", + "text_level": 1, + "bbox": [ + 127, + 254, + 213, + 267 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Background:", + "text_level": 1, + "bbox": [ + 127, + 274, + 231, + 286 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples.", + "bbox": [ + 154, + 290, + 873, + 304 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Think step by step.", + "bbox": [ + 156, + 307, + 273, + 319 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Provide the Plaintext result in the format text, where text is the decrypted text.", + "bbox": [ + 154, + 323, + 734, + 337 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Algorithm Flow:", + "text_level": 1, + "bbox": [ + 127, + 340, + 255, + 353 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": 
"Uses the Caesar cipher with a fixed shift of 13 positions. For each letter in the Plaintext, shift it forward by 13 positions in the alphabet to produce the Ciphertext.", + "bbox": [ + 124, + 356, + 870, + 387 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Examples:", + "text_level": 1, + "bbox": [ + 127, + 390, + 215, + 403 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "* Example 1:", + "bbox": [ + 156, + 406, + 240, + 419 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 423, + 265, + 436 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Anzr: Rzvyl Wbuafb; Qngr bs Ovegu: Whyl 15, 1990; Cnffcbeg Ahzore: L987654321", + "bbox": [ + 216, + 439, + 749, + 453 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 456, + 257, + 468 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321", + "bbox": [ + 216, + 472, + 732, + 486 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "* Example 2:", + "bbox": [ + 156, + 489, + 240, + 502 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 506, + 265, + 519 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Pnfr Ahzore: 2024-CF-001234; Pnfr Glcr: Gursg/Oernx-Va; Svyvat Qngr: Bpgbore 19, 2024", + "bbox": [ + 216, + 521, + 781, + 536 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 539, + 257, + 551 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024", + "bbox": [ + 216, + 555, + 774, + 569 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "* Example 3:", + "bbox": [ + 156, + 571, + 240, + 585 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 588, + 265, + 601 + ], + 
"page_idx": 22 + }, + { + "type": "text", + "text": "gnk_vqragvsvre: GKA-2023-NOP456, gnk_erpbeqf: Irne: 2023, fgnghf: Cebprffrq, ershaq_vffhrq: 620.00", + "bbox": [ + 216, + 605, + 865, + 619 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 623, + 257, + 633 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00", + "bbox": [ + 216, + 638, + 840, + 652 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Input:", + "text_level": 1, + "bbox": [ + 127, + 655, + 189, + 667 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 156, + 671, + 236, + 684 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Yrqvn, na Nzrevpna, erfvqrf va Ybf Natryrf.", + "bbox": [ + 186, + 688, + 458, + 702 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 156, + 705, + 226, + 715 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Figure 11:Enhanced Prompt (ROT13 - 3shot - Letter)", + "bbox": [ + 314, + 733, + 682, + 747 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "5951", + "bbox": [ + 480, + 928, + 517, + 940 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Example C.5", + "text_level": 1, + "bbox": [ + 127, + 254, + 213, + 267 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Background:", + "text_level": 1, + "bbox": [ + 127, + 274, + 231, + 286 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples.", + "bbox": [ + 156, + 290, + 873, + 304 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Think step by step.", + "bbox": [ + 157, + 307, + 273, + 319 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Provide the Plaintext result in the format text, where text 
is the decrypted text.", + "bbox": [ + 156, + 323, + 734, + 337 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Algorithm Flow:", + "text_level": 1, + "bbox": [ + 127, + 340, + 255, + 353 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Uses the Atbash cipher. Each letter in the \\*\\*Plaintext\\*\\* is replaced with its reverse counterpart in the alphabet.", + "bbox": [ + 154, + 356, + 828, + 370 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Examples:", + "text_level": 1, + "bbox": [ + 127, + 373, + 216, + 387 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "* Example 1:", + "bbox": [ + 157, + 390, + 240, + 403 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 407, + 265, + 419 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Mznv: Vnrob Qlsmhlm; Wzgv lu Yrigs: Qfob 15, 1990; Kzhhklig Mfnyvi: B987654321", + "bbox": [ + 216, + 423, + 752, + 436 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 439, + 255, + 451 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321", + "bbox": [ + 216, + 455, + 731, + 469 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "* Example 2:", + "bbox": [ + 157, + 472, + 240, + 485 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 489, + 265, + 502 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Xzhv Mfnyvi: 2024-KH-001234; Xzhv Gbkv: Gsvug/Yivzp-Rm; Urormt Wzgv: Lxglyvi 19, 2024", + "bbox": [ + 216, + 505, + 815, + 519 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 523, + 255, + 533 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024", + "bbox": [ + 216, + 538, + 774, + 552 + ], + "page_idx": 23 + }, + { 
+ "type": "text", + "text": "* Example 3:", + "bbox": [ + 157, + 555, + 240, + 568 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 573, + 265, + 585 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "gzc_rwvmgrurvi: GCM-2023-ZYX456, gzc_ivxliwh: bvzi: 2023, hgzgfh: Kilxvhhvw, ivufmw_rhhfvw:", + "bbox": [ + 216, + 588, + 873, + 602 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "620.00", + "bbox": [ + 127, + 606, + 171, + 615 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 623, + 255, + 633 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00", + "bbox": [ + 216, + 638, + 838, + 651 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Input:", + "text_level": 1, + "bbox": [ + 127, + 655, + 189, + 667 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 157, + 671, + 235, + 684 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Ovwrz,zm Znvirxzm, ivhrwhrm Olh Zmtvovh.", + "bbox": [ + 186, + 688, + 485, + 701 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 157, + 705, + 226, + 715 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Figure 12:Enhanced Prompt (Atbash - 3shot - Letter)", + "bbox": [ + 314, + 733, + 682, + 747 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "5952", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Example C.6", + "text_level": 1, + "bbox": [ + 126, + 221, + 213, + 234 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Background:", + "text_level": 1, + "bbox": [ + 126, + 241, + 231, + 253 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given 
algorithm flow and examples.", + "bbox": [ + 154, + 256, + 872, + 271 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Think step by step.", + "bbox": [ + 156, + 274, + 273, + 287 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Provide the Plaintext result in the format text, where text is the decrypted text.", + "bbox": [ + 154, + 290, + 734, + 304 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Algorithm Flow:", + "text_level": 1, + "bbox": [ + 126, + 307, + 255, + 319 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Uses the Polybius cipher. Each letter in the $^{**}$ Plaintext\\*\\* is mapped to a pair of coordinates in the Polybius square, forming the $^{**}$ Ciphertext\\*.", + "bbox": [ + 126, + 323, + 872, + 353 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Examples:", + "text_level": 1, + "bbox": [ + 126, + 357, + 215, + 369 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "* Example 1:", + "bbox": [ + 157, + 373, + 240, + 386 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 390, + 265, + 403 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "32 11 31 15 : 15 31 23 26 51 24 33 22 32 41 33 32 ; 14 11 42 15 33 16 12 23 36 42 22 : 24 43 26 51 15 , 19", + "bbox": [ + 216, + 406, + 870, + 419 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "90;3411414134333642324331121536:51987654321", + "bbox": [ + 126, + 422, + 549, + 435 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 439, + 255, + 451 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321", + "bbox": [ + 216, + 455, + 731, + 469 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "* Example 2:", + "bbox": [ + 157, + 472, + 240, + 485 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 489, + 
265, + 502 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "13 11 41 15 32 43 31 12 15 36 : 2 0 2 4 - 34 41 - 0 0 1 2 3 4 ; 13 11 41 15 42 51 34 15 : 42 22 15 16 42 / 12", + "bbox": [ + 216, + 505, + 870, + 518 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "36 15 11 25 - 23 32 ; 16 23 26 23 32 21 14 11 42 15 : 33 13 42 33 12 15 36 19 , 20 24", + "bbox": [ + 126, + 521, + 660, + 533 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 539, + 255, + 551 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024", + "bbox": [ + 216, + 555, + 773, + 569 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "* Example 3:", + "bbox": [ + 157, + 571, + 240, + 585 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 588, + 265, + 601 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "42 11 46 _ 23 14 15 32 42 23 16 23 15 36 : 42 46 32 _ 20 23 - 11 12 13 456 , 42 11 46 _ 36 15 13 33 36 14", + "bbox": [ + 216, + 604, + 870, + 618 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "41:51 15 11 36:2023,41 42 11 42 43 41:34 36 33 13 15 41 41 15 14,36 15 16 43 32 14_23 41 41 43 15 14:620.", + "bbox": [ + 126, + 621, + 872, + 634 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "00", + "bbox": [ + 126, + 639, + 147, + 650 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 655, + 255, + 665 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00", + "bbox": [ + 216, + 671, + 838, + 684 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Input:", + "text_level": 1, + "bbox": [ + 126, + 688, + 189, + 701 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 157, + 705, + 235, 
+ 718 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "26 15 14 23 11 , 11 32 11 31 15 36 23 13 11 32 , 36 15 41 23 14 15 41 23 32 26 33 41 11 32 21 15 26 15 41 .", + "bbox": [ + 186, + 721, + 848, + 734 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 157, + 738, + 225, + 749 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Figure 13: Enhanced Prompt (Polybius - 3shot - Letter)", + "bbox": [ + 309, + 766, + 685, + 781 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "5953", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Example C.7", + "text_level": 1, + "bbox": [ + 127, + 254, + 213, + 267 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Background:", + "text_level": 1, + "bbox": [ + 127, + 274, + 231, + 286 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples.", + "bbox": [ + 156, + 290, + 873, + 304 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Think step by step.", + "bbox": [ + 157, + 307, + 273, + 319 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Provide the Plaintext result in the format text, where text is the decrypted text.", + "bbox": [ + 156, + 323, + 734, + 337 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Algorithm Flow:", + "text_level": 1, + "bbox": [ + 127, + 340, + 255, + 353 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Uses the Vigenère cipher. 
Each letter in the **Plaintext** is shifted by the corresponding letter in the **Key** to produce the **Ciphertext**.", + "bbox": [ + 126, + 356, + 870, + 387 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Examples:", + "text_level": 1, + "bbox": [ + 127, + 390, + 215, + 401 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "* Example 1:", + "bbox": [ + 157, + 407, + 240, + 419 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 423, + 265, + 436 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Nexe: Eotla Jqsnuzn; Dcee zf Miteh: Jwwy 15, 1990; Pcdsrzrv Nwbgc: J987654321", + "bbox": [ + 216, + 439, + 736, + 453 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 456, + 257, + 468 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321", + "bbox": [ + 216, + 472, + 731, + 486 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "* Example 2:", + "bbox": [ + 157, + 489, + 240, + 502 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 506, + 265, + 519 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Ccde Yuomet: 2024-PU-001234; Naup Vjpg: Vsehe/Dcecv-Ky; Qintni Dcee: Oeeodpr 19, 2024", + "bbox": [ + 216, + 521, + 801, + 536 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 539, + 257, + 551 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024", + "bbox": [ + 216, + 555, + 774, + 569 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "* Example 3:", + "bbox": [ + 157, + 571, + 240, + 585 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 588, + 265, + 601 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": 
"tci_koepeihtet: VIN-2023-CMC456, tci_tpcqcdu: jecc: 2023, dtceuu: Rcoepsupd, rgqupo_kdswpd: 620.00", + "bbox": [ + 216, + 605, + 865, + 619 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 623, + 257, + 633 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00", + "bbox": [ + 216, + 638, + 840, + 652 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Input:", + "text_level": 1, + "bbox": [ + 127, + 655, + 189, + 667 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 157, + 671, + 236, + 684 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Lgoic, cy Cxettccy, ceutgdg ky Nzs Lniplgd.", + "bbox": [ + 186, + 688, + 458, + 702 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 157, + 705, + 226, + 715 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Figure 14: Enhanced Prompt (Vigenère - 3shot - Letter)", + "bbox": [ + 307, + 733, + 687, + 747 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "5954", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Example C.8", + "text_level": 1, + "bbox": [ + 126, + 262, + 213, + 275 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Background:", + "text_level": 1, + "bbox": [ + 126, + 282, + 231, + 294 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples.", + "bbox": [ + 154, + 298, + 873, + 312 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Think step by step.", + "bbox": [ + 156, + 315, + 273, + 328 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Provide the Plaintext result in the format text, where text is the decrypted text.", + "bbox": [ + 154, + 
331, + 734, + 344 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Algorithm Flow:", + "text_level": 1, + "bbox": [ + 126, + 349, + 255, + 362 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Reverses the $^{**}$ Plaintiff\\*\\* to create the $^{**}$ Ciphertext\\*\\*.", + "bbox": [ + 154, + 365, + 500, + 378 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Examples:", + "text_level": 1, + "bbox": [ + 126, + 382, + 215, + 394 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "* Example 1:", + "bbox": [ + 156, + 398, + 240, + 411 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 414, + 265, + 428 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "123456789Y :rebmuN tropssaP ;0991 ,51 yluJ :htriB fo etaD ;nosnhoJ ylimE :emaN", + "bbox": [ + 218, + 431, + 729, + 444 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 449, + 255, + 458 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321", + "bbox": [ + 216, + 464, + 731, + 476 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "* Example 2:", + "bbox": [ + 156, + 481, + 240, + 494 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 498, + 265, + 510 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "4202,91 rebotcO :etaD gniliF ;nI-kaerB/tfehT :epyT ESA C;432100-SP-4202 :rebmuN ESA C", + "bbox": [ + 216, + 514, + 771, + 527 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 531, + 255, + 542 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024", + "bbox": [ + 216, + 546, + 774, + 561 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "* Example 3:", + "bbox": [ + 156, + 564, + 240, + 576 + ], + 
"page_idx": 26 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 581, + 265, + 593 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "}00.026 :deussi_dnufer,dessecorP:sutats,3202:raey{sdrocer_xat,654CBA-3202-NXT:reifitnedi_xat", + "bbox": [ + 216, + 596, + 848, + 609 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 614, + 255, + 625 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00", + "bbox": [ + 216, + 630, + 838, + 643 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Input:", + "text_level": 1, + "bbox": [ + 126, + 646, + 189, + 659 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 156, + 664, + 235, + 676 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": ".selegnAsoL ni sediser,naciremAna ,aideL", + "bbox": [ + 186, + 680, + 457, + 693 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 156, + 697, + 226, + 708 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Figure 15:Enhanced Prompt (Reverse -3shot-Letter)", + "bbox": [ + 310, + 725, + 684, + 739 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "5955", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Example C.9", + "text_level": 1, + "bbox": [ + 127, + 254, + 213, + 267 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Background:", + "text_level": 1, + "bbox": [ + 127, + 274, + 231, + 286 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples.", + "bbox": [ + 154, + 290, + 873, + 304 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Think step by step.", + "bbox": [ + 156, + 307, + 273, + 319 + ], + 
"page_idx": 27 + }, + { + "type": "text", + "text": "Provide the Plaintext result in the format text, where text is the decrypted text.", + "bbox": [ + 154, + 323, + 734, + 337 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Algorithm Flow:", + "text_level": 1, + "bbox": [ + 127, + 340, + 255, + 353 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "For each pair of letters in the $^{**}$ Plaintext\\*\\*, their positions are swapped to produce the $^{**}$ Ciphertext\\*\\*. If the number of letters is odd, the last letter remains in its original position.", + "bbox": [ + 124, + 356, + 872, + 387 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Examples:", + "text_level": 1, + "bbox": [ + 127, + 390, + 216, + 403 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "* Example 1:", + "bbox": [ + 156, + 407, + 240, + 419 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 423, + 265, + 436 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "aNem :mEli yoJnhos;nD ta efoB riht :uJy11 ,51 99;0P sapsro tuNbmre :9Y78563412", + "bbox": [ + 216, + 439, + 729, + 453 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 456, + 257, + 468 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321", + "bbox": [ + 216, + 472, + 731, + 486 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "* Example 2:", + "bbox": [ + 156, + 489, + 240, + 502 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 506, + 265, + 519 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "aCesN mueb:r2 20-4SP0-1032;4C sa eyTep :hTfe/trBae-knI ;iFilgnD ta:eO tcbore1 ,92 204", + "bbox": [ + 216, + 521, + 771, + 535 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 539, + 257, + 551 + ], + 
"page_idx": 27 + }, + { + "type": "text", + "text": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024", + "bbox": [ + 216, + 555, + 774, + 569 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "* Example 3:", + "bbox": [ + 156, + 571, + 240, + 585 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 588, + 265, + 601 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "at_xdineititre:XT-N0232A-CB54,6t xar_cerosd :yae:r2 20,3s atut:sP orecssde ,erufdni_sseu:d6 020.0", + "bbox": [ + 216, + 605, + 835, + 618 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 621, + 257, + 633 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00", + "bbox": [ + 216, + 638, + 840, + 652 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Input:", + "text_level": 1, + "bbox": [ + 127, + 655, + 189, + 667 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 156, + 671, + 236, + 684 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "eLid,aa nmArecina ,erised sniL soA gnlese.", + "bbox": [ + 186, + 688, + 453, + 701 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 156, + 705, + 226, + 715 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Figure 16: Enhanced Prompt (SwapPairs - 3shot - Letter)", + "bbox": [ + 302, + 733, + 692, + 747 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "5956", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Example C.10", + "text_level": 1, + "bbox": [ + 127, + 229, + 220, + 241 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Background:", + "text_level": 1, + "bbox": [ + 127, + 249, + 231, + 261 + ], + "page_idx": 28 + }, + { + "type": "text", 
+ "text": "Decrypt the provided Ciphertext and return the corresponding Plaintext following the given algorithm flow and examples.", + "bbox": [ + 154, + 265, + 873, + 278 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Think step by step.", + "bbox": [ + 156, + 282, + 273, + 296 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Provide the Plaintext result in the format text, where text is the decrypted text.", + "bbox": [ + 154, + 298, + 734, + 312 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Algorithm Flow:", + "text_level": 1, + "bbox": [ + 126, + 315, + 255, + 328 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "This encryption method converts each letter of the **Plaintext** into two letters in the **Ciphertext** such that the average of their ASCII values equals the ASCII value of the original letter.", + "bbox": [ + 124, + 331, + 870, + 362 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Examples:", + "text_level": 1, + "bbox": [ + 127, + 365, + 216, + 378 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "* Example 1:", + "bbox": [ + 157, + 382, + 240, + 394 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 398, + 265, + 411 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "MOaalndf: DFlnhjkmxz IKnpgimortnpmo; CEaasudf npeg AChjqssugi: IKtvkmxz 15, 1990; OQaartrtoqnacdfqx: XZ987654321", + "bbox": [ + 216, + 414, + 873, + 444 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 448, + 255, + 458 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321", + "bbox": [ + 216, + 464, + 731, + 476 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "* Example 2:", + "bbox": [ + 157, + 481, + 240, + 494 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 498, + 265, 
+ 510 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "BDaartdf MOtvlnacdfqs: 2024-OQRT-001234; BDaartdf SUxzoqdf: SUgidfgsu/ACqsdfaajl-HJmo; h CEaesudf: NPbdsunpacdfqs 19, 2024", + "bbox": [ + 216, + 514, + 870, + 544 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 548, + 255, + 558 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024", + "bbox": [ + 216, + 563, + 773, + 577 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "* Example 3:", + "bbox": [ + 157, + 580, + 240, + 593 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 598, + 265, + 609 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "suaawy_hjcedfmosuhjeghjdfqs: SUWYMO-2023-AAACBD456, suaawy_qsdfbnpqscert: xzdfaaqs: 2023,", + "bbox": [ + 216, + 613, + 873, + 627 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "rtsuasutvrt: OQqsnpbbdftrtdfce, qsdfgtvmoce_hjrttrtvdfce: 620.00", + "bbox": [ + 126, + 630, + 549, + 643 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 646, + 255, + 658 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00", + "bbox": [ + 216, + 663, + 838, + 676 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Input:", + "text_level": 1, + "bbox": [ + 127, + 680, + 189, + 692 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 156, + 696, + 236, + 709 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "KMdfcehjaa, aamo AAlndfqshjbdaamo, qsdfrthjcedfrh jhmo KMnprt AAmofhdfkmdfrt.", + "bbox": [ + 186, + 712, + 719, + 726 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 157, + 730, + 226, + 741 + ], + "page_idx": 28 + }, + { + "type": 
"text", + "text": "Figure 17: Enhanced Prompt (DualAvgCode - 3shot - Letter)", + "bbox": [ + 290, + 758, + 705, + 772 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "5957", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Example C.11", + "text_level": 1, + "bbox": [ + 127, + 254, + 218, + 267 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Background:", + "text_level": 1, + "bbox": [ + 127, + 274, + 231, + 286 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples.", + "bbox": [ + 154, + 290, + 873, + 304 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Think step by step.", + "bbox": [ + 156, + 307, + 273, + 319 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Provide the Plaintext result in the format text, where text is the decrypted text.", + "bbox": [ + 154, + 323, + 734, + 337 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Algorithm Flow:", + "text_level": 1, + "bbox": [ + 127, + 340, + 255, + 353 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "For each letter in the $^{**}$ Plaintext\\*\\*: - If the ASCII value is even, add 1 to it to get the corresponding character in the $^{**}$ Ciphertext\\*. 
- If the ASCII value is odd, subtract 1 to get the new character in the $^{**}$ Ciphertext\\*.", + "bbox": [ + 126, + 356, + 870, + 386 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Examples:", + "text_level": 1, + "bbox": [ + 127, + 390, + 215, + 403 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "* Example 1:", + "bbox": [ + 157, + 406, + 240, + 419 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 423, + 265, + 436 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Ozld: Dlhmax Kniorno; Ezud ng Chsui: Ktmx 15, 1990; Qzrrqnsu Otlcds: X987654321", + "bbox": [ + 216, + 439, + 744, + 453 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 456, + 257, + 468 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321", + "bbox": [ + 216, + 472, + 732, + 486 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "* Example 2:", + "bbox": [ + 157, + 489, + 240, + 502 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 506, + 265, + 519 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Bzrd Otlcds: 2024-QR-001234; Bzrd Uxqd: Uidgu/Csdzj-Ho; Ghmhof Ezud: Nbuncds 19, 2024", + "bbox": [ + 216, + 521, + 803, + 536 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 539, + 257, + 551 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024", + "bbox": [ + 216, + 555, + 774, + 569 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "* Example 3:", + "bbox": [ + 157, + 571, + 240, + 585 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 588, + 265, + 602 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "uzy_hedoughghds: 
UYO-2023-ZCB456, uzy_sdbnser: xdzs: 2023, ruzutr: Qsnbdrrde, sdgtoe_hrrte: 620.00", + "bbox": [ + 216, + 605, + 870, + 618 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 621, + 257, + 633 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00", + "bbox": [ + 216, + 638, + 840, + 652 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Input:", + "text_level": 1, + "bbox": [ + 127, + 655, + 189, + 667 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 156, + 671, + 236, + 684 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Mdehz, zo Zldshbzo, sdrhedr ho Mnr Zofdmdr.", + "bbox": [ + 186, + 688, + 473, + 701 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 157, + 705, + 226, + 715 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Figure 18: Enhanced Prompt (ParityShift - 3shot - Letter)", + "bbox": [ + 302, + 733, + 694, + 747 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "5958", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Example C.12", + "text_level": 1, + "bbox": [ + 127, + 254, + 220, + 267 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Background:", + "text_level": 1, + "bbox": [ + 127, + 274, + 231, + 286 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples.", + "bbox": [ + 154, + 290, + 873, + 304 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Think step by step.", + "bbox": [ + 156, + 307, + 273, + 319 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Provide the Plaintext result in the format text, where text is the decrypted text.", + "bbox": [ + 154, + 323, + 734, + 337 
+ ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Algorithm Flow:", + "text_level": 1, + "bbox": [ + 127, + 340, + 255, + 353 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "The algorithm splits the $^{**}$ Plaintext\\*\\* into words based on spaces. Each word is then individually encrypted using the Caesar cipher, resulting in the $^{**}$ ciphertext\\*\\*.", + "bbox": [ + 124, + 356, + 870, + 387 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Examples:", + "text_level": 1, + "bbox": [ + 127, + 390, + 215, + 403 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "* Example 1:", + "bbox": [ + 157, + 406, + 240, + 419 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 423, + 265, + 436 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "e:Nam lyEmi nson;Joh eDat fo th:Bir yJul 15,0;199 sportPas ber:Number 7654321Y98", + "bbox": [ + 216, + 439, + 729, + 451 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 456, + 257, + 468 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321", + "bbox": [ + 216, + 472, + 731, + 486 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "* Example 2:", + "bbox": [ + 157, + 489, + 240, + 502 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 506, + 265, + 519 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "eCas ber:Num 4-PS-001234;202 eCas e:Typ ft/Break-In;The ingFil e:Dat oberOct 19, 4202", + "bbox": [ + 216, + 521, + 771, + 535 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 539, + 257, + 551 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024", + "bbox": [ + 216, + 555, + 774, + 569 + ], + "page_idx": 30 + }, + { 
+ "type": "text", + "text": "* Example 3:", + "bbox": [ + 157, + 571, + 240, + 585 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 186, + 588, + 265, + 601 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "identifier:tax -2023-ABC456,TXNRecords:tax ar:ye 3,202 tus:sta cessed,Pro und_iuied:ref .00620", + "bbox": [ + 216, + 604, + 835, + 618 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 186, + 621, + 257, + 633 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00", + "bbox": [ + 216, + 638, + 838, + 651 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Input:", + "text_level": 1, + "bbox": [ + 127, + 655, + 189, + 668 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "- Ciphertext:", + "bbox": [ + 156, + 671, + 236, + 684 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "ia,Led na rican,Ame idesres ni Los eles.Ang", + "bbox": [ + 186, + 688, + 457, + 702 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "- Plaintiff:", + "bbox": [ + 157, + 705, + 226, + 715 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Figure 19: Enhanced Prompt (WordShift - 3shot - Letter)", + "bbox": [ + 302, + 733, + 692, + 747 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "5959", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 30 + }, + { + "type": "table", + "img_path": "images/be4b85a911a0f3570d25ab52c1a8437eef2655f1d3404adbbbd16fd89883e3b7.jpg", + "table_caption": [ + "Table 12: Results on CipherBank(Enhanced Prompt)" + ], + "table_footnote": [], + "table_body": "
ModelSubstitution CiphersTransposition CiphersCustom CiphersCipher Score
Rot13AtbashPolybiusVigenèreReverseSwapPairsDualAvgCodeParityShiftWordShiftAccuracyavg
Open-source Chat Models
Mixtral-8x22B-v0.10.760000.3802.670.380.380.51
Qwen2.5-72B-Instruct12.609.1600002.290.381.532.88
Llama-3.1-70B-Instruct2.671.15001.530.381.15000.76
Llama-3.3-70B-Instruct4.581.5300.381.1501.15000.98
DeepSeek-V341.6027.860.380.3865.955.3412.660.765.1717.79
Closed-source Models
GPT-4o-mini-2024-07-1821.7619.0800.384.3900005.07
GPT-4o-2024-08-0645.4224.0500.7651.538.401.911.1510.3115.95
GPT-4o-2024-11-2045.4241.980053.638.023.821.159.5418.17
gemini-1.5-pro63.695.730.760.3814.122.670.381.9110.6911.15
gemini-2.0-flash-exp45.0422.902.290.3846.564.583.8201.1514.08
Claude-Sonnet-3.5-102292.7582.0678.242.4879.399.732.4862.0244.8550.44
Reasoning Models
QwQ-32B-Preview1.913.052.670002.670.380.381.23
DeepSeek-R188.3786.5472.730.7646.9675.0173.1774.421.5157.72
gemini-2.0-flash-thinking37.9819.0910.50055.344.964.770.386.1115.46
ol-mini-2024-09-1254.2072.1450.00.7611.0718.7047.3349.627.2534.56
", + "bbox": [ + 117, + 107, + 882, + 360 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "sus externally sourced structured text (e.g., quotes from Shakespeare's works). The structured text exhibits greater linguistic familiarity, while the privacy-sensitive data represents real-world encryption needs, lacking inherent semantic patterns.", + "bbox": [ + 112, + 382, + 489, + 462 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "As shown in Table 13 and Table 14, models generally perform better on structured text, suggesting that they leverage linguistic priors rather than strictly following decryption rules. When encountering encrypted text with recognizable patterns, models tend to shortcut reasoning, aligning decoded fragments with plausible linguistic structures instead of strictly adhering to learned transformation rules. Conversely, for less structured, domain-specific text, models struggle to infer decryption patterns, reinforcing the advantage of CipherBank's privacy-sensitive dataset, which forces models to engage in independent reasoning rather than rely on pretraining biases.", + "bbox": [ + 112, + 463, + 489, + 690 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "D Error Analysis", + "text_level": 1, + "bbox": [ + 112, + 701, + 282, + 717 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "D.1 Error Classification", + "text_level": 1, + "bbox": [ + 112, + 727, + 319, + 741 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "This section defines the error categories observed in model decryption outputs. These classifications help identify systematic failure patterns and provide insights into how models approach cryptographic reasoning.", + "bbox": [ + 112, + 747, + 489, + 829 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "(A) Omission/Insertion: The model output contains missing or extra characters, words, or punctuation compared to the reference plaintext. 
These errors indicate incomplete decryption or unintended modifications, leading to", + "bbox": [ + 134, + 841, + 489, + 921 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "partial but inaccurate results.", + "bbox": [ + 544, + 382, + 761, + 398 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- (B) Name Decryption Error: The decryption result is correct except for the name part, which remains incorrect or partially distorted. This suggests challenges in handling named entities, possibly due to memorization effects or entity-based biases.", + "- (C) Semantic Inference: The model makes errors based on semantic reasoning rather than strictly following decryption rules. Instead of decoding symbols precisely, the model hallucinates plausible but incorrect outputs that fit the general meaning of the sentence. This indicates a tendency to prioritize linguistic coherence over strict decryption fidelity.", + "- (D) Reorganization: The output preserves the exact meaning of the reference plaintext but rearranges the sentence structure. This suggests that the model prioritizes fluency over strict character-level fidelity, leading to errors in cryptographic tasks where precision is essential.", + "- (E) Reasoning Failure: The model output is significantly different from the reference, and decryption is essentially unsuccessful. This suggests a fundamental failure in identifying encryption patterns, leading to outputs that bear little resemblance to the expected plaintext. This category includes cases where the model fails to infer transformation rules or apply correct decryption strategies." 
+ ], + "bbox": [ + 531, + 409, + 884, + 921 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "5960", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 31 + }, + { + "type": "table", + "img_path": "images/67364b7148e07ce1d85c2a4ea8fb38d75ff13dcce828846e8062ec62f7a4d1b4.jpg", + "table_caption": [ + "Table 13: Decryption Performance on Privacy-Sensitive Data" + ], + "table_footnote": [], + "table_body": "
ModelRot13AtbashPolybiusVigenèreReverseSwapDualAvgCodeParityShiftWordShiftAccuracyavg
Open-source Models
DeepSeek-V324.3415.6415.70033.723.5104.3515.6412.54
DeepSeek-R157.8871.0271.554.3533.574.35012.718.7029.35
Closed-source Models
GPT-4o-2024-11-2021.7421.740030.438.700013.0410.63
Gemini-2.0-Flash-Exp47.834.354.35052.1704.354.3513.0414.49
Claude-Sonnet-3.5-102286.9678.2665.224.3591.3013.044.3552.1747.8349.28
Gemini-2.0-Flash-Thinking39.134.350060.87004.3530.4315.46
o1-Mini-2024-09-1260.8786.9669.5708.70013.0417.394.3528.99
", + "bbox": [ + 114, + 107, + 882, + 247 + ], + "page_idx": 32 + }, + { + "type": "table", + "img_path": "images/3f19db9ab6803c61a4f28ce8404c5d001da0c21536c79523550d35e0e90aadc4.jpg", + "table_caption": [ + "Table 14: Decryption Performance on Structured Text" + ], + "table_footnote": [], + "table_body": "
ModelRot13AtbashPolybiusVigenèreReverseSwapPairDualAvgCodeParityShiftWordShiftAccuracyavg
Open-source Models
DeepSeek-V376.1224.0315.70052.1729.40012.7155.1329.47
DeepSeek-R184.5185.041007.5979.108.708.7015.6430.4346.63
Closed-source Models
GPT-4o-2024-11-2078.2639.134.35086.9621.7404.3543.4830.92
Gemini-2.0-Flash-Exp86.9613.044.35086.968.70017.3943.4828.99
Claude-Sonnet-3.5-102291.3095.6595.654.3510052.178.7078.2695.6569.08
Gemini-2.0-Flash-Thinking86.9613.048.70069.5717.390052.1727.54
o1-Mini-2024-09-1282.6195.6578.26060.874.3513.0417.3943.4843.96
", + "bbox": [ + 114, + 284, + 882, + 420 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "- (F) Other: Miscellaneous errors that do not fit into the defined categories.", + "bbox": [ + 134, + 445, + 487, + 476 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "This classification framework provides a structured approach to analyzing decryption errors, helping to pinpoint systematic weaknesses and guide future improvements in cryptographic reasoning models.", + "bbox": [ + 112, + 493, + 489, + 571 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "D.2 Examples of Different Error Types", + "text_level": 1, + "bbox": [ + 112, + 587, + 438, + 602 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "To further illustrate the types of decryption errors encountered in our evaluation, we provide concrete examples corresponding to each error category. These cases demonstrate how models fail in various aspects of decryption, including omission-insertion, name decryption errors, semantic inference, reorganization, reasoning failures, and other anomalies. Example D.1 - D6 showcase representative examples of each error type.", + "bbox": [ + 112, + 609, + 489, + 755 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "D.3 Detailed Error Distribution Tables", + "text_level": 1, + "bbox": [ + 112, + 768, + 435, + 783 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Tables 15-20 present a detailed breakdown of error distributions across different encryption algorithms for the six selected models. From these results, we identify several common trends and model-specific differences.", + "bbox": [ + 112, + 791, + 487, + 870 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Challenges in Name Decryption and Symbolic Reasoning. 
Across all models, name decryption errors remain prevalent, particularly in Atbash and", + "bbox": [ + 112, + 873, + 487, + 921 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Polybius, indicating persistent difficulties in handling entity-based transformations. Additionally, models struggle with key-based and transposition ciphers such as Vigenère and SwapPairs, suggesting limitations in tracking multi-step transformations and generalizing decryption strategies.", + "bbox": [ + 507, + 445, + 884, + 541 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Semantic Overreliance vs. Overthinking in Decryption. Chat models often exhibit semantic inference errors, where decrypted outputs align with linguistic patterns rather than encryption rules. In contrast, reasoning models tend to overthink simple tasks, leading to unnecessary self-correction loops that degrade performance in straightforward ciphers like Reverse.", + "bbox": [ + 507, + 542, + 885, + 669 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Structural Alignment and Insertion Errors. Frequent omission and insertion errors in WordShift and Reverse ciphers highlight difficulties in preserving character order. This suggests that models rely on semantic priors rather than strict symbolic reasoning, leading to misaligned outputs.", + "bbox": [ + 507, + 671, + 885, + 766 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Key Takeaways:", + "text_level": 1, + "bbox": [ + 527, + 768, + 660, + 784 + ], + "page_idx": 32 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Chat models (Claude, Gemini) perform well in substitution ciphers but struggle with complex rule-based encryption.", + "- Reasoning models (DeepSeek-R1, o1) maintain better structural accuracy but underperform in transposition-based and key-dependent ciphers." 
+ ], + "bbox": [ + 531, + 796, + 884, + 921 + ], + "page_idx": 32 + }, + { + "type": "page_number", + "text": "5961", + "bbox": [ + 480, + 928, + 517, + 940 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Example D.1: Error Type: Omission/Insertion", + "text_level": 1, + "bbox": [ + 126, + 116, + 426, + 130 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Example 1:", + "bbox": [ + 127, + 136, + 201, + 149 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Predictions: Card Number: ID 1245-6789-0123", + "bbox": [ + 127, + 152, + 423, + 165 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "References: Clark holds the ID Card Number 1245-6789-0123.", + "bbox": [ + 127, + 168, + 514, + 181 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Example 2:", + "bbox": [ + 127, + 192, + 201, + 205 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Predictions: Card Number: ID 1245-6789-0123", + "bbox": [ + 127, + 209, + 423, + 222 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "References: Clark holds the ID Card Number 1245-6789-0123.", + "bbox": [ + 127, + 225, + 514, + 237 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Example 3:", + "bbox": [ + 127, + 249, + 201, + 262 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Predictions: Salary Amount: $67,000; Pay Date: 2023-10-25", + "bbox": [ + 127, + 265, + 502, + 278 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "References: Salary Amount: $67,000, Pay Date: 2023-10-25.", + "bbox": [ + 127, + 281, + 502, + 294 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Example D.2: Error Type: Name Decryption Error", + "text_level": 1, + "bbox": [ + 127, + 375, + 458, + 388 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Example 1:", + "bbox": [ + 127, + 395, + 201, + 407 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Predictions: Learn, an American, inside on Los Angeles.", + "bbox": [ + 
127, + 411, + 477, + 424 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "References: Ledia, an American, resides in Los Angeles.", + "bbox": [ + 127, + 428, + 475, + 441 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Example 2:", + "bbox": [ + 127, + 451, + 201, + 464 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Predictions: Individual ID: A1234567; Name: John Doe; Age: 34; Gender Identity: Cisgender 16k11.2 Location, Country State Citizenship.", + "bbox": [ + 127, + 468, + 870, + 498 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "References: Individual ID: A1234567; Name: Jane Doe; Age: 34; Genetic Testing: Chromosome 16p11.2 Deletion, Celiac Disease Predisposition.", + "bbox": [ + 127, + 501, + 870, + 531 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Example 3:", + "bbox": [ + 127, + 541, + 201, + 554 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Predictions: Handed lost the passport Number A12345678.", + "bbox": [ + 127, + 557, + 492, + 571 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "References: Dean holds the passport number A12345678.", + "bbox": [ + 127, + 574, + 480, + 587 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Example D.3: Error Type: Semantic Inference", + "text_level": 1, + "bbox": [ + 127, + 667, + 426, + 681 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Example 1:", + "bbox": [ + 127, + 687, + 201, + 700 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Predictions: Jessica Brown, Bachelor of Biology, GPA: 3.9, Graduated 2023, Skills: Genetics, Microbiology, Ecology, Bioinformatics.", + "bbox": [ + 127, + 703, + 872, + 732 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "References: Jessica Brown, Bachelors in Biology, GPA: 3.9, Graduated 2023, Courses: Genetics, Microbiology, Ecology, Biochemistry.", + "bbox": [ + 127, + 736, + 872, + 766 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": 
"Example 2:", + "bbox": [ + 127, + 776, + 201, + 789 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Predictions: Patent-pending design specification PR2023_KURITY, Company Z, including batch production requirements.", + "bbox": [ + 127, + 793, + 872, + 807 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "References: Patent-pending design specification PR2023_KURITY, Company Z, including batch performance analysis.", + "bbox": [ + 127, + 810, + 853, + 822 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Example 3:", + "bbox": [ + 127, + 833, + 201, + 846 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Predictions: Office ID: P12345, Name: John, Position: Manager, Department: Finance.", + "bbox": [ + 127, + 850, + 660, + 863 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "References: Officer ID: P12345, Name: John, Position: Sergeant, Department: Homicide.", + "bbox": [ + 127, + 866, + 673, + 879 + ], + "page_idx": 33 + }, + { + "type": "page_number", + "text": "5962", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Example D.4: Error Type: Reorganization", + "text_level": 1, + "bbox": [ + 127, + 105, + 403, + 118 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Example 1:", + "text_level": 1, + "bbox": [ + 127, + 124, + 203, + 137 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Predictions: Name: Ledia; Nationality: American; Address: Los Angeles", + "bbox": [ + 127, + 141, + 576, + 155 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "References: Ledia, an American, resides in Los Angeles.", + "bbox": [ + 127, + 158, + 475, + 171 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Example 2:", + "text_level": 1, + "bbox": [ + 127, + 181, + 201, + 193 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Predictions: Passport Number: A12345678; Holder Name: Dean", + "bbox": [ + 127, + 198, + 526, + 211 + ], + 
"page_idx": 34 + }, + { + "type": "text", + "text": "References: Dean holds the passport number A12345678.", + "bbox": [ + 127, + 214, + 480, + 227 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Example 3:", + "text_level": 1, + "bbox": [ + 127, + 237, + 201, + 250 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Predictions: Name: Peter; Date of Birth: April 23, 1985; Passport Number: X123456789", + "bbox": [ + 127, + 254, + 672, + 267 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "References: Peter was born on April 23, 1985, and carries a passport with the number X123456789.", + "bbox": [ + 127, + 269, + 734, + 284 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Example D.5: Error Type: Reasoning Failure", + "text_level": 1, + "bbox": [ + 127, + 342, + 420, + 356 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Example 1:", + "text_level": 1, + "bbox": [ + 127, + 362, + 201, + 375 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Predictions: Address: 123 Main St, Apt 4B, New York, NY, Zip Code: 10001, Phone: 555-1234, Unit: 101, Floor: 2.", + "bbox": [ + 127, + 378, + 840, + 391 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "References: Officer: Lisa Grant, Sergeant, Downtown Precinct, Patrol Car: VG2301, Equipment: Radar Gun, Model: RGX501, Weapon: Taser X2.", + "bbox": [ + 127, + 395, + 872, + 424 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Example 2:", + "text_level": 1, + "bbox": [ + 127, + 435, + 201, + 448 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Predictions: Welcome, once more, securely within.", + "bbox": [ + 127, + 451, + 442, + 464 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "References: Ledia, an American, resides in Los Angeles.", + "bbox": [ + 127, + 468, + 475, + 481 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Example 3:", + "text_level": 1, + "bbox": [ + 127, + 491, + 201, + 504 + ], + "page_idx": 34 
+ }, + { + "type": "text", + "text": "Predictions: Passport ID: 123456789; Expiry Date: Emily, 38, Issued By Authority, Renewal By Agency. Valid Until 2025, Expiry Passport.", + "bbox": [ + 127, + 507, + 872, + 537 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "References: Patient ID: MRN456789; Medical History: Emily, 38, diagnosed with asthma, treated with inhalers. Allergic to dust mites, pollen.", + "bbox": [ + 127, + 539, + 870, + 571 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Example D.6: Error Type: Other", + "text_level": 1, + "bbox": [ + 127, + 629, + 342, + 643 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Example 1:", + "text_level": 1, + "bbox": [ + 127, + 649, + 201, + 661 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Predictions: CookieID12345 maintain login status forUserID98765 on www.example.com, facilitating seamless access. Analyzing recordsUserID98765's engagement, deducting 500 page views and a click-through rate of $4.5\\%$ across the session.", + "bbox": [ + 127, + 664, + 872, + 694 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "References: CookieID12345 maintains login status forUserID98765 on www.example.com, facilitating seamless access. 
Analytics tracksUserID98765's engagement, documenting 500 page views and a click-through rate of $4.5\\%$ across the session.", + "bbox": [ + 127, + 697, + 872, + 744 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Example 2:", + "text_level": 1, + "bbox": [ + 127, + 753, + 201, + 766 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Predictions: Code: Coordinates: Latitude Longitude: $38.251^{\\circ}\\mathrm{N}$ -85.754 W, Latitude Longitude: 34.091 N, -118.493 W.", + "bbox": [ + 127, + 771, + 872, + 784 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "References: Base Distribution: North Plains Base: $38.251^{\\circ}\\mathrm{N}$ , $-85.754^{\\circ}\\mathrm{W}$ , East Valley Site: $34.091^{\\circ}\\mathrm{N}$ , $-118.493^{\\circ}\\mathrm{W}$ .", + "bbox": [ + 127, + 787, + 840, + 800 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Example 3:", + "text_level": 1, + "bbox": [ + 127, + 810, + 201, + 824 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Predictions: Name: Alex Smith; Salary: $87,500; Pay Frequency: Biweekly; Position: Software Developer; Employee ID: EID-257846; Department: IT.", + "bbox": [ + 127, + 827, + 872, + 857 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "References: Name: Alex Smith, Salary: $87,500, Pay Frequency: Biweekly, Position: Software Developer, Employee ID: EID-257846, Department: IT.", + "bbox": [ + 127, + 860, + 872, + 890 + ], + "page_idx": 34 + }, + { + "type": "page_number", + "text": "5963", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 34 + }, + { + "type": "table", + "img_path": "images/38bbe446e13ba18d15fd06b41a04cc47cea4e522e54d39791afb5a5aaf575fd0.jpg", + "table_caption": [ + "Table 15: Error Type Percentages for Different Algorithms in Claude-Sonnet-3.5-1022 Model" + ], + "table_footnote": [], + "table_body": "
AlgorithmOmission/InsertionName Decryption ErrorError Types
Semantic InferenceReorganizationReasoning FailureOther
Rot1333.3351.850.0011.113.700.00
Atbash15.7978.950.003.510.001.75
Polybius42.6245.900.0011.480.000.00
Vigenère2.7332.425.083.5256.250.00
Reverse39.2448.100.005.066.331.27
SwapPairs15.9838.522.052.8738.112.46
DualAvgCode6.8839.688.502.4341.301.21
ParityShift19.7970.834.173.122.080.00
WordShift51.9522.082.608.4412.342.60
", + "bbox": [ + 119, + 107, + 878, + 256 + ], + "page_idx": 35 + }, + { + "type": "table", + "img_path": "images/34b7cd012ee0b1d154c089665a8631ea41624ba43ea234e86da6c5535cd2f699.jpg", + "table_caption": [ + "Table 16: Error Type Percentages for Different Algorithms in DeepSeek-R1 Model" + ], + "table_footnote": [], + "table_body": "
AlgorithmOmission/InsertionName Decryption ErrorError Types
Semantic InferenceReorganizationReasoning FailureOther
Rot1340.0030.004.2921.431.432.86
Atbash42.5924.070.9329.630.002.78
Polybius48.6317.120.6821.928.902.74
Vigenère4.6018.012.682.3071.650.77
Reverse25.6419.661.7145.306.411.28
SwapPairs9.2025.293.072.3058.621.53
DualAvgCode25.6322.613.5228.6419.100.50
ParityShift7.0229.396.583.9552.190.88
WordShift29.1722.922.0825.4220.000.42
", + "bbox": [ + 119, + 293, + 880, + 444 + ], + "page_idx": 35 + }, + { + "type": "table", + "img_path": "images/e4e0296214268fc410679b21684a7bfc38294b9c1d366dfab17f6371d932a377.jpg", + "table_caption": [ + "Table 17: Error Type Percentages for Different Algorithms in DeepSeek-V3 Model" + ], + "table_footnote": [], + "table_body": "
AlgorithmOmission/InsertionName Decryption ErrorError Types
Semantic InferenceReorganizationReasoning FailureOther
Rot1310.7355.9315.825.0811.860.56
Atbash8.0738.127.173.5941.261.79
Polybius5.4712.112.342.7376.950.39
Vigenère0.3820.772.690.7774.231.15
Reverse21.5040.195.6113.5518.220.93
SwapPairs1.9218.392.680.3876.250.38
DualAvgCode3.0712.643.452.6877.780.38
ParityShift1.9328.573.860.7764.480.39
WordShift27.8029.464.5617.0120.330.83
", + "bbox": [ + 119, + 480, + 880, + 631 + ], + "page_idx": 35 + }, + { + "type": "table", + "img_path": "images/7a2923eab0537b84bd6e80cfb2c8cbb128c80505478ea991161ca78009d5c0f9.jpg", + "table_caption": [ + "Table 18: Error Type Percentages for Different Algorithms in gemini-1.5-pro Model" + ], + "table_footnote": [], + "table_body": "
AlgorithmOmission/InsertionName Decryption ErrorError Types
Semantic InferenceReorganizationReasoning FailureOther
Rot1312.9858.020.765.3422.140.76
Atbash1.1515.003.080.7778.851.15
Polybius4.2117.243.071.9271.651.92
Vigenère2.2914.893.440.7678.630.00
Reverse20.8533.198.9410.2126.380.43
SwapPairs6.4925.571.911.5363.361.15
DualAvgCode2.6813.034.601.9277.390.38
ParityShift3.0828.463.080.3864.230.77
WordShift34.2524.202.7418.7219.630.46
", + "bbox": [ + 119, + 667, + 880, + 818 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "- All models show high name decryption errors and reasoning failures in Vigenère and SwapPairs, highlighting gaps in symbolic reasoning and long-term dependency tracking.", + "bbox": [ + 136, + 841, + 487, + 906 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "These observations reveal that no single model excels across all ciphers, emphasizing the need for advancements in structured reasoning and symbolic manipulation for decryption tasks. Future", + "bbox": [ + 507, + 841, + 882, + 904 + ], + "page_idx": 35 + }, + { + "type": "page_number", + "text": "5964", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 35 + }, + { + "type": "table", + "img_path": "images/888e0e791066465d2ecee9c620e29c5df183ec12ed2f0597a481f3b8033d86a2.jpg", + "table_caption": [ + "Table 19: Error Type Percentages for Different Algorithms in o1-mini Model" + ], + "table_footnote": [], + "table_body": "
AlgorithmOmission/InsertionName Decryption ErrorError Types
Semantic InferenceReorganizationReasoning FailureOther
Rot1326.9538.3013.4817.021.422.84
Atbash37.3531.337.2316.876.021.20
Polybius30.9432.371.4425.188.631.44
Vigenère0.0021.4310.713.5764.290.00
Reverse12.7029.108.2032.3817.210.41
SwapPairs1.919.541.530.0086.640.38
DualAvgCode0.0018.520.003.7077.780.00
ParityShift4.5534.303.314.9652.480.41
WordShift11.5828.574.635.7949.030.39
", + "bbox": [ + 119, + 107, + 884, + 258 + ], + "page_idx": 36 + }, + { + "type": "table", + "img_path": "images/d91db57a0bc8aaf456f6c85fb32452751b8acbfdcdb7a525c373c13caa576ec1.jpg", + "table_caption": [ + "Table 20: Error Type Percentages for Different Algorithms in o1 Model" + ], + "table_footnote": [], + "table_body": "
AlgorithmOmission/InsertionName Decryption ErrorError Types
Semantic InferenceReorganizationReasoning FailureOther
Rot1316.1928.574.765.7143.810.95
Atbash29.0949.095.4510.913.641.82
Polybius40.9128.796.0610.6112.121.52
Vigenère4.6236.151.541.1556.150.38
Reverse16.1425.563.5914.3538.571.79
SwapPairs5.2631.585.265.2652.630.00
DualAvgCode24.6233.853.082.3135.380.77
ParityShift4.0426.774.552.0262.120.51
WordShift30.8824.262.9418.3821.322.21
", + "bbox": [ + 119, + 294, + 884, + 445 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "improvements could focus on:", + "text_level": 1, + "bbox": [ + 114, + 468, + 347, + 483 + ], + "page_idx": 36 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Minimizing the Impact of Semantic Bias in Logical Inference: Cryptographic reasoning tasks often necessitate abstract rule extraction rather than reliance on semantic interpretation. An excessive dependence on linguistic priors can impede the model's ability to identify underlying structural transformations, resulting in systematic errors. Future advancements should focus on reducing semantic interference to improve the extraction of abstract logical patterns.", + "- Enhancing Comparative Reasoning for Pattern Recognition: While many decryption tasks in CipherBank are straightforward for humans, models frequently fail to derive correct transformation rules from provided exemplars. Strengthening contrastive reasoning mechanisms can enable models to better differentiate encryption structures, facilitating more effective pattern recognition and decryption.", + "- Addressing Overthinking in Model Reasoning: Experimental results indicate that reasoning models exhibit superior performance on complex tasks but underperform on sim" + ], + "bbox": [ + 136, + 495, + 489, + 921 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "pler problems. Analysis of inference trajectories reveals a tendency toward recursive self-evaluation, where models continuously revise their approach, even when a straightforward solution is available. For example, in the Reverse cipher, models occasionally attempt unnecessarily complex reasoning paths instead of applying direct positional transformations. 
Mitigating such overthinking behaviors could enhance efficiency and robustness in logical reasoning.", + "bbox": [ + 544, + 468, + 884, + 645 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "Addressing these limitations will bridge the gap between linguistic fluency and structured cryptographic reasoning, making LLMs more robust in real-world encryption scenarios.", + "bbox": [ + 507, + 656, + 882, + 721 + ], + "page_idx": 36 + }, + { + "type": "page_number", + "text": "5965", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 36 + } +] \ No newline at end of file diff --git a/2025/CipherBank_ Exploring the Boundary of LLM Reasoning Capabilities through Cryptography Challenge/f6a4ee7e-a5b2-4a68-bf9b-6d717be3f8fe_model.json b/2025/CipherBank_ Exploring the Boundary of LLM Reasoning Capabilities through Cryptography Challenge/f6a4ee7e-a5b2-4a68-bf9b-6d717be3f8fe_model.json new file mode 100644 index 0000000000000000000000000000000000000000..27012b4eeaf5f43057d41c9f39d8103985a59130 --- /dev/null +++ b/2025/CipherBank_ Exploring the Boundary of LLM Reasoning Capabilities through Cryptography Challenge/f6a4ee7e-a5b2-4a68-bf9b-6d717be3f8fe_model.json @@ -0,0 +1,8942 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.128, + 0.084, + 0.164, + 0.111 + ], + "angle": 0, + "content": "C" + }, + { + "type": "header", + "bbox": [ + 0.164, + 0.089, + 0.87, + 0.128 + ], + "angle": 0, + "content": "cipherBank: Exploring the Boundary of LLM Reasoning Capabilities through Cryptography Challenges" + }, + { + "type": "text", + "bbox": [ + 0.22, + 0.139, + 0.777, + 0.173 + ], + "angle": 0, + "content": "Yu Li\\(^{1}\\), Qizhi Pei\\(^{1,2}\\), Mengyuan Sun\\(^{1}\\), Honglin Lin\\(^{1}\\), Chenlin Ming\\(^{1,3}\\), Xin Gao\\(^{1}\\), Jiang Wu\\(^{1}\\), Conghui He\\(^{1}\\), Lijun Wu\\(^{1*}\\)" + }, + { + "type": "text", + "bbox": [ + 0.321, + 0.174, + 0.678, + 0.19 + ], + "angle": 0, + "content": "\\(^{1}\\)Shanghai Artificial Intelligence Laboratory" + }, + { + 
"type": "text", + "bbox": [ + 0.24, + 0.19, + 0.758, + 0.207 + ], + "angle": 0, + "content": "\\(^{2}\\)Renmin University of China \\(^{3}\\)Shanghai Jiao Tong University" + }, + { + "type": "text", + "bbox": [ + 0.308, + 0.208, + 0.691, + 0.223 + ], + "angle": 0, + "content": "{liyu1, heconghui, wulijun}@pjlab.org.cn" + }, + { + "type": "text", + "bbox": [ + 0.329, + 0.223, + 0.67, + 0.241 + ], + "angle": 0, + "content": "https://cipherbankeva.github.io" + }, + { + "type": "title", + "bbox": [ + 0.261, + 0.261, + 0.341, + 0.276 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.285, + 0.461, + 0.827 + ], + "angle": 0, + "content": "Large language models (LLMs) have demonstrated remarkable capabilities, especially the recent advancements in reasoning, such as o1 and o3, pushing the boundaries of AI. Despite these impressive achievements in mathematics and coding, the reasoning abilities of LLMs in domains requiring cryptographic expertise remain underexplored. In this paper, we introduce CipherBank, a comprehensive benchmark designed to evaluate the reasoning capabilities of LLMs in cryptographic decryption tasks. CipherBank comprises 2,358 meticulously crafted problems, covering 262 unique plaintexts across 5 domains and 14 subdomains, with a focus on privacy-sensitive and real-world scenarios that necessitate encryption. From a cryptographic perspective, CipherBank incorporates 3 major categories of encryption methods, spanning 9 distinct algorithms, ranging from classical ciphers to custom cryptographic techniques. We evaluate state-of-the-art LLMs on CipherBank, e.g., GPT-4o, DeepSeek-V3, and cutting-edge reasoning-focused models such as o1 and DeepSeek-R1. 
Our results reveal significant gaps in reasoning abilities not only between general-purpose chat LLMs and reasoning-focused LLMs but also in the performance of current reasoning-focused models when applied to classical cryptographic decryption tasks, highlighting the challenges these models face in understanding and manipulating encrypted data. Through detailed analysis and error investigations, we provide several key observations that shed light on the limitations and potential improvement areas for LLMs in cryptographic reasoning. These findings underscore the need for continuous advancements in LLM reasoning capabilities." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.845, + 0.26, + 0.86 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.87, + 0.49, + 0.902 + ], + "angle": 0, + "content": "Large Language Models (LLMs) have revolutionized artificial intelligence by achieving state-of" + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.264, + 0.876, + 0.41 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.509, + 0.425, + 0.883, + 0.454 + ], + "angle": 0, + "content": "Figure 1: Comprehensive Performance of SOTA Chat and Reasoning Models on CipherBank." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.487, + 0.885, + 0.922 + ], + "angle": 0, + "content": "the-art (SOTA) performance across diverse domains, from Natural Language Understanding (NLP) (Dong et al., 2019; Karanikolas et al., 2023; Sasaki et al., 2024) to complex problem-solving (Yao et al., 2024; Ge et al., 2023). Recent models, such as GPT-4o (Hurst et al., 2024) and Claude 3.5 (Anthropic, 2024), have demonstrated unprecedented versatility, excelling in tasks ranging from creative writing to technical analysis. 
A particularly notable advancement lies in the reasoning-enhanced LLMs, which have emerged as a critical benchmark for evaluating LLMs' intelligence and now can solve mathematical problems (Wu et al., 2024; Ahn et al., 2024; Liu et al., 2024c), debug intricate code (Lee et al., 2024; Zhong et al., 2024), and even engage in multi-step logical deduction (Sun et al., 2024; Wang et al., 2023) with human-like proficiency. For instance, specialized architectures like o1 (Jaech et al., 2024) and DeepSeek-R1 (Guo et al., 2025) have pushed the boundaries of AI reasoning, achieving breakthroughs in domains such as theorem proving (Yang et al., 2024b) and algorithmic optimization (Liu et al., 2024b). These achievements underscore the transformative potential of LLMs as general-purpose reasoning engines, capable of adapting to both broad and specialized challenges." + }, + { + "type": "page_footnote", + "bbox": [ + 0.136, + 0.907, + 0.287, + 0.921 + ], + "angle": 0, + "content": "* Corresponding author" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.928, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5929" + }, + { + "type": "footer", + "bbox": [ + 0.229, + 0.946, + 0.768, + 0.959 + ], + "angle": 0, + "content": "Findings of the Association for Computational Linguistics: ACL 2025, pages 5929-5965" + }, + { + "type": "footer", + "bbox": [ + 0.27, + 0.96, + 0.729, + 0.972 + ], + "angle": 0, + "content": "July 27 - August 1, 2025 ©2025 Association for Computational Linguistics" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.493, + 0.246 + ], + "angle": 0, + "content": "To quantify progress, the community has proposed numerous benchmarks targeting mathematical reasoning (e.g., MATH (Hendrycks et al., 2021a), AIME1, coding proficiency (e.g., HumanEval (Chen et al., 2021a), MBPP (Austin et al., 2021)), and general logical deduction (e.g., FOLO (Han et al., 2024), MMBench (Yuan Liu, 2023), CaLM (Chen et al., 2024). 
These testbeds have become indispensable tools for assessing model capabilities." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.248, + 0.492, + 0.617 + ], + "angle": 0, + "content": "Despite extensive evaluations in mathematics and coding, one critical domain remains underexplored: cryptographic decryption. Cryptographic reasoning (Shree et al., 2017) demands unique capabilities, including pattern recognition, algorithmic Reverse-engineering, and contextual understanding of security constraints (Schneier, 2002)—skills distinct from those tested in conventional benchmarks. This gap is particularly consequential, as cryptography lies at the heart of modern digital security (Konheim, 2007), with applications spanning privacy-preserving communication (Soomro et al., 2019), secure authentication (Rani et al., 2022), and data integrity (Sarkar et al., 2021). The absence of a rigorous benchmark for cryptographic reasoning not only limits the true understanding of LLM's reasoning ability but also hinders progress toward AI systems capable of contributing to security-critical contexts (e.g., jailbreaking (Wei et al., 2024)). OpenAI has scratched the surface of this challenge and put a demo2 when releasing their strong reasoning model o1, but no serious efforts have been made to reveal this challenge in the committee." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.618, + 0.492, + 0.861 + ], + "angle": 0, + "content": "To address this gap, we introduce CipherBank, the first comprehensive benchmark specially designed to evaluate LLMs' reasoning capabilities in cryptographic decryption tasks. CipherBank is meticulously constructed to reflect real-world scenarios requiring encryption, instead of general texts that may serve as a toy testbed, with 2,358 problems derived from 262 unique plaintexts across 5 domains (e.g., Personal Privacy, Financial Information) and 14 subdomains (e.g., Identity Information, Personal Income). 
As for cipher algorithms, it spans 3 major cryptographic categories—Substitution Ciphers (e.g., Rot13, Vigenère), Transposition Ciphers (e.g., Reverse, SwapPairs), and custom hybrid algo" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.887, + 0.214 + ], + "angle": 0, + "content": "rithms—encompassing 9 distinct encryption methods, covering 5 difficulty levels (from Basic to Expert) to ensure a diverse range of challenges. By integrating privacy-sensitive contexts and multilayered cryptographic challenges, CipherBank provides a nuanced evaluation framework that captures both the complexity and practicality of real-world decryption tasks." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.215, + 0.887, + 0.586 + ], + "angle": 0, + "content": "We evaluate CipherBank on SOTA LLMs, including general-purpose models (GPT-4o (Hurst et al., 2024), DeepSeek-V3 (Liu et al., 2024a)) and reasoning-optimized models (o1 (Jaech et al., 2024), DeepSeek-R1 (Guo et al., 2025)). Results reveal striking limitations: even advanced models struggle with classical ciphers, achieving only 45.14 score on tasks solvable by human cryptanalysts. Notably, we observe a significant performance gap between general chat LLMs and specialized reasoning models, suggesting that current reasoning optimizations inadequately address cryptographic challenges. Besides, we also provide studies on different aspects for deep understandings, such as evaluate on noised plaintexts and different length of plaintexts. Observations show the limitations of current models in decryption reasoning, with chat and reasoning models each exhibiting distinct strengths and weaknesses in cryptographic tasks. These findings highlight the need for targeted improvements in LLMs' cryptographic reasoning, with implications for both AI safety (e.g., adversarial robustness) and applications in cybersecurity." 
+ }, + { + "type": "title", + "bbox": [ + 0.509, + 0.603, + 0.771, + 0.62 + ], + "angle": 0, + "content": "2 CipherBank Construction" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.632, + 0.888, + 0.922 + ], + "angle": 0, + "content": "CipherBank is a purpose-built benchmark designed to rigorously evaluate the reasoning capabilities of LLMs in cryptographic decryption tasks. It integrates three core components to ensure comprehensive coverage of real-world scenarios and cryptographic complexity: (1) diverse plaintexts meticulously constructed from multiple dimensions of real-world privacy-sensitive data, ensuring the decryption process aligns with practical requirements; (2) a comprehensive suite of encryption algorithms, including both traditional cryptographic methods and custom-designed algorithms, to thoroughly assess the model's reasoning, inductive, and computational capabilities from multiple perspectives; and (3) a structured problem set with rich metadata, enabling granular performance analysis and detailed error analysis based on the diverse properties of the plaintexts." + }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.87, + 0.435, + 0.896 + ], + "angle": 0, + "content": "1https://huggingface.co/datasets/AI-M0/ aimo-validation-aime" + }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.897, + 0.342, + 0.921 + ], + "angle": 0, + "content": "2https://openai.com/index/ learning-to-reason-with-11ms/" + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.87, + 0.435, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.522, + 0.941 + ], + "angle": 0, + "content": "5930" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.123, + 0.082, + 0.882, + 0.393 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.403, + 0.884, + 0.446 + ], + "angle": 0, + "content": "Figure 2: Overview of CipherBank. 
CipherBank consists of simulated privacy data encrypted using various algorithms. The left side of the figure shows five domains, 14 subdomains, and selected tags. The right side displays three encryption categories, nine specific algorithms, and their corresponding difficulty levels." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.471, + 0.449, + 0.503 + ], + "angle": 0, + "content": "2.1 Plaintiff Data: Design, Sources, and Real-World Alignment" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.515, + 0.49, + 0.741 + ], + "angle": 0, + "content": "To construct CipherBank, we meticulously analyze real-world encryption scenarios and categorize the corresponding data types into five primary domains: Personal Privacy Data, Enterprise Sensitive Data, Public Safety Data, Financial Asset Data and Internet Records. These domains are further refined into 14 subdomains (e.g., Health Information, Policy Data) to ensure comprehensive coverage of encryption needs. Inspired by UltraChat (Ding et al., 2023), we adopt a tag-based approach to systematically structure encryption-relevant data, ensuring semantic consistency and domain relevance. Below, we detail the 3-step process for generating high-quality plaintext data." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.745, + 0.49, + 0.922 + ], + "angle": 0, + "content": "Step 1: Tag Definition and Curation. We leverage GPT-4o to generate candidate tags for each subdomain, capturing diverse real-world encryption scenarios. Human experts then curate these tags, eliminating redundancies, irrelevancies, and ambiguous entries, resulting in 89 distinct tags (see Appendix A.1). This structured approach ensures that the generated plaintext data remains realistic, contextually meaningful, and representative of actual encryption use cases. 
The tags are designed to align with the Variable Length property, enabling" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.471, + 0.882, + 0.501 + ], + "angle": 0, + "content": "the generation of inputs of varying sizes to assess model robustness." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.503, + 0.885, + 0.841 + ], + "angle": 0, + "content": "Step 2: Controlled Text Generation. Our plaintext generation process employs tag combinations to control text granularity: entries with more tags contain richer contextual details and greater length, while those with fewer tags remain concise and specific. To ensure semantic validity, all generated data are filtered to eliminate generic or redundant descriptions, creating a dataset that reflects diverse encryption scenarios with varying complexity. Additionally, we introduce the Noise Perturbation property through controlled noise injection, which serves two key objectives: (1) testing the model's anti-interference capabilities and (2) reducing its reliance on contextual semantics to enhance robustness. Furthermore, we incorporate Sensitive Numerical Data by designing scenarios with complex alphanumeric combinations, including critical identifiers such as ID card and passport number. This multifaceted approach enables a comprehensive evaluation of the model's ability to address sophisticated decryption challenges." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.842, + 0.885, + 0.922 + ], + "angle": 0, + "content": "Step 3: Expert Validation and Refinement. After generation, we conduct expert validation to ensure data quality, correctness, and relevance. Noninformative content, excessively long or short samples, and entries lacking clear privacy attributes are" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.928, + 0.519, + 0.941 + ], + "angle": 0, + "content": "5931" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.49, + 0.166 + ], + "angle": 0, + "content": "filtered out. 
Through this rigorous refinement process, we retain 262 high-quality plaintext samples. This approach enables a practical and application-driven benchmark for evaluating LLMs' decryption capabilities in cryptographic reasoning tasks." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.176, + 0.345, + 0.191 + ], + "angle": 0, + "content": "2.2 Encryption Algorithms" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.196, + 0.49, + 0.549 + ], + "angle": 0, + "content": "CipherBank incorporates 3 major categories of encryption methods: Substitution Ciphers, Transposition Ciphers, and Custom Ciphers. (1) Substitution-based techniques, including Rot13, Atbash, Polybius and Vigenère, test a model's ability to decode character-level transformations. These ciphers involve monoalphabetic or polyalphabetic substitutions, where each character is replaced by another based on a fixed rule or key. These methods evaluate the model's capacity to decode symbolic mappings and generalize across substitution rules. (2) Transposition-based techniques, such as Reverse and SwapPair, focus on positional rearrangements rather than symbol substitutions. These ciphers challenge the model to recognize structural patterns, such as reversed sequences or pairwise swaps. Unlike substitution ciphers, which alter character identities but preserve their order, transposition ciphers preserve characters but disrupt their sequence. This tests the model's ability to analyze sequential dependencies and reconstruct the original symbol order." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.551, + 0.49, + 0.807 + ], + "angle": 0, + "content": "To further assess LLMs' ability to decrypt uncommon encryption methods, we introduce (3) Custom-designed ciphers that deviate from standard cryptographic schemes. (a) DualAvgCode is inspired by OpenAI's o1 model showcase3, where iterative transformations require models to infer multi-step encryption patterns. 
(b) ParityShift draws from LSB steganography (Mielikainen, 2006), a common technique in information hiding, incorporating bitwise manipulations based on character parity. (c) WordShift Cipher is designed to evaluate LLMs' ability to decrypt ciphers that combine substitution and transposition encryption, performing Caesar-style letter shifts within each word individually, blending character-level substitution with structural reordering." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.808, + 0.49, + 0.888 + ], + "angle": 0, + "content": "Meanwhile, We categorize the nine algorithms into five difficulty tiers based on key necessity and computational complexity. T1 (Basic) includes simple ciphers like ROT13 and Reverse. T2 (Intermediate) introduces Atbash and WordShift with" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.885, + 0.214 + ], + "angle": 0, + "content": "slightly more complex rules. T3 (Moderate) covers DualAvgCode and Polybius, requiring structured encoding. T4 (Advanced) involves ParityShift and SwapPairs with intricate data manipulation. T5 (Expert) features the Vigenère cipher, a polyalphabetic substitution cipher known for its keyword-based complexity. This framework organizes encryption techniques from basic to expert." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.224, + 0.73, + 0.24 + ], + "angle": 0, + "content": "2.3 CipherBank Statistics" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.245, + 0.883, + 0.309 + ], + "angle": 0, + "content": "As shown in Figure 2, we provides an overview of CipherBank structure. The encryption algorithm in Section 2.2 applies to the expert-curated dataset from Section 2.1, yielding 2,358 test data points." + }, + { + "type": "table_caption", + "bbox": [ + 0.58, + 0.321, + 0.811, + 0.336 + ], + "angle": 0, + "content": "Table 1: Statistics of CipherBank." + }, + { + "type": "table", + "bbox": [ + 0.512, + 0.346, + 0.883, + 0.447 + ], + "angle": 0, + "content": "
Domains#Tag#Plaintext#TestAvg(len)
Personal Privacy Data2350450107.88
Enterprise Sensitive Data1652468103.10
Public Safety Data1763567110.89
Financial Asset Data1344396163.68
Internet Records2053477191.92
Summary892622358134.03
" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.462, + 0.884, + 0.59 + ], + "angle": 0, + "content": "Table 1 summarizes the distribution of plaintexts across 5 domains, each with varying numbers of tags, samples, and test cases. Notably, Internet Records has the longest plaintexts (191.92), while Enterprise Sensitive Data has shorter samples (103.10). This diversity ensures a comprehensive evaluation of model performance across different encryption contexts." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.602, + 0.649, + 0.617 + ], + "angle": 0, + "content": "3 Evaluations" + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.628, + 0.692, + 0.643 + ], + "angle": 0, + "content": "3.1 Evaluation Setup" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.648, + 0.884, + 0.793 + ], + "angle": 0, + "content": "Evaluation Protocols. In terms of testing methodology, CipherBank's evaluation follows the Known-Plaintext Attack framework (Zulkifli and Mohd, 2008), employing a 3-shot testing approach. We prompt the model with three plaintext-ciphertext pairs as demonstrations to infer encryption rules, identify potential keys, and apply the learned patterns to decrypt a new ciphertext. The detailed prompt can be found in Appendix B.1." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.794, + 0.884, + 0.922 + ], + "angle": 0, + "content": "For evaluation metrics, we primarily employ accuracy to measure overall decryption success, which is the ratio of correctly decrypted cases to total test cases, where correctness requires an exact character match with the plaintext. 
Additionally, to capture finer-grained differences between the decrypted output and the original plaintext, we incorporate Levenshtein similarity (Yujian and Bo," + }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.895, + 0.341, + 0.921 + ], + "angle": 0, + "content": "3https://openai.com/index/ learning-to-reason-with-llms/" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.928, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5932" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.127, + 0.086, + 0.36, + 0.1 + ], + "angle": 0, + "content": "Example 2.1: Plain-Ciphertext Pair" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.106, + 0.333, + 0.119 + ], + "angle": 0, + "content": "Domain: Personal Privacy Data" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.122, + 0.35, + 0.135 + ], + "angle": 0, + "content": "Subdomain: Identity Information" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.139, + 0.551, + 0.152 + ], + "angle": 0, + "content": "Tag Combination: [\"Name\", \"Date of Birth\", \"Passport Number\"]" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.156, + 0.192, + 0.167 + ], + "angle": 0, + "content": "Plaintext:" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.172, + 0.66, + 0.185 + ], + "angle": 0, + "content": "Peter was born on April 23, 1985, and carries a passport with the number X123456789." + }, + { + "type": "title", + "bbox": [ + 0.127, + 0.189, + 0.254, + 0.201 + ], + "angle": 0, + "content": "Encryption results:" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.205, + 0.73, + 0.219 + ], + "angle": 0, + "content": "(1) Rot13: Crgre jnf obea ba Ncevy 23, 1985, naq pneevrf n cnffcbeg jvgu gur ahzore K123456789." 
+ }, + { + "type": "text", + "bbox": [ + 0.127, + 0.222, + 0.754, + 0.235 + ], + "angle": 0, + "content": "(2) SwapPairs: ePet raw sobnro npAir l32,9158,na dacrei s aapssoptrw ti hht eunbmreX 21436587.9" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.238, + 0.762, + 0.251 + ], + "angle": 0, + "content": "(3) WordShift : erPet was nbor no ilApr 23, 5,198 and riescar a sportpas hwit the bernum 3456789.X12" + }, + { + "type": "list", + "bbox": [ + 0.127, + 0.205, + 0.762, + 0.251 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.256, + 0.163, + 0.267 + ], + "angle": 0, + "content": "(4) ..." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.272, + 0.387, + 0.285 + ], + "angle": 0, + "content": "More results can be found in the appendix." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.316, + 0.489, + 0.397 + ], + "angle": 0, + "content": "2007). We compute the Levenshtein distance for each sentence individually and report the average Levenshtein similarity across all test cases, providing a more nuanced assessment of model performance beyond binary correctness." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.403, + 0.49, + 0.483 + ], + "angle": 0, + "content": "LLM Candidates. For a comprehensive evaluation, we carefully selected 18 SOTA LLMs for evaluation, ensuring a diverse representation of open-source, closed-source, and reasoning-specialized models. Below, we outline the tested models:" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.491, + 0.49, + 0.604 + ], + "angle": 0, + "content": "\\(\\star\\) Open-Source Chat Models: We evaluate leading open-source LLMs, including Mistral AI's Mixtral-8x22B (Jiang et al., 2024a), Alibaba's Qwen2.5-72B-Instruct (Yang et al., 2024a), Meta's Llama-3.1-70B-Instruct and Llama-3.3-70B-Instruct (Dubey et al., 2024), as well as the rising star - DeepSeek-V3 (Liu et al., 2024a)." 
+ }, + { + "type": "text", + "bbox": [ + 0.113, + 0.611, + 0.49, + 0.722 + ], + "angle": 0, + "content": "\\(\\star\\) Closed-Source Models: For proprietary models, evaluation is conducted via API access. The tested models include OpenAI's 4o-mini and GPT-4o series (0806, 1120) (Hurst et al., 2024), DeepMind's Gemini-1.5-Pro (Team, 2024a) and Gemini-2.0-Flash-Exp\\(^{4}\\), along with Anthropic's Claude-Sonnet-3.5 \\((1022)^{5}\\)." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.73, + 0.49, + 0.811 + ], + "angle": 0, + "content": "\\(\\star\\) Reasoning Models: We further investigate models optimized for reasoning tasks, including QwQ-32B-Preview (Team, 2024b), DeepSeek-R1 (Guo et al., 2025), Gemini-2.0-Flash-Thinking \\((1219)^{6}\\) o1-mini (0912) and o1 (1217) (Jaech et al., 2024)." + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.491, + 0.49, + 0.811 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.316, + 0.71, + 0.331 + ], + "angle": 0, + "content": "3.2 Benchmark Results" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.339, + 0.884, + 0.403 + ], + "angle": 0, + "content": "Table 2 presents the evaluation results of all candidate LLMs (Levenshtein similarity results are in Appendix C.1). Below, we distill the experimental findings into several observations:" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.405, + 0.885, + 0.614 + ], + "angle": 0, + "content": "Limitations of Current Models in Cryptographic Reasoning. Despite advancements in LLMs, Table 2 highlights their limitations in structured cryptographic reasoning. The overall performance remains low, with most SOTA models struggling to achieve meaningful accuracy. In Cipher Score, common models like Qwen and LLaMA perform particularly poorly, with some scoring in the single digits or near zero. 
Even the best-performing models, Claude-3.5 and o1, achieve less than 50 in accuracy, underscoring the significant difficulty of CipherBank and the challenges LLMs face in systematic decryption." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.615, + 0.884, + 0.775 + ], + "angle": 0, + "content": "Reasoning Models Generally Outperform Chat Models. When comparing reasoning models to chat models, generally we can find that the reasoning models do outperform chat models on all cipher algorithms and achieve better overall performance. The only expectation is the superior performance of Claude-3.5 (45.14) even better than o1, and also the bad performance of QwQ-32B-Preview (only 0.76 accuracy). This clearly demonstrate the advantages of the reasoning-specialized models." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.777, + 0.885, + 0.922 + ], + "angle": 0, + "content": "Closed-Source Models Retain an Edge Over Open-Source Models. Overall, closed-source models outperform open-source models in cryptographic decryption. Claude-3.5 (45.14) and o1 (40.59) achieve the highest performance across all cipher categories. However, DeepSeek-V3 (9.86) and DeepSeek-R1 (25.91) surpass most models in the GPT and Gemini families, indicating that advanced open-source models are closing the gap." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.845, + 0.479, + 0.871 + ], + "angle": 0, + "content": "\\(^{4}\\)https://deepmind.google/technologies/gemini/ flash/" + }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.872, + 0.38, + 0.895 + ], + "angle": 0, + "content": "5https://www.anthropic.com/news/claude-3-5-sonnet" + }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.896, + 0.478, + 0.921 + ], + "angle": 0, + "content": "\\(^{6}\\)https://deepmind.google/technologies/gemini/flash-thinking/" + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.845, + 0.479, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.928, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5933" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.083, + 0.885, + 0.128 + ], + "angle": 0, + "content": "Table 2: 3-shot scores (\\%) of LLMs across three major encryption paradigms and nine specific encryption algorithms on CipherBank. The highest scores in each category are highlighted with a blue background, while the second-best results are underlined for emphasis." + }, + { + "type": "table", + "bbox": [ + 0.119, + 0.137, + 0.885, + 0.403 + ], + "angle": 0, + "content": "
ModelSubstitution CiphersTransposition CiphersCustom CiphersCipher Score
RotAtbashPolybiusVigenèreReverseSwapPairsDualAvgCodeParityShiftWordShiftAccuracyavg
Open-source Chat Models
Mixtral-8x22B-v0.10.380000.7600.3801.150.30
Qwen2.5-72B-Instruct1.1500000.381.1502.290.55
Llama-3.1-70B-Instruct1.150.3800.38000.380.380.760.38
Llama-3.3-70B-Instruct2.670.38000000.7600.42
DeepSeek-V332.4414.882.290.7628.470.380.381.148.029.86
Closed-source Models
GPT-4o-mini-2024-07-183.692.0300.512.1600.3800.251.00
GPT-4o-2024-08-0638.173.050.380.7625.192.2901.148.408.82
GPT-4o-2024-11-2026.466.990.130.7615.270.760.250.896.116.40
gemini-1.5-pro55.340.760.380.7610.310.760.380.7616.419.54
gemini-2.0-flash-exp35.883.051.530.3829.391.5300.765.348.65
Claude-Sonnet-3.5-102283.2175.1972.901.9163.936.874.9658.2139.1245.14
Reasoning Models
QwQ-32B-Preview1.530.381.910000.380.382.290.76
DeepSeek-R173.2858.7844.270.3810.690.3824.0512.988.4025.91
gemini-2.0-flash-thinking40.4617.1821.761.1522.901.1507.639.1613.49
o1-mini-2024-09-1246.1868.3246.951.535.150.382.937.631.5320.07
o1-2024-12-1759.9279.0179.397.2514.8932.1450.3812.3929.9040.59
" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.424, + 0.489, + 0.504 + ], + "angle": 0, + "content": "Nevertheless, both still lag behind Claude-3.5 and o1, suggesting that while open-source models are improving, there is significant potential for open-source models to achieve even better performance in the future." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.507, + 0.49, + 0.812 + ], + "angle": 0, + "content": "The performance variance among models of the same category is remarkably significant. Within the Open-source Chat Models category, the top-performing model, deepseek-v3 (9.86), outperforms the weakest model, Mixtral-8x22B (0.30), by a factor of 33. Similarly, in the Closed-source Models category, Claude-Sonnet-3.5 (45.14) demonstrates a performance 45 times greater than that of GPT-4o-mini (1.00). The disparity is even more pronounced in the Reasoning Models category, where o1 (40.59) surpasses QwQ-32B-Preview (0.76) by a factor of 53. Such substantial performance variations are rarely observed in other benchmarks, highlighting the challenging nature of CipherBank. This benchmark effectively distinguishes the reasoning capabilities of different models through its decryption dimension, providing a robust framework for evaluating model performance." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.829, + 0.3, + 0.845 + ], + "angle": 0, + "content": "4 Detailed Analysis" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.858, + 0.489, + 0.922 + ], + "angle": 0, + "content": "In this section, we conduct a detailed analysis from the perspectives of plaintext characteristics, noise levels, testing methodologies, finer-grained evaluation metrics, and error analysis to gain deeper" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.424, + 0.884, + 0.457 + ], + "angle": 0, + "content": "insights into the strengths and limitations of different LLMs in cryptographic decryption." 
+ }, + { + "type": "table_caption", + "bbox": [ + 0.508, + 0.473, + 0.884, + 0.532 + ], + "angle": 0, + "content": "Table 3: Model Performance on Short and Long Plaintiff Setting (Lower Difference and Decrease Ratio Are Better). We highlight the most stable and sensitive results in blue and green respectively." + }, + { + "type": "table", + "bbox": [ + 0.512, + 0.543, + 0.88, + 0.651 + ], + "angle": 0, + "content": "
ModelShortLongDiffDecrease Ratio(%)
GPT-4o-2024-11-209.474.465.0152.60
gemini-2.0-flash-exp11.506.425.0844.35
DeepSeek-V313.245.228.0260.60
gemini-2.0-flash-thinking19.908.4711.4342.61
DeepSeek-R132.2720.9411.3333.16
ol-mini-2024-09-1233.7717.3516.4248.57
ol-2024-12-1747.6134.3813.2327.78
Claude-Sonnet-3.548.7047.850.851.74
" + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.687, + 0.768, + 0.703 + ], + "angle": 0, + "content": "4.1 Impact of Plaintext Length" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.713, + 0.885, + 0.922 + ], + "angle": 0, + "content": "To test models' sensitivity to text length, we categorize plaintexts into short (fewer than three tags) and long groups, averaging 70.29 and 181.61 characters, respectively. As shown in Table 3 (full results and plaintext examples can be found in Appendix C.2), longer plaintexts lead to a significant performance decline in most models. Most models exhibit a significant decline in decryption performance as text length increases. Among them, Claude-3.5 (-0.85) shows the most stable performance, while o1-mini (-16.42) is the most sensitive. This contrasts with human performance, highlighting LLMs' length bias in decryption reasoning." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5934" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.12, + 0.086, + 0.362, + 0.189 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.115, + 0.196, + 0.366, + 0.221 + ], + "angle": 0, + "content": "(a) Model Robustness to Noisy Inputs: Performance Comparison." + }, + { + "type": "image", + "bbox": [ + 0.379, + 0.087, + 0.62, + 0.188 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.375, + 0.196, + 0.622, + 0.221 + ], + "angle": 0, + "content": "(b) Effect of Encryption Scope: Letters Only vs. Letters & Numbers." + }, + { + "type": "image", + "bbox": [ + 0.639, + 0.085, + 0.88, + 0.188 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.634, + 0.196, + 0.884, + 0.221 + ], + "angle": 0, + "content": "(c) Evaluating the Benefit of Explicit Algorithm Hints in 3-Shot Prompting." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.164, + 0.232, + 0.831, + 0.248 + ], + "angle": 0, + "content": "Figure 3: Evaluation of LLM Performance Under Different Encryption and Prompting Conditions." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.272, + 0.447, + 0.286 + ], + "angle": 0, + "content": "4.2 Effect of Noise on Model Robustness" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.301, + 0.49, + 0.414 + ], + "angle": 0, + "content": "We observe that models frequently substituted synonyms instead of strictly applying decryption rules to each character (examples in Appendix C.2), indicating the presence of shortcut reasoning, where models partially decrypt the text and infer the remainder based on semantic context rather than adhering to the encryption pattern." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.418, + 0.49, + 0.581 + ], + "angle": 0, + "content": "To evaluate robustness and mitigate reliance on semantic inference, we select the 40 plaintexts with the lowest perplexity (PPL) scores, computed using Llama-3.1-8B-Instruct, for noise injection. Figure 3a shows a substantial performance drop across all models, including Claude-3.5 (from 59.17 to 25.08) and o1-mini (from 24.25 to 5.83), highlighting their vulnerability to structural perturbations and further exposing the limitations of current models in systematic reasoning and precise decryption." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.604, + 0.373, + 0.62 + ], + "angle": 0, + "content": "4.3 Effect of Encryption Scope" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.632, + 0.49, + 0.922 + ], + "angle": 0, + "content": "In previous evaluations, only letters are encrypted. To better reflect real-world scenarios, here we select plaintexts with sensitive numerical data and apply encryption to both letters and numbers, focusing on algorithms that directly affect numbers (test prompt in Appendix C.2). 
As shown in Table 3b, model performance drops significantly in this more complex setting. This suggests difficulty in adapting decryption strategies to numerical transformations. Even under the same encryption principles, encrypting both letters and numbers greatly increases task complexity, posing a significant challenge for current reasoning models. This highlights a critical limitation in LLMs' ability to generalize across diverse data types, particularly when numerical transformations are involved. Future work should focus on enhancing models' capacity to handle mixed data encryption." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.272, + 0.847, + 0.304 + ], + "angle": 0, + "content": "4.4 Effect of Explicit Algorithm Hints on Decryption Performance" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.309, + 0.885, + 0.567 + ], + "angle": 0, + "content": "Previous evaluations highlight the significant challenges posed by CipherBank. To evaluate the models' decryption capabilities when provided with algorithm details, we enhance the 3-shot setting by explicitly informing the models of the specific algorithm during testing. Under the revised setting, models are no longer required to independently deduce encryption logic but instead focus on identifying the necessary key and applying the specified decryption rules. The enhanced prompt is provided in Appendix C.2. Table 3c reveals distinct performance patterns. Most chat models show minimal improvement even with algorithm details, struggling with key inference and decryption—highlighting persistent limitations, especially in models like Claude (+5.30) and Gemini (+1.97)." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.568, + 0.884, + 0.696 + ], + "angle": 0, + "content": "In contrast, reasoning models show marked performance gains, with R1 (+31.81) and o1-mini (+14.49) achieving significant improvements. 
The observed contrast underscores a fundamental distinction: chat models primarily rely on surface-level pattern recognition, while reasoning models excel in structured inference when provided with appropriate guidance." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.708, + 0.673, + 0.723 + ], + "angle": 0, + "content": "4.5 Error Analysis" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.729, + 0.883, + 0.854 + ], + "angle": 0, + "content": "We conduct a comprehensive error analysis based on the test results in Table 2, identifying six distinct error types. To gain deeper insights, we examine the three best-performing chat models and three best-performing reasoning models, summarizing their error distributions. Detailed error definitions and examples are provided in Appendix D.1 and D.2." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.858, + 0.884, + 0.922 + ], + "angle": 0, + "content": "As shown in Figure 4, the distribution of error types reveals key differences between reasoning and chat models. Surprisingly, (1) reasoning models exhibit a higher rate of reasoning failures than" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5935" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.132, + 0.085, + 0.475, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.243, + 0.49, + 0.286 + ], + "angle": 0, + "content": "Figure 4: Decryption Error Distribution. The left represents chat models, while the right corresponds to reasoning models." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.29, + 0.49, + 0.645 + ], + "angle": 0, + "content": "chat models. A deeper examination of Appendix D.3 reveals that many of these failures occur on simpler tasks, suggesting that reasoning models may overanalyze problems, leading to incorrect conclusions. 
This indicates that their complex inference processes can sometimes hinder performance on straightforward decryption cases. Conversely, (2) chat models show a higher frequency of omission-insertion and reorganization errors, indicating that while they are stronger in semantic understanding, this often results in excessive auto-completion and sentence restructuring rather than strict rule adherence. This tendency suggests that chat models prioritize fluency over exact decryption, leading to unintended modifications. Additionally, (3) both model types frequently make errors in name decryption, highlighting a broader challenge in handling structured entity transformations. This suggests that current LLMs struggle to consistently apply encryption rules to proper nouns, potentially due to memorization biases or difficulties in preserving entity-level consistency during decryption." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.655, + 0.271, + 0.67 + ], + "angle": 0, + "content": "5 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.681, + 0.491, + 0.922 + ], + "angle": 0, + "content": "Benchmarks for Reasoning Evaluating reasoning abilities in LLMs has been a key focus in AI research, with various benchmarks assessing models across mathematical, logical, and inferential tasks. MATH (Hendrycks et al., 2021b), MathBench (Liu et al., 2024c), and LiveMath-Bench (Liu et al., 2024d) test arithmetic and algebraic reasoning, while HumanEval (Chen et al., 2021b), DebugBench (Tian et al., 2024) and Big-CodeBench (Zhuo et al., 2024) evaluates code generation that require programming logic. Additionally, BIG-Bench (Srivastava et al., 2022), BBH (Suzgun et al., 2022), and LiveBench (White et al., 2024) measure broader cognitive abilities, such as abstract reasoning and analogical problem" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.885, + 0.31 + ], + "angle": 0, + "content": "solving. 
KOR-Bench (Ma et al., 2024) is new benchmark that examines strong reasoning by introducing Knowledge-Orthogonal Reasoning (KOR) tasks, assessing models' ability to apply newly introduced rules independent of pretrained knowledge. Specially, it also contains a cipher reasoning task, which provides explicit encryption rules and keys, guiding models through step-by-step decryption rather than requiring pattern inference. In contrast, CipherBank presents a more realistic challenge, requiring models to identify encryption patterns from examples without prior knowledge, better reflecting real-world scenarios where encryption schemes are unknown." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.318, + 0.885, + 0.56 + ], + "angle": 0, + "content": "Jailbreaking via Cipher Characters Recent work demonstrates that encoding adversarial prompts via encryption (Yuan et al., 2023; Wei et al., 2024) or obfuscation (Yong et al., 2023; Jiang et al., 2024b; Kang et al., 2024) can bypass LLM safety filters by exploiting models' ability to process encoded inputs. While CipherBench (Handa et al., 2024) evaluates cipher-based jailbreaking, its reliance on 40 curated plaintexts and explicit algorithm hints limits practical relevance. Our CipherBank removes prior guidance, requiring autonomous pattern inference from plaintext-ciphertext pairs to simulate privacy-sensitive decryption scenarios, establishing a robust benchmark for LLM security evaluation." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.571, + 0.642, + 0.587 + ], + "angle": 0, + "content": "6 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.596, + 0.884, + 0.837 + ], + "angle": 0, + "content": "In this work, we introduce CipherBank, a comprehensive benchmark for evaluating reasoning capabilities through cryptographic decryption. CipherBank includes 5 domains, 14 subdomains of plaintext data, 9 encryption algorithms, and 2,358 decryption tasks. 
By testing SOTA LLMs on CipherBank, we uncover significant limitations in their decryption abilities, revealing distinct strengths and weaknesses between reasoning and chat models. Our analysis identifies key deficiencies in current reasoning approaches and suggests directions for improvement, positioning CipherBank as a novel benchmark for advancing structured inference and cryptographic reasoning in developing future LLMs." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.849, + 0.615, + 0.864 + ], + "angle": 0, + "content": "Limitations" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.874, + 0.884, + 0.922 + ], + "angle": 0, + "content": "Our evaluation is constrained by the reliance on closed-source models, which are accessible only via API calls. This introduces potential variability" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5936" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.493, + 0.342 + ], + "angle": 0, + "content": "due to API updates and version changes, though we mitigate this by documenting the specific versions and dates used. Additionally, access restrictions prevent us from evaluating more advanced models such as o1 Pro and o3 series, limiting the scope of our benchmark. From a design perspective, CipherBank primarily focuses on classical encryption algorithms, as modern cryptographic schemes introduce complexities beyond current model capabilities. While this choice ensures feasibility in evaluation, it also restricts the benchmark's applicability to real-world cryptographic challenges. As models improve, expanding CipherBank to modern encryption techniques will provide a more comprehensive assessment of reasoning in cryptographic tasks." 
+ }, + { + "type": "title", + "bbox": [ + 0.115, + 0.356, + 0.287, + 0.372 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.382, + 0.49, + 0.414 + ], + "angle": 0, + "content": "This work is supported by National Key R&D Program of China (2022ZD0160201)." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.442, + 0.214, + 0.457 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.466, + 0.489, + 0.52 + ], + "angle": 0, + "content": "Janice Ahn, Rishu Verma, Renze Lou, Di Liu, Rui Zhang, and Wenpeng Yin. 2024. Large language models for mathematical reasoning: Progresses and challenges. arXiv preprint arXiv:2402.00157." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.531, + 0.49, + 0.57 + ], + "angle": 0, + "content": "Anthropic. 2024. Claude 3.5 sonnet. https://www.anthropic.com/news/claude-3-5-sonnet. Accessed: 2025-02-09." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.582, + 0.488, + 0.648 + ], + "angle": 0, + "content": "Jacob Austin, Augustus Odena, Maxwell Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie Cai, Michael Terry, Quoc Le, and Charles Sutton. 2021. Program synthesis with large language models. Preprint, arXiv:2108.07732." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.659, + 0.49, + 0.921 + ], + "angle": 0, + "content": "Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, Alex Ray, Raul Puri, Gretchen Krueger, Michael Petrov, Heidi Khlaaf, Girish Sastry, Pamela Mishkin, Brooke Chan, Scott Gray, Nick Ryder, Mikhail Pavlov, Alethea Power, Lukasz Kaiser, Mohammad Bavarian, Clemens Winter, Philippe Tillet, Felipe Petroski Such, Dave Cummings, Matthias Plappert, Fotios Chantzis, Elizabeth Barnes, Ariel Herbert-Voss, William Hebgen Guss, Alex Nichol, Alex Paino, Nikolas Tezak, Jie Tang, Igor Babuschkin, Suchir Balaji, Shantanu Jain, William Saunders, Christopher Hesse, Andrew N. Carr, Jan Leike, Josh Achiam, Vedant Misra, Evan Morikawa, Alec Radford, Matthew Knight, Miles Brundage, Mira Murati, Katie Mayer, Peter Welinder, Bob McGrew, Dario Amodei, Sam McCandlish, Ilya Sutskever, and Wojciech Zaremba. 2021a. Evaluating large language models trained on code." + }, + { + "type": "list", + "bbox": [ + 0.115, + 0.466, + 0.49, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.086, + 0.885, + 0.166 + ], + "angle": 0, + "content": "Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde De Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, et al. 2021b. Evaluating large language models trained on code. arXiv preprint arXiv:2107.03374." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.176, + 0.885, + 0.23 + ], + "angle": 0, + "content": "Sirui Chen, Bo Peng, Meiqi Chen, Ruiqi Wang, Mengying Xu, Xingyu Zeng, Rui Zhao, Shengjie Zhao, Yu Qiao, and Chaochao Lu. 2024. Causal evaluation of language models. Preprint, arXiv:2405.00622." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.239, + 0.885, + 0.306 + ], + "angle": 0, + "content": "Ning Ding, Yulin Chen, Bokai Xu, Yujia Qin, Zhi Zheng, Shengding Hu, Zhiyuan Liu, Maosong Sun, and Bowen Zhou. 2023. Enhancing chat language models by scaling high-quality instructional conversations. Preprint, arXiv:2305.14233." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.316, + 0.885, + 0.396 + ], + "angle": 0, + "content": "Li Dong, Nan Yang, Wenhui Wang, Furu Wei, Xiaodong Liu, Yu Wang, Jianfeng Gao, Ming Zhou, and Hsiao-Wuen Hon. 2019. Unified language model pretraining for natural language understanding and generation. Advances in neural information processing systems, 32." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.406, + 0.885, + 0.473 + ], + "angle": 0, + "content": "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.483, + 0.885, + 0.549 + ], + "angle": 0, + "content": "Yingqiang Ge, Wenyue Hua, Kai Mei, Juntao Tan, Shuyuan Xu, Zelong Li, Yongfeng Zhang, et al. 2023. Openagi: When llm meets domain experts. Advances in Neural Information Processing Systems, 36:5539-5568." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.56, + 0.885, + 0.626 + ], + "angle": 0, + "content": "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.636, + 0.885, + 0.794 + ], + "angle": 0, + "content": "Simeng Han, Hailey Schoelkopf, Yilun Zhao, Zhenting Qi, Martin Riddell, Wenfei Zhou, James Coady, David Peng, Yujie Qiao, Luke Benson, Lucy Sun, Alex Wardle-Solano, Hannah Szabo, Ekaterina Zubova, Matthew Burtell, Jonathan Fan, Yixin Liu, Brian Wong, Malcolm Sailor, Ansong Ni, Linyong Nan, Jungo Kasai, Tao Yu, Rui Zhang, Alexander R. Fabbri, Wojciech Kryscinski, Semih Yavuz, Ye Liu, Xi Victoria Lin, Shafiq Joty, Yingbo Zhou, Caiming Xiong, Rex Ying, Arman Cohen, and Dragomir Radev. 2024. Folio: Natural language reasoning with first-order logic. Preprint, arXiv:2209.00840." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.804, + 0.885, + 0.858 + ], + "angle": 0, + "content": "Divij Handa, Zehua Zhang, Amir Saeidi, and Chitta Baral. 2024. When \"competency\" in reasoning opens the door to vulnerability: Jailbreaking llms via novel complex ciphers. Preprint, arXiv:2402.10601." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.868, + 0.885, + 0.921 + ], + "angle": 0, + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. 2021a. Measuring mathematical problem solving with the math dataset. NeurIPS." + }, + { + "type": "list", + "bbox": [ + 0.511, + 0.086, + 0.885, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5937" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.086, + 0.487, + 0.152 + ], + "angle": 0, + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. 2021b. Measuring mathematical problem solving with the math dataset. arXiv preprint arXiv:2103.03874." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.163, + 0.487, + 0.229 + ], + "angle": 0, + "content": "Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. 2024. Gpt-4o system card. arXiv preprint arXiv:2410.21276." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.24, + 0.487, + 0.306 + ], + "angle": 0, + "content": "Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. 2024. Openai o1 system card. arXiv preprint arXiv:2412.16720." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.317, + 0.487, + 0.396 + ], + "angle": 0, + "content": "Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. 2024a. Mixtral of experts. arXiv preprint arXiv:2401.04088." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.407, + 0.487, + 0.472 + ], + "angle": 0, + "content": "Fengqing Jiang, Zhangchen Xu, Luyao Niu, Zhen Xiang, Bhaskar Ramasubramanian, Bo Li, and Radha Poovendran. 2024b. Artprompt: Ascii art-based jailbreak attacks against aligned llms. arXiv preprint arXiv:2402.11753." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.483, + 0.487, + 0.561 + ], + "angle": 0, + "content": "Daniel Kang, Xuechen Li, Ion Stoica, Carlos Guestrin, Matei Zaharia, and Tatsunori Hashimoto. 2024. Exploiting programmatic behavior of llms: Dual-use through standard security attacks. In 2024 IEEE Security and Privacy Workshops (SPW), pages 132-143. IEEE." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.573, + 0.487, + 0.652 + ], + "angle": 0, + "content": "Nikitas Karanikolas, Eirini Manga, Nikoletta Samaridi, Eleni Tousidou, and Michael Vassilakopoulos. 2023. Large language models versus natural language understanding and generation. 
In Proceedings of the 27th Pan-Hellenic Conference on Progress in Computing and Informatics, pages 278-290." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.663, + 0.486, + 0.691 + ], + "angle": 0, + "content": "Alan G. Konheim. 2007. Computer Security and Cryptography. John Wiley & Sons." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.701, + 0.487, + 0.767 + ], + "angle": 0, + "content": "Cheryl Lee, Chunqiu Steven Xia, Longji Yang, Jentse Huang, Zhouruixin Zhu, Lingming Zhang, and Michael R Lyu. 2024. A unified debugging approach via llm-based multi-agent synergy. arXiv preprint arXiv:2404.17153." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.778, + 0.487, + 0.844 + ], + "angle": 0, + "content": "Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. 2024a. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.855, + 0.487, + 0.921 + ], + "angle": 0, + "content": "Fei Liu, Yiming Yao, Ping Guo, Zhiyuan Yang, Zhe Zhao, Xi Lin, Xialiang Tong, Mingxuan Yuan, Zhichao Lu, Zhenkun Wang, et al. 2024b. A systematic survey on large language models for algorithm design. arXiv preprint arXiv:2410.14716." + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.086, + 0.487, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.086, + 0.881, + 0.165 + ], + "angle": 0, + "content": "Hongwei Liu, Zilong Zheng, Yuxuan Qiao, Haodong Duan, Zhiwei Fei, Fengzhe Zhou, Wenwei Zhang, Songyang Zhang, Dahua Lin, and Kai Chen. 2024c Mathbench: Evaluating the theory and application proficiency of llms with a hierarchical mathematics benchmark. arXiv preprint arXiv:2405.12209." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.175, + 0.881, + 0.24 + ], + "angle": 0, + "content": "Junnan Liu, Hongwei Liu, Linchen Xiao, Ziyi Wang, Kuikun Liu, Songyang Gao, Wenwei Zhang, Songyang Zhang, and Kai Chen. 2024d. Are your llms capable of stable reasoning? arXiv preprint arXiv:2412.13147." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.25, + 0.881, + 0.33 + ], + "angle": 0, + "content": "Kaijing Ma, Xinrun Du, Yunran Wang, Haoran Zhang, Zhoufutu Wen, Xingwei Qu, Jian Yang, Jiaheng Liu, Minghao Liu, Xiang Yue, et al 2024. Kor-bench: Benchmarking language models on knowledge-orthogonal reasoning tasks. arXiv preprint arXiv:2410.06526." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.339, + 0.881, + 0.367 + ], + "angle": 0, + "content": "Jarno Mielikainen. 2006. Lsb matching revisited. IEEE signal processing letters, 13(5):285-287." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.376, + 0.881, + 0.429 + ], + "angle": 0, + "content": "S. Rani, A. Kataria, and M. Chauhan. 2022. Cyber security techniques, architectures, and design In Holistic Approach to Quantum Cryptography in Cyber Security, pages 41-66. CRC Press." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.439, + 0.881, + 0.492 + ], + "angle": 0, + "content": "A. Sarkar, S. R. Chatterjee, and M. Chakraborty. 2021 Role of cryptography in network security. The \"Essence\" of Network Security: An End-to-End Panorama, pages 103-143." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.502, + 0.881, + 0.542 + ], + "angle": 0, + "content": "Miyu Sasaki, Natsumi Watanabe, and Tsukihito Komanaka. 2024. Enhancing contextual understanding of mistral llm with external knowledge bases." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.551, + 0.881, + 0.579 + ], + "angle": 0, + "content": "Bruce Schneier. 2002. Cryptographic design vulnerabilities. Computer, 31(9):29-33." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.589, + 0.881, + 0.641 + ], + "angle": 0, + "content": "Divya Shree, Seema Ahlawat, et al. 2017. A review on cryptography, attacks and cyber security. International Journal of Advanced Research in Computer Science, 8(5)." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.651, + 0.881, + 0.717 + ], + "angle": 0, + "content": "S. Soomro, M. R. Belgaum, Z. Alansari, et al. 2019 Review and open issues of cryptographic algorithms in cyber security. In 2019 International Conference on Computing, Electronics & Communications Engineering (iCCECE), pages 158-162. IEEE." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.727, + 0.881, + 0.819 + ], + "angle": 0, + "content": "Aarohi Srivastava, Abhinav Rastogi, Abhishek Rao, Abu Awal Md Shoeb, Abubakar Abid, Adam Fisch, Adam R Brown, Adam Santoro, Aditya Gupta, Adrià Garriga-Alonso, et al. 2022. Beyond the imitation game: Quantifying and extrapolating the capabilities of language models. arXiv preprint arXiv:2206.04615." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.829, + 0.881, + 0.921 + ], + "angle": 0, + "content": "Hongda Sun, Weikai Xu, Wei Liu, Jian Luan, Bin Wang, Shuo Shang, Ji-Rong Wen, and Rui Yan 2024. Determinlr: Augmenting llm-based logical reasoning from indeterminacy to determinacy. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 9828-9862." + }, + { + "type": "list", + "bbox": [ + 0.511, + 0.086, + 0.881, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5938" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.086, + 0.487, + 0.166 + ], + "angle": 0, + "content": "Mirac Suzgun, Nathan Scales, Nathanael Scharli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc V Le, Ed H Chi, Denny Zhou, et al. 2022. 
Challenging big-bench tasks and whether chain-of-thought can solve them. arXiv preprint arXiv:2210.09261." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.178, + 0.487, + 0.219 + ], + "angle": 0, + "content": "Gemini Team. 2024a. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. Preprint, arXiv:2403.05530." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.232, + 0.487, + 0.257 + ], + "angle": 0, + "content": "Qwen Team. 2024b. Qwq: Reflect deeply on the boundaries of the unknown." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.272, + 0.487, + 0.325 + ], + "angle": 0, + "content": "Runchu Tian, Yining Ye, Yujia Qin, Xin Cong, Yankai Lin, Zhiyuan Liu, and Maosong Sun. 2024. Debugbench: Evaluating debugging capability of large language models. Preprint, arXiv:2401.04621." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.338, + 0.487, + 0.403 + ], + "angle": 0, + "content": "Boshi Wang, Xiang Yue, and Huan Sun. 2023. Can chatgpt defend its belief in truth? evaluating llm reasoning via debate. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 11865-11881." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.418, + 0.487, + 0.47 + ], + "angle": 0, + "content": "Alexander Wei, Nika Haghtalab, and Jacob Steinhardt. 2024. Jailbroken: How does llm safety training fail? Advances in Neural Information Processing Systems, 36." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.484, + 0.487, + 0.55 + ], + "angle": 0, + "content": "Colin White, Samuel Dooley, Manley Roberts, Arka Pal, Ben Feuer, Siddhartha Jain, Ravid Shwartz-Ziv, Neel Jain, Khalid Saifullah, Siddartha Naidu, et al. 2024. Livebench: A challenging, contamination-free llm benchmark. arXiv preprint arXiv:2406.19314." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.564, + 0.487, + 0.642 + ], + "angle": 0, + "content": "Yiran Wu, Feiran Jia, Shaokun Zhang, Hangyu Li, Erkang Zhu, Yue Wang, Yin Tat Lee, Richard Peng, Qingyun Wu, and Chi Wang. 2024. Mathchat: Converse to tackle challenging math problems with llm agents. In ICLR 2024 Workshop on Large Language Model (LLM) Agents." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.656, + 0.487, + 0.709 + ], + "angle": 0, + "content": "An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. 2024a. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.722, + 0.487, + 0.801 + ], + "angle": 0, + "content": "Kaiyu Yang, Aidan Swope, Alex Gu, Rahul Chalamala, Peiyang Song, Shixing Yu, Saad Godil, Ryan J Prenger, and Animashree Anandkumar. 2024b. Leandrojo: Theorem proving with retrieval-augmented language models. Advances in Neural Information Processing Systems, 36." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.814, + 0.487, + 0.867 + ], + "angle": 0, + "content": "Wenlin Yao, Haitao Mi, and Dong Yu. 2024. Hdflow: Enhancing llm complex problem-solving with hybrid thinking and dynamic workflows. arXiv preprint arXiv:2409.17433." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.881, + 0.487, + 0.921 + ], + "angle": 0, + "content": "Zheng-Xin Yong, Cristina Menghini, and Stephen H Bach. 2023. Low-resource languages jailbreak gpt-4. arXiv preprint arXiv:2310.02446." + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.086, + 0.487, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.086, + 0.882, + 0.152 + ], + "angle": 0, + "content": "Youliang Yuan, Wenxiang Jiao, Wenxuan Wang, Jen-tse Huang, Pinjia He, Shuming Shi, and Zhaopeng Tu. 2023. Gpt-4 is too smart to be safe: Stealthy chat with llms via cipher. 
arXiv preprint arXiv:2308.06463." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.162, + 0.882, + 0.201 + ], + "angle": 0, + "content": "Haodong Duan Yuan Liu. 2023. Mmbench: Is your multi-modal model an all-around player? arXiv:2307.06281." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.212, + 0.882, + 0.252 + ], + "angle": 0, + "content": "Li Yujiang and Liu Bo. 2007. A normalized levenshtein distance metric. IEEE transactions on pattern analysis and machine intelligence, 29(6):1091-1095." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.261, + 0.882, + 0.313 + ], + "angle": 0, + "content": "Li Zhong, Zilong Wang, and Jingbo Shang. 2024. Ldb: A large language model debugger via verifying runtime execution step-by-step. arXiv preprint arXiv:2402.16906." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.324, + 0.882, + 0.403 + ], + "angle": 0, + "content": "Terry Yue Zhuo, Minh Chien Vu, Jenny Chim, Han Hu, Wenhao Yu, Ratnadira Widyasari, Imam Nur Bani Yusuf, Haolan Zhan, Junda He, Indraneil Paul, et al. 2024. Bigcodebench: Benchmarking code generation with diverse function calls and complex instructions. arXiv preprint arXiv:2406.15877." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.412, + 0.882, + 0.44 + ], + "angle": 0, + "content": "MZWM Zulkifli and Zaid W Mohd. 2008. Attack on cryptography. Comput. Secur, 12(5):33-45." + }, + { + "type": "list", + "bbox": [ + 0.512, + 0.086, + 0.882, + 0.44 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5939" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.114, + 0.085, + 0.436, + 0.101 + ], + "angle": 0, + "content": "A Detailed Benchmark Description" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.111, + 0.49, + 0.255 + ], + "angle": 0, + "content": "In this chapter, we provide additional details on CipherBank that were not extensively covered in the main text. 
This includes a detailed breakdown of plaintext tags and their distribution across subdomains, as well as a more comprehensive description of the encryption algorithms used. These details offer deeper insights into the dataset construction and the encryption schemes evaluated in this benchmark." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.267, + 0.469, + 0.297 + ], + "angle": 0, + "content": "A.1 Tags and Plaintext Distribution Across Subdomains" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.304, + 0.49, + 0.385 + ], + "angle": 0, + "content": "Table 4 provides an overview of the specific tags associated with each subdomain within CipherBank. The dataset spans five primary domains and 14 subdomains, ensuring diverse and realistic plaintext scenarios for cryptographic evaluation." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.396, + 0.447, + 0.427 + ], + "angle": 0, + "content": "A.2 Detailed Descriptions of Encryption Algorithms" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.433, + 0.49, + 0.625 + ], + "angle": 0, + "content": "This section provides detailed descriptions of the nine encryption algorithms used in CipherBank. These algorithms span substitution, transposition, and custom-designed ciphers, covering a range of complexity levels. Notably, Rot13, Atbash, Polybius, DualAvgCode, and ParityShift also support numeric encryption, further enhancing the diversity of decryption challenges. Table 5 outlines each algorithm and its transformation rules. Some detailed encryption examples are provided below, illustrating how different ciphers transform plaintext into ciphertext." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.627, + 0.49, + 0.786 + ], + "angle": 0, + "content": "For each encryption algorithm, we have implemented a corresponding decryption algorithm to ensure that ciphertext can be fully restored to its original plaintext. 
This guarantees the reversibility and integrity of the encryption schemes used in CipherBank, allowing for a rigorous evaluation of model decryption capabilities. The decryption process follows the exact inverse of the encryption transformations, ensuring consistency across all test cases." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.8, + 0.391, + 0.816 + ], + "angle": 0, + "content": "B Experimental Setup Details" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.826, + 0.49, + 0.922 + ], + "angle": 0, + "content": "In our evaluation, we adopt a 3-shot approach. A more natural Ciphertext-Only Attack (zero-shot) setting was not adopted, as it would reduce the task to brute-force decryption, where the model blindly applies all known encryption algorithms in search of a coherent output. This contradicts the goal" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.885, + 0.149 + ], + "angle": 0, + "content": "of reasoning-based inference, where the model is expected to deduce encryption rules from provided examples rather than rely on exhaustive trial and error." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.155, + 0.886, + 0.284 + ], + "angle": 0, + "content": "To ensure a balanced evaluation of decryption difficulty, substitution ciphers exclude numbers to prevent inconsistencies arising from differing cyclic structures. In contrast, ciphers that do not involve direct substitution, such as Reverse, Word-Shift, and similar methods, process numbers normally, preserving structural integrity within the encrypted text." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.289, + 0.886, + 0.386 + ], + "angle": 0, + "content": "For all open-source models, we conduct evaluations using the OpenCompass framework with default temperature to ensure consistent outputs. For models evaluated via API, we perform 5 independent test runs per model and report the average result to enhance stability and reliability." 
+ }, + { + "type": "title", + "bbox": [ + 0.509, + 0.415, + 0.779, + 0.431 + ], + "angle": 0, + "content": "B.1 Prompts Used for Querying" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.447, + 0.886, + 0.641 + ], + "angle": 0, + "content": "This section outlines the prompts used to query models during evaluation. To ensure consistency, all models were tested under a 3-shot setting, where they were provided with three plaintext-ciphertext pairs before attempting to decrypt a new ciphertext. The prompts were designed to encourage logical inference rather than relying on prior knowledge, guiding models to extract encryption patterns and apply the learned rules systematically. Below, Figure 5 provides the system prompt (some reasoning models may not support system prompts), while Figure 6 present the detailed user prompts." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.669, + 0.755, + 0.685 + ], + "angle": 0, + "content": "B.2 Post-processing Methods" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.701, + 0.885, + 0.877 + ], + "angle": 0, + "content": "During querying, we instruct the model to think step by step and enclose the final decrypted output within ... tags. To extract the decoded plaintext, we apply the regular expression ' result \\(\\text{串}\\) \\((\\text{串} ?)\\) /result>, capturing the content between these tags. The matching process is case-insensitive, aligning with algorithms like Polybius, which inherently do not differentiate between uppercase and lowercase letters when restoring plaintext. This ensures consistency across different decryption schemes." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.531, + 0.907, + 0.868, + 0.921 + ], + "angle": 0, + "content": "7https://github.com/open-compass/opencompass" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.928, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5940" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.292, + 0.146, + 0.706, + 0.161 + ], + "angle": 0, + "content": "Table 4: Tag Distribution Across Subdomains in CipherBank" + }, + { + "type": "table", + "bbox": [ + 0.116, + 0.172, + 0.879, + 0.855 + ], + "angle": 0, + "content": "
DomainSubdomainTags
Personal Privacy DataIdentity InformationName, ID Card Number, Passport Number, Date of Birth, Gender, Nationality, Marital Status, Mobile Number, Family Member Information (e.g., immediate family names, contact information), Residential Address
Health InformationMedical Record Number (Patient ID), Diagnosis Records, Surgery Records, Examination Reports (e.g., X-ray, CT scan results, heart rate, blood pressure, blood sugar level, blood type), Disease History, Allergy History, Vaccination Records, Family Medical History
Educational DataStudent ID (Student Number), School Records (Enrollment Date, Graduation Date), Academic Records (Subjects, Grades, GPA, Ranking), Degree Information (Bachelor, Master, Doctorate), Awards and Penalties Records (Disciplinary Records)
Enterprise Sensitive DataBusiness InformationBusiness Plans (e.g., Annual Plan, Five-Year Plan), Marketing Strategy (e.g., Marketing Promotion Plan, Advertising Budget), Customer Lists (e.g., Customer Contacts, Preferences), Supplier Information (Supplier List, Cooperation Agreements), Internal Financial Budgets (Cost Structure, Profit Forecasts)
Intellectual PropertyProduct Design Plans (e.g., Prototype Drawings, Design Documents), Internal Technical Documents (e.g., Technical Manuals, Specifications), Test Data (e.g., Product Performance Test Results, Quality Control Records), Copyright Data, Patent Data
Employee InformationContact Information (e.g., Phone Numbers, Email Addresses), Work Experience, Position and Department Information, Salary and Benefits Information (e.g., Salary Amount, Bonuses, Allowances), Performance Evaluation (e.g., Performance Scores, Promotion Records), Contract Information (e.g., Employment Contract, Non-Disclosure Agreement)
Public Safety DataPolice DataCase Information (Case Number, Case Type, Filing Date), Criminal Records (Suspect Information, Crime Time, Crime Location), Alarm Records (Informer Information, Alarm Time, Alarm Content), Investigation Reports (Investigation Results, Investigation Progress), Arrest Records (Arrest Time, Location, Action Description), Traffic Enforcement Data (Violation Records, Penalty Information), Police Officer Information (Officer Number, Name, Position, Department), Police Resource Allocation (Vehicle, Equipment, Weapon Usage Records)
National Security DataBorder Crossing Records (Entry and Exit Personnel Information, Vehicle Registration), Customs Inspection Data (Cargo List, Contraband Records), Territorial Patrol Data (Patrol Reports, Anomalies Records), Cyber Security Monitoring Data (Cyber Attack Records, Threat Intelligence)
Military DataOperation Plans, Target Location, Troop Deployment, Military Base Distribution, Defense Works Location
Financial Confidential DataBanking InformationAccount Number, Bank Card Number, Payment Method, Payment Platform ID, Transaction Details, Loan Amount, Interest Rate, Repayment Plan, Investment Records (Stocks, Funds, Bonds)
Personal IncomeSalary Amount, Pay Date, Tax Number, Tax Return Records
Internet RecordsBrowsing RecordsPage Interaction, Search Behavior, Click Activity, Device Information, Geolocation, Checkout Process, Multimedia Interaction, Download Records
Cookie DataSession Management, User Identification, Ad Targeting, Behavior Tracking, Authentication Tokens, Login Status
User PreferencesPreferred Genres, Device Usage Habits, Notification Preferences, Shopping Preferences, Video Preferences, Reading Habits
" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.52, + 0.941 + ], + "angle": 0, + "content": "5941" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.127, + 0.094, + 0.597, + 0.107 + ], + "angle": 0, + "content": "Example A.1: Plain-Ciphertext Pair (Identity Information) - Only Letter" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.114, + 0.333, + 0.126 + ], + "angle": 0, + "content": "Domain: Personal Privacy Data" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.13, + 0.349, + 0.143 + ], + "angle": 0, + "content": "Subdomain: Identity Information" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.146, + 0.551, + 0.16 + ], + "angle": 0, + "content": "Tag Combination: [\"Name\", \"Date of Birth\", \"Passport Number\"]" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.163, + 0.192, + 0.175 + ], + "angle": 0, + "content": "Plaintext:" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.179, + 0.66, + 0.193 + ], + "angle": 0, + "content": "Peter was born on April 23, 1985, and carries a passport with the number X123456789." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.196, + 0.254, + 0.209 + ], + "angle": 0, + "content": "Encryption results:" + }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.212, + 0.73, + 0.226 + ], + "angle": 0, + "content": "(1) Rot13: Crgre jnf obea ba Ncevy 23, 1985, naq pneevrf n cnffcbeg jvgu gur ahzore K123456789." + }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.229, + 0.731, + 0.242 + ], + "angle": 0, + "content": "(2) Atbash: Kvgvi dzh ylim lm Zkiro 23, 1985, zmw xziirvh z kzhhklig drgs gsv mfnyvi C123456789." + }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.245, + 0.872, + 0.274 + ], + "angle": 0, + "content": "(3) Polybius: 34 15 42 15 36 45 11 41 12 33 36 32 33 32 11 34 36 23 26 2 3, 1985, 11 32 14 13 11 36 36 23 15 41 11 34 11 41 41 34 33 36 42 45 23 42 22 42 22 15 32 43 31 12 15 36 46 12 3 4 5 6 7 89." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.279, + 0.757, + 0.292 + ], + "angle": 0, + "content": "(4) Vigenère: Pgeet wcd dzrp op Arcin 23, 1985, cyd natcigd pcdszrv wkeh eh nwxbgc Z123456789." + }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.295, + 0.74, + 0.308 + ], + "angle": 0, + "content": "(5) Reverse: .987654321X rebmun eht htiw tropssap a seirrac dna ,5891 ,32 lirpA no nrob saw reteP" + }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.312, + 0.754, + 0.325 + ], + "angle": 0, + "content": "(6) SwapPairs: ePet raw sobnro npAir l32,9158,na dacrei s aapssoptrw ti hht eunbmreX 21436587.9" + }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.329, + 0.872, + 0.358 + ], + "angle": 0, + "content": "(7) DualAvgCode: OQdfsudfqs vxaart acnpqsmo npmo AAoqqshjkm 23, 1985, aamoce bdaaqsqshjdfrt aa oqaartroqnpssu vxhjsugi sugidf motvlnacdfqs WY123456789." + }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.362, + 0.763, + 0.375 + ], + "angle": 0, + "content": "(8) ParityShift: Qduds vzr cnso no Zqshm 23, 1985, zoe bzsshrd z qzrrqnsu vuhui uid otlcds Y123456789." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.378, + 0.759, + 0.391 + ], + "angle": 0, + "content": "(9) WordShift: erPet was nbor no ilApr 23, 5,198 and riescar a sportpas hwt the bernum 3456789.X12" + }, + { + "type": "list", + "bbox": [ + 0.127, + 0.212, + 0.872, + 0.391 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.127, + 0.424, + 0.536, + 0.438 + ], + "angle": 0, + "content": "Example A.2: Plain-Ciphertext Pair (Police Data) - Only Letter" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.444, + 0.312, + 0.456 + ], + "angle": 0, + "content": "# Domain: Public Safety Data" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.46, + 0.297, + 0.472 + ], + "angle": 0, + "content": "Subdomain: Police Data" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.476, + 0.801, + 0.49 + ], + "angle": 0, + "content": "Tag Combination: [\"Suspect Information\", \"Crime Time\", \"Crime Location\", \"Police Officer Information\"]" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.494, + 0.192, + 0.505 + ], + "angle": 0, + "content": "Plaintext:" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.51, + 0.872, + 0.539 + ], + "angle": 0, + "content": "Suspect: Jonathan, Crime: Burglary, Time: 2022-03-12 14:30, Location: 123 Elm Street, Officer Smith observed suspicious activity near 5th Ave on 2022-03-13." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.543, + 0.254, + 0.556 + ], + "angle": 0, + "content": "Encryption results:" + }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.56, + 0.872, + 0.589 + ], + "angle": 0, + "content": "(1) Rot13: Fhcrpg: Wbanguna, Pevzr: Ohetynel, Gvzr: 2022-03-12 14:30, Ybpngvba: 123 Ryz Fgerrg, Bssvpre Fzygu bofreirq fhcvpbhf npgvivgl arne 5gu Nir ba 2022-03-13." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.593, + 0.872, + 0.622 + ], + "angle": 0, + "content": "(2) Atbash: Hfhkvyg: Qlmzgszm, Xirnv: Yfitozib, Grnv: 2022-03-12 14:30, Olxzgrlm: 123 Von Hgivvg, Luurxvi Hnrgs lyhvieww hfhkrxrlfh zxgrergb mvzi 5gs Zev lm 2022-03-13." + }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.626, + 0.872, + 0.687 + ], + "angle": 0, + "content": "(3) Polybius: 41 43 41 34 15 13 42 : 24 33 32 11 42 22 11 32 , 13 36 23 31 15 : 12 43 36 21 26 11 36 51 , 42 23 31 15 : 20 22 - 03 - 1214 : 30 , 2633131142233332 : 123152631414236151542 , 331616231315364131234222 3312411536441514414341342313233343411113422344234513215113654222114415332022 -03 - 13." + }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.692, + 0.872, + 0.722 + ], + "angle": 0, + "content": "(4) Vigenère: Swdpgnt: Jqyavsap, Eciop: Mutrlccy, Tkxe: 2022-03-12 14:30, Lqnavtop: 123 Plo Svcege, Zfhtcgc Uxivs qmsgcvgo ufsrtckzuu aeeixta nglr 5tj Axp qy 2022-03-13." + }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.726, + 0.872, + 0.755 + ], + "angle": 0, + "content": "(5) Reverse: 31-30-2202 no evA ht5 raen ytivitca suoicipsus devresbo htimS reciffO ,teertS mlE 321 :noitacoL ,03:41 21-30-2202 :emiT ,yralgruB :emirC ,nahtanoJ :tcepsuS." + }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.758, + 0.872, + 0.788 + ], + "angle": 0, + "content": "(6) SwapPairs: uSpsc:tJ notaah,nC irem :uBgralyr ,iTem :02220-3211 :403 ,oLacitno :21 3lE mtSerte ,fOifec rmStihboesvrdes suipicuo scaitivyn ae rt5 hvA eno2 20-2301-3." + }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.792, + 0.872, + 0.838 + ], + "angle": 0, + "content": "(7) DualAvgCode: RTvtrqdfbdu: IKnpmoaasugiaamo, BDqshlndf: ACtvqsfkmaaqsz, SUhjndf: 2022-03-12 14:30, KMnpbdaasuhjnpmo: 123 DFkmln RTsuqsdfdu, NPegeghjbddfq RSInhjsugi npacrtdfquuwdfce rttvrtoqhjbdhjnpvtrt aabb-suhjuwhjsuxz modfaaqs 5sugi AAuwdf npmo 2022-03-13." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.841, + 0.872, + 0.871 + ], + "angle": 0, + "content": "(8) ParityShift: Rtrqduu: Knozuizo, Bshld: Ctsfznxsx, Uhld: 2022-03-12 14:30, Mnbzuhno: 123 Dml Rusddu, Ngghbds Rlhuicnrdswde rtrqhbntr zbuwhux odzs 5ui Zwd no 2022-03-13." + }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.874, + 0.872, + 0.904 + ], + "angle": 0, + "content": "(9) **WordShift:** pect:Sus athan,Jon me:Cri glary,Bur e:Tim 2-03-12202 30,14: ation:Loc 123 Elm eet,Str icerOff thSmi ervedobs picioussus ivityact rnea 5th Ave no 2-03-13202." + }, + { + "type": "list", + "bbox": [ + 0.127, + 0.56, + 0.872, + 0.904 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5942" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.127, + 0.101, + 0.621, + 0.115 + ], + "angle": 0, + "content": "Example A.3: Plain-Ciphertext Pair (Health Information) - Letter&Number" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.121, + 0.333, + 0.135 + ], + "angle": 0, + "content": "# Domain: Personal Privacy Data" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.138, + 0.343, + 0.15 + ], + "angle": 0, + "content": "Subdomain: Health Information" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.154, + 0.487, + 0.168 + ], + "angle": 0, + "content": "Tag Combination: [\"Patient ID\", \"Diagnosis Records\"]" + }, + { + "type": "title", + "bbox": [ + 0.127, + 0.171, + 0.192, + 0.183 + ], + "angle": 0, + "content": "Plaintext:" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.187, + 0.666, + 0.201 + ], + "angle": 0, + "content": "Patient ID: R094713; Name: Jamie Lee; Age: 45; Gender: Female; EMR: EHR-234987." 
+ }, + { + "type": "title", + "bbox": [ + 0.127, + 0.204, + 0.254, + 0.217 + ], + "angle": 0, + "content": "Encryption results:" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.22, + 0.726, + 0.234 + ], + "angle": 0, + "content": "(1) Rot13: Cngvrag VQ: E327046; Anzr: Wznvr Yrr; Ntr: 78; Traqe: Srznyr; RZE: RUE-567210." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.237, + 0.739, + 0.25 + ], + "angle": 0, + "content": "(2) Atbash: Kzgrvmg RW: I905286; Mznv: Qznrv Ovv; Ztv: 54; Tvmwvi: Uvnzov; VNI: VSI-765012." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.254, + 0.874, + 0.282 + ], + "angle": 0, + "content": "(3) Polybius: 34 11 42 23 15 32 42 23 14 : 36 66 65 56 63 53 55 ; 32 11 31 15 : 24 11 31 23 15 26 15 15 ; 11 21 15 : 56 61 ; 21 15 32 14 15 36 : 16 15 31 11 26 15 ; 15 31 36 : 15 22 36 - 54 55 56 65 64 63." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.286, + 0.724, + 0.3 + ], + "angle": 0, + "content": "(4) Reverse: .789432-R HRE ;elameF :redneG ;54 :egA ;eeL eimaJ :emaN ;317490 R :DI tneitaP" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.303, + 0.767, + 0.316 + ], + "angle": 0, + "content": "(5) SwapPairs: aPteti DI: 0R94713; aNme: aJmei eLe; gAe: 45; eGndre: eFmale; MRE: HRE-239487." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.32, + 0.756, + 0.333 + ], + "angle": 0, + "content": "(6) **WordShift:** atientP ID: R94713; ameN: Jamie eLe; geA: 45; enderG: emaleF; REM: EHR-234987." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.336, + 0.874, + 0.366 + ], + "angle": 0, + "content": "(7) DualAvgCode: OQaaushjdmosu HJCE: QS009935680224; MOaalndf: IKaalnhjdf KMdfd; AAfhdf: 3546; FHdfmoced-fqs: EGdfnaakmdf; DFLNQS: DFGIQS-132435997968." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.369, + 0.769, + 0.383 + ], + "angle": 0, + "content": "(8) ParityShift: Qzuhdou HE: S185602; Ozld: Kzlhd Mdd; Zfd: 54; Fdoeds: Gdlzmd; DLS: DIS-325896." 
+ }, + { + "type": "list", + "bbox": [ + 0.127, + 0.22, + 0.874, + 0.383 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.127, + 0.432, + 0.632, + 0.446 + ], + "angle": 0, + "content": "Example A.4: Plain-Ciphertext Pair (Banking Information) - Letter&Number" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.452, + 0.367, + 0.464 + ], + "angle": 0, + "content": "# Domain: Financial Confidential Data" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.468, + 0.354, + 0.481 + ], + "angle": 0, + "content": "## Subdomain: Banking Information" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.484, + 0.679, + 0.498 + ], + "angle": 0, + "content": "Tag Combination: [\"Account Number\", \"Bank Card Number\", \"Payment Platform ID\"]" + }, + { + "type": "title", + "bbox": [ + 0.127, + 0.501, + 0.192, + 0.513 + ], + "angle": 0, + "content": "Plaintext:" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.518, + 0.874, + 0.547 + ], + "angle": 0, + "content": "Account Number: 123456789, Bank: LA Bank, Card Number: 9876-5432-1098-7654, Payment Method: Virtual Credit Card, Payment Platform ID: ABC123XYZ, Timestamp: 2023-09-15 14:35, Amount: $250.00." + }, + { + "type": "title", + "bbox": [ + 0.127, + 0.551, + 0.254, + 0.564 + ], + "angle": 0, + "content": "Encryption results:" + }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.568, + 0.872, + 0.597 + ], + "angle": 0, + "content": "(1) Rot13: Nppbhag Ahzore: 456789012, Onax: YN Onax, Pneq Ahzore: 2109-8765-4321-0987, CnIzrag Zrgubq: Iveghny PerqvG Pneq, CnIzrag CyngsbEZ VQ: NOP456KLM, Gvzrfgnzc: 5356-32-48 47:68, Nzbhag: $583.33." + }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.601, + 0.872, + 0.631 + ], + "angle": 0, + "content": "(2) Atbash: Zxlfmg Mfnyvi: 876543210, Yzmp: OZ Yzmp, Xziw Mfnyvi: 0123-4567-8901-2345, Kzbnvmg Nvgslw: Erigfzo Xivwr Xziw, Kzbnvmg Kozgulin RW: ZYX876CBA, Grnvhgznk: 7976-90-84 85:64, Znlfmg: $749.99." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.634, + 0.874, + 0.696 + ], + "angle": 0, + "content": "(3) Polybius: 11 13 13 33 43 32 42 32 43 31 12 15 36 : 53 54 55 56 61 62 63 64 65 , 12 11 32 25 : 26 11 12 11 32 25 , 13 11\n36 14 32 43 31 12 15 36 : 65 64 63 62 - 61 56 55 54 - 53 66 65 64 - 63 62 61 56 , 34 11 51 31 15 32 42 31 15 42 22 33 14 :\n44 23 36 42 43 11 26 13 36 15 14 23 42 13 11 36 14 , 34 11 51 31 15 32 42 34 26 11 42 16 33 36 31 23 14 : 11 12 13 53 54\n55 46 51 52 , 42 23 31 15 41 42 11 31 34 : 54 66 54 55 - 66 65 - 53 61 53 56 : 55 61 , 11 31 33 43 32 42 : $546166 .6666 ." + }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.7, + 0.872, + 0.731 + ], + "angle": 0, + "content": "(4) Vigenère: Swdpgnt: Jqyavsap, Eciop: Mutrlccy, Tkxe: 2022-03-12 14:30, Lqnavtop: 123 Plo Svcege, Zfhtcgc Uxivs qmsgcvgo ufsrtckzuu aeeixta nglr 5tj Axp qy 2022-03-13." + }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.734, + 0.874, + 0.763 + ], + "angle": 0, + "content": "(5) Reverse: .00.052$ :tnuomA ,53:41 51-90-3202 :pmatsemit ,ZYX321CBA :DI mroftalP tnemyap ,draC tiderC lautriV :dohtem tnemyap ,4567-8901-2345-6789 :rebnuN draC ,knaB AL :knaB ,987654321 :rebnuN tnuoccA" + }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.767, + 0.872, + 0.796 + ], + "angle": 0, + "content": "(6) SwapPairs: cAotcnu mNuber: 214365879, aBnk: A Lank, aCrd Nmu:bre 8967-5423-1980-7564, aPymnet Mtohed: Vritaul Cerdti aCdr, aPymnet Ptaforml DI: BAC321YXZ, iTmsetamp: 3202-90-51 53:41, aAmount: $250.00." + }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.799, + 0.874, + 0.862 + ], + "angle": 0, + "content": "(7) DualAvgCode: AAbbddnptvmosu MOtvlnacdfqs: 021324354657687999, ACAamojl: KMAA ACAamojl, BDaaqsc MEtvlnacdfqs: 99796857-46352413-02009979-68574635, OQaaxzlndfmosu LNdfsuginpce: UWhjssutvaakm BDqsdfcehjsu BDaaqsc, OQaaxzlndfmosu OQkmaasuegnpqsln HJCE: AAACBD021324WYXZZZ, SUhjlndfrtsuaalnoq: 13001324-0099-0246 0235:2446, AAlnnptvmosu: $134600.0000." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.127, + 0.865, + 0.872, + 0.896 + ], + "angle": 0, + "content": "(8) ParityShift: Zbbntou Otlcds: 032547698, Czoj: MZ Czoj, Bzse Otlcds: 8967-4523-0189-6745, Qzxldou Lduine: Whsutzm Bsdehu Bzse, Qzxldou Qmzugsnl HE: ZCB032YXA, Uhldruzlj: 3132-18-04 05:24, Zlntou: $341.11." + }, + { + "type": "list", + "bbox": [ + 0.127, + 0.568, + 0.874, + 0.896 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5943" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.284, + 0.139, + 0.713, + 0.155 + ], + "angle": 0, + "content": "Table 5: Descriptions of Encryption Algorithms in CipherBank" + }, + { + "type": "table", + "bbox": [ + 0.115, + 0.165, + 0.879, + 0.862 + ], + "angle": 0, + "content": "
AlgorithmDescription
Rot13A simple substitution cipher that shifts each letter 13 places forward in the alphabet. Encryption and decryption are identical, as applying the transformation twice restores the original text. Non-alphabetic characters remain unchanged.Additionally, Rot13 in CipherBank supports number encryption by shifting digits cyclically within the range 0-9.
AtbashA monoalphabetic substitution cipher where each letter is replaced with its counterpart from the reversed alphabet (e.g., A→Z, B→Y). Since the transformation is symmetric, encryption and decryption follow the same process CipherBank's Atbash implementation extends this to digits, where each number is replaced with its complement relative to 9 (e.g., 0→9, 1→8, ..., 9→0).
PolybiusA fractionating substitution cipher that replaces each letter with a two-digit coordinate from a 6×6 grid, mapping characters to numerical positions. Traditional Polybius squares typically use a 5×5 grid, supporting only letter encryption while merging I and J into the same cell, leading to ambiguity during decryption. To address this limitation and enable number encryption, CipherBank extends the Polybius square to a 6×6 grid, allowing both letters and numbers to be uniquely represented as coordinate pairs, increasing the cipher's complexity.
VigenèreA polyalphabetic substitution cipher that employs multiple shifting alphabets determined by a repeating key. Unlike monoalphabetic ciphers that use a single mapping, Vigenère utilizes multiple substitution tables, where each plaintext letter is shifted based on the corresponding key character's position in the alphabet. By default, the key is set to "ACL".This multi-table approach enhances security by distributing letter frequencies across different shifts, making it more resistant to frequency analysis. Decryption reverses this process by applying the inverse shifts dictated by the key. Unlike Rot13, it requires a key for both encryption and decryption.
ReverseA transposition cipher that reverses the order of all characters in the plaintext. Since it does not substitute characters, it preserves all information but alters the sequence, making it effective against naive attacks.
SwapPairsA transposition cipher that swaps adjacent characters in the plaintext. If the text length is odd, the final character remains unchanged. Decryption follows the same swapping process.
DualAvgCodeA custom transformation where each letter expands into two adjacent characters, shifting one position forward and one position backward in the ASCII table. Special cases (e.g., 'a', 'z', 'A', 'Z') are duplicated instead CipherBank extends this method to digits, where each number expands into two adjacent values (e.g., 2 → "13", 5 → "46"), increasing redundancy in the encrypted text.
ParityShiftA custom encryption method that shifts each letter one position forward or backward based on its ASCII parity. Even-ASCII characters shift forward, while odd-ASCII characters shift backward. For digits, ParityShift follows a similar rule, shifting numbers based on their parity (e.g., even numbers shift up, odd numbers shift down within 0-9).
WordShiftA transformation applied at the word level rather than the character level. Each word undergoes a left shift by a fixed number of positions, cycling characters within the word while preserving word spacing. Decryption reverses this shift, ensuring character order is restored within each word. By default, the shift is set to 3 positions.
" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.522, + 0.941 + ], + "angle": 0, + "content": "5944" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.139, + 0.212, + 0.151 + ], + "angle": 0, + "content": "Example B.1" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.159, + 0.184, + 0.17 + ], + "angle": 0, + "content": "## Role:" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.175, + 0.348, + 0.188 + ], + "angle": 0, + "content": "Cryptography Analysis Expert." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.192, + 0.191, + 0.203 + ], + "angle": 0, + "content": "## Goals:" + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.208, + 0.842, + 0.222 + ], + "angle": 0, + "content": "Utilize the provided ciphertext and plaintext examples to analyze encryption patterns and decrypt new ciphertext." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.225, + 0.217, + 0.236 + ], + "angle": 0, + "content": "## Workflow:" + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.241, + 0.812, + 0.254 + ], + "angle": 0, + "content": "1. Analyze the provided ciphertext and plaintext examples to identify possible encryption patterns and rules." + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.258, + 0.751, + 0.271 + ], + "angle": 0, + "content": "2. Apply the decryption algorithm to the new ciphertext, attempt to decrypt, and verify the results." 
+ }, + { + "type": "list", + "bbox": [ + 0.156, + 0.241, + 0.812, + 0.271 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.411, + 0.289, + 0.586, + 0.303 + ], + "angle": 0, + "content": "Figure 5: System Prompt" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.421, + 0.213, + 0.433 + ], + "angle": 0, + "content": "Example B.2" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.441, + 0.233, + 0.453 + ], + "angle": 0, + "content": "Background:" + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.457, + 0.774, + 0.47 + ], + "angle": 0, + "content": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given examples." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.474, + 0.275, + 0.487 + ], + "angle": 0, + "content": "Think step by step." + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.49, + 0.735, + 0.503 + ], + "angle": 0, + "content": "Provide the Plaintext result in the format text, where text is the decrypted text." 
+ }, + { + "type": "title", + "bbox": [ + 0.128, + 0.507, + 0.217, + 0.519 + ], + "angle": 0, + "content": "Examples:" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.524, + 0.241, + 0.536 + ], + "angle": 0, + "content": "* Example 1:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.541, + 0.266, + 0.553 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.557, + 0.749, + 0.57 + ], + "angle": 0, + "content": "Anzr: Rzvyl Wbuafb; Qngr bs Ovegu: Whyl 15, 1990; Cnffcbeg Ahzore: L987654321" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.574, + 0.257, + 0.585 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.589, + 0.732, + 0.602 + ], + "angle": 0, + "content": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.607, + 0.241, + 0.619 + ], + "angle": 0, + "content": "* Example 2:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.624, + 0.266, + 0.636 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.639, + 0.782, + 0.652 + ], + "angle": 0, + "content": "Pnfr Ahzone: 2024-CF-001234; Pnfr Glcr: Gursg/Oernx-Va; Svyat Qngr: Bpgbore 19, 2024" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.657, + 0.257, + 0.668 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.673, + 0.774, + 0.686 + ], + "angle": 0, + "content": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.689, + 0.241, + 0.702 + ], + "angle": 0, + "content": "* Example 3:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.707, + 0.266, + 0.719 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.722, + 0.865, + 0.736 + ], + "angle": 0, + "content": "gnk_vqragvsvre: GKA-2023-NOP456, gnk_erpbeqf: 
Irne: 2023, fgnghf: Cebprffrq, ershaq_vffhrq: 620.00" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.74, + 0.257, + 0.751 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.755, + 0.84, + 0.768 + ], + "angle": 0, + "content": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.772, + 0.19, + 0.785 + ], + "angle": 0, + "content": "Input:" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.789, + 0.236, + 0.802 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.805, + 0.458, + 0.819 + ], + "angle": 0, + "content": "Yrqvn, na Nzrevpna, erfvqrva Ybf Natryrf." + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.822, + 0.227, + 0.833 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "image_caption", + "bbox": [ + 0.321, + 0.85, + 0.676, + 0.865 + ], + "angle": 0, + "content": "Figure 6: User Prompt (Rot13 - 3shot - Only Letter)" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5945" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.114, + 0.084, + 0.427, + 0.101 + ], + "angle": 0, + "content": "C Extended Experimental Results" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.11, + 0.465, + 0.141 + ], + "angle": 0, + "content": "C.1 Levenshtein Distance Evaluation from Main Results" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.147, + 0.488, + 0.291 + ], + "angle": 0, + "content": "In the main text, most reported results are based on accuracy, which provides a binary assessment of decryption success. However, accuracy does not account for cases where decrypted outputs closely resemble the ground truth but contain minor errors. 
To provide a more fine-grained evaluation, we also compute Levenshtein similarity, which measures the edit distance between the model output and the correct plaintext." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.292, + 0.487, + 0.322 + ], + "angle": 0, + "content": "We define the Levenshtein similarity score as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.183, + 0.334, + 0.488, + 0.369 + ], + "angle": 0, + "content": "\\[\nS _ {\\mathrm {l e v}} = 1 - \\frac {d _ {\\mathrm {l e v}} \\left(P _ {\\mathrm {p r e d}} , P _ {\\mathrm {r e f}}\\right)}{\\max \\left(\\left| P _ {\\mathrm {p r e d}} \\right| , \\left| P _ {\\mathrm {r e f}} \\right|\\right)} \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.134, + 0.376, + 0.189, + 0.388 + ], + "angle": 0, + "content": "where:" + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.401, + 0.488, + 0.433 + ], + "angle": 0, + "content": "- \\( d_{\\mathrm{lev}}(P_{\\mathrm{pred}}, P_{\\mathrm{ref}}) \\) is the Levenshtein distance between the predicted and reference plaintexts." + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.444, + 0.488, + 0.476 + ], + "angle": 0, + "content": "- \\( |P_{\\mathrm{pred}}| \\) and \\( |P_{\\mathrm{ref}}| \\) denote the lengths of the predicted and reference plaintexts, respectively." + }, + { + "type": "list", + "bbox": [ + 0.136, + 0.401, + 0.488, + 0.476 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.487, + 0.488, + 0.567 + ], + "angle": 0, + "content": "This metric normalizes the edit distance by the length of the longer string, ensuring that similarity is measured on a scale from 0 to 1, where 1 represents an exact match and lower values indicate increasing deviations from the ground truth." 
+ }, + { + "type": "text", + "bbox": [ + 0.113, + 0.568, + 0.489, + 0.646 + ], + "angle": 0, + "content": "The corresponding Levenshtein-based evaluation results for Table 2 are presented in Table 6 and Figure 7, offering deeper insights into models' decryption performance beyond strict accuracy metrics." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.649, + 0.489, + 0.856 + ], + "angle": 0, + "content": "One key observation is that most models achieve significantly higher Levenshtein similarity scores than their accuracy scores, indicating that even when decryption is incorrect, outputs often retain structural similarities to the original plaintext. This suggests that models capture some encryption patterns but struggle with full decryption, failing to consistently apply correct transformations. Notably, Claude-Sonnet-3.5 achieves near-perfect scores (\\(>0.99\\) for most ciphers), demonstrating its ability to minimize decryption errors while maintaining structural accuracy, making it the most reliable model overall." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.858, + 0.488, + 0.921 + ], + "angle": 0, + "content": "Interestingly, reasoning models such as DeepSeek-R1 and o1 exhibit a large gap between accuracy and Levenshtein similarity. Despite their moderate accuracy, their similarity scores" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.883, + 0.197 + ], + "angle": 0, + "content": "often exceed 0.80, indicating that they frequently produce outputs that preserve much of the original structure but contain systematic errors. This suggests that reasoning models are better at capturing encryption logic but may struggle with precise execution, sometimes overcomplicating simpler tasks." 
+ }, + { + "type": "text", + "bbox": [ + 0.508, + 0.198, + 0.884, + 0.326 + ], + "angle": 0, + "content": "Conversely, chat models such as DeepSeek-V3 and Llama-based models exhibit high variability, showing relatively low accuracy but moderate Levenshtein similarity (0.40 - 0.70). This indicates a tendency toward semantic approximation rather than strict decryption, where models generate linguistically plausible outputs that fail to adhere to precise encryption rules." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.327, + 0.884, + 0.455 + ], + "angle": 0, + "content": "Another notable trend is that transposition ciphers (e.g., Reverse, SwapPairs) yield lower Levenshtein similarity scores across all models, confirming that character reordering remains a major challenge. Unlike substitution ciphers, where models can rely on token-level mappings, transposition ciphers require strict positional tracking, which even the strongest models struggle to handle effectively." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.456, + 0.884, + 0.632 + ], + "angle": 0, + "content": "Overall, Levenshtein similarity results highlight fundamental differences in how chat and reasoning models approach decryption. Chat models rely more on semantic fluency, leading to structurally incorrect but coherent outputs, whereas reasoning models exhibit stronger pattern retention but occasionally fail due to overgeneralization or overthinking. These findings suggest that while LLMs can approximate decryption rules, achieving precise symbolic transformations remains a significant challenge, especially for positional-based ciphers." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.643, + 0.816, + 0.659 + ], + "angle": 0, + "content": "C.2 Additional Analysis and Insights" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.664, + 0.883, + 0.76 + ], + "angle": 0, + "content": "In this section, we present more detailed experimental results that complement the findings in the main text. 
These additional analyses provide further insights into model performance across different encryption schemes, highlighting trends, challenges, and specific cases where models excel or struggle." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.761, + 0.884, + 0.904 + ], + "angle": 0, + "content": "In the analysis of length sensitivity, plaintexts of different lengths can be seen in Figure 8. The impact of plaintext length on decryption performance is shown in Table 7 and Table 8, where we compare model accuracy on short vs. long texts. These results illustrate how increasing text length affects model performance, revealing notable differences in decryption robustness across various architectures" + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.906, + 0.883, + 0.921 + ], + "angle": 0, + "content": "The dataset used for the noise interference experi" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5946" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.286, + 0.101, + 0.713, + 0.116 + ], + "angle": 0, + "content": "Table 6: Results on CipherBank(3-shot) Levenshtein similarity" + }, + { + "type": "table", + "bbox": [ + 0.119, + 0.126, + 0.884, + 0.374 + ], + "angle": 0, + "content": "
ModelSubstitution CiphersTransposition CiphersCustom CiphersCipher Score
Rot13At ba shPolybiusVigenèreReverseSwapPairsDualAvgCodeParityShiftWordShiftLevenshtein Similarity
Open-source Chat Models
Mixtral-8x22B-v0.10.45420.37440.26940.40320.38100.47450.33300.38710.64010.4130
Qwen2.5-72B-Instruct0.55560.42880.30420.40160.40220.53080.37180.47860.84270.4796
Llama-3.1-70B-Instruct0.57760.43780.31320.44310.37750.55420.39900.45050.72880.4758
Llama-3.3-70B-Instruct0.57540.40540.13170.43970.24820.53750.38330.40960.45800.3988
DeepSeek-V30.91950.75940.45620.48440.90880.69750.42050.57310.88870.6787
Closed-source Models
GPT-4o-mini-2024-07-180.64590.49350.24630.44990.56640.60050.34180.41880.72580.4988
GPT-4o-2024-08-060.96030.58760.34450.53460.81700.79680.43040.58500.89400.6612
GPT-4o-2024-11-200.93400.60540.35110.53380.72770.67800.42350.55300.87150.6309
gemini-1.5-pro0.93090.50430.49690.52010.75360.73170.47840.57200.88190.6522
gemini-2.0-flash-exp0.96160.65670.48130.50640.89010.75690.44760.53080.86050.6769
Claude-Sonnet-3.5-10220.99840.99610.99550.71430.98930.92620.78740.98830.97120.9296
Reasoning Models
QwQ-32B-Preview0.24770.15910.12310.16600.14440.16660.15640.16450.30570.1815
DeepSeek-R10.99200.97610.93440.52270.73680.72130.83160.69280.84910.8063
gemini-2.0-flash-thinking0.96640.85710.90740.55110.85080.77880.42610.73530.87770.7723
o1-mini-2024-09-120.97570.98600.95630.54120.59590.52670.39540.69350.72360.7105
o1-2024-12-170.83200.99280.96400.56420.77250.92080.86530.65620.93350.8335
" + }, + { + "type": "image", + "bbox": [ + 0.152, + 0.419, + 0.847, + 0.679 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.264, + 0.691, + 0.733, + 0.707 + ], + "angle": 0, + "content": "Figure 7: Model Performance - Accuracy vs. Levenshtein Similarity." + }, + { + "type": "table_caption", + "bbox": [ + 0.333, + 0.751, + 0.665, + 0.766 + ], + "angle": 0, + "content": "Table 7: Decryption Performance on Short Texts" + }, + { + "type": "table", + "bbox": [ + 0.12, + 0.776, + 0.884, + 0.903 + ], + "angle": 0, + "content": "
ModelSubstitution CiphersTransposition CiphersCustom CiphersCipher Score
Rot13AtbashPolybiusVigenèreReverseSwapPairsDualAvgCodeParityShiftWordShiftAccuracyavg
DeepSeek-V340.0027.834.351.7429.570.870.872.6111.313.24
DeepSeek-R180.0071.3053.040.8718.260.8735.6518.2612.1732.27
GPT-4o-2024-11-2034.7813.040.87021.741.740.871.7410.439.47
gemini-2.0-flash-exp42.614.351.740.8740.872.6101.748.7011.50
Claude-Sonnet-3.5-102286.0977.3969.573.4877.398.709.5763.4842.6148.70
gemini-2.0-flash-thinking52.1726.9633.912.6133.910.87013.9114.7819.90
o1-mini-2024-09-1264.3582.6165.22015.6506.6713.912.6133.77
o1-2024-12-1761.7489.5784.550.8723.4846.6761.7417.1735.8047.61
" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5947" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.334, + 0.173, + 0.664, + 0.187 + ], + "angle": 0, + "content": "Table 8: Decryption Performance on Long Texts" + }, + { + "type": "table", + "bbox": [ + 0.12, + 0.197, + 0.884, + 0.322 + ], + "angle": 0, + "content": "
ModelSubstitution CiphersTransposition CiphersCustom CiphersCipher Score
Rot13AtbashPolybiusVigenèreReverseSwapPairsDualAvgCodeParityShiftWordShiftAccuracyavg
DeepSeek-V326.534.760.6809.520005.445.22
DeepSeek-R168.0348.9837.4104.76014.978.845.4420.94
GPT-4o-2024-11-2020.414.080012.240003.404.46
gemini-2.0-flash-exp30.612.041.36020.410.68002.726.42
Claude-Sonnet-3.5-102292.5278.9182.311.3663.955.442.7263.2740.1447.85
gemini-2.0-flash-thinking31.299.5212.24014.291.3602.724.768.47
o1-mini-2024-09-1231.9757.1432.6500002.72017.35
o1-2024-12-1758.5070.7561.110.688.1615.3841.58.6625.6634.38
" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.511, + 0.347, + 0.524 + ], + "angle": 0, + "content": "Example C.1: Plaintiff Examples" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.531, + 0.406, + 0.543 + ], + "angle": 0, + "content": "Short: James, American, is married to Susan." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.547, + 0.873, + 0.609 + ], + "angle": 0, + "content": "Long: John Smith, born on January 15, 1990, holds American nationality and resides at 123 Elm Street, Springfield, Illinois. His mobile number is +1-312-555-6789, and his ID card number is IDURITY1234567. He is married to Jane Smith, who can be reached at +1-312-555-6789. They have two children: Emily (16, high school) and Michael (12, middle school). Their address and contact information are the same." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.62, + 0.289, + 0.633 + ], + "angle": 0, + "content": "Short:Jimmy,GPA:3.71." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.637, + 0.873, + 0.666 + ], + "angle": 0, + "content": "Long: David Wilson, Masters in Data Science, GPA: 3.95, Expected Graduation: 2023, Courses: Big Data Analytics, Machine Learning, Data Visualization." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.676, + 0.55, + 0.689 + ], + "angle": 0, + "content": "Short: Medical Record Number: 987-654-321; Patient Name: James." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.693, + 0.873, + 0.723 + ], + "angle": 0, + "content": "Long: David Wilson, Masters in Data Science, GPA: 3.95, Expected Graduation: 2023, Courses: Big Data Analytics, Machine Learning, Data Visualization." 
+ }, + { + "type": "text", + "bbox": [ + 0.128, + 0.733, + 0.373, + 0.746 + ], + "angle": 0, + "content": "Short: Lucas, lucas@ucc.company.com" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.75, + 0.873, + 0.796 + ], + "angle": 0, + "content": "Long: Hank, Senior Developer, IT Department, Salary: \\(95,000, Bonuses: \\)5,000, Allowances: $2,000 (Remote Work), Performance Rating: A, Full-time, Start Date: 2020-03-15, Last Promotion: 2021-08-10, Benefits: Health Insurance, Retirement 5%, Training: \\)1,500/year, Projects: Nexus, Zeta, Feedback: 4.5/5" + }, + { + "type": "image_caption", + "bbox": [ + 0.318, + 0.814, + 0.68, + 0.828 + ], + "angle": 0, + "content": "Figure 8: Samples used for length sensitivity analysis" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.52, + 0.941 + ], + "angle": 0, + "content": "5948" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.127, + 0.086, + 0.32, + 0.1 + ], + "angle": 0, + "content": "Example C.2: Noise Example" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.106, + 0.203, + 0.119 + ], + "angle": 0, + "content": "Example 1:" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.122, + 0.406, + 0.135 + ], + "angle": 0, + "content": "Origin: Card Number: 9876 5432 1098 7654" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.139, + 0.419, + 0.151 + ], + "angle": 0, + "content": "Noise: Card Numbr: 9876 54-32 1O98 765 four" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.162, + 0.203, + 0.175 + ], + "angle": 0, + "content": "Example 2:" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.178, + 0.628, + 0.192 + ], + "angle": 0, + "content": "Origin: Pay Date: 2023-05-15, Income: \\(75,000, Currency: USD, Bonus: \\)5,000" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.195, + 0.812, + 0.209 + ], + "angle": 0, + "content": " Noise: Pay Date (scheduled): 2023-05-15! Income approx: $75,000. Currency spec: USD, and Bonus = $5,000." 
+ }, + { + "type": "text", + "bbox": [ + 0.127, + 0.219, + 0.203, + 0.232 + ], + "angle": 0, + "content": "Example 3:" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.235, + 0.675, + 0.248 + ], + "angle": 0, + "content": "Predictions: Officer ID: P12345, Name: John, Position: Sergeant, Department: Homicide" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.251, + 0.829, + 0.266 + ], + "angle": 0, + "content": "References: Officer Identification-No.: P12345, Full-Name: John (J.), Job-Title: Sergeant, Dept.: Homicide Squad." + }, + { + "type": "image_caption", + "bbox": [ + 0.272, + 0.282, + 0.724, + 0.298 + ], + "angle": 0, + "content": "Figure 9: The samples used for the noise comparison experiments." + }, + { + "type": "table_caption", + "bbox": [ + 0.334, + 0.311, + 0.662, + 0.326 + ], + "angle": 0, + "content": "Table 9: Decryption Performance without Noise" + }, + { + "type": "table", + "bbox": [ + 0.117, + 0.336, + 0.884, + 0.521 + ], + "angle": 0, + "content": "
ModelRot13AtbashReverseSwapPairsParityShiftWordShiftAccuracyavg
Open-source Models
DeepSeek-V350.0031.5018.506.509.0017.0022.08
DeepSeek-R183.5077.5042.002.5020.005.5038.50
Closed-source Models
GPT-4o-2024-11-2049.5010.5013.5003.505.5013.75
Gemini-2.0-flash-exp45.007.5042.502.505.0015.5019.67
Claude-Sonnet-3.5-102292.5085.0062.5010.0070.0035.0059.17
Gemini-2.0-flash-thinking62.5033.5022.50017.501.5022.92
o1-mini-2024-09-1255.5067.505.00017.50024.25
" + }, + { + "type": "table_caption", + "bbox": [ + 0.34, + 0.533, + 0.657, + 0.548 + ], + "angle": 0, + "content": "Table 10: Decryption Performance with Noise" + }, + { + "type": "table", + "bbox": [ + 0.117, + 0.558, + 0.884, + 0.743 + ], + "angle": 0, + "content": "
ModelRot13AtbashReverseSwapPairsParityShiftWordShiftAccuracyavg
Open-source Models
DeepSeek-V38.5010.507.5000.501.504.75
DeepSeek-R133.5023.004.5001.50010.42
Closed-source Models
GPT-4o-2024-11-205.5004.500001.67
Gemini-2.0-flash-exp2.50002.50000.83
Claude-Sonnet-3.5-102250.5040.0020.002.5030.007.5025.08
Gemini-2.0-flash-thinking30.5019.003.5002.5009.25
o1-mini-2024-09-1215.0020.000005.83
" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.767, + 0.489, + 0.895 + ], + "angle": 0, + "content": "iments can be found in Figure 9. Detailed results on the impact of noise on decryption performance are presented in Table 9 and Table 10, comparing model performance on short and long plaintexts under noisy conditions. These findings highlight the varying degrees of resilience across models, with some maintaining reasonable performance under noise while others degrade significantly." + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.906, + 0.486, + 0.922 + ], + "angle": 0, + "content": "In the analysis of the impact of encryption scope" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.767, + 0.885, + 0.911 + ], + "angle": 0, + "content": "on decryption performance, the test prompts used are shown in Figure 10. Detailed results are presented in Table 11. This analysis compares model performance when encrypting only letters versus encrypting both letters and numbers. The results highlight how different models handle the increased complexity introduced by number encryption, showing varying degrees of adaptability. While some models maintain relatively stable per" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.928, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5949" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.127, + 0.086, + 0.214, + 0.1 + ], + "angle": 0, + "content": "Example C.3" + }, + { + "type": "title", + "bbox": [ + 0.127, + 0.106, + 0.233, + 0.119 + ], + "angle": 0, + "content": "Background:" + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.122, + 0.775, + 0.137 + ], + "angle": 0, + "content": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given examples." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.139, + 0.275, + 0.153 + ], + "angle": 0, + "content": "Think step by step." 
+ }, + { + "type": "text", + "bbox": [ + 0.156, + 0.155, + 0.736, + 0.169 + ], + "angle": 0, + "content": "Provide the Plaintext result in the format text, where text is the decrypted text." + }, + { + "type": "title", + "bbox": [ + 0.127, + 0.172, + 0.217, + 0.185 + ], + "angle": 0, + "content": "Examples:" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.189, + 0.241, + 0.202 + ], + "angle": 0, + "content": "* Example 1:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.206, + 0.267, + 0.218 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.222, + 0.756, + 0.235 + ], + "angle": 0, + "content": "Mznv: Vnrob Qlsmhlm; Wzgv lu Yrigs: Qfob 84, 8009; Kzhhklig Mfnyvi: B012345678" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.239, + 0.257, + 0.251 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.255, + 0.732, + 0.268 + ], + "angle": 0, + "content": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.272, + 0.241, + 0.284 + ], + "angle": 0, + "content": "* Example 2:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.289, + 0.267, + 0.301 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.305, + 0.816, + 0.318 + ], + "angle": 0, + "content": "Xzhv Mfnyvi: 7975-KH-998765; Xzhv Gbkv: Gsvug/Yivzp-Rm; Urormt Wzgv: Lxglyvi 80, 7975" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.322, + 0.257, + 0.333 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.337, + 0.775, + 0.351 + ], + "angle": 0, + "content": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.355, + 0.241, + 0.367 + ], + "angle": 0, + "content": "* Example 3:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.372, + 0.267, + 0.384 + ], + 
"angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.388, + 0.873, + 0.402 + ], + "angle": 0, + "content": "gzc_rwvmgrurvi: GCM-7976-ZYX543, gzc_ivxliwh: bvzi: 7976, hgzgfh: Kilxvhhvw, ivufmw_rhhfvw:" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.405, + 0.172, + 0.416 + ], + "angle": 0, + "content": "379.99" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.421, + 0.257, + 0.433 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.437, + 0.84, + 0.451 + ], + "angle": 0, + "content": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00" + }, + { + "type": "title", + "bbox": [ + 0.127, + 0.454, + 0.19, + 0.467 + ], + "angle": 0, + "content": "Input:" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.47, + 0.237, + 0.484 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.487, + 0.475, + 0.5 + ], + "angle": 0, + "content": "Wvzm slowh gsv kzhhklig mfnyvi Z87654321." + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.505, + 0.227, + 0.516 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "image_caption", + "bbox": [ + 0.294, + 0.532, + 0.703, + 0.547 + ], + "angle": 0, + "content": "Figure 10: User Prompt (Atbash - 3shot - Letter & Number)" + }, + { + "type": "table_caption", + "bbox": [ + 0.273, + 0.561, + 0.724, + 0.575 + ], + "angle": 0, + "content": "Table 11: Impact of Encryption Scope on Decryption Performance" + }, + { + "type": "table", + "bbox": [ + 0.116, + 0.585, + 0.884, + 0.761 + ], + "angle": 0, + "content": "
ModelRot13AtbashPolybiusDualAvgCodeParityShiftAccuracyavg
Open-source Models
DeepSeek-V368.94/23.3224.02/14.6419.35/6.013.51/011.31/025.23 / 8.79
DeepSeek-R159.10/43.0563.19/23.0239.21/43.2337.36/013.05/0.7642.38 / 22.01
Closed-source Models
GPT-4o-2024-11-2027.53/010.08/00/02.54/02.67/08.56 / 0
gemini-2.0-flash-exp47.54/07.50/2.507.50/5.050/02.67/013.04 / 1.51
Claude-Sonnet-3.5-102292.50/50.0087.56/27.5365.00/32.2515.00/062.54/17.3564.52 / 25.43
gemini-2.0-flash-thinking35.00/2.650/2.540/10.000/02.50/07.50 / 3.04
o1-mini-2024-09-1250.00/32.5972.57/35.0040.00/42.530/07.50/0.7634.01 / 22.18
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.113, + 0.766, + 0.884, + 0.793 + ], + "angle": 0, + "content": "Note: Values before the \\( \\prime /{}^{\\prime } \\) indicate performance when encrypting letters only, while values after the \\( {}^{\\prime }/{}^{\\prime } \\) represent performance when encrypting both letters and numbers." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.829, + 0.49, + 0.861 + ], + "angle": 0, + "content": "formance, others exhibit significant drops when required to decrypt mixed alphanumeric ciphertexts." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.874, + 0.489, + 0.921 + ], + "angle": 0, + "content": "For the enhanced prompt template, please refer to Figures 11-19, while more detailed experimental results can be found in Table 12." + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.829, + 0.882, + 0.86 + ], + "angle": 0, + "content": "C.3 Impact of Plaintext Source on Decryption Performance" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.874, + 0.885, + 0.922 + ], + "angle": 0, + "content": "To assess how plaintext characteristics influence decryption performance, we compare results on synthetically generated privacy-sensitive data ver" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.928, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5950" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.255, + 0.214, + 0.268 + ], + "angle": 0, + "content": "Example C.4" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.275, + 0.233, + 0.287 + ], + "angle": 0, + "content": "Background:" + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.291, + 0.874, + 0.305 + ], + "angle": 0, + "content": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.308, + 0.275, + 0.321 + ], + "angle": 0, + "content": "Think step by step." 
+ }, + { + "type": "text", + "bbox": [ + 0.156, + 0.324, + 0.736, + 0.338 + ], + "angle": 0, + "content": "Provide the Plaintext result in the format text, where text is the decrypted text." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.341, + 0.256, + 0.354 + ], + "angle": 0, + "content": "Algorithm Flow:" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.357, + 0.872, + 0.388 + ], + "angle": 0, + "content": "Uses the Caesar cipher with a fixed shift of 13 positions. For each letter in the Plaintext, shift it forward by 13 positions in the alphabet to produce the Ciphertext." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.391, + 0.216, + 0.404 + ], + "angle": 0, + "content": "Examples:" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.407, + 0.242, + 0.42 + ], + "angle": 0, + "content": "* Example 1:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.424, + 0.267, + 0.437 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.44, + 0.75, + 0.454 + ], + "angle": 0, + "content": "Anzr: Rzvyl Wbuafb; Qngr bs Ovegu: Whyl 15, 1990; Cnffcbeg Ahzore: L987654321" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.457, + 0.258, + 0.469 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.473, + 0.733, + 0.487 + ], + "angle": 0, + "content": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.49, + 0.242, + 0.503 + ], + "angle": 0, + "content": "* Example 2:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.507, + 0.267, + 0.52 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.523, + 0.783, + 0.537 + ], + "angle": 0, + "content": "Pnfr Ahzore: 2024-CF-001234; Pnfr Glcr: Gursg/Oernx-Va; Svyvat Qngr: Bpgbore 19, 2024" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.541, + 0.258, + 0.552 + ], + "angle": 0, + "content": "- 
Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.556, + 0.775, + 0.57 + ], + "angle": 0, + "content": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.573, + 0.242, + 0.586 + ], + "angle": 0, + "content": "* Example 3:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.589, + 0.267, + 0.602 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.606, + 0.866, + 0.62 + ], + "angle": 0, + "content": "gnk_vqragvsvre: GKA-2023-NOP456, gnk_erpbeqf: Irne: 2023, fgnghf: Cebprffrq, ershaq_vffhrq: 620.00" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.624, + 0.258, + 0.634 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.639, + 0.841, + 0.653 + ], + "angle": 0, + "content": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.656, + 0.19, + 0.668 + ], + "angle": 0, + "content": "Input:" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.673, + 0.237, + 0.685 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.689, + 0.46, + 0.703 + ], + "angle": 0, + "content": "Yrqvn, na Nzrevpna, erfvqrf va Ybf Natryrf." 
+ }, + { + "type": "text", + "bbox": [ + 0.157, + 0.706, + 0.227, + 0.717 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "image_caption", + "bbox": [ + 0.315, + 0.734, + 0.683, + 0.749 + ], + "angle": 0, + "content": "Figure 11:Enhanced Prompt (ROT13 - 3shot - Letter)" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.519, + 0.941 + ], + "angle": 0, + "content": "5951" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.255, + 0.214, + 0.268 + ], + "angle": 0, + "content": "Example C.5" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.275, + 0.233, + 0.287 + ], + "angle": 0, + "content": "Background:" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.291, + 0.874, + 0.305 + ], + "angle": 0, + "content": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples." + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.308, + 0.275, + 0.321 + ], + "angle": 0, + "content": "Think step by step." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.324, + 0.736, + 0.338 + ], + "angle": 0, + "content": "Provide the Plaintext result in the format text, where text is the decrypted text." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.341, + 0.256, + 0.354 + ], + "angle": 0, + "content": "Algorithm Flow:" + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.357, + 0.83, + 0.371 + ], + "angle": 0, + "content": "Uses the Atbash cipher. Each letter in the \\*\\*Plaintext\\*\\* is replaced with its reverse counterpart in the alphabet." 
+ }, + { + "type": "title", + "bbox": [ + 0.128, + 0.374, + 0.217, + 0.388 + ], + "angle": 0, + "content": "Examples:" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.391, + 0.241, + 0.404 + ], + "angle": 0, + "content": "* Example 1:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.408, + 0.267, + 0.42 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.424, + 0.753, + 0.437 + ], + "angle": 0, + "content": "Mznv: Vnrob Qlsmhlm; Wzgv lu Yrigs: Qfob 15, 1990; Kzhhklig Mfnyvi: B987654321" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.441, + 0.257, + 0.452 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.456, + 0.732, + 0.47 + ], + "angle": 0, + "content": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.473, + 0.241, + 0.486 + ], + "angle": 0, + "content": "* Example 2:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.491, + 0.267, + 0.503 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.506, + 0.816, + 0.52 + ], + "angle": 0, + "content": "Xzhv Mfnyvi: 2024-KH-001234; Xzhv Gbkv: Gsvug/Yivzp-Rm; Urormt Wzgv: Lxglyvi 19, 2024" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.524, + 0.257, + 0.535 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.539, + 0.775, + 0.553 + ], + "angle": 0, + "content": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.556, + 0.241, + 0.569 + ], + "angle": 0, + "content": "* Example 3:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.574, + 0.267, + 0.586 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.589, + 0.874, + 0.603 + ], + "angle": 0, + "content": "gzc_rwvmgrurvi: GCM-2023-ZYX456, gzc_ivxliwh: 
bvzi: 2023, hgzgfh: Kilxvhhvw, ivufmw_rhhfvw:" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.607, + 0.172, + 0.617 + ], + "angle": 0, + "content": "620.00" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.624, + 0.257, + 0.634 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.639, + 0.84, + 0.652 + ], + "angle": 0, + "content": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.656, + 0.19, + 0.668 + ], + "angle": 0, + "content": "Input:" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.673, + 0.236, + 0.685 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.689, + 0.486, + 0.702 + ], + "angle": 0, + "content": "Ovwrz,zm Znvirxzm, ivhrwhrm Olh Zmtvovh." + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.706, + 0.227, + 0.717 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "image_caption", + "bbox": [ + 0.315, + 0.734, + 0.683, + 0.749 + ], + "angle": 0, + "content": "Figure 12:Enhanced Prompt (Atbash - 3shot - Letter)" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5952" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.127, + 0.222, + 0.214, + 0.235 + ], + "angle": 0, + "content": "Example C.6" + }, + { + "type": "title", + "bbox": [ + 0.127, + 0.242, + 0.233, + 0.254 + ], + "angle": 0, + "content": "Background:" + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.258, + 0.873, + 0.272 + ], + "angle": 0, + "content": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.275, + 0.274, + 0.288 + ], + "angle": 0, + "content": "Think step by step." 
+ }, + { + "type": "text", + "bbox": [ + 0.156, + 0.291, + 0.735, + 0.305 + ], + "angle": 0, + "content": "Provide the Plaintext result in the format text, where text is the decrypted text." + }, + { + "type": "title", + "bbox": [ + 0.127, + 0.308, + 0.256, + 0.321 + ], + "angle": 0, + "content": "Algorithm Flow:" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.324, + 0.873, + 0.354 + ], + "angle": 0, + "content": "Uses the Polybius cipher. Each letter in the \\(^{**}\\) Plaintext\\*\\* is mapped to a pair of coordinates in the Polybius square, forming the \\(^{**}\\) Ciphertext\\*." + }, + { + "type": "title", + "bbox": [ + 0.127, + 0.358, + 0.216, + 0.37 + ], + "angle": 0, + "content": "Examples:" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.374, + 0.241, + 0.387 + ], + "angle": 0, + "content": "* Example 1:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.391, + 0.266, + 0.404 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.407, + 0.872, + 0.42 + ], + "angle": 0, + "content": "32 11 31 15 : 15 31 23 26 51 24 33 22 32 41 33 32 ; 14 11 42 15 33 16 12 23 36 42 22 : 24 43 26 51 15 , 19" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.423, + 0.55, + 0.436 + ], + "angle": 0, + "content": "90;3411414134333642324331121536:51987654321" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.441, + 0.256, + 0.452 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.456, + 0.732, + 0.47 + ], + "angle": 0, + "content": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.473, + 0.241, + 0.486 + ], + "angle": 0, + "content": "* Example 2:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.491, + 0.266, + 0.503 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.506, + 0.872, + 0.519 + ], + "angle": 0, + "content": "13 11 41 15 32 43 31 
12 15 36 : 2 0 2 4 - 34 41 - 0 0 1 2 3 4 ; 13 11 41 15 42 51 34 15 : 42 22 15 16 42 / 12" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.523, + 0.661, + 0.535 + ], + "angle": 0, + "content": "36 15 11 25 - 23 32 ; 16 23 26 23 32 21 14 11 42 15 : 33 13 42 33 12 15 36 19 , 20 24" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.541, + 0.256, + 0.552 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.556, + 0.774, + 0.57 + ], + "angle": 0, + "content": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.573, + 0.241, + 0.586 + ], + "angle": 0, + "content": "* Example 3:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.589, + 0.266, + 0.602 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.605, + 0.872, + 0.619 + ], + "angle": 0, + "content": "42 11 46 _ 23 14 15 32 42 23 16 23 15 36 : 42 46 32 _ 20 23 - 11 12 13 456 , 42 11 46 _ 36 15 13 33 36 14" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.622, + 0.873, + 0.635 + ], + "angle": 0, + "content": "41:51 15 11 36:2023,41 42 11 42 43 41:34 36 33 13 15 41 41 15 14,36 15 16 43 32 14_23 41 41 43 15 14:620." 
+ }, + { + "type": "text", + "bbox": [ + 0.127, + 0.64, + 0.149, + 0.651 + ], + "angle": 0, + "content": "00" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.656, + 0.256, + 0.667 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.672, + 0.84, + 0.685 + ], + "angle": 0, + "content": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00" + }, + { + "type": "title", + "bbox": [ + 0.127, + 0.689, + 0.19, + 0.702 + ], + "angle": 0, + "content": "Input:" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.706, + 0.236, + 0.719 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.722, + 0.85, + 0.735 + ], + "angle": 0, + "content": "26 15 14 23 11 , 11 32 11 31 15 36 23 13 11 32 , 36 15 41 23 14 15 41 23 32 26 33 41 11 32 21 15 26 15 41 ." + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.739, + 0.226, + 0.75 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "image_caption", + "bbox": [ + 0.31, + 0.767, + 0.687, + 0.782 + ], + "angle": 0, + "content": "Figure 13: Enhanced Prompt (Polybius - 3shot - Letter)" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.52, + 0.941 + ], + "angle": 0, + "content": "5953" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.255, + 0.214, + 0.268 + ], + "angle": 0, + "content": "Example C.7" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.275, + 0.233, + 0.287 + ], + "angle": 0, + "content": "Background:" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.291, + 0.874, + 0.305 + ], + "angle": 0, + "content": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples." + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.308, + 0.275, + 0.321 + ], + "angle": 0, + "content": "Think step by step." 
+ }, + { + "type": "text", + "bbox": [ + 0.157, + 0.324, + 0.736, + 0.338 + ], + "angle": 0, + "content": "Provide the Plaintext result in the format text, where text is the decrypted text." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.341, + 0.256, + 0.354 + ], + "angle": 0, + "content": "Algorithm Flow:" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.357, + 0.872, + 0.388 + ], + "angle": 0, + "content": "Uses the Vigenère cipher. Each letter in the **Plaintext** is shifted by the corresponding letter in the **Key** to produce the **Ciphertext**." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.391, + 0.216, + 0.403 + ], + "angle": 0, + "content": "Examples:" + }, + { + "type": "text", + "bbox": [ + 0.159, + 0.408, + 0.241, + 0.42 + ], + "angle": 0, + "content": "* Example 1:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.424, + 0.267, + 0.437 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.44, + 0.737, + 0.454 + ], + "angle": 0, + "content": "Nexe: Eotla Jqsnuzn; Dcee zf Miteh: Jwwy 15, 1990; Pcdsrzrv Nwbgc: J987654321" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.457, + 0.258, + 0.469 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.473, + 0.732, + 0.487 + ], + "angle": 0, + "content": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321" + }, + { + "type": "text", + "bbox": [ + 0.159, + 0.49, + 0.241, + 0.503 + ], + "angle": 0, + "content": "* Example 2:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.507, + 0.267, + 0.52 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.523, + 0.802, + 0.537 + ], + "angle": 0, + "content": "Ccde Yuomet: 2024-PU-001234; Naup Vjpg: Vsehe/Dcecv-Ky; Qintni Dcee: Oeeodpr 19, 2024" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.541, + 0.258, + 0.552 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": 
"text", + "bbox": [ + 0.217, + 0.556, + 0.775, + 0.57 + ], + "angle": 0, + "content": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024" + }, + { + "type": "text", + "bbox": [ + 0.159, + 0.573, + 0.241, + 0.586 + ], + "angle": 0, + "content": "* Example 3:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.589, + 0.267, + 0.602 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.606, + 0.867, + 0.62 + ], + "angle": 0, + "content": "tci_koepeihtet: VIN-2023-CMC456, tci_tpcqcdu: jecc: 2023, dtceuu: Rcoepsupd, rgqupo_kdswpd: 620.00" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.624, + 0.258, + 0.634 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.639, + 0.841, + 0.653 + ], + "angle": 0, + "content": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.656, + 0.19, + 0.668 + ], + "angle": 0, + "content": "Input:" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.673, + 0.237, + 0.685 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.689, + 0.46, + 0.703 + ], + "angle": 0, + "content": "Lgoic, cy Cxettccy, ceutgdg ky Nzs Lniplgd." 
+ }, + { + "type": "text", + "bbox": [ + 0.159, + 0.706, + 0.227, + 0.717 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "image_caption", + "bbox": [ + 0.309, + 0.734, + 0.688, + 0.749 + ], + "angle": 0, + "content": "Figure 14: Enhanced Prompt (Vigenère - 3shot - Letter)" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5954" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.127, + 0.263, + 0.214, + 0.277 + ], + "angle": 0, + "content": "Example C.8" + }, + { + "type": "title", + "bbox": [ + 0.127, + 0.283, + 0.233, + 0.296 + ], + "angle": 0, + "content": "Background:" + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.299, + 0.874, + 0.313 + ], + "angle": 0, + "content": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.316, + 0.275, + 0.329 + ], + "angle": 0, + "content": "Think step by step." + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.332, + 0.736, + 0.346 + ], + "angle": 0, + "content": "Provide the Plaintext result in the format text, where text is the decrypted text." + }, + { + "type": "title", + "bbox": [ + 0.127, + 0.35, + 0.257, + 0.363 + ], + "angle": 0, + "content": "Algorithm Flow:" + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.366, + 0.502, + 0.379 + ], + "angle": 0, + "content": "Reverses the \\(^{**}\\) Plaintiff\\*\\* to create the \\(^{**}\\) Ciphertext\\*\\*." 
+ }, + { + "type": "title", + "bbox": [ + 0.127, + 0.383, + 0.216, + 0.395 + ], + "angle": 0, + "content": "Examples:" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.399, + 0.241, + 0.412 + ], + "angle": 0, + "content": "* Example 1:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.416, + 0.267, + 0.429 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.219, + 0.432, + 0.731, + 0.445 + ], + "angle": 0, + "content": "123456789Y :rebmuN tropssaP ;0991 ,51 yluJ :htriB fo etaD ;nosnhoJ ylimE :emaN" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.45, + 0.257, + 0.46 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.465, + 0.732, + 0.478 + ], + "angle": 0, + "content": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.482, + 0.241, + 0.495 + ], + "angle": 0, + "content": "* Example 2:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.499, + 0.267, + 0.511 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.515, + 0.773, + 0.528 + ], + "angle": 0, + "content": "4202,91 rebotcO :etaD gniliF ;nI-kaerB/tfehT :epyT ESA C;432100-SP-4202 :rebmuN ESA C" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.532, + 0.257, + 0.543 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.548, + 0.775, + 0.562 + ], + "angle": 0, + "content": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.565, + 0.241, + 0.577 + ], + "angle": 0, + "content": "* Example 3:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.582, + 0.267, + 0.594 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.598, + 0.85, + 0.611 + ], + "angle": 0, + "content": "}00.026 
:deussi_dnufer,dessecorP:sutats,3202:raey{sdrocer_xat,654CBA-3202-NXT:reifitnedi_xat" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.615, + 0.257, + 0.626 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.631, + 0.84, + 0.644 + ], + "angle": 0, + "content": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00" + }, + { + "type": "title", + "bbox": [ + 0.127, + 0.648, + 0.19, + 0.66 + ], + "angle": 0, + "content": "Input:" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.665, + 0.236, + 0.677 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.681, + 0.458, + 0.694 + ], + "angle": 0, + "content": ".selegnAsoL ni sediser,naciremAna ,aideL" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.698, + 0.227, + 0.709 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "image_caption", + "bbox": [ + 0.312, + 0.726, + 0.685, + 0.74 + ], + "angle": 0, + "content": "Figure 15:Enhanced Prompt (Reverse -3shot-Letter)" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.52, + 0.941 + ], + "angle": 0, + "content": "5955" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.255, + 0.214, + 0.268 + ], + "angle": 0, + "content": "Example C.9" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.275, + 0.233, + 0.287 + ], + "angle": 0, + "content": "Background:" + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.291, + 0.874, + 0.305 + ], + "angle": 0, + "content": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.308, + 0.275, + 0.321 + ], + "angle": 0, + "content": "Think step by step." + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.324, + 0.736, + 0.338 + ], + "angle": 0, + "content": "Provide the Plaintext result in the format text, where text is the decrypted text." 
+ }, + { + "type": "title", + "bbox": [ + 0.128, + 0.341, + 0.256, + 0.354 + ], + "angle": 0, + "content": "Algorithm Flow:" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.357, + 0.873, + 0.388 + ], + "angle": 0, + "content": "For each pair of letters in the \\(^{**}\\) Plaintext\\*\\*, their positions are swapped to produce the \\(^{**}\\) Ciphertext\\*\\*. If the number of letters is odd, the last letter remains in its original position." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.391, + 0.217, + 0.404 + ], + "angle": 0, + "content": "Examples:" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.408, + 0.242, + 0.42 + ], + "angle": 0, + "content": "* Example 1:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.424, + 0.267, + 0.437 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.44, + 0.731, + 0.454 + ], + "angle": 0, + "content": "aNem :mEli yoJnhos;nD ta efoB riht :uJy11 ,51 99;0P sapsro tuNbmre :9Y78563412" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.457, + 0.258, + 0.469 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.473, + 0.732, + 0.487 + ], + "angle": 0, + "content": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.49, + 0.242, + 0.503 + ], + "angle": 0, + "content": "* Example 2:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.507, + 0.267, + 0.52 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.523, + 0.772, + 0.536 + ], + "angle": 0, + "content": "aCesN mueb:r2 20-4SP0-1032;4C sa eyTep :hTfe/trBae-knI ;iFilgnD ta:eO tcbore1 ,92 204" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.541, + 0.258, + 0.552 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.556, + 0.775, + 0.57 + ], + "angle": 0, + "content": "Case Number: 2024-PS-001234; 
Case Type: Theft/In; Filing Date: October 19, 2024" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.573, + 0.242, + 0.586 + ], + "angle": 0, + "content": "* Example 3:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.589, + 0.267, + 0.602 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.606, + 0.836, + 0.619 + ], + "angle": 0, + "content": "at_xdineititre:XT-N0232A-CB54,6t xar_cerosd :yae:r2 20,3s atut:sP orecssde ,erufdni_sseu:d6 020.0" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.623, + 0.258, + 0.634 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.639, + 0.841, + 0.653 + ], + "angle": 0, + "content": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.656, + 0.19, + 0.668 + ], + "angle": 0, + "content": "Input:" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.673, + 0.237, + 0.685 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.689, + 0.454, + 0.702 + ], + "angle": 0, + "content": "eLid,aa nmArecina ,erised sniL soA gnlese." 
+ }, + { + "type": "text", + "bbox": [ + 0.157, + 0.706, + 0.227, + 0.717 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "image_caption", + "bbox": [ + 0.304, + 0.734, + 0.694, + 0.749 + ], + "angle": 0, + "content": "Figure 16: Enhanced Prompt (SwapPairs - 3shot - Letter)" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5956" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.23, + 0.221, + 0.242 + ], + "angle": 0, + "content": "Example C.10" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.25, + 0.233, + 0.262 + ], + "angle": 0, + "content": "Background:" + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.266, + 0.874, + 0.279 + ], + "angle": 0, + "content": "Decrypt the provided Ciphertext and return the corresponding Plaintext following the given algorithm flow and examples." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.283, + 0.275, + 0.297 + ], + "angle": 0, + "content": "Think step by step." + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.299, + 0.735, + 0.313 + ], + "angle": 0, + "content": "Provide the Plaintext result in the format text, where text is the decrypted text." + }, + { + "type": "title", + "bbox": [ + 0.127, + 0.316, + 0.256, + 0.329 + ], + "angle": 0, + "content": "Algorithm Flow:" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.332, + 0.872, + 0.363 + ], + "angle": 0, + "content": "This encryption method converts each letter of the **Plaintext** into two letters in the **Ciphertext** such that the average of their ASCII values equals the ASCII value of the original letter." 
+ }, + { + "type": "title", + "bbox": [ + 0.128, + 0.366, + 0.217, + 0.379 + ], + "angle": 0, + "content": "Examples:" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.383, + 0.242, + 0.395 + ], + "angle": 0, + "content": "* Example 1:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.399, + 0.267, + 0.412 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.415, + 0.874, + 0.445 + ], + "angle": 0, + "content": "MOaalndf: DFlnhjkmxz IKnpgimortnpmo; CEaasudf npeg AChjqssugi: IKtvkmxz 15, 1990; OQaartrtoqnacdfqx: XZ987654321" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.449, + 0.257, + 0.46 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.465, + 0.732, + 0.478 + ], + "angle": 0, + "content": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.482, + 0.241, + 0.495 + ], + "angle": 0, + "content": "* Example 2:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.499, + 0.267, + 0.511 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.515, + 0.872, + 0.545 + ], + "angle": 0, + "content": "BDaartdf MOtvlnacdfqs: 2024-OQRT-001234; BDaartdf SUxzoqdf: SUgidfgsu/ACqsdfaajl-HJmo; h CEaesudf: NPbdsunpacdfqs 19, 2024" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.549, + 0.257, + 0.56 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.564, + 0.774, + 0.578 + ], + "angle": 0, + "content": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.581, + 0.241, + 0.594 + ], + "angle": 0, + "content": "* Example 3:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.599, + 0.267, + 0.611 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.614, + 0.874, + 0.629 + ], + 
"angle": 0, + "content": "suaawy_hjcedfmosuhjeghjdfqs: SUWYMO-2023-AAACBD456, suaawy_qsdfbnpqscert: xzdfaaqs: 2023," + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.631, + 0.55, + 0.644 + ], + "angle": 0, + "content": "rtsuasutvrt: OQqsnpbbdftrtdfce, qsdfgtvmoce_hjrttrtvdfce: 620.00" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.648, + 0.257, + 0.659 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.664, + 0.84, + 0.677 + ], + "angle": 0, + "content": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.681, + 0.19, + 0.693 + ], + "angle": 0, + "content": "Input:" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.697, + 0.237, + 0.71 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.713, + 0.72, + 0.727 + ], + "angle": 0, + "content": "KMdfcehjaa, aamo AAlndfqshjbdaamo, qsdfrthjcedfrh jhmo KMnprt AAmofhdfkmdfrt." + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.731, + 0.227, + 0.742 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "image_caption", + "bbox": [ + 0.291, + 0.759, + 0.706, + 0.773 + ], + "angle": 0, + "content": "Figure 17: Enhanced Prompt (DualAvgCode - 3shot - Letter)" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.52, + 0.941 + ], + "angle": 0, + "content": "5957" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.255, + 0.22, + 0.268 + ], + "angle": 0, + "content": "Example C.11" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.275, + 0.233, + 0.287 + ], + "angle": 0, + "content": "Background:" + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.291, + 0.874, + 0.305 + ], + "angle": 0, + "content": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples." 
+ }, + { + "type": "text", + "bbox": [ + 0.157, + 0.308, + 0.275, + 0.321 + ], + "angle": 0, + "content": "Think step by step." + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.324, + 0.736, + 0.338 + ], + "angle": 0, + "content": "Provide the Plaintext result in the format text, where text is the decrypted text." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.341, + 0.256, + 0.354 + ], + "angle": 0, + "content": "Algorithm Flow:" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.357, + 0.872, + 0.387 + ], + "angle": 0, + "content": "For each letter in the \\(^{**}\\) Plaintext\\*\\*: - If the ASCII value is even, add 1 to it to get the corresponding character in the \\(^{**}\\) Ciphertext\\*. - If the ASCII value is odd, subtract 1 to get the new character in the \\(^{**}\\) Ciphertext\\*." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.391, + 0.216, + 0.404 + ], + "angle": 0, + "content": "Examples:" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.407, + 0.242, + 0.42 + ], + "angle": 0, + "content": "* Example 1:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.424, + 0.267, + 0.437 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.44, + 0.745, + 0.454 + ], + "angle": 0, + "content": "Ozld: Dlhmax Kniorno; Ezud ng Chsui: Ktmx 15, 1990; Qzrrqnsu Otlcds: X987654321" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.457, + 0.258, + 0.469 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.473, + 0.733, + 0.487 + ], + "angle": 0, + "content": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.49, + 0.242, + 0.503 + ], + "angle": 0, + "content": "* Example 2:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.507, + 0.267, + 0.52 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.523, + 0.804, + 0.537 + ], + 
"angle": 0, + "content": "Bzrd Otlcds: 2024-QR-001234; Bzrd Uxqd: Uidgu/Csdzj-Ho; Ghmhof Ezud: Nbuncds 19, 2024" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.541, + 0.258, + 0.552 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.556, + 0.775, + 0.57 + ], + "angle": 0, + "content": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.573, + 0.242, + 0.586 + ], + "angle": 0, + "content": "* Example 3:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.589, + 0.267, + 0.603 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.606, + 0.872, + 0.619 + ], + "angle": 0, + "content": "uzy_hedoughghds: UYO-2023-ZCB456, uzy_sdbnser: xdzs: 2023, ruzutr: Qsnbdrrde, sdgtoe_hrrte: 620.00" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.623, + 0.258, + 0.634 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.639, + 0.841, + 0.653 + ], + "angle": 0, + "content": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.656, + 0.19, + 0.668 + ], + "angle": 0, + "content": "Input:" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.673, + 0.237, + 0.685 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.689, + 0.475, + 0.702 + ], + "angle": 0, + "content": "Mdehz, zo Zldshbzo, sdrhedr ho Mnr Zofdmdr." 
+ }, + { + "type": "text", + "bbox": [ + 0.158, + 0.706, + 0.227, + 0.717 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "image_caption", + "bbox": [ + 0.303, + 0.734, + 0.695, + 0.749 + ], + "angle": 0, + "content": "Figure 18: Enhanced Prompt (ParityShift - 3shot - Letter)" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5958" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.255, + 0.221, + 0.268 + ], + "angle": 0, + "content": "Example C.12" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.275, + 0.233, + 0.287 + ], + "angle": 0, + "content": "Background:" + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.291, + 0.874, + 0.305 + ], + "angle": 0, + "content": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples." + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.308, + 0.275, + 0.321 + ], + "angle": 0, + "content": "Think step by step." + }, + { + "type": "text", + "bbox": [ + 0.156, + 0.324, + 0.736, + 0.338 + ], + "angle": 0, + "content": "Provide the Plaintext result in the format text, where text is the decrypted text." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.341, + 0.256, + 0.354 + ], + "angle": 0, + "content": "Algorithm Flow:" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.357, + 0.872, + 0.388 + ], + "angle": 0, + "content": "The algorithm splits the \\(^{**}\\) Plaintext\\*\\* into words based on spaces. Each word is then individually encrypted using the Caesar cipher, resulting in the \\(^{**}\\) ciphertext\\*\\*." 
+ }, + { + "type": "title", + "bbox": [ + 0.128, + 0.391, + 0.216, + 0.404 + ], + "angle": 0, + "content": "Examples:" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.407, + 0.242, + 0.42 + ], + "angle": 0, + "content": "* Example 1:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.424, + 0.267, + 0.437 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.44, + 0.731, + 0.453 + ], + "angle": 0, + "content": "e:Nam lyEmi nson;Joh eDat fo th:Bir yJul 15,0;199 sportPas ber:Number 7654321Y98" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.457, + 0.258, + 0.469 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.473, + 0.732, + 0.487 + ], + "angle": 0, + "content": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.49, + 0.242, + 0.503 + ], + "angle": 0, + "content": "* Example 2:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.507, + 0.267, + 0.52 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.523, + 0.772, + 0.536 + ], + "angle": 0, + "content": "eCas ber:Num 4-PS-001234;202 eCas e:Typ ft/Break-In;The ingFil e:Dat oberOct 19, 4202" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.541, + 0.258, + 0.552 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.556, + 0.775, + 0.57 + ], + "angle": 0, + "content": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.573, + 0.242, + 0.586 + ], + "angle": 0, + "content": "* Example 3:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.589, + 0.267, + 0.602 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.605, + 0.836, + 0.619 + ], + "angle": 0, + "content": "identifier:tax -2023-ABC456,TXNRecords:tax ar:ye 
3,202 tus:sta cessed,Pro und_iuied:ref .00620" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.623, + 0.258, + 0.634 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.639, + 0.84, + 0.652 + ], + "angle": 0, + "content": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.656, + 0.19, + 0.669 + ], + "angle": 0, + "content": "Input:" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.673, + 0.237, + 0.686 + ], + "angle": 0, + "content": "- Ciphertext:" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.689, + 0.458, + 0.703 + ], + "angle": 0, + "content": "ia,Led na rican,Ame idesres ni Los eles.Ang" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.706, + 0.227, + 0.717 + ], + "angle": 0, + "content": "- Plaintiff:" + }, + { + "type": "image_caption", + "bbox": [ + 0.304, + 0.734, + 0.694, + 0.749 + ], + "angle": 0, + "content": "Figure 19: Enhanced Prompt (WordShift - 3shot - Letter)" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5959" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.319, + 0.083, + 0.679, + 0.098 + ], + "angle": 0, + "content": "Table 12: Results on CipherBank(Enhanced Prompt)" + }, + { + "type": "table", + "bbox": [ + 0.118, + 0.108, + 0.884, + 0.361 + ], + "angle": 0, + "content": "
ModelSubstitution CiphersTransposition CiphersCustom CiphersCipher Score
Rot13AtbashPolybiusVigenèreReverseSwapPairsDualAvgCodeParityShiftWordShiftAccuracyavg
Open-source Chat Models
Mixtral-8x22B-v0.10.760000.3802.670.380.380.51
Qwen2.5-72B-Instruct12.609.1600002.290.381.532.88
Llama-3.1-70B-Instruct2.671.15001.530.381.15000.76
Llama-3.3-70B-Instruct4.581.5300.381.1501.15000.98
DeepSeek-V341.6027.860.380.3865.955.3412.660.765.1717.79
Closed-source Models
GPT-4o-mini-2024-07-1821.7619.0800.384.3900005.07
GPT-4o-2024-08-0645.4224.0500.7651.538.401.911.1510.3115.95
GPT-4o-2024-11-2045.4241.980053.638.023.821.159.5418.17
gemini-1.5-pro63.695.730.760.3814.122.670.381.9110.6911.15
gemini-2.0-flash-exp45.0422.902.290.3846.564.583.8201.1514.08
Claude-Sonnet-3.5-102292.7582.0678.242.4879.399.732.4862.0244.8550.44
Reasoning Models
QwQ-32B-Preview1.913.052.670002.670.380.381.23
DeepSeek-R188.3786.5472.730.7646.9675.0173.1774.421.5157.72
gemini-2.0-flash-thinking37.9819.0910.50055.344.964.770.386.1115.46
ol-mini-2024-09-1254.2072.1450.00.7611.0718.7047.3349.627.2534.56
" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.384, + 0.49, + 0.463 + ], + "angle": 0, + "content": "sus externally sourced structured text (e.g., quotes from Shakespeare's works). The structured text exhibits greater linguistic familiarity, while the privacy-sensitive data represents real-world encryption needs, lacking inherent semantic patterns." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.464, + 0.49, + 0.691 + ], + "angle": 0, + "content": "As shown in Table 13 and Table 14, models generally perform better on structured text, suggesting that they leverage linguistic priors rather than strictly following decryption rules. When encountering encrypted text with recognizable patterns, models tend to shortcut reasoning, aligning decoded fragments with plausible linguistic structures instead of strictly adhering to learned transformation rules. Conversely, for less structured, domain-specific text, models struggle to infer decryption patterns, reinforcing the advantage of CipherBank's privacy-sensitive dataset, which forces models to engage in independent reasoning rather than rely on pretraining biases." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.702, + 0.283, + 0.718 + ], + "angle": 0, + "content": "D Error Analysis" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.728, + 0.32, + 0.742 + ], + "angle": 0, + "content": "D.1 Error Classification" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.749, + 0.49, + 0.83 + ], + "angle": 0, + "content": "This section defines the error categories observed in model decryption outputs. These classifications help identify systematic failure patterns and provide insights into how models approach cryptographic reasoning." + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.842, + 0.49, + 0.922 + ], + "angle": 0, + "content": "(A) Omission/Insertion: The model output contains missing or extra characters, words, or punctuation compared to the reference plaintext. 
These errors indicate incomplete decryption or unintended modifications, leading to" + }, + { + "type": "text", + "bbox": [ + 0.545, + 0.384, + 0.763, + 0.399 + ], + "angle": 0, + "content": "partial but inaccurate results." + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.41, + 0.884, + 0.505 + ], + "angle": 0, + "content": "- (B) Name Decryption Error: The decryption result is correct except for the name part, which remains incorrect or partially distorted. This suggests challenges in handling named entities, possibly due to memorization effects or entity-based biases." + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.516, + 0.885, + 0.645 + ], + "angle": 0, + "content": "- (C) Semantic Inference: The model makes errors based on semantic reasoning rather than strictly following decryption rules. Instead of decoding symbols precisely, the model hallucinates plausible but incorrect outputs that fit the general meaning of the sentence. This indicates a tendency to prioritize linguistic coherence over strict decryption fidelity." + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.655, + 0.884, + 0.766 + ], + "angle": 0, + "content": "- (D) Reorganization: The output preserves the exact meaning of the reference plaintext but rearranges the sentence structure. This suggests that the model prioritizes fluency over strict character-level fidelity, leading to errors in cryptographic tasks where precision is essential." + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.777, + 0.885, + 0.922 + ], + "angle": 0, + "content": "- (E) Reasoning Failure: The model output is significantly different from the reference, and decryption is essentially unsuccessful. This suggests a fundamental failure in identifying encryption patterns, leading to outputs that bear little resemblance to the expected plaintext. This category includes cases where the model fails to infer transformation rules or apply correct decryption strategies." 
+ }, + { + "type": "list", + "bbox": [ + 0.532, + 0.41, + 0.885, + 0.922 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5960" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.291, + 0.083, + 0.707, + 0.098 + ], + "angle": 0, + "content": "Table 13: Decryption Performance on Privacy-Sensitive Data" + }, + { + "type": "table", + "bbox": [ + 0.115, + 0.108, + 0.884, + 0.248 + ], + "angle": 0, + "content": "
ModelRot13AtbashPolybiusVigenèreReverseSwapDualAvgCodeParityShiftWordShiftAccuracyavg
Open-source Models
DeepSeek-V324.3415.6415.70033.723.5104.3515.6412.54
DeepSeek-R157.8871.0271.554.3533.574.35012.718.7029.35
Closed-source Models
GPT-4o-2024-11-2021.7421.740030.438.700013.0410.63
Gemini-2.0-Flash-Exp47.834.354.35052.1704.354.3513.0414.49
Claude-Sonnet-3.5-102286.9678.2665.224.3591.3013.044.3552.1747.8349.28
Gemini-2.0-Flash-Thinking39.134.350060.87004.3530.4315.46
o1-Mini-2024-09-1260.8786.9669.5708.70013.0417.394.3528.99
" + }, + { + "type": "table_caption", + "bbox": [ + 0.315, + 0.26, + 0.682, + 0.275 + ], + "angle": 0, + "content": "Table 14: Decryption Performance on Structured Text" + }, + { + "type": "table", + "bbox": [ + 0.115, + 0.285, + 0.884, + 0.421 + ], + "angle": 0, + "content": "
ModelRot13AtbashPolybiusVigenèreReverseSwapPairDualAvgCodeParityShiftWordShiftAccuracyavg
Open-source Models
DeepSeek-V376.1224.0315.70052.1729.40012.7155.1329.47
DeepSeek-R184.5185.041007.5979.108.708.7015.6430.4346.63
Closed-source Models
GPT-4o-2024-11-2078.2639.134.35086.9621.7404.3543.4830.92
Gemini-2.0-Flash-Exp86.9613.044.35086.968.70017.3943.4828.99
Claude-Sonnet-3.5-102291.3095.6595.654.3510052.178.7078.2695.6569.08
Gemini-2.0-Flash-Thinking86.9613.048.70069.5717.390052.1727.54
o1-Mini-2024-09-1282.6195.6578.26060.874.3513.0417.3943.4843.96
" + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.446, + 0.489, + 0.478 + ], + "angle": 0, + "content": "- (F) Other: Miscellaneous errors that do not fit into the defined categories." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.494, + 0.49, + 0.573 + ], + "angle": 0, + "content": "This classification framework provides a structured approach to analyzing decryption errors, helping to pinpoint systematic weaknesses and guide future improvements in cryptographic reasoning models." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.588, + 0.439, + 0.604 + ], + "angle": 0, + "content": "D.2 Examples of Different Error Types" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.611, + 0.49, + 0.756 + ], + "angle": 0, + "content": "To further illustrate the types of decryption errors encountered in our evaluation, we provide concrete examples corresponding to each error category. These cases demonstrate how models fail in various aspects of decryption, including omission-insertion, name decryption errors, semantic inference, reorganization, reasoning failures, and other anomalies. Example D.1 - D6 showcase representative examples of each error type." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.769, + 0.436, + 0.784 + ], + "angle": 0, + "content": "D.3 Detailed Error Distribution Tables" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.792, + 0.489, + 0.871 + ], + "angle": 0, + "content": "Tables 15-20 present a detailed breakdown of error distributions across different encryption algorithms for the six selected models. From these results, we identify several common trends and model-specific differences." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.874, + 0.489, + 0.922 + ], + "angle": 0, + "content": "Challenges in Name Decryption and Symbolic Reasoning. 
Across all models, name decryption errors remain prevalent, particularly in Atbash and" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.446, + 0.885, + 0.542 + ], + "angle": 0, + "content": "Polybius, indicating persistent difficulties in handling entity-based transformations. Additionally, models struggle with key-based and transposition ciphers such as Vigenère and SwapPairs, suggesting limitations in tracking multi-step transformations and generalizing decryption strategies." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.543, + 0.886, + 0.67 + ], + "angle": 0, + "content": "Semantic Overreliance vs. Overthinking in Decryption. Chat models often exhibit semantic inference errors, where decrypted outputs align with linguistic patterns rather than encryption rules. In contrast, reasoning models tend to overthink simple tasks, leading to unnecessary self-correction loops that degrade performance in straightforward ciphers like Reverse." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.672, + 0.886, + 0.768 + ], + "angle": 0, + "content": "Structural Alignment and Insertion Errors. Frequent omission and insertion errors in WordShift and Reverse ciphers highlight difficulties in preserving character order. This suggests that models rely on semantic priors rather than strict symbolic reasoning, leading to misaligned outputs." + }, + { + "type": "title", + "bbox": [ + 0.528, + 0.769, + 0.662, + 0.785 + ], + "angle": 0, + "content": "Key Takeaways:" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.797, + 0.884, + 0.845 + ], + "angle": 0, + "content": "- Chat models (Claude, Gemini) perform well in substitution ciphers but struggle with complex rule-based encryption." + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.858, + 0.885, + 0.922 + ], + "angle": 0, + "content": "- Reasoning models (DeepSeek-R1, o1) maintain better structural accuracy but underperform in transposition-based and key-dependent ciphers." 
+ }, + { + "type": "list", + "bbox": [ + 0.532, + 0.797, + 0.885, + 0.922 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.519, + 0.941 + ], + "angle": 0, + "content": "5961" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.127, + 0.117, + 0.427, + 0.131 + ], + "angle": 0, + "content": "Example D.1: Error Type: Omission/Insertion" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.137, + 0.203, + 0.15 + ], + "angle": 0, + "content": "Example 1:" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.153, + 0.424, + 0.166 + ], + "angle": 0, + "content": "Predictions: Card Number: ID 1245-6789-0123" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.169, + 0.515, + 0.182 + ], + "angle": 0, + "content": "References: Clark holds the ID Card Number 1245-6789-0123." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.193, + 0.203, + 0.206 + ], + "angle": 0, + "content": "Example 2:" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.21, + 0.424, + 0.223 + ], + "angle": 0, + "content": "Predictions: Card Number: ID 1245-6789-0123" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.226, + 0.515, + 0.239 + ], + "angle": 0, + "content": "References: Clark holds the ID Card Number 1245-6789-0123." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.25, + 0.203, + 0.263 + ], + "angle": 0, + "content": "Example 3:" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.266, + 0.504, + 0.279 + ], + "angle": 0, + "content": "Predictions: Salary Amount: $67,000; Pay Date: 2023-10-25" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.282, + 0.504, + 0.295 + ], + "angle": 0, + "content": "References: Salary Amount: $67,000, Pay Date: 2023-10-25." 
+ }, + { + "type": "title", + "bbox": [ + 0.128, + 0.376, + 0.459, + 0.39 + ], + "angle": 0, + "content": "Example D.2: Error Type: Name Decryption Error" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.396, + 0.203, + 0.409 + ], + "angle": 0, + "content": "Example 1:" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.412, + 0.478, + 0.425 + ], + "angle": 0, + "content": "Predictions: Learn, an American, inside on Los Angeles." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.429, + 0.477, + 0.442 + ], + "angle": 0, + "content": "References: Ledia, an American, resides in Los Angeles." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.452, + 0.203, + 0.466 + ], + "angle": 0, + "content": "Example 2:" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.469, + 0.871, + 0.499 + ], + "angle": 0, + "content": "Predictions: Individual ID: A1234567; Name: John Doe; Age: 34; Gender Identity: Cisgender 16k11.2 Location, Country State Citizenship." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.502, + 0.871, + 0.532 + ], + "angle": 0, + "content": "References: Individual ID: A1234567; Name: Jane Doe; Age: 34; Genetic Testing: Chromosome 16p11.2 Deletion, Celiac Disease Predisposition." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.542, + 0.203, + 0.555 + ], + "angle": 0, + "content": "Example 3:" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.558, + 0.493, + 0.572 + ], + "angle": 0, + "content": "Predictions: Handed lost the passport Number A12345678." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.575, + 0.482, + 0.588 + ], + "angle": 0, + "content": "References: Dean holds the passport number A12345678." 
+ }, + { + "type": "title", + "bbox": [ + 0.128, + 0.668, + 0.428, + 0.682 + ], + "angle": 0, + "content": "Example D.3: Error Type: Semantic Inference" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.688, + 0.203, + 0.701 + ], + "angle": 0, + "content": "Example 1:" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.705, + 0.873, + 0.733 + ], + "angle": 0, + "content": "Predictions: Jessica Brown, Bachelor of Biology, GPA: 3.9, Graduated 2023, Skills: Genetics, Microbiology, Ecology, Bioinformatics." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.737, + 0.873, + 0.768 + ], + "angle": 0, + "content": "References: Jessica Brown, Bachelors in Biology, GPA: 3.9, Graduated 2023, Courses: Genetics, Microbiology, Ecology, Biochemistry." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.777, + 0.203, + 0.79 + ], + "angle": 0, + "content": "Example 2:" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.794, + 0.873, + 0.808 + ], + "angle": 0, + "content": "Predictions: Patent-pending design specification PR2023_KURITY, Company Z, including batch production requirements." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.811, + 0.854, + 0.824 + ], + "angle": 0, + "content": "References: Patent-pending design specification PR2023_KURITY, Company Z, including batch performance analysis." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.834, + 0.203, + 0.847 + ], + "angle": 0, + "content": "Example 3:" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.851, + 0.661, + 0.864 + ], + "angle": 0, + "content": "Predictions: Office ID: P12345, Name: John, Position: Manager, Department: Finance." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.867, + 0.675, + 0.88 + ], + "angle": 0, + "content": "References: Officer ID: P12345, Name: John, Position: Sergeant, Department: Homicide." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5962" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.106, + 0.404, + 0.12 + ], + "angle": 0, + "content": "Example D.4: Error Type: Reorganization" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.126, + 0.204, + 0.139 + ], + "angle": 0, + "content": "Example 1:" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.142, + 0.578, + 0.156 + ], + "angle": 0, + "content": "Predictions: Name: Ledia; Nationality: American; Address: Los Angeles" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.159, + 0.477, + 0.172 + ], + "angle": 0, + "content": "References: Ledia, an American, resides in Los Angeles." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.182, + 0.203, + 0.195 + ], + "angle": 0, + "content": "Example 2:" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.199, + 0.527, + 0.212 + ], + "angle": 0, + "content": "Predictions: Passport Number: A12345678; Holder Name: Dean" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.215, + 0.482, + 0.228 + ], + "angle": 0, + "content": "References: Dean holds the passport number A12345678." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.239, + 0.203, + 0.252 + ], + "angle": 0, + "content": "Example 3:" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.255, + 0.673, + 0.268 + ], + "angle": 0, + "content": "Predictions: Name: Peter; Date of Birth: April 23, 1985; Passport Number: X123456789" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.271, + 0.736, + 0.285 + ], + "angle": 0, + "content": "References: Peter was born on April 23, 1985, and carries a passport with the number X123456789." 
+ }, + { + "type": "title", + "bbox": [ + 0.128, + 0.343, + 0.421, + 0.357 + ], + "angle": 0, + "content": "Example D.5: Error Type: Reasoning Failure" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.363, + 0.203, + 0.376 + ], + "angle": 0, + "content": "Example 1:" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.379, + 0.842, + 0.392 + ], + "angle": 0, + "content": "Predictions: Address: 123 Main St, Apt 4B, New York, NY, Zip Code: 10001, Phone: 555-1234, Unit: 101, Floor: 2." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.396, + 0.873, + 0.425 + ], + "angle": 0, + "content": "References: Officer: Lisa Grant, Sergeant, Downtown Precinct, Patrol Car: VG2301, Equipment: Radar Gun, Model: RGX501, Weapon: Taser X2." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.436, + 0.203, + 0.449 + ], + "angle": 0, + "content": "Example 2:" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.452, + 0.443, + 0.465 + ], + "angle": 0, + "content": "Predictions: Welcome, once more, securely within." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.469, + 0.477, + 0.482 + ], + "angle": 0, + "content": "References: Ledia, an American, resides in Los Angeles." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.492, + 0.203, + 0.505 + ], + "angle": 0, + "content": "Example 3:" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.508, + 0.873, + 0.538 + ], + "angle": 0, + "content": "Predictions: Passport ID: 123456789; Expiry Date: Emily, 38, Issued By Authority, Renewal By Agency. Valid Until 2025, Expiry Passport." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.541, + 0.872, + 0.572 + ], + "angle": 0, + "content": "References: Patient ID: MRN456789; Medical History: Emily, 38, diagnosed with asthma, treated with inhalers. Allergic to dust mites, pollen." 
+ }, + { + "type": "title", + "bbox": [ + 0.128, + 0.63, + 0.343, + 0.644 + ], + "angle": 0, + "content": "Example D.6: Error Type: Other" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.65, + 0.203, + 0.662 + ], + "angle": 0, + "content": "Example 1:" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.665, + 0.873, + 0.695 + ], + "angle": 0, + "content": "Predictions: CookieID12345 maintain login status forUserID98765 on www.example.com, facilitating seamless access. Analyzing recordsUserID98765's engagement, deducting 500 page views and a click-through rate of \\(4.5\\%\\) across the session." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.699, + 0.873, + 0.745 + ], + "angle": 0, + "content": "References: CookieID12345 maintains login status forUserID98765 on www.example.com, facilitating seamless access. Analytics tracksUserID98765's engagement, documenting 500 page views and a click-through rate of \\(4.5\\%\\) across the session." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.755, + 0.203, + 0.768 + ], + "angle": 0, + "content": "Example 2:" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.772, + 0.873, + 0.785 + ], + "angle": 0, + "content": "Predictions: Code: Coordinates: Latitude Longitude: \\(38.251^{\\circ}\\mathrm{N}\\) -85.754 W, Latitude Longitude: 34.091 N, -118.493 W." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.788, + 0.842, + 0.801 + ], + "angle": 0, + "content": "References: Base Distribution: North Plains Base: \\(38.251^{\\circ}\\mathrm{N}\\), \\(-85.754^{\\circ}\\mathrm{W}\\), East Valley Site: \\(34.091^{\\circ}\\mathrm{N}\\), \\(-118.493^{\\circ}\\mathrm{W}\\)." 
+ }, + { + "type": "title", + "bbox": [ + 0.128, + 0.812, + 0.203, + 0.825 + ], + "angle": 0, + "content": "Example 3:" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.828, + 0.873, + 0.858 + ], + "angle": 0, + "content": "Predictions: Name: Alex Smith; Salary: $87,500; Pay Frequency: Biweekly; Position: Software Developer; Employee ID: EID-257846; Department: IT." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.861, + 0.873, + 0.891 + ], + "angle": 0, + "content": "References: Name: Alex Smith, Salary: $87,500, Pay Frequency: Biweekly, Position: Software Developer, Employee ID: EID-257846, Department: IT." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.52, + 0.941 + ], + "angle": 0, + "content": "5963" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.182, + 0.083, + 0.816, + 0.098 + ], + "angle": 0, + "content": "Table 15: Error Type Percentages for Different Algorithms in Claude-Sonnet-3.5-1022 Model" + }, + { + "type": "table", + "bbox": [ + 0.12, + 0.108, + 0.88, + 0.257 + ], + "angle": 0, + "content": "
AlgorithmOmission/InsertionName Decryption ErrorError Types
Semantic InferenceReorganizationReasoning FailureOther
Rot1333.3351.850.0011.113.700.00
Atbash15.7978.950.003.510.001.75
Polybius42.6245.900.0011.480.000.00
Vigenère2.7332.425.083.5256.250.00
Reverse39.2448.100.005.066.331.27
SwapPairs15.9838.522.052.8738.112.46
DualAvgCode6.8839.688.502.4341.301.21
ParityShift19.7970.834.173.122.080.00
WordShift51.9522.082.608.4412.342.60
" + }, + { + "type": "table_caption", + "bbox": [ + 0.218, + 0.269, + 0.779, + 0.284 + ], + "angle": 0, + "content": "Table 16: Error Type Percentages for Different Algorithms in DeepSeek-R1 Model" + }, + { + "type": "table", + "bbox": [ + 0.12, + 0.294, + 0.881, + 0.445 + ], + "angle": 0, + "content": "
AlgorithmOmission/InsertionName Decryption ErrorError Types
Semantic InferenceReorganizationReasoning FailureOther
Rot1340.0030.004.2921.431.432.86
Atbash42.5924.070.9329.630.002.78
Polybius48.6317.120.6821.928.902.74
Vigenère4.6018.012.682.3071.650.77
Reverse25.6419.661.7145.306.411.28
SwapPairs9.2025.293.072.3058.621.53
DualAvgCode25.6322.613.5228.6419.100.50
ParityShift7.0229.396.583.9552.190.88
WordShift29.1722.922.0825.4220.000.42
" + }, + { + "type": "table_caption", + "bbox": [ + 0.218, + 0.456, + 0.779, + 0.471 + ], + "angle": 0, + "content": "Table 17: Error Type Percentages for Different Algorithms in DeepSeek-V3 Model" + }, + { + "type": "table", + "bbox": [ + 0.12, + 0.481, + 0.881, + 0.632 + ], + "angle": 0, + "content": "
AlgorithmOmission/InsertionName Decryption ErrorError Types
Semantic InferenceReorganizationReasoning FailureOther
Rot1310.7355.9315.825.0811.860.56
Atbash8.0738.127.173.5941.261.79
Polybius5.4712.112.342.7376.950.39
Vigenère0.3820.772.690.7774.231.15
Reverse21.5040.195.6113.5518.220.93
SwapPairs1.9218.392.680.3876.250.38
DualAvgCode3.0712.643.452.6877.780.38
ParityShift1.9328.573.860.7764.480.39
WordShift27.8029.464.5617.0120.330.83
" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.643, + 0.783, + 0.658 + ], + "angle": 0, + "content": "Table 18: Error Type Percentages for Different Algorithms in gemini-1.5-pro Model" + }, + { + "type": "table", + "bbox": [ + 0.12, + 0.668, + 0.881, + 0.819 + ], + "angle": 0, + "content": "
AlgorithmOmission/InsertionName Decryption ErrorError Types
Semantic InferenceReorganizationReasoning FailureOther
Rot1312.9858.020.765.3422.140.76
Atbash1.1515.003.080.7778.851.15
Polybius4.2117.243.071.9271.651.92
Vigenère2.2914.893.440.7678.630.00
Reverse20.8533.198.9410.2126.380.43
SwapPairs6.4925.571.911.5363.361.15
DualAvgCode2.6813.034.601.9277.390.38
ParityShift3.0828.463.080.3864.230.77
WordShift34.2524.202.7418.7219.630.46
" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.842, + 0.489, + 0.907 + ], + "angle": 0, + "content": "- All models show high name decryption errors and reasoning failures in Vigenère and SwapPairs, highlighting gaps in symbolic reasoning and long-term dependency tracking." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.842, + 0.884, + 0.906 + ], + "angle": 0, + "content": "These observations reveal that no single model excels across all ciphers, emphasizing the need for advancements in structured reasoning and symbolic manipulation for decryption tasks. Future" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5964" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.238, + 0.083, + 0.759, + 0.098 + ], + "angle": 0, + "content": "Table 19: Error Type Percentages for Different Algorithms in o1-mini Model" + }, + { + "type": "table", + "bbox": [ + 0.12, + 0.108, + 0.885, + 0.259 + ], + "angle": 0, + "content": "
AlgorithmOmission/InsertionName Decryption ErrorError Types
Semantic InferenceReorganizationReasoning FailureOther
Rot1326.9538.3013.4817.021.422.84
Atbash37.3531.337.2316.876.021.20
Polybius30.9432.371.4425.188.631.44
Vigenère0.0021.4310.713.5764.290.00
Reverse12.7029.108.2032.3817.210.41
SwapPairs1.919.541.530.0086.640.38
DualAvgCode0.0018.520.003.7077.780.00
ParityShift4.5534.303.314.9652.480.41
WordShift11.5828.574.635.7949.030.39
" + }, + { + "type": "table_caption", + "bbox": [ + 0.255, + 0.27, + 0.741, + 0.285 + ], + "angle": 0, + "content": "Table 20: Error Type Percentages for Different Algorithms in o1 Model" + }, + { + "type": "table", + "bbox": [ + 0.12, + 0.295, + 0.885, + 0.446 + ], + "angle": 0, + "content": "
AlgorithmOmission/InsertionName Decryption ErrorError Types
Semantic InferenceReorganizationReasoning FailureOther
Rot1316.1928.574.765.7143.810.95
Atbash29.0949.095.4510.913.641.82
Polybius40.9128.796.0610.6112.121.52
Vigenère4.6236.151.541.1556.150.38
Reverse16.1425.563.5914.3538.571.79
SwapPairs5.2631.585.265.2652.630.00
DualAvgCode24.6233.853.082.3135.380.77
ParityShift4.0426.774.552.0262.120.51
WordShift30.8824.262.9418.3821.322.21
" + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.469, + 0.348, + 0.484 + ], + "angle": 0, + "content": "improvements could focus on:" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.497, + 0.49, + 0.675 + ], + "angle": 0, + "content": "- Minimizing the Impact of Semantic Bias in Logical Inference: Cryptographic reasoning tasks often necessitate abstract rule extraction rather than reliance on semantic interpretation. An excessive dependence on linguistic priors can impede the model's ability to identify underlying structural transformations, resulting in systematic errors. Future advancements should focus on reducing semantic interference to improve the extraction of abstract logical patterns." + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.686, + 0.49, + 0.844 + ], + "angle": 0, + "content": "- Enhancing Comparative Reasoning for Pattern Recognition: While many decryption tasks in CipherBank are straightforward for humans, models frequently fail to derive correct transformation rules from provided exemplars. Strengthening contrastive reasoning mechanisms can enable models to better differentiate encryption structures, facilitating more effective pattern recognition and decryption." + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.858, + 0.49, + 0.922 + ], + "angle": 0, + "content": "- Addressing Overthinking in Model Reasoning: Experimental results indicate that reasoning models exhibit superior performance on complex tasks but underperform on sim" + }, + { + "type": "list", + "bbox": [ + 0.137, + 0.497, + 0.49, + 0.922 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.545, + 0.469, + 0.885, + 0.646 + ], + "angle": 0, + "content": "pler problems. Analysis of inference trajectories reveals a tendency toward recursive self-evaluation, where models continuously revise their approach, even when a straightforward solution is available. 
For example, in the Reverse cipher, models occasionally attempt unnecessarily complex reasoning paths instead of applying direct positional transformations. Mitigating such overthinking behaviors could enhance efficiency and robustness in logical reasoning." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.657, + 0.884, + 0.722 + ], + "angle": 0, + "content": "Addressing these limitations will bridge the gap between linguistic fluency and structured cryptographic reasoning, making LLMs more robust in real-world encryption scenarios." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5965" + } + ] +] \ No newline at end of file diff --git a/2025/CipherBank_ Exploring the Boundary of LLM Reasoning Capabilities through Cryptography Challenge/f6a4ee7e-a5b2-4a68-bf9b-6d717be3f8fe_origin.pdf b/2025/CipherBank_ Exploring the Boundary of LLM Reasoning Capabilities through Cryptography Challenge/f6a4ee7e-a5b2-4a68-bf9b-6d717be3f8fe_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..02ae1380a778641acef301338748725251b25af5 --- /dev/null +++ b/2025/CipherBank_ Exploring the Boundary of LLM Reasoning Capabilities through Cryptography Challenge/f6a4ee7e-a5b2-4a68-bf9b-6d717be3f8fe_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c2c26f35bcd0fc26f1b226fe2e54c436ec0a5ab5f4853f731cb542f4c74ff1b +size 772601 diff --git a/2025/CipherBank_ Exploring the Boundary of LLM Reasoning Capabilities through Cryptography Challenge/full.md b/2025/CipherBank_ Exploring the Boundary of LLM Reasoning Capabilities through Cryptography Challenge/full.md new file mode 100644 index 0000000000000000000000000000000000000000..cf5310f1212599088817eef3e45000859096eae5 --- /dev/null +++ b/2025/CipherBank_ Exploring the Boundary of LLM Reasoning Capabilities through Cryptography Challenge/full.md @@ -0,0 +1,1381 @@ +Yu Li $^{1}$ , Qizhi Pei $^{1,2}$ , Mengyuan Sun $^{1}$ , 
Honglin Lin $^{1}$ , Chenlin Ming $^{1,3}$ , Xin Gao $^{1}$ , Jiang Wu $^{1}$ , Conghui He $^{1}$ , Lijun Wu $^{1*}$ + +$^{1}$ Shanghai Artificial Intelligence Laboratory + +$^{2}$ Renmin University of China $^{3}$ Shanghai Jiao Tong University + +{liyu1, heconghui, wulijun}@pjlab.org.cn + +https://cipherbankeva.github.io + +# Abstract + +Large language models (LLMs) have demonstrated remarkable capabilities, especially the recent advancements in reasoning, such as o1 and o3, pushing the boundaries of AI. Despite these impressive achievements in mathematics and coding, the reasoning abilities of LLMs in domains requiring cryptographic expertise remain underexplored. In this paper, we introduce CipherBank, a comprehensive benchmark designed to evaluate the reasoning capabilities of LLMs in cryptographic decryption tasks. CipherBank comprises 2,358 meticulously crafted problems, covering 262 unique plaintexts across 5 domains and 14 subdomains, with a focus on privacy-sensitive and real-world scenarios that necessitate encryption. From a cryptographic perspective, CipherBank incorporates 3 major categories of encryption methods, spanning 9 distinct algorithms, ranging from classical ciphers to custom cryptographic techniques. We evaluate state-of-the-art LLMs on CipherBank, e.g., GPT-4o, DeepSeek-V3, and cutting-edge reasoning-focused models such as o1 and DeepSeek-R1. Our results reveal significant gaps in reasoning abilities not only between general-purpose chat LLMs and reasoning-focused LLMs but also in the performance of current reasoning-focused models when applied to classical cryptographic decryption tasks, highlighting the challenges these models face in understanding and manipulating encrypted data. Through detailed analysis and error investigations, we provide several key observations that shed light on the limitations and potential improvement areas for LLMs in cryptographic reasoning. 
These findings underscore the need for continuous advancements in LLM reasoning capabilities. + +# 1 Introduction + +Large Language Models (LLMs) have revolutionized artificial intelligence by achieving state-of + +![](images/7e5d59106d41d9b0c2418dc813e35174fbce5dbcdc831356661e7a6d45f3346a.jpg) +Figure 1: Comprehensive Performance of SOTA Chat and Reasoning Models on CipherBank. + +the-art (SOTA) performance across diverse domains, from Natural Language Understanding (NLP) (Dong et al., 2019; Karanikolas et al., 2023; Sasaki et al., 2024) to complex problem-solving (Yao et al., 2024; Ge et al., 2023). Recent models, such as GPT-4o (Hurst et al., 2024) and Claude 3.5 (Anthropic, 2024), have demonstrated unprecedented versatility, excelling in tasks ranging from creative writing to technical analysis. A particularly notable advancement lies in the reasoning-enhanced LLMs, which have emerged as a critical benchmark for evaluating LLMs' intelligence and now can solve mathematical problems (Wu et al., 2024; Ahn et al., 2024; Liu et al., 2024c), debug intricate code (Lee et al., 2024; Zhong et al., 2024), and even engage in multi-step logical deduction (Sun et al., 2024; Wang et al., 2023) with human-like proficiency. For instance, specialized architectures like o1 (Jaech et al., 2024) and DeepSeek-R1 (Guo et al., 2025) have pushed the boundaries of AI reasoning, achieving breakthroughs in domains such as theorem proving (Yang et al., 2024b) and algorithmic optimization (Liu et al., 2024b). These achievements underscore the transformative potential of LLMs as general-purpose reasoning engines, capable of adapting to both broad and specialized challenges. 
+ +To quantify progress, the community has proposed numerous benchmarks targeting mathematical reasoning (e.g., MATH (Hendrycks et al., 2021a), AIME1, coding proficiency (e.g., HumanEval (Chen et al., 2021a), MBPP (Austin et al., 2021)), and general logical deduction (e.g., FOLO (Han et al., 2024), MMBench (Yuan Liu, 2023), CaLM (Chen et al., 2024). These testbeds have become indispensable tools for assessing model capabilities. + +Despite extensive evaluations in mathematics and coding, one critical domain remains underexplored: cryptographic decryption. Cryptographic reasoning (Shree et al., 2017) demands unique capabilities, including pattern recognition, algorithmic Reverse-engineering, and contextual understanding of security constraints (Schneier, 2002)—skills distinct from those tested in conventional benchmarks. This gap is particularly consequential, as cryptography lies at the heart of modern digital security (Konheim, 2007), with applications spanning privacy-preserving communication (Soomro et al., 2019), secure authentication (Rani et al., 2022), and data integrity (Sarkar et al., 2021). The absence of a rigorous benchmark for cryptographic reasoning not only limits the true understanding of LLM's reasoning ability but also hinders progress toward AI systems capable of contributing to security-critical contexts (e.g., jailbreaking (Wei et al., 2024)). OpenAI has scratched the surface of this challenge and put a demo2 when releasing their strong reasoning model o1, but no serious efforts have been made to reveal this challenge in the committee. + +To address this gap, we introduce CipherBank, the first comprehensive benchmark specially designed to evaluate LLMs' reasoning capabilities in cryptographic decryption tasks. 
CipherBank is meticulously constructed to reflect real-world scenarios requiring encryption, instead of general texts that may serve as a toy testbed, with 2,358 problems derived from 262 unique plaintexts across 5 domains (e.g., Personal Privacy, Financial Information) and 14 subdomains (e.g., Identity Information, Personal Income). As for cipher algorithms, it spans 3 major cryptographic categories—Substitution Ciphers (e.g., Rot13, Vigenère), Transposition Ciphers (e.g., Reverse, SwapPairs), and custom hybrid algo + +rithms—encompassing 9 distinct encryption methods, covering 5 difficulty levels (from Basic to Expert) to ensure a diverse range of challenges. By integrating privacy-sensitive contexts and multilayered cryptographic challenges, CipherBank provides a nuanced evaluation framework that captures both the complexity and practicality of real-world decryption tasks. + +We evaluate CipherBank on SOTA LLMs, including general-purpose models (GPT-4o (Hurst et al., 2024), DeepSeek-V3 (Liu et al., 2024a)) and reasoning-optimized models (o1 (Jaech et al., 2024), DeepSeek-R1 (Guo et al., 2025)). Results reveal striking limitations: even advanced models struggle with classical ciphers, achieving only 45.14 score on tasks solvable by human cryptanalysts. Notably, we observe a significant performance gap between general chat LLMs and specialized reasoning models, suggesting that current reasoning optimizations inadequately address cryptographic challenges. Besides, we also provide studies on different aspects for deep understandings, such as evaluate on noised plaintexts and different length of plaintexts. Observations show the limitations of current models in decryption reasoning, with chat and reasoning models each exhibiting distinct strengths and weaknesses in cryptographic tasks. 
These findings highlight the need for targeted improvements in LLMs' cryptographic reasoning, with implications for both AI safety (e.g., adversarial robustness) and applications in cybersecurity. + +# 2 CipherBank Construction + +CipherBank is a purpose-built benchmark designed to rigorously evaluate the reasoning capabilities of LLMs in cryptographic decryption tasks. It integrates three core components to ensure comprehensive coverage of real-world scenarios and cryptographic complexity: (1) diverse plaintexts meticulously constructed from multiple dimensions of real-world privacy-sensitive data, ensuring the decryption process aligns with practical requirements; (2) a comprehensive suite of encryption algorithms, including both traditional cryptographic methods and custom-designed algorithms, to thoroughly assess the model's reasoning, inductive, and computational capabilities from multiple perspectives; and (3) a structured problem set with rich metadata, enabling granular performance analysis and detailed error analysis based on the diverse properties of the plaintexts. + +![](images/55a6502770f6ba4f313d235f238ab47edc38299f957c41a17c063b28e5bad7bc.jpg) +Figure 2: Overview of CipherBank. CipherBank consists of simulated privacy data encrypted using various algorithms. The left side of the figure shows five domains, 14 subdomains, and selected tags. The right side displays three encryption categories, nine specific algorithms, and their corresponding difficulty levels. + +# 2.1 Plaintiff Data: Design, Sources, and Real-World Alignment + +To construct CipherBank, we meticulously analyze real-world encryption scenarios and categorize the corresponding data types into five primary domains: Personal Privacy Data, Enterprise Sensitive Data, Public Safety Data, Financial Asset Data and Internet Records. These domains are further refined into 14 subdomains (e.g., Health Information, Policy Data) to ensure comprehensive coverage of encryption needs. 
Inspired by UltraChat (Ding et al., 2023), we adopt a tag-based approach to systematically structure encryption-relevant data, ensuring semantic consistency and domain relevance. Below, we detail the 3-step process for generating high-quality plaintext data. + +Step 1: Tag Definition and Curation. We leverage GPT-4o to generate candidate tags for each subdomain, capturing diverse real-world encryption scenarios. Human experts then curate these tags, eliminating redundancies, irrelevancies, and ambiguous entries, resulting in 89 distinct tags (see Appendix A.1). This structured approach ensures that the generated plaintext data remains realistic, contextually meaningful, and representative of actual encryption use cases. The tags are designed to align with the Variable Length property, enabling + +the generation of inputs of varying sizes to assess model robustness. + +Step 2: Controlled Text Generation. Our plaintext generation process employs tag combinations to control text granularity: entries with more tags contain richer contextual details and greater length, while those with fewer tags remain concise and specific. To ensure semantic validity, all generated data are filtered to eliminate generic or redundant descriptions, creating a dataset that reflects diverse encryption scenarios with varying complexity. Additionally, we introduce the Noise Perturbation property through controlled noise injection, which serves two key objectives: (1) testing the model's anti-interference capabilities and (2) reducing its reliance on contextual semantics to enhance robustness. Furthermore, we incorporate Sensitive Numerical Data by designing scenarios with complex alphanumeric combinations, including critical identifiers such as ID card and passport number. This multifaceted approach enables a comprehensive evaluation of the model's ability to address sophisticated decryption challenges. + +Step 3: Expert Validation and Refinement. 
After generation, we conduct expert validation to ensure data quality, correctness, and relevance. Noninformative content, excessively long or short samples, and entries lacking clear privacy attributes are + +filtered out. Through this rigorous refinement process, we retain 262 high-quality plaintext samples. This approach enables a practical and application-driven benchmark for evaluating LLMs' decryption capabilities in cryptographic reasoning tasks. + +# 2.2 Encryption Algorithms + +CipherBank incorporates 3 major categories of encryption methods: Substitution Ciphers, Transposition Ciphers, and Custom Ciphers. (1) Substitution-based techniques, including Rot13, Atbash, Polybius and Vigenère, test a model's ability to decode character-level transformations. These ciphers involve monoalphabetic or polyalphabetic substitutions, where each character is replaced by another based on a fixed rule or key. These methods evaluate the model's capacity to decode symbolic mappings and generalize across substitution rules. (2) Transposition-based techniques, such as Reverse and SwapPair, focus on positional rearrangements rather than symbol substitutions. These ciphers challenge the model to recognize structural patterns, such as reversed sequences or pairwise swaps. Unlike substitution ciphers, which alter character identities but preserve their order, transposition ciphers preserve characters but disrupt their sequence. This tests the model's ability to analyze sequential dependencies and reconstruct the original symbol order. + +To further assess LLMs' ability to decrypt uncommon encryption methods, we introduce (3) Custom-designed ciphers that deviate from standard cryptographic schemes. (a) DualAvgCode is inspired by OpenAI's o1 model showcase3, where iterative transformations require models to infer multi-step encryption patterns. 
(b) ParityShift draws from LSB steganography (Mielikainen, 2006), a common technique in information hiding, incorporating bitwise manipulations based on character parity. (c) WordShift Cipher is designed to evaluate LLMs' ability to decrypt ciphers that combine substitution and transposition encryption, performing Caesar-style letter shifts within each word individually, blending character-level substitution with structural reordering. + +Meanwhile, We categorize the nine algorithms into five difficulty tiers based on key necessity and computational complexity. T1 (Basic) includes simple ciphers like ROT13 and Reverse. T2 (Intermediate) introduces Atbash and WordShift with + +slightly more complex rules. T3 (Moderate) covers DualAvgCode and Polybius, requiring structured encoding. T4 (Advanced) involves ParityShift and SwapPairs with intricate data manipulation. T5 (Expert) features the Vigenère cipher, a polyalphabetic substitution cipher known for its keyword-based complexity. This framework organizes encryption techniques from basic to expert. + +# 2.3 CipherBank Statistics + +As shown in Figure 2, we provides an overview of CipherBank structure. The encryption algorithm in Section 2.2 applies to the expert-curated dataset from Section 2.1, yielding 2,358 test data points. + +Table 1: Statistics of CipherBank. + +
Domains#Tag#Plaintext#TestAvg(len)
Personal Privacy Data2350450107.88
Enterprise Sensitive Data1652468103.10
Public Safety Data1763567110.89
Financial Asset Data1344396163.68
Internet Records2053477191.92
Summary892622358134.03
+ +Table 1 summarizes the distribution of plaintexts across 5 domains, each with varying numbers of tags, samples, and test cases. Notably, Internet Records has the longest plaintexts (191.92), while Enterprise Sensitive Data has shorter samples (103.10). This diversity ensures a comprehensive evaluation of model performance across different encryption contexts. + +# 3 Evaluations + +# 3.1 Evaluation Setup + +Evaluation Protocols. In terms of testing methodology, CipherBank's evaluation follows the Known-Plaintext Attack framework (Zulkifli and Mohd, 2008), employing a 3-shot testing approach. We prompt the model with three plaintext-ciphertext pairs as demonstrations to infer encryption rules, identify potential keys, and apply the learned patterns to decrypt a new ciphertext. The detailed prompt can be found in Appendix B.1. + +For evaluation metrics, we primarily employ accuracy to measure overall decryption success, which is the ratio of correctly decrypted cases to total test cases, where correctness requires an exact character match with the plaintext. Additionally, to capture finer-grained differences between the decrypted output and the original plaintext, we incorporate Levenshtein similarity (Yujian and Bo, + +# Example 2.1: Plain-Ciphertext Pair + +Domain: Personal Privacy Data + +Subdomain: Identity Information + +Tag Combination: ["Name", "Date of Birth", "Passport Number"] + +Plaintext: + +Peter was born on April 23, 1985, and carries a passport with the number X123456789. + +# Encryption results: + +(1) Rot13: Crgre jnf obea ba Ncevy 23, 1985, naq pneevrf n cnffcbeg jvgu gur ahzore K123456789. +(2) SwapPairs: ePet raw sobnro npAir l32,9158,na dacrei s aapssoptrw ti hht eunbmreX 21436587.9 +(3) WordShift : erPet was nbor no ilApr 23, 5,198 and riescar a sportpas hwit the bernum 3456789.X12 + +(4) ... + +More results can be found in the appendix. + +2007). 
We compute the Levenshtein distance for each sentence individually and report the average Levenshtein similarity across all test cases, providing a more nuanced assessment of model performance beyond binary correctness. + +LLM Candidates. For a comprehensive evaluation, we carefully selected 18 SOTA LLMs for evaluation, ensuring a diverse representation of open-source, closed-source, and reasoning-specialized models. Below, we outline the tested models: + +$\star$ Open-Source Chat Models: We evaluate leading open-source LLMs, including Mistral AI's Mixtral-8x22B (Jiang et al., 2024a), Alibaba's Qwen2.5-72B-Instruct (Yang et al., 2024a), Meta's Llama-3.1-70B-Instruct and Llama-3.3-70B-Instruct (Dubey et al., 2024), as well as the rising star - DeepSeek-V3 (Liu et al., 2024a). +$\star$ Closed-Source Models: For proprietary models, evaluation is conducted via API access. The tested models include OpenAI's 4o-mini and GPT-4o series (0806, 1120) (Hurst et al., 2024), DeepMind's Gemini-1.5-Pro (Team, 2024a) and Gemini-2.0-Flash-Exp $^{4}$ , along with Anthropic's Claude-Sonnet-3.5 $(1022)^{5}$ . +$\star$ Reasoning Models: We further investigate models optimized for reasoning tasks, including QwQ-32B-Preview (Team, 2024b), DeepSeek-R1 (Guo et al., 2025), Gemini-2.0-Flash-Thinking $(1219)^{6}$ o1-mini (0912) and o1 (1217) (Jaech et al., 2024). + +# 3.2 Benchmark Results + +Table 2 presents the evaluation results of all candidate LLMs (Levenshtein similarity results are in Appendix C.1). Below, we distill the experimental findings into several observations: + +Limitations of Current Models in Cryptographic Reasoning. Despite advancements in LLMs, Table 2 highlights their limitations in structured cryptographic reasoning. The overall performance remains low, with most SOTA models struggling to achieve meaningful accuracy. In Cipher Score, common models like Qwen and LLaMA perform particularly poorly, with some scoring in the single digits or near zero. 
Even the best-performing models, Claude-3.5 and o1, achieve less than 50 in accuracy, underscoring the significant difficulty of CipherBank and the challenges LLMs face in systematic decryption. + +Reasoning Models Generally Outperform Chat Models. When comparing reasoning models to chat models, generally we can find that the reasoning models do outperform chat models on all cipher algorithms and achieve better overall performance. The only expectation is the superior performance of Claude-3.5 (45.14) even better than o1, and also the bad performance of QwQ-32B-Preview (only 0.76 accuracy). This clearly demonstrate the advantages of the reasoning-specialized models. + +Closed-Source Models Retain an Edge Over Open-Source Models. Overall, closed-source models outperform open-source models in cryptographic decryption. Claude-3.5 (45.14) and o1 (40.59) achieve the highest performance across all cipher categories. However, DeepSeek-V3 (9.86) and DeepSeek-R1 (25.91) surpass most models in the GPT and Gemini families, indicating that advanced open-source models are closing the gap. + +Table 2: 3-shot scores (\%) of LLMs across three major encryption paradigms and nine specific encryption algorithms on CipherBank. The highest scores in each category are highlighted with a blue background, while the second-best results are underlined for emphasis. + +
ModelSubstitution CiphersTransposition CiphersCustom CiphersCipher Score
RotAtbashPolybiusVigenèreReverseSwapPairsDualAvgCodeParityShiftWordShiftAccuracyavg
Open-source Chat Models
Mixtral-8x22B-v0.10.380000.7600.3801.150.30
Qwen2.5-72B-Instruct1.1500000.381.1502.290.55
Llama-3.1-70B-Instruct1.150.3800.38000.380.380.760.38
Llama-3.3-70B-Instruct2.670.38000000.7600.42
DeepSeek-V332.4414.882.290.7628.470.380.381.148.029.86
Closed-source Models
GPT-4o-mini-2024-07-183.692.0300.512.1600.3800.251.00
GPT-4o-2024-08-0638.173.050.380.7625.192.2901.148.408.82
GPT-4o-2024-11-2026.466.990.130.7615.270.760.250.896.116.40
gemini-1.5-pro55.340.760.380.7610.310.760.380.7616.419.54
gemini-2.0-flash-exp35.883.051.530.3829.391.5300.765.348.65
Claude-Sonnet-3.5-102283.2175.1972.901.9163.936.874.9658.2139.1245.14
Reasoning Models
QwQ-32B-Preview1.530.381.910000.380.382.290.76
DeepSeek-R173.2858.7844.270.3810.690.3824.0512.988.4025.91
gemini-2.0-flash-thinking40.4617.1821.761.1522.901.1507.639.1613.49
o1-mini-2024-09-1246.1868.3246.951.535.150.382.937.631.5320.07
o1-2024-12-1759.9279.0179.397.2514.8932.1450.3812.3929.9040.59
+ +Nevertheless, both still lag behind Claude-3.5 and o1, suggesting that while open-source models are improving, there is significant potential for open-source models to achieve even better performance in the future. + +The performance variance among models of the same category is remarkably significant. Within the Open-source Chat Models category, the top-performing model, deepseek-v3 (9.86), outperforms the weakest model, Mixtral-8x22B (0.30), by a factor of 33. Similarly, in the Closed-source Models category, Claude-Sonnet-3.5 (45.14) demonstrates a performance 45 times greater than that of GPT-4o-mini (1.00). The disparity is even more pronounced in the Reasoning Models category, where o1 (40.59) surpasses QwQ-32B-Preview (0.76) by a factor of 53. Such substantial performance variations are rarely observed in other benchmarks, highlighting the challenging nature of CipherBank. This benchmark effectively distinguishes the reasoning capabilities of different models through its decryption dimension, providing a robust framework for evaluating model performance. + +# 4 Detailed Analysis + +In this section, we conduct a detailed analysis from the perspectives of plaintext characteristics, noise levels, testing methodologies, finer-grained evaluation metrics, and error analysis to gain deeper + +insights into the strengths and limitations of different LLMs in cryptographic decryption. + +Table 3: Model Performance on Short and Long Plaintiff Setting (Lower Difference and Decrease Ratio Are Better). We highlight the most stable and sensitive results in blue and green respectively. + +
ModelShortLongDiffDecrease Ratio(%)
GPT-4o-2024-11-209.474.465.0152.60
gemini-2.0-flash-exp11.506.425.0844.35
DeepSeek-V313.245.228.0260.60
gemini-2.0-flash-thinking19.908.4711.4342.61
DeepSeek-R132.2720.9411.3333.16
o1-mini-2024-09-12
o1-2024-12-17
Claude-Sonnet-3.548.7047.850.851.74
+ +# 4.1 Impact of Plaintext Length + +To test models' sensitivity to text length, we categorize plaintexts into short (fewer than three tags) and long groups, averaging 70.29 and 181.61 characters, respectively. As shown in Table 3 (full results and plaintext examples can be found in Appendix C.2), longer plaintexts lead to a significant performance decline in most models. Most models exhibit a significant decline in decryption performance as text length increases. Among them, Claude-3.5 (-0.85) shows the most stable performance, while o1-mini (-16.42) is the most sensitive. This contrasts with human performance, highlighting LLMs' length bias in decryption reasoning. + +![](images/4b0ec48e93805d8249bfdf5a2d13a4735aefef2c3348233ea635395b851fa389.jpg) +(a) Model Robustness to Noisy Inputs: Performance Comparison. + +![](images/90ad513b31d0dfabfe9b6d16424b1e84a8c0a245a47240b779b8ffa98718a275.jpg) +Figure 3: Evaluation of LLM Performance Under Different Encryption and Prompting Conditions. + +![](images/15a51b917cad39ba4fb7a86e640ecdb18cc5aad829229441318beab38dbe68c5.jpg) +(b) Effect of Encryption Scope: Letters Only vs. Letters & Numbers. +(c) Evaluating the Benefit of Explicit Algorithm Hints in 3-Shot Prompting. + +# 4.2 Effect of Noise on Model Robustness + +We observe that models frequently substituted synonyms instead of strictly applying decryption rules to each character (examples in Appendix C.2), indicating the presence of shortcut reasoning, where models partially decrypt the text and infer the remainder based on semantic context rather than adhering to the encryption pattern. + +To evaluate robustness and mitigate reliance on semantic inference, we select the 40 plaintexts with the lowest perplexity (PPL) scores, computed using Llama-3.1-8B-Instruct, for noise injection. 
Figure 3a shows a substantial performance drop across all models, including Claude-3.5 (from 59.17 to 25.08) and o1-mini (from 24.25 to 5.83), highlighting their vulnerability to structural perturbations and further exposing the limitations of current models in systematic reasoning and precise decryption. + +# 4.3 Effect of Encryption Scope + +In previous evaluations, only letters are encrypted. To better reflect real-world scenarios, here we select plaintexts with sensitive numerical data and apply encryption to both letters and numbers, focusing on algorithms that directly affect numbers (test prompt in Appendix C.2). As shown in Table 3b, model performance drops significantly in this more complex setting. This suggests difficulty in adapting decryption strategies to numerical transformations. Even under the same encryption principles, encrypting both letters and numbers greatly increases task complexity, posing a significant challenge for current reasoning models. This highlights a critical limitation in LLMs' ability to generalize across diverse data types, particularly when numerical transformations are involved. Future work should focus on enhancing models' capacity to handle mixed data encryption. + +# 4.4 Effect of Explicit Algorithm Hints on Decryption Performance + +Previous evaluations highlight the significant challenges posed by CipherBank. To evaluate the models' decryption capabilities when provided with algorithm details, we enhance the 3-shot setting by explicitly informing the models of the specific algorithm during testing. Under the revised setting, models are no longer required to independently deduce encryption logic but instead focus on identifying the necessary key and applying the specified decryption rules. The enhanced prompt is provided in Appendix C.2. Table 3c reveals distinct performance patterns. 
Most chat models show minimal improvement even with algorithm details, struggling with key inference and decryption—highlighting persistent limitations, especially in models like Claude (+5.30) and Gemini (+1.97). + +In contrast, reasoning models show marked performance gains, with R1 (+31.81) and o1-mini (+14.49) achieving significant improvements. The observed contrast underscores a fundamental distinction: chat models primarily rely on surface-level pattern recognition, while reasoning models excel in structured inference when provided with appropriate guidance. + +# 4.5 Error Analysis + +We conduct a comprehensive error analysis based on the test results in Table 2, identifying six distinct error types. To gain deeper insights, we examine the three best-performing chat models and three best-performing reasoning models, summarizing their error distributions. Detailed error definitions and examples are provided in Appendix D.1 and D.2. + +As shown in Figure 4, the distribution of error types reveals key differences between reasoning and chat models. Surprisingly, (1) reasoning models exhibit a higher rate of reasoning failures than + +![](images/978a939a205b5d785c3dfb5009bc4e72e0e7940913449dc416cc66f0c8385835.jpg) +Figure 4: Decryption Error Distribution. The left represents chat models, while the right corresponds to reasoning models. + +chat models. A deeper examination of Appendix D.3 reveals that many of these failures occur on simpler tasks, suggesting that reasoning models may overanalyze problems, leading to incorrect conclusions. This indicates that their complex inference processes can sometimes hinder performance on straightforward decryption cases. Conversely, (2) chat models show a higher frequency of omission-insertion and reorganization errors, indicating that while they are stronger in semantic understanding, this often results in excessive auto-completion and sentence restructuring rather than strict rule adherence. 
This tendency suggests that chat models prioritize fluency over exact decryption, leading to unintended modifications. Additionally, (3) both model types frequently make errors in name decryption, highlighting a broader challenge in handling structured entity transformations. This suggests that current LLMs struggle to consistently apply encryption rules to proper nouns, potentially due to memorization biases or difficulties in preserving entity-level consistency during decryption. + +# 5 Related Work + +Benchmarks for Reasoning Evaluating reasoning abilities in LLMs has been a key focus in AI research, with various benchmarks assessing models across mathematical, logical, and inferential tasks. MATH (Hendrycks et al., 2021b), MathBench (Liu et al., 2024c), and LiveMath-Bench (Liu et al., 2024d) test arithmetic and algebraic reasoning, while HumanEval (Chen et al., 2021b), DebugBench (Tian et al., 2024) and Big-CodeBench (Zhuo et al., 2024) evaluates code generation that require programming logic. Additionally, BIG-Bench (Srivastava et al., 2022), BBH (Suzgun et al., 2022), and LiveBench (White et al., 2024) measure broader cognitive abilities, such as abstract reasoning and analogical problem + +solving. KOR-Bench (Ma et al., 2024) is new benchmark that examines strong reasoning by introducing Knowledge-Orthogonal Reasoning (KOR) tasks, assessing models' ability to apply newly introduced rules independent of pretrained knowledge. Specially, it also contains a cipher reasoning task, which provides explicit encryption rules and keys, guiding models through step-by-step decryption rather than requiring pattern inference. In contrast, CipherBank presents a more realistic challenge, requiring models to identify encryption patterns from examples without prior knowledge, better reflecting real-world scenarios where encryption schemes are unknown. 
+ +Jailbreaking via Cipher Characters Recent work demonstrates that encoding adversarial prompts via encryption (Yuan et al., 2023; Wei et al., 2024) or obfuscation (Yong et al., 2023; Jiang et al., 2024b; Kang et al., 2024) can bypass LLM safety filters by exploiting models' ability to process encoded inputs. While CipherBench (Handa et al., 2024) evaluates cipher-based jailbreaking, its reliance on 40 curated plaintexts and explicit algorithm hints limits practical relevance. Our CipherBank removes prior guidance, requiring autonomous pattern inference from plaintext-ciphertext pairs to simulate privacy-sensitive decryption scenarios, establishing a robust benchmark for LLM security evaluation. + +# 6 Conclusion + +In this work, we introduce CipherBank, a comprehensive benchmark for evaluating reasoning capabilities through cryptographic decryption. CipherBank includes 5 domains, 14 subdomains of plaintext data, 9 encryption algorithms, and 2,358 decryption tasks. By testing SOTA LLMs on CipherBank, we uncover significant limitations in their decryption abilities, revealing distinct strengths and weaknesses between reasoning and chat models. Our analysis identifies key deficiencies in current reasoning approaches and suggests directions for improvement, positioning CipherBank as a novel benchmark for advancing structured inference and cryptographic reasoning in developing future LLMs. + +# Limitations + +Our evaluation is constrained by the reliance on closed-source models, which are accessible only via API calls. This introduces potential variability + +due to API updates and version changes, though we mitigate this by documenting the specific versions and dates used. Additionally, access restrictions prevent us from evaluating more advanced models such as o1 Pro and o3 series, limiting the scope of our benchmark. 
From a design perspective, CipherBank primarily focuses on classical encryption algorithms, as modern cryptographic schemes introduce complexities beyond current model capabilities. While this choice ensures feasibility in evaluation, it also restricts the benchmark's applicability to real-world cryptographic challenges. As models improve, expanding CipherBank to modern encryption techniques will provide a more comprehensive assessment of reasoning in cryptographic tasks. + +# Acknowledgements + +This work is supported by National Key R&D Program of China (2022ZD0160201). + +# References + +Janice Ahn, Rishu Verma, Renze Lou, Di Liu, Rui Zhang, and Wenpeng Yin. 2024. Large language models for mathematical reasoning: Progresses and challenges. arXiv preprint arXiv:2402.00157. +Anthropic. 2024. Claude 3.5 sonnet. https://www.anthropic.com/news/claude-3-5-sonnet. Accessed: 2025-02-09. +Jacob Austin, Augustus Odena, Maxwell Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie Cai, Michael Terry, Quoc Le, and Charles Sutton. 2021. Program synthesis with large language models. Preprint, arXiv:2108.07732. +Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, Alex Ray, Raul Puri, Gretchen Krueger, Michael Petrov, Heidi Khlaaf, Girish Sastry, Pamela Mishkin, Brooke Chan, Scott Gray, Nick Ryder, Mikhail Pavlov, Alethea Power, Lukasz Kaiser, Mohammad Bavarian, Clemens Winter, Philippe Tillet, Felipe Petroski Such, Dave Cummings, Matthias Plappert, Fotios Chantzis, Elizabeth Barnes, Ariel Herbert-Voss, William Hebgen Guss, Alex Nichol, Alex Paino, Nikolas Tezak, Jie Tang, Igor Babuschkin, Suchir Balaji, Shantanu Jain, William Saunders, Christopher Hesse, Andrew N. 
Carr, Jan Leike, Josh Achiam, Vedant Misra, Evan Morikawa, Alec Radford, Matthew Knight, Miles Brundage, Mira Murati, Katie Mayer, Peter Welinder, Bob McGrew, Dario Amodei, Sam McCandlish, Ilya Sutskever, and Wojciech Zaremba. 2021a. Evaluating large language models trained on code. + +Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde De Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, et al. 2021b. Evaluating large language models trained on code. arXiv preprint arXiv:2107.03374. +Sirui Chen, Bo Peng, Meiqi Chen, Ruiqi Wang, Mengying Xu, Xingyu Zeng, Rui Zhao, Shengjie Zhao, Yu Qiao, and Chaochao Lu. 2024. Causal evaluation of language models. Preprint, arXiv:2405.00622. +Ning Ding, Yulin Chen, Bokai Xu, Yujia Qin, Zhi Zheng, Shengding Hu, Zhiyuan Liu, Maosong Sun, and Bowen Zhou. 2023. Enhancing chat language models by scaling high-quality instructional conversations. Preprint, arXiv:2305.14233. +Li Dong, Nan Yang, Wenhui Wang, Furu Wei, Xiaodong Liu, Yu Wang, Jianfeng Gao, Ming Zhou, and Hsiao-Wuen Hon. 2019. Unified language model pretraining for natural language understanding and generation. Advances in neural information processing systems, 32. +Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783. +Yingqiang Ge, Wenyue Hua, Kai Mei, Juntao Tan, Shuyuan Xu, Zelong Li, Yongfeng Zhang, et al. 2023. Openagi: When llm meets domain experts. Advances in Neural Information Processing Systems, 36:5539-5568. +Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948. 
+Simeng Han, Hailey Schoelkopf, Yilun Zhao, Zhenting Qi, Martin Riddell, Wenfei Zhou, James Coady, David Peng, Yujie Qiao, Luke Benson, Lucy Sun, Alex Wardle-Solano, Hannah Szabo, Ekaterina Zubova, Matthew Burtell, Jonathan Fan, Yixin Liu, Brian Wong, Malcolm Sailor, Ansong Ni, Linyong Nan, Jungo Kasai, Tao Yu, Rui Zhang, Alexander R. Fabbri, Wojciech Kryscinski, Semih Yavuz, Ye Liu, Xi Victoria Lin, Shafiq Joty, Yingbo Zhou, Caiming Xiong, Rex Ying, Arman Cohen, and Dragomir Radev. 2024. Folio: Natural language reasoning with first-order logic. Preprint, arXiv:2209.00840. +Divij Handa, Zehua Zhang, Amir Saeidi, and Chitta Baral. 2024. When "competency" in reasoning opens the door to vulnerability: Jailbreaking llms via novel complex ciphers. Preprint, arXiv:2402.10601. +Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. 2021a. Measuring mathematical problem solving with the math dataset. NeurIPS. + +Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. 2021b. Measuring mathematical problem solving with the math dataset. arXiv preprint arXiv:2103.03874. +Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. 2024. Gpt-4o system card. arXiv preprint arXiv:2410.21276. +Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. 2024. Openai o1 system card. arXiv preprint arXiv:2412.16720. +Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. 2024a. Mixtral of experts. arXiv preprint arXiv:2401.04088. +Fengqing Jiang, Zhangchen Xu, Luyao Niu, Zhen Xiang, Bhaskar Ramasubramanian, Bo Li, and Radha Poovendran. 2024b. 
Artprompt: Ascii art-based jailbreak attacks against aligned llms. arXiv preprint arXiv:2402.11753. +Daniel Kang, Xuechen Li, Ion Stoica, Carlos Guestrin, Matei Zaharia, and Tatsunori Hashimoto. 2024. Exploiting programmatic behavior of llms: Dual-use through standard security attacks. In 2024 IEEE Security and Privacy Workshops (SPW), pages 132-143. IEEE. +Nikitas Karanikolas, Eirini Manga, Nikoletta Samaridi, Eleni Tousidou, and Michael Vassilakopoulos. 2023. Large language models versus natural language understanding and generation. In Proceedings of the 27th Pan-Hellenic Conference on Progress in Computing and Informatics, pages 278-290. +Alan G. Konheim. 2007. Computer Security and Cryptography. John Wiley & Sons. +Cheryl Lee, Chunqiu Steven Xia, Longji Yang, Jentse Huang, Zhouruixin Zhu, Lingming Zhang, and Michael R Lyu. 2024. A unified debugging approach via llm-based multi-agent synergy. arXiv preprint arXiv:2404.17153. +Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. 2024a. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437. +Fei Liu, Yiming Yao, Ping Guo, Zhiyuan Yang, Zhe Zhao, Xi Lin, Xialiang Tong, Mingxuan Yuan, Zhichao Lu, Zhenkun Wang, et al. 2024b. A systematic survey on large language models for algorithm design. arXiv preprint arXiv:2410.14716. + +Hongwei Liu, Zilong Zheng, Yuxuan Qiao, Haodong Duan, Zhiwei Fei, Fengzhe Zhou, Wenwei Zhang, Songyang Zhang, Dahua Lin, and Kai Chen. 2024c Mathbench: Evaluating the theory and application proficiency of llms with a hierarchical mathematics benchmark. arXiv preprint arXiv:2405.12209. +Junnan Liu, Hongwei Liu, Linchen Xiao, Ziyi Wang, Kuikun Liu, Songyang Gao, Wenwei Zhang, Songyang Zhang, and Kai Chen. 2024d. Are your llms capable of stable reasoning? arXiv preprint arXiv:2412.13147. 
+Kaijing Ma, Xinrun Du, Yunran Wang, Haoran Zhang, Zhoufutu Wen, Xingwei Qu, Jian Yang, Jiaheng Liu, Minghao Liu, Xiang Yue, et al 2024. Kor-bench: Benchmarking language models on knowledge-orthogonal reasoning tasks. arXiv preprint arXiv:2410.06526. +Jarno Mielikainen. 2006. Lsb matching revisited. IEEE signal processing letters, 13(5):285-287. +S. Rani, A. Kataria, and M. Chauhan. 2022. Cyber security techniques, architectures, and design In Holistic Approach to Quantum Cryptography in Cyber Security, pages 41-66. CRC Press. +A. Sarkar, S. R. Chatterjee, and M. Chakraborty. 2021 Role of cryptography in network security. The "Essence" of Network Security: An End-to-End Panorama, pages 103-143. +Miyu Sasaki, Natsumi Watanabe, and Tsukihito Komanaka. 2024. Enhancing contextual understanding of mistral llm with external knowledge bases. +Bruce Schneier. 2002. Cryptographic design vulnerabilities. Computer, 31(9):29-33. +Divya Shree, Seema Ahlawat, et al. 2017. A review on cryptography, attacks and cyber security. International Journal of Advanced Research in Computer Science, 8(5). +S. Soomro, M. R. Belgaum, Z. Alansari, et al. 2019 Review and open issues of cryptographic algorithms in cyber security. In 2019 International Conference on Computing, Electronics & Communications Engineering (iCCECE), pages 158-162. IEEE. +Aarohi Srivastava, Abhinav Rastogi, Abhishek Rao, Abu Awal Md Shoeb, Abubakar Abid, Adam Fisch, Adam R Brown, Adam Santoro, Aditya Gupta, Adrià Garriga-Alonso, et al. 2022. Beyond the imitation game: Quantifying and extrapolating the capabilities of language models. arXiv preprint arXiv:2206.04615. +Hongda Sun, Weikai Xu, Wei Liu, Jian Luan, Bin Wang, Shuo Shang, Ji-Rong Wen, and Rui Yan 2024. Determinlr: Augmenting llm-based logical reasoning from indeterminacy to determinacy. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 9828-9862. 
+ +Mirac Suzgun, Nathan Scales, Nathanael Scharli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc V Le, Ed H Chi, Denny Zhou, et al. 2022. Challenging big-bench tasks and whether chain-of-thought can solve them. arXiv preprint arXiv:2210.09261. +Gemini Team. 2024a. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. Preprint, arXiv:2403.05530. +Qwen Team. 2024b. Qwq: Reflect deeply on the boundaries of the unknown. +Runchu Tian, Yining Ye, Yujia Qin, Xin Cong, Yankai Lin, Zhiyuan Liu, and Maosong Sun. 2024. Debugbench: Evaluating debugging capability of large language models. Preprint, arXiv:2401.04621. +Boshi Wang, Xiang Yue, and Huan Sun. 2023. Can chatgpt defend its belief in truth? evaluating llm reasoning via debate. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 11865-11881. +Alexander Wei, Nika Haghtalab, and Jacob Steinhardt. 2024. Jailbroken: How does llm safety training fail? Advances in Neural Information Processing Systems, 36. +Colin White, Samuel Dooley, Manley Roberts, Arka Pal, Ben Feuer, Siddhartha Jain, Ravid Shwartz-Ziv, Neel Jain, Khalid Saifullah, Siddartha Naidu, et al. 2024. Livebench: A challenging, contamination-free llm benchmark. arXiv preprint arXiv:2406.19314. +Yiran Wu, Feiran Jia, Shaokun Zhang, Hangyu Li, Erkang Zhu, Yue Wang, Yin Tat Lee, Richard Peng, Qingyun Wu, and Chi Wang. 2024. Mathchat: Converse to tackle challenging math problems with llm agents. In ICLR 2024 Workshop on Large Language Model (LLM) Agents. +An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. 2024a. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115. +Kaiyu Yang, Aidan Swope, Alex Gu, Rahul Chalamala, Peiyang Song, Shixing Yu, Saad Godil, Ryan J Prenger, and Animashree Anandkumar. 2024b. Leandrojo: Theorem proving with retrieval-augmented language models. 
Advances in Neural Information Processing Systems, 36. +Wenlin Yao, Haitao Mi, and Dong Yu. 2024. Hdflow: Enhancing llm complex problem-solving with hybrid thinking and dynamic workflows. arXiv preprint arXiv:2409.17433. +Zheng-Xin Yong, Cristina Menghini, and Stephen H Bach. 2023. Low-resource languages jailbreak gpt-4. arXiv preprint arXiv:2310.02446. + +Youliang Yuan, Wenxiang Jiao, Wenxuan Wang, Jen-tse Huang, Pinjia He, Shuming Shi, and Zhaopeng Tu. 2023. Gpt-4 is too smart to be safe: Stealthy chat with llms via cipher. arXiv preprint arXiv:2308.06463. +Haodong Duan Yuan Liu. 2023. Mmbench: Is your multi-modal model an all-around player? arXiv:2307.06281. +Li Yujiang and Liu Bo. 2007. A normalized levenshtein distance metric. IEEE transactions on pattern analysis and machine intelligence, 29(6):1091-1095. +Li Zhong, Zilong Wang, and Jingbo Shang. 2024. Ldb: A large language model debugger via verifying runtime execution step-by-step. arXiv preprint arXiv:2402.16906. +Terry Yue Zhuo, Minh Chien Vu, Jenny Chim, Han Hu, Wenhao Yu, Ratnadira Widyasari, Imam Nur Bani Yusuf, Haolan Zhan, Junda He, Indraneil Paul, et al. 2024. Bigcodebench: Benchmarking code generation with diverse function calls and complex instructions. arXiv preprint arXiv:2406.15877. +MZWM Zulkifli and Zaid W Mohd. 2008. Attack on cryptography. Comput. Secur, 12(5):33-45. + +# A Detailed Benchmark Description + +In this chapter, we provide additional details on CipherBank that were not extensively covered in the main text. This includes a detailed breakdown of plaintext tags and their distribution across subdomains, as well as a more comprehensive description of the encryption algorithms used. These details offer deeper insights into the dataset construction and the encryption schemes evaluated in this benchmark. + +# A.1 Tags and Plaintext Distribution Across Subdomains + +Table 4 provides an overview of the specific tags associated with each subdomain within CipherBank. 
The dataset spans five primary domains and 14 subdomains, ensuring diverse and realistic plaintext scenarios for cryptographic evaluation. + +# A.2 Detailed Descriptions of Encryption Algorithms + +This section provides detailed descriptions of the nine encryption algorithms used in CipherBank. These algorithms span substitution, transposition, and custom-designed ciphers, covering a range of complexity levels. Notably, Rot13, Atbash, Polybius, DualAvgCode, and ParityShift also support numeric encryption, further enhancing the diversity of decryption challenges. Table 5 outlines each algorithm and its transformation rules. Some detailed encryption examples are provided below, illustrating how different ciphers transform plaintext into ciphertext. + +For each encryption algorithm, we have implemented a corresponding decryption algorithm to ensure that ciphertext can be fully restored to its original plaintext. This guarantees the reversibility and integrity of the encryption schemes used in CipherBank, allowing for a rigorous evaluation of model decryption capabilities. The decryption process follows the exact inverse of the encryption transformations, ensuring consistency across all test cases. + +# B Experimental Setup Details + +In our evaluation, we adopt a 3-shot approach. A more natural Ciphertext-Only Attack (zero-shot) setting was not adopted, as it would reduce the task to brute-force decryption, where the model blindly applies all known encryption algorithms in search of a coherent output. This contradicts the goal + +of reasoning-based inference, where the model is expected to deduce encryption rules from provided examples rather than rely on exhaustive trial and error. + +To ensure a balanced evaluation of decryption difficulty, substitution ciphers exclude numbers to prevent inconsistencies arising from differing cyclic structures. 
In contrast, ciphers that do not involve direct substitution, such as Reverse, Word-Shift, and similar methods, process numbers normally, preserving structural integrity within the encrypted text. + +For all open-source models, we conduct evaluations using the OpenCompass framework with default temperature to ensure consistent outputs. For models evaluated via API, we perform 5 independent test runs per model and report the average result to enhance stability and reliability. + +# B.1 Prompts Used for Querying + +This section outlines the prompts used to query models during evaluation. To ensure consistency, all models were tested under a 3-shot setting, where they were provided with three plaintext-ciphertext pairs before attempting to decrypt a new ciphertext. The prompts were designed to encourage logical inference rather than relying on prior knowledge, guiding models to extract encryption patterns and apply the learned rules systematically. Below, Figure 5 provides the system prompt (some reasoning models may not support system prompts), while Figure 6 present the detailed user prompts. + +# B.2 Post-processing Methods + +During querying, we instruct the model to think step by step and enclose the final decrypted output within ... tags. To extract the decoded plaintext, we apply the regular expression ' result $\text{串}$ $(\text{串} ?)$ /result>, capturing the content between these tags. The matching process is case-insensitive, aligning with algorithms like Polybius, which inherently do not differentiate between uppercase and lowercase letters when restoring plaintext. This ensures consistency across different decryption schemes. + +Table 4: Tag Distribution Across Subdomains in CipherBank + +
DomainSubdomainTags
Personal Privacy DataIdentity InformationName, ID Card Number, Passport Number, Date of Birth, Gender, Nationality, Marital Status, Mobile Number, Family Member Information (e.g., immediate family names, contact information), Residential Address
Health InformationMedical Record Number (Patient ID), Diagnosis Records, Surgery Records, Examination Reports (e.g., X-ray, CT scan results, heart rate, blood pressure, blood sugar level, blood type), Disease History, Allergy History, Vaccination Records, Family Medical History
Educational DataStudent ID (Student Number), School Records (Enrollment Date, Graduation Date), Academic Records (Subjects, Grades, GPA, Ranking), Degree Information (Bachelor, Master, Doctorate), Awards and Penalties Records (Disciplinary Records)
Enterprise Sensitive DataBusiness InformationBusiness Plans (e.g., Annual Plan, Five-Year Plan), Marketing Strategy (e.g., Marketing Promotion Plan, Advertising Budget), Customer Lists (e.g., Customer Contacts, Preferences), Supplier Information (Supplier List, Cooperation Agreements), Internal Financial Budgets (Cost Structure, Profit Forecasts)
Intellectual PropertyProduct Design Plans (e.g., Prototype Drawings, Design Documents), Internal Technical Documents (e.g., Technical Manuals, Specifications), Test Data (e.g., Product Performance Test Results, Quality Control Records), Copyright Data, Patent Data
Employee InformationContact Information (e.g., Phone Numbers, Email Addresses), Work Experience, Position and Department Information, Salary and Benefits Information (e.g., Salary Amount, Bonuses, Allowances), Performance Evaluation (e.g., Performance Scores, Promotion Records), Contract Information (e.g., Employment Contract, Non-Disclosure Agreement)
Public Safety DataPolice DataCase Information (Case Number, Case Type, Filing Date), Criminal Records (Suspect Information, Crime Time, Crime Location), Alarm Records (Informer Information, Alarm Time, Alarm Content), Investigation Reports (Investigation Results, Investigation Progress), Arrest Records (Arrest Time, Location, Action Description), Traffic Enforcement Data (Violation Records, Penalty Information), Police Officer Information (Officer Number, Name, Position, Department), Police Resource Allocation (Vehicle, Equipment, Weapon Usage Records)
National Security DataBorder Crossing Records (Entry and Exit Personnel Information, Vehicle Registration), Customs Inspection Data (Cargo List, Contraband Records), Territorial Patrol Data (Patrol Reports, Anomalies Records), Cyber Security Monitoring Data (Cyber Attack Records, Threat Intelligence)
Military DataOperation Plans, Target Location, Troop Deployment, Military Base Distribution, Defense Works Location
Financial Confidential DataBanking InformationAccount Number, Bank Card Number, Payment Method, Payment Platform ID, Transaction Details, Loan Amount, Interest Rate, Repayment Plan, Investment Records (Stocks, Funds, Bonds)
Personal IncomeSalary Amount, Pay Date, Tax Number, Tax Return Records
Internet RecordsBrowsing RecordsPage Interaction, Search Behavior, Click Activity, Device Information, Geolocation, Checkout Process, Multimedia Interaction, Download Records
Cookie DataSession Management, User Identification, Ad Targeting, Behavior Tracking, Authentication Tokens, Login Status
User PreferencesPreferred Genres, Device Usage Habits, Notification Preferences, Shopping Preferences, Video Preferences, Reading Habits
+ +# Example A.1: Plain-Ciphertext Pair (Identity Information) - Only Letter + +Domain: Personal Privacy Data + +Subdomain: Identity Information + +Tag Combination: ["Name", "Date of Birth", "Passport Number"] + +# Plaintext: + +Peter was born on April 23, 1985, and carries a passport with the number X123456789. + +# Encryption results: + +(1) Rot13: Crgre jnf obea ba Ncevy 23, 1985, naq pneevrf n cnffcbeg jvgu gur ahzore K123456789. +(2) Atbash: Kvgvi dzh ylim lm Zkiro 23, 1985, zmw xziirvh z kzhhklig drgs gsv mfnyvi C123456789. +(3) Polybius: 34 15 42 15 36 45 11 41 12 33 36 32 33 32 11 34 36 23 26 2 3, 1985, 11 32 14 13 11 36 36 23 15 41 11 34 11 41 41 34 33 36 42 45 23 42 22 42 22 15 32 43 31 12 15 36 46 12 3 4 5 6 7 89. +(4) Vigenère: Pgeet wcd dzrp op Arcin 23, 1985, cyd natcigd pcdszrv wkeh eh nwxbgc Z123456789. +(5) Reverse: .987654321X rebmun eht htiw tropssap a seirrac dna ,5891 ,32 lirpA no nrob saw reteP +(6) SwapPairs: ePet raw sobnro npAir l32,9158,na dacrei s aapssoptrw ti hht eunbmreX 21436587.9 +(7) DualAvgCode: OQdfsudfqs vxaart acnpqsmo npmo AAoqqshjkm 23, 1985, aamoce bdaaqsqshjdfrt aa oqaartroqnpssu vxhjsugi sugidf motvlnacdfqs WY123456789. +(8) ParityShift: Qduds vzr cnso no Zqshm 23, 1985, zoe bzsshrd z qzrrqnsu vuhui uid otlcds Y123456789. +(9) WordShift: erPet was nbor no ilApr 23, 5,198 and riescar a sportpas hwt the bernum 3456789.X12 + +# Example A.2: Plain-Ciphertext Pair (Police Data) - Only Letter + +# Domain: Public Safety Data + +Subdomain: Police Data + +Tag Combination: ["Suspect Information", "Crime Time", "Crime Location", "Police Officer Information"] + +# Plaintext: + +Suspect: Jonathan, Crime: Burglary, Time: 2022-03-12 14:30, Location: 123 Elm Street, Officer Smith observed suspicious activity near 5th Ave on 2022-03-13. + +# Encryption results: + +(1) Rot13: Fhcrpg: Wbanguna, Pevzr: Ohetynel, Gvzr: 2022-03-12 14:30, Ybpngvba: 123 Ryz Fgerrg, Bssvpre Fzygu bofreirq fhcvpbhf npgvivgl arne 5gu Nir ba 2022-03-13. 
+(2) Atbash: Hfhkvyg: Qlmzgszm, Xirnv: Yfitozib, Grnv: 2022-03-12 14:30, Olxzgrlm: 123 Von Hgivvg, Luurxvi Hnrgs lyhvieww hfhkrxrlfh zxgrergb mvzi 5gs Zev lm 2022-03-13. +(3) Polybius: 41 43 41 34 15 13 42 : 24 33 32 11 42 22 11 32 , 13 36 23 31 15 : 12 43 36 21 26 11 36 51 , 42 23 31 15 : 20 22 - 03 - 1214 : 30 , 2633131142233332 : 123152631414236151542 , 331616231315364131234222 3312411536441514414341342313233343411113422344234513215113654222114415332022 -03 - 13. +(4) Vigenère: Swdpgnt: Jqyavsap, Eciop: Mutrlccy, Tkxe: 2022-03-12 14:30, Lqnavtop: 123 Plo Svcege, Zfhtcgc Uxivs qmsgcvgo ufsrtckzuu aeeixta nglr 5tj Axp qy 2022-03-13. +(5) Reverse: 31-30-2202 no evA ht5 raen ytivitca suoicipsus devresbo htimS reciffO ,teertS mlE 321 :noitacoL ,03:41 21-30-2202 :emiT ,yralgruB :emirC ,nahtanoJ :tcepsuS. +(6) SwapPairs: uSpsc:tJ notaah,nC irem :uBgralyr ,iTem :02220-3211 :403 ,oLacitno :21 3lE mtSerte ,fOifec rmStihboesvrdes suipicuo scaitivyn ae rt5 hvA eno2 20-2301-3. +(7) DualAvgCode: RTvtrqdfbdu: IKnpmoaasugiaamo, BDqshlndf: ACtvqsfkmaaqsz, SUhjndf: 2022-03-12 14:30, KMnpbdaasuhjnpmo: 123 DFkmln RTsuqsdfdu, NPegeghjbddfq RSInhjsugi npacrtdfquuwdfce rttvrtoqhjbdhjnpvtrt aabb-suhjuwhjsuxz modfaaqs 5sugi AAuwdf npmo 2022-03-13. +(8) ParityShift: Rtrqduu: Knozuizo, Bshld: Ctsfznxsx, Uhld: 2022-03-12 14:30, Mnbzuhno: 123 Dml Rusddu, Ngghbds Rlhuicnrdswde rtrqhbntr zbuwhux odzs 5ui Zwd no 2022-03-13. +(9) **WordShift:** pect:Sus athan,Jon me:Cri glary,Bur e:Tim 2-03-12202 30,14: ation:Loc 123 Elm eet,Str icerOff thSmi ervedobs picioussus ivityact rnea 5th Ave no 2-03-13202. + +# Example A.3: Plain-Ciphertext Pair (Health Information) - Letter&Number + +# Domain: Personal Privacy Data + +Subdomain: Health Information + +Tag Combination: ["Patient ID", "Diagnosis Records"] + +# Plaintext: + +Patient ID: R094713; Name: Jamie Lee; Age: 45; Gender: Female; EMR: EHR-234987. 
+ +# Encryption results: + +(1) Rot13: Cngvrag VQ: E327046; Anzr: Wznvr Yrr; Ntr: 78; Traqe: Srznyr; RZE: RUE-567210. +(2) Atbash: Kzgrvmg RW: I905286; Mznv: Qznrv Ovv; Ztv: 54; Tvmwvi: Uvnzov; VNI: VSI-765012. +(3) Polybius: 34 11 42 23 15 32 42 23 14 : 36 66 65 56 63 53 55 ; 32 11 31 15 : 24 11 31 23 15 26 15 15 ; 11 21 15 : 56 61 ; 21 15 32 14 15 36 : 16 15 31 11 26 15 ; 15 31 36 : 15 22 36 - 54 55 56 65 64 63. +(4) Reverse: .789432-R HRE ;elameF :redneG ;54 :egA ;eeL eimaJ :emaN ;317490 R :DI tneitaP +(5) SwapPairs: aPteti DI: 0R94713; aNme: aJmei eLe; gAe: 45; eGndre: eFmale; MRE: HRE-239487. +(6) **WordShift:** atientP ID: R94713; ameN: Jamie eLe; geA: 45; enderG: emaleF; REM: EHR-234987. +(7) DualAvgCode: OQaaushjdmosu HJCE: QS009935680224; MOaalndf: IKaalnhjdf KMdfd; AAfhdf: 3546; FHdfmoced-fqs: EGdfnaakmdf; DFLNQS: DFGIQS-132435997968. +(8) ParityShift: Qzuhdou HE: S185602; Ozld: Kzlhd Mdd; Zfd: 54; Fdoeds: Gdlzmd; DLS: DIS-325896. + +# Example A.4: Plain-Ciphertext Pair (Banking Information) - Letter&Number + +# Domain: Financial Confidential Data + +## Subdomain: Banking Information + +Tag Combination: ["Account Number", "Bank Card Number", "Payment Platform ID"] + +# Plaintext: + +Account Number: 123456789, Bank: LA Bank, Card Number: 9876-5432-1098-7654, Payment Method: Virtual Credit Card, Payment Platform ID: ABC123XYZ, Timestamp: 2023-09-15 14:35, Amount: $250.00. + +# Encryption results: + +(1) Rot13: Nppbhag Ahzore: 456789012, Onax: YN Onax, Pneq Ahzore: 2109-8765-4321-0987, CnIzrag Zrgubq: Iveghny PerqvG Pneq, CnIzrag CyngsbEZ VQ: NOP456KLM, Gvzrfgnzc: 5356-32-48 47:68, Nzbhag: $583.33. +(2) Atbash: Zxlfmg Mfnyvi: 876543210, Yzmp: OZ Yzmp, Xziw Mfnyvi: 0123-4567-8901-2345, Kzbnvmg Nvgslw: Erigfzo Xivwr Xziw, Kzbnvmg Kozgulin RW: ZYX876CBA, Grnvhgznk: 7976-90-84 85:64, Znlfmg: $749.99. 
+(3) Polybius: 11 13 13 33 43 32 42 32 43 31 12 15 36 : 53 54 55 56 61 62 63 64 65 , 12 11 32 25 : 26 11 12 11 32 25 , 13 11 +36 14 32 43 31 12 15 36 : 65 64 63 62 - 61 56 55 54 - 53 66 65 64 - 63 62 61 56 , 34 11 51 31 15 32 42 31 15 42 22 33 14 : +44 23 36 42 43 11 26 13 36 15 14 23 42 13 11 36 14 , 34 11 51 31 15 32 42 34 26 11 42 16 33 36 31 23 14 : 11 12 13 53 54 +55 46 51 52 , 42 23 31 15 41 42 11 31 34 : 54 66 54 55 - 66 65 - 53 61 53 56 : 55 61 , 11 31 33 43 32 42 : $546166 .6666 . +(4) Vigenère: Swdpgnt: Jqyavsap, Eciop: Mutrlccy, Tkxe: 2022-03-12 14:30, Lqnavtop: 123 Plo Svcege, Zfhtcgc Uxivs qmsgcvgo ufsrtckzuu aeeixta nglr 5tj Axp qy 2022-03-13. +(5) Reverse: .00.052$ :tnuomA ,53:41 51-90-3202 :pmatsemit ,ZYX321CBA :DI mroftalP tnemyap ,draC tiderC lautriV :dohtem tnemyap ,4567-8901-2345-6789 :rebnuN draC ,knaB AL :knaB ,987654321 :rebnuN tnuoccA +(6) SwapPairs: cAotcnu mNuber: 214365879, aBnk: A Lank, aCrd Nmu:bre 8967-5423-1980-7564, aPymnet Mtohed: Vritaul Cerdti aCdr, aPymnet Ptaforml DI: BAC321YXZ, iTmsetamp: 3202-90-51 53:41, aAmount: $250.00. +(7) DualAvgCode: AAbbddnptvmosu MOtvlnacdfqs: 021324354657687999, ACAamojl: KMAA ACAamojl, BDaaqsc MEtvlnacdfqs: 99796857-46352413-02009979-68574635, OQaaxzlndfmosu LNdfsuginpce: UWhjssutvaakm BDqsdfcehjsu BDaaqsc, OQaaxzlndfmosu OQkmaasuegnpqsln HJCE: AAACBD021324WYXZZZ, SUhjlndfrtsuaalnoq: 13001324-0099-0246 0235:2446, AAlnnptvmosu: $134600.0000. +(8) ParityShift: Zbbntou Otlcds: 032547698, Czoj: MZ Czoj, Bzse Otlcds: 8967-4523-0189-6745, Qzxldou Lduine: Whsutzm Bsdehu Bzse, Qzxldou Qmzugsnl HE: ZCB032YXA, Uhldruzlj: 3132-18-04 05:24, Zlntou: $341.11. + +Table 5: Descriptions of Encryption Algorithms in CipherBank + +
AlgorithmDescription
Rot13A simple substitution cipher that shifts each letter 13 places forward in the alphabet. Encryption and decryption are identical, as applying the transformation twice restores the original text. Non-alphabetic characters remain unchanged.Additionally, Rot13 in CipherBank supports number encryption by shifting digits cyclically within the range 0-9.
AtbashA monoalphabetic substitution cipher where each letter is replaced with its counterpart from the reversed alphabet (e.g., A→Z, B→Y). Since the transformation is symmetric, encryption and decryption follow the same process CipherBank's Atbash implementation extends this to digits, where each number is replaced with its complement relative to 9 (e.g., 0→9, 1→8, ..., 9→0).
PolybiusA fractionating substitution cipher that replaces each letter with a two-digit coordinate from a 6×6 grid, mapping characters to numerical positions. Traditional Polybius squares typically use a 5×5 grid, supporting only letter encryption while merging I and J into the same cell, leading to ambiguity during decryption. To address this limitation and enable number encryption, CipherBank extends the Polybius square to a 6×6 grid, allowing both letters and numbers to be uniquely represented as coordinate pairs, increasing the cipher's complexity.
VigenèreA polyalphabetic substitution cipher that employs multiple shifting alphabets determined by a repeating key. Unlike monoalphabetic ciphers that use a single mapping, Vigenère utilizes multiple substitution tables, where each plaintext letter is shifted based on the corresponding key character's position in the alphabet. By default, the key is set to "ACL".This multi-table approach enhances security by distributing letter frequencies across different shifts, making it more resistant to frequency analysis. Decryption reverses this process by applying the inverse shifts dictated by the key. Unlike Rot13, it requires a key for both encryption and decryption.
ReverseA transposition cipher that reverses the order of all characters in the plaintext. Since it does not substitute characters, it preserves all information but alters the sequence, making it effective against naive attacks.
SwapPairsA transposition cipher that swaps adjacent characters in the plaintext. If the text length is odd, the final character remains unchanged. Decryption follows the same swapping process.
DualAvgCodeA custom transformation where each letter expands into two adjacent characters, shifting one position forward and one position backward in the ASCII table. Special cases (e.g., 'a', 'z', 'A', 'Z') are duplicated instead CipherBank extends this method to digits, where each number expands into two adjacent values (e.g., 2 → "13", 5 → "46"), increasing redundancy in the encrypted text.
ParityShiftA custom encryption method that shifts each letter one position forward or backward based on its ASCII parity. Even-ASCII characters shift forward, while odd-ASCII characters shift backward. For digits, ParityShift follows a similar rule, shifting numbers based on their parity (e.g., even numbers shift up, odd numbers shift down within 0-9).
WordShiftA transformation applied at the word level rather than the character level. Each word undergoes a left shift by a fixed number of positions, cycling characters within the word while preserving word spacing. Decryption reverses this shift, ensuring character order is restored within each word. By default, the shift is set to 3 positions.
+ +# Example B.1 + +# ## Role: + +Cryptography Analysis Expert. + +# ## Goals: + +Utilize the provided ciphertext and plaintext examples to analyze encryption patterns and decrypt new ciphertext. + +# ## Workflow: + +1. Analyze the provided ciphertext and plaintext examples to identify possible encryption patterns and rules. +2. Apply the decryption algorithm to the new ciphertext, attempt to decrypt, and verify the results. + +Figure 5: System Prompt + +# Example B.2 + +# Background: + +Decrypt the provided ciphertext and return the corresponding Plaintext following the given examples. + +Think step by step. + +Provide the Plaintext result in the format text, where text is the decrypted text. + +# Examples: + +* Example 1: + +- Ciphertext: + +Anzr: Rzvyl Wbuafb; Qngr bs Ovegu: Whyl 15, 1990; Cnffcbeg Ahzore: L987654321 + +- Plaintiff: + +Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321 + +* Example 2: + +- Ciphertext: + +Pnfr Ahzone: 2024-CF-001234; Pnfr Glcr: Gursg/Oernx-Va; Svyat Qngr: Bpgbore 19, 2024 + +- Plaintiff: + +Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024 + +* Example 3: + +- Ciphertext: + +gnk_vqragvsvre: GKA-2023-NOP456, gnk_erpbeqf: Irne: 2023, fgnghf: Cebprffrq, ershaq_vffhrq: 620.00 + +- Plaintiff: + +tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00 + +# Input: + +- Ciphertext: + +Yrqvn, na Nzrevpna, erfvqrva Ybf Natryrf. + +- Plaintiff: + +Figure 6: User Prompt (Rot13 - 3shot - Only Letter) + +# C Extended Experimental Results + +# C.1 Levenshtein Distance Evaluation from Main Results + +In the main text, most reported results are based on accuracy, which provides a binary assessment of decryption success. However, accuracy does not account for cases where decrypted outputs closely resemble the ground truth but contain minor errors. 
To provide a more fine-grained evaluation, we also compute Levenshtein similarity, which measures the edit distance between the model output and the correct plaintext. + +We define the Levenshtein similarity score as follows: + +$$ +S _ {\mathrm {l e v}} = 1 - \frac {d _ {\mathrm {l e v}} \left(P _ {\mathrm {p r e d}} , P _ {\mathrm {r e f}}\right)}{\max \left(\left| P _ {\mathrm {p r e d}} \right| , \left| P _ {\mathrm {r e f}} \right|\right)} \tag {1} +$$ + +where: + +- $d_{\mathrm{lev}}(P_{\mathrm{pred}}, P_{\mathrm{ref}})$ is the Levenshtein distance between the predicted and reference plaintexts. +- $|P_{\mathrm{pred}}|$ and $|P_{\mathrm{ref}}|$ denote the lengths of the predicted and reference plaintexts, respectively. + +This metric normalizes the edit distance by the length of the longer string, ensuring that similarity is measured on a scale from 0 to 1, where 1 represents an exact match and lower values indicate increasing deviations from the ground truth. + +The corresponding Levenshtein-based evaluation results for Table 2 are presented in Table 6 and Figure 7, offering deeper insights into models' decryption performance beyond strict accuracy metrics. + +One key observation is that most models achieve significantly higher Levenshtein similarity scores than their accuracy scores, indicating that even when decryption is incorrect, outputs often retain structural similarities to the original plaintext. This suggests that models capture some encryption patterns but struggle with full decryption, failing to consistently apply correct transformations. Notably, Claude-Sonnet-3.5 achieves near-perfect scores ( $>0.99$ for most ciphers), demonstrating its ability to minimize decryption errors while maintaining structural accuracy, making it the most reliable model overall. + +Interestingly, reasoning models such as DeepSeek-R1 and o1 exhibit a large gap between accuracy and Levenshtein similarity. 
Despite their moderate accuracy, their similarity scores + +often exceed 0.80, indicating that they frequently produce outputs that preserve much of the original structure but contain systematic errors. This suggests that reasoning models are better at capturing encryption logic but may struggle with precise execution, sometimes overcomplicating simpler tasks. + +Conversely, chat models such as DeepSeek-V3 and Llama-based models exhibit high variability, showing relatively low accuracy but moderate Levenshtein similarity (0.40 - 0.70). This indicates a tendency toward semantic approximation rather than strict decryption, where models generate linguistically plausible outputs that fail to adhere to precise encryption rules. + +Another notable trend is that transposition ciphers (e.g., Reverse, SwapPairs) yield lower Levenshtein similarity scores across all models, confirming that character reordering remains a major challenge. Unlike substitution ciphers, where models can rely on token-level mappings, transposition ciphers require strict positional tracking, which even the strongest models struggle to handle effectively. + +Overall, Levenshtein similarity results highlight fundamental differences in how chat and reasoning models approach decryption. Chat models rely more on semantic fluency, leading to structurally incorrect but coherent outputs, whereas reasoning models exhibit stronger pattern retention but occasionally fail due to overgeneralization or overthinking. These findings suggest that while LLMs can approximate decryption rules, achieving precise symbolic transformations remains a significant challenge, especially for positional-based ciphers. + +# C.2 Additional Analysis and Insights + +In this section, we present more detailed experimental results that complement the findings in the main text. 
These additional analyses provide further insights into model performance across different encryption schemes, highlighting trends, challenges, and specific cases where models excel or struggle. + +In the analysis of length sensitivity, plaintexts of different lengths can be seen in Figure 8. The impact of plaintext length on decryption performance is shown in Table 7 and Table 8, where we compare model accuracy on short vs. long texts. These results illustrate how increasing text length affects model performance, revealing notable differences in decryption robustness across various architectures + +The dataset used for the noise interference experi + +Table 6: Results on CipherBank(3-shot) Levenshtein similarity + +
ModelSubstitution CiphersTransposition CiphersCustom CiphersCipher Score
Rot13At ba shPolybiusVigenèreReverseSwapPairsDualAvgCodeParityShiftWordShiftLevenshtein Similarity
Open-source Chat Models
Mixtral-8x22B-v0.10.45420.37440.26940.40320.38100.47450.33300.38710.64010.4130
Qwen2.5-72B-Instruct0.55560.42880.30420.40160.40220.53080.37180.47860.84270.4796
Llama-3.1-70B-Instruct0.57760.43780.31320.44310.37750.55420.39900.45050.72880.4758
Llama-3.3-70B-Instruct0.57540.40540.13170.43970.24820.53750.38330.40960.45800.3988
DeepSeek-V30.91950.75940.45620.48440.90880.69750.42050.57310.88870.6787
Closed-source Models
GPT-4o-mini-2024-07-180.64590.49350.24630.44990.56640.60050.34180.41880.72580.4988
GPT-4o-2024-08-060.96030.58760.34450.53460.81700.79680.43040.58500.89400.6612
GPT-4o-2024-11-200.93400.60540.35110.53380.72770.67800.42350.55300.87150.6309
gemini-1.5-pro0.93090.50430.49690.52010.75360.73170.47840.57200.88190.6522
gemini-2.0-flash-exp0.96160.65670.48130.50640.89010.75690.44760.53080.86050.6769
Claude-Sonnet-3.5-10220.99840.99610.99550.71430.98930.92620.78740.98830.97120.9296
Reasoning Models
QwQ-32B-Preview0.24770.15910.12310.16600.14440.16660.15640.16450.30570.1815
DeepSeek-R10.99200.97610.93440.52270.73680.72130.83160.69280.84910.8063
gemini-2.0-flash-thinking0.96640.85710.90740.55110.85080.77880.42610.73530.87770.7723
o1-mini-2024-09-120.97570.98600.95630.54120.59590.52670.39540.69350.72360.7105
o1-2024-12-170.83200.99280.96400.56420.77250.92080.86530.65620.93350.8335
+ +![](images/683e3b08ad6cbbf27df8bf47c11d0fbb8fd2e73a78a8436de133e5b3ea35e2c1.jpg) +Figure 7: Model Performance - Accuracy vs. Levenshtein Similarity. + +Table 7: Decryption Performance on Short Texts + +
ModelSubstitution CiphersTransposition CiphersCustom CiphersCipher Score
Rot13AtbashPolybiusVigenèreReverseSwapPairsDualAvgCodeParityShiftWordShiftAccuracyavg
DeepSeek-V340.0027.834.351.7429.570.870.872.6111.313.24
DeepSeek-R180.0071.3053.040.8718.260.8735.6518.2612.1732.27
GPT-4o-2024-11-2034.7813.040.87021.741.740.871.7410.439.47
gemini-2.0-flash-exp42.614.351.740.8740.872.6101.748.7011.50
Claude-Sonnet-3.5-102286.0977.3969.573.4877.398.709.5763.4842.6148.70
gemini-2.0-flash-thinking52.1726.9633.912.6133.910.87013.9114.7819.90
o1-mini-2024-09-1264.3582.6165.22015.6506.6713.912.6133.77
o1-2024-12-1761.7489.5784.550.8723.4846.6761.7417.1735.8047.61
+ +Table 8: Decryption Performance on Long Texts + +
ModelSubstitution CiphersTransposition CiphersCustom CiphersCipher Score
Rot13AtbashPolybiusVigenèreReverseSwapPairsDualAvgCodeParityShiftWordShiftAccuracyavg
DeepSeek-V326.534.760.6809.520005.445.22
DeepSeek-R168.0348.9837.4104.76014.978.845.4420.94
GPT-4o-2024-11-2020.414.080012.240003.404.46
gemini-2.0-flash-exp30.612.041.36020.410.68002.726.42
Claude-Sonnet-3.5-102292.5278.9182.311.3663.955.442.7263.2740.1447.85
gemini-2.0-flash-thinking31.299.5212.24014.291.3602.724.768.47
o1-mini-2024-09-1231.9757.1432.6500002.72017.35
o1-2024-12-1758.5070.7561.110.688.1615.3841.58.6625.6634.38
+ +# Example C.1: Plaintiff Examples + +Short: James, American, is married to Susan. + +Long: John Smith, born on January 15, 1990, holds American nationality and resides at 123 Elm Street, Springfield, Illinois. His mobile number is +1-312-555-6789, and his ID card number is IDURITY1234567. He is married to Jane Smith, who can be reached at +1-312-555-6789. They have two children: Emily (16, high school) and Michael (12, middle school). Their address and contact information are the same. + +Short:Jimmy,GPA:3.71. + +Long: David Wilson, Masters in Data Science, GPA: 3.95, Expected Graduation: 2023, Courses: Big Data Analytics, Machine Learning, Data Visualization. + +Short: Medical Record Number: 987-654-321; Patient Name: James. + +Long: David Wilson, Masters in Data Science, GPA: 3.95, Expected Graduation: 2023, Courses: Big Data Analytics, Machine Learning, Data Visualization. + +Short: Lucas, lucas@ucc.company.com + +Long: Hank, Senior Developer, IT Department, Salary: \(95,000, Bonuses: \)5,000, Allowances: $2,000 (Remote Work), Performance Rating: A, Full-time, Start Date: 2020-03-15, Last Promotion: 2021-08-10, Benefits: Health Insurance, Retirement 5%, Training: \)1,500/year, Projects: Nexus, Zeta, Feedback: 4.5/5 + +Figure 8: Samples used for length sensitivity analysis + +# Example C.2: Noise Example + +Example 1: + +Origin: Card Number: 9876 5432 1098 7654 + +Noise: Card Numbr: 9876 54-32 1O98 765 four + +Example 2: + +Origin: Pay Date: 2023-05-15, Income: $75,000, Currency: USD, Bonus:$ 5,000 + +Noise: Pay Date (scheduled): 2023-05-15! Income approx: $75,000. Currency spec: USD, and Bonus = $5,000. + +Example 3: + +Predictions: Officer ID: P12345, Name: John, Position: Sergeant, Department: Homicide + +References: Officer Identification-No.: P12345, Full-Name: John (J.), Job-Title: Sergeant, Dept.: Homicide Squad. + +Figure 9: The samples used for the noise comparison experiments. + +Table 9: Decryption Performance without Noise + +
ModelRot13AtbashReverseSwapPairsParityShiftWordShiftAccuracyavg
Open-source Models
DeepSeek-V350.0031.5018.506.509.0017.0022.08
DeepSeek-R183.5077.5042.002.5020.005.5038.50
Closed-source Models
GPT-4o-2024-11-2049.5010.5013.5003.505.5013.75
Gemini-2.0-flash-exp45.007.5042.502.505.0015.5019.67
Claude-Sonnet-3.5-102292.5085.0062.5010.0070.0035.0059.17
Gemini-2.0-flash-thinking62.5033.5022.50017.501.5022.92
o1-mini-2024-09-1255.5067.505.00017.50024.25
+ +Table 10: Decryption Performance with Noise + +
ModelRot13AtbashReverseSwapPairsParityShiftWordShiftAccuracyavg
Open-source Models
DeepSeek-V38.5010.507.5000.501.504.75
DeepSeek-R133.5023.004.5001.50010.42
Closed-source Models
GPT-4o-2024-11-205.5004.500001.67
Gemini-2.0-flash-exp2.50002.50000.83
Claude-Sonnet-3.5-102250.5040.0020.002.5030.007.5025.08
Gemini-2.0-flash-thinking30.5019.003.5002.5009.25
o1-mini-2024-09-1215.0020.000005.83
+ +iments can be found in Figure 9. Detailed results on the impact of noise on decryption performance are presented in Table 9 and Table 10, comparing model performance on short and long plaintexts under noisy conditions. These findings highlight the varying degrees of resilience across models, with some maintaining reasonable performance under noise while others degrade significantly. + +In the analysis of the impact of encryption scope + +on decryption performance, the test prompts used are shown in Figure 10. Detailed results are presented in Table 11. This analysis compares model performance when encrypting only letters versus encrypting both letters and numbers. The results highlight how different models handle the increased complexity introduced by number encryption, showing varying degrees of adaptability. While some models maintain relatively stable per + +# Example C.3 + +# Background: + +Decrypt the provided ciphertext and return the corresponding Plaintext following the given examples. + +Think step by step. + +Provide the Plaintext result in the format text, where text is the decrypted text. + +# Examples: + +* Example 1: + +- Ciphertext: + +Mznv: Vnrob Qlsmhlm; Wzgv lu Yrigs: Qfob 84, 8009; Kzhhklig Mfnyvi: B012345678 + +- Plaintiff: + +Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321 + +* Example 2: + +- Ciphertext: + +Xzhv Mfnyvi: 7975-KH-998765; Xzhv Gbkv: Gsvug/Yivzp-Rm; Urormt Wzgv: Lxglyvi 80, 7975 + +- Plaintiff: + +Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024 + +* Example 3: + +- Ciphertext: + +gzc_rwvmgrurvi: GCM-7976-ZYX543, gzc_ivxliwh: bvzi: 7976, hgzgfh: Kilxvhhvw, ivufmw_rhhfvw: + +379.99 + +- Plaintiff: + +tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00 + +# Input: + +- Ciphertext: + +Wvzm slowh gsv kzhhklig mfnyvi Z87654321. 
+ +- Plaintiff: + +Figure 10: User Prompt (Atbash - 3shot - Letter & Number) + +Table 11: Impact of Encryption Scope on Decryption Performance + +
ModelRot13AtbashPolybiusDualAvgCodeParityShiftAccuracyavg
Open-source Models
DeepSeek-V368.94/23.3224.02/14.6419.35/6.013.51/011.31/025.23 / 8.79
DeepSeek-R159.10/43.0563.19/23.0239.21/43.2337.36/013.05/0.7642.38 / 22.01
Closed-source Models
GPT-4o-2024-11-2027.53/010.08/00/02.54/02.67/08.56 / 0
gemini-2.0-flash-exp47.54/07.50/2.507.50/5.050/02.67/013.04 / 1.51
Claude-Sonnet-3.5-102292.50/50.0087.56/27.5365.00/32.2515.00/062.54/17.3564.52 / 25.43
gemini-2.0-flash-thinking35.00/2.650/2.540/10.000/02.50/07.50 / 3.04
o1-mini-2024-09-1250.00/32.5972.57/35.0040.00/42.530/07.50/0.7634.01 / 22.18
+ +Note: Values before the $\prime /{}^{\prime }$ indicate performance when encrypting letters only, while values after the ${}^{\prime }/{}^{\prime }$ represent performance when encrypting both letters and numbers. + +formance, others exhibit significant drops when required to decrypt mixed alphanumeric ciphertexts. + +For the enhanced prompt template, please refer to Figures 11-19, while more detailed experimental results can be found in Table 12. + +# C.3 Impact of Plaintext Source on Decryption Performance + +To assess how plaintext characteristics influence decryption performance, we compare results on synthetically generated privacy-sensitive data ver + +# Example C.4 + +# Background: + +Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples. + +Think step by step. + +Provide the Plaintext result in the format text, where text is the decrypted text. + +# Algorithm Flow: + +Uses the Caesar cipher with a fixed shift of 13 positions. For each letter in the Plaintext, shift it forward by 13 positions in the alphabet to produce the Ciphertext. + +# Examples: + +* Example 1: + +- Ciphertext: + +Anzr: Rzvyl Wbuafb; Qngr bs Ovegu: Whyl 15, 1990; Cnffcbeg Ahzore: L987654321 + +- Plaintiff: + +Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321 + +* Example 2: + +- Ciphertext: + +Pnfr Ahzore: 2024-CF-001234; Pnfr Glcr: Gursg/Oernx-Va; Svyvat Qngr: Bpgbore 19, 2024 + +- Plaintiff: + +Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024 + +* Example 3: + +- Ciphertext: + +gnk_vqragvsvre: GKA-2023-NOP456, gnk_erpbeqf: Irne: 2023, fgnghf: Cebprffrq, ershaq_vffhrq: 620.00 + +- Plaintiff: + +tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00 + +# Input: + +- Ciphertext: + +Yrqvn, na Nzrevpna, erfvqrf va Ybf Natryrf. 
+ +- Plaintiff: + +Figure 11:Enhanced Prompt (ROT13 - 3shot - Letter) + +# Example C.5 + +# Background: + +Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples. + +Think step by step. + +Provide the Plaintext result in the format text, where text is the decrypted text. + +# Algorithm Flow: + +Uses the Atbash cipher. Each letter in the \*\*Plaintext\*\* is replaced with its reverse counterpart in the alphabet. + +# Examples: + +* Example 1: + +- Ciphertext: + +Mznv: Vnrob Qlsmhlm; Wzgv lu Yrigs: Qfob 15, 1990; Kzhhklig Mfnyvi: B987654321 + +- Plaintiff: + +Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321 + +* Example 2: + +- Ciphertext: + +Xzhv Mfnyvi: 2024-KH-001234; Xzhv Gbkv: Gsvug/Yivzp-Rm; Urormt Wzgv: Lxglyvi 19, 2024 + +- Plaintiff: + +Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024 + +* Example 3: + +- Ciphertext: + +gzc_rwvmgrurvi: GCM-2023-ZYX456, gzc_ivxliwh: bvzi: 2023, hgzgfh: Kilxvhhvw, ivufmw_rhhfvw: + +620.00 + +- Plaintiff: + +tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00 + +# Input: + +- Ciphertext: + +Ovwrz,zm Znvirxzm, ivhrwhrm Olh Zmtvovh. + +- Plaintiff: + +Figure 12:Enhanced Prompt (Atbash - 3shot - Letter) + +# Example C.6 + +# Background: + +Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples. + +Think step by step. + +Provide the Plaintext result in the format text, where text is the decrypted text. + +# Algorithm Flow: + +Uses the Polybius cipher. Each letter in the $^{**}$ Plaintext\*\* is mapped to a pair of coordinates in the Polybius square, forming the $^{**}$ Ciphertext\*. 
+ +# Examples: + +* Example 1: + +- Ciphertext: + +32 11 31 15 : 15 31 23 26 51 24 33 22 32 41 33 32 ; 14 11 42 15 33 16 12 23 36 42 22 : 24 43 26 51 15 , 19 + +90;3411414134333642324331121536:51987654321 + +- Plaintiff: + +Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321 + +* Example 2: + +- Ciphertext: + +13 11 41 15 32 43 31 12 15 36 : 2 0 2 4 - 34 41 - 0 0 1 2 3 4 ; 13 11 41 15 42 51 34 15 : 42 22 15 16 42 / 12 + +36 15 11 25 - 23 32 ; 16 23 26 23 32 21 14 11 42 15 : 33 13 42 33 12 15 36 19 , 20 24 + +- Plaintiff: + +Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024 + +* Example 3: + +- Ciphertext: + +42 11 46 _ 23 14 15 32 42 23 16 23 15 36 : 42 46 32 _ 20 23 - 11 12 13 456 , 42 11 46 _ 36 15 13 33 36 14 + +41:51 15 11 36:2023,41 42 11 42 43 41:34 36 33 13 15 41 41 15 14,36 15 16 43 32 14_23 41 41 43 15 14:620. + +00 + +- Plaintiff: + +tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00 + +# Input: + +- Ciphertext: + +26 15 14 23 11 , 11 32 11 31 15 36 23 13 11 32 , 36 15 41 23 14 15 41 23 32 26 33 41 11 32 21 15 26 15 41 . + +- Plaintiff: + +Figure 13: Enhanced Prompt (Polybius - 3shot - Letter) + +# Example C.7 + +# Background: + +Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples. + +Think step by step. + +Provide the Plaintext result in the format text, where text is the decrypted text. + +# Algorithm Flow: + +Uses the Vigenère cipher. Each letter in the **Plaintext** is shifted by the corresponding letter in the **Key** to produce the **Ciphertext**. 
+ +# Examples: + +* Example 1: + +- Ciphertext: + +Nexe: Eotla Jqsnuzn; Dcee zf Miteh: Jwwy 15, 1990; Pcdsrzrv Nwbgc: J987654321 + +- Plaintiff: + +Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321 + +* Example 2: + +- Ciphertext: + +Ccde Yuomet: 2024-PU-001234; Naup Vjpg: Vsehe/Dcecv-Ky; Qintni Dcee: Oeeodpr 19, 2024 + +- Plaintiff: + +Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024 + +* Example 3: + +- Ciphertext: + +tci_koepeihtet: VIN-2023-CMC456, tci_tpcqcdu: jecc: 2023, dtceuu: Rcoepsupd, rgqupo_kdswpd: 620.00 + +- Plaintiff: + +tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00 + +# Input: + +- Ciphertext: + +Lgoic, cy Cxettccy, ceutgdg ky Nzs Lniplgd. + +- Plaintiff: + +Figure 14: Enhanced Prompt (Vigenère - 3shot - Letter) + +# Example C.8 + +# Background: + +Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples. + +Think step by step. + +Provide the Plaintext result in the format text, where text is the decrypted text. + +# Algorithm Flow: + +Reverses the $^{**}$ Plaintiff\*\* to create the $^{**}$ Ciphertext\*\*. 
+ +# Examples: + +* Example 1: + +- Ciphertext: + +123456789Y :rebmuN tropssaP ;0991 ,51 yluJ :htriB fo etaD ;nosnhoJ ylimE :emaN + +- Plaintiff: + +Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321 + +* Example 2: + +- Ciphertext: + +4202,91 rebotcO :etaD gniliF ;nI-kaerB/tfehT :epyT ESA C;432100-SP-4202 :rebmuN ESA C + +- Plaintiff: + +Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024 + +* Example 3: + +- Ciphertext: + +}00.026 :deussi_dnufer,dessecorP:sutats,3202:raey{sdrocer_xat,654CBA-3202-NXT:reifitnedi_xat + +- Plaintiff: + +tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00 + +# Input: + +- Ciphertext: + +.selegnAsoL ni sediser,naciremAna ,aideL + +- Plaintiff: + +Figure 15:Enhanced Prompt (Reverse -3shot-Letter) + +# Example C.9 + +# Background: + +Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples. + +Think step by step. + +Provide the Plaintext result in the format text, where text is the decrypted text. + +# Algorithm Flow: + +For each pair of letters in the $^{**}$ Plaintext\*\*, their positions are swapped to produce the $^{**}$ Ciphertext\*\*. If the number of letters is odd, the last letter remains in its original position. 
+ +# Examples: + +* Example 1: + +- Ciphertext: + +aNem :mEli yoJnhos;nD ta efoB riht :uJy11 ,51 99;0P sapsro tuNbmre :9Y78563412 + +- Plaintiff: + +Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321 + +* Example 2: + +- Ciphertext: + +aCesN mueb:r2 20-4SP0-1032;4C sa eyTep :hTfe/trBae-knI ;iFilgnD ta:eO tcbore1 ,92 204 + +- Plaintiff: + +Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024 + +* Example 3: + +- Ciphertext: + +at_xdineititre:XT-N0232A-CB54,6t xar_cerosd :yae:r2 20,3s atut:sP orecssde ,erufdni_sseu:d6 020.0 + +- Plaintiff: + +tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00 + +# Input: + +- Ciphertext: + +eLid,aa nmArecina ,erised sniL soA gnlese. + +- Plaintiff: + +Figure 16: Enhanced Prompt (SwapPairs - 3shot - Letter) + +# Example C.10 + +# Background: + +Decrypt the provided Ciphertext and return the corresponding Plaintext following the given algorithm flow and examples. + +Think step by step. + +Provide the Plaintext result in the format text, where text is the decrypted text. + +# Algorithm Flow: + +This encryption method converts each letter of the **Plaintext** into two letters in the **Ciphertext** such that the average of their ASCII values equals the ASCII value of the original letter. 
+ +# Examples: + +* Example 1: + +- Ciphertext: + +MOaalndf: DFlnhjkmxz IKnpgimortnpmo; CEaasudf npeg AChjqssugi: IKtvkmxz 15, 1990; OQaartrtoqnacdfqx: XZ987654321 + +- Plaintiff: + +Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321 + +* Example 2: + +- Ciphertext: + +BDaartdf MOtvlnacdfqs: 2024-OQRT-001234; BDaartdf SUxzoqdf: SUgidfgsu/ACqsdfaajl-HJmo; h CEaesudf: NPbdsunpacdfqs 19, 2024 + +- Plaintiff: + +Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024 + +* Example 3: + +- Ciphertext: + +suaawy_hjcedfmosuhjeghjdfqs: SUWYMO-2023-AAACBD456, suaawy_qsdfbnpqscert: xzdfaaqs: 2023, + +rtsuasutvrt: OQqsnpbbdftrtdfce, qsdfgtvmoce_hjrttrtvdfce: 620.00 + +- Plaintiff: + +tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00 + +# Input: + +- Ciphertext: + +KMdfcehjaa, aamo AAlndfqshjbdaamo, qsdfrthjcedfrh jhmo KMnprt AAmofhdfkmdfrt. + +- Plaintiff: + +Figure 17: Enhanced Prompt (DualAvgCode - 3shot - Letter) + +# Example C.11 + +# Background: + +Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples. + +Think step by step. + +Provide the Plaintext result in the format text, where text is the decrypted text. + +# Algorithm Flow: + +For each letter in the $^{**}$ Plaintext\*\*: - If the ASCII value is even, add 1 to it to get the corresponding character in the $^{**}$ Ciphertext\*. - If the ASCII value is odd, subtract 1 to get the new character in the $^{**}$ Ciphertext\*. 
+ +# Examples: + +* Example 1: + +- Ciphertext: + +Ozld: Dlhmax Kniorno; Ezud ng Chsui: Ktmx 15, 1990; Qzrrqnsu Otlcds: X987654321 + +- Plaintiff: + +Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321 + +* Example 2: + +- Ciphertext: + +Bzrd Otlcds: 2024-QR-001234; Bzrd Uxqd: Uidgu/Csdzj-Ho; Ghmhof Ezud: Nbuncds 19, 2024 + +- Plaintiff: + +Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024 + +* Example 3: + +- Ciphertext: + +uzy_hedoughghds: UYO-2023-ZCB456, uzy_sdbnser: xdzs: 2023, ruzutr: Qsnbdrrde, sdgtoe_hrrte: 620.00 + +- Plaintiff: + +tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00 + +# Input: + +- Ciphertext: + +Mdehz, zo Zldshbzo, sdrhedr ho Mnr Zofdmdr. + +- Plaintiff: + +Figure 18: Enhanced Prompt (ParityShift - 3shot - Letter) + +# Example C.12 + +# Background: + +Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples. + +Think step by step. + +Provide the Plaintext result in the format text, where text is the decrypted text. + +# Algorithm Flow: + +The algorithm splits the $^{**}$ Plaintext\*\* into words based on spaces. Each word is then individually encrypted using the Caesar cipher, resulting in the $^{**}$ ciphertext\*\*. 
+ +# Examples: + +* Example 1: + +- Ciphertext: + +e:Nam lyEmi nson;Joh eDat fo th:Bir yJul 15,0;199 sportPas ber:Number 7654321Y98 + +- Plaintiff: + +Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321 + +* Example 2: + +- Ciphertext: + +eCas ber:Num 4-PS-001234;202 eCas e:Typ ft/Break-In;The ingFil e:Dat oberOct 19, 4202 + +- Plaintiff: + +Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024 + +* Example 3: + +- Ciphertext: + +identifier:tax -2023-ABC456,TXNRecords:tax ar:ye 3,202 tus:sta cessed,Pro und_iuied:ref .00620 + +- Plaintiff: + +tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00 + +# Input: + +- Ciphertext: + +ia,Led na rican,Ame idesres ni Los eles.Ang + +- Plaintiff: + +Figure 19: Enhanced Prompt (WordShift - 3shot - Letter) + +Table 12: Results on CipherBank(Enhanced Prompt) + +
ModelSubstitution CiphersTransposition CiphersCustom CiphersCipher Score
Rot13AtbashPolybiusVigenèreReverseSwapPairsDualAvgCodeParityShiftWordShiftAccuracyavg
Open-source Chat Models
Mixtral-8x22B-v0.10.760000.3802.670.380.380.51
Qwen2.5-72B-Instruct12.609.1600002.290.381.532.88
Llama-3.1-70B-Instruct2.671.15001.530.381.15000.76
Llama-3.3-70B-Instruct4.581.5300.381.1501.15000.98
DeepSeek-V341.6027.860.380.3865.955.3412.660.765.1717.79
Closed-source Models
GPT-4o-mini-2024-07-1821.7619.0800.384.3900005.07
GPT-4o-2024-08-0645.4224.0500.7651.538.401.911.1510.3115.95
GPT-4o-2024-11-2045.4241.980053.638.023.821.159.5418.17
gemini-1.5-pro63.695.730.760.3814.122.670.381.9110.6911.15
gemini-2.0-flash-exp45.0422.902.290.3846.564.583.8201.1514.08
Claude-Sonnet-3.5-102292.7582.0678.242.4879.399.732.4862.0244.8550.44
Reasoning Models
QwQ-32B-Preview1.913.052.670002.670.380.381.23
DeepSeek-R188.3786.5472.730.7646.9675.0173.1774.421.5157.72
gemini-2.0-flash-thinking37.9819.0910.50055.344.964.770.386.1115.46
o1-mini-2024-09-1254.2072.1450.00.7611.0718.7047.3349.627.2534.56
+ +sus externally sourced structured text (e.g., quotes from Shakespeare's works). The structured text exhibits greater linguistic familiarity, while the privacy-sensitive data represents real-world encryption needs, lacking inherent semantic patterns. + +As shown in Table 13 and Table 14, models generally perform better on structured text, suggesting that they leverage linguistic priors rather than strictly following decryption rules. When encountering encrypted text with recognizable patterns, models tend to shortcut reasoning, aligning decoded fragments with plausible linguistic structures instead of strictly adhering to learned transformation rules. Conversely, for less structured, domain-specific text, models struggle to infer decryption patterns, reinforcing the advantage of CipherBank's privacy-sensitive dataset, which forces models to engage in independent reasoning rather than rely on pretraining biases. + +# D Error Analysis + +# D.1 Error Classification + +This section defines the error categories observed in model decryption outputs. These classifications help identify systematic failure patterns and provide insights into how models approach cryptographic reasoning. + +(A) Omission/Insertion: The model output contains missing or extra characters, words, or punctuation compared to the reference plaintext. These errors indicate incomplete decryption or unintended modifications, leading to + +partial but inaccurate results. + +- (B) Name Decryption Error: The decryption result is correct except for the name part, which remains incorrect or partially distorted. This suggests challenges in handling named entities, possibly due to memorization effects or entity-based biases. +- (C) Semantic Inference: The model makes errors based on semantic reasoning rather than strictly following decryption rules. Instead of decoding symbols precisely, the model hallucinates plausible but incorrect outputs that fit the general meaning of the sentence. 
This indicates a tendency to prioritize linguistic coherence over strict decryption fidelity. +- (D) Reorganization: The output preserves the exact meaning of the reference plaintext but rearranges the sentence structure. This suggests that the model prioritizes fluency over strict character-level fidelity, leading to errors in cryptographic tasks where precision is essential. +- (E) Reasoning Failure: The model output is significantly different from the reference, and decryption is essentially unsuccessful. This suggests a fundamental failure in identifying encryption patterns, leading to outputs that bear little resemblance to the expected plaintext. This category includes cases where the model fails to infer transformation rules or apply correct decryption strategies. + +Table 13: Decryption Performance on Privacy-Sensitive Data + +
ModelRot13AtbashPolybiusVigenèreReverseSwapPairsDualAvgCodeParityShiftWordShiftAccuracyavg
Open-source Models
DeepSeek-V324.3415.6415.70033.723.5104.3515.6412.54
DeepSeek-R157.8871.0271.554.3533.574.35012.718.7029.35
Closed-source Models
GPT-4o-2024-11-2021.7421.740030.438.700013.0410.63
Gemini-2.0-Flash-Exp47.834.354.35052.1704.354.3513.0414.49
Claude-Sonnet-3.5-102286.9678.2665.224.3591.3013.044.3552.1747.8349.28
Gemini-2.0-Flash-Thinking39.134.350060.87004.3530.4315.46
o1-Mini-2024-09-1260.8786.9669.5708.70013.0417.394.3528.99
+ +Table 14: Decryption Performance on Structured Text + +
ModelRot13AtbashPolybiusVigenèreReverseSwapPairsDualAvgCodeParityShiftWordShiftAccuracyavg
Open-source Models
DeepSeek-V376.1224.0315.70052.1729.40012.7155.1329.47
DeepSeek-R184.5185.041007.5979.108.708.7015.6430.4346.63
Closed-source Models
GPT-4o-2024-11-2078.2639.134.35086.9621.7404.3543.4830.92
Gemini-2.0-Flash-Exp86.9613.044.35086.968.70017.3943.4828.99
Claude-Sonnet-3.5-102291.3095.6595.654.3510052.178.7078.2695.6569.08
Gemini-2.0-Flash-Thinking86.9613.048.70069.5717.390052.1727.54
o1-Mini-2024-09-1282.6195.6578.26060.874.3513.0417.3943.4843.96
+ +- (F) Other: Miscellaneous errors that do not fit into the defined categories. + +This classification framework provides a structured approach to analyzing decryption errors, helping to pinpoint systematic weaknesses and guide future improvements in cryptographic reasoning models. + +# D.2 Examples of Different Error Types + +To further illustrate the types of decryption errors encountered in our evaluation, we provide concrete examples corresponding to each error category. These cases demonstrate how models fail in various aspects of decryption, including omission-insertion, name decryption errors, semantic inference, reorganization, reasoning failures, and other anomalies. Example D.1 - D6 showcase representative examples of each error type. + +# D.3 Detailed Error Distribution Tables + +Tables 15-20 present a detailed breakdown of error distributions across different encryption algorithms for the six selected models. From these results, we identify several common trends and model-specific differences. + +Challenges in Name Decryption and Symbolic Reasoning. Across all models, name decryption errors remain prevalent, particularly in Atbash and + +Polybius, indicating persistent difficulties in handling entity-based transformations. Additionally, models struggle with key-based and transposition ciphers such as Vigenère and SwapPairs, suggesting limitations in tracking multi-step transformations and generalizing decryption strategies. + +Semantic Overreliance vs. Overthinking in Decryption. Chat models often exhibit semantic inference errors, where decrypted outputs align with linguistic patterns rather than encryption rules. In contrast, reasoning models tend to overthink simple tasks, leading to unnecessary self-correction loops that degrade performance in straightforward ciphers like Reverse. + +Structural Alignment and Insertion Errors. 
Frequent omission and insertion errors in WordShift and Reverse ciphers highlight difficulties in preserving character order. This suggests that models rely on semantic priors rather than strict symbolic reasoning, leading to misaligned outputs. + +# Key Takeaways: + +- Chat models (Claude, Gemini) perform well in substitution ciphers but struggle with complex rule-based encryption. +- Reasoning models (DeepSeek-R1, o1) maintain better structural accuracy but underperform in transposition-based and key-dependent ciphers. + +# Example D.1: Error Type: Omission/Insertion + +Example 1: + +Predictions: Card Number: ID 1245-6789-0123 + +References: Clark holds the ID Card Number 1245-6789-0123. + +Example 2: + +Predictions: Card Number: ID 1245-6789-0123 + +References: Clark holds the ID Card Number 1245-6789-0123. + +Example 3: + +Predictions: Salary Amount: $67,000; Pay Date: 2023-10-25 + +References: Salary Amount: $67,000, Pay Date: 2023-10-25. + +# Example D.2: Error Type: Name Decryption Error + +Example 1: + +Predictions: Learn, an American, inside on Los Angeles. + +References: Ledia, an American, resides in Los Angeles. + +Example 2: + +Predictions: Individual ID: A1234567; Name: John Doe; Age: 34; Gender Identity: Cisgender 16k11.2 Location, Country State Citizenship. + +References: Individual ID: A1234567; Name: Jane Doe; Age: 34; Genetic Testing: Chromosome 16p11.2 Deletion, Celiac Disease Predisposition. + +Example 3: + +Predictions: Handed lost the passport Number A12345678. + +References: Dean holds the passport number A12345678. + +# Example D.3: Error Type: Semantic Inference + +Example 1: + +Predictions: Jessica Brown, Bachelor of Biology, GPA: 3.9, Graduated 2023, Skills: Genetics, Microbiology, Ecology, Bioinformatics. + +References: Jessica Brown, Bachelors in Biology, GPA: 3.9, Graduated 2023, Courses: Genetics, Microbiology, Ecology, Biochemistry. 
+ +Example 2: + +Predictions: Patent-pending design specification PR2023_KURITY, Company Z, including batch production requirements. + +References: Patent-pending design specification PR2023_KURITY, Company Z, including batch performance analysis. + +Example 3: + +Predictions: Office ID: P12345, Name: John, Position: Manager, Department: Finance. + +References: Officer ID: P12345, Name: John, Position: Sergeant, Department: Homicide. + +# Example D.4: Error Type: Reorganization + +# Example 1: + +Predictions: Name: Ledia; Nationality: American; Address: Los Angeles + +References: Ledia, an American, resides in Los Angeles. + +# Example 2: + +Predictions: Passport Number: A12345678; Holder Name: Dean + +References: Dean holds the passport number A12345678. + +# Example 3: + +Predictions: Name: Peter; Date of Birth: April 23, 1985; Passport Number: X123456789 + +References: Peter was born on April 23, 1985, and carries a passport with the number X123456789. + +# Example D.5: Error Type: Reasoning Failure + +# Example 1: + +Predictions: Address: 123 Main St, Apt 4B, New York, NY, Zip Code: 10001, Phone: 555-1234, Unit: 101, Floor: 2. + +References: Officer: Lisa Grant, Sergeant, Downtown Precinct, Patrol Car: VG2301, Equipment: Radar Gun, Model: RGX501, Weapon: Taser X2. + +# Example 2: + +Predictions: Welcome, once more, securely within. + +References: Ledia, an American, resides in Los Angeles. + +# Example 3: + +Predictions: Passport ID: 123456789; Expiry Date: Emily, 38, Issued By Authority, Renewal By Agency. Valid Until 2025, Expiry Passport. + +References: Patient ID: MRN456789; Medical History: Emily, 38, diagnosed with asthma, treated with inhalers. Allergic to dust mites, pollen. + +# Example D.6: Error Type: Other + +# Example 1: + +Predictions: CookieID12345 maintain login status forUserID98765 on www.example.com, facilitating seamless access. 
Analyzing recordsUserID98765's engagement, deducting 500 page views and a click-through rate of $4.5\%$ across the session. + +References: CookieID12345 maintains login status forUserID98765 on www.example.com, facilitating seamless access. Analytics tracksUserID98765's engagement, documenting 500 page views and a click-through rate of $4.5\%$ across the session. + +# Example 2: + +Predictions: Code: Coordinates: Latitude Longitude: $38.251^{\circ}\mathrm{N}$ -85.754 W, Latitude Longitude: 34.091 N, -118.493 W. + +References: Base Distribution: North Plains Base: $38.251^{\circ}\mathrm{N}$ , $-85.754^{\circ}\mathrm{W}$ , East Valley Site: $34.091^{\circ}\mathrm{N}$ , $-118.493^{\circ}\mathrm{W}$ . + +# Example 3: + +Predictions: Name: Alex Smith; Salary: $87,500; Pay Frequency: Biweekly; Position: Software Developer; Employee ID: EID-257846; Department: IT. + +References: Name: Alex Smith, Salary: $87,500, Pay Frequency: Biweekly, Position: Software Developer, Employee ID: EID-257846, Department: IT. + +Table 15: Error Type Percentages for Different Algorithms in Claude-Sonnet-3.5-1022 Model + +
AlgorithmOmission/InsertionName Decryption ErrorError Types
Semantic InferenceReorganizationReasoning FailureOther
Rot1333.3351.850.0011.113.700.00
Atbash15.7978.950.003.510.001.75
Polybius42.6245.900.0011.480.000.00
Vigenère2.7332.425.083.5256.250.00
Reverse39.2448.100.005.066.331.27
SwapPairs15.9838.522.052.8738.112.46
DualAvgCode6.8839.688.502.4341.301.21
ParityShift19.7970.834.173.122.080.00
WordShift51.9522.082.608.4412.342.60
+ +Table 16: Error Type Percentages for Different Algorithms in DeepSeek-R1 Model + +
AlgorithmOmission/InsertionName Decryption ErrorError Types
Semantic InferenceReorganizationReasoning FailureOther
Rot1340.0030.004.2921.431.432.86
Atbash42.5924.070.9329.630.002.78
Polybius48.6317.120.6821.928.902.74
Vigenère4.6018.012.682.3071.650.77
Reverse25.6419.661.7145.306.411.28
SwapPairs9.2025.293.072.3058.621.53
DualAvgCode25.6322.613.5228.6419.100.50
ParityShift7.0229.396.583.9552.190.88
WordShift29.1722.922.0825.4220.000.42
+ +Table 17: Error Type Percentages for Different Algorithms in DeepSeek-V3 Model + +
AlgorithmOmission/InsertionName Decryption ErrorError Types
Semantic InferenceReorganizationReasoning FailureOther
Rot1310.7355.9315.825.0811.860.56
Atbash8.0738.127.173.5941.261.79
Polybius5.4712.112.342.7376.950.39
Vigenère0.3820.772.690.7774.231.15
Reverse21.5040.195.6113.5518.220.93
SwapPairs1.9218.392.680.3876.250.38
DualAvgCode3.0712.643.452.6877.780.38
ParityShift1.9328.573.860.7764.480.39
WordShift27.8029.464.5617.0120.330.83
+ +Table 18: Error Type Percentages for Different Algorithms in gemini-1.5-pro Model + +
AlgorithmOmission/InsertionName Decryption ErrorError Types
Semantic InferenceReorganizationReasoning FailureOther
Rot1312.9858.020.765.3422.140.76
Atbash1.1515.003.080.7778.851.15
Polybius4.2117.243.071.9271.651.92
Vigenère2.2914.893.440.7678.630.00
Reverse20.8533.198.9410.2126.380.43
SwapPairs6.4925.571.911.5363.361.15
DualAvgCode2.6813.034.601.9277.390.38
ParityShift3.0828.463.080.3864.230.77
WordShift34.2524.202.7418.7219.630.46
+ +- All models show high name decryption errors and reasoning failures in Vigenère and SwapPairs, highlighting gaps in symbolic reasoning and long-term dependency tracking. + +These observations reveal that no single model excels across all ciphers, emphasizing the need for advancements in structured reasoning and symbolic manipulation for decryption tasks. Future + +Table 19: Error Type Percentages for Different Algorithms in o1-mini Model + +
AlgorithmOmission/InsertionName Decryption ErrorError Types
Semantic InferenceReorganizationReasoning FailureOther
Rot1326.9538.3013.4817.021.422.84
Atbash37.3531.337.2316.876.021.20
Polybius30.9432.371.4425.188.631.44
Vigenère0.0021.4310.713.5764.290.00
Reverse12.7029.108.2032.3817.210.41
SwapPairs1.919.541.530.0086.640.38
DualAvgCode0.0018.520.003.7077.780.00
ParityShift4.5534.303.314.9652.480.41
WordShift11.5828.574.635.7949.030.39
+ +Table 20: Error Type Percentages for Different Algorithms in o1 Model + +
AlgorithmOmission/InsertionName Decryption ErrorError Types
Semantic InferenceReorganizationReasoning FailureOther
Rot1316.1928.574.765.7143.810.95
Atbash29.0949.095.4510.913.641.82
Polybius40.9128.796.0610.6112.121.52
Vigenère4.6236.151.541.1556.150.38
Reverse16.1425.563.5914.3538.571.79
SwapPairs5.2631.585.265.2652.630.00
DualAvgCode24.6233.853.082.3135.380.77
ParityShift4.0426.774.552.0262.120.51
WordShift30.8824.262.9418.3821.322.21
+ +# improvements could focus on: + +- Minimizing the Impact of Semantic Bias in Logical Inference: Cryptographic reasoning tasks often necessitate abstract rule extraction rather than reliance on semantic interpretation. An excessive dependence on linguistic priors can impede the model's ability to identify underlying structural transformations, resulting in systematic errors. Future advancements should focus on reducing semantic interference to improve the extraction of abstract logical patterns. +- Enhancing Comparative Reasoning for Pattern Recognition: While many decryption tasks in CipherBank are straightforward for humans, models frequently fail to derive correct transformation rules from provided exemplars. Strengthening contrastive reasoning mechanisms can enable models to better differentiate encryption structures, facilitating more effective pattern recognition and decryption. +- Addressing Overthinking in Model Reasoning: Experimental results indicate that reasoning models exhibit superior performance on complex tasks but underperform on sim + +pler problems. Analysis of inference trajectories reveals a tendency toward recursive self-evaluation, where models continuously revise their approach, even when a straightforward solution is available. For example, in the Reverse cipher, models occasionally attempt unnecessarily complex reasoning paths instead of applying direct positional transformations. Mitigating such overthinking behaviors could enhance efficiency and robustness in logical reasoning. + +Addressing these limitations will bridge the gap between linguistic fluency and structured cryptographic reasoning, making LLMs more robust in real-world encryption scenarios. 
\ No newline at end of file diff --git a/2025/CipherBank_ Exploring the Boundary of LLM Reasoning Capabilities through Cryptography Challenge/images.zip b/2025/CipherBank_ Exploring the Boundary of LLM Reasoning Capabilities through Cryptography Challenge/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..f6c58af02b11fffd296ec3c3d66d570e8d53b99e --- /dev/null +++ b/2025/CipherBank_ Exploring the Boundary of LLM Reasoning Capabilities through Cryptography Challenge/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5e11ad3fb4478319bacd134121d2ee181d8121cdc09be8386e3a777b1ad30d7 +size 2052010 diff --git a/2025/CipherBank_ Exploring the Boundary of LLM Reasoning Capabilities through Cryptography Challenge/layout.json b/2025/CipherBank_ Exploring the Boundary of LLM Reasoning Capabilities through Cryptography Challenge/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..3777774dcad1431aef6be9974685b0695694b3fc --- /dev/null +++ b/2025/CipherBank_ Exploring the Boundary of LLM Reasoning Capabilities through Cryptography Challenge/layout.json @@ -0,0 +1,27817 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 130, + 116, + 462, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 116, + 462, + 145 + ], + "spans": [ + { + "bbox": [ + 130, + 116, + 462, + 145 + ], + "type": "text", + "content": "Yu Li" + }, + { + "bbox": [ + 130, + 116, + 462, + 145 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 130, + 116, + 462, + 145 + ], + "type": "text", + "content": ", Qizhi Pei" + }, + { + "bbox": [ + 130, + 116, + 462, + 145 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 130, + 116, + 462, + 145 + ], + "type": "text", + "content": ", Mengyuan Sun" + }, + { + "bbox": [ + 130, + 116, + 462, + 145 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 130, + 116, + 462, + 
145 + ], + "type": "text", + "content": ", Honglin Lin" + }, + { + "bbox": [ + 130, + 116, + 462, + 145 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 130, + 116, + 462, + 145 + ], + "type": "text", + "content": ", Chenlin Ming" + }, + { + "bbox": [ + 130, + 116, + 462, + 145 + ], + "type": "inline_equation", + "content": "^{1,3}" + }, + { + "bbox": [ + 130, + 116, + 462, + 145 + ], + "type": "text", + "content": ", Xin Gao" + }, + { + "bbox": [ + 130, + 116, + 462, + 145 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 130, + 116, + 462, + 145 + ], + "type": "text", + "content": ", Jiang Wu" + }, + { + "bbox": [ + 130, + 116, + 462, + 145 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 130, + 116, + 462, + 145 + ], + "type": "text", + "content": ", Conghui He" + }, + { + "bbox": [ + 130, + 116, + 462, + 145 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 130, + 116, + 462, + 145 + ], + "type": "text", + "content": ", Lijun Wu" + }, + { + "bbox": [ + 130, + 116, + 462, + 145 + ], + "type": "inline_equation", + "content": "^{1*}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 190, + 146, + 403, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 190, + 146, + 403, + 159 + ], + "spans": [ + { + "bbox": [ + 190, + 146, + 403, + 159 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 190, + 146, + 403, + 159 + ], + "type": "text", + "content": "Shanghai Artificial Intelligence Laboratory" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 142, + 159, + 451, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 159, + 451, + 174 + ], + "spans": [ + { + "bbox": [ + 142, + 159, + 451, + 174 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 142, + 159, + 451, + 174 + ], + "type": "text", + "content": "Renmin University of China " + }, + { + "bbox": [ + 
142, + 159, + 451, + 174 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 142, + 159, + 451, + 174 + ], + "type": "text", + "content": "Shanghai Jiao Tong University" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 183, + 174, + 411, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 183, + 174, + 411, + 187 + ], + "spans": [ + { + "bbox": [ + 183, + 174, + 411, + 187 + ], + "type": "text", + "content": "{liyu1, heconghui, wulijun}@pjlab.org.cn" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 195, + 187, + 398, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 187, + 398, + 202 + ], + "spans": [ + { + "bbox": [ + 195, + 187, + 398, + 202 + ], + "type": "text", + "content": "https://cipherbankeva.github.io" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "spans": [ + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 84, + 239, + 274, + 695 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 239, + 274, + 695 + ], + "spans": [ + { + "bbox": [ + 84, + 239, + 274, + 695 + ], + "type": "text", + "content": "Large language models (LLMs) have demonstrated remarkable capabilities, especially the recent advancements in reasoning, such as o1 and o3, pushing the boundaries of AI. Despite these impressive achievements in mathematics and coding, the reasoning abilities of LLMs in domains requiring cryptographic expertise remain underexplored. In this paper, we introduce CipherBank, a comprehensive benchmark designed to evaluate the reasoning capabilities of LLMs in cryptographic decryption tasks. 
CipherBank comprises 2,358 meticulously crafted problems, covering 262 unique plaintexts across 5 domains and 14 subdomains, with a focus on privacy-sensitive and real-world scenarios that necessitate encryption. From a cryptographic perspective, CipherBank incorporates 3 major categories of encryption methods, spanning 9 distinct algorithms, ranging from classical ciphers to custom cryptographic techniques. We evaluate state-of-the-art LLMs on CipherBank, e.g., GPT-4o, DeepSeek-V3, and cutting-edge reasoning-focused models such as o1 and DeepSeek-R1. Our results reveal significant gaps in reasoning abilities not only between general-purpose chat LLMs and reasoning-focused LLMs but also in the performance of current reasoning-focused models when applied to classical cryptographic decryption tasks, highlighting the challenges these models face in understanding and manipulating encrypted data. Through detailed analysis and error investigations, we provide several key observations that shed light on the limitations and potential improvement areas for LLMs in cryptographic reasoning. These findings underscore the need for continuous advancements in LLM reasoning capabilities." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 710, + 154, + 723 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 710, + 154, + 723 + ], + "spans": [ + { + "bbox": [ + 68, + 710, + 154, + 723 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 731, + 291, + 758 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 731, + 291, + 758 + ], + "spans": [ + { + "bbox": [ + 67, + 731, + 291, + 758 + ], + "type": "text", + "content": "Large Language Models (LLMs) have revolutionized artificial intelligence by achieving state-of" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 307, + 222, + 521, + 344 + ], + "blocks": [ + { + "bbox": [ + 307, + 222, + 521, + 344 + ], + "lines": [ + { + "bbox": [ + 307, + 222, + 521, + 344 + ], + "spans": [ + { + "bbox": [ + 307, + 222, + 521, + 344 + ], + "type": "image", + "image_path": "7e5d59106d41d9b0c2418dc813e35174fbce5dbcdc831356661e7a6d45f3346a.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 357, + 525, + 381 + ], + "lines": [ + { + "bbox": [ + 302, + 357, + 525, + 381 + ], + "spans": [ + { + "bbox": [ + 302, + 357, + 525, + 381 + ], + "type": "text", + "content": "Figure 1: Comprehensive Performance of SOTA Chat and Reasoning Models on CipherBank." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 409, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 409, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 409, + 526, + 775 + ], + "type": "text", + "content": "the-art (SOTA) performance across diverse domains, from Natural Language Understanding (NLP) (Dong et al., 2019; Karanikolas et al., 2023; Sasaki et al., 2024) to complex problem-solving (Yao et al., 2024; Ge et al., 2023). 
Recent models, such as GPT-4o (Hurst et al., 2024) and Claude 3.5 (Anthropic, 2024), have demonstrated unprecedented versatility, excelling in tasks ranging from creative writing to technical analysis. A particularly notable advancement lies in the reasoning-enhanced LLMs, which have emerged as a critical benchmark for evaluating LLMs' intelligence and now can solve mathematical problems (Wu et al., 2024; Ahn et al., 2024; Liu et al., 2024c), debug intricate code (Lee et al., 2024; Zhong et al., 2024), and even engage in multi-step logical deduction (Sun et al., 2024; Wang et al., 2023) with human-like proficiency. For instance, specialized architectures like o1 (Jaech et al., 2024) and DeepSeek-R1 (Guo et al., 2025) have pushed the boundaries of AI reasoning, achieving breakthroughs in domains such as theorem proving (Yang et al., 2024b) and algorithmic optimization (Liu et al., 2024b). These achievements underscore the transformative potential of LLMs as general-purpose reasoning engines, capable of adapting to both broad and specialized challenges." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 76, + 70, + 97, + 93 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 70, + 97, + 93 + ], + "spans": [ + { + "bbox": [ + 76, + 70, + 97, + 93 + ], + "type": "text", + "content": "C" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 97, + 74, + 517, + 107 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 74, + 517, + 107 + ], + "spans": [ + { + "bbox": [ + 97, + 74, + 517, + 107 + ], + "type": "text", + "content": "cipherBank: Exploring the Boundary of LLM Reasoning Capabilities through Cryptography Challenges" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 80, + 762, + 170, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 762, + 170, + 774 + ], + "spans": [ + { + "bbox": [ + 80, + 762, + 170, + 774 + ], + "type": "text", + "content": "* Corresponding author" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "text", + "content": "5929" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 136, + 795, + 456, + 806 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 795, + 456, + 806 + ], + "spans": [ + { + "bbox": [ + 136, + 795, + 456, + 806 + ], + "type": "text", + "content": "Findings of the Association for Computational Linguistics: ACL 2025, pages 5929-5965" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 160, + 807, + 433, + 817 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 807, + 433, + 817 + ], + "spans": [ + { + "bbox": [ + 160, + 807, + 433, + 817 + ], + "type": "text", + "content": "July 27 - August 1, 2025 ©2025 Association for Computational Linguistics" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 
595, + 841 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 293, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 293, + 206 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 293, + 206 + ], + "type": "text", + "content": "To quantify progress, the community has proposed numerous benchmarks targeting mathematical reasoning (e.g., MATH (Hendrycks et al., 2021a), AIME1, coding proficiency (e.g., HumanEval (Chen et al., 2021a), MBPP (Austin et al., 2021)), and general logical deduction (e.g., FOLO (Han et al., 2024), MMBench (Yuan Liu, 2023), CaLM (Chen et al., 2024). These testbeds have become indispensable tools for assessing model capabilities." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 208, + 292, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 208, + 292, + 518 + ], + "spans": [ + { + "bbox": [ + 69, + 208, + 292, + 518 + ], + "type": "text", + "content": "Despite extensive evaluations in mathematics and coding, one critical domain remains underexplored: cryptographic decryption. Cryptographic reasoning (Shree et al., 2017) demands unique capabilities, including pattern recognition, algorithmic Reverse-engineering, and contextual understanding of security constraints (Schneier, 2002)—skills distinct from those tested in conventional benchmarks. This gap is particularly consequential, as cryptography lies at the heart of modern digital security (Konheim, 2007), with applications spanning privacy-preserving communication (Soomro et al., 2019), secure authentication (Rani et al., 2022), and data integrity (Sarkar et al., 2021). The absence of a rigorous benchmark for cryptographic reasoning not only limits the true understanding of LLM's reasoning ability but also hinders progress toward AI systems capable of contributing to security-critical contexts (e.g., jailbreaking (Wei et al., 2024)). 
OpenAI has scratched the surface of this challenge and put a demo2 when releasing their strong reasoning model o1, but no serious efforts have been made to reveal this challenge in the committee." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 519, + 292, + 724 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 519, + 292, + 724 + ], + "spans": [ + { + "bbox": [ + 67, + 519, + 292, + 724 + ], + "type": "text", + "content": "To address this gap, we introduce CipherBank, the first comprehensive benchmark specially designed to evaluate LLMs' reasoning capabilities in cryptographic decryption tasks. CipherBank is meticulously constructed to reflect real-world scenarios requiring encryption, instead of general texts that may serve as a toy testbed, with 2,358 problems derived from 262 unique plaintexts across 5 domains (e.g., Personal Privacy, Financial Information) and 14 subdomains (e.g., Identity Information, Personal Income). As for cipher algorithms, it spans 3 major cryptographic categories—Substitution Ciphers (e.g., Rot13, Vigenère), Transposition Ciphers (e.g., Reverse, SwapPairs), and custom hybrid algo" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 302, + 71, + 527, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 527, + 179 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 527, + 179 + ], + "type": "text", + "content": "rithms—encompassing 9 distinct encryption methods, covering 5 difficulty levels (from Basic to Expert) to ensure a diverse range of challenges. By integrating privacy-sensitive contexts and multilayered cryptographic challenges, CipherBank provides a nuanced evaluation framework that captures both the complexity and practicality of real-world decryption tasks." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 302, + 180, + 527, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 180, + 527, + 492 + ], + "spans": [ + { + "bbox": [ + 302, + 180, + 527, + 492 + ], + "type": "text", + "content": "We evaluate CipherBank on SOTA LLMs, including general-purpose models (GPT-4o (Hurst et al., 2024), DeepSeek-V3 (Liu et al., 2024a)) and reasoning-optimized models (o1 (Jaech et al., 2024), DeepSeek-R1 (Guo et al., 2025)). Results reveal striking limitations: even advanced models struggle with classical ciphers, achieving only 45.14 score on tasks solvable by human cryptanalysts. Notably, we observe a significant performance gap between general chat LLMs and specialized reasoning models, suggesting that current reasoning optimizations inadequately address cryptographic challenges. Besides, we also provide studies on different aspects for deep understandings, such as evaluate on noised plaintexts and different length of plaintexts. Observations show the limitations of current models in decryption reasoning, with chat and reasoning models each exhibiting distinct strengths and weaknesses in cryptographic tasks. These findings highlight the need for targeted improvements in LLMs' cryptographic reasoning, with implications for both AI safety (e.g., adversarial robustness) and applications in cybersecurity." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 302, + 507, + 458, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 507, + 458, + 521 + ], + "spans": [ + { + "bbox": [ + 302, + 507, + 458, + 521 + ], + "type": "text", + "content": "2 CipherBank Construction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 531, + 528, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 531, + 528, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 531, + 528, + 775 + ], + "type": "text", + "content": "CipherBank is a purpose-built benchmark designed to rigorously evaluate the reasoning capabilities of LLMs in cryptographic decryption tasks. It integrates three core components to ensure comprehensive coverage of real-world scenarios and cryptographic complexity: (1) diverse plaintexts meticulously constructed from multiple dimensions of real-world privacy-sensitive data, ensuring the decryption process aligns with practical requirements; (2) a comprehensive suite of encryption algorithms, including both traditional cryptographic methods and custom-designed algorithms, to thoroughly assess the model's reasoning, inductive, and computational capabilities from multiple perspectives; and (3) a structured problem set with rich metadata, enabling granular performance analysis and detailed error analysis based on the diverse properties of the plaintexts." 
+ } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 67, + 731, + 258, + 753 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 731, + 258, + 753 + ], + "spans": [ + { + "bbox": [ + 67, + 731, + 258, + 753 + ], + "type": "text", + "content": "1https://huggingface.co/datasets/AI-M0/ aimo-validation-aime" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 754, + 203, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 754, + 203, + 774 + ], + "spans": [ + { + "bbox": [ + 67, + 754, + 203, + 774 + ], + "type": "text", + "content": "2https://openai.com/index/ learning-to-reason-with-11ms/" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 286, + 781, + 310, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 310, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 310, + 791 + ], + "type": "text", + "content": "5930" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 73, + 68, + 524, + 330 + ], + "blocks": [ + { + "bbox": [ + 73, + 68, + 524, + 330 + ], + "lines": [ + { + "bbox": [ + 73, + 68, + 524, + 330 + ], + "spans": [ + { + "bbox": [ + 73, + 68, + 524, + 330 + ], + "type": "image", + "image_path": "55a6502770f6ba4f313d235f238ab47edc38299f957c41a17c063b28e5bad7bc.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 338, + 525, + 375 + ], + "lines": [ + { + "bbox": [ + 67, + 338, + 525, + 375 + ], + "spans": [ + { + "bbox": [ + 67, + 338, + 525, + 375 + ], + "type": "text", + "content": "Figure 2: Overview of CipherBank. CipherBank consists of simulated privacy data encrypted using various algorithms. The left side of the figure shows five domains, 14 subdomains, and selected tags. 
The right side displays three encryption categories, nine specific algorithms, and their corresponding difficulty levels." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 396, + 267, + 423 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 396, + 267, + 423 + ], + "spans": [ + { + "bbox": [ + 67, + 396, + 267, + 423 + ], + "type": "text", + "content": "2.1 Plaintext Data: Design, Sources, and Real-World Alignment" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 433, + 291, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 433, + 291, + 623 + ], + "spans": [ + { + "bbox": [ + 67, + 433, + 291, + 623 + ], + "type": "text", + "content": "To construct CipherBank, we meticulously analyze real-world encryption scenarios and categorize the corresponding data types into five primary domains: Personal Privacy Data, Enterprise Sensitive Data, Public Safety Data, Financial Asset Data and Internet Records. These domains are further refined into 14 subdomains (e.g., Health Information, Policy Data) to ensure comprehensive coverage of encryption needs. Inspired by UltraChat (Ding et al., 2023), we adopt a tag-based approach to systematically structure encryption-relevant data, ensuring semantic consistency and domain relevance. Below, we detail the 3-step process for generating high-quality plaintext data." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 626, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 626, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 626, + 291, + 775 + ], + "type": "text", + "content": "Step 1: Tag Definition and Curation. We leverage GPT-4o to generate candidate tags for each subdomain, capturing diverse real-world encryption scenarios. 
Human experts then curate these tags, eliminating redundancies, irrelevancies, and ambiguous entries, resulting in 89 distinct tags (see Appendix A.1). This structured approach ensures that the generated plaintext data remains realistic, contextually meaningful, and representative of actual encryption use cases. The tags are designed to align with the Variable Length property, enabling" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 302, + 396, + 524, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 396, + 524, + 421 + ], + "spans": [ + { + "bbox": [ + 302, + 396, + 524, + 421 + ], + "type": "text", + "content": "the generation of inputs of varying sizes to assess model robustness." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 423, + 526, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 423, + 526, + 707 + ], + "spans": [ + { + "bbox": [ + 302, + 423, + 526, + 707 + ], + "type": "text", + "content": "Step 2: Controlled Text Generation. Our plaintext generation process employs tag combinations to control text granularity: entries with more tags contain richer contextual details and greater length, while those with fewer tags remain concise and specific. To ensure semantic validity, all generated data are filtered to eliminate generic or redundant descriptions, creating a dataset that reflects diverse encryption scenarios with varying complexity. Additionally, we introduce the Noise Perturbation property through controlled noise injection, which serves two key objectives: (1) testing the model's anti-interference capabilities and (2) reducing its reliance on contextual semantics to enhance robustness. Furthermore, we incorporate Sensitive Numerical Data by designing scenarios with complex alphanumeric combinations, including critical identifiers such as ID card and passport number. 
This multifaceted approach enables a comprehensive evaluation of the model's ability to address sophisticated decryption challenges." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 708, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 708, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 708, + 526, + 775 + ], + "type": "text", + "content": "Step 3: Expert Validation and Refinement. After generation, we conduct expert validation to ensure data quality, correctness, and relevance. Noninformative content, excessively long or short samples, and entries lacking clear privacy attributes are" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 780, + 308, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 308, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 308, + 791 + ], + "type": "text", + "content": "5931" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 291, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 291, + 139 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 291, + 139 + ], + "type": "text", + "content": "filtered out. Through this rigorous refinement process, we retain 262 high-quality plaintext samples. This approach enables a practical and application-driven benchmark for evaluating LLMs' decryption capabilities in cryptographic reasoning tasks." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 148, + 205, + 160 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 148, + 205, + 160 + ], + "spans": [ + { + "bbox": [ + 67, + 148, + 205, + 160 + ], + "type": "text", + "content": "2.2 Encryption Algorithms" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 164, + 291, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 164, + 291, + 461 + ], + "spans": [ + { + "bbox": [ + 67, + 164, + 291, + 461 + ], + "type": "text", + "content": "CipherBank incorporates 3 major categories of encryption methods: Substitution Ciphers, Transposition Ciphers, and Custom Ciphers. (1) Substitution-based techniques, including Rot13, Atbash, Polybius and Vigenère, test a model's ability to decode character-level transformations. These ciphers involve monoalphabetic or polyalphabetic substitutions, where each character is replaced by another based on a fixed rule or key. These methods evaluate the model's capacity to decode symbolic mappings and generalize across substitution rules. (2) Transposition-based techniques, such as Reverse and SwapPair, focus on positional rearrangements rather than symbol substitutions. These ciphers challenge the model to recognize structural patterns, such as reversed sequences or pairwise swaps. Unlike substitution ciphers, which alter character identities but preserve their order, transposition ciphers preserve characters but disrupt their sequence. This tests the model's ability to analyze sequential dependencies and reconstruct the original symbol order." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 463, + 291, + 678 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 463, + 291, + 678 + ], + "spans": [ + { + "bbox": [ + 67, + 463, + 291, + 678 + ], + "type": "text", + "content": "To further assess LLMs' ability to decrypt uncommon encryption methods, we introduce (3) Custom-designed ciphers that deviate from standard cryptographic schemes. (a) DualAvgCode is inspired by OpenAI's o1 model showcase3, where iterative transformations require models to infer multi-step encryption patterns. (b) ParityShift draws from LSB steganography (Mielikainen, 2006), a common technique in information hiding, incorporating bitwise manipulations based on character parity. (c) WordShift Cipher is designed to evaluate LLMs' ability to decrypt ciphers that combine substitution and transposition encryption, performing Caesar-style letter shifts within each word individually, blending character-level substitution with structural reordering." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 679, + 291, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 679, + 291, + 746 + ], + "spans": [ + { + "bbox": [ + 67, + 679, + 291, + 746 + ], + "type": "text", + "content": "Meanwhile, We categorize the nine algorithms into five difficulty tiers based on key necessity and computational complexity. T1 (Basic) includes simple ciphers like ROT13 and Reverse. T2 (Intermediate) introduces Atbash and WordShift with" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 302, + 71, + 526, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 526, + 179 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 526, + 179 + ], + "type": "text", + "content": "slightly more complex rules. T3 (Moderate) covers DualAvgCode and Polybius, requiring structured encoding. T4 (Advanced) involves ParityShift and SwapPairs with intricate data manipulation. 
T5 (Expert) features the Vigenère cipher, a polyalphabetic substitution cipher known for its keyword-based complexity. This framework organizes encryption techniques from basic to expert." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 188, + 434, + 201 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 188, + 434, + 201 + ], + "spans": [ + { + "bbox": [ + 302, + 188, + 434, + 201 + ], + "type": "text", + "content": "2.3 CipherBank Statistics" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 206, + 525, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 206, + 525, + 259 + ], + "spans": [ + { + "bbox": [ + 302, + 206, + 525, + 259 + ], + "type": "text", + "content": "As shown in Figure 2, we provides an overview of CipherBank structure. The encryption algorithm in Section 2.2 applies to the expert-curated dataset from Section 2.1, yielding 2,358 test data points." + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 304, + 290, + 525, + 375 + ], + "blocks": [ + { + "bbox": [ + 345, + 269, + 482, + 282 + ], + "lines": [ + { + "bbox": [ + 345, + 269, + 482, + 282 + ], + "spans": [ + { + "bbox": [ + 345, + 269, + 482, + 282 + ], + "type": "text", + "content": "Table 1: Statistics of CipherBank." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 304, + 290, + 525, + 375 + ], + "lines": [ + { + "bbox": [ + 304, + 290, + 525, + 375 + ], + "spans": [ + { + "bbox": [ + 304, + 290, + 525, + 375 + ], + "type": "table", + "html": "
Domains#Tag#Plaintext#TestAvg(len)
Personal Privacy Data2350450107.88
Enterprise Sensitive Data1652468103.10
Public Safety Data1763567110.89
Financial Asset Data1344396163.68
Internet Records2053477191.92
Summary892622358134.03
", + "image_path": "535635db629f7b86f2a3c946cf62f939e891dda9bfa0fa0bf18d24756cc16ba4.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 388, + 525, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 388, + 525, + 496 + ], + "spans": [ + { + "bbox": [ + 302, + 388, + 525, + 496 + ], + "type": "text", + "content": "Table 1 summarizes the distribution of plaintexts across 5 domains, each with varying numbers of tags, samples, and test cases. Notably, Internet Records has the longest plaintexts (191.92), while Enterprise Sensitive Data has shorter samples (103.10). This diversity ensures a comprehensive evaluation of model performance across different encryption contexts." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 506, + 386, + 518 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 506, + 386, + 518 + ], + "spans": [ + { + "bbox": [ + 302, + 506, + 386, + 518 + ], + "type": "text", + "content": "3 Evaluations" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 528, + 411, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 528, + 411, + 540 + ], + "spans": [ + { + "bbox": [ + 302, + 528, + 411, + 540 + ], + "type": "text", + "content": "3.1 Evaluation Setup" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 544, + 525, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 544, + 525, + 666 + ], + "spans": [ + { + "bbox": [ + 302, + 544, + 525, + 666 + ], + "type": "text", + "content": "Evaluation Protocols. In terms of testing methodology, CipherBank's evaluation follows the Known-Plaintext Attack framework (Zulkifli and Mohd, 2008), employing a 3-shot testing approach. 
We prompt the model with three plaintext-ciphertext pairs as demonstrations to infer encryption rules, identify potential keys, and apply the learned patterns to decrypt a new ciphertext. The detailed prompt can be found in Appendix B.1." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 667, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 667, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 667, + 525, + 775 + ], + "type": "text", + "content": "For evaluation metrics, we primarily employ accuracy to measure overall decryption success, which is the ratio of correctly decrypted cases to total test cases, where correctness requires an exact character match with the plaintext. Additionally, to capture finer-grained differences between the decrypted output and the original plaintext, we incorporate Levenshtein similarity (Yujian and Bo," + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 67, + 752, + 202, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 752, + 202, + 774 + ], + "spans": [ + { + "bbox": [ + 67, + 752, + 202, + 774 + ], + "type": "text", + "content": "3https://openai.com/index/ learning-to-reason-with-llms/" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "text", + "content": "5932" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 75, + 72, + 214, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 72, + 214, + 84 + ], + "spans": [ + { + "bbox": [ + 75, + 72, + 214, + 84 + ], + "type": "text", + "content": "Example 2.1: Plain-Ciphertext Pair" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 75, + 89, + 
198, + 100 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 89, + 198, + 100 + ], + "spans": [ + { + "bbox": [ + 75, + 89, + 198, + 100 + ], + "type": "text", + "content": "Domain: Personal Privacy Data" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 75, + 102, + 208, + 113 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 102, + 208, + 113 + ], + "spans": [ + { + "bbox": [ + 75, + 102, + 208, + 113 + ], + "type": "text", + "content": "Subdomain: Identity Information" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 75, + 116, + 327, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 116, + 327, + 127 + ], + "spans": [ + { + "bbox": [ + 75, + 116, + 327, + 127 + ], + "type": "text", + "content": "Tag Combination: [\"Name\", \"Date of Birth\", \"Passport Number\"]" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 75, + 131, + 114, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 131, + 114, + 140 + ], + "spans": [ + { + "bbox": [ + 75, + 131, + 114, + 140 + ], + "type": "text", + "content": "Plaintext:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 74, + 144, + 392, + 155 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 144, + 392, + 155 + ], + "spans": [ + { + "bbox": [ + 74, + 144, + 392, + 155 + ], + "type": "text", + "content": "Peter was born on April 23, 1985, and carries a passport with the number X123456789." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 75, + 158, + 151, + 169 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 158, + 151, + 169 + ], + "spans": [ + { + "bbox": [ + 75, + 158, + 151, + 169 + ], + "type": "text", + "content": "Encryption results:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 75, + 172, + 453, + 211 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 75, + 172, + 434, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 172, + 434, + 184 + ], + "spans": [ + { + "bbox": [ + 75, + 172, + 434, + 184 + ], + "type": "text", + "content": "(1) Rot13: Crgre jnf obea ba Ncevy 23, 1985, naq pneevrf n cnffcbeg jvgu gur ahzore K123456789." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 75, + 186, + 448, + 197 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 186, + 448, + 197 + ], + "spans": [ + { + "bbox": [ + 75, + 186, + 448, + 197 + ], + "type": "text", + "content": "(2) SwapPairs: ePet raw sobnro npAir l32,9158,na dacrei s aapssoptrw ti hht eunbmreX 21436587.9" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 75, + 200, + 453, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 200, + 453, + 211 + ], + "spans": [ + { + "bbox": [ + 75, + 200, + 453, + 211 + ], + "type": "text", + "content": "(3) WordShift : erPet was nbor no ilApr 23, 5,198 and riescar a sportpas hwit the bernum 3456789.X12" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 76, + 215, + 96, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 215, + 96, + 224 + ], + "spans": [ + { + "bbox": [ + 76, + 215, + 96, + 224 + ], + "type": "text", + "content": "(4) ..." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 75, + 228, + 230, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 228, + 230, + 239 + ], + "spans": [ + { + "bbox": [ + 75, + 228, + 230, + 239 + ], + "type": "text", + "content": "More results can be found in the appendix." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 265, + 290, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 265, + 290, + 333 + ], + "spans": [ + { + "bbox": [ + 67, + 265, + 290, + 333 + ], + "type": "text", + "content": "2007). We compute the Levenshtein distance for each sentence individually and report the average Levenshtein similarity across all test cases, providing a more nuanced assessment of model performance beyond binary correctness." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 67, + 338, + 291, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 338, + 291, + 406 + ], + "spans": [ + { + "bbox": [ + 67, + 338, + 291, + 406 + ], + "type": "text", + "content": "LLM Candidates. For a comprehensive evaluation, we carefully selected 18 SOTA LLMs for evaluation, ensuring a diverse representation of open-source, closed-source, and reasoning-specialized models. 
Below, we outline the tested models:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 67, + 412, + 291, + 682 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 67, + 412, + 291, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 412, + 291, + 507 + ], + "spans": [ + { + "bbox": [ + 67, + 412, + 291, + 507 + ], + "type": "inline_equation", + "content": "\\star" + }, + { + "bbox": [ + 67, + 412, + 291, + 507 + ], + "type": "text", + "content": " Open-Source Chat Models: We evaluate leading open-source LLMs, including Mistral AI's Mixtral-8x22B (Jiang et al., 2024a), Alibaba's Qwen2.5-72B-Instruct (Yang et al., 2024a), Meta's Llama-3.1-70B-Instruct and Llama-3.3-70B-Instruct (Dubey et al., 2024), as well as the rising star - DeepSeek-V3 (Liu et al., 2024a)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 67, + 513, + 291, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 513, + 291, + 607 + ], + "spans": [ + { + "bbox": [ + 67, + 513, + 291, + 607 + ], + "type": "inline_equation", + "content": "\\star" + }, + { + "bbox": [ + 67, + 513, + 291, + 607 + ], + "type": "text", + "content": " Closed-Source Models: For proprietary models, evaluation is conducted via API access. The tested models include OpenAI's 4o-mini and GPT-4o series (0806, 1120) (Hurst et al., 2024), DeepMind's Gemini-1.5-Pro (Team, 2024a) and Gemini-2.0-Flash-Exp" + }, + { + "bbox": [ + 67, + 513, + 291, + 607 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 67, + 513, + 291, + 607 + ], + "type": "text", + "content": ", along with Anthropic's Claude-Sonnet-3.5 " + }, + { + "bbox": [ + 67, + 513, + 291, + 607 + ], + "type": "inline_equation", + "content": "(1022)^{5}" + }, + { + "bbox": [ + 67, + 513, + 291, + 607 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 67, + 613, + 291, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 613, + 291, + 682 + ], + "spans": [ + { + "bbox": [ + 67, + 613, + 291, + 682 + ], + "type": "inline_equation", + "content": "\\star" + }, + { + "bbox": [ + 67, + 613, + 291, + 682 + ], + "type": "text", + "content": " Reasoning Models: We further investigate models optimized for reasoning tasks, including QwQ-32B-Preview (Team, 2024b), DeepSeek-R1 (Guo et al., 2025), Gemini-2.0-Flash-Thinking " + }, + { + "bbox": [ + 67, + 613, + 291, + 682 + ], + "type": "inline_equation", + "content": "(1219)^{6}" + }, + { + "bbox": [ + 67, + 613, + 291, + 682 + ], + "type": "text", + "content": " o1-mini (0912) and o1 (1217) (Jaech et al., 2024)." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 303, + 265, + 422, + 278 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 265, + 422, + 278 + ], + "spans": [ + { + "bbox": [ + 303, + 265, + 422, + 278 + ], + "type": "text", + "content": "3.2 Benchmark Results" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 302, + 285, + 525, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 285, + 525, + 338 + ], + "spans": [ + { + "bbox": [ + 302, + 285, + 525, + 338 + ], + "type": "text", + "content": "Table 2 presents the evaluation results of all candidate LLMs (Levenshtein similarity results are in Appendix C.1). Below, we distill the experimental findings into several observations:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 302, + 340, + 526, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 340, + 526, + 516 + ], + "spans": [ + { + "bbox": [ + 302, + 340, + 526, + 516 + ], + "type": "text", + "content": "Limitations of Current Models in Cryptographic Reasoning. 
Despite advancements in LLMs, Table 2 highlights their limitations in structured cryptographic reasoning. The overall performance remains low, with most SOTA models struggling to achieve meaningful accuracy. In Cipher Score, common models like Qwen and LLaMA perform particularly poorly, with some scoring in the single digits or near zero. Even the best-performing models, Claude-3.5 and o1, achieve less than 50 in accuracy, underscoring the significant difficulty of CipherBank and the challenges LLMs face in systematic decryption." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 302, + 517, + 525, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 517, + 525, + 651 + ], + "spans": [ + { + "bbox": [ + 302, + 517, + 525, + 651 + ], + "type": "text", + "content": "Reasoning Models Generally Outperform Chat Models. When comparing reasoning models to chat models, generally we can find that the reasoning models do outperform chat models on all cipher algorithms and achieve better overall performance. The only exception is the superior performance of Claude-3.5 (45.14) even better than o1, and also the bad performance of QwQ-32B-Preview (only 0.76 accuracy). This clearly demonstrates the advantages of the reasoning-specialized models." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 302, + 653, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 653, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 653, + 526, + 775 + ], + "type": "text", + "content": "Closed-Source Models Retain an Edge Over Open-Source Models. Overall, closed-source models outperform open-source models in cryptographic decryption. Claude-3.5 (45.14) and o1 (40.59) achieve the highest performance across all cipher categories. However, DeepSeek-V3 (9.86) and DeepSeek-R1 (25.91) surpass most models in the GPT and Gemini families, indicating that advanced open-source models are closing the gap." 
+ } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 67, + 710, + 285, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 710, + 285, + 732 + ], + "spans": [ + { + "bbox": [ + 67, + 710, + 285, + 732 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 67, + 710, + 285, + 732 + ], + "type": "text", + "content": "https://deepmind.google/technologies/gemini/ flash/" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 67, + 733, + 226, + 752 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 733, + 226, + 752 + ], + "spans": [ + { + "bbox": [ + 67, + 733, + 226, + 752 + ], + "type": "text", + "content": "5https://www.anthropic.com/news/claude-3-5-sonnet" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 67, + 753, + 284, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 753, + 284, + 774 + ], + "spans": [ + { + "bbox": [ + 67, + 753, + 284, + 774 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 67, + 753, + 284, + 774 + ], + "type": "text", + "content": "https://deepmind.google/technologies/gemini/flash-thinking/" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "text", + "content": "5933" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 70, + 115, + 526, + 338 + ], + "blocks": [ + { + "bbox": [ + 67, + 69, + 526, + 107 + ], + "lines": [ + { + "bbox": [ + 67, + 69, + 526, + 107 + ], + "spans": [ + { + "bbox": [ + 67, + 69, + 526, + 107 + ], + "type": "text", + "content": "Table 2: 3-shot scores (\\%) of LLMs across three major encryption paradigms and nine specific 
encryption algorithms on CipherBank. The highest scores in each category are highlighted with a blue background, while the second-best results are underlined for emphasis." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 70, + 115, + 526, + 338 + ], + "lines": [ + { + "bbox": [ + 70, + 115, + 526, + 338 + ], + "spans": [ + { + "bbox": [ + 70, + 115, + 526, + 338 + ], + "type": "table", + "html": "
ModelSubstitution CiphersTransposition CiphersCustom CiphersCipher Score
RotAtbashPolybiusVigenèreReverseSwapPairsDualAvgCodeParityShiftWordShiftAccuracyavg
Open-source Chat Models
Mixtral-8x22B-v0.10.380000.7600.3801.150.30
Qwen2.5-72B-Instruct1.1500000.381.1502.290.55
Llama-3.1-70B-Instruct1.150.3800.38000.380.380.760.38
Llama-3.3-70B-Instruct2.670.38000000.7600.42
DeepSeek-V332.4414.882.290.7628.470.380.381.148.029.86
Closed-source Models
GPT-4o-mini-2024-07-183.692.0300.512.1600.3800.251.00
GPT-4o-2024-08-0638.173.050.380.7625.192.2901.148.408.82
GPT-4o-2024-11-2026.466.990.130.7615.270.760.250.896.116.40
gemini-1.5-pro55.340.760.380.7610.310.760.380.7616.419.54
gemini-2.0-flash-exp35.883.051.530.3829.391.5300.765.348.65
Claude-Sonnet-3.5-102283.2175.1972.901.9163.936.874.9658.2139.1245.14
Reasoning Models
QwQ-32B-Preview1.530.381.910000.380.382.290.76
DeepSeek-R173.2858.7844.270.3810.690.3824.0512.988.4025.91
gemini-2.0-flash-thinking40.4617.1821.761.1522.901.1507.639.1613.49
o1-mini-2024-09-1246.1868.3246.951.535.150.382.937.631.5320.07
o1-2024-12-1759.9279.0179.397.2514.8932.1450.3812.3929.9040.59
", + "image_path": "6732dc1022bb797fbe15fc97d49a045df8e242d5120aa86484b0ff909337e93e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 356, + 290, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 356, + 290, + 423 + ], + "spans": [ + { + "bbox": [ + 67, + 356, + 290, + 423 + ], + "type": "text", + "content": "Nevertheless, both still lag behind Claude-3.5 and o1, suggesting that while open-source models are improving, there is significant potential for open-source models to achieve even better performance in the future." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 426, + 291, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 426, + 291, + 682 + ], + "spans": [ + { + "bbox": [ + 67, + 426, + 291, + 682 + ], + "type": "text", + "content": "The performance variance among models of the same category is remarkably significant. Within the Open-source Chat Models category, the top-performing model, deepseek-v3 (9.86), outperforms the weakest model, Mixtral-8x22B (0.30), by a factor of 33. Similarly, in the Closed-source Models category, Claude-Sonnet-3.5 (45.14) demonstrates a performance 45 times greater than that of GPT-4o-mini (1.00). The disparity is even more pronounced in the Reasoning Models category, where o1 (40.59) surpasses QwQ-32B-Preview (0.76) by a factor of 53. Such substantial performance variations are rarely observed in other benchmarks, highlighting the challenging nature of CipherBank. This benchmark effectively distinguishes the reasoning capabilities of different models through its decryption dimension, providing a robust framework for evaluating model performance." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 697, + 178, + 710 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 697, + 178, + 710 + ], + "spans": [ + { + "bbox": [ + 67, + 697, + 178, + 710 + ], + "type": "text", + "content": "4 Detailed Analysis" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 721, + 290, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 721, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 721, + 290, + 775 + ], + "type": "text", + "content": "In this section, we conduct a detailed analysis from the perspectives of plaintext characteristics, noise levels, testing methodologies, finer-grained evaluation metrics, and error analysis to gain deeper" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 356, + 525, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 356, + 525, + 384 + ], + "spans": [ + { + "bbox": [ + 302, + 356, + 525, + 384 + ], + "type": "text", + "content": "insights into the strengths and limitations of different LLMs in cryptographic decryption." + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 304, + 456, + 523, + 547 + ], + "blocks": [ + { + "bbox": [ + 302, + 397, + 525, + 447 + ], + "lines": [ + { + "bbox": [ + 302, + 397, + 525, + 447 + ], + "spans": [ + { + "bbox": [ + 302, + 397, + 525, + 447 + ], + "type": "text", + "content": "Table 3: Model Performance on Short and Long Plaintiff Setting (Lower Difference and Decrease Ratio Are Better). We highlight the most stable and sensitive results in blue and green respectively." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 304, + 456, + 523, + 547 + ], + "lines": [ + { + "bbox": [ + 304, + 456, + 523, + 547 + ], + "spans": [ + { + "bbox": [ + 304, + 456, + 523, + 547 + ], + "type": "table", + "html": "
ModelShortLongDiffDecrease Ratio(%)
GPT-4o-2024-11-209.474.465.0152.60
gemini-2.0-flash-exp11.506.425.0844.35
DeepSeek-V313.245.228.0260.60
gemini-2.0-flash-thinking19.908.4711.4342.61
DeepSeek-R132.2720.9411.3333.16
o1-mini-2024-09-1233.7717.3516.4248.57
o1-2024-12-1747.6134.3813.2327.78
Claude-Sonnet-3.548.7047.850.851.74
", + "image_path": "7ea28d201e6469f62838e2bad93547904fd243771f5e36cb024ae1a036c08bb2.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 577, + 456, + 591 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 577, + 456, + 591 + ], + "spans": [ + { + "bbox": [ + 302, + 577, + 456, + 591 + ], + "type": "text", + "content": "4.1 Impact of Plaintext Length" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 301, + 599, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 599, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 301, + 599, + 526, + 775 + ], + "type": "text", + "content": "To test models' sensitivity to text length, we categorize plaintexts into short (fewer than three tags) and long groups, averaging 70.29 and 181.61 characters, respectively. As shown in Table 3 (full results and plaintext examples can be found in Appendix C.2), longer plaintexts lead to a significant performance decline in most models. Most models exhibit a significant decline in decryption performance as text length increases. Among them, Claude-3.5 (-0.85) shows the most stable performance, while o1-mini (-16.42) is the most sensitive. This contrasts with human performance, highlighting LLMs' length bias in decryption reasoning." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5934" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 71, + 72, + 215, + 158 + ], + "blocks": [ + { + "bbox": [ + 71, + 72, + 215, + 158 + ], + "lines": [ + { + "bbox": [ + 71, + 72, + 215, + 158 + ], + "spans": [ + { + "bbox": [ + 71, + 72, + 215, + 158 + ], + "type": "image", + "image_path": "4b0ec48e93805d8249bfdf5a2d13a4735aefef2c3348233ea635395b851fa389.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 164, + 217, + 185 + ], + "lines": [ + { + "bbox": [ + 68, + 164, + 217, + 185 + ], + "spans": [ + { + "bbox": [ + 68, + 164, + 217, + 185 + ], + "type": "text", + "content": "(a) Model Robustness to Noisy Inputs: Performance Comparison." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 225, + 73, + 368, + 158 + ], + "blocks": [ + { + "bbox": [ + 225, + 73, + 368, + 158 + ], + "lines": [ + { + "bbox": [ + 225, + 73, + 368, + 158 + ], + "spans": [ + { + "bbox": [ + 225, + 73, + 368, + 158 + ], + "type": "image", + "image_path": "90ad513b31d0dfabfe9b6d16424b1e84a8c0a245a47240b779b8ffa98718a275.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 97, + 195, + 494, + 208 + ], + "lines": [ + { + "bbox": [ + 97, + 195, + 494, + 208 + ], + "spans": [ + { + "bbox": [ + 97, + 195, + 494, + 208 + ], + "type": "text", + "content": "Figure 3: Evaluation of LLM Performance Under Different Encryption and Prompting Conditions." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 380, + 71, + 523, + 158 + ], + "blocks": [ + { + "bbox": [ + 223, + 164, + 370, + 185 + ], + "lines": [ + { + "bbox": [ + 223, + 164, + 370, + 185 + ], + "spans": [ + { + "bbox": [ + 223, + 164, + 370, + 185 + ], + "type": "text", + "content": "(b) Effect of Encryption Scope: Letters Only vs. Letters & Numbers." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 380, + 71, + 523, + 158 + ], + "lines": [ + { + "bbox": [ + 380, + 71, + 523, + 158 + ], + "spans": [ + { + "bbox": [ + 380, + 71, + 523, + 158 + ], + "type": "image", + "image_path": "15a51b917cad39ba4fb7a86e640ecdb18cc5aad829229441318beab38dbe68c5.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 377, + 164, + 525, + 185 + ], + "lines": [ + { + "bbox": [ + 377, + 164, + 525, + 185 + ], + "spans": [ + { + "bbox": [ + 377, + 164, + 525, + 185 + ], + "type": "text", + "content": "(c) Evaluating the Benefit of Explicit Algorithm Hints in 3-Shot Prompting." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 228, + 265, + 240 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 228, + 265, + 240 + ], + "spans": [ + { + "bbox": [ + 67, + 228, + 265, + 240 + ], + "type": "text", + "content": "4.2 Effect of Noise on Model Robustness" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 253, + 291, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 253, + 291, + 348 + ], + "spans": [ + { + "bbox": [ + 67, + 253, + 291, + 348 + ], + "type": "text", + "content": "We observe that models frequently substituted synonyms instead of strictly applying decryption rules to each character (examples in Appendix C.2), indicating the presence of shortcut reasoning, where models partially decrypt the text and infer the remainder based on semantic context rather than adhering to the encryption pattern." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 351, + 291, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 351, + 291, + 488 + ], + "spans": [ + { + "bbox": [ + 67, + 351, + 291, + 488 + ], + "type": "text", + "content": "To evaluate robustness and mitigate reliance on semantic inference, we select the 40 plaintexts with the lowest perplexity (PPL) scores, computed using Llama-3.1-8B-Instruct, for noise injection. Figure 3a shows a substantial performance drop across all models, including Claude-3.5 (from 59.17 to 25.08) and o1-mini (from 24.25 to 5.83), highlighting their vulnerability to structural perturbations and further exposing the limitations of current models in systematic reasoning and precise decryption." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 507, + 221, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 507, + 221, + 521 + ], + "spans": [ + { + "bbox": [ + 67, + 507, + 221, + 521 + ], + "type": "text", + "content": "4.3 Effect of Encryption Scope" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 531, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 531, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 531, + 291, + 775 + ], + "type": "text", + "content": "In previous evaluations, only letters are encrypted. To better reflect real-world scenarios, here we select plaintexts with sensitive numerical data and apply encryption to both letters and numbers, focusing on algorithms that directly affect numbers (test prompt in Appendix C.2). As shown in Table 3b, model performance drops significantly in this more complex setting. This suggests difficulty in adapting decryption strategies to numerical transformations. Even under the same encryption principles, encrypting both letters and numbers greatly increases task complexity, posing a significant challenge for current reasoning models. This highlights a critical limitation in LLMs' ability to generalize across diverse data types, particularly when numerical transformations are involved. Future work should focus on enhancing models' capacity to handle mixed data encryption." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 228, + 503, + 255 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 228, + 503, + 255 + ], + "spans": [ + { + "bbox": [ + 302, + 228, + 503, + 255 + ], + "type": "text", + "content": "4.4 Effect of Explicit Algorithm Hints on Decryption Performance" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 301, + 259, + 526, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 259, + 526, + 476 + ], + "spans": [ + { + "bbox": [ + 301, + 259, + 526, + 476 + ], + "type": "text", + "content": "Previous evaluations highlight the significant challenges posed by CipherBank. To evaluate the models' decryption capabilities when provided with algorithm details, we enhance the 3-shot setting by explicitly informing the models of the specific algorithm during testing. Under the revised setting, models are no longer required to independently deduce encryption logic but instead focus on identifying the necessary key and applying the specified decryption rules. The enhanced prompt is provided in Appendix C.2. Table 3c reveals distinct performance patterns. Most chat models show minimal improvement even with algorithm details, struggling with key inference and decryption—highlighting persistent limitations, especially in models like Claude (+5.30) and Gemini (+1.97)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 477, + 525, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 477, + 525, + 585 + ], + "spans": [ + { + "bbox": [ + 302, + 477, + 525, + 585 + ], + "type": "text", + "content": "In contrast, reasoning models show marked performance gains, with R1 (+31.81) and o1-mini (+14.49) achieving significant improvements. 
The observed contrast underscores a fundamental distinction: chat models primarily rely on surface-level pattern recognition, while reasoning models excel in structured inference when provided with appropriate guidance." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 595, + 400, + 608 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 595, + 400, + 608 + ], + "spans": [ + { + "bbox": [ + 302, + 595, + 400, + 608 + ], + "type": "text", + "content": "4.5 Error Analysis" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 301, + 613, + 525, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 613, + 525, + 718 + ], + "spans": [ + { + "bbox": [ + 301, + 613, + 525, + 718 + ], + "type": "text", + "content": "We conduct a comprehensive error analysis based on the test results in Table 2, identifying six distinct error types. To gain deeper insights, we examine the three best-performing chat models and three best-performing reasoning models, summarizing their error distributions. Detailed error definitions and examples are provided in Appendix D.1 and D.2." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 302, + 721, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 721, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 721, + 525, + 775 + ], + "type": "text", + "content": "As shown in Figure 4, the distribution of error types reveals key differences between reasoning and chat models. 
Surprisingly, (1) reasoning models exhibit a higher rate of reasoning failures than" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5935" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 78, + 71, + 282, + 190 + ], + "blocks": [ + { + "bbox": [ + 78, + 71, + 282, + 190 + ], + "lines": [ + { + "bbox": [ + 78, + 71, + 282, + 190 + ], + "spans": [ + { + "bbox": [ + 78, + 71, + 282, + 190 + ], + "type": "image", + "image_path": "978a939a205b5d785c3dfb5009bc4e72e0e7940913449dc416cc66f0c8385835.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 204, + 291, + 240 + ], + "lines": [ + { + "bbox": [ + 67, + 204, + 291, + 240 + ], + "spans": [ + { + "bbox": [ + 67, + 204, + 291, + 240 + ], + "type": "text", + "content": "Figure 4: Decryption Error Distribution. The left represents chat models, while the right corresponds to reasoning models." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 243, + 291, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 243, + 291, + 542 + ], + "spans": [ + { + "bbox": [ + 69, + 243, + 291, + 542 + ], + "type": "text", + "content": "chat models. A deeper examination of Appendix D.3 reveals that many of these failures occur on simpler tasks, suggesting that reasoning models may overanalyze problems, leading to incorrect conclusions. This indicates that their complex inference processes can sometimes hinder performance on straightforward decryption cases. 
Conversely, (2) chat models show a higher frequency of omission-insertion and reorganization errors, indicating that while they are stronger in semantic understanding, this often results in excessive auto-completion and sentence restructuring rather than strict rule adherence. This tendency suggests that chat models prioritize fluency over exact decryption, leading to unintended modifications. Additionally, (3) both model types frequently make errors in name decryption, highlighting a broader challenge in handling structured entity transformations. This suggests that current LLMs struggle to consistently apply encryption rules to proper nouns, potentially due to memorization biases or difficulties in preserving entity-level consistency during decryption." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 550, + 161, + 563 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 550, + 161, + 563 + ], + "spans": [ + { + "bbox": [ + 67, + 550, + 161, + 563 + ], + "type": "text", + "content": "5 Related Work" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 572, + 292, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 572, + 292, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 572, + 292, + 775 + ], + "type": "text", + "content": "Benchmarks for Reasoning Evaluating reasoning abilities in LLMs has been a key focus in AI research, with various benchmarks assessing models across mathematical, logical, and inferential tasks. MATH (Hendrycks et al., 2021b), MathBench (Liu et al., 2024c), and LiveMath-Bench (Liu et al., 2024d) test arithmetic and algebraic reasoning, while HumanEval (Chen et al., 2021b), DebugBench (Tian et al., 2024) and Big-CodeBench (Zhuo et al., 2024) evaluates code generation that require programming logic. 
Additionally, BIG-Bench (Srivastava et al., 2022), BBH (Suzgun et al., 2022), and LiveBench (White et al., 2024) measure broader cognitive abilities, such as abstract reasoning and analogical problem" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 302, + 71, + 526, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 526, + 260 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 526, + 260 + ], + "type": "text", + "content": "solving. KOR-Bench (Ma et al., 2024) is new benchmark that examines strong reasoning by introducing Knowledge-Orthogonal Reasoning (KOR) tasks, assessing models' ability to apply newly introduced rules independent of pretrained knowledge. Specially, it also contains a cipher reasoning task, which provides explicit encryption rules and keys, guiding models through step-by-step decryption rather than requiring pattern inference. In contrast, CipherBank presents a more realistic challenge, requiring models to identify encryption patterns from examples without prior knowledge, better reflecting real-world scenarios where encryption schemes are unknown." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 267, + 526, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 267, + 526, + 470 + ], + "spans": [ + { + "bbox": [ + 302, + 267, + 526, + 470 + ], + "type": "text", + "content": "Jailbreaking via Cipher Characters Recent work demonstrates that encoding adversarial prompts via encryption (Yuan et al., 2023; Wei et al., 2024) or obfuscation (Yong et al., 2023; Jiang et al., 2024b; Kang et al., 2024) can bypass LLM safety filters by exploiting models' ability to process encoded inputs. While CipherBench (Handa et al., 2024) evaluates cipher-based jailbreaking, its reliance on 40 curated plaintexts and explicit algorithm hints limits practical relevance. 
Our CipherBank removes prior guidance, requiring autonomous pattern inference from plaintext-ciphertext pairs to simulate privacy-sensitive decryption scenarios, establishing a robust benchmark for LLM security evaluation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 480, + 381, + 493 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 480, + 381, + 493 + ], + "spans": [ + { + "bbox": [ + 302, + 480, + 381, + 493 + ], + "type": "text", + "content": "6 Conclusion" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 501, + 525, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 501, + 525, + 703 + ], + "spans": [ + { + "bbox": [ + 302, + 501, + 525, + 703 + ], + "type": "text", + "content": "In this work, we introduce CipherBank, a comprehensive benchmark for evaluating reasoning capabilities through cryptographic decryption. CipherBank includes 5 domains, 14 subdomains of plaintext data, 9 encryption algorithms, and 2,358 decryption tasks. By testing SOTA LLMs on CipherBank, we uncover significant limitations in their decryption abilities, revealing distinct strengths and weaknesses between reasoning and chat models. Our analysis identifies key deficiencies in current reasoning approaches and suggests directions for improvement, positioning CipherBank as a novel benchmark for advancing structured inference and cryptographic reasoning in developing future LLMs." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 714, + 365, + 726 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 714, + 365, + 726 + ], + "spans": [ + { + "bbox": [ + 302, + 714, + 365, + 726 + ], + "type": "text", + "content": "Limitations" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 735, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 735, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 735, + 525, + 775 + ], + "type": "text", + "content": "Our evaluation is constrained by the reliance on closed-source models, which are accessible only via API calls. This introduces potential variability" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5936" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 293, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 293, + 287 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 293, + 287 + ], + "type": "text", + "content": "due to API updates and version changes, though we mitigate this by documenting the specific versions and dates used. Additionally, access restrictions prevent us from evaluating more advanced models such as o1 Pro and o3 series, limiting the scope of our benchmark. From a design perspective, CipherBank primarily focuses on classical encryption algorithms, as modern cryptographic schemes introduce complexities beyond current model capabilities. While this choice ensures feasibility in evaluation, it also restricts the benchmark's applicability to real-world cryptographic challenges. 
As models improve, expanding CipherBank to modern encryption techniques will provide a more comprehensive assessment of reasoning in cryptographic tasks." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 299, + 170, + 312 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 299, + 170, + 312 + ], + "spans": [ + { + "bbox": [ + 68, + 299, + 170, + 312 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 321, + 291, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 321, + 291, + 348 + ], + "spans": [ + { + "bbox": [ + 67, + 321, + 291, + 348 + ], + "type": "text", + "content": "This work is supported by National Key R&D Program of China (2022ZD0160201)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 371, + 127, + 384 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 371, + 127, + 384 + ], + "spans": [ + { + "bbox": [ + 68, + 371, + 127, + 384 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 391, + 291, + 774 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 68, + 391, + 290, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 391, + 290, + 437 + ], + "spans": [ + { + "bbox": [ + 68, + 391, + 290, + 437 + ], + "type": "text", + "content": "Janice Ahn, Rishu Verma, Renze Lou, Di Liu, Rui Zhang, and Wenpeng Yin. 2024. Large language models for mathematical reasoning: Progresses and challenges. arXiv preprint arXiv:2402.00157." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 446, + 291, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 446, + 291, + 479 + ], + "spans": [ + { + "bbox": [ + 69, + 446, + 291, + 479 + ], + "type": "text", + "content": "Anthropic. 2024. Claude 3.5 sonnet. https://www.anthropic.com/news/claude-3-5-sonnet. 
Accessed: 2025-02-09." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 489, + 290, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 489, + 290, + 544 + ], + "spans": [ + { + "bbox": [ + 69, + 489, + 290, + 544 + ], + "type": "text", + "content": "Jacob Austin, Augustus Odena, Maxwell Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie Cai, Michael Terry, Quoc Le, and Charles Sutton. 2021. Program synthesis with large language models. Preprint, arXiv:2108.07732." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 554, + 291, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 554, + 291, + 774 + ], + "spans": [ + { + "bbox": [ + 69, + 554, + 291, + 774 + ], + "type": "text", + "content": "Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, Alex Ray, Raul Puri, Gretchen Krueger, Michael Petrov, Heidi Khlaaf, Girish Sastry, Pamela Mishkin, Brooke Chan, Scott Gray, Nick Ryder, Mikhail Pavlov, Alethea Power, Lukasz Kaiser, Mohammad Bavarian, Clemens Winter, Philippe Tillet, Felipe Petroski Such, Dave Cummings, Matthias Plappert, Fotios Chantzis, Elizabeth Barnes, Ariel Herbert-Voss, William Hebgen Guss, Alex Nichol, Alex Paino, Nikolas Tezak, Jie Tang, Igor Babuschkin, Suchir Balaji, Shantanu Jain, William Saunders, Christopher Hesse, Andrew N. Carr, Jan Leike, Josh Achiam, Vedant Misra, Evan Morikawa, Alec Radford, Matthew Knight, Miles Brundage, Mira Murati, Katie Mayer, Peter Welinder, Bob McGrew, Dario Amodei, Sam McCandlish, Ilya Sutskever, and Wojciech Zaremba. 2021a. Evaluating large language models trained on code." 
+ } + ] + } + ], + "index": 7 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 526, + 774 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 304, + 72, + 526, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 526, + 139 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 526, + 139 + ], + "type": "text", + "content": "Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde De Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, et al. 2021b. Evaluating large language models trained on code. arXiv preprint arXiv:2107.03374." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 148, + 526, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 148, + 526, + 193 + ], + "spans": [ + { + "bbox": [ + 304, + 148, + 526, + 193 + ], + "type": "text", + "content": "Sirui Chen, Bo Peng, Meiqi Chen, Ruiqi Wang, Mengying Xu, Xingyu Zeng, Rui Zhao, Shengjie Zhao, Yu Qiao, and Chaochao Lu. 2024. Causal evaluation of language models. Preprint, arXiv:2405.00622." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 200, + 526, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 200, + 526, + 257 + ], + "spans": [ + { + "bbox": [ + 304, + 200, + 526, + 257 + ], + "type": "text", + "content": "Ning Ding, Yulin Chen, Bokai Xu, Yujia Qin, Zhi Zheng, Shengding Hu, Zhiyuan Liu, Maosong Sun, and Bowen Zhou. 2023. Enhancing chat language models by scaling high-quality instructional conversations. Preprint, arXiv:2305.14233." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 265, + 526, + 333 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 265, + 526, + 333 + ], + "spans": [ + { + "bbox": [ + 304, + 265, + 526, + 333 + ], + "type": "text", + "content": "Li Dong, Nan Yang, Wenhui Wang, Furu Wei, Xiaodong Liu, Yu Wang, Jianfeng Gao, Ming Zhou, and Hsiao-Wuen Hon. 2019. Unified language model pretraining for natural language understanding and generation. Advances in neural information processing systems, 32." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 341, + 526, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 341, + 526, + 397 + ], + "spans": [ + { + "bbox": [ + 304, + 341, + 526, + 397 + ], + "type": "text", + "content": "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 406, + 526, + 461 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 406, + 526, + 461 + ], + "spans": [ + { + "bbox": [ + 304, + 406, + 526, + 461 + ], + "type": "text", + "content": "Yingqiang Ge, Wenyue Hua, Kai Mei, Juntao Tan, Shuyuan Xu, Zelong Li, Yongfeng Zhang, et al. 2023. Openagi: When llm meets domain experts. Advances in Neural Information Processing Systems, 36:5539-5568." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 470, + 526, + 526 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 470, + 526, + 526 + ], + "spans": [ + { + "bbox": [ + 304, + 470, + 526, + 526 + ], + "type": "text", + "content": "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. 
arXiv preprint arXiv:2501.12948." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 534, + 526, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 534, + 526, + 667 + ], + "spans": [ + { + "bbox": [ + 304, + 534, + 526, + 667 + ], + "type": "text", + "content": "Simeng Han, Hailey Schoelkopf, Yilun Zhao, Zhenting Qi, Martin Riddell, Wenfei Zhou, James Coady, David Peng, Yujie Qiao, Luke Benson, Lucy Sun, Alex Wardle-Solano, Hannah Szabo, Ekaterina Zubova, Matthew Burtell, Jonathan Fan, Yixin Liu, Brian Wong, Malcolm Sailor, Ansong Ni, Linyong Nan, Jungo Kasai, Tao Yu, Rui Zhang, Alexander R. Fabbri, Wojciech Kryscinski, Semih Yavuz, Ye Liu, Xi Victoria Lin, Shafiq Joty, Yingbo Zhou, Caiming Xiong, Rex Ying, Arman Cohen, and Dragomir Radev. 2024. Folio: Natural language reasoning with first-order logic. Preprint, arXiv:2209.00840." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 676, + 526, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 676, + 526, + 721 + ], + "spans": [ + { + "bbox": [ + 304, + 676, + 526, + 721 + ], + "type": "text", + "content": "Divij Handa, Zehua Zhang, Amir Saeidi, and Chitta Baral. 2024. When \"competency\" in reasoning opens the door to vulnerability: Jailbreaking llms via novel complex ciphers. Preprint, arXiv:2402.10601." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 729, + 526, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 729, + 526, + 774 + ], + "spans": [ + { + "bbox": [ + 304, + 729, + 526, + 774 + ], + "type": "text", + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. 2021a. Measuring mathematical problem solving with the math dataset. NeurIPS." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5937" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 289, + 774 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 69, + 72, + 289, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 289, + 127 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 289, + 127 + ], + "type": "text", + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. 2021b. Measuring mathematical problem solving with the math dataset. arXiv preprint arXiv:2103.03874." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 137, + 289, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 137, + 289, + 192 + ], + "spans": [ + { + "bbox": [ + 69, + 137, + 289, + 192 + ], + "type": "text", + "content": "Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. 2024. Gpt-4o system card. arXiv preprint arXiv:2410.21276." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 201, + 289, + 257 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 201, + 289, + 257 + ], + "spans": [ + { + "bbox": [ + 69, + 201, + 289, + 257 + ], + "type": "text", + "content": "Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. 2024. Openai o1 system card. arXiv preprint arXiv:2412.16720." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 266, + 289, + 333 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 266, + 289, + 333 + ], + "spans": [ + { + "bbox": [ + 69, + 266, + 289, + 333 + ], + "type": "text", + "content": "Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. 2024a. Mixtral of experts. arXiv preprint arXiv:2401.04088." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 342, + 289, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 342, + 289, + 396 + ], + "spans": [ + { + "bbox": [ + 69, + 342, + 289, + 396 + ], + "type": "text", + "content": "Fengqing Jiang, Zhangchen Xu, Luyao Niu, Zhen Xiang, Bhaskar Ramasubramanian, Bo Li, and Radha Poovendran. 2024b. Artprompt: Ascii art-based jailbreak attacks against aligned llms. arXiv preprint arXiv:2402.11753." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 406, + 289, + 471 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 406, + 289, + 471 + ], + "spans": [ + { + "bbox": [ + 69, + 406, + 289, + 471 + ], + "type": "text", + "content": "Daniel Kang, Xuechen Li, Ion Stoica, Carlos Guestrin, Matei Zaharia, and Tatsunori Hashimoto. 2024. Exploiting programmatic behavior of llms: Dual-use through standard security attacks. In 2024 IEEE Security and Privacy Workshops (SPW), pages 132-143. IEEE." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 481, + 289, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 481, + 289, + 548 + ], + "spans": [ + { + "bbox": [ + 69, + 481, + 289, + 548 + ], + "type": "text", + "content": "Nikitas Karanikolas, Eirini Manga, Nikoletta Samaridi, Eleni Tousidou, and Michael Vassilakopoulos. 2023. Large language models versus natural language understanding and generation. 
In Proceedings of the 27th Pan-Hellenic Conference on Progress in Computing and Informatics, pages 278-290." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 557, + 289, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 557, + 289, + 581 + ], + "spans": [ + { + "bbox": [ + 69, + 557, + 289, + 581 + ], + "type": "text", + "content": "Alan G. Konheim. 2007. Computer Security and Cryptography. John Wiley & Sons." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 589, + 289, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 589, + 289, + 645 + ], + "spans": [ + { + "bbox": [ + 69, + 589, + 289, + 645 + ], + "type": "text", + "content": "Cheryl Lee, Chunqiu Steven Xia, Longji Yang, Jentse Huang, Zhouruixin Zhu, Lingming Zhang, and Michael R Lyu. 2024. A unified debugging approach via llm-based multi-agent synergy. arXiv preprint arXiv:2404.17153." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 654, + 289, + 709 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 654, + 289, + 709 + ], + "spans": [ + { + "bbox": [ + 69, + 654, + 289, + 709 + ], + "type": "text", + "content": "Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. 2024a. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 719, + 289, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 719, + 289, + 774 + ], + "spans": [ + { + "bbox": [ + 69, + 719, + 289, + 774 + ], + "type": "text", + "content": "Fei Liu, Yiming Yao, Ping Guo, Zhiyuan Yang, Zhe Zhao, Xi Lin, Xialiang Tong, Mingxuan Yuan, Zhichao Lu, Zhenkun Wang, et al. 2024b. A systematic survey on large language models for algorithm design. arXiv preprint arXiv:2410.14716." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 524, + 774 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 304, + 72, + 524, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 524, + 138 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 524, + 138 + ], + "type": "text", + "content": "Hongwei Liu, Zilong Zheng, Yuxuan Qiao, Haodong Duan, Zhiwei Fei, Fengzhe Zhou, Wenwei Zhang, Songyang Zhang, Dahua Lin, and Kai Chen. 2024c Mathbench: Evaluating the theory and application proficiency of llms with a hierarchical mathematics benchmark. arXiv preprint arXiv:2405.12209." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 147, + 524, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 147, + 524, + 201 + ], + "spans": [ + { + "bbox": [ + 304, + 147, + 524, + 201 + ], + "type": "text", + "content": "Junnan Liu, Hongwei Liu, Linchen Xiao, Ziyi Wang, Kuikun Liu, Songyang Gao, Wenwei Zhang, Songyang Zhang, and Kai Chen. 2024d. Are your llms capable of stable reasoning? arXiv preprint arXiv:2412.13147." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 210, + 524, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 210, + 524, + 277 + ], + "spans": [ + { + "bbox": [ + 304, + 210, + 524, + 277 + ], + "type": "text", + "content": "Kaijing Ma, Xinrun Du, Yunran Wang, Haoran Zhang, Zhoufutu Wen, Xingwei Qu, Jian Yang, Jiaheng Liu, Minghao Liu, Xiang Yue, et al 2024. Kor-bench: Benchmarking language models on knowledge-orthogonal reasoning tasks. arXiv preprint arXiv:2410.06526." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 285, + 524, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 285, + 524, + 308 + ], + "spans": [ + { + "bbox": [ + 304, + 285, + 524, + 308 + ], + "type": "text", + "content": "Jarno Mielikainen. 
2006. Lsb matching revisited. IEEE signal processing letters, 13(5):285-287." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 316, + 524, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 316, + 524, + 360 + ], + "spans": [ + { + "bbox": [ + 304, + 316, + 524, + 360 + ], + "type": "text", + "content": "S. Rani, A. Kataria, and M. Chauhan. 2022. Cyber security techniques, architectures, and design In Holistic Approach to Quantum Cryptography in Cyber Security, pages 41-66. CRC Press." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 369, + 524, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 369, + 524, + 413 + ], + "spans": [ + { + "bbox": [ + 304, + 369, + 524, + 413 + ], + "type": "text", + "content": "A. Sarkar, S. R. Chatterjee, and M. Chakraborty. 2021 Role of cryptography in network security. The \"Essence\" of Network Security: An End-to-End Panorama, pages 103-143." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 422, + 524, + 455 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 422, + 524, + 455 + ], + "spans": [ + { + "bbox": [ + 304, + 422, + 524, + 455 + ], + "type": "text", + "content": "Miyu Sasaki, Natsumi Watanabe, and Tsukihito Komanaka. 2024. Enhancing contextual understanding of mistral llm with external knowledge bases." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 463, + 524, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 463, + 524, + 486 + ], + "spans": [ + { + "bbox": [ + 304, + 463, + 524, + 486 + ], + "type": "text", + "content": "Bruce Schneier. 2002. Cryptographic design vulnerabilities. Computer, 31(9):29-33." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 495, + 524, + 539 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 495, + 524, + 539 + ], + "spans": [ + { + "bbox": [ + 304, + 495, + 524, + 539 + ], + "type": "text", + "content": "Divya Shree, Seema Ahlawat, et al. 2017. A review on cryptography, attacks and cyber security. International Journal of Advanced Research in Computer Science, 8(5)." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 547, + 524, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 547, + 524, + 602 + ], + "spans": [ + { + "bbox": [ + 304, + 547, + 524, + 602 + ], + "type": "text", + "content": "S. Soomro, M. R. Belgaum, Z. Alansari, et al. 2019 Review and open issues of cryptographic algorithms in cyber security. In 2019 International Conference on Computing, Electronics & Communications Engineering (iCCECE), pages 158-162. IEEE." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 611, + 524, + 688 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 611, + 524, + 688 + ], + "spans": [ + { + "bbox": [ + 304, + 611, + 524, + 688 + ], + "type": "text", + "content": "Aarohi Srivastava, Abhinav Rastogi, Abhishek Rao, Abu Awal Md Shoeb, Abubakar Abid, Adam Fisch, Adam R Brown, Adam Santoro, Aditya Gupta, Adrià Garriga-Alonso, et al. 2022. Beyond the imitation game: Quantifying and extrapolating the capabilities of language models. arXiv preprint arXiv:2206.04615." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 697, + 524, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 697, + 524, + 774 + ], + "spans": [ + { + "bbox": [ + 304, + 697, + 524, + 774 + ], + "type": "text", + "content": "Hongda Sun, Weikai Xu, Wei Liu, Jian Luan, Bin Wang, Shuo Shang, Ji-Rong Wen, and Rui Yan 2024. Determinlr: Augmenting llm-based logical reasoning from indeterminacy to determinacy. 
In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 9828-9862." + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5938" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 289, + 774 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 69, + 72, + 289, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 289, + 139 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 289, + 139 + ], + "type": "text", + "content": "Mirac Suzgun, Nathan Scales, Nathanael Scharli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc V Le, Ed H Chi, Denny Zhou, et al. 2022. Challenging big-bench tasks and whether chain-of-thought can solve them. arXiv preprint arXiv:2210.09261." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 149, + 289, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 149, + 289, + 184 + ], + "spans": [ + { + "bbox": [ + 69, + 149, + 289, + 184 + ], + "type": "text", + "content": "Gemini Team. 2024a. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. Preprint, arXiv:2403.05530." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 195, + 289, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 195, + 289, + 216 + ], + "spans": [ + { + "bbox": [ + 69, + 195, + 289, + 216 + ], + "type": "text", + "content": "Qwen Team. 2024b. Qwq: Reflect deeply on the boundaries of the unknown." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 228, + 289, + 273 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 228, + 289, + 273 + ], + "spans": [ + { + "bbox": [ + 69, + 228, + 289, + 273 + ], + "type": "text", + "content": "Runchu Tian, Yining Ye, Yujia Qin, Xin Cong, Yankai Lin, Zhiyuan Liu, and Maosong Sun. 2024. Debugbench: Evaluating debugging capability of large language models. Preprint, arXiv:2401.04621." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 284, + 289, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 284, + 289, + 338 + ], + "spans": [ + { + "bbox": [ + 69, + 284, + 289, + 338 + ], + "type": "text", + "content": "Boshi Wang, Xiang Yue, and Huan Sun. 2023. Can chatgpt defend its belief in truth? evaluating llm reasoning via debate. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 11865-11881." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 351, + 289, + 395 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 351, + 289, + 395 + ], + "spans": [ + { + "bbox": [ + 69, + 351, + 289, + 395 + ], + "type": "text", + "content": "Alexander Wei, Nika Haghtalab, and Jacob Steinhardt. 2024. Jailbroken: How does llm safety training fail? Advances in Neural Information Processing Systems, 36." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 407, + 289, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 407, + 289, + 462 + ], + "spans": [ + { + "bbox": [ + 69, + 407, + 289, + 462 + ], + "type": "text", + "content": "Colin White, Samuel Dooley, Manley Roberts, Arka Pal, Ben Feuer, Siddhartha Jain, Ravid Shwartz-Ziv, Neel Jain, Khalid Saifullah, Siddartha Naidu, et al. 2024. Livebench: A challenging, contamination-free llm benchmark. arXiv preprint arXiv:2406.19314." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 474, + 289, + 539 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 474, + 289, + 539 + ], + "spans": [ + { + "bbox": [ + 69, + 474, + 289, + 539 + ], + "type": "text", + "content": "Yiran Wu, Feiran Jia, Shaokun Zhang, Hangyu Li, Erkang Zhu, Yue Wang, Yin Tat Lee, Richard Peng, Qingyun Wu, and Chi Wang. 2024. Mathchat: Converse to tackle challenging math problems with llm agents. In ICLR 2024 Workshop on Large Language Model (LLM) Agents." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 551, + 289, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 551, + 289, + 596 + ], + "spans": [ + { + "bbox": [ + 69, + 551, + 289, + 596 + ], + "type": "text", + "content": "An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. 2024a. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 607, + 289, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 607, + 289, + 673 + ], + "spans": [ + { + "bbox": [ + 69, + 607, + 289, + 673 + ], + "type": "text", + "content": "Kaiyu Yang, Aidan Swope, Alex Gu, Rahul Chalamala, Peiyang Song, Shixing Yu, Saad Godil, Ryan J Prenger, and Animashree Anandkumar. 2024b. Leandrojo: Theorem proving with retrieval-augmented language models. Advances in Neural Information Processing Systems, 36." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 684, + 289, + 729 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 684, + 289, + 729 + ], + "spans": [ + { + "bbox": [ + 69, + 684, + 289, + 729 + ], + "type": "text", + "content": "Wenlin Yao, Haitao Mi, and Dong Yu. 2024. Hdflow: Enhancing llm complex problem-solving with hybrid thinking and dynamic workflows. arXiv preprint arXiv:2409.17433." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 740, + 289, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 740, + 289, + 774 + ], + "spans": [ + { + "bbox": [ + 69, + 740, + 289, + 774 + ], + "type": "text", + "content": "Zheng-Xin Yong, Cristina Menghini, and Stephen H Bach. 2023. Low-resource languages jailbreak gpt-4. arXiv preprint arXiv:2310.02446." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 524, + 370 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 305, + 72, + 524, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 524, + 127 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 524, + 127 + ], + "type": "text", + "content": "Youliang Yuan, Wenxiang Jiao, Wenxuan Wang, Jen-tse Huang, Pinjia He, Shuming Shi, and Zhaopeng Tu. 2023. Gpt-4 is too smart to be safe: Stealthy chat with llms via cipher. arXiv preprint arXiv:2308.06463." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 136, + 524, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 136, + 524, + 169 + ], + "spans": [ + { + "bbox": [ + 304, + 136, + 524, + 169 + ], + "type": "text", + "content": "Haodong Duan Yuan Liu. 2023. Mmbench: Is your multi-modal model an all-around player? arXiv:2307.06281." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 178, + 524, + 211 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 178, + 524, + 211 + ], + "spans": [ + { + "bbox": [ + 304, + 178, + 524, + 211 + ], + "type": "text", + "content": "Li Yujiang and Liu Bo. 2007. A normalized levenshtein distance metric. IEEE transactions on pattern analysis and machine intelligence, 29(6):1091-1095." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 219, + 524, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 219, + 524, + 263 + ], + "spans": [ + { + "bbox": [ + 304, + 219, + 524, + 263 + ], + "type": "text", + "content": "Li Zhong, Zilong Wang, and Jingbo Shang. 2024. Ldb: A large language model debugger via verifying runtime execution step-by-step. arXiv preprint arXiv:2402.16906." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 272, + 524, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 272, + 524, + 338 + ], + "spans": [ + { + "bbox": [ + 304, + 272, + 524, + 338 + ], + "type": "text", + "content": "Terry Yue Zhuo, Minh Chien Vu, Jenny Chim, Han Hu, Wenhao Yu, Ratnadira Widyasari, Imam Nur Bani Yusuf, Haolan Zhan, Junda He, Indraneil Paul, et al. 2024. Bigcodebench: Benchmarking code generation with diverse function calls and complex instructions. arXiv preprint arXiv:2406.15877." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 346, + 524, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 346, + 524, + 370 + ], + "spans": [ + { + "bbox": [ + 304, + 346, + 524, + 370 + ], + "type": "text", + "content": "MZWM Zulkifli and Zaid W Mohd. 2008. Attack on cryptography. Comput. Secur, 12(5):33-45." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5939" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 259, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 259, + 84 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 259, + 84 + ], + "type": "text", + "content": "A Detailed Benchmark Description" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 93, + 291, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 93, + 291, + 214 + ], + "spans": [ + { + "bbox": [ + 67, + 93, + 291, + 214 + ], + "type": "text", + "content": "In this chapter, we provide additional details on CipherBank that were not extensively covered in the main text. This includes a detailed breakdown of plaintext tags and their distribution across subdomains, as well as a more comprehensive description of the encryption algorithms used. These details offer deeper insights into the dataset construction and the encryption schemes evaluated in this benchmark." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 224, + 279, + 249 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 224, + 279, + 249 + ], + "spans": [ + { + "bbox": [ + 67, + 224, + 279, + 249 + ], + "type": "text", + "content": "A.1 Tags and Plaintext Distribution Across Subdomains" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 255, + 291, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 255, + 291, + 323 + ], + "spans": [ + { + "bbox": [ + 67, + 255, + 291, + 323 + ], + "type": "text", + "content": "Table 4 provides an overview of the specific tags associated with each subdomain within CipherBank. The dataset spans five primary domains and 14 subdomains, ensuring diverse and realistic plaintext scenarios for cryptographic evaluation." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 333, + 265, + 359 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 333, + 265, + 359 + ], + "spans": [ + { + "bbox": [ + 67, + 333, + 265, + 359 + ], + "type": "text", + "content": "A.2 Detailed Descriptions of Encryption Algorithms" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 364, + 291, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 364, + 291, + 525 + ], + "spans": [ + { + "bbox": [ + 67, + 364, + 291, + 525 + ], + "type": "text", + "content": "This section provides detailed descriptions of the nine encryption algorithms used in CipherBank. These algorithms span substitution, transposition, and custom-designed ciphers, covering a range of complexity levels. Notably, Rot13, Atbash, Polybius, DualAvgCode, and ParityShift also support numeric encryption, further enhancing the diversity of decryption challenges. Table 5 outlines each algorithm and its transformation rules. Some detailed encryption examples are provided below, illustrating how different ciphers transform plaintext into ciphertext." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 527, + 291, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 527, + 291, + 661 + ], + "spans": [ + { + "bbox": [ + 67, + 527, + 291, + 661 + ], + "type": "text", + "content": "For each encryption algorithm, we have implemented a corresponding decryption algorithm to ensure that ciphertext can be fully restored to its original plaintext. This guarantees the reversibility and integrity of the encryption schemes used in CipherBank, allowing for a rigorous evaluation of model decryption capabilities. The decryption process follows the exact inverse of the encryption transformations, ensuring consistency across all test cases." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 672, + 232, + 686 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 672, + 232, + 686 + ], + "spans": [ + { + "bbox": [ + 67, + 672, + 232, + 686 + ], + "type": "text", + "content": "B Experimental Setup Details" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 694, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 694, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 694, + 291, + 775 + ], + "type": "text", + "content": "In our evaluation, we adopt a 3-shot approach. A more natural Ciphertext-Only Attack (zero-shot) setting was not adopted, as it would reduce the task to brute-force decryption, where the model blindly applies all known encryption algorithms in search of a coherent output. 
This contradicts the goal" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 71, + 526, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 526, + 125 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 526, + 125 + ], + "type": "text", + "content": "of reasoning-based inference, where the model is expected to deduce encryption rules from provided examples rather than rely on exhaustive trial and error." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 130, + 527, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 130, + 527, + 238 + ], + "spans": [ + { + "bbox": [ + 302, + 130, + 527, + 238 + ], + "type": "text", + "content": "To ensure a balanced evaluation of decryption difficulty, substitution ciphers exclude numbers to prevent inconsistencies arising from differing cyclic structures. In contrast, ciphers that do not involve direct substitution, such as Reverse, Word-Shift, and similar methods, process numbers normally, preserving structural integrity within the encrypted text." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 243, + 527, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 243, + 527, + 324 + ], + "spans": [ + { + "bbox": [ + 302, + 243, + 527, + 324 + ], + "type": "text", + "content": "For all open-source models, we conduct evaluations using the OpenCompass framework with default temperature to ensure consistent outputs. For models evaluated via API, we perform 5 independent test runs per model and report the average result to enhance stability and reliability." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 349, + 463, + 362 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 349, + 463, + 362 + ], + "spans": [ + { + "bbox": [ + 302, + 349, + 463, + 362 + ], + "type": "text", + "content": "B.1 Prompts Used for Querying" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 375, + 527, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 375, + 527, + 539 + ], + "spans": [ + { + "bbox": [ + 302, + 375, + 527, + 539 + ], + "type": "text", + "content": "This section outlines the prompts used to query models during evaluation. To ensure consistency, all models were tested under a 3-shot setting, where they were provided with three plaintext-ciphertext pairs before attempting to decrypt a new ciphertext. The prompts were designed to encourage logical inference rather than relying on prior knowledge, guiding models to extract encryption patterns and apply the learned rules systematically. Below, Figure 5 provides the system prompt (some reasoning models may not support system prompts), while Figure 6 present the detailed user prompts." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 562, + 449, + 576 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 562, + 449, + 576 + ], + "spans": [ + { + "bbox": [ + 302, + 562, + 449, + 576 + ], + "type": "text", + "content": "B.2 Post-processing Methods" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 589, + 526, + 737 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 589, + 526, + 737 + ], + "spans": [ + { + "bbox": [ + 302, + 589, + 526, + 737 + ], + "type": "text", + "content": "During querying, we instruct the model to think step by step and enclose the final decrypted output within ... tags. 
To extract the decoded plaintext, we apply the regular expression ' result " + }, + { + "bbox": [ + 302, + 589, + 526, + 737 + ], + "type": "inline_equation", + "content": "\\text{串}" + }, + { + "bbox": [ + 302, + 589, + 526, + 737 + ], + "type": "inline_equation", + "content": "(\\text{串} ?)" + }, + { + "bbox": [ + 302, + 589, + 526, + 737 + ], + "type": "text", + "content": " /result>, capturing the content between these tags. The matching process is case-insensitive, aligning with algorithms like Polybius, which inherently do not differentiate between uppercase and lowercase letters when restoring plaintext. This ensures consistency across different decryption schemes." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 315, + 762, + 516, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 762, + 516, + 774 + ], + "spans": [ + { + "bbox": [ + 315, + 762, + 516, + 774 + ], + "type": "text", + "content": "7https://github.com/open-compass/opencompass" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "text", + "content": "5940" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 69, + 144, + 523, + 719 + ], + "blocks": [ + { + "bbox": [ + 173, + 122, + 420, + 135 + ], + "lines": [ + { + "bbox": [ + 173, + 122, + 420, + 135 + ], + "spans": [ + { + "bbox": [ + 173, + 122, + 420, + 135 + ], + "type": "text", + "content": "Table 4: Tag Distribution Across Subdomains in CipherBank" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 69, + 144, + 523, + 719 + ], + "lines": [ + { + "bbox": [ + 69, + 144, + 523, + 719 + ], + "spans": [ + { + "bbox": [ + 69, + 
144, + 523, + 719 + ], + "type": "table", + "html": "
DomainSubdomainTags
Personal Privacy DataIdentity InformationName, ID Card Number, Passport Number, Date of Birth, Gender, Nationality, Marital Status, Mobile Number, Family Member Information (e.g., immediate family names, contact information), Residential Address
Health InformationMedical Record Number (Patient ID), Diagnosis Records, Surgery Records, Examination Reports (e.g., X-ray, CT scan results, heart rate, blood pressure, blood sugar level, blood type), Disease History, Allergy History, Vaccination Records, Family Medical History
Educational DataStudent ID (Student Number), School Records (Enrollment Date, Graduation Date), Academic Records (Subjects, Grades, GPA, Ranking), Degree Information (Bachelor, Master, Doctorate), Awards and Penalties Records (Disciplinary Records)
Enterprise Sensitive DataBusiness InformationBusiness Plans (e.g., Annual Plan, Five-Year Plan), Marketing Strategy (e.g., Marketing Promotion Plan, Advertising Budget), Customer Lists (e.g., Customer Contacts, Preferences), Supplier Information (Supplier List, Cooperation Agreements), Internal Financial Budgets (Cost Structure, Profit Forecasts)
Intellectual PropertyProduct Design Plans (e.g., Prototype Drawings, Design Documents), Internal Technical Documents (e.g., Technical Manuals, Specifications), Test Data (e.g., Product Performance Test Results, Quality Control Records), Copyright Data, Patent Data
Employee InformationContact Information (e.g., Phone Numbers, Email Addresses), Work Experience, Position and Department Information, Salary and Benefits Information (e.g., Salary Amount, Bonuses, Allowances), Performance Evaluation (e.g., Performance Scores, Promotion Records), Contract Information (e.g., Employment Contract, Non-Disclosure Agreement)
Public Safety DataPolice DataCase Information (Case Number, Case Type, Filing Date), Criminal Records (Suspect Information, Crime Time, Crime Location), Alarm Records (Informer Information, Alarm Time, Alarm Content), Investigation Reports (Investigation Results, Investigation Progress), Arrest Records (Arrest Time, Location, Action Description), Traffic Enforcement Data (Violation Records, Penalty Information), Police Officer Information (Officer Number, Name, Position, Department), Police Resource Allocation (Vehicle, Equipment, Weapon Usage Records)
National Security DataBorder Crossing Records (Entry and Exit Personnel Information, Vehicle Registration), Customs Inspection Data (Cargo List, Contraband Records), Territorial Patrol Data (Patrol Reports, Anomalies Records), Cyber Security Monitoring Data (Cyber Attack Records, Threat Intelligence)
Military DataOperation Plans, Target Location, Troop Deployment, Military Base Distribution, Defense Works Location
Financial Confidential DataBanking InformationAccount Number, Bank Card Number, Payment Method, Payment Platform ID, Transaction Details, Loan Amount, Interest Rate, Repayment Plan, Investment Records (Stocks, Funds, Bonds)
Personal IncomeSalary Amount, Pay Date, Tax Number, Tax Return Records
Internet RecordsBrowsing RecordsPage Interaction, Search Behavior, Click Activity, Device Information, Geolocation, Checkout Process, Multimedia Interaction, Download Records
Cookie DataSession Management, User Identification, Ad Targeting, Behavior Tracking, Authentication Tokens, Login Status
User PreferencesPreferred Genres, Device Usage Habits, Notification Preferences, Shopping Preferences, Video Preferences, Reading Habits
", + "image_path": "f3e59a11922e149ec1e17250999d1d78fdb40700af0eee247df6a8342de9aab0.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5941" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 75, + 79, + 355, + 89 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 79, + 355, + 89 + ], + "spans": [ + { + "bbox": [ + 75, + 79, + 355, + 89 + ], + "type": "text", + "content": "Example A.1: Plain-Ciphertext Pair (Identity Information) - Only Letter" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 75, + 95, + 198, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 95, + 198, + 105 + ], + "spans": [ + { + "bbox": [ + 75, + 95, + 198, + 105 + ], + "type": "text", + "content": "Domain: Personal Privacy Data" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 75, + 109, + 207, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 109, + 207, + 120 + ], + "spans": [ + { + "bbox": [ + 75, + 109, + 207, + 120 + ], + "type": "text", + "content": "Subdomain: Identity Information" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 75, + 122, + 327, + 134 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 122, + 327, + 134 + ], + "spans": [ + { + "bbox": [ + 75, + 122, + 327, + 134 + ], + "type": "text", + "content": "Tag Combination: [\"Name\", \"Date of Birth\", \"Passport Number\"]" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 76, + 137, + 114, + 147 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 137, + 114, + 147 + ], + "spans": [ + { + 
"bbox": [ + 76, + 137, + 114, + 147 + ], + "type": "text", + "content": "Plaintext:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 75, + 150, + 392, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 150, + 392, + 162 + ], + "spans": [ + { + "bbox": [ + 75, + 150, + 392, + 162 + ], + "type": "text", + "content": "Peter was born on April 23, 1985, and carries a passport with the number X123456789." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 76, + 164, + 151, + 175 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 164, + 151, + 175 + ], + "spans": [ + { + "bbox": [ + 76, + 164, + 151, + 175 + ], + "type": "text", + "content": "Encryption results:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 75, + 178, + 518, + 328 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 75, + 178, + 434, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 178, + 434, + 190 + ], + "spans": [ + { + "bbox": [ + 75, + 178, + 434, + 190 + ], + "type": "text", + "content": "(1) Rot13: Crgre jnf obea ba Ncevy 23, 1985, naq pneevrf n cnffcbeg jvgu gur ahzore K123456789." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 75, + 192, + 434, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 192, + 434, + 203 + ], + "spans": [ + { + "bbox": [ + 75, + 192, + 434, + 203 + ], + "type": "text", + "content": "(2) Atbash: Kvgvi dzh ylim lm Zkiro 23, 1985, zmw xziirvh z kzhhklig drgs gsv mfnyvi C123456789." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 75, + 206, + 518, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 206, + 518, + 230 + ], + "spans": [ + { + "bbox": [ + 75, + 206, + 518, + 230 + ], + "type": "text", + "content": "(3) Polybius: 34 15 42 15 36 45 11 41 12 33 36 32 33 32 11 34 36 23 26 2 3, 1985, 11 32 14 13 11 36 36 23 15 41 11 34 11 41 41 34 33 36 42 45 23 42 22 42 22 15 32 43 31 12 15 36 46 12 3 4 5 6 7 89." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 75, + 234, + 450, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 234, + 450, + 245 + ], + "spans": [ + { + "bbox": [ + 75, + 234, + 450, + 245 + ], + "type": "text", + "content": "(4) Vigenère: Pgeet wcd dzrp op Arcin 23, 1985, cyd natcigd pcdszrv wkeh eh nwxbgc Z123456789." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 75, + 248, + 440, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 248, + 440, + 259 + ], + "spans": [ + { + "bbox": [ + 75, + 248, + 440, + 259 + ], + "type": "text", + "content": "(5) Reverse: .987654321X rebmun eht htiw tropssap a seirrac dna ,5891 ,32 lirpA no nrob saw reteP" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 75, + 262, + 448, + 273 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 262, + 448, + 273 + ], + "spans": [ + { + "bbox": [ + 75, + 262, + 448, + 273 + ], + "type": "text", + "content": "(6) SwapPairs: ePet raw sobnro npAir l32,9158,na dacrei s aapssoptrw ti hht eunbmreX 21436587.9" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 75, + 276, + 518, + 301 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 276, + 518, + 301 + ], + "spans": [ + { + "bbox": [ + 75, + 276, + 518, + 301 + ], + "type": "text", + "content": "(7) DualAvgCode: OQdfsudfqs vxaart acnpqsmo npmo AAoqqshjkm 23, 1985, aamoce bdaaqsqshjdfrt aa oqaartroqnpssu vxhjsugi sugidf motvlnacdfqs WY123456789." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 75, + 304, + 453, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 304, + 453, + 315 + ], + "spans": [ + { + "bbox": [ + 75, + 304, + 453, + 315 + ], + "type": "text", + "content": "(8) ParityShift: Qduds vzr cnso no Zqshm 23, 1985, zoe bzsshrd z qzrrqnsu vuhui uid otlcds Y123456789." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 75, + 317, + 451, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 317, + 451, + 328 + ], + "spans": [ + { + "bbox": [ + 75, + 317, + 451, + 328 + ], + "type": "text", + "content": "(9) WordShift: erPet was nbor no ilApr 23, 5,198 and riescar a sportpas hwt the bernum 3456789.X12" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 75, + 356, + 318, + 368 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 356, + 318, + 368 + ], + "spans": [ + { + "bbox": [ + 75, + 356, + 318, + 368 + ], + "type": "text", + "content": "Example A.2: Plain-Ciphertext Pair (Police Data) - Only Letter" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 75, + 373, + 185, + 383 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 373, + 185, + 383 + ], + "spans": [ + { + "bbox": [ + 75, + 373, + 185, + 383 + ], + "type": "text", + "content": "# Domain: Public Safety Data" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 76, + 386, + 176, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 386, + 176, + 396 + ], + "spans": [ + { + "bbox": [ + 76, + 386, + 176, + 396 + ], + "type": "text", + "content": "Subdomain: Police Data" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 75, + 400, + 476, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 400, + 476, + 412 + ], + "spans": [ + { + "bbox": [ + 75, + 400, + 476, + 412 + ], + "type": "text", + "content": "Tag Combination: [\"Suspect 
Information\", \"Crime Time\", \"Crime Location\", \"Police Officer Information\"]" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 76, + 415, + 114, + 424 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 415, + 114, + 424 + ], + "spans": [ + { + "bbox": [ + 76, + 415, + 114, + 424 + ], + "type": "text", + "content": "Plaintext:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 75, + 428, + 518, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 428, + 518, + 453 + ], + "spans": [ + { + "bbox": [ + 75, + 428, + 518, + 453 + ], + "type": "text", + "content": "Suspect: Jonathan, Crime: Burglary, Time: 2022-03-12 14:30, Location: 123 Elm Street, Officer Smith observed suspicious activity near 5th Ave on 2022-03-13." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 76, + 456, + 151, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 456, + 151, + 467 + ], + "spans": [ + { + "bbox": [ + 76, + 456, + 151, + 467 + ], + "type": "text", + "content": "Encryption results:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 75, + 470, + 518, + 760 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 75, + 470, + 518, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 470, + 518, + 495 + ], + "spans": [ + { + "bbox": [ + 75, + 470, + 518, + 495 + ], + "type": "text", + "content": "(1) Rot13: Fhcrpg: Wbanguna, Pevzr: Ohetynel, Gvzr: 2022-03-12 14:30, Ybpngvba: 123 Ryz Fgerrg, Bssvpre Fzygu bofreirq fhcvpbhf npgvivgl arne 5gu Nir ba 2022-03-13." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 75, + 498, + 518, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 498, + 518, + 523 + ], + "spans": [ + { + "bbox": [ + 75, + 498, + 518, + 523 + ], + "type": "text", + "content": "(2) Atbash: Hfhkvyg: Qlmzgszm, Xirnv: Yfitozib, Grnv: 2022-03-12 14:30, Olxzgrlm: 123 Von Hgivvg, Luurxvi Hnrgs lyhvieww hfhkrxrlfh zxgrergb mvzi 5gs Zev lm 2022-03-13." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 75, + 526, + 518, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 526, + 518, + 577 + ], + "spans": [ + { + "bbox": [ + 75, + 526, + 518, + 577 + ], + "type": "text", + "content": "(3) Polybius: 41 43 41 34 15 13 42 : 24 33 32 11 42 22 11 32 , 13 36 23 31 15 : 12 43 36 21 26 11 36 51 , 42 23 31 15 : 20 22 - 03 - 1214 : 30 , 2633131142233332 : 123152631414236151542 , 331616231315364131234222 3312411536441514414341342313233343411113422344234513215113654222114415332022 -03 - 13." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 75, + 581, + 518, + 607 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 581, + 518, + 607 + ], + "spans": [ + { + "bbox": [ + 75, + 581, + 518, + 607 + ], + "type": "text", + "content": "(4) Vigenère: Swdpgnt: Jqyavsap, Eciop: Mutrlccy, Tkxe: 2022-03-12 14:30, Lqnavtop: 123 Plo Svcege, Zfhtcgc Uxivs qmsgcvgo ufsrtckzuu aeeixta nglr 5tj Axp qy 2022-03-13." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 75, + 610, + 518, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 610, + 518, + 634 + ], + "spans": [ + { + "bbox": [ + 75, + 610, + 518, + 634 + ], + "type": "text", + "content": "(5) Reverse: 31-30-2202 no evA ht5 raen ytivitca suoicipsus devresbo htimS reciffO ,teertS mlE 321 :noitacoL ,03:41 21-30-2202 :emiT ,yralgruB :emirC ,nahtanoJ :tcepsuS." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 75, + 637, + 518, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 637, + 518, + 662 + ], + "spans": [ + { + "bbox": [ + 75, + 637, + 518, + 662 + ], + "type": "text", + "content": "(6) SwapPairs: uSpsc:tJ notaah,nC irem :uBgralyr ,iTem :02220-3211 :403 ,oLacitno :21 3lE mtSerte ,fOifec rmStihboesvrdes suipicuo scaitivyn ae rt5 hvA eno2 20-2301-3." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 75, + 666, + 518, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 666, + 518, + 704 + ], + "spans": [ + { + "bbox": [ + 75, + 666, + 518, + 704 + ], + "type": "text", + "content": "(7) DualAvgCode: RTvtrqdfbdu: IKnpmoaasugiaamo, BDqshlndf: ACtvqsfkmaaqsz, SUhjndf: 2022-03-12 14:30, KMnpbdaasuhjnpmo: 123 DFkmln RTsuqsdfdu, NPegeghjbddfq RSInhjsugi npacrtdfquuwdfce rttvrtoqhjbdhjnpvtrt aabb-suhjuwhjsuxz modfaaqs 5sugi AAuwdf npmo 2022-03-13." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 75, + 707, + 518, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 707, + 518, + 732 + ], + "spans": [ + { + "bbox": [ + 75, + 707, + 518, + 732 + ], + "type": "text", + "content": "(8) ParityShift: Rtrqduu: Knozuizo, Bshld: Ctsfznxsx, Uhld: 2022-03-12 14:30, Mnbzuhno: 123 Dml Rusddu, Ngghbds Rlhuicnrdswde rtrqhbntr zbuwhux odzs 5ui Zwd no 2022-03-13." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 75, + 735, + 518, + 760 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 735, + 518, + 760 + ], + "spans": [ + { + "bbox": [ + 75, + 735, + 518, + 760 + ], + "type": "text", + "content": "(9) **WordShift:** pect:Sus athan,Jon me:Cri glary,Bur e:Tim 2-03-12202 30,14: ation:Loc 123 Elm eet,Str icerOff thSmi ervedobs picioussus ivityact rnea 5th Ave no 2-03-13202." 
+ } + ] + } + ], + "index": 32 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5942" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 75, + 84, + 369, + 96 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 84, + 369, + 96 + ], + "spans": [ + { + "bbox": [ + 75, + 84, + 369, + 96 + ], + "type": "text", + "content": "Example A.3: Plain-Ciphertext Pair (Health Information) - Letter&Number" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 75, + 101, + 198, + 113 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 101, + 198, + 113 + ], + "spans": [ + { + "bbox": [ + 75, + 101, + 198, + 113 + ], + "type": "text", + "content": "# Domain: Personal Privacy Data" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 75, + 116, + 204, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 116, + 204, + 126 + ], + "spans": [ + { + "bbox": [ + 75, + 116, + 204, + 126 + ], + "type": "text", + "content": "Subdomain: Health Information" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 75, + 129, + 289, + 141 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 129, + 289, + 141 + ], + "spans": [ + { + "bbox": [ + 75, + 129, + 289, + 141 + ], + "type": "text", + "content": "Tag Combination: [\"Patient ID\", \"Diagnosis Records\"]" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 75, + 143, + 114, + 153 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 143, + 114, + 153 + ], + "spans": [ + { + "bbox": [ + 75, + 143, + 114, + 153 + ], + "type": "text", + "content": "Plaintext:" + } + ] + } + ], + "index": 4 + }, + 
{ + "bbox": [ + 75, + 157, + 396, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 157, + 396, + 169 + ], + "spans": [ + { + "bbox": [ + 75, + 157, + 396, + 169 + ], + "type": "text", + "content": "Patient ID: R094713; Name: Jamie Lee; Age: 45; Gender: Female; EMR: EHR-234987." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 75, + 171, + 151, + 182 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 171, + 151, + 182 + ], + "spans": [ + { + "bbox": [ + 75, + 171, + 151, + 182 + ], + "type": "text", + "content": "Encryption results:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 75, + 185, + 520, + 322 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 75, + 185, + 431, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 185, + 431, + 196 + ], + "spans": [ + { + "bbox": [ + 75, + 185, + 431, + 196 + ], + "type": "text", + "content": "(1) Rot13: Cngvrag VQ: E327046; Anzr: Wznvr Yrr; Ntr: 78; Traqe: Srznyr; RZE: RUE-567210." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 75, + 199, + 439, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 199, + 439, + 210 + ], + "spans": [ + { + "bbox": [ + 75, + 199, + 439, + 210 + ], + "type": "text", + "content": "(2) Atbash: Kzgrvmg RW: I905286; Mznv: Qznrv Ovv; Ztv: 54; Tvmwvi: Uvnzov; VNI: VSI-765012." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 75, + 213, + 520, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 213, + 520, + 237 + ], + "spans": [ + { + "bbox": [ + 75, + 213, + 520, + 237 + ], + "type": "text", + "content": "(3) Polybius: 34 11 42 23 15 32 42 23 14 : 36 66 65 56 63 53 55 ; 32 11 31 15 : 24 11 31 23 15 26 15 15 ; 11 21 15 : 56 61 ; 21 15 32 14 15 36 : 16 15 31 11 26 15 ; 15 31 36 : 15 22 36 - 54 55 56 65 64 63." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 75, + 240, + 430, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 240, + 430, + 252 + ], + "spans": [ + { + "bbox": [ + 75, + 240, + 430, + 252 + ], + "type": "text", + "content": "(4) Reverse: .789432-R HRE ;elameF :redneG ;54 :egA ;eeL eimaJ :emaN ;317490 R :DI tneitaP" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 75, + 254, + 456, + 265 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 254, + 456, + 265 + ], + "spans": [ + { + "bbox": [ + 75, + 254, + 456, + 265 + ], + "type": "text", + "content": "(5) SwapPairs: aPteti DI: 0R94713; aNme: aJmei eLe; gAe: 45; eGndre: eFmale; MRE: HRE-239487." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 75, + 269, + 449, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 269, + 449, + 280 + ], + "spans": [ + { + "bbox": [ + 75, + 269, + 449, + 280 + ], + "type": "text", + "content": "(6) **WordShift:** atientP ID: R94713; ameN: Jamie eLe; geA: 45; enderG: emaleF; REM: EHR-234987." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 75, + 282, + 520, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 282, + 520, + 307 + ], + "spans": [ + { + "bbox": [ + 75, + 282, + 520, + 307 + ], + "type": "text", + "content": "(7) DualAvgCode: OQaaushjdmosu HJCE: QS009935680224; MOaalndf: IKaalnhjdf KMdfd; AAfhdf: 3546; FHdfmoced-fqs: EGdfnaakmdf; DFLNQS: DFGIQS-132435997968." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 75, + 310, + 457, + 322 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 310, + 457, + 322 + ], + "spans": [ + { + "bbox": [ + 75, + 310, + 457, + 322 + ], + "type": "text", + "content": "(8) ParityShift: Qzuhdou HE: S185602; Ozld: Kzlhd Mdd; Zfd: 54; Fdoeds: Gdlzmd; DLS: DIS-325896." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 75, + 363, + 376, + 375 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 363, + 376, + 375 + ], + "spans": [ + { + "bbox": [ + 75, + 363, + 376, + 375 + ], + "type": "text", + "content": "Example A.4: Plain-Ciphertext Pair (Banking Information) - Letter&Number" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 75, + 380, + 218, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 380, + 218, + 390 + ], + "spans": [ + { + "bbox": [ + 75, + 380, + 218, + 390 + ], + "type": "text", + "content": "# Domain: Financial Confidential Data" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 75, + 393, + 210, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 393, + 210, + 404 + ], + "spans": [ + { + "bbox": [ + 75, + 393, + 210, + 404 + ], + "type": "text", + "content": "## Subdomain: Banking Information" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 75, + 407, + 404, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 407, + 404, + 418 + ], + "spans": [ + { + "bbox": [ + 75, + 407, + 404, + 418 + ], + "type": "text", + "content": "Tag Combination: [\"Account Number\", \"Bank Card Number\", \"Payment Platform ID\"]" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 75, + 421, + 114, + 431 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 421, + 114, + 431 + ], + "spans": [ + { + "bbox": [ + 75, + 421, + 114, + 431 + ], + "type": "text", + "content": "Plaintext:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 75, + 435, + 520, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 435, + 520, + 460 + ], + "spans": [ + { + "bbox": [ + 75, + 435, + 520, + 460 + ], + "type": "text", + "content": "Account Number: 123456789, Bank: LA Bank, Card Number: 9876-5432-1098-7654, Payment Method: Virtual Credit Card, 
Payment Platform ID: ABC123XYZ, Timestamp: 2023-09-15 14:35, Amount: $250.00." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 75, + 463, + 151, + 474 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 463, + 151, + 474 + ], + "spans": [ + { + "bbox": [ + 75, + 463, + 151, + 474 + ], + "type": "text", + "content": "Encryption results:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 75, + 477, + 520, + 753 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 75, + 477, + 518, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 477, + 518, + 502 + ], + "spans": [ + { + "bbox": [ + 75, + 477, + 518, + 502 + ], + "type": "text", + "content": "(1) Rot13: Nppbhag Ahzore: 456789012, Onax: YN Onax, Pneq Ahzore: 2109-8765-4321-0987, CnIzrag Zrgubq: Iveghny PerqvG Pneq, CnIzrag CyngsbEZ VQ: NOP456KLM, Gvzrfgnzc: 5356-32-48 47:68, Nzbhag: $583.33." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 75, + 505, + 518, + 530 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 505, + 518, + 530 + ], + "spans": [ + { + "bbox": [ + 75, + 505, + 518, + 530 + ], + "type": "text", + "content": "(2) Atbash: Zxlfmg Mfnyvi: 876543210, Yzmp: OZ Yzmp, Xziw Mfnyvi: 0123-4567-8901-2345, Kzbnvmg Nvgslw: Erigfzo Xivwr Xziw, Kzbnvmg Kozgulin RW: ZYX876CBA, Grnvhgznk: 7976-90-84 85:64, Znlfmg: $749.99." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 75, + 533, + 520, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 533, + 520, + 585 + ], + "spans": [ + { + "bbox": [ + 75, + 533, + 520, + 585 + ], + "type": "text", + "content": "(3) Polybius: 11 13 13 33 43 32 42 32 43 31 12 15 36 : 53 54 55 56 61 62 63 64 65 , 12 11 32 25 : 26 11 12 11 32 25 , 13 11\n36 14 32 43 31 12 15 36 : 65 64 63 62 - 61 56 55 54 - 53 66 65 64 - 63 62 61 56 , 34 11 51 31 15 32 42 31 15 42 22 33 14 :\n44 23 36 42 43 11 26 13 36 15 14 23 42 13 11 36 14 , 34 11 51 31 15 32 42 34 26 11 42 16 33 36 31 23 14 : 11 12 13 53 54\n55 46 51 52 , 42 23 31 15 41 42 11 31 34 : 54 66 54 55 - 66 65 - 53 61 53 56 : 55 61 , 11 31 33 43 32 42 : $546166 .6666 ." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 75, + 588, + 518, + 614 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 588, + 518, + 614 + ], + "spans": [ + { + "bbox": [ + 75, + 588, + 518, + 614 + ], + "type": "text", + "content": "(4) Vigenère: Swdpgnt: Jqyavsap, Eciop: Mutrlccy, Tkxe: 2022-03-12 14:30, Lqnavtop: 123 Plo Svcege, Zfhtcgc Uxivs qmsgcvgo ufsrtckzuu aeeixta nglr 5tj Axp qy 2022-03-13." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 75, + 617, + 520, + 641 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 617, + 520, + 641 + ], + "spans": [ + { + "bbox": [ + 75, + 617, + 520, + 641 + ], + "type": "text", + "content": "(5) Reverse: .00.052$ :tnuomA ,53:41 51-90-3202 :pmatsemit ,ZYX321CBA :DI mroftalP tnemyap ,draC tiderC lautriV :dohtem tnemyap ,4567-8901-2345-6789 :rebnuN draC ,knaB AL :knaB ,987654321 :rebnuN tnuoccA" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 75, + 645, + 518, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 645, + 518, + 669 + ], + "spans": [ + { + "bbox": [ + 75, + 645, + 518, + 669 + ], + "type": "text", + "content": "(6) SwapPairs: cAotcnu mNuber: 214365879, aBnk: A Lank, aCrd Nmu:bre 8967-5423-1980-7564, aPymnet Mtohed: Vritaul Cerdti aCdr, aPymnet Ptaforml DI: BAC321YXZ, iTmsetamp: 3202-90-51 53:41, aAmount: $250.00." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 75, + 671, + 520, + 724 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 671, + 520, + 724 + ], + "spans": [ + { + "bbox": [ + 75, + 671, + 520, + 724 + ], + "type": "text", + "content": "(7) DualAvgCode: AAbbddnptvmosu MOtvlnacdfqs: 021324354657687999, ACAamojl: KMAA ACAamojl, BDaaqsc MEtvlnacdfqs: 99796857-46352413-02009979-68574635, OQaaxzlndfmosu LNdfsuginpce: UWhjssutvaakm BDqsdfcehjsu BDaaqsc, OQaaxzlndfmosu OQkmaasuegnpqsln HJCE: AAACBD021324WYXZZZ, SUhjlndfrtsuaalnoq: 13001324-0099-0246 0235:2446, AAlnnptvmosu: $134600.0000." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 75, + 727, + 518, + 753 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 727, + 518, + 753 + ], + "spans": [ + { + "bbox": [ + 75, + 727, + 518, + 753 + ], + "type": "text", + "content": "(8) ParityShift: Zbbntou Otlcds: 032547698, Czoj: MZ Czoj, Bzse Otlcds: 8967-4523-0189-6745, Qzxldou Lduine: Whsutzm Bsdehu Bzse, Qzxldou Qmzugsnl HE: ZCB032YXA, Uhldruzlj: 3132-18-04 05:24, Zlntou: $341.11." + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5943" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 68, + 138, + 523, + 724 + ], + "blocks": [ + { + "bbox": [ + 168, + 116, + 424, + 130 + ], + "lines": [ + { + "bbox": [ + 168, + 116, + 424, + 130 + ], + "spans": [ + { + "bbox": [ + 168, + 116, + 424, + 130 + ], + "type": "text", + "content": "Table 5: Descriptions of Encryption Algorithms in CipherBank" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 68, + 138, + 523, + 724 + ], + "lines": [ + { + "bbox": [ + 68, + 138, + 523, + 724 + ], + "spans": [ + { + "bbox": [ + 68, + 138, + 523, + 724 + ], + "type": "table", + "html": "
AlgorithmDescription
Rot13A simple substitution cipher that shifts each letter 13 places forward in the alphabet. Encryption and decryption are identical, as applying the transformation twice restores the original text. Non-alphabetic characters remain unchanged.Additionally, Rot13 in CipherBank supports number encryption by shifting digits cyclically within the range 0-9.
AtbashA monoalphabetic substitution cipher where each letter is replaced with its counterpart from the reversed alphabet (e.g., A→Z, B→Y). Since the transformation is symmetric, encryption and decryption follow the same process CipherBank's Atbash implementation extends this to digits, where each number is replaced with its complement relative to 9 (e.g., 0→9, 1→8, ..., 9→0).
PolybiusA fractionating substitution cipher that replaces each letter with a two-digit coordinate from a 6×6 grid, mapping characters to numerical positions. Traditional Polybius squares typically use a 5×5 grid, supporting only letter encryption while merging I and J into the same cell, leading to ambiguity during decryption. To address this limitation and enable number encryption, CipherBank extends the Polybius square to a 6×6 grid, allowing both letters and numbers to be uniquely represented as coordinate pairs, increasing the cipher's complexity.
VigenèreA polyalphabetic substitution cipher that employs multiple shifting alphabets determined by a repeating key. Unlike monoalphabetic ciphers that use a single mapping, Vigenère utilizes multiple substitution tables, where each plaintext letter is shifted based on the corresponding key character's position in the alphabet. By default, the key is set to "ACL".This multi-table approach enhances security by distributing letter frequencies across different shifts, making it more resistant to frequency analysis. Decryption reverses this process by applying the inverse shifts dictated by the key. Unlike Rot13, it requires a key for both encryption and decryption.
ReverseA transposition cipher that reverses the order of all characters in the plaintext. Since it does not substitute characters, it preserves all information but alters the sequence, making it effective against naive attacks.
SwapPairsA transposition cipher that swaps adjacent characters in the plaintext. If the text length is odd, the final character remains unchanged. Decryption follows the same swapping process.
DualAvgCodeA custom transformation where each letter expands into two adjacent characters, shifting one position forward and one position backward in the ASCII table. Special cases (e.g., 'a', 'z', 'A', 'Z') are duplicated instead CipherBank extends this method to digits, where each number expands into two adjacent values (e.g., 2 → "13", 5 → "46"), increasing redundancy in the encrypted text.
ParityShiftA custom encryption method that shifts each letter one position forward or backward based on its ASCII parity. Even-ASCII characters shift forward, while odd-ASCII characters shift backward. For digits, ParityShift follows a similar rule, shifting numbers based on their parity (e.g., even numbers shift up, odd numbers shift down within 0-9).
WordShiftA transformation applied at the word level rather than the character level. Each word undergoes a left shift by a fixed number of positions, cycling characters within the word while preserving word spacing. Decryption reverses this shift, ensuring character order is restored within each word. By default, the shift is set to 3 positions.
", + "image_path": "d6bb30e1962e77b8709915e18c6d880330246ae23981f9ae39cd774eda9323ed.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 310, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 310, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 310, + 791 + ], + "type": "text", + "content": "5944" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 76, + 116, + 126, + 126 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 116, + 126, + 126 + ], + "spans": [ + { + "bbox": [ + 76, + 116, + 126, + 126 + ], + "type": "text", + "content": "Example B.1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 76, + 133, + 109, + 142 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 133, + 109, + 142 + ], + "spans": [ + { + "bbox": [ + 76, + 133, + 109, + 142 + ], + "type": "text", + "content": "## Role:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 93, + 147, + 207, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 147, + 207, + 158 + ], + "spans": [ + { + "bbox": [ + 93, + 147, + 207, + 158 + ], + "type": "text", + "content": "Cryptography Analysis Expert." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 76, + 161, + 113, + 170 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 161, + 113, + 170 + ], + "spans": [ + { + "bbox": [ + 76, + 161, + 113, + 170 + ], + "type": "text", + "content": "## Goals:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 92, + 174, + 500, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 174, + 500, + 186 + ], + "spans": [ + { + "bbox": [ + 92, + 174, + 500, + 186 + ], + "type": "text", + "content": "Utilize the provided ciphertext and plaintext examples to analyze encryption patterns and decrypt new ciphertext." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 76, + 189, + 129, + 198 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 189, + 129, + 198 + ], + "spans": [ + { + "bbox": [ + 76, + 189, + 129, + 198 + ], + "type": "text", + "content": "## Workflow:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 92, + 202, + 483, + 227 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 92, + 202, + 483, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 202, + 483, + 213 + ], + "spans": [ + { + "bbox": [ + 92, + 202, + 483, + 213 + ], + "type": "text", + "content": "1. Analyze the provided ciphertext and plaintext examples to identify possible encryption patterns and rules." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 92, + 216, + 446, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 216, + 446, + 227 + ], + "spans": [ + { + "bbox": [ + 92, + 216, + 446, + 227 + ], + "type": "text", + "content": "2. Apply the decryption algorithm to the new ciphertext, attempt to decrypt, and verify the results." 
+ } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 244, + 243, + 348, + 254 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 243, + 348, + 254 + ], + "spans": [ + { + "bbox": [ + 244, + 243, + 348, + 254 + ], + "type": "text", + "content": "Figure 5: System Prompt" + } + ] + } + ], + "index": 9, + "type": "text" + }, + { + "bbox": [ + 76, + 354, + 126, + 364 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 354, + 126, + 364 + ], + "spans": [ + { + "bbox": [ + 76, + 354, + 126, + 364 + ], + "type": "text", + "content": "Example B.2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 76, + 370, + 138, + 380 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 370, + 138, + 380 + ], + "spans": [ + { + "bbox": [ + 76, + 370, + 138, + 380 + ], + "type": "text", + "content": "Background:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 92, + 384, + 460, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 384, + 460, + 395 + ], + "spans": [ + { + "bbox": [ + 92, + 384, + 460, + 395 + ], + "type": "text", + "content": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given examples." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 93, + 398, + 163, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 398, + 163, + 409 + ], + "spans": [ + { + "bbox": [ + 93, + 398, + 163, + 409 + ], + "type": "text", + "content": "Think step by step." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 92, + 412, + 437, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 412, + 437, + 423 + ], + "spans": [ + { + "bbox": [ + 92, + 412, + 437, + 423 + ], + "type": "text", + "content": "Provide the Plaintext result in the format text, where text is the decrypted text." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 76, + 426, + 129, + 436 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 426, + 129, + 436 + ], + "spans": [ + { + "bbox": [ + 76, + 426, + 129, + 436 + ], + "type": "text", + "content": "Examples:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 94, + 440, + 143, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 440, + 143, + 450 + ], + "spans": [ + { + "bbox": [ + 94, + 440, + 143, + 450 + ], + "type": "text", + "content": "* Example 1:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 454, + 158, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 454, + 158, + 465 + ], + "spans": [ + { + "bbox": [ + 111, + 454, + 158, + 465 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 129, + 468, + 445, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 468, + 445, + 479 + ], + "spans": [ + { + "bbox": [ + 129, + 468, + 445, + 479 + ], + "type": "text", + "content": "Anzr: Rzvyl Wbuafb; Qngr bs Ovegu: Whyl 15, 1990; Cnffcbeg Ahzore: L987654321" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 111, + 482, + 152, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 482, + 152, + 491 + ], + "spans": [ + { + "bbox": [ + 111, + 482, + 152, + 491 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 129, + 495, + 435, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 495, + 435, + 506 + ], + "spans": [ + { + "bbox": [ + 129, + 495, + 435, + 506 + ], + "type": "text", + "content": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 94, + 510, + 143, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 510, + 143, + 520 + 
], + "spans": [ + { + "bbox": [ + 94, + 510, + 143, + 520 + ], + "type": "text", + "content": "* Example 2:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 111, + 524, + 158, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 524, + 158, + 534 + ], + "spans": [ + { + "bbox": [ + 111, + 524, + 158, + 534 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 129, + 537, + 465, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 537, + 465, + 548 + ], + "spans": [ + { + "bbox": [ + 129, + 537, + 465, + 548 + ], + "type": "text", + "content": "Pnfr Ahzone: 2024-CF-001234; Pnfr Glcr: Gursg/Oernx-Va; Svyat Qngr: Bpgbore 19, 2024" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 111, + 552, + 152, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 552, + 152, + 561 + ], + "spans": [ + { + "bbox": [ + 111, + 552, + 152, + 561 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 129, + 565, + 460, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 565, + 460, + 576 + ], + "spans": [ + { + "bbox": [ + 129, + 565, + 460, + 576 + ], + "type": "text", + "content": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 94, + 579, + 143, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 579, + 143, + 590 + ], + "spans": [ + { + "bbox": [ + 94, + 579, + 143, + 590 + ], + "type": "text", + "content": "* Example 3:" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 111, + 594, + 158, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 594, + 158, + 604 + ], + "spans": [ + { + "bbox": [ + 111, + 594, + 158, + 604 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 27 + }, + { + 
"bbox": [ + 129, + 607, + 514, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 607, + 514, + 618 + ], + "spans": [ + { + "bbox": [ + 129, + 607, + 514, + 618 + ], + "type": "text", + "content": "gnk_vqragvsvre: GKA-2023-NOP456, gnk_erpbeqf: Irne: 2023, fgnghf: Cebprffrq, ershaq_vffhrq: 620.00" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 111, + 622, + 152, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 622, + 152, + 631 + ], + "spans": [ + { + "bbox": [ + 111, + 622, + 152, + 631 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 129, + 634, + 499, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 634, + 499, + 645 + ], + "spans": [ + { + "bbox": [ + 129, + 634, + 499, + 645 + ], + "type": "text", + "content": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 76, + 649, + 113, + 660 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 649, + 113, + 660 + ], + "spans": [ + { + "bbox": [ + 76, + 649, + 113, + 660 + ], + "type": "text", + "content": "Input:" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 94, + 663, + 140, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 663, + 140, + 674 + ], + "spans": [ + { + "bbox": [ + 94, + 663, + 140, + 674 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 111, + 677, + 272, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 677, + 272, + 688 + ], + "spans": [ + { + "bbox": [ + 111, + 677, + 272, + 688 + ], + "type": "text", + "content": "Yrqvn, na Nzrevpna, erfvqrva Ybf Natryrf." 
+ } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 94, + 691, + 135, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 691, + 135, + 700 + ], + "spans": [ + { + "bbox": [ + 94, + 691, + 135, + 700 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 190, + 714, + 402, + 727 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 190, + 714, + 402, + 727 + ], + "spans": [ + { + "bbox": [ + 190, + 714, + 402, + 727 + ], + "type": "text", + "content": "Figure 6: User Prompt (Rot13 - 3shot - Only Letter)" + } + ] + } + ], + "index": 35, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5945" + } + ] + } + ], + "index": 36 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 70, + 254, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 70, + 254, + 84 + ], + "spans": [ + { + "bbox": [ + 67, + 70, + 254, + 84 + ], + "type": "text", + "content": "C Extended Experimental Results" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 92, + 276, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 92, + 276, + 118 + ], + "spans": [ + { + "bbox": [ + 67, + 92, + 276, + 118 + ], + "type": "text", + "content": "C.1 Levenshtein Distance Evaluation from Main Results" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 123, + 290, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 123, + 290, + 244 + ], + "spans": [ + { + "bbox": [ + 67, + 123, + 290, + 244 + ], + "type": "text", + "content": "In the main text, most reported results are based on accuracy, which provides a binary assessment of decryption success. 
However, accuracy does not account for cases where decrypted outputs closely resemble the ground truth but contain minor errors. To provide a more fine-grained evaluation, we also compute Levenshtein similarity, which measures the edit distance between the model output and the correct plaintext." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 245, + 289, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 245, + 289, + 270 + ], + "spans": [ + { + "bbox": [ + 67, + 245, + 289, + 270 + ], + "type": "text", + "content": "We define the Levenshtein similarity score as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 108, + 280, + 290, + 310 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 280, + 290, + 310 + ], + "spans": [ + { + "bbox": [ + 108, + 280, + 290, + 310 + ], + "type": "interline_equation", + "content": "S _ {\\mathrm {l e v}} = 1 - \\frac {d _ {\\mathrm {l e v}} \\left(P _ {\\mathrm {p r e d}} , P _ {\\mathrm {r e f}}\\right)}{\\max \\left(\\left| P _ {\\mathrm {p r e d}} \\right| , \\left| P _ {\\mathrm {r e f}} \\right|\\right)} \\tag {1}", + "image_path": "001fad55bde1c74d56b18c61347900d7e6dfdb45be9db21de8c1c42c748291a7.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 79, + 316, + 112, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 316, + 112, + 326 + ], + "spans": [ + { + "bbox": [ + 79, + 316, + 112, + 326 + ], + "type": "text", + "content": "where:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 80, + 337, + 290, + 400 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 80, + 337, + 290, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 337, + 290, + 364 + ], + "spans": [ + { + "bbox": [ + 80, + 337, + 290, + 364 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 80, + 337, + 290, + 364 + ], + "type": "inline_equation", + "content": 
"d_{\\mathrm{lev}}(P_{\\mathrm{pred}}, P_{\\mathrm{ref}})" + }, + { + "bbox": [ + 80, + 337, + 290, + 364 + ], + "type": "text", + "content": " is the Levenshtein distance between the predicted and reference plaintexts." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 80, + 373, + 290, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 373, + 290, + 400 + ], + "spans": [ + { + "bbox": [ + 80, + 373, + 290, + 400 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 80, + 373, + 290, + 400 + ], + "type": "inline_equation", + "content": "|P_{\\mathrm{pred}}|" + }, + { + "bbox": [ + 80, + 373, + 290, + 400 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 80, + 373, + 290, + 400 + ], + "type": "inline_equation", + "content": "|P_{\\mathrm{ref}}|" + }, + { + "bbox": [ + 80, + 373, + 290, + 400 + ], + "type": "text", + "content": " denote the lengths of the predicted and reference plaintexts, respectively." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 409, + 290, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 409, + 290, + 476 + ], + "spans": [ + { + "bbox": [ + 67, + 409, + 290, + 476 + ], + "type": "text", + "content": "This metric normalizes the edit distance by the length of the longer string, ensuring that similarity is measured on a scale from 0 to 1, where 1 represents an exact match and lower values indicate increasing deviations from the ground truth." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 477, + 290, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 477, + 290, + 543 + ], + "spans": [ + { + "bbox": [ + 67, + 477, + 290, + 543 + ], + "type": "text", + "content": "The corresponding Levenshtein-based evaluation results for Table 2 are presented in Table 6 and Figure 7, offering deeper insights into models' decryption performance beyond strict accuracy metrics." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 545, + 290, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 545, + 290, + 719 + ], + "spans": [ + { + "bbox": [ + 67, + 545, + 290, + 719 + ], + "type": "text", + "content": "One key observation is that most models achieve significantly higher Levenshtein similarity scores than their accuracy scores, indicating that even when decryption is incorrect, outputs often retain structural similarities to the original plaintext. This suggests that models capture some encryption patterns but struggle with full decryption, failing to consistently apply correct transformations. Notably, Claude-Sonnet-3.5 achieves near-perfect scores (" + }, + { + "bbox": [ + 67, + 545, + 290, + 719 + ], + "type": "inline_equation", + "content": ">0.99" + }, + { + "bbox": [ + 67, + 545, + 290, + 719 + ], + "type": "text", + "content": " for most ciphers), demonstrating its ability to minimize decryption errors while maintaining structural accuracy, making it the most reliable model overall." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 721, + 290, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 721, + 290, + 774 + ], + "spans": [ + { + "bbox": [ + 67, + 721, + 290, + 774 + ], + "type": "text", + "content": "Interestingly, reasoning models such as DeepSeek-R1 and o1 exhibit a large gap between accuracy and Levenshtein similarity. Despite their moderate accuracy, their similarity scores" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 71, + 525, + 165 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 525, + 165 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 525, + 165 + ], + "type": "text", + "content": "often exceed 0.80, indicating that they frequently produce outputs that preserve much of the original structure but contain systematic errors. 
This suggests that reasoning models are better at capturing encryption logic but may struggle with precise execution, sometimes overcomplicating simpler tasks." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 166, + 525, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 166, + 525, + 274 + ], + "spans": [ + { + "bbox": [ + 302, + 166, + 525, + 274 + ], + "type": "text", + "content": "Conversely, chat models such as DeepSeek-V3 and Llama-based models exhibit high variability, showing relatively low accuracy but moderate Levenshtein similarity (0.40 - 0.70). This indicates a tendency toward semantic approximation rather than strict decryption, where models generate linguistically plausible outputs that fail to adhere to precise encryption rules." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 275, + 525, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 275, + 525, + 382 + ], + "spans": [ + { + "bbox": [ + 302, + 275, + 525, + 382 + ], + "type": "text", + "content": "Another notable trend is that transposition ciphers (e.g., Reverse, SwapPairs) yield lower Levenshtein similarity scores across all models, confirming that character reordering remains a major challenge. Unlike substitution ciphers, where models can rely on token-level mappings, transposition ciphers require strict positional tracking, which even the strongest models struggle to handle effectively." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 383, + 525, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 383, + 525, + 531 + ], + "spans": [ + { + "bbox": [ + 302, + 383, + 525, + 531 + ], + "type": "text", + "content": "Overall, Levenshtein similarity results highlight fundamental differences in how chat and reasoning models approach decryption. 
Chat models rely more on semantic fluency, leading to structurally incorrect but coherent outputs, whereas reasoning models exhibit stronger pattern retention but occasionally fail due to overgeneralization or overthinking. These findings suggest that while LLMs can approximate decryption rules, achieving precise symbolic transformations remains a significant challenge, especially for positional-based ciphers." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 302, + 540, + 485, + 554 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 540, + 485, + 554 + ], + "spans": [ + { + "bbox": [ + 302, + 540, + 485, + 554 + ], + "type": "text", + "content": "C.2 Additional Analysis and Insights" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 302, + 558, + 525, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 558, + 525, + 639 + ], + "spans": [ + { + "bbox": [ + 302, + 558, + 525, + 639 + ], + "type": "text", + "content": "In this section, we present more detailed experimental results that complement the findings in the main text. These additional analyses provide further insights into model performance across different encryption schemes, highlighting trends, challenges, and specific cases where models excel or struggle." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 302, + 640, + 525, + 760 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 640, + 525, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 640, + 525, + 760 + ], + "type": "text", + "content": "In the analysis of length sensitivity, plaintexts of different lengths can be seen in Figure 8. The impact of plaintext length on decryption performance is shown in Table 7 and Table 8, where we compare model accuracy on short vs. long texts. 
These results illustrate how increasing text length affects model performance, revealing notable differences in decryption robustness across various architectures" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 761, + 525, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 761, + 525, + 774 + ], + "spans": [ + { + "bbox": [ + 313, + 761, + 525, + 774 + ], + "type": "text", + "content": "The dataset used for the noise interference experi" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5946" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 70, + 105, + 525, + 314 + ], + "blocks": [ + { + "bbox": [ + 170, + 84, + 424, + 97 + ], + "lines": [ + { + "bbox": [ + 170, + 84, + 424, + 97 + ], + "spans": [ + { + "bbox": [ + 170, + 84, + 424, + 97 + ], + "type": "text", + "content": "Table 6: Results on CipherBank(3-shot) Levenshtein similarity" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 70, + 105, + 525, + 314 + ], + "lines": [ + { + "bbox": [ + 70, + 105, + 525, + 314 + ], + "spans": [ + { + "bbox": [ + 70, + 105, + 525, + 314 + ], + "type": "table", + "html": "
ModelSubstitution CiphersTransposition CiphersCustom CiphersCipher Score
Rot13At ba shPolybiusVigenèreReverseSwapPairsDualAvgCodeParityShiftWordShiftLevenshtein Similarity
Open-source Chat Models
Mixtral-8x22B-v0.10.45420.37440.26940.40320.38100.47450.33300.38710.64010.4130
Qwen2.5-72B-Instruct0.55560.42880.30420.40160.40220.53080.37180.47860.84270.4796
Llama-3.1-70B-Instruct0.57760.43780.31320.44310.37750.55420.39900.45050.72880.4758
Llama-3.3-70B-Instruct0.57540.40540.13170.43970.24820.53750.38330.40960.45800.3988
DeepSeek-V30.91950.75940.45620.48440.90880.69750.42050.57310.88870.6787
Closed-source Models
GPT-4o-mini-2024-07-180.64590.49350.24630.44990.56640.60050.34180.41880.72580.4988
GPT-4o-2024-08-060.96030.58760.34450.53460.81700.79680.43040.58500.89400.6612
GPT-4o-2024-11-200.93400.60540.35110.53380.72770.67800.42350.55300.87150.6309
gemini-1.5-pro0.93090.50430.49690.52010.75360.73170.47840.57200.88190.6522
gemini-2.0-flash-exp0.96160.65670.48130.50640.89010.75690.44760.53080.86050.6769
Claude-Sonnet-3.5-10220.99840.99610.99550.71430.98930.92620.78740.98830.97120.9296
Reasoning Models
QwQ-32B-Preview0.24770.15910.12310.16600.14440.16660.15640.16450.30570.1815
DeepSeek-R10.99200.97610.93440.52270.73680.72130.83160.69280.84910.8063
gemini-2.0-flash-thinking0.96640.85710.90740.55110.85080.77880.42610.73530.87770.7723
o1-mini-2024-09-120.97570.98600.95630.54120.59590.52670.39540.69350.72360.7105
o1-2024-12-170.83200.99280.96400.56420.77250.92080.86530.65620.93350.8335
", + "image_path": "2a89c3c3ad84718ae761e02302e066bb4cbab67961539f3b4c074c9f45bed335.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 90, + 352, + 503, + 571 + ], + "blocks": [ + { + "bbox": [ + 90, + 352, + 503, + 571 + ], + "lines": [ + { + "bbox": [ + 90, + 352, + 503, + 571 + ], + "spans": [ + { + "bbox": [ + 90, + 352, + 503, + 571 + ], + "type": "image", + "image_path": "683e3b08ad6cbbf27df8bf47c11d0fbb8fd2e73a78a8436de133e5b3ea35e2c1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 157, + 581, + 436, + 594 + ], + "lines": [ + { + "bbox": [ + 157, + 581, + 436, + 594 + ], + "spans": [ + { + "bbox": [ + 157, + 581, + 436, + 594 + ], + "type": "text", + "content": "Figure 7: Model Performance - Accuracy vs. Levenshtein Similarity." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 71, + 652, + 525, + 759 + ], + "blocks": [ + { + "bbox": [ + 198, + 631, + 395, + 644 + ], + "lines": [ + { + "bbox": [ + 198, + 631, + 395, + 644 + ], + "spans": [ + { + "bbox": [ + 198, + 631, + 395, + 644 + ], + "type": "text", + "content": "Table 7: Decryption Performance on Short Texts" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 71, + 652, + 525, + 759 + ], + "lines": [ + { + "bbox": [ + 71, + 652, + 525, + 759 + ], + "spans": [ + { + "bbox": [ + 71, + 652, + 525, + 759 + ], + "type": "table", + "html": "
ModelSubstitution CiphersTransposition CiphersCustom CiphersCipher Score
Rot13AtbashPolybiusVigenèreReverseSwapPairsDualAvgCodeParityShiftWordShiftAccuracyavg
DeepSeek-V340.0027.834.351.7429.570.870.872.6111.313.24
DeepSeek-R180.0071.3053.040.8718.260.8735.6518.2612.1732.27
GPT-4o-2024-11-2034.7813.040.87021.741.740.871.7410.439.47
gemini-2.0-flash-exp42.614.351.740.8740.872.6101.748.7011.50
Claude-Sonnet-3.5-102286.0977.3969.573.4877.398.709.5763.4842.6148.70
gemini-2.0-flash-thinking52.1726.9633.912.6133.910.87013.9114.7819.90
o1-mini-2024-09-1264.3582.6165.22015.6506.6713.912.6133.77
o1-2024-12-1761.7489.5784.550.8723.4846.6761.7417.1735.8047.61
", + "image_path": "5ab78ee30cbe0da825efc4ff3ffd9d7169db76c1c41e28261fc84db42c1fdd0d.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5947" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 71, + 165, + 525, + 270 + ], + "blocks": [ + { + "bbox": [ + 198, + 145, + 395, + 157 + ], + "lines": [ + { + "bbox": [ + 198, + 145, + 395, + 157 + ], + "spans": [ + { + "bbox": [ + 198, + 145, + 395, + 157 + ], + "type": "text", + "content": "Table 8: Decryption Performance on Long Texts" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 71, + 165, + 525, + 270 + ], + "lines": [ + { + "bbox": [ + 71, + 165, + 525, + 270 + ], + "spans": [ + { + "bbox": [ + 71, + 165, + 525, + 270 + ], + "type": "table", + "html": "
ModelSubstitution CiphersTransposition CiphersCustom CiphersCipher Score
Rot13AtbashPolybiusVigenèreReverseSwapPairsDualAvgCodeParityShiftWordShiftAccuracyavg
DeepSeek-V326.534.760.6809.520005.445.22
DeepSeek-R168.0348.9837.4104.76014.978.845.4420.94
GPT-4o-2024-11-2020.414.080012.240003.404.46
gemini-2.0-flash-exp30.612.041.36020.410.68002.726.42
Claude-Sonnet-3.5-102292.5278.9182.311.3663.955.442.7263.2740.1447.85
gemini-2.0-flash-thinking31.299.5212.24014.291.3602.724.768.47
o1-mini-2024-09-1231.9757.1432.6500002.72017.35
o1-2024-12-1758.5070.7561.110.688.1615.3841.58.6625.6634.38
", + "image_path": "6d39235f9e2f33dd38174e9bd779196a688af66ce1dc0121c6b682e99f413446.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 76, + 429, + 206, + 440 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 429, + 206, + 440 + ], + "spans": [ + { + "bbox": [ + 76, + 429, + 206, + 440 + ], + "type": "text", + "content": "Example C.1: Plaintiff Examples" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 76, + 446, + 241, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 446, + 241, + 456 + ], + "spans": [ + { + "bbox": [ + 76, + 446, + 241, + 456 + ], + "type": "text", + "content": "Short: James, American, is married to Susan." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 75, + 460, + 519, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 460, + 519, + 512 + ], + "spans": [ + { + "bbox": [ + 75, + 460, + 519, + 512 + ], + "type": "text", + "content": "Long: John Smith, born on January 15, 1990, holds American nationality and resides at 123 Elm Street, Springfield, Illinois. His mobile number is +1-312-555-6789, and his ID card number is IDURITY1234567. He is married to Jane Smith, who can be reached at +1-312-555-6789. They have two children: Emily (16, high school) and Michael (12, middle school). Their address and contact information are the same." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 76, + 521, + 171, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 521, + 171, + 532 + ], + "spans": [ + { + "bbox": [ + 76, + 521, + 171, + 532 + ], + "type": "text", + "content": "Short:Jimmy,GPA:3.71." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 76, + 535, + 519, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 535, + 519, + 560 + ], + "spans": [ + { + "bbox": [ + 76, + 535, + 519, + 560 + ], + "type": "text", + "content": "Long: David Wilson, Masters in Data Science, GPA: 3.95, Expected Graduation: 2023, Courses: Big Data Analytics, Machine Learning, Data Visualization." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 76, + 568, + 327, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 568, + 327, + 579 + ], + "spans": [ + { + "bbox": [ + 76, + 568, + 327, + 579 + ], + "type": "text", + "content": "Short: Medical Record Number: 987-654-321; Patient Name: James." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 76, + 582, + 519, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 582, + 519, + 608 + ], + "spans": [ + { + "bbox": [ + 76, + 582, + 519, + 608 + ], + "type": "text", + "content": "Long: David Wilson, Masters in Data Science, GPA: 3.95, Expected Graduation: 2023, Courses: Big Data Analytics, Machine Learning, Data Visualization." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 76, + 616, + 221, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 616, + 221, + 627 + ], + "spans": [ + { + "bbox": [ + 76, + 616, + 221, + 627 + ], + "type": "text", + "content": "Short: Lucas, lucas@ucc.company.com" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 76, + 630, + 519, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 630, + 519, + 669 + ], + "spans": [ + { + "bbox": [ + 76, + 630, + 519, + 669 + ], + "type": "text", + "content": "Long: Hank, Senior Developer, IT Department, Salary: \\(95,000, Bonuses: \\)5,000, Allowances: $2,000 (Remote Work), Performance Rating: A, Full-time, Start Date: 2020-03-15, Last Promotion: 2021-08-10, Benefits: Health Insurance, Retirement 5%, Training: \\)1,500/year, Projects: Nexus, Zeta, Feedback: 4.5/5" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 189, + 684, + 404, + 696 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 684, + 404, + 696 + ], + "spans": [ + { + "bbox": [ + 189, + 684, + 404, + 696 + ], + "type": "text", + "content": "Figure 8: Samples used for length sensitivity analysis" + } + ] + } + ], + "index": 11, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5948" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 75, + 72, + 190, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 72, + 190, + 84 + ], + "spans": [ + { + "bbox": [ + 75, + 72, + 190, + 84 + ], + "type": "text", + "content": "Example C.2: Noise Example" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 75, + 89, + 120, + 100 + ], + "type": "text", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 75, + 89, + 120, + 100 + ], + "spans": [ + { + "bbox": [ + 75, + 89, + 120, + 100 + ], + "type": "text", + "content": "Example 1:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 75, + 102, + 241, + 113 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 102, + 241, + 113 + ], + "spans": [ + { + "bbox": [ + 75, + 102, + 241, + 113 + ], + "type": "text", + "content": "Origin: Card Number: 9876 5432 1098 7654" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 75, + 116, + 249, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 116, + 249, + 126 + ], + "spans": [ + { + "bbox": [ + 75, + 116, + 249, + 126 + ], + "type": "text", + "content": "Noise: Card Numbr: 9876 54-32 1O98 765 four" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 75, + 136, + 120, + 147 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 136, + 120, + 147 + ], + "spans": [ + { + "bbox": [ + 75, + 136, + 120, + 147 + ], + "type": "text", + "content": "Example 2:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 75, + 149, + 373, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 149, + 373, + 161 + ], + "spans": [ + { + "bbox": [ + 75, + 149, + 373, + 161 + ], + "type": "text", + "content": "Origin: Pay Date: 2023-05-15, Income: " + }, + { + "bbox": [ + 75, + 149, + 373, + 161 + ], + "type": "inline_equation", + "content": "75,000, Currency: USD, Bonus:" + }, + { + "bbox": [ + 75, + 149, + 373, + 161 + ], + "type": "text", + "content": "5,000" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 75, + 163, + 483, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 163, + 483, + 175 + ], + "spans": [ + { + "bbox": [ + 75, + 163, + 483, + 175 + ], + "type": "text", + "content": " Noise: Pay Date (scheduled): 2023-05-15! Income approx: $75,000. Currency spec: USD, and Bonus = $5,000." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 75, + 184, + 120, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 184, + 120, + 195 + ], + "spans": [ + { + "bbox": [ + 75, + 184, + 120, + 195 + ], + "type": "text", + "content": "Example 3:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 75, + 197, + 401, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 197, + 401, + 208 + ], + "spans": [ + { + "bbox": [ + 75, + 197, + 401, + 208 + ], + "type": "text", + "content": "Predictions: Officer ID: P12345, Name: John, Position: Sergeant, Department: Homicide" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 75, + 211, + 493, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 211, + 493, + 223 + ], + "spans": [ + { + "bbox": [ + 75, + 211, + 493, + 223 + ], + "type": "text", + "content": "References: Officer Identification-No.: P12345, Full-Name: John (J.), Job-Title: Sergeant, Dept.: Homicide Squad." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 161, + 237, + 430, + 250 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 161, + 237, + 430, + 250 + ], + "spans": [ + { + "bbox": [ + 161, + 237, + 430, + 250 + ], + "type": "text", + "content": "Figure 9: The samples used for the noise comparison experiments." + } + ] + } + ], + "index": 10, + "type": "text" + }, + { + "type": "table", + "bbox": [ + 69, + 282, + 525, + 438 + ], + "blocks": [ + { + "bbox": [ + 198, + 261, + 393, + 274 + ], + "lines": [ + { + "bbox": [ + 198, + 261, + 393, + 274 + ], + "spans": [ + { + "bbox": [ + 198, + 261, + 393, + 274 + ], + "type": "text", + "content": "Table 9: Decryption Performance without Noise" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 69, + 282, + 525, + 438 + ], + "lines": [ + { + "bbox": [ + 69, + 282, + 525, + 438 + ], + "spans": [ + { + "bbox": [ + 69, + 282, + 525, + 438 + ], + "type": "table", + "html": "
ModelRot13AtbashReverseSwapPairsParityShiftWordShiftAccuracyavg
Open-source Models
DeepSeek-V350.0031.5018.506.509.0017.0022.08
DeepSeek-R183.5077.5042.002.5020.005.5038.50
Closed-source Models
GPT-4o-2024-11-2049.5010.5013.5003.505.5013.75
Gemini-2.0-flash-exp45.007.5042.502.505.0015.5019.67
Claude-Sonnet-3.5-102292.5085.0062.5010.0070.0035.0059.17
Gemini-2.0-flash-thinking62.5033.5022.50017.501.5022.92
o1-mini-2024-09-1255.5067.505.00017.50024.25
", + "image_path": "4b1f854a6290bc56ccef374753099879756833e56bcdb3542a341b5e8def1ebf.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 69, + 469, + 525, + 624 + ], + "blocks": [ + { + "bbox": [ + 202, + 448, + 390, + 460 + ], + "lines": [ + { + "bbox": [ + 202, + 448, + 390, + 460 + ], + "spans": [ + { + "bbox": [ + 202, + 448, + 390, + 460 + ], + "type": "text", + "content": "Table 10: Decryption Performance with Noise" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 69, + 469, + 525, + 624 + ], + "lines": [ + { + "bbox": [ + 69, + 469, + 525, + 624 + ], + "spans": [ + { + "bbox": [ + 69, + 469, + 525, + 624 + ], + "type": "table", + "html": "
ModelRot13AtbashReverseSwapPairsParityShiftWordShiftAccuracyavg
Open-source Models
DeepSeek-V38.5010.507.5000.501.504.75
DeepSeek-R133.5023.004.5001.50010.42
Closed-source Models
GPT-4o-2024-11-205.5004.500001.67
Gemini-2.0-flash-exp2.50002.50000.83
Claude-Sonnet-3.5-102250.5040.0020.002.5030.007.5025.08
Gemini-2.0-flash-thinking30.5019.003.5002.5009.25
o1-mini-2024-09-1215.0020.000005.83
", + "image_path": "b071647c4c6be2e71114225ed2cb060b6ef0d56925b587016ca315a9f3408aa2.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 67, + 645, + 290, + 752 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 645, + 290, + 752 + ], + "spans": [ + { + "bbox": [ + 67, + 645, + 290, + 752 + ], + "type": "text", + "content": "iments can be found in Figure 9. Detailed results on the impact of noise on decryption performance are presented in Table 9 and Table 10, comparing model performance on short and long plaintexts under noisy conditions. These findings highlight the varying degrees of resilience across models, with some maintaining reasonable performance under noise while others degrade significantly." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 78, + 761, + 289, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 761, + 289, + 775 + ], + "spans": [ + { + "bbox": [ + 78, + 761, + 289, + 775 + ], + "type": "text", + "content": "In the analysis of the impact of encryption scope" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 302, + 645, + 526, + 766 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 645, + 526, + 766 + ], + "spans": [ + { + "bbox": [ + 302, + 645, + 526, + 766 + ], + "type": "text", + "content": "on decryption performance, the test prompts used are shown in Figure 10. Detailed results are presented in Table 11. This analysis compares model performance when encrypting only letters versus encrypting both letters and numbers. The results highlight how different models handle the increased complexity introduced by number encryption, showing varying degrees of adaptability. 
While some models maintain relatively stable per" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "text", + "content": "5949" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 75, + 72, + 127, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 72, + 127, + 84 + ], + "spans": [ + { + "bbox": [ + 75, + 72, + 127, + 84 + ], + "type": "text", + "content": "Example C.3" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 75, + 89, + 138, + 100 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 89, + 138, + 100 + ], + "spans": [ + { + "bbox": [ + 75, + 89, + 138, + 100 + ], + "type": "text", + "content": "Background:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 92, + 102, + 461, + 115 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 102, + 461, + 115 + ], + "spans": [ + { + "bbox": [ + 92, + 102, + 461, + 115 + ], + "type": "text", + "content": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given examples." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 93, + 116, + 163, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 116, + 163, + 128 + ], + "spans": [ + { + "bbox": [ + 93, + 116, + 163, + 128 + ], + "type": "text", + "content": "Think step by step." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 92, + 130, + 437, + 142 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 130, + 437, + 142 + ], + "spans": [ + { + "bbox": [ + 92, + 130, + 437, + 142 + ], + "type": "text", + "content": "Provide the Plaintext result in the format text, where text is the decrypted text." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 75, + 144, + 129, + 155 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 144, + 129, + 155 + ], + "spans": [ + { + "bbox": [ + 75, + 144, + 129, + 155 + ], + "type": "text", + "content": "Examples:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 94, + 158, + 143, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 158, + 143, + 169 + ], + "spans": [ + { + "bbox": [ + 94, + 158, + 143, + 169 + ], + "type": "text", + "content": "* Example 1:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 173, + 158, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 173, + 158, + 183 + ], + "spans": [ + { + "bbox": [ + 111, + 173, + 158, + 183 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 129, + 186, + 449, + 197 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 186, + 449, + 197 + ], + "spans": [ + { + "bbox": [ + 129, + 186, + 449, + 197 + ], + "type": "text", + "content": "Mznv: Vnrob Qlsmhlm; Wzgv lu Yrigs: Qfob 84, 8009; Kzhhklig Mfnyvi: B012345678" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 200, + 152, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 200, + 152, + 211 + ], + "spans": [ + { + "bbox": [ + 111, + 200, + 152, + 211 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 129, + 214, + 435, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 214, + 435, + 225 + ], 
+ "spans": [ + { + "bbox": [ + 129, + 214, + 435, + 225 + ], + "type": "text", + "content": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 94, + 228, + 143, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 228, + 143, + 238 + ], + "spans": [ + { + "bbox": [ + 94, + 228, + 143, + 238 + ], + "type": "text", + "content": "* Example 2:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 243, + 158, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 243, + 158, + 253 + ], + "spans": [ + { + "bbox": [ + 111, + 243, + 158, + 253 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 129, + 256, + 485, + 267 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 256, + 485, + 267 + ], + "spans": [ + { + "bbox": [ + 129, + 256, + 485, + 267 + ], + "type": "text", + "content": "Xzhv Mfnyvi: 7975-KH-998765; Xzhv Gbkv: Gsvug/Yivzp-Rm; Urormt Wzgv: Lxglyvi 80, 7975" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 270, + 152, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 270, + 152, + 280 + ], + "spans": [ + { + "bbox": [ + 111, + 270, + 152, + 280 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 129, + 283, + 461, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 283, + 461, + 295 + ], + "spans": [ + { + "bbox": [ + 129, + 283, + 461, + 295 + ], + "type": "text", + "content": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 94, + 298, + 143, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 298, + 143, + 308 + ], + "spans": [ + { + "bbox": [ + 94, + 298, + 143, + 308 + ], + "type": "text", + "content": "* 
Example 3:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 111, + 312, + 158, + 322 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 312, + 158, + 322 + ], + "spans": [ + { + "bbox": [ + 111, + 312, + 158, + 322 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 129, + 326, + 519, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 326, + 519, + 338 + ], + "spans": [ + { + "bbox": [ + 129, + 326, + 519, + 338 + ], + "type": "text", + "content": "gzc_rwvmgrurvi: GCM-7976-ZYX543, gzc_ivxliwh: bvzi: 7976, hgzgfh: Kilxvhhvw, ivufmw_rhhfvw:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 75, + 340, + 102, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 340, + 102, + 349 + ], + "spans": [ + { + "bbox": [ + 75, + 340, + 102, + 349 + ], + "type": "text", + "content": "379.99" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 111, + 354, + 152, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 354, + 152, + 364 + ], + "spans": [ + { + "bbox": [ + 111, + 354, + 152, + 364 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 129, + 367, + 499, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 367, + 499, + 379 + ], + "spans": [ + { + "bbox": [ + 129, + 367, + 499, + 379 + ], + "type": "text", + "content": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 75, + 381, + 113, + 392 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 381, + 113, + 392 + ], + "spans": [ + { + "bbox": [ + 75, + 381, + 113, + 392 + ], + "type": "text", + "content": "Input:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 93, + 395, + 141, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 93, + 395, + 141, + 407 + ], + "spans": [ + { + "bbox": [ + 93, + 395, + 141, + 407 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 111, + 409, + 282, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 409, + 282, + 420 + ], + "spans": [ + { + "bbox": [ + 111, + 409, + 282, + 420 + ], + "type": "text", + "content": "Wvzm slowh gsv kzhhklig mfnyvi Z87654321." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 94, + 424, + 135, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 424, + 135, + 433 + ], + "spans": [ + { + "bbox": [ + 94, + 424, + 135, + 433 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 174, + 447, + 418, + 460 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 174, + 447, + 418, + 460 + ], + "spans": [ + { + "bbox": [ + 174, + 447, + 418, + 460 + ], + "type": "text", + "content": "Figure 10: User Prompt (Atbash - 3shot - Letter & Number)" + } + ] + } + ], + "index": 26, + "type": "text" + }, + { + "type": "table", + "bbox": [ + 69, + 491, + 525, + 640 + ], + "blocks": [ + { + "bbox": [ + 162, + 471, + 430, + 483 + ], + "lines": [ + { + "bbox": [ + 162, + 471, + 430, + 483 + ], + "spans": [ + { + "bbox": [ + 162, + 471, + 430, + 483 + ], + "type": "text", + "content": "Table 11: Impact of Encryption Scope on Decryption Performance" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 69, + 491, + 525, + 640 + ], + "lines": [ + { + "bbox": [ + 69, + 491, + 525, + 640 + ], + "spans": [ + { + "bbox": [ + 69, + 491, + 525, + 640 + ], + "type": "table", + "html": "
ModelRot13AtbashPolybiusDualAvgCodeParityShiftAccuracyavg
Open-source Models
DeepSeek-V368.94/23.3224.02/14.6419.35/6.013.51/011.31/025.23 / 8.79
DeepSeek-R159.10/43.0563.19/23.0239.21/43.2337.36/013.05/0.7642.38 / 22.01
Closed-source Models
GPT-4o-2024-11-2027.53/010.08/00/02.54/02.67/08.56 / 0
gemini-2.0-flash-exp47.54/07.50/2.507.50/5.050/02.67/013.04 / 1.51
Claude-Sonnet-3.5-102292.50/50.0087.56/27.5365.00/32.2515.00/062.54/17.3564.52 / 25.43
gemini-2.0-flash-thinking35.00/2.650/2.540/10.000/02.50/07.50 / 3.04
o1-mini-2024-09-1250.00/32.5972.57/35.0040.00/42.530/07.50/0.7634.01 / 22.18
", + "image_path": "cae536e8c44905a6bbfa91268e2bf8afe91cbd3e25937d2387a1e8cf921c8509.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 67, + 644, + 525, + 666 + ], + "lines": [ + { + "bbox": [ + 67, + 644, + 525, + 666 + ], + "spans": [ + { + "bbox": [ + 67, + 644, + 525, + 666 + ], + "type": "text", + "content": "Note: Values before the " + }, + { + "bbox": [ + 67, + 644, + 525, + 666 + ], + "type": "inline_equation", + "content": "\\prime /{}^{\\prime }" + }, + { + "bbox": [ + 67, + 644, + 525, + 666 + ], + "type": "text", + "content": " indicate performance when encrypting letters only, while values after the " + }, + { + "bbox": [ + 67, + 644, + 525, + 666 + ], + "type": "inline_equation", + "content": "{}^{\\prime }/{}^{\\prime }" + }, + { + "bbox": [ + 67, + 644, + 525, + 666 + ], + "type": "text", + "content": " represent performance when encrypting both letters and numbers." + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 28 + }, + { + "bbox": [ + 67, + 697, + 291, + 724 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 697, + 291, + 724 + ], + "spans": [ + { + "bbox": [ + 67, + 697, + 291, + 724 + ], + "type": "text", + "content": "formance, others exhibit significant drops when required to decrypt mixed alphanumeric ciphertexts." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 67, + 735, + 290, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 735, + 290, + 774 + ], + "spans": [ + { + "bbox": [ + 67, + 735, + 290, + 774 + ], + "type": "text", + "content": "For the enhanced prompt template, please refer to Figures 11-19, while more detailed experimental results can be found in Table 12." 
+ } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 303, + 697, + 524, + 723 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 697, + 524, + 723 + ], + "spans": [ + { + "bbox": [ + 303, + 697, + 524, + 723 + ], + "type": "text", + "content": "C.3 Impact of Plaintext Source on Decryption Performance" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 302, + 735, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 735, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 735, + 526, + 775 + ], + "type": "text", + "content": "To assess how plaintext characteristics influence decryption performance, we compare results on synthetically generated privacy-sensitive data ver" + } + ] + } + ], + "index": 33 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "text", + "content": "5950" + } + ] + } + ], + "index": 34 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 76, + 214, + 127, + 225 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 214, + 127, + 225 + ], + "spans": [ + { + "bbox": [ + 76, + 214, + 127, + 225 + ], + "type": "text", + "content": "Example C.4" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 76, + 231, + 138, + 241 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 231, + 138, + 241 + ], + "spans": [ + { + "bbox": [ + 76, + 231, + 138, + 241 + ], + "type": "text", + "content": "Background:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 92, + 244, + 520, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 244, + 520, + 256 + ], + "spans": [ + { + "bbox": [ + 92, + 244, + 520, + 256 + ], + "type": "text", + "content": "Decrypt the provided 
ciphertext and return the corresponding Plaintext following the given algorithm flow and examples." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 93, + 259, + 163, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 259, + 163, + 269 + ], + "spans": [ + { + "bbox": [ + 93, + 259, + 163, + 269 + ], + "type": "text", + "content": "Think step by step." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 92, + 272, + 437, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 272, + 437, + 284 + ], + "spans": [ + { + "bbox": [ + 92, + 272, + 437, + 284 + ], + "type": "text", + "content": "Provide the Plaintext result in the format text, where text is the decrypted text." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 76, + 286, + 152, + 297 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 286, + 152, + 297 + ], + "spans": [ + { + "bbox": [ + 76, + 286, + 152, + 297 + ], + "type": "text", + "content": "Algorithm Flow:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 74, + 300, + 518, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 300, + 518, + 326 + ], + "spans": [ + { + "bbox": [ + 74, + 300, + 518, + 326 + ], + "type": "text", + "content": "Uses the Caesar cipher with a fixed shift of 13 positions. For each letter in the Plaintext, shift it forward by 13 positions in the alphabet to produce the Ciphertext." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 76, + 328, + 128, + 339 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 328, + 128, + 339 + ], + "spans": [ + { + "bbox": [ + 76, + 328, + 128, + 339 + ], + "type": "text", + "content": "Examples:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 93, + 342, + 143, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 342, + 143, + 353 + ], + "spans": [ + { + "bbox": [ + 93, + 342, + 143, + 353 + ], + "type": "text", + "content": "* Example 1:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 356, + 158, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 356, + 158, + 367 + ], + "spans": [ + { + "bbox": [ + 111, + 356, + 158, + 367 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 129, + 370, + 446, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 370, + 446, + 381 + ], + "spans": [ + { + "bbox": [ + 129, + 370, + 446, + 381 + ], + "type": "text", + "content": "Anzr: Rzvyl Wbuafb; Qngr bs Ovegu: Whyl 15, 1990; Cnffcbeg Ahzore: L987654321" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 384, + 153, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 384, + 153, + 394 + ], + "spans": [ + { + "bbox": [ + 111, + 384, + 153, + 394 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 129, + 397, + 436, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 397, + 436, + 409 + ], + "spans": [ + { + "bbox": [ + 129, + 397, + 436, + 409 + ], + "type": "text", + "content": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 93, + 412, + 143, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 412, + 143, + 423 + ], 
+ "spans": [ + { + "bbox": [ + 93, + 412, + 143, + 423 + ], + "type": "text", + "content": "* Example 2:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 426, + 158, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 426, + 158, + 437 + ], + "spans": [ + { + "bbox": [ + 111, + 426, + 158, + 437 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 129, + 439, + 465, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 439, + 465, + 451 + ], + "spans": [ + { + "bbox": [ + 129, + 439, + 465, + 451 + ], + "type": "text", + "content": "Pnfr Ahzore: 2024-CF-001234; Pnfr Glcr: Gursg/Oernx-Va; Svyvat Qngr: Bpgbore 19, 2024" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 454, + 153, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 454, + 153, + 464 + ], + "spans": [ + { + "bbox": [ + 111, + 454, + 153, + 464 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 129, + 467, + 461, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 467, + 461, + 479 + ], + "spans": [ + { + "bbox": [ + 129, + 467, + 461, + 479 + ], + "type": "text", + "content": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 93, + 481, + 143, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 481, + 143, + 492 + ], + "spans": [ + { + "bbox": [ + 93, + 481, + 143, + 492 + ], + "type": "text", + "content": "* Example 3:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 111, + 495, + 158, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 495, + 158, + 506 + ], + "spans": [ + { + "bbox": [ + 111, + 495, + 158, + 506 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": 
[ + 129, + 509, + 515, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 509, + 515, + 521 + ], + "spans": [ + { + "bbox": [ + 129, + 509, + 515, + 521 + ], + "type": "text", + "content": "gnk_vqragvsvre: GKA-2023-NOP456, gnk_erpbeqf: Irne: 2023, fgnghf: Cebprffrq, ershaq_vffhrq: 620.00" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 111, + 524, + 153, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 524, + 153, + 533 + ], + "spans": [ + { + "bbox": [ + 111, + 524, + 153, + 533 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 129, + 537, + 500, + 549 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 537, + 500, + 549 + ], + "spans": [ + { + "bbox": [ + 129, + 537, + 500, + 549 + ], + "type": "text", + "content": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 76, + 551, + 113, + 561 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 551, + 113, + 561 + ], + "spans": [ + { + "bbox": [ + 76, + 551, + 113, + 561 + ], + "type": "text", + "content": "Input:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 93, + 565, + 141, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 565, + 141, + 576 + ], + "spans": [ + { + "bbox": [ + 93, + 565, + 141, + 576 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 111, + 579, + 273, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 579, + 273, + 591 + ], + "spans": [ + { + "bbox": [ + 111, + 579, + 273, + 591 + ], + "type": "text", + "content": "Yrqvn, na Nzrevpna, erfvqrf va Ybf Natryrf." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 93, + 593, + 135, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 593, + 135, + 602 + ], + "spans": [ + { + "bbox": [ + 93, + 593, + 135, + 602 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 187, + 617, + 406, + 629 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 617, + 406, + 629 + ], + "spans": [ + { + "bbox": [ + 187, + 617, + 406, + 629 + ], + "type": "text", + "content": "Figure 11:Enhanced Prompt (ROT13 - 3shot - Letter)" + } + ] + } + ], + "index": 27, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 308, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 308, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 308, + 791 + ], + "type": "text", + "content": "5951" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 76, + 214, + 127, + 225 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 214, + 127, + 225 + ], + "spans": [ + { + "bbox": [ + 76, + 214, + 127, + 225 + ], + "type": "text", + "content": "Example C.5" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 76, + 231, + 138, + 241 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 231, + 138, + 241 + ], + "spans": [ + { + "bbox": [ + 76, + 231, + 138, + 241 + ], + "type": "text", + "content": "Background:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 93, + 244, + 520, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 244, + 520, + 256 + ], + "spans": [ + { + "bbox": [ + 93, + 244, + 520, + 256 + ], + "type": "text", + "content": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 94, + 259, + 163, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 259, + 163, + 269 + ], + "spans": [ + { + "bbox": [ + 94, + 259, + 163, + 269 + ], + "type": "text", + "content": "Think step by step." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 93, + 272, + 437, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 272, + 437, + 284 + ], + "spans": [ + { + "bbox": [ + 93, + 272, + 437, + 284 + ], + "type": "text", + "content": "Provide the Plaintext result in the format text, where text is the decrypted text." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 76, + 286, + 152, + 297 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 286, + 152, + 297 + ], + "spans": [ + { + "bbox": [ + 76, + 286, + 152, + 297 + ], + "type": "text", + "content": "Algorithm Flow:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 92, + 300, + 493, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 300, + 493, + 312 + ], + "spans": [ + { + "bbox": [ + 92, + 300, + 493, + 312 + ], + "type": "text", + "content": "Uses the Atbash cipher. Each letter in the \\*\\*Plaintext\\*\\* is replaced with its reverse counterpart in the alphabet." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 76, + 314, + 129, + 326 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 314, + 129, + 326 + ], + "spans": [ + { + "bbox": [ + 76, + 314, + 129, + 326 + ], + "type": "text", + "content": "Examples:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 94, + 328, + 143, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 328, + 143, + 339 + ], + "spans": [ + { + "bbox": [ + 94, + 328, + 143, + 339 + ], + "type": "text", + "content": "* Example 1:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 343, + 158, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 343, + 158, + 353 + ], + "spans": [ + { + "bbox": [ + 111, + 343, + 158, + 353 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 129, + 356, + 448, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 356, + 448, + 367 + ], + "spans": [ + { + "bbox": [ + 129, + 356, + 448, + 367 + ], + "type": "text", + "content": "Mznv: Vnrob Qlsmhlm; Wzgv lu Yrigs: Qfob 15, 1990; Kzhhklig Mfnyvi: B987654321" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 370, + 152, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 370, + 152, + 380 + ], + "spans": [ + { + "bbox": [ + 111, + 370, + 152, + 380 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 129, + 383, + 435, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 383, + 435, + 395 + ], + "spans": [ + { + "bbox": [ + 129, + 383, + 435, + 395 + ], + "type": "text", + "content": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 94, + 397, + 143, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 397, + 143, + 408 + ], 
+ "spans": [ + { + "bbox": [ + 94, + 397, + 143, + 408 + ], + "type": "text", + "content": "* Example 2:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 412, + 158, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 412, + 158, + 423 + ], + "spans": [ + { + "bbox": [ + 111, + 412, + 158, + 423 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 129, + 425, + 485, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 425, + 485, + 437 + ], + "spans": [ + { + "bbox": [ + 129, + 425, + 485, + 437 + ], + "type": "text", + "content": "Xzhv Mfnyvi: 2024-KH-001234; Xzhv Gbkv: Gsvug/Yivzp-Rm; Urormt Wzgv: Lxglyvi 19, 2024" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 440, + 152, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 440, + 152, + 449 + ], + "spans": [ + { + "bbox": [ + 111, + 440, + 152, + 449 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 129, + 453, + 461, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 453, + 461, + 465 + ], + "spans": [ + { + "bbox": [ + 129, + 453, + 461, + 465 + ], + "type": "text", + "content": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 94, + 467, + 143, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 467, + 143, + 478 + ], + "spans": [ + { + "bbox": [ + 94, + 467, + 143, + 478 + ], + "type": "text", + "content": "* Example 3:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 111, + 482, + 158, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 482, + 158, + 492 + ], + "spans": [ + { + "bbox": [ + 111, + 482, + 158, + 492 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": 
[ + 129, + 495, + 520, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 495, + 520, + 507 + ], + "spans": [ + { + "bbox": [ + 129, + 495, + 520, + 507 + ], + "type": "text", + "content": "gzc_rwvmgrurvi: GCM-2023-ZYX456, gzc_ivxliwh: bvzi: 2023, hgzgfh: Kilxvhhvw, ivufmw_rhhfvw:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 76, + 510, + 102, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 510, + 102, + 518 + ], + "spans": [ + { + "bbox": [ + 76, + 510, + 102, + 518 + ], + "type": "text", + "content": "620.00" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 111, + 524, + 152, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 524, + 152, + 533 + ], + "spans": [ + { + "bbox": [ + 111, + 524, + 152, + 533 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 129, + 537, + 499, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 537, + 499, + 548 + ], + "spans": [ + { + "bbox": [ + 129, + 537, + 499, + 548 + ], + "type": "text", + "content": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 76, + 551, + 113, + 561 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 551, + 113, + 561 + ], + "spans": [ + { + "bbox": [ + 76, + 551, + 113, + 561 + ], + "type": "text", + "content": "Input:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 94, + 565, + 140, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 565, + 140, + 576 + ], + "spans": [ + { + "bbox": [ + 94, + 565, + 140, + 576 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 111, + 579, + 289, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 579, + 289, + 590 + ], + "spans": [ + { + 
"bbox": [ + 111, + 579, + 289, + 590 + ], + "type": "text", + "content": "Ovwrz,zm Znvirxzm, ivhrwhrm Olh Zmtvovh." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 94, + 593, + 135, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 593, + 135, + 602 + ], + "spans": [ + { + "bbox": [ + 94, + 593, + 135, + 602 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 187, + 617, + 406, + 629 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 617, + 406, + 629 + ], + "spans": [ + { + "bbox": [ + 187, + 617, + 406, + 629 + ], + "type": "text", + "content": "Figure 12:Enhanced Prompt (Atbash - 3shot - Letter)" + } + ] + } + ], + "index": 28, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5952" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 75, + 186, + 127, + 197 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 186, + 127, + 197 + ], + "spans": [ + { + "bbox": [ + 75, + 186, + 127, + 197 + ], + "type": "text", + "content": "Example C.6" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 75, + 203, + 138, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 203, + 138, + 213 + ], + "spans": [ + { + "bbox": [ + 75, + 203, + 138, + 213 + ], + "type": "text", + "content": "Background:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 92, + 216, + 519, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 216, + 519, + 228 + ], + "spans": [ + { + "bbox": [ + 92, + 216, + 519, + 228 + ], + "type": "text", + "content": "Decrypt the provided ciphertext and return the 
corresponding Plaintext following the given algorithm flow and examples." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 93, + 231, + 163, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 231, + 163, + 242 + ], + "spans": [ + { + "bbox": [ + 93, + 231, + 163, + 242 + ], + "type": "text", + "content": "Think step by step." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 92, + 244, + 437, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 244, + 437, + 256 + ], + "spans": [ + { + "bbox": [ + 92, + 244, + 437, + 256 + ], + "type": "text", + "content": "Provide the Plaintext result in the format text, where text is the decrypted text." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 75, + 259, + 152, + 269 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 259, + 152, + 269 + ], + "spans": [ + { + "bbox": [ + 75, + 259, + 152, + 269 + ], + "type": "text", + "content": "Algorithm Flow:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 75, + 272, + 519, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 272, + 519, + 297 + ], + "spans": [ + { + "bbox": [ + 75, + 272, + 519, + 297 + ], + "type": "text", + "content": "Uses the Polybius cipher. Each letter in the " + }, + { + "bbox": [ + 75, + 272, + 519, + 297 + ], + "type": "inline_equation", + "content": "^{**}" + }, + { + "bbox": [ + 75, + 272, + 519, + 297 + ], + "type": "text", + "content": " Plaintext\\*\\* is mapped to a pair of coordinates in the Polybius square, forming the " + }, + { + "bbox": [ + 75, + 272, + 519, + 297 + ], + "type": "inline_equation", + "content": "^{**}" + }, + { + "bbox": [ + 75, + 272, + 519, + 297 + ], + "type": "text", + "content": " Ciphertext\\*." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 75, + 301, + 128, + 311 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 301, + 128, + 311 + ], + "spans": [ + { + "bbox": [ + 75, + 301, + 128, + 311 + ], + "type": "text", + "content": "Examples:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 94, + 314, + 143, + 325 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 314, + 143, + 325 + ], + "spans": [ + { + "bbox": [ + 94, + 314, + 143, + 325 + ], + "type": "text", + "content": "* Example 1:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 328, + 158, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 328, + 158, + 339 + ], + "spans": [ + { + "bbox": [ + 111, + 328, + 158, + 339 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 129, + 342, + 518, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 342, + 518, + 353 + ], + "spans": [ + { + "bbox": [ + 129, + 342, + 518, + 353 + ], + "type": "text", + "content": "32 11 31 15 : 15 31 23 26 51 24 33 22 32 41 33 32 ; 14 11 42 15 33 16 12 23 36 42 22 : 24 43 26 51 15 , 19" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 75, + 355, + 327, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 355, + 327, + 366 + ], + "spans": [ + { + "bbox": [ + 75, + 355, + 327, + 366 + ], + "type": "text", + "content": "90;3411414134333642324331121536:51987654321" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 370, + 152, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 370, + 152, + 380 + ], + "spans": [ + { + "bbox": [ + 111, + 370, + 152, + 380 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 129, + 383, + 435, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 383, + 435, + 395 + ], + 
"spans": [ + { + "bbox": [ + 129, + 383, + 435, + 395 + ], + "type": "text", + "content": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 94, + 397, + 143, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 397, + 143, + 408 + ], + "spans": [ + { + "bbox": [ + 94, + 397, + 143, + 408 + ], + "type": "text", + "content": "* Example 2:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 111, + 412, + 158, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 412, + 158, + 423 + ], + "spans": [ + { + "bbox": [ + 111, + 412, + 158, + 423 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 129, + 425, + 518, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 425, + 518, + 436 + ], + "spans": [ + { + "bbox": [ + 129, + 425, + 518, + 436 + ], + "type": "text", + "content": "13 11 41 15 32 43 31 12 15 36 : 2 0 2 4 - 34 41 - 0 0 1 2 3 4 ; 13 11 41 15 42 51 34 15 : 42 22 15 16 42 / 12" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 75, + 439, + 393, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 439, + 393, + 449 + ], + "spans": [ + { + "bbox": [ + 75, + 439, + 393, + 449 + ], + "type": "text", + "content": "36 15 11 25 - 23 32 ; 16 23 26 23 32 21 14 11 42 15 : 33 13 42 33 12 15 36 19 , 20 24" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 111, + 454, + 152, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 454, + 152, + 464 + ], + "spans": [ + { + "bbox": [ + 111, + 454, + 152, + 464 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 129, + 467, + 460, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 467, + 460, + 479 + ], + "spans": [ + { + "bbox": [ + 129, + 467, + 460, + 479 + ], + 
"type": "text", + "content": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 94, + 481, + 143, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 481, + 143, + 492 + ], + "spans": [ + { + "bbox": [ + 94, + 481, + 143, + 492 + ], + "type": "text", + "content": "* Example 3:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 111, + 495, + 158, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 495, + 158, + 506 + ], + "spans": [ + { + "bbox": [ + 111, + 495, + 158, + 506 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 129, + 508, + 518, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 508, + 518, + 520 + ], + "spans": [ + { + "bbox": [ + 129, + 508, + 518, + 520 + ], + "type": "text", + "content": "42 11 46 _ 23 14 15 32 42 23 16 23 15 36 : 42 46 32 _ 20 23 - 11 12 13 456 , 42 11 46 _ 36 15 13 33 36 14" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 75, + 523, + 519, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 523, + 519, + 534 + ], + "spans": [ + { + "bbox": [ + 75, + 523, + 519, + 534 + ], + "type": "text", + "content": "41:51 15 11 36:2023,41 42 11 42 43 41:34 36 33 13 15 41 41 15 14,36 15 16 43 32 14_23 41 41 43 15 14:620." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 75, + 538, + 88, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 538, + 88, + 547 + ], + "spans": [ + { + "bbox": [ + 75, + 538, + 88, + 547 + ], + "type": "text", + "content": "00" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 111, + 551, + 152, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 551, + 152, + 560 + ], + "spans": [ + { + "bbox": [ + 111, + 551, + 152, + 560 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 129, + 565, + 499, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 565, + 499, + 576 + ], + "spans": [ + { + "bbox": [ + 129, + 565, + 499, + 576 + ], + "type": "text", + "content": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 75, + 579, + 113, + 590 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 579, + 113, + 590 + ], + "spans": [ + { + "bbox": [ + 75, + 579, + 113, + 590 + ], + "type": "text", + "content": "Input:" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 94, + 593, + 140, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 593, + 140, + 604 + ], + "spans": [ + { + "bbox": [ + 94, + 593, + 140, + 604 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 111, + 607, + 505, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 607, + 505, + 618 + ], + "spans": [ + { + "bbox": [ + 111, + 607, + 505, + 618 + ], + "type": "text", + "content": "26 15 14 23 11 , 11 32 11 31 15 36 23 13 11 32 , 36 15 41 23 14 15 41 23 32 26 33 41 11 32 21 15 26 15 41 ." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 94, + 621, + 134, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 621, + 134, + 630 + ], + "spans": [ + { + "bbox": [ + 94, + 621, + 134, + 630 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 184, + 645, + 408, + 657 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 645, + 408, + 657 + ], + "spans": [ + { + "bbox": [ + 184, + 645, + 408, + 657 + ], + "type": "text", + "content": "Figure 13: Enhanced Prompt (Polybius - 3shot - Letter)" + } + ] + } + ], + "index": 31, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5953" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 76, + 214, + 127, + 225 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 214, + 127, + 225 + ], + "spans": [ + { + "bbox": [ + 76, + 214, + 127, + 225 + ], + "type": "text", + "content": "Example C.7" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 76, + 231, + 138, + 241 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 231, + 138, + 241 + ], + "spans": [ + { + "bbox": [ + 76, + 231, + 138, + 241 + ], + "type": "text", + "content": "Background:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 93, + 244, + 520, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 244, + 520, + 256 + ], + "spans": [ + { + "bbox": [ + 93, + 244, + 520, + 256 + ], + "type": "text", + "content": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 94, + 259, + 163, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 259, + 163, + 269 + ], + "spans": [ + { + "bbox": [ + 94, + 259, + 163, + 269 + ], + "type": "text", + "content": "Think step by step." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 93, + 272, + 437, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 272, + 437, + 284 + ], + "spans": [ + { + "bbox": [ + 93, + 272, + 437, + 284 + ], + "type": "text", + "content": "Provide the Plaintext result in the format text, where text is the decrypted text." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 76, + 286, + 152, + 297 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 286, + 152, + 297 + ], + "spans": [ + { + "bbox": [ + 76, + 286, + 152, + 297 + ], + "type": "text", + "content": "Algorithm Flow:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 75, + 300, + 518, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 300, + 518, + 326 + ], + "spans": [ + { + "bbox": [ + 75, + 300, + 518, + 326 + ], + "type": "text", + "content": "Uses the Vigenère cipher. Each letter in the **Plaintext** is shifted by the corresponding letter in the **Key** to produce the **Ciphertext**." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 76, + 328, + 128, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 328, + 128, + 338 + ], + "spans": [ + { + "bbox": [ + 76, + 328, + 128, + 338 + ], + "type": "text", + "content": "Examples:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 94, + 343, + 143, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 343, + 143, + 353 + ], + "spans": [ + { + "bbox": [ + 94, + 343, + 143, + 353 + ], + "type": "text", + "content": "* Example 1:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 356, + 158, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 356, + 158, + 367 + ], + "spans": [ + { + "bbox": [ + 111, + 356, + 158, + 367 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 129, + 370, + 438, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 370, + 438, + 381 + ], + "spans": [ + { + "bbox": [ + 129, + 370, + 438, + 381 + ], + "type": "text", + "content": "Nexe: Eotla Jqsnuzn; Dcee zf Miteh: Jwwy 15, 1990; Pcdsrzrv Nwbgc: J987654321" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 384, + 153, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 384, + 153, + 394 + ], + "spans": [ + { + "bbox": [ + 111, + 384, + 153, + 394 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 129, + 397, + 435, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 397, + 435, + 409 + ], + "spans": [ + { + "bbox": [ + 129, + 397, + 435, + 409 + ], + "type": "text", + "content": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 94, + 412, + 143, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 412, + 143, + 423 + ], 
+ "spans": [ + { + "bbox": [ + 94, + 412, + 143, + 423 + ], + "type": "text", + "content": "* Example 2:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 426, + 158, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 426, + 158, + 437 + ], + "spans": [ + { + "bbox": [ + 111, + 426, + 158, + 437 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 129, + 439, + 477, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 439, + 477, + 451 + ], + "spans": [ + { + "bbox": [ + 129, + 439, + 477, + 451 + ], + "type": "text", + "content": "Ccde Yuomet: 2024-PU-001234; Naup Vjpg: Vsehe/Dcecv-Ky; Qintni Dcee: Oeeodpr 19, 2024" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 454, + 153, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 454, + 153, + 464 + ], + "spans": [ + { + "bbox": [ + 111, + 454, + 153, + 464 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 129, + 467, + 461, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 467, + 461, + 479 + ], + "spans": [ + { + "bbox": [ + 129, + 467, + 461, + 479 + ], + "type": "text", + "content": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 94, + 481, + 143, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 481, + 143, + 492 + ], + "spans": [ + { + "bbox": [ + 94, + 481, + 143, + 492 + ], + "type": "text", + "content": "* Example 3:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 111, + 495, + 158, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 495, + 158, + 506 + ], + "spans": [ + { + "bbox": [ + 111, + 495, + 158, + 506 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": 
[ + 129, + 509, + 515, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 509, + 515, + 521 + ], + "spans": [ + { + "bbox": [ + 129, + 509, + 515, + 521 + ], + "type": "text", + "content": "tci_koepeihtet: VIN-2023-CMC456, tci_tpcqcdu: jecc: 2023, dtceuu: Rcoepsupd, rgqupo_kdswpd: 620.00" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 111, + 524, + 153, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 524, + 153, + 533 + ], + "spans": [ + { + "bbox": [ + 111, + 524, + 153, + 533 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 129, + 537, + 500, + 549 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 537, + 500, + 549 + ], + "spans": [ + { + "bbox": [ + 129, + 537, + 500, + 549 + ], + "type": "text", + "content": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 76, + 551, + 113, + 561 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 551, + 113, + 561 + ], + "spans": [ + { + "bbox": [ + 76, + 551, + 113, + 561 + ], + "type": "text", + "content": "Input:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 94, + 565, + 141, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 565, + 141, + 576 + ], + "spans": [ + { + "bbox": [ + 94, + 565, + 141, + 576 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 111, + 579, + 273, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 579, + 273, + 591 + ], + "spans": [ + { + "bbox": [ + 111, + 579, + 273, + 591 + ], + "type": "text", + "content": "Lgoic, cy Cxettccy, ceutgdg ky Nzs Lniplgd." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 94, + 593, + 135, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 593, + 135, + 602 + ], + "spans": [ + { + "bbox": [ + 94, + 593, + 135, + 602 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 183, + 617, + 409, + 629 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 183, + 617, + 409, + 629 + ], + "spans": [ + { + "bbox": [ + 183, + 617, + 409, + 629 + ], + "type": "text", + "content": "Figure 14: Enhanced Prompt (Vigenère - 3shot - Letter)" + } + ] + } + ], + "index": 27, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5954" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 75, + 221, + 127, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 221, + 127, + 232 + ], + "spans": [ + { + "bbox": [ + 75, + 221, + 127, + 232 + ], + "type": "text", + "content": "Example C.8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 75, + 238, + 138, + 248 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 238, + 138, + 248 + ], + "spans": [ + { + "bbox": [ + 75, + 238, + 138, + 248 + ], + "type": "text", + "content": "Background:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 92, + 251, + 520, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 251, + 520, + 263 + ], + "spans": [ + { + "bbox": [ + 92, + 251, + 520, + 263 + ], + "type": "text", + "content": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 93, + 265, + 163, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 265, + 163, + 276 + ], + "spans": [ + { + "bbox": [ + 93, + 265, + 163, + 276 + ], + "type": "text", + "content": "Think step by step." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 92, + 279, + 437, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 279, + 437, + 290 + ], + "spans": [ + { + "bbox": [ + 92, + 279, + 437, + 290 + ], + "type": "text", + "content": "Provide the Plaintext result in the format text, where text is the decrypted text." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 75, + 294, + 152, + 305 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 294, + 152, + 305 + ], + "spans": [ + { + "bbox": [ + 75, + 294, + 152, + 305 + ], + "type": "text", + "content": "Algorithm Flow:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 92, + 307, + 298, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 307, + 298, + 318 + ], + "spans": [ + { + "bbox": [ + 92, + 307, + 298, + 318 + ], + "type": "text", + "content": "Reverses the " + }, + { + "bbox": [ + 92, + 307, + 298, + 318 + ], + "type": "inline_equation", + "content": "^{**}" + }, + { + "bbox": [ + 92, + 307, + 298, + 318 + ], + "type": "text", + "content": " Plaintiff\\*\\* to create the " + }, + { + "bbox": [ + 92, + 307, + 298, + 318 + ], + "type": "inline_equation", + "content": "^{**}" + }, + { + "bbox": [ + 92, + 307, + 298, + 318 + ], + "type": "text", + "content": " Ciphertext\\*\\*." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 75, + 322, + 128, + 332 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 322, + 128, + 332 + ], + "spans": [ + { + "bbox": [ + 75, + 322, + 128, + 332 + ], + "type": "text", + "content": "Examples:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 93, + 335, + 143, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 335, + 143, + 346 + ], + "spans": [ + { + "bbox": [ + 93, + 335, + 143, + 346 + ], + "type": "text", + "content": "* Example 1:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 349, + 158, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 349, + 158, + 360 + ], + "spans": [ + { + "bbox": [ + 111, + 349, + 158, + 360 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 130, + 363, + 434, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 363, + 434, + 374 + ], + "spans": [ + { + "bbox": [ + 130, + 363, + 434, + 374 + ], + "type": "text", + "content": "123456789Y :rebmuN tropssaP ;0991 ,51 yluJ :htriB fo etaD ;nosnhoJ ylimE :emaN" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 378, + 152, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 378, + 152, + 386 + ], + "spans": [ + { + "bbox": [ + 111, + 378, + 152, + 386 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 129, + 391, + 435, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 391, + 435, + 401 + ], + "spans": [ + { + "bbox": [ + 129, + 391, + 435, + 401 + ], + "type": "text", + "content": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 93, + 405, + 143, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 405, + 143, + 416 + ], 
+ "spans": [ + { + "bbox": [ + 93, + 405, + 143, + 416 + ], + "type": "text", + "content": "* Example 2:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 419, + 158, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 419, + 158, + 429 + ], + "spans": [ + { + "bbox": [ + 111, + 419, + 158, + 429 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 129, + 433, + 459, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 433, + 459, + 444 + ], + "spans": [ + { + "bbox": [ + 129, + 433, + 459, + 444 + ], + "type": "text", + "content": "4202,91 rebotcO :etaD gniliF ;nI-kaerB/tfehT :epyT ESA C;432100-SP-4202 :rebmuN ESA C" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 447, + 152, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 447, + 152, + 456 + ], + "spans": [ + { + "bbox": [ + 111, + 447, + 152, + 456 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 129, + 460, + 461, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 460, + 461, + 472 + ], + "spans": [ + { + "bbox": [ + 129, + 460, + 461, + 472 + ], + "type": "text", + "content": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 93, + 475, + 143, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 475, + 143, + 485 + ], + "spans": [ + { + "bbox": [ + 93, + 475, + 143, + 485 + ], + "type": "text", + "content": "* Example 3:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 111, + 489, + 158, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 489, + 158, + 499 + ], + "spans": [ + { + "bbox": [ + 111, + 489, + 158, + 499 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": 
[ + 129, + 502, + 505, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 502, + 505, + 513 + ], + "spans": [ + { + "bbox": [ + 129, + 502, + 505, + 513 + ], + "type": "text", + "content": "}00.026 :deussi_dnufer,dessecorP:sutats,3202:raey{sdrocer_xat,654CBA-3202-NXT:reifitnedi_xat" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 111, + 517, + 152, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 517, + 152, + 526 + ], + "spans": [ + { + "bbox": [ + 111, + 517, + 152, + 526 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 129, + 530, + 499, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 530, + 499, + 541 + ], + "spans": [ + { + "bbox": [ + 129, + 530, + 499, + 541 + ], + "type": "text", + "content": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 75, + 544, + 113, + 555 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 544, + 113, + 555 + ], + "spans": [ + { + "bbox": [ + 75, + 544, + 113, + 555 + ], + "type": "text", + "content": "Input:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 93, + 559, + 140, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 559, + 140, + 569 + ], + "spans": [ + { + "bbox": [ + 93, + 559, + 140, + 569 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 111, + 572, + 272, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 572, + 272, + 583 + ], + "spans": [ + { + "bbox": [ + 111, + 572, + 272, + 583 + ], + "type": "text", + "content": ".selegnAsoL ni sediser,naciremAna ,aideL" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 93, + 587, + 135, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 587, + 
135, + 596 + ], + "spans": [ + { + "bbox": [ + 93, + 587, + 135, + 596 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 185, + 610, + 407, + 622 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 185, + 610, + 407, + 622 + ], + "spans": [ + { + "bbox": [ + 185, + 610, + 407, + 622 + ], + "type": "text", + "content": "Figure 15:Enhanced Prompt (Reverse -3shot-Letter)" + } + ] + } + ], + "index": 27, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5955" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 76, + 214, + 127, + 225 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 214, + 127, + 225 + ], + "spans": [ + { + "bbox": [ + 76, + 214, + 127, + 225 + ], + "type": "text", + "content": "Example C.9" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 76, + 231, + 138, + 241 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 231, + 138, + 241 + ], + "spans": [ + { + "bbox": [ + 76, + 231, + 138, + 241 + ], + "type": "text", + "content": "Background:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 92, + 244, + 520, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 244, + 520, + 256 + ], + "spans": [ + { + "bbox": [ + 92, + 244, + 520, + 256 + ], + "type": "text", + "content": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 93, + 259, + 163, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 259, + 163, + 269 + ], + "spans": [ + { + "bbox": [ + 93, + 259, + 163, + 269 + ], + "type": "text", + "content": "Think step by step." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 92, + 272, + 437, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 272, + 437, + 284 + ], + "spans": [ + { + "bbox": [ + 92, + 272, + 437, + 284 + ], + "type": "text", + "content": "Provide the Plaintext result in the format text, where text is the decrypted text." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 76, + 286, + 152, + 297 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 286, + 152, + 297 + ], + "spans": [ + { + "bbox": [ + 76, + 286, + 152, + 297 + ], + "type": "text", + "content": "Algorithm Flow:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 74, + 300, + 519, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 300, + 519, + 326 + ], + "spans": [ + { + "bbox": [ + 74, + 300, + 519, + 326 + ], + "type": "text", + "content": "For each pair of letters in the " + }, + { + "bbox": [ + 74, + 300, + 519, + 326 + ], + "type": "inline_equation", + "content": "^{**}" + }, + { + "bbox": [ + 74, + 300, + 519, + 326 + ], + "type": "text", + "content": " Plaintext\\*\\*, their positions are swapped to produce the " + }, + { + "bbox": [ + 74, + 300, + 519, + 326 + ], + "type": "inline_equation", + "content": "^{**}" + }, + { + "bbox": [ + 74, + 300, + 519, + 326 + ], + "type": "text", + "content": " Ciphertext\\*\\*. If the number of letters is odd, the last letter remains in its original position." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 76, + 328, + 129, + 339 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 328, + 129, + 339 + ], + "spans": [ + { + "bbox": [ + 76, + 328, + 129, + 339 + ], + "type": "text", + "content": "Examples:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 93, + 343, + 143, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 343, + 143, + 353 + ], + "spans": [ + { + "bbox": [ + 93, + 343, + 143, + 353 + ], + "type": "text", + "content": "* Example 1:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 356, + 158, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 356, + 158, + 367 + ], + "spans": [ + { + "bbox": [ + 111, + 356, + 158, + 367 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 129, + 370, + 434, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 370, + 434, + 381 + ], + "spans": [ + { + "bbox": [ + 129, + 370, + 434, + 381 + ], + "type": "text", + "content": "aNem :mEli yoJnhos;nD ta efoB riht :uJy11 ,51 99;0P sapsro tuNbmre :9Y78563412" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 384, + 153, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 384, + 153, + 394 + ], + "spans": [ + { + "bbox": [ + 111, + 384, + 153, + 394 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 129, + 397, + 435, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 397, + 435, + 409 + ], + "spans": [ + { + "bbox": [ + 129, + 397, + 435, + 409 + ], + "type": "text", + "content": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 93, + 412, + 143, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 412, + 143, + 423 + ], 
+ "spans": [ + { + "bbox": [ + 93, + 412, + 143, + 423 + ], + "type": "text", + "content": "* Example 2:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 426, + 158, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 426, + 158, + 437 + ], + "spans": [ + { + "bbox": [ + 111, + 426, + 158, + 437 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 129, + 439, + 459, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 439, + 459, + 450 + ], + "spans": [ + { + "bbox": [ + 129, + 439, + 459, + 450 + ], + "type": "text", + "content": "aCesN mueb:r2 20-4SP0-1032;4C sa eyTep :hTfe/trBae-knI ;iFilgnD ta:eO tcbore1 ,92 204" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 454, + 153, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 454, + 153, + 464 + ], + "spans": [ + { + "bbox": [ + 111, + 454, + 153, + 464 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 129, + 467, + 461, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 467, + 461, + 479 + ], + "spans": [ + { + "bbox": [ + 129, + 467, + 461, + 479 + ], + "type": "text", + "content": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 93, + 481, + 143, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 481, + 143, + 492 + ], + "spans": [ + { + "bbox": [ + 93, + 481, + 143, + 492 + ], + "type": "text", + "content": "* Example 3:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 111, + 495, + 158, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 495, + 158, + 506 + ], + "spans": [ + { + "bbox": [ + 111, + 495, + 158, + 506 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": 
[ + 129, + 509, + 497, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 509, + 497, + 520 + ], + "spans": [ + { + "bbox": [ + 129, + 509, + 497, + 520 + ], + "type": "text", + "content": "at_xdineititre:XT-N0232A-CB54,6t xar_cerosd :yae:r2 20,3s atut:sP orecssde ,erufdni_sseu:d6 020.0" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 111, + 523, + 153, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 523, + 153, + 533 + ], + "spans": [ + { + "bbox": [ + 111, + 523, + 153, + 533 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 129, + 537, + 500, + 549 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 537, + 500, + 549 + ], + "spans": [ + { + "bbox": [ + 129, + 537, + 500, + 549 + ], + "type": "text", + "content": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 76, + 551, + 113, + 561 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 551, + 113, + 561 + ], + "spans": [ + { + "bbox": [ + 76, + 551, + 113, + 561 + ], + "type": "text", + "content": "Input:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 93, + 565, + 141, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 565, + 141, + 576 + ], + "spans": [ + { + "bbox": [ + 93, + 565, + 141, + 576 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 111, + 579, + 270, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 579, + 270, + 590 + ], + "spans": [ + { + "bbox": [ + 111, + 579, + 270, + 590 + ], + "type": "text", + "content": "eLid,aa nmArecina ,erised sniL soA gnlese." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 93, + 593, + 135, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 593, + 135, + 602 + ], + "spans": [ + { + "bbox": [ + 93, + 593, + 135, + 602 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 180, + 617, + 412, + 629 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 180, + 617, + 412, + 629 + ], + "spans": [ + { + "bbox": [ + 180, + 617, + 412, + 629 + ], + "type": "text", + "content": "Figure 16: Enhanced Prompt (SwapPairs - 3shot - Letter)" + } + ] + } + ], + "index": 27, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5956" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "bbox": [ + 76, + 193, + 131, + 203 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 193, + 131, + 203 + ], + "spans": [ + { + "bbox": [ + 76, + 193, + 131, + 203 + ], + "type": "text", + "content": "Example C.10" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 76, + 210, + 138, + 220 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 210, + 138, + 220 + ], + "spans": [ + { + "bbox": [ + 76, + 210, + 138, + 220 + ], + "type": "text", + "content": "Background:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 92, + 223, + 520, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 223, + 520, + 234 + ], + "spans": [ + { + "bbox": [ + 92, + 223, + 520, + 234 + ], + "type": "text", + "content": "Decrypt the provided Ciphertext and return the corresponding Plaintext following the given algorithm flow and examples." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 93, + 238, + 163, + 249 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 238, + 163, + 249 + ], + "spans": [ + { + "bbox": [ + 93, + 238, + 163, + 249 + ], + "type": "text", + "content": "Think step by step." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 92, + 251, + 437, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 251, + 437, + 263 + ], + "spans": [ + { + "bbox": [ + 92, + 251, + 437, + 263 + ], + "type": "text", + "content": "Provide the Plaintext result in the format text, where text is the decrypted text." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 75, + 265, + 152, + 276 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 265, + 152, + 276 + ], + "spans": [ + { + "bbox": [ + 75, + 265, + 152, + 276 + ], + "type": "text", + "content": "Algorithm Flow:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 74, + 279, + 518, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 279, + 518, + 305 + ], + "spans": [ + { + "bbox": [ + 74, + 279, + 518, + 305 + ], + "type": "text", + "content": "This encryption method converts each letter of the **Plaintext** into two letters in the **Ciphertext** such that the average of their ASCII values equals the ASCII value of the original letter." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 76, + 307, + 129, + 318 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 307, + 129, + 318 + ], + "spans": [ + { + "bbox": [ + 76, + 307, + 129, + 318 + ], + "type": "text", + "content": "Examples:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 94, + 322, + 143, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 322, + 143, + 332 + ], + "spans": [ + { + "bbox": [ + 94, + 322, + 143, + 332 + ], + "type": "text", + "content": "* Example 1:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 335, + 158, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 335, + 158, + 346 + ], + "spans": [ + { + "bbox": [ + 111, + 335, + 158, + 346 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 129, + 349, + 520, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 349, + 520, + 374 + ], + "spans": [ + { + "bbox": [ + 129, + 349, + 520, + 374 + ], + "type": "text", + "content": "MOaalndf: DFlnhjkmxz IKnpgimortnpmo; CEaasudf npeg AChjqssugi: IKtvkmxz 15, 1990; OQaartrtoqnacdfqx: XZ987654321" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 377, + 152, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 377, + 152, + 386 + ], + "spans": [ + { + "bbox": [ + 111, + 377, + 152, + 386 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 129, + 391, + 435, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 391, + 435, + 401 + ], + "spans": [ + { + "bbox": [ + 129, + 391, + 435, + 401 + ], + "type": "text", + "content": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 94, + 405, + 143, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 94, + 405, + 143, + 416 + ], + "spans": [ + { + "bbox": [ + 94, + 405, + 143, + 416 + ], + "type": "text", + "content": "* Example 2:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 419, + 158, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 419, + 158, + 429 + ], + "spans": [ + { + "bbox": [ + 111, + 419, + 158, + 429 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 129, + 433, + 518, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 433, + 518, + 458 + ], + "spans": [ + { + "bbox": [ + 129, + 433, + 518, + 458 + ], + "type": "text", + "content": "BDaartdf MOtvlnacdfqs: 2024-OQRT-001234; BDaartdf SUxzoqdf: SUgidfgsu/ACqsdfaajl-HJmo; h CEaesudf: NPbdsunpacdfqs 19, 2024" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 461, + 152, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 461, + 152, + 470 + ], + "spans": [ + { + "bbox": [ + 111, + 461, + 152, + 470 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 129, + 474, + 460, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 474, + 460, + 486 + ], + "spans": [ + { + "bbox": [ + 129, + 474, + 460, + 486 + ], + "type": "text", + "content": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 94, + 488, + 143, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 488, + 143, + 499 + ], + "spans": [ + { + "bbox": [ + 94, + 488, + 143, + 499 + ], + "type": "text", + "content": "* Example 3:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 111, + 503, + 158, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 503, + 158, + 513 + ], + "spans": [ + { + "bbox": [ + 111, + 503, + 158, + 513 + ], + "type": "text", + 
"content": "- Ciphertext:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 129, + 516, + 520, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 516, + 520, + 528 + ], + "spans": [ + { + "bbox": [ + 129, + 516, + 520, + 528 + ], + "type": "text", + "content": "suaawy_hjcedfmosuhjeghjdfqs: SUWYMO-2023-AAACBD456, suaawy_qsdfbnpqscert: xzdfaaqs: 2023," + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 75, + 530, + 327, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 530, + 327, + 541 + ], + "spans": [ + { + "bbox": [ + 75, + 530, + 327, + 541 + ], + "type": "text", + "content": "rtsuasutvrt: OQqsnpbbdftrtdfce, qsdfgtvmoce_hjrttrtvdfce: 620.00" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 111, + 544, + 152, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 544, + 152, + 554 + ], + "spans": [ + { + "bbox": [ + 111, + 544, + 152, + 554 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 129, + 558, + 499, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 558, + 499, + 569 + ], + "spans": [ + { + "bbox": [ + 129, + 558, + 499, + 569 + ], + "type": "text", + "content": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 76, + 572, + 113, + 582 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 572, + 113, + 582 + ], + "spans": [ + { + "bbox": [ + 76, + 572, + 113, + 582 + ], + "type": "text", + "content": "Input:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 93, + 586, + 141, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 586, + 141, + 597 + ], + "spans": [ + { + "bbox": [ + 93, + 586, + 141, + 597 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 111, + 599, + 
428, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 599, + 428, + 611 + ], + "spans": [ + { + "bbox": [ + 111, + 599, + 428, + 611 + ], + "type": "text", + "content": "KMdfcehjaa, aamo AAlndfqshjbdaamo, qsdfrthjcedfrh jhmo KMnprt AAmofhdfkmdfrt." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 94, + 614, + 135, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 614, + 135, + 624 + ], + "spans": [ + { + "bbox": [ + 94, + 614, + 135, + 624 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 173, + 638, + 420, + 650 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 638, + 420, + 650 + ], + "spans": [ + { + "bbox": [ + 173, + 638, + 420, + 650 + ], + "type": "text", + "content": "Figure 17: Enhanced Prompt (DualAvgCode - 3shot - Letter)" + } + ] + } + ], + "index": 28, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5957" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "bbox": [ + 76, + 214, + 130, + 225 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 214, + 130, + 225 + ], + "spans": [ + { + "bbox": [ + 76, + 214, + 130, + 225 + ], + "type": "text", + "content": "Example C.11" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 76, + 231, + 138, + 241 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 231, + 138, + 241 + ], + "spans": [ + { + "bbox": [ + 76, + 231, + 138, + 241 + ], + "type": "text", + "content": "Background:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 92, + 244, + 520, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 
244, + 520, + 256 + ], + "spans": [ + { + "bbox": [ + 92, + 244, + 520, + 256 + ], + "type": "text", + "content": "Decrypt the provided ciphertext and return the corresponding Plaintext following the given algorithm flow and examples." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 93, + 259, + 163, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 259, + 163, + 269 + ], + "spans": [ + { + "bbox": [ + 93, + 259, + 163, + 269 + ], + "type": "text", + "content": "Think step by step." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 92, + 272, + 437, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 272, + 437, + 284 + ], + "spans": [ + { + "bbox": [ + 92, + 272, + 437, + 284 + ], + "type": "text", + "content": "Provide the Plaintext result in the format text, where text is the decrypted text." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 76, + 286, + 152, + 297 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 286, + 152, + 297 + ], + "spans": [ + { + "bbox": [ + 76, + 286, + 152, + 297 + ], + "type": "text", + "content": "Algorithm Flow:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 75, + 300, + 518, + 325 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 300, + 518, + 325 + ], + "spans": [ + { + "bbox": [ + 75, + 300, + 518, + 325 + ], + "type": "text", + "content": "For each letter in the " + }, + { + "bbox": [ + 75, + 300, + 518, + 325 + ], + "type": "inline_equation", + "content": "^{**}" + }, + { + "bbox": [ + 75, + 300, + 518, + 325 + ], + "type": "text", + "content": " Plaintext\\*\\*: - If the ASCII value is even, add 1 to it to get the corresponding character in the " + }, + { + "bbox": [ + 75, + 300, + 518, + 325 + ], + "type": "inline_equation", + "content": "^{**}" + }, + { + "bbox": [ + 75, + 300, + 518, + 325 + ], + "type": "text", + "content": " Ciphertext\\*. 
- If the ASCII value is odd, subtract 1 to get the new character in the " + }, + { + "bbox": [ + 75, + 300, + 518, + 325 + ], + "type": "inline_equation", + "content": "^{**}" + }, + { + "bbox": [ + 75, + 300, + 518, + 325 + ], + "type": "text", + "content": " Ciphertext\\*." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 76, + 328, + 128, + 339 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 328, + 128, + 339 + ], + "spans": [ + { + "bbox": [ + 76, + 328, + 128, + 339 + ], + "type": "text", + "content": "Examples:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 94, + 342, + 143, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 342, + 143, + 353 + ], + "spans": [ + { + "bbox": [ + 94, + 342, + 143, + 353 + ], + "type": "text", + "content": "* Example 1:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 356, + 158, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 356, + 158, + 367 + ], + "spans": [ + { + "bbox": [ + 111, + 356, + 158, + 367 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 129, + 370, + 443, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 370, + 443, + 381 + ], + "spans": [ + { + "bbox": [ + 129, + 370, + 443, + 381 + ], + "type": "text", + "content": "Ozld: Dlhmax Kniorno; Ezud ng Chsui: Ktmx 15, 1990; Qzrrqnsu Otlcds: X987654321" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 384, + 153, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 384, + 153, + 394 + ], + "spans": [ + { + "bbox": [ + 111, + 384, + 153, + 394 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 129, + 397, + 436, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 397, + 436, + 409 + ], + "spans": [ + { + "bbox": [ + 129, + 397, + 436, + 409 + ], + 
"type": "text", + "content": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 94, + 412, + 143, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 412, + 143, + 423 + ], + "spans": [ + { + "bbox": [ + 94, + 412, + 143, + 423 + ], + "type": "text", + "content": "* Example 2:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 426, + 158, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 426, + 158, + 437 + ], + "spans": [ + { + "bbox": [ + 111, + 426, + 158, + 437 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 129, + 439, + 478, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 439, + 478, + 451 + ], + "spans": [ + { + "bbox": [ + 129, + 439, + 478, + 451 + ], + "type": "text", + "content": "Bzrd Otlcds: 2024-QR-001234; Bzrd Uxqd: Uidgu/Csdzj-Ho; Ghmhof Ezud: Nbuncds 19, 2024" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 454, + 153, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 454, + 153, + 464 + ], + "spans": [ + { + "bbox": [ + 111, + 454, + 153, + 464 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 129, + 467, + 461, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 467, + 461, + 479 + ], + "spans": [ + { + "bbox": [ + 129, + 467, + 461, + 479 + ], + "type": "text", + "content": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 94, + 481, + 143, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 481, + 143, + 492 + ], + "spans": [ + { + "bbox": [ + 94, + 481, + 143, + 492 + ], + "type": "text", + "content": "* Example 3:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": 
[ + 111, + 495, + 158, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 495, + 158, + 507 + ], + "spans": [ + { + "bbox": [ + 111, + 495, + 158, + 507 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 129, + 509, + 518, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 509, + 518, + 520 + ], + "spans": [ + { + "bbox": [ + 129, + 509, + 518, + 520 + ], + "type": "text", + "content": "uzy_hedoughghds: UYO-2023-ZCB456, uzy_sdbnser: xdzs: 2023, ruzutr: Qsnbdrrde, sdgtoe_hrrte: 620.00" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 111, + 523, + 153, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 523, + 153, + 533 + ], + "spans": [ + { + "bbox": [ + 111, + 523, + 153, + 533 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 129, + 537, + 500, + 549 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 537, + 500, + 549 + ], + "spans": [ + { + "bbox": [ + 129, + 537, + 500, + 549 + ], + "type": "text", + "content": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 76, + 551, + 113, + 561 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 551, + 113, + 561 + ], + "spans": [ + { + "bbox": [ + 76, + 551, + 113, + 561 + ], + "type": "text", + "content": "Input:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 93, + 565, + 141, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 565, + 141, + 576 + ], + "spans": [ + { + "bbox": [ + 93, + 565, + 141, + 576 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 111, + 579, + 282, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 579, + 282, + 590 + ], + 
"spans": [ + { + "bbox": [ + 111, + 579, + 282, + 590 + ], + "type": "text", + "content": "Mdehz, zo Zldshbzo, sdrhedr ho Mnr Zofdmdr." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 94, + 593, + 135, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 593, + 135, + 602 + ], + "spans": [ + { + "bbox": [ + 94, + 593, + 135, + 602 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 180, + 617, + 413, + 629 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 180, + 617, + 413, + 629 + ], + "spans": [ + { + "bbox": [ + 180, + 617, + 413, + 629 + ], + "type": "text", + "content": "Figure 18: Enhanced Prompt (ParityShift - 3shot - Letter)" + } + ] + } + ], + "index": 27, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5958" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "bbox": [ + 76, + 214, + 131, + 225 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 214, + 131, + 225 + ], + "spans": [ + { + "bbox": [ + 76, + 214, + 131, + 225 + ], + "type": "text", + "content": "Example C.12" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 76, + 231, + 138, + 241 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 231, + 138, + 241 + ], + "spans": [ + { + "bbox": [ + 76, + 231, + 138, + 241 + ], + "type": "text", + "content": "Background:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 92, + 244, + 520, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 244, + 520, + 256 + ], + "spans": [ + { + "bbox": [ + 92, + 244, + 520, + 256 + ], + "type": "text", + "content": "Decrypt the provided 
ciphertext and return the corresponding Plaintext following the given algorithm flow and examples." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 93, + 259, + 163, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 259, + 163, + 269 + ], + "spans": [ + { + "bbox": [ + 93, + 259, + 163, + 269 + ], + "type": "text", + "content": "Think step by step." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 92, + 272, + 437, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 272, + 437, + 284 + ], + "spans": [ + { + "bbox": [ + 92, + 272, + 437, + 284 + ], + "type": "text", + "content": "Provide the Plaintext result in the format text, where text is the decrypted text." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 76, + 286, + 152, + 297 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 286, + 152, + 297 + ], + "spans": [ + { + "bbox": [ + 76, + 286, + 152, + 297 + ], + "type": "text", + "content": "Algorithm Flow:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 74, + 300, + 518, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 300, + 518, + 326 + ], + "spans": [ + { + "bbox": [ + 74, + 300, + 518, + 326 + ], + "type": "text", + "content": "The algorithm splits the " + }, + { + "bbox": [ + 74, + 300, + 518, + 326 + ], + "type": "inline_equation", + "content": "^{**}" + }, + { + "bbox": [ + 74, + 300, + 518, + 326 + ], + "type": "text", + "content": " Plaintext\\*\\* into words based on spaces. Each word is then individually encrypted using the Caesar cipher, resulting in the " + }, + { + "bbox": [ + 74, + 300, + 518, + 326 + ], + "type": "inline_equation", + "content": "^{**}" + }, + { + "bbox": [ + 74, + 300, + 518, + 326 + ], + "type": "text", + "content": " ciphertext\\*\\*." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 76, + 328, + 128, + 339 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 328, + 128, + 339 + ], + "spans": [ + { + "bbox": [ + 76, + 328, + 128, + 339 + ], + "type": "text", + "content": "Examples:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 94, + 342, + 143, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 342, + 143, + 353 + ], + "spans": [ + { + "bbox": [ + 94, + 342, + 143, + 353 + ], + "type": "text", + "content": "* Example 1:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 356, + 158, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 356, + 158, + 367 + ], + "spans": [ + { + "bbox": [ + 111, + 356, + 158, + 367 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 129, + 370, + 434, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 370, + 434, + 380 + ], + "spans": [ + { + "bbox": [ + 129, + 370, + 434, + 380 + ], + "type": "text", + "content": "e:Nam lyEmi nson;Joh eDat fo th:Bir yJul 15,0;199 sportPas ber:Number 7654321Y98" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 384, + 153, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 384, + 153, + 394 + ], + "spans": [ + { + "bbox": [ + 111, + 384, + 153, + 394 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 129, + 397, + 435, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 397, + 435, + 409 + ], + "spans": [ + { + "bbox": [ + 129, + 397, + 435, + 409 + ], + "type": "text", + "content": "Name: Emily Johnson; Date of Birth: July 15, 1990; Passport Number: Y987654321" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 94, + 412, + 143, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 412, + 143, + 423 + 
], + "spans": [ + { + "bbox": [ + 94, + 412, + 143, + 423 + ], + "type": "text", + "content": "* Example 2:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 111, + 426, + 158, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 426, + 158, + 437 + ], + "spans": [ + { + "bbox": [ + 111, + 426, + 158, + 437 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 129, + 439, + 459, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 439, + 459, + 450 + ], + "spans": [ + { + "bbox": [ + 129, + 439, + 459, + 450 + ], + "type": "text", + "content": "eCas ber:Num 4-PS-001234;202 eCas e:Typ ft/Break-In;The ingFil e:Dat oberOct 19, 4202" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 111, + 454, + 153, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 454, + 153, + 464 + ], + "spans": [ + { + "bbox": [ + 111, + 454, + 153, + 464 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 129, + 467, + 461, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 467, + 461, + 479 + ], + "spans": [ + { + "bbox": [ + 129, + 467, + 461, + 479 + ], + "type": "text", + "content": "Case Number: 2024-PS-001234; Case Type: Theft/In; Filing Date: October 19, 2024" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 94, + 481, + 143, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 481, + 143, + 492 + ], + "spans": [ + { + "bbox": [ + 94, + 481, + 143, + 492 + ], + "type": "text", + "content": "* Example 3:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 111, + 495, + 158, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 495, + 158, + 506 + ], + "spans": [ + { + "bbox": [ + 111, + 495, + 158, + 506 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 19 + }, + { + 
"bbox": [ + 129, + 508, + 497, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 508, + 497, + 520 + ], + "spans": [ + { + "bbox": [ + 129, + 508, + 497, + 520 + ], + "type": "text", + "content": "identifier:tax -2023-ABC456,TXNRecords:tax ar:ye 3,202 tus:sta cessed,Pro und_iuied:ref .00620" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 111, + 523, + 153, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 523, + 153, + 533 + ], + "spans": [ + { + "bbox": [ + 111, + 523, + 153, + 533 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 129, + 537, + 499, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 537, + 499, + 548 + ], + "spans": [ + { + "bbox": [ + 129, + 537, + 499, + 548 + ], + "type": "text", + "content": "tax_identityer: TXN-2023-ABC456, tax_record: year: 2023, status: Processed, refund_issued: 620.00" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 76, + 551, + 113, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 551, + 113, + 562 + ], + "spans": [ + { + "bbox": [ + 76, + 551, + 113, + 562 + ], + "type": "text", + "content": "Input:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 93, + 565, + 141, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 565, + 141, + 576 + ], + "spans": [ + { + "bbox": [ + 93, + 565, + 141, + 576 + ], + "type": "text", + "content": "- Ciphertext:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 111, + 579, + 272, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 579, + 272, + 591 + ], + "spans": [ + { + "bbox": [ + 111, + 579, + 272, + 591 + ], + "type": "text", + "content": "ia,Led na rican,Ame idesres ni Los eles.Ang" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 94, + 593, + 135, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, 
+ 593, + 135, + 602 + ], + "spans": [ + { + "bbox": [ + 94, + 593, + 135, + 602 + ], + "type": "text", + "content": "- Plaintiff:" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 180, + 617, + 412, + 629 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 180, + 617, + 412, + 629 + ], + "spans": [ + { + "bbox": [ + 180, + 617, + 412, + 629 + ], + "type": "text", + "content": "Figure 19: Enhanced Prompt (WordShift - 3shot - Letter)" + } + ] + } + ], + "index": 27, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5959" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 70, + 90, + 525, + 303 + ], + "blocks": [ + { + "bbox": [ + 189, + 69, + 404, + 82 + ], + "lines": [ + { + "bbox": [ + 189, + 69, + 404, + 82 + ], + "spans": [ + { + "bbox": [ + 189, + 69, + 404, + 82 + ], + "type": "text", + "content": "Table 12: Results on CipherBank(Enhanced Prompt)" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 70, + 90, + 525, + 303 + ], + "lines": [ + { + "bbox": [ + 70, + 90, + 525, + 303 + ], + "spans": [ + { + "bbox": [ + 70, + 90, + 525, + 303 + ], + "type": "table", + "html": "
ModelSubstitution CiphersTransposition CiphersCustom CiphersCipher Score
Rot13AtbashPolybiusVigenèreReverseSwapPairsDualAvgCodeParityShiftWordShiftAccuracyavg
Open-source Chat Models
Mixtral-8x22B-v0.10.760000.3802.670.380.380.51
Qwen2.5-72B-Instruct12.609.1600002.290.381.532.88
Llama-3.1-70B-Instruct2.671.15001.530.381.15000.76
Llama-3.3-70B-Instruct4.581.5300.381.1501.15000.98
DeepSeek-V341.6027.860.380.3865.955.3412.660.765.1717.79
Closed-source Models
GPT-4o-mini-2024-07-1821.7619.0800.384.3900005.07
GPT-4o-2024-08-0645.4224.0500.7651.538.401.911.1510.3115.95
GPT-4o-2024-11-2045.4241.980053.638.023.821.159.5418.17
gemini-1.5-pro63.695.730.760.3814.122.670.381.9110.6911.15
gemini-2.0-flash-exp45.0422.902.290.3846.564.583.8201.1514.08
Claude-Sonnet-3.5-102292.7582.0678.242.4879.399.732.4862.0244.8550.44
Reasoning Models
QwQ-32B-Preview1.913.052.670002.670.380.381.23
DeepSeek-R188.3786.5472.730.7646.9675.0173.1774.421.5157.72
gemini-2.0-flash-thinking37.9819.0910.50055.344.964.770.386.1115.46
ol-mini-2024-09-1254.2072.1450.00.7611.0718.7047.3349.627.2534.56
", + "image_path": "be4b85a911a0f3570d25ab52c1a8437eef2655f1d3404adbbbd16fd89883e3b7.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 322, + 291, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 322, + 291, + 389 + ], + "spans": [ + { + "bbox": [ + 67, + 322, + 291, + 389 + ], + "type": "text", + "content": "sus externally sourced structured text (e.g., quotes from Shakespeare's works). The structured text exhibits greater linguistic familiarity, while the privacy-sensitive data represents real-world encryption needs, lacking inherent semantic patterns." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 390, + 291, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 390, + 291, + 581 + ], + "spans": [ + { + "bbox": [ + 67, + 390, + 291, + 581 + ], + "type": "text", + "content": "As shown in Table 13 and Table 14, models generally perform better on structured text, suggesting that they leverage linguistic priors rather than strictly following decryption rules. When encountering encrypted text with recognizable patterns, models tend to shortcut reasoning, aligning decoded fragments with plausible linguistic structures instead of strictly adhering to learned transformation rules. Conversely, for less structured, domain-specific text, models struggle to infer decryption patterns, reinforcing the advantage of CipherBank's privacy-sensitive dataset, which forces models to engage in independent reasoning rather than rely on pretraining biases." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 590, + 168, + 603 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 590, + 168, + 603 + ], + "spans": [ + { + "bbox": [ + 67, + 590, + 168, + 603 + ], + "type": "text", + "content": "D Error Analysis" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 612, + 190, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 612, + 190, + 624 + ], + "spans": [ + { + "bbox": [ + 67, + 612, + 190, + 624 + ], + "type": "text", + "content": "D.1 Error Classification" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 629, + 291, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 629, + 291, + 698 + ], + "spans": [ + { + "bbox": [ + 67, + 629, + 291, + 698 + ], + "type": "text", + "content": "This section defines the error categories observed in model decryption outputs. These classifications help identify systematic failure patterns and provide insights into how models approach cryptographic reasoning." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 80, + 708, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 708, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 80, + 708, + 291, + 775 + ], + "type": "text", + "content": "(A) Omission/Insertion: The model output contains missing or extra characters, words, or punctuation compared to the reference plaintext. These errors indicate incomplete decryption or unintended modifications, leading to" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 324, + 322, + 453, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 322, + 453, + 335 + ], + "spans": [ + { + "bbox": [ + 324, + 322, + 453, + 335 + ], + "type": "text", + "content": "partial but inaccurate results." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 316, + 344, + 526, + 775 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 316, + 344, + 525, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 344, + 525, + 424 + ], + "spans": [ + { + "bbox": [ + 316, + 344, + 525, + 424 + ], + "type": "text", + "content": "- (B) Name Decryption Error: The decryption result is correct except for the name part, which remains incorrect or partially distorted. This suggests challenges in handling named entities, possibly due to memorization effects or entity-based biases." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 316, + 433, + 526, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 433, + 526, + 542 + ], + "spans": [ + { + "bbox": [ + 316, + 433, + 526, + 542 + ], + "type": "text", + "content": "- (C) Semantic Inference: The model makes errors based on semantic reasoning rather than strictly following decryption rules. Instead of decoding symbols precisely, the model hallucinates plausible but incorrect outputs that fit the general meaning of the sentence. This indicates a tendency to prioritize linguistic coherence over strict decryption fidelity." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 316, + 550, + 525, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 550, + 525, + 644 + ], + "spans": [ + { + "bbox": [ + 316, + 550, + 525, + 644 + ], + "type": "text", + "content": "- (D) Reorganization: The output preserves the exact meaning of the reference plaintext but rearranges the sentence structure. This suggests that the model prioritizes fluency over strict character-level fidelity, leading to errors in cryptographic tasks where precision is essential." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 316, + 653, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 653, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 316, + 653, + 526, + 775 + ], + "type": "text", + "content": "- (E) Reasoning Failure: The model output is significantly different from the reference, and decryption is essentially unsuccessful. This suggests a fundamental failure in identifying encryption patterns, leading to outputs that bear little resemblance to the expected plaintext. This category includes cases where the model fails to infer transformation rules or apply correct decryption strategies." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5960" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 68, + 90, + 525, + 208 + ], + "blocks": [ + { + "bbox": [ + 173, + 69, + 420, + 82 + ], + "lines": [ + { + "bbox": [ + 173, + 69, + 420, + 82 + ], + "spans": [ + { + "bbox": [ + 173, + 69, + 420, + 82 + ], + "type": "text", + "content": "Table 13: Decryption Performance on Privacy-Sensitive Data" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 68, + 90, + 525, + 208 + ], + "lines": [ + { + "bbox": [ + 68, + 90, + 525, + 208 + ], + "spans": [ + { + "bbox": [ + 68, + 90, + 525, + 208 + ], + "type": "table", + "html": "
ModelRot13AtbashPolybiusVigenèreReverseSwapDualAvgCodeParityShiftWordShiftAccuracyavg
Open-source Models
DeepSeek-V324.3415.6415.70033.723.5104.3515.6412.54
DeepSeek-R157.8871.0271.554.3533.574.35012.718.7029.35
Closed-source Models
GPT-4o-2024-11-2021.7421.740030.438.700013.0410.63
Gemini-2.0-Flash-Exp47.834.354.35052.1704.354.3513.0414.49
Claude-Sonnet-3.5-102286.9678.2665.224.3591.3013.044.3552.1747.8349.28
Gemini-2.0-Flash-Thinking39.134.350060.87004.3530.4315.46
o1-Mini-2024-09-1260.8786.9669.5708.70013.0417.394.3528.99
", + "image_path": "67364b7148e07ce1d85c2a4ea8fb38d75ff13dcce828846e8062ec62f7a4d1b4.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 68, + 239, + 525, + 354 + ], + "blocks": [ + { + "bbox": [ + 187, + 218, + 405, + 231 + ], + "lines": [ + { + "bbox": [ + 187, + 218, + 405, + 231 + ], + "spans": [ + { + "bbox": [ + 187, + 218, + 405, + 231 + ], + "type": "text", + "content": "Table 14: Decryption Performance on Structured Text" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 68, + 239, + 525, + 354 + ], + "lines": [ + { + "bbox": [ + 68, + 239, + 525, + 354 + ], + "spans": [ + { + "bbox": [ + 68, + 239, + 525, + 354 + ], + "type": "table", + "html": "
ModelRot13AtbashPolybiusVigenèreReverseSwapPairDualAvgCodeParityShiftWordShiftAccuracyavg
Open-source Models
DeepSeek-V376.1224.0315.70052.1729.40012.7155.1329.47
DeepSeek-R184.5185.041007.5979.108.708.7015.6430.4346.63
Closed-source Models
GPT-4o-2024-11-2078.2639.134.35086.9621.7404.3543.4830.92
Gemini-2.0-Flash-Exp86.9613.044.35086.968.70017.3943.4828.99
Claude-Sonnet-3.5-102291.3095.6595.654.3510052.178.7078.2695.6569.08
Gemini-2.0-Flash-Thinking86.9613.048.70069.5717.390052.1727.54
o1-Mini-2024-09-1282.6195.6578.26060.874.3513.0417.3943.4843.96
", + "image_path": "3f19db9ab6803c61a4f28ce8404c5d001da0c21536c79523550d35e0e90aadc4.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 80, + 375, + 290, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 375, + 290, + 401 + ], + "spans": [ + { + "bbox": [ + 80, + 375, + 290, + 401 + ], + "type": "text", + "content": "- (F) Other: Miscellaneous errors that do not fit into the defined categories." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 415, + 291, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 415, + 291, + 481 + ], + "spans": [ + { + "bbox": [ + 67, + 415, + 291, + 481 + ], + "type": "text", + "content": "This classification framework provides a structured approach to analyzing decryption errors, helping to pinpoint systematic weaknesses and guide future improvements in cryptographic reasoning models." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 494, + 261, + 507 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 494, + 261, + 507 + ], + "spans": [ + { + "bbox": [ + 67, + 494, + 261, + 507 + ], + "type": "text", + "content": "D.2 Examples of Different Error Types" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 513, + 291, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 513, + 291, + 635 + ], + "spans": [ + { + "bbox": [ + 67, + 513, + 291, + 635 + ], + "type": "text", + "content": "To further illustrate the types of decryption errors encountered in our evaluation, we provide concrete examples corresponding to each error category. These cases demonstrate how models fail in various aspects of decryption, including omission-insertion, name decryption errors, semantic inference, reorganization, reasoning failures, and other anomalies. Example D.1 - D6 showcase representative examples of each error type." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 646, + 259, + 659 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 646, + 259, + 659 + ], + "spans": [ + { + "bbox": [ + 67, + 646, + 259, + 659 + ], + "type": "text", + "content": "D.3 Detailed Error Distribution Tables" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 666, + 290, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 666, + 290, + 732 + ], + "spans": [ + { + "bbox": [ + 67, + 666, + 290, + 732 + ], + "type": "text", + "content": "Tables 15-20 present a detailed breakdown of error distributions across different encryption algorithms for the six selected models. From these results, we identify several common trends and model-specific differences." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 735, + 290, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 735, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 735, + 290, + 775 + ], + "type": "text", + "content": "Challenges in Name Decryption and Symbolic Reasoning. Across all models, name decryption errors remain prevalent, particularly in Atbash and" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 375, + 526, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 375, + 526, + 455 + ], + "spans": [ + { + "bbox": [ + 302, + 375, + 526, + 455 + ], + "type": "text", + "content": "Polybius, indicating persistent difficulties in handling entity-based transformations. Additionally, models struggle with key-based and transposition ciphers such as Vigenère and SwapPairs, suggesting limitations in tracking multi-step transformations and generalizing decryption strategies." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 456, + 527, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 456, + 527, + 563 + ], + "spans": [ + { + "bbox": [ + 302, + 456, + 527, + 563 + ], + "type": "text", + "content": "Semantic Overreliance vs. Overthinking in Decryption. Chat models often exhibit semantic inference errors, where decrypted outputs align with linguistic patterns rather than encryption rules. In contrast, reasoning models tend to overthink simple tasks, leading to unnecessary self-correction loops that degrade performance in straightforward ciphers like Reverse." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 565, + 527, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 565, + 527, + 645 + ], + "spans": [ + { + "bbox": [ + 302, + 565, + 527, + 645 + ], + "type": "text", + "content": "Structural Alignment and Insertion Errors. Frequent omission and insertion errors in WordShift and Reverse ciphers highlight difficulties in preserving character order. This suggests that models rely on semantic priors rather than strict symbolic reasoning, leading to misaligned outputs." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 646, + 393, + 660 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 646, + 393, + 660 + ], + "spans": [ + { + "bbox": [ + 314, + 646, + 393, + 660 + ], + "type": "text", + "content": "Key Takeaways:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 670, + 526, + 775 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 316, + 670, + 525, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 670, + 525, + 710 + ], + "spans": [ + { + "bbox": [ + 316, + 670, + 525, + 710 + ], + "type": "text", + "content": "- Chat models (Claude, Gemini) perform well in substitution ciphers but struggle with complex rule-based encryption." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 721, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 721, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 316, + 721, + 526, + 775 + ], + "type": "text", + "content": "- Reasoning models (DeepSeek-R1, o1) maintain better structural accuracy but underperform in transposition-based and key-dependent ciphers." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 308, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 308, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 308, + 791 + ], + "type": "text", + "content": "5961" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 32 + }, + { + "para_blocks": [ + { + "bbox": [ + 75, + 98, + 254, + 110 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 98, + 254, + 110 + ], + "spans": [ + { + "bbox": [ + 75, + 98, + 254, + 110 + ], + "type": "text", + "content": "Example D.1: Error Type: Omission/Insertion" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 76, + 115, + 120, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 115, + 120, + 126 + ], + "spans": [ + { + "bbox": [ + 76, + 115, + 120, + 126 + ], + "type": "text", + "content": "Example 1:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 76, + 128, + 252, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 128, + 252, + 139 + ], + "spans": [ + { + "bbox": [ + 76, + 128, + 252, + 139 + ], + "type": "text", + "content": "Predictions: Card Number: ID 1245-6789-0123" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 76, + 142, + 306, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 142, + 306, + 153 + ], + "spans": [ + { + "bbox": [ + 76, + 142, + 306, + 153 + ], + "type": "text", + "content": 
"References: Clark holds the ID Card Number 1245-6789-0123." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 76, + 162, + 120, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 162, + 120, + 173 + ], + "spans": [ + { + "bbox": [ + 76, + 162, + 120, + 173 + ], + "type": "text", + "content": "Example 2:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 76, + 176, + 252, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 176, + 252, + 187 + ], + "spans": [ + { + "bbox": [ + 76, + 176, + 252, + 187 + ], + "type": "text", + "content": "Predictions: Card Number: ID 1245-6789-0123" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 76, + 190, + 306, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 190, + 306, + 200 + ], + "spans": [ + { + "bbox": [ + 76, + 190, + 306, + 200 + ], + "type": "text", + "content": "References: Clark holds the ID Card Number 1245-6789-0123." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 76, + 210, + 120, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 210, + 120, + 221 + ], + "spans": [ + { + "bbox": [ + 76, + 210, + 120, + 221 + ], + "type": "text", + "content": "Example 3:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 76, + 223, + 299, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 223, + 299, + 234 + ], + "spans": [ + { + "bbox": [ + 76, + 223, + 299, + 234 + ], + "type": "text", + "content": "Predictions: Salary Amount: $67,000; Pay Date: 2023-10-25" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 76, + 237, + 299, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 237, + 299, + 248 + ], + "spans": [ + { + "bbox": [ + 76, + 237, + 299, + 248 + ], + "type": "text", + "content": "References: Salary Amount: $67,000, Pay Date: 2023-10-25." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 76, + 316, + 273, + 327 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 316, + 273, + 327 + ], + "spans": [ + { + "bbox": [ + 76, + 316, + 273, + 327 + ], + "type": "text", + "content": "Example D.2: Error Type: Name Decryption Error" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 76, + 333, + 120, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 333, + 120, + 343 + ], + "spans": [ + { + "bbox": [ + 76, + 333, + 120, + 343 + ], + "type": "text", + "content": "Example 1:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 76, + 346, + 284, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 346, + 284, + 357 + ], + "spans": [ + { + "bbox": [ + 76, + 346, + 284, + 357 + ], + "type": "text", + "content": "Predictions: Learn, an American, inside on Los Angeles." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 76, + 360, + 283, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 360, + 283, + 371 + ], + "spans": [ + { + "bbox": [ + 76, + 360, + 283, + 371 + ], + "type": "text", + "content": "References: Ledia, an American, resides in Los Angeles." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 76, + 380, + 120, + 391 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 380, + 120, + 391 + ], + "spans": [ + { + "bbox": [ + 76, + 380, + 120, + 391 + ], + "type": "text", + "content": "Example 2:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 76, + 394, + 518, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 394, + 518, + 419 + ], + "spans": [ + { + "bbox": [ + 76, + 394, + 518, + 419 + ], + "type": "text", + "content": "Predictions: Individual ID: A1234567; Name: John Doe; Age: 34; Gender Identity: Cisgender 16k11.2 Location, Country State Citizenship." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 76, + 422, + 518, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 422, + 518, + 447 + ], + "spans": [ + { + "bbox": [ + 76, + 422, + 518, + 447 + ], + "type": "text", + "content": "References: Individual ID: A1234567; Name: Jane Doe; Age: 34; Genetic Testing: Chromosome 16p11.2 Deletion, Celiac Disease Predisposition." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 76, + 455, + 120, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 455, + 120, + 466 + ], + "spans": [ + { + "bbox": [ + 76, + 455, + 120, + 466 + ], + "type": "text", + "content": "Example 3:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 76, + 469, + 293, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 469, + 293, + 481 + ], + "spans": [ + { + "bbox": [ + 76, + 469, + 293, + 481 + ], + "type": "text", + "content": "Predictions: Handed lost the passport Number A12345678." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 76, + 483, + 286, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 483, + 286, + 494 + ], + "spans": [ + { + "bbox": [ + 76, + 483, + 286, + 494 + ], + "type": "text", + "content": "References: Dean holds the passport number A12345678." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 76, + 561, + 254, + 573 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 561, + 254, + 573 + ], + "spans": [ + { + "bbox": [ + 76, + 561, + 254, + 573 + ], + "type": "text", + "content": "Example D.3: Error Type: Semantic Inference" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 76, + 578, + 120, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 578, + 120, + 589 + ], + "spans": [ + { + "bbox": [ + 76, + 578, + 120, + 589 + ], + "type": "text", + "content": "Example 1:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 76, + 592, + 519, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 592, + 519, + 616 + ], + "spans": [ + { + "bbox": [ + 76, + 592, + 519, + 616 + ], + "type": "text", + "content": "Predictions: Jessica Brown, Bachelor of Biology, GPA: 3.9, Graduated 2023, Skills: Genetics, Microbiology, Ecology, Bioinformatics." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 76, + 619, + 519, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 619, + 519, + 645 + ], + "spans": [ + { + "bbox": [ + 76, + 619, + 519, + 645 + ], + "type": "text", + "content": "References: Jessica Brown, Bachelors in Biology, GPA: 3.9, Graduated 2023, Courses: Genetics, Microbiology, Ecology, Biochemistry." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 76, + 653, + 120, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 653, + 120, + 664 + ], + "spans": [ + { + "bbox": [ + 76, + 653, + 120, + 664 + ], + "type": "text", + "content": "Example 2:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 76, + 667, + 519, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 667, + 519, + 679 + ], + "spans": [ + { + "bbox": [ + 76, + 667, + 519, + 679 + ], + "type": "text", + "content": "Predictions: Patent-pending design specification PR2023_KURITY, Company Z, including batch production requirements." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 76, + 682, + 508, + 692 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 682, + 508, + 692 + ], + "spans": [ + { + "bbox": [ + 76, + 682, + 508, + 692 + ], + "type": "text", + "content": "References: Patent-pending design specification PR2023_KURITY, Company Z, including batch performance analysis." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 76, + 701, + 120, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 701, + 120, + 712 + ], + "spans": [ + { + "bbox": [ + 76, + 701, + 120, + 712 + ], + "type": "text", + "content": "Example 3:" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 76, + 715, + 393, + 726 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 715, + 393, + 726 + ], + "spans": [ + { + "bbox": [ + 76, + 715, + 393, + 726 + ], + "type": "text", + "content": "Predictions: Office ID: P12345, Name: John, Position: Manager, Department: Finance." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 76, + 729, + 401, + 740 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 729, + 401, + 740 + ], + "spans": [ + { + "bbox": [ + 76, + 729, + 401, + 740 + ], + "type": "text", + "content": "References: Officer ID: P12345, Name: John, Position: Sergeant, Department: Homicide." + } + ] + } + ], + "index": 29 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5962" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 33 + }, + { + "para_blocks": [ + { + "bbox": [ + 76, + 89, + 240, + 100 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 89, + 240, + 100 + ], + "spans": [ + { + "bbox": [ + 76, + 89, + 240, + 100 + ], + "type": "text", + "content": "Example D.4: Error Type: Reorganization" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 76, + 105, + 121, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 105, + 121, + 116 + ], + "spans": [ + { + "bbox": [ + 76, + 105, + 121, + 116 + ], + "type": "text", + "content": "Example 1:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 76, + 119, + 343, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 119, + 343, + 131 + ], + "spans": [ + { + "bbox": [ + 76, + 119, + 343, + 131 + ], + "type": "text", + "content": "Predictions: Name: Ledia; Nationality: American; Address: Los Angeles" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 76, + 133, + 283, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 133, + 283, + 144 + ], + "spans": [ + { + "bbox": [ + 76, + 133, + 283, + 144 + ], + "type": "text", + "content": "References: Ledia, an American, resides in Los Angeles." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 76, + 153, + 120, + 163 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 153, + 120, + 163 + ], + "spans": [ + { + "bbox": [ + 76, + 153, + 120, + 163 + ], + "type": "text", + "content": "Example 2:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 76, + 167, + 313, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 167, + 313, + 178 + ], + "spans": [ + { + "bbox": [ + 76, + 167, + 313, + 178 + ], + "type": "text", + "content": "Predictions: Passport Number: A12345678; Holder Name: Dean" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 76, + 180, + 286, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 180, + 286, + 191 + ], + "spans": [ + { + "bbox": [ + 76, + 180, + 286, + 191 + ], + "type": "text", + "content": "References: Dean holds the passport number A12345678." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 76, + 200, + 120, + 211 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 200, + 120, + 211 + ], + "spans": [ + { + "bbox": [ + 76, + 200, + 120, + 211 + ], + "type": "text", + "content": "Example 3:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 76, + 214, + 400, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 214, + 400, + 225 + ], + "spans": [ + { + "bbox": [ + 76, + 214, + 400, + 225 + ], + "type": "text", + "content": "Predictions: Name: Peter; Date of Birth: April 23, 1985; Passport Number: X123456789" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 76, + 227, + 437, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 227, + 437, + 239 + ], + "spans": [ + { + "bbox": [ + 76, + 227, + 437, + 239 + ], + "type": "text", + "content": "References: Peter was born on April 23, 1985, and carries a passport with the number X123456789." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 76, + 288, + 250, + 300 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 288, + 250, + 300 + ], + "spans": [ + { + "bbox": [ + 76, + 288, + 250, + 300 + ], + "type": "text", + "content": "Example D.5: Error Type: Reasoning Failure" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 76, + 305, + 120, + 316 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 305, + 120, + 316 + ], + "spans": [ + { + "bbox": [ + 76, + 305, + 120, + 316 + ], + "type": "text", + "content": "Example 1:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 76, + 318, + 500, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 318, + 500, + 329 + ], + "spans": [ + { + "bbox": [ + 76, + 318, + 500, + 329 + ], + "type": "text", + "content": "Predictions: Address: 123 Main St, Apt 4B, New York, NY, Zip Code: 10001, Phone: 555-1234, Unit: 101, Floor: 2." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 76, + 333, + 519, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 333, + 519, + 357 + ], + "spans": [ + { + "bbox": [ + 76, + 333, + 519, + 357 + ], + "type": "text", + "content": "References: Officer: Lisa Grant, Sergeant, Downtown Precinct, Patrol Car: VG2301, Equipment: Radar Gun, Model: RGX501, Weapon: Taser X2." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 76, + 366, + 120, + 377 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 366, + 120, + 377 + ], + "spans": [ + { + "bbox": [ + 76, + 366, + 120, + 377 + ], + "type": "text", + "content": "Example 2:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 76, + 380, + 263, + 391 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 380, + 263, + 391 + ], + "spans": [ + { + "bbox": [ + 76, + 380, + 263, + 391 + ], + "type": "text", + "content": "Predictions: Welcome, once more, securely within." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 76, + 394, + 283, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 394, + 283, + 405 + ], + "spans": [ + { + "bbox": [ + 76, + 394, + 283, + 405 + ], + "type": "text", + "content": "References: Ledia, an American, resides in Los Angeles." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 76, + 413, + 120, + 424 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 413, + 120, + 424 + ], + "spans": [ + { + "bbox": [ + 76, + 413, + 120, + 424 + ], + "type": "text", + "content": "Example 3:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 76, + 427, + 519, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 427, + 519, + 452 + ], + "spans": [ + { + "bbox": [ + 76, + 427, + 519, + 452 + ], + "type": "text", + "content": "Predictions: Passport ID: 123456789; Expiry Date: Emily, 38, Issued By Authority, Renewal By Agency. Valid Until 2025, Expiry Passport." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 76, + 454, + 518, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 454, + 518, + 481 + ], + "spans": [ + { + "bbox": [ + 76, + 454, + 518, + 481 + ], + "type": "text", + "content": "References: Patient ID: MRN456789; Medical History: Emily, 38, diagnosed with asthma, treated with inhalers. Allergic to dust mites, pollen." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 76, + 529, + 204, + 541 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 529, + 204, + 541 + ], + "spans": [ + { + "bbox": [ + 76, + 529, + 204, + 541 + ], + "type": "text", + "content": "Example D.6: Error Type: Other" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 76, + 546, + 120, + 556 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 546, + 120, + 556 + ], + "spans": [ + { + "bbox": [ + 76, + 546, + 120, + 556 + ], + "type": "text", + "content": "Example 1:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 76, + 559, + 519, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 559, + 519, + 584 + ], + "spans": [ + { + "bbox": [ + 76, + 559, + 519, + 584 + ], + "type": "text", + "content": "Predictions: CookieID12345 maintain login status forUserID98765 on www.example.com, facilitating seamless access. Analyzing recordsUserID98765's engagement, deducting 500 page views and a click-through rate of " + }, + { + "bbox": [ + 76, + 559, + 519, + 584 + ], + "type": "inline_equation", + "content": "4.5\\%" + }, + { + "bbox": [ + 76, + 559, + 519, + 584 + ], + "type": "text", + "content": " across the session." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 76, + 587, + 519, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 587, + 519, + 626 + ], + "spans": [ + { + "bbox": [ + 76, + 587, + 519, + 626 + ], + "type": "text", + "content": "References: CookieID12345 maintains login status forUserID98765 on www.example.com, facilitating seamless access. Analytics tracksUserID98765's engagement, documenting 500 page views and a click-through rate of " + }, + { + "bbox": [ + 76, + 587, + 519, + 626 + ], + "type": "inline_equation", + "content": "4.5\\%" + }, + { + "bbox": [ + 76, + 587, + 519, + 626 + ], + "type": "text", + "content": " across the session." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 76, + 634, + 120, + 645 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 634, + 120, + 645 + ], + "spans": [ + { + "bbox": [ + 76, + 634, + 120, + 645 + ], + "type": "text", + "content": "Example 2:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 76, + 649, + 519, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 649, + 519, + 660 + ], + "spans": [ + { + "bbox": [ + 76, + 649, + 519, + 660 + ], + "type": "text", + "content": "Predictions: Code: Coordinates: Latitude Longitude: " + }, + { + "bbox": [ + 76, + 649, + 519, + 660 + ], + "type": "inline_equation", + "content": "38.251^{\\circ}\\mathrm{N}" + }, + { + "bbox": [ + 76, + 649, + 519, + 660 + ], + "type": "text", + "content": " -85.754 W, Latitude Longitude: 34.091 N, -118.493 W." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 76, + 662, + 500, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 662, + 500, + 673 + ], + "spans": [ + { + "bbox": [ + 76, + 662, + 500, + 673 + ], + "type": "text", + "content": "References: Base Distribution: North Plains Base: " + }, + { + "bbox": [ + 76, + 662, + 500, + 673 + ], + "type": "inline_equation", + "content": "38.251^{\\circ}\\mathrm{N}" + }, + { + "bbox": [ + 76, + 662, + 500, + 673 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 76, + 662, + 500, + 673 + ], + "type": "inline_equation", + "content": "-85.754^{\\circ}\\mathrm{W}" + }, + { + "bbox": [ + 76, + 662, + 500, + 673 + ], + "type": "text", + "content": ", East Valley Site: " + }, + { + "bbox": [ + 76, + 662, + 500, + 673 + ], + "type": "inline_equation", + "content": "34.091^{\\circ}\\mathrm{N}" + }, + { + "bbox": [ + 76, + 662, + 500, + 673 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 76, + 662, + 500, + 673 + ], + "type": "inline_equation", + "content": "-118.493^{\\circ}\\mathrm{W}" + }, + { + "bbox": [ + 76, + 662, + 500, 
+ 673 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 76, + 682, + 120, + 693 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 682, + 120, + 693 + ], + "spans": [ + { + "bbox": [ + 76, + 682, + 120, + 693 + ], + "type": "text", + "content": "Example 3:" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 76, + 696, + 519, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 696, + 519, + 721 + ], + "spans": [ + { + "bbox": [ + 76, + 696, + 519, + 721 + ], + "type": "text", + "content": "Predictions: Name: Alex Smith; Salary: $87,500; Pay Frequency: Biweekly; Position: Software Developer; Employee ID: EID-257846; Department: IT." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 76, + 724, + 519, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 724, + 519, + 749 + ], + "spans": [ + { + "bbox": [ + 76, + 724, + 519, + 749 + ], + "type": "text", + "content": "References: Name: Alex Smith, Salary: $87,500, Pay Frequency: Biweekly, Position: Software Developer, Employee ID: EID-257846, Department: IT." 
+ } + ] + } + ], + "index": 29 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5963" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 34 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 71, + 90, + 523, + 216 + ], + "blocks": [ + { + "bbox": [ + 108, + 69, + 485, + 82 + ], + "lines": [ + { + "bbox": [ + 108, + 69, + 485, + 82 + ], + "spans": [ + { + "bbox": [ + 108, + 69, + 485, + 82 + ], + "type": "text", + "content": "Table 15: Error Type Percentages for Different Algorithms in Claude-Sonnet-3.5-1022 Model" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 71, + 90, + 523, + 216 + ], + "lines": [ + { + "bbox": [ + 71, + 90, + 523, + 216 + ], + "spans": [ + { + "bbox": [ + 71, + 90, + 523, + 216 + ], + "type": "table", + "html": "
AlgorithmOmission/InsertionName Decryption ErrorError Types
Semantic InferenceReorganizationReasoning FailureOther
Rot1333.3351.850.0011.113.700.00
Atbash15.7978.950.003.510.001.75
Polybius42.6245.900.0011.480.000.00
Vigenère2.7332.425.083.5256.250.00
Reverse39.2448.100.005.066.331.27
SwapPairs15.9838.522.052.8738.112.46
DualAvgCode6.8839.688.502.4341.301.21
ParityShift19.7970.834.173.122.080.00
WordShift51.9522.082.608.4412.342.60
", + "image_path": "38bbe446e13ba18d15fd06b41a04cc47cea4e522e54d39791afb5a5aaf575fd0.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 71, + 247, + 524, + 374 + ], + "blocks": [ + { + "bbox": [ + 129, + 226, + 463, + 238 + ], + "lines": [ + { + "bbox": [ + 129, + 226, + 463, + 238 + ], + "spans": [ + { + "bbox": [ + 129, + 226, + 463, + 238 + ], + "type": "text", + "content": "Table 16: Error Type Percentages for Different Algorithms in DeepSeek-R1 Model" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 71, + 247, + 524, + 374 + ], + "lines": [ + { + "bbox": [ + 71, + 247, + 524, + 374 + ], + "spans": [ + { + "bbox": [ + 71, + 247, + 524, + 374 + ], + "type": "table", + "html": "
AlgorithmOmission/InsertionName Decryption ErrorError Types
Semantic InferenceReorganizationReasoning FailureOther
Rot1340.0030.004.2921.431.432.86
Atbash42.5924.070.9329.630.002.78
Polybius48.6317.120.6821.928.902.74
Vigenère4.6018.012.682.3071.650.77
Reverse25.6419.661.7145.306.411.28
SwapPairs9.2025.293.072.3058.621.53
DualAvgCode25.6322.613.5228.6419.100.50
ParityShift7.0229.396.583.9552.190.88
WordShift29.1722.922.0825.4220.000.42
", + "image_path": "34b7cd012ee0b1d154c089665a8631ea41624ba43ea234e86da6c5535cd2f699.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 71, + 404, + 524, + 531 + ], + "blocks": [ + { + "bbox": [ + 129, + 383, + 463, + 396 + ], + "lines": [ + { + "bbox": [ + 129, + 383, + 463, + 396 + ], + "spans": [ + { + "bbox": [ + 129, + 383, + 463, + 396 + ], + "type": "text", + "content": "Table 17: Error Type Percentages for Different Algorithms in DeepSeek-V3 Model" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 71, + 404, + 524, + 531 + ], + "lines": [ + { + "bbox": [ + 71, + 404, + 524, + 531 + ], + "spans": [ + { + "bbox": [ + 71, + 404, + 524, + 531 + ], + "type": "table", + "html": "
AlgorithmOmission/InsertionName Decryption ErrorError Types
Semantic InferenceReorganizationReasoning FailureOther
Rot1310.7355.9315.825.0811.860.56
Atbash8.0738.127.173.5941.261.79
Polybius5.4712.112.342.7376.950.39
Vigenère0.3820.772.690.7774.231.15
Reverse21.5040.195.6113.5518.220.93
SwapPairs1.9218.392.680.3876.250.38
DualAvgCode3.0712.643.452.6877.780.38
ParityShift1.9328.573.860.7764.480.39
WordShift27.8029.464.5617.0120.330.83
", + "image_path": "e4e0296214268fc410679b21684a7bfc38294b9c1d366dfab17f6371d932a377.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 71, + 561, + 524, + 688 + ], + "blocks": [ + { + "bbox": [ + 127, + 540, + 465, + 553 + ], + "lines": [ + { + "bbox": [ + 127, + 540, + 465, + 553 + ], + "spans": [ + { + "bbox": [ + 127, + 540, + 465, + 553 + ], + "type": "text", + "content": "Table 18: Error Type Percentages for Different Algorithms in gemini-1.5-pro Model" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 71, + 561, + 524, + 688 + ], + "lines": [ + { + "bbox": [ + 71, + 561, + 524, + 688 + ], + "spans": [ + { + "bbox": [ + 71, + 561, + 524, + 688 + ], + "type": "table", + "html": "
AlgorithmOmission/InsertionName Decryption ErrorError Types
Semantic InferenceReorganizationReasoning FailureOther
Rot1312.9858.020.765.3422.140.76
Atbash1.1515.003.080.7778.851.15
Polybius4.2117.243.071.9271.651.92
Vigenère2.2914.893.440.7678.630.00
Reverse20.8533.198.9410.2126.380.43
SwapPairs6.4925.571.911.5363.361.15
DualAvgCode2.6813.034.601.9277.390.38
ParityShift3.0828.463.080.3864.230.77
WordShift34.2524.202.7418.7219.630.46
", + "image_path": "7a2923eab0537b84bd6e80cfb2c8cbb128c80505478ea991161ca78009d5c0f9.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 81, + 708, + 290, + 762 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 708, + 290, + 762 + ], + "spans": [ + { + "bbox": [ + 81, + 708, + 290, + 762 + ], + "type": "text", + "content": "- All models show high name decryption errors and reasoning failures in Vigenère and SwapPairs, highlighting gaps in symbolic reasoning and long-term dependency tracking." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 708, + 525, + 761 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 708, + 525, + 761 + ], + "spans": [ + { + "bbox": [ + 302, + 708, + 525, + 761 + ], + "type": "text", + "content": "These observations reveal that no single model excels across all ciphers, emphasizing the need for advancements in structured reasoning and symbolic manipulation for decryption tasks. 
Future" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5964" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 35 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 71, + 90, + 526, + 217 + ], + "blocks": [ + { + "bbox": [ + 141, + 69, + 451, + 82 + ], + "lines": [ + { + "bbox": [ + 141, + 69, + 451, + 82 + ], + "spans": [ + { + "bbox": [ + 141, + 69, + 451, + 82 + ], + "type": "text", + "content": "Table 19: Error Type Percentages for Different Algorithms in o1-mini Model" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 71, + 90, + 526, + 217 + ], + "lines": [ + { + "bbox": [ + 71, + 90, + 526, + 217 + ], + "spans": [ + { + "bbox": [ + 71, + 90, + 526, + 217 + ], + "type": "table", + "html": "
AlgorithmOmission/InsertionName Decryption ErrorError Types
Semantic InferenceReorganizationReasoning FailureOther
Rot1326.9538.3013.4817.021.422.84
Atbash37.3531.337.2316.876.021.20
Polybius30.9432.371.4425.188.631.44
Vigenère0.0021.4310.713.5764.290.00
Reverse12.7029.108.2032.3817.210.41
SwapPairs1.919.541.530.0086.640.38
DualAvgCode0.0018.520.003.7077.780.00
ParityShift4.5534.303.314.9652.480.41
WordShift11.5828.574.635.7949.030.39
", + "image_path": "888e0e791066465d2ecee9c620e29c5df183ec12ed2f0597a481f3b8033d86a2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 71, + 248, + 526, + 375 + ], + "blocks": [ + { + "bbox": [ + 151, + 227, + 440, + 239 + ], + "lines": [ + { + "bbox": [ + 151, + 227, + 440, + 239 + ], + "spans": [ + { + "bbox": [ + 151, + 227, + 440, + 239 + ], + "type": "text", + "content": "Table 20: Error Type Percentages for Different Algorithms in o1 Model" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 71, + 248, + 526, + 375 + ], + "lines": [ + { + "bbox": [ + 71, + 248, + 526, + 375 + ], + "spans": [ + { + "bbox": [ + 71, + 248, + 526, + 375 + ], + "type": "table", + "html": "
AlgorithmOmission/InsertionName Decryption ErrorError Types
Semantic InferenceReorganizationReasoning FailureOther
Rot1316.1928.574.765.7143.810.95
Atbash29.0949.095.4510.913.641.82
Polybius40.9128.796.0610.6112.121.52
Vigenère4.6236.151.541.1556.150.38
Reverse16.1425.563.5914.3538.571.79
SwapPairs5.2631.585.265.2652.630.00
DualAvgCode24.6233.853.082.3135.380.77
ParityShift4.0426.774.552.0262.120.51
WordShift30.8824.262.9418.3821.322.21
", + "image_path": "d91db57a0bc8aaf456f6c85fb32452751b8acbfdcdb7a525c373c13caa576ec1.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 394, + 207, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 394, + 207, + 407 + ], + "spans": [ + { + "bbox": [ + 68, + 394, + 207, + 407 + ], + "type": "text", + "content": "improvements could focus on:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 81, + 417, + 291, + 775 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 81, + 417, + 291, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 417, + 291, + 567 + ], + "spans": [ + { + "bbox": [ + 81, + 417, + 291, + 567 + ], + "type": "text", + "content": "- Minimizing the Impact of Semantic Bias in Logical Inference: Cryptographic reasoning tasks often necessitate abstract rule extraction rather than reliance on semantic interpretation. An excessive dependence on linguistic priors can impede the model's ability to identify underlying structural transformations, resulting in systematic errors. Future advancements should focus on reducing semantic interference to improve the extraction of abstract logical patterns." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 81, + 576, + 291, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 576, + 291, + 709 + ], + "spans": [ + { + "bbox": [ + 81, + 576, + 291, + 709 + ], + "type": "text", + "content": "- Enhancing Comparative Reasoning for Pattern Recognition: While many decryption tasks in CipherBank are straightforward for humans, models frequently fail to derive correct transformation rules from provided exemplars. Strengthening contrastive reasoning mechanisms can enable models to better differentiate encryption structures, facilitating more effective pattern recognition and decryption." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 81, + 721, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 721, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 81, + 721, + 291, + 775 + ], + "type": "text", + "content": "- Addressing Overthinking in Model Reasoning: Experimental results indicate that reasoning models exhibit superior performance on complex tasks but underperform on sim" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 324, + 394, + 526, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 394, + 526, + 543 + ], + "spans": [ + { + "bbox": [ + 324, + 394, + 526, + 543 + ], + "type": "text", + "content": "pler problems. Analysis of inference trajectories reveals a tendency toward recursive self-evaluation, where models continuously revise their approach, even when a straightforward solution is available. For example, in the Reverse cipher, models occasionally attempt unnecessarily complex reasoning paths instead of applying direct positional transformations. Mitigating such overthinking behaviors could enhance efficiency and robustness in logical reasoning." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 552, + 525, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 552, + 525, + 607 + ], + "spans": [ + { + "bbox": [ + 302, + 552, + 525, + 607 + ], + "type": "text", + "content": "Addressing these limitations will bridge the gap between linguistic fluency and structured cryptographic reasoning, making LLMs more robust in real-world encryption scenarios." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5965" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 36 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2025/CitaLaw_ Enhancing LLM with Citations in Legal Domain/81aca763-e861-40de-ad3f-640af6cf3d30_content_list.json b/2025/CitaLaw_ Enhancing LLM with Citations in Legal Domain/81aca763-e861-40de-ad3f-640af6cf3d30_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..70a4c97867176a69c6007b8bf67d6fa64d8646a6 --- /dev/null +++ b/2025/CitaLaw_ Enhancing LLM with Citations in Legal Domain/81aca763-e861-40de-ad3f-640af6cf3d30_content_list.json @@ -0,0 +1,2438 @@ +[ + { + "type": "text", + "text": "CitaLaw: Enhancing LLM with Citations in Legal Domain", + "text_level": 1, + "bbox": [ + 193, + 90, + 805, + 111 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kepu Zhang $^{1}$ , Weijie Yu $^{2*}$ , Sunhao Dai $^{1}$ , Jun Xu $^{1}$", + "bbox": [ + 287, + 137, + 712, + 156 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1Gaoling School of Artificial Intelligence, Renmin University of China", + "bbox": [ + 211, + 156, + 784, + 173 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ University of International Business and Economics", + "bbox": [ + 280, + 173, + 715, + 187 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "kepuzhang@ruc.edu.cn, yu@uibep.edu.cn", + "bbox": [ + 319, + 190, + 677, + 206 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 260, + 260, + 339, + 275 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this paper, we propose 
Citalaw, the first benchmark designed to evaluate LLMs' ability to produce legally sound responses with appropriate citations. Citalaw features a diverse set of legal questions for both laypersons and practitioners, paired with a comprehensive corpus of law articles and precedent cases as a reference pool. This framework enables LLM-based systems to retrieve supporting citations from the reference corpus and align these citations with the corresponding sentences in their responses. Moreover, we introduce syllogism-inspired evaluation methods to assess the legal alignment between retrieved references and LLM-generated responses, as well as their consistency with user questions. Extensive experiments on 2 open-domain and 7 legal-specific LLMs demonstrate that integrating legal references substantially enhances response quality. Furthermore, our proposed syllogism-based evaluation method exhibits strong agreement with human judgments.", + "bbox": [ + 141, + 288, + 460, + 602 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 114, + 615, + 260, + 630 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Generating responses supported by citations, such as relevant law articles and precedent cases, is essential for ensuring the trustworthiness of large language models (LLMs) in legal tasks. For laypersons seeking legal advice (Fei et al., 2023), LLM-generated responses grounded in citations provide verifiable information, fostering trust in the system. 
Conversely, for legal practitioners such as lawyers and judges, citations serve as supportive evidence that aids in analyzing complex cases, validating legal arguments, and ensuring decisions align with established legal principles (Li et al., 2024; Zhong et al., 2020; Abdallah et al., 2023).", + "bbox": [ + 112, + 640, + 489, + 848 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recently, a growing body of benchmark research (Gao et al., 2023a; Li et al., 2023) has focused on enabling LLMs to provide citations for the", + "bbox": [ + 112, + 851, + 489, + 898 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8b18c275924d2d7c798f99f411d1b1dd24b37d58177b2cd07259e65b86050300.jpg", + "image_caption": [ + "Figure 1: The framework of our CitaLaw." + ], + "image_footnote": [], + "bbox": [ + 515, + 256, + 878, + 388 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "statements they generate. For instance, ALCE (Gao et al., 2023b) introduces a benchmark designed to evaluate the ability of LLMs to generate citation-supported outputs, aiming to improve factual accuracy. WebCiteS (Deng et al., 2024) provides a curated database of manually annotated summaries and citations to enhance performance in text summarization and citation generation.", + "bbox": [ + 507, + 435, + 884, + 564 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "While these studies have made notable progress in general domains, they face significant challenges when applied to the legal domain. First, laypersons and legal practitioners interact with LLMs differently and have distinct expectations for citations. Laypersons typically seek legal advice and rely on citations to verify the accuracy of LLM responses, whereas legal practitioners pose more complex queries, using LLMs for legal reasoning, with citations serving as supportive evidence. Existing studies fail to address these differences, leading to unsatisfactory performance in real-world applications. 
Second, existing methods often fall short in providing the diverse references required in legal contexts, such as law articles and precedent cases. Law articles establish the foundational legal framework, while precedent cases offer concrete examples and interpretive guidance. These two types of references inherently align with the distinct characteristics of civil and common law systems. Third, traditional citation evaluation measures, such as ROUGE (Lin, 2004), rely on surface-level similar", + "bbox": [ + 507, + 568, + 884, + 921 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Corresponding author", + "bbox": [ + 134, + 906, + 285, + 920 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "11183", + "bbox": [ + 475, + 927, + 524, + 940 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "Findings of the Association for Computational Linguistics: ACL 2025, pages 11183-11196", + "bbox": [ + 220, + 945, + 778, + 958 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "July 27 - August 1, 2025 ©2025 Association for Computational Linguistics", + "bbox": [ + 268, + 959, + 727, + 972 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/b0903bfb6decf7c8b09ba1de2359ba276ce2f70217a00854a97e5ed3d8807b8a.jpg", + "image_caption": [ + "Figure 2: Examples from the two subsets of CitaLaw, with text in red, blue, and yellow representing the three dimensions of the syllogism: major premise, minor premise (circumstances, illegal acts), and conclusion (legal decisions), respectively. [A] and [C] denote citations to relevant law articles and precedent cases, respectively." + ], + "image_footnote": [], + "bbox": [ + 122, + 80, + 873, + 305 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "ities and are often insufficient to assess the alignment between references and LLM-generated responses. 
In the legal domain, effective evaluation requires a deeper understanding of logical and semantic relationships.", + "bbox": [ + 112, + 374, + 487, + 455 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To overcome the above challenges, we propose CitaLaw, the first benchmark tailored to evaluate LLMs' capabilities in generating legally grounded responses supported by accurate and context-aware citations. As shown in Figure 1, CitaLaw incorporates four distinct legal-specific features:", + "bbox": [ + 112, + 458, + 487, + 555 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1) CitaLaw has two subsets tailored for laypersons and practitioners, with examples in Figure 2. Laypersons typically ask shorter, conversational questions, while practitioners often pose specialized, detailed questions.", + "(2) CitaLaw includes a retrieval corpus comprising two commonly used references: law articles, which provide clear and concise guidelines for addressing user questions, and precedent cases, which offer legal reasoning and support for judicial decisions. Recognizing the distinct needs of laypersons and practitioners, we provide only law articles for laypersons to ensure clarity, while practitioners have access to both law articles and precedent cases to support more complex legal reasoning.", + "(3) In addition to traditional global-level metrics such as MAUVE (Pillutla et al., 2021), we propose a syllogism-based evaluation method to assess both the response correctness and the citation quality. This method provides a more granular evaluation by focusing on three key dimensions: circumstances, illegal acts, and legal decisions." + ], + "bbox": [ + 112, + 558, + 487, + 920 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "(4) We consider two types of response generation methods. The first type, Citation-Guided Generation (CGG), involves generating responses by incorporating retrieved references during generation. 
The second type, Answer Refinement Generation (ARG), refines the LLMs' initial response (CloseBook) by retrieving and incorporating reference information. This category includes ARG-Q, which retrieves citations using only the user query, and ARG-QA, which retrieves citations using both the user query and the LLM's initial response.", + "bbox": [ + 507, + 374, + 884, + 551 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Extensive experiments on two open-domain and seven legal-specific LLMs reveal the following key insights: 1) Incorporating legal references into the LLM significantly improves the quality of responses; 2) Including references as part of the LLM's input consistently outperforms answer-refinement methods; 3) Leveraging references to refine the LLM's responses yields better alignment of responses and references. 4) For fine-tuning LLMs in legal scenarios, incorporating law articles, syllogistic reasoning, and full-scale fine-tuning achieves promising performance. 5) Open-domain LLMs surprisingly outperform legal-specific LLMs in certain scenarios; 6) Human evaluations show a strong correlation with our syllogism-based methods.", + "bbox": [ + 507, + 552, + 882, + 791 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, our contributions are as follows:", + "bbox": [ + 527, + 795, + 865, + 810 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- To the best of our knowledge, CitaLaw is the first benchmark designed to evaluate the capability of LLMs to generate legally grounded responses with accurate and context-aware citations. 
CitaLaw includes questions tailored to both laypersons and practitioners, paired", + "bbox": [ + 531, + 825, + 882, + 921 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "11184", + "bbox": [ + 477, + 927, + 524, + 940 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "with a citation corpus comprising law articles and precedent cases.", + "bbox": [ + 149, + 84, + 487, + 116 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose a two-level evaluation framework that combines global-level metrics with a syllogism-based reasoning approach. Additionally, we explore two mainstream methods for legal response generation: citation-guided and answer refinement.", + "- Through extensive experiments on two open-domain and seven legal-specific LLMs, we demonstrate the effectiveness of integrating legal references into response generation and validate our syllogism-based evaluation method. Additionally, we provide actionable insights for the practical deployment of LLMs in legal scenarios." + ], + "bbox": [ + 136, + 128, + 487, + 365 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 114, + 379, + 270, + 394 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "LLM for Legal Task. A amount of work has explored applying LLMs to legal tasks (Savelka et al., 2023; Wu et al., 2023b; Yu et al., 2022a; Blair-Stanek et al., 2023). Building LLMs tailored for legal scenarios is a popular direction (Yue et al., 2023; Wu et al., 2023a; He et al., 2023). There are also some benchmarks that explore the capabilities of LLMs in legal tasks. LawBench (Fei et al., 2023) evaluates LLMs' legal knowledge across three cognitive aspects. LAiW (Dai et al., 2023) assesses LLMs' legal reasoning abilities based on legal practice logic. LexEval (Li et al., 2024) evaluates LLMs' legal capabilities based on a new legal cognitive ability classification system. 
However, none of them have considered enhancing the trustworthiness of LLMs in legal scenarios by generating outputs with citations.", + "bbox": [ + 112, + 405, + 489, + 678 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Citation in LLM. Attribution (Li et al., 2023) in LLMs refers to providing supporting evidence for the answers generated by the model, presented in the form of citations. ALCE (Gao et al., 2023b) is an automated benchmark for evaluating LLMs' ability to generate outputs with citations, aimed at improving the factual accuracy of the generated responses. WebCiteS (Deng et al., 2024) provides a database containing 7,000 manually annotated summaries and citations to enhance LLMs' capabilities in summarization and citation. RARR (Gao et al., 2023a) enhances LLM outputs by automatically adding citations, and modifying the responses. ExpertQA (Malaviya et al., 2024) verifies and modifies citations through expert review to ensure re", + "bbox": [ + 112, + 678, + 489, + 921 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "liability. In contrast to the above works, CitaLaw focuses specifically on citation in legal scenarios.", + "bbox": [ + 507, + 84, + 880, + 116 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Task Setup and Dataset Construction", + "text_level": 1, + "bbox": [ + 507, + 129, + 865, + 145 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Suppose we have a legal corpus $D$ , which consists of either a collection of precedent cases $(D_{l})$ or law articles $(D_{c})$ . Given a user question $x$ posed by either a layperson or a practitioner, the LLM-based system is tasked with retrieving supportive citations from $D$ and generating a legally grounded response $y$ . The response $y$ comprises a list of $n$ sentences, i.e., $y = [s_1,\\dots ,s_n]$ , where each sentence $s_i$ refers to at most one corresponding citation. 
As illustrated in Figure 2, the system is further required to attach each citation to its relevant sentence, with \"[A]\" and \"[C]\" denoting references to law articles and precedent cases, respectively.", + "bbox": [ + 507, + 156, + 884, + 365 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To enable the evaluation of this task, we construct the specialized dataset (Table 1 shows the statistics) as follows:", + "bbox": [ + 507, + 367, + 882, + 413 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To simulate the behavior of laypersons, we include questions that are more conversational, lack detailed case descriptions, and are relatively short in length. We use the consultation section from LawBench (Fei et al., 2023), which collects user queries from the Hualv website1 and answers provided by lawyers or legal consulting firms.", + "bbox": [ + 507, + 414, + 882, + 527 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To simulate the behavior of legal practitioners, we include questions that are more professional, often accompanied by detailed case descriptions, and generally longer. For this purpose, we use the open-ended question section from LexEval (Li et al., 2024), which consists of subjective questions from the National Uniform Legal Profession Qualification Examination. These questions are particularly challenging for LLMs, requiring them to understand the case fully and apply legal knowledge accurately to generate answers.", + "bbox": [ + 507, + 529, + 882, + 705 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In terms of the corpus, we construct a comprehensive corpus from multiple sources, including law articles and precedent cases. Specifically, for law articles, we collect approximately 50,000 documents from LexiLaw $^2$ , covering areas such as Civil Law, Criminal Law, and judicial interpretations. For precedent cases, we include both criminal and civil cases. 
Criminal cases are sourced from the LeCaRD legal retrieval dataset (Ma et al., 2021b), ELAM (Yu et al., 2022b), and civil cases from the CAIL legal summary", + "bbox": [ + 507, + 707, + 884, + 883 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "$^{1}$ www.66law.com", + "bbox": [ + 532, + 892, + 643, + 906 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "$^{2}$ https://github.com/CSHaitao/LexiLaw", + "bbox": [ + 532, + 906, + 771, + 920 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "11185", + "bbox": [ + 477, + 927, + 524, + 940 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/1538c5a5008ee0e70ddbaa813d5a4617650be79590a5b11ce02b1f164dafec90.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Dataset#QLenQLenAQ Type
Layperson50057.62107.40Question
Practitioner500618.96193.46Case + Question
", + "bbox": [ + 117, + 82, + 497, + 133 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Table 1: Dataset statistics. #Q indicates the number of questions, $\\mathrm{Len}_Q$ and $\\mathrm{Len}_A$ denote the average lengths of questions and gold answers, and Q Type refers to the question type.", + "bbox": [ + 112, + 143, + 489, + 200 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "dataset, LJP-MSJudge (Ma et al., 2021a), and the pre-training data of fuzi.mingcha (Wu et al., 2023a). As a supplement to precedent cases, we also incorporate question-and-answer pairs from fine-tuning datasets of legal LLMs as part of the precedent cases. These QA pairs are collected from DISC-LawLLM (Yue et al., 2023), LawGPT_zh (Liu et al., 2023), and HanFei (He et al., 2023). In total, the constructed corpus contains approximately 500,000 documents, ensuring sufficient coverage of both law articles and precedent cases to support diverse legal tasks.", + "bbox": [ + 112, + 222, + 489, + 414 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4 Method", + "text_level": 1, + "bbox": [ + 112, + 426, + 218, + 439 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1 Response Generation", + "text_level": 1, + "bbox": [ + 112, + 451, + 327, + 467 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We consider two types of methods in this study.", + "bbox": [ + 112, + 470, + 468, + 487 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Citation-Guided Generation (CGG) produces response $y_{cgg}$ given a user question $x$ by referring retrieved relevant document(s) $D_R$ :", + "bbox": [ + 112, + 488, + 485, + 536 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\ny _ {\\mathrm {c g g}} = f _ {\\mathrm {L L M}} \\left(x, D _ {R}, p _ {1}\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 203, + 544, + 485, + 560 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $f_{\\mathrm{LLM}}$ denotes a 
open-domain or a legal specific LLM; $p_1$ is the direct generation prompt. All prompt settings are detailed in Appendix A.", + "bbox": [ + 112, + 568, + 487, + 615 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Answer Refinement Generation (ARG) is a two-stage method that generates the final response $y_{\\mathrm{arg}}$ by refining the LLM's initial response $y_{\\mathrm{init}}$ through the retrieval and incorporation of reference information. This process can be formulated as:", + "bbox": [ + 112, + 615, + 487, + 696 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\ny _ {\\text {i n i t}} = f _ {\\mathrm {L L M}} (x, p _ {2}), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 220, + 705, + 485, + 720 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $p_2$ is the prompt instructing the LLM to directly generate an initial response without reference information. We refer to this step as CloseBook. The initial response $y_{\\mathrm{init}}$ is then refined as:", + "bbox": [ + 112, + 728, + 489, + 793 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\ny _ {\\text {a r g}} = f _ {\\mathrm {L L M}} \\left(y _ {\\text {i n i t}}, D _ {R}, p _ {3}\\right), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 194, + 801, + 485, + 816 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $p_3$ is the prompt guiding the LLM to refine the $y_{\\mathrm{init}}$ using the retrieved documents $D_R$ .", + "bbox": [ + 112, + 825, + 485, + 856 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Laypersons and practitioners interact with LLMs differently and have distinct expectations for citations. When $x$ is submitted by a layperson, the corresponding $D_{R}$ consists of relevant law articles. 
In", + "bbox": [ + 112, + 857, + 489, + 921 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "contrast, when $x$ is submitted by a practitioner, the corresponding $D_R$ includes both relevant law articles and precedent cases. The process for retrieving $D_R$ from $D$ is detailed in the next subsection.", + "bbox": [ + 507, + 84, + 882, + 149 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2 Citation Retrieval", + "text_level": 1, + "bbox": [ + 507, + 160, + 697, + 174 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We explore state-of-the-art open-domain dense retriever BGE (Xiao et al., 2023), along with two legal-specific dense retrievers, CriminalBERT (Zhong et al., 2019) and Civil-BERT (Zhong et al., 2019). We also investigate two types of retrieval queries: $x$ (the user question alone, ARG-Q) and $[x; y_{\\mathrm{init}}]$ (the concatenation of the user query $x$ and the initial response $y_{\\mathrm{init}}$ , where $[]$ denotes the concatenation operation, ARG-QA). The impact of different retrieval models on performance will be analyzed in the experiments.", + "bbox": [ + 507, + 181, + 882, + 359 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.3 Citation Attachment", + "text_level": 1, + "bbox": [ + 507, + 370, + 717, + 385 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Building on the retrieved citations, this subsection outlines the process of attaching these law articles or precedents to specific sentences in the LLM-generated responses. This process involves answering two key questions:", + "bbox": [ + 507, + 392, + 882, + 470 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "What kind of sentences can be associated with citations? We utilize co-occurring words and legal entity extraction to identify sentences that explicitly reference legal concepts, actions, or terms relevant to the retrieved citations. 
Specifically, we construct a pool of legal terminologies using THUOCL3 and LaWGPT (Zhou et al., 2024). A sentence is considered eligible if it contains any of the terminologies from this pool. Additionally, we use SpaCy (Honnibal et al., 2020) to extract legal entities from each sentence. If a sentence includes legal entities, it is also deemed eligible for citation attachment.", + "bbox": [ + 507, + 473, + 882, + 665 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "How are citations attached to the identified sentences? If a sentence is deemed eligible for citation attachment, we associate it with retrieved citations as follows. For the laypersons, the retrieved law article $c_{l} \\in D_{l}$ is attached to the most relevant sentence $s_{k} \\in y$ :", + "bbox": [ + 507, + 667, + 882, + 763 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nC _ {\\text {L a y}} = \\left\\{\\left(s _ {k}, c _ {l}\\right) \\mid s _ {k} = \\underset {s _ {i} \\in y} {\\arg \\max } \\operatorname {s i m} \\left(s _ {i}, c _ {l}\\right) \\right\\}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 521, + 776, + 882, + 815 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $(s_k, c_l)$ represents attaching the reference $c_l$ to the sentence $s_i$ , and $\\mathrm{sim}(\\cdot)$ is computed using sentence-BERT (Reimers, 2019). We set $|C_{\\mathrm{Lay}}| = 1$ because, typically, a layperson's query pertains to only one specific legal article. For practitioners,", + "bbox": [ + 507, + 816, + 882, + 897 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "3https://github.com/thunlp/THUOCL", + "bbox": [ + 529, + 904, + 759, + 920 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "11186", + "bbox": [ + 477, + 927, + 524, + 940 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "we attach the retrieved law article in the same way as for laypersons. 
Additionally, we associate the retrieved precedent cases $c_{c} \\in D_{c}$ with each $s_{i} \\in y$ , which is formulated as:", + "bbox": [ + 112, + 84, + 489, + 148 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} C _ {\\text {P r a}} = \\left\\{\\left(s _ {k}, c _ {l}\\right) \\mid s _ {k} = \\underset {s _ {i} \\in y} {\\arg \\max } \\operatorname {s i m} \\left(s _ {i}, c _ {l}\\right) \\right\\} (5) \\\\ \\cup \\{(s _ {i}, c _ {c}) |, c _ {c} = \\underset {c _ {j} \\in D _ {c}} {\\arg \\max } \\operatorname {s i m} (s _ {i}, c _ {j}) \\}, (5) \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 121, + 156, + 489, + 214 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $|D_c| = 3$ , meaning each response $y$ can be associated with up to three precedents4.", + "bbox": [ + 112, + 224, + 487, + 258 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5 Evaluation", + "text_level": 1, + "bbox": [ + 112, + 268, + 243, + 282 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "CitaLaw provides a comprehensive evaluation framework incorporating metrics for fluency, correctness, and citation quality. This framework is divided into two levels of analysis: global level and the proposed syllogism level.", + "bbox": [ + 112, + 294, + 489, + 373 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Syllogism, a foundational framework in legal reasoning, comprises three key components: the major premise, the minor premise, and the conclusion. In our legal context, these correspond to the relevant law article or precedent case (major premise), the factual circumstances and actions of a specific case (minor premise), and the resulting legal decision (conclusion). 
By integrating this syllogistic framework, CitaLaw goes beyond surface-level correctness to evaluate the logical coherence and alignment of LLM-generated responses with established legal principles.", + "bbox": [ + 112, + 374, + 489, + 568 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.1 Fluency (Style Consistency)", + "text_level": 1, + "bbox": [ + 112, + 577, + 378, + 593 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To ensure the LLM-generated responses align with the user's requirements, the system must adapt its style based on the user's background. For laypersons, responses should avoid excessive technical jargon to ensure accessibility and comprehension. Conversely, responses for legal practitioners should adopt a formal and professional tone to maintain credibility and utility. To achieve this aim, we concatenate the user query and the LLM-generated response and apply MAUVE (Pillutla et al., 2021) to assess their style consistency.", + "bbox": [ + 112, + 598, + 489, + 776 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.2 Correctness", + "text_level": 1, + "bbox": [ + 112, + 785, + 253, + 800 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "At the global level, we use established metrics ROUGE (Lin, 2004) and BERTScore (Zhang et al., 2019). ROUGE measures word-level overlap between the generated and labeled responses, with scores reported for ROUGE-1, ROUGE-2, and", + "bbox": [ + 112, + 806, + 489, + 887 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "ROUGE-L. BERTScore captures semantic similarity between the generated and labeled responses, and we report the F-score (BERT-F) for evaluation. 
These metrics assess the overall correctness of LLM-generated responses.", + "bbox": [ + 507, + 84, + 884, + 165 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "At the syllogism level, we leverage the Qwen2 (Yang et al., 2024) to extract key components, including the circumstances, illegal acts, and legal decisions. We use sentence-BERT (Reimers, 2019) to measure the alignment between the labeled responses and the generated outputs across these dimensions, resulting in $\\mathrm{Correct}_{\\mathrm{c}}$ , $\\mathrm{Correct}_{\\mathrm{a}}$ , and $\\mathrm{Correct}_{\\mathrm{d}}$ . This syllogism-level evaluation allows us to assess the logical coherence of the responses, ensuring that they align with the underlying legal reasoning principles.", + "bbox": [ + 507, + 167, + 885, + 344 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.3 Citation Quality", + "text_level": 1, + "bbox": [ + 507, + 361, + 684, + 376 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As previously discussed, we assume that a question submitted by laypersons typically corresponds to a specific law article. Therefore, at the global level, we evaluate the citation quality of the retrieved law article (premise) by measuring its entailment with the associated sentence in the LLM's response (hypothesis). Specifically, we use an NLI model to compute $\\text{Cita}_{\\text{Law}}$ , which quantifies the degree to which the law article entails the attached sentence. This metric reflects how effectively the response aligns with the cited law article. We employ DISC-LawLLM (Yue et al., 2023) as the NLI model due to its strong agreement with human evaluations (as discussed in Sec. 6.3) and its superior performance compared to other NLI models (as detailed in Sec. 
6.5).", + "bbox": [ + 507, + 387, + 884, + 643 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "At the syllogism level, we evaluate the quality of precedent case citations by examining three key components: circumstances, illegal acts, and legal decisions. After extracting these elements from both the retrieved cases and the associated sentence in the LLM's response, we utilize DISC-LawLLM to assess the entailment for each component. This evaluation yields three distinct scores: $\\text{Cita}_{\\text{c}}$ for circumstances, $\\text{Cita}_{\\text{a}}$ for illegal acts, and $\\text{Cita}_{\\text{d}}$ for legal decisions, providing a more detailed and nuanced assessment of citation quality within the syllogism framework.", + "bbox": [ + 507, + 646, + 885, + 839 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "6 Experiments", + "text_level": 1, + "bbox": [ + 507, + 858, + 655, + 875 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We conduct extensive experiments on our CitaLaw using the proposed two-level evaluation methods.", + "bbox": [ + 507, + 889, + 882, + 921 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "4 Considering the input window size of LLMs, we set up to retrieve 3 precedent cases.", + "bbox": [ + 112, + 894, + 487, + 921 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "11187", + "bbox": [ + 477, + 927, + 524, + 940 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/2d46d4ac9531fb6d2b19fb2e82f5e1e1272b4f917c051d82cde4acedf12f3f1f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MetricFluencyCorrectnessCitationAll
CategoryModelMauveRouge-1Rouge-2Rouge-LBERT-FCorrecteCorrectaCorrectdCitaLawAvg
Llama3 (Llam3-8B-Instruct)CloseBook22.6316.471.9513.3458.4673.0568.2466.8767.3843.15
CGG61.0123.976.0517.9165.9467.2977.3174.9586.7053.46
ARG-Q61.2723.175.6517.8364.2369.0475.4574.4779.1052.24
ARG-QA51.8323.736.9618.5364.8471.3774.8174.6680.8051.95
Qwen2 (Qwen2-7B-Instruct)CloseBook21.0415.292.2711.3158.3970.8971.7169.9372.3543.69
CGG75.1022.264.7715.4165.2867.5078.6277.8277.5953.82
ARG-Q66.5520.864.5015.4264.5966.9677.8275.6681.4852.65
ARG-QA66.8021.734.7816.3464.8569.3176.3575.0582.8353.11
Legal LLM (CGG)DISC-LawLLM72.7022.464.1415.4865.0665.2178.5576.1783.4653.69
fuzi.mingcha56.5824.545.7017.4865.8663.2879.5677.9481.6452.51
LexiLaw71.8924.966.2518.9165.6868.8978.1276.7282.4254.87
Tailing13.9515.934.1312.8959.4772.0069.1168.3882.6744.28
zhihai37.5020.984.5913.6964.5467.7577.6876.9977.1648.99
LawGPT_zh51.6023.335.2816.1765.1463.7279.4377.5286.1852.04
Hanfei51.1223.955.1918.7665.1270.8375.0174.2176.9751.24
", + "bbox": [ + 117, + 82, + 878, + 275 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2: Performance comparisons on the Layperson dataset. The best performance is indicated in bold.", + "bbox": [ + 144, + 286, + 847, + 300 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "6.1 Experimental Settings", + "text_level": 1, + "bbox": [ + 112, + 326, + 334, + 343 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "6.1.1 Evaluated Models", + "text_level": 1, + "bbox": [ + 112, + 347, + 315, + 361 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We selected two categories of LLMs for testing: The legal LLMs include (1) fuzi.mingcha (6B) (Wu et al., 2023a), (2) LexiLaw5 (6B), (3) Tailing6 (7B), (4) DISC-LawLLM (13B) (Yue et al., 2023), (5) zhihai (7B) (Wu et al.), (6) LawGPT_zh (6B) (Liu et al., 2023), (7) HanFei (7B) (He et al., 2023). The open-domain LLMs include Qwen2 (7B) (Yang et al., 2024) and Llama3 (8B) (AI@Meta, 2024). For these models, we tested all methods mentioned in Sec. 4, including: (1) CloseBook, (2) CGG, (3) ARG-Q and (4) ARG-QA. For the legal LLMs, we generate responses using CGG. Appendix B has the details.", + "bbox": [ + 112, + 366, + 489, + 575 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "6.1.2 Implementation Details", + "text_level": 1, + "bbox": [ + 112, + 583, + 359, + 599 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our implementation is based on the Huggingface Transformers library (Wolf et al., 2020) with PyTorch. We use bge-base-zh-v1.5 (Xiao et al., 2023) as the retrieval model and conduct all experiments on Nvidia A6000 GPUs. 
Additional details are provided in Appendix C and https://github.com/ke-01/CitaLaw.", + "bbox": [ + 112, + 602, + 489, + 715 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "6.2 Main Results", + "text_level": 1, + "bbox": [ + 112, + 725, + 265, + 739 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The results on the Layperson and Practitioner datasets are presented in Table 2 and Table 3. We analyze the results from three perspectives:", + "bbox": [ + 112, + 746, + 487, + 794 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "6.2.1 Performance of Open-Domain LLM", + "text_level": 1, + "bbox": [ + 112, + 803, + 448, + 818 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Legal references improve the response quality. Compared to CloseBook, the overall performance in CGG, ARG-Q, and ARG-QA has improved. This indicates that incorporating references into the", + "bbox": [ + 112, + 822, + 487, + 885 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "LLM helps it better understand both the question and the required direction for the answer, thereby enhancing performance in terms of style consistency, correctness, and citation quality.", + "bbox": [ + 507, + 326, + 882, + 391 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "CGG achieves better response quality. We observe that CGG achieves optimal performance, especially response correctness, suggesting that incorporating legal references into the LLM input is more effective than refining the LLM's response. This is because including legal knowledge as input allows the LLM to consider relevant context when generating replies, whereas refining the response might lead to excessive alterations.", + "bbox": [ + 507, + 394, + 882, + 538 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "ARG improves the alignment of responses and references. We can observe that ARG outperforms CGG in citation-related metrics overall. 
This is because CGG merely incorporates reference information as input, which may lead the model to overlook some reference details during the generation process. In contrast, ARG modifies the answer based on the references after generation, making it easier to ensure the completeness of citations.", + "bbox": [ + 507, + 543, + 882, + 687 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Chinese data fine-tuning can bring benefits. Both the Layperson and Practitioner datasets are Chinese datasets. Qwen2 (Fine-tuning on more Chinese data) achieved better performance than Llama3, demonstrating the benefits of using Chinese data for fine-tuning.", + "bbox": [ + 507, + 692, + 882, + 788 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "CloseBook tends to state circumstances. CloseBook performs better in terms of correctness regarding circumstances compared to the other dimensions. This suggests that when judicial knowledge references are not used, the LLM is more likely to repeat the circumstances itself, rather than providing an appropriate response to the illegal acts and the legal decision.", + "bbox": [ + 507, + 793, + 882, + 920 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "5https://github.com/CSHaitao/LexiLaw", + "bbox": [ + 134, + 892, + 376, + 906 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "$^{6}$ https://github.com/DUTIR-LegalIntelligence/Tailing", + "bbox": [ + 134, + 906, + 462, + 920 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "11188", + "bbox": [ + 477, + 927, + 524, + 940 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/a066d02470012f39beeccbe312d971a870db0dcd6faf2866a40a68706574cc97.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MetricFluencyCorrectnessCitationAll
CategoryModelMauveRouge-1Rouge-2Rouge-LBERT-F\\( Correct_c \\)\\( Correct_a \\)\\( Correct_d \\)\\( Cita_{Law} \\)\\( Cita_c \\)\\( Cita_a \\)\\( Cita_d \\)Avg
Llama3 (Llam3-8B-Instruct)CloseBook23.8123.057.2919.2362.8376.3071.0570.3263.4966.9568.8365.4651.55
CGG36.3726.157.8419.5565.6067.1976.3677.7373.5868.2367.8767.6554.51
ARG-Q42.6520.395.0715.7562.8270.4973.6772.0068.6169.4870.5168.3453.31
ARG-QA36.9418.644.5614.6361.5071.0772.3870.3269.4068.9570.4269.5152.36
Qwen2 (Qwen2-7B-Instruct)CloseBook61.9130.4410.5423.5367.5574.3579.8478.5268.5568.0370.3069.7158.61
CGG39.6631.0110.7523.4369.0673.4980.1181.1170.3767.8269.5370.0157.20
ARG-Q41.0220.575.1415.6263.3167.8474.7173.9473.0168.9673.2073.6454.25
ARG-QA21.9716.673.0612.4760.7067.4971.1670.8871.7669.0171.0471.3350.63
Legal LLM (CGG )DISC-LawLLM38.1121.376.7516.9660.8473.4272.1471.7963.9267.4268.2265.4552.20
fuzi.mingcha66.5528.959.5122.6967.0670.7376.6677.4765.9266.9469.2868.6957.54
LexiLaw57.7429.018.9323.8365.6370.3676.6775.9765.2866.9368.8968.0356.44
Tailing50.1626.529.1622.4465.3575.9673.8370.3064.6566.9467.5666.0954.91
zhihai26.2921.386.0015.5364.4765.5976.3877.3767.9366.3063.1759.8250.85
LawGPT_zh47.1029.168.9222.5567.6469.4879.3780.2366.9068.3867.5568.9456.35
HanFei75.7232.9812.4626.9168.7273.2578.6378.1167.0367.4568.6367.7359.80
", + "bbox": [ + 117, + 79, + 878, + 244 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3: Performance comparisons on the Practitioner dataset. The best performance is indicated in bold.", + "bbox": [ + 142, + 256, + 850, + 269 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6.2.2 Performance of Legal LLM", + "text_level": 1, + "bbox": [ + 112, + 286, + 389, + 300 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Law article training achieves gains. In the Layperson dataset, LexiLaw achieves optimal performance overall. This is because the questions in the Layperson dataset often require only law articles to provide answers clearly, and LexiLaw's training explicitly used law articles, allowing it to effectively handle such questions.", + "bbox": [ + 112, + 306, + 487, + 418 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Full-parameter training offers advantages. Hanfei achieves the best results in the Practitioner dataset, as it is a fully parameter-trained legal LLM. Full-parameter fine-tuning allows it to effectively simulate a legal expert, thus performing well.", + "bbox": [ + 112, + 419, + 489, + 500 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Syllogistic reasoning is useful. fuzi.mingcha performs well on syllogism evaluation metrics, particularly on the Layperson dataset. This is due to its fine-tuning of syllogism judgment data.", + "bbox": [ + 112, + 501, + 489, + 565 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6.2.3 Open Domain LLM vs. Legal LLM", + "text_level": 1, + "bbox": [ + 112, + 576, + 452, + 590 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Impact of LLM Backbone. We can observe that some legal LLMs perform worse than open-domain LLMs. This is because Qwen2 and Llama3 are the latest open-domain LLMs, and their overall capabilities have significantly improved. 
In contrast, most legal LLMs are built on earlier generations of LLMs, which have weaker base models, leading to poorer overall performance.", + "bbox": [ + 112, + 596, + 489, + 724 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Effectiveness of legal knowledge. Overall, the upper limit of legal LLMs is higher than that of open-domain LLMs. This is because legal LLMs, after extensive training on legal knowledge, have developed strong capabilities in solving legal issues. As a result, even though their base models are outdated, they can still perform effectively.", + "bbox": [ + 112, + 726, + 489, + 839 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6.3 Human Evaluation", + "text_level": 1, + "bbox": [ + 112, + 851, + 310, + 865 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section, we compared the syllogism-level metric with human evaluation. Details of legal human annotators can be found in Appendix D.", + "bbox": [ + 112, + 873, + 489, + 921 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The syllogism-level evaluation of citation quality is divided into two stages: Stage 1: Extracting key components. Stage 2: Assessing the entailment using an NLI model.", + "bbox": [ + 507, + 286, + 882, + 350 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Stage 1: We randomly selected 50 questions each from the Layperson and Practitioner datasets. After splitting the cases into individual clauses, annotators were provided with the full case and its clauses. They do a three-class classification of each clause. The Qwen2's annotations were then compared with human annotations. 
The Cohen's kappa coefficient (Cohen, 1960) of 0.7876 indicates substantial agreement (0.61-0.80) between the model's and human annotators' labels.", + "bbox": [ + 507, + 351, + 884, + 511 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Stage 2: We randomly selected 50 questions from the Practitioner dataset and used Qwen2 to extract key components of pairs of responses and citations. Annotators assessed the degree to which the citations entailed the corresponding response components using a 5-point scale (1: low, 5: high), with descriptions provided in Appendix D. The entailment probabilities given by DISC-LawLLM, which range from 0 to 1, were scaled to the same 1-5 range by multiplying by 5 and rounding. We then compared the scaled model outputs with the human evaluations and calculated Cohen's kappa coefficient. The kappa score of 0.6923 again indicates substantial agreement (0.61-0.80) between the model and human judgments.", + "bbox": [ + 507, + 513, + 882, + 755 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "6.4 Effects on Different Retrieval Models", + "text_level": 1, + "bbox": [ + 507, + 769, + 847, + 784 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We selected BGE as the retrieval model in the main experiment. In this section, we explore the impact of using different retrieval models. Specifically, we evaluate Criminal-BERT (Zhong et al., 2019) and Civil-BERT (Zhong et al., 2019), two legal domain models based on BERT, fine-tuned on large-scale criminal and civil law documents, respectively. We replaced the retrieval model and tested the CGG", + "bbox": [ + 507, + 791, + 882, + 921 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "11189", + "bbox": [ + 477, + 927, + 524, + 940 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/bb02b0dcacfe9e2343a7e3dce9d3c1df14239b907d3877ece25c69828cb44528.jpg", + "image_caption": [ + "Figure 3: Performance of different retrieval models. 
Lay is short for Layperson dataset and Pra is short for Practitioner dataset." + ], + "image_footnote": [], + "bbox": [ + 119, + 83, + 473, + 186 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1fbc1d82ad26023059c2cbac91cbdf2370eca84e69adf8ce3919ae5ef0ed0727.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 121, + 247, + 478, + 338 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/2710aec7dc9ffc3bb3faa79cb8e962183333b9a0069341b63d3806ae4b8136a3.jpg", + "image_caption": [ + "(a) Methods for CitaLaw metric with Layperson dataset.", + "(b) Metrics for CGG method with Practitioner dataset.", + "Figure 4: The performance of different NLI models when the LLM is Llama." + ], + "image_footnote": [], + "bbox": [ + 121, + 360, + 478, + 453 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "method on the Layperson dataset. The average results across all metrics are shown in Figure 3, with detailed metric results provided in Appendix E.", + "bbox": [ + 112, + 526, + 487, + 575 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As shown, on the Layperson dataset, BGE significantly outperforms the other two models. This is because the dataset consists of questions from laypersons, which are more everyday in nature. In contrast, the two legal BERT models, having been trained extensively on legal cases, show a distributional mismatch with open-domain data, leading to poorer performance. On the Practitioner dataset, which features professional legal questions, BGE still achieves the best performance. This can be attributed to its extensive training on diverse data, likely including some legal data, and its use of more advanced model architectures and techniques. 
However, the two legal BERT models perform comparably to BGE, showcasing the benefits of their specialized training on legal data.", + "bbox": [ + 112, + 577, + 489, + 834 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6.5 Effects on Different NLI Models", + "text_level": 1, + "bbox": [ + 112, + 848, + 413, + 864 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We opted to use legal LLMs as the NLI model in our experiments, as they support longer input lengths and incorporate substantial legal knowl", + "bbox": [ + 112, + 873, + 489, + 921 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "edge. In Section 6.3, we verified that DISC-LawLLM and human achieved good consistency. In this section, we explore the performance of several legal LLMs in the NLI task. Besides DISC-LawLLM, we evaluated LexiLaw, LawGPT_zh, and Hanfei, which demonstrated strong performance in the main experiments.", + "bbox": [ + 507, + 84, + 884, + 197 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In Figures 4 (a), we examined the ability of four legal LLMs to evaluate Llama across the Close-Book, CGG, ARG-Q, and ARG-QA methods using the CitaLaw metric on the Layperson dataset. In Figures 4 (b), we investigated the performance of four legal LLMs in evaluating the CGG method applied to Llama across the metrics CitaLaw, CitaC, Cita a, and Cita d on the Practitioner dataset.", + "bbox": [ + 507, + 198, + 884, + 326 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We can observe that Hanfei provides lower entailment scores across both datasets. This is because it is a fully parameter-tuned legal LLM, which results in a diminished capability to handle the general task of entailment reasoning. Additionally, we found that on the Practitioner dataset, other legal LLMs achieved results closer to those of DISC-LawLLM, while on the Layperson dataset, the performance gap was significantly larger. 
This is because the Practitioner dataset is more judicially oriented, aligning with the knowledge seen during the fine-tuning of legal LLMs. In contrast, due to limited training on general-purpose data, other legal LLMs struggle to accurately determine entailment relationships in the Layperson dataset. Similar conclusions can be drawn when the LLM is Qwen in Appendix F.", + "bbox": [ + 507, + 329, + 884, + 602 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7 Conclusion", + "text_level": 1, + "bbox": [ + 509, + 619, + 638, + 634 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We introduce CitaLaw, a benchmark designed to explore LLMs to generate responses with citations in legal scenarios, thus improving the trustworthiness of LLMs. CitaLaw includes two categories of questions: laypersons and practitioners. For laypersons, CitaLaw provides law articles as citations to help them understand the LLM's response clearly. For practitioners, both law articles and precedent cases are provided as citations, better supporting their needs for complex reasoning. CitaLaw offers global-level and syllogism-level metrics and supports the integration of citations into LLM inputs to guide generation or using citations to refine LLM's response. 
We conducted extensive experiments on 7 legal-domain LLMs and 2 popular open-domain LLMs, providing valuable insights for the deployment of LLMs in legal scenarios.", + "bbox": [ + 507, + 646, + 884, + 921 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "11190", + "bbox": [ + 477, + 927, + 524, + 940 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "8 Limitations", + "text_level": 1, + "bbox": [ + 112, + 84, + 250, + 98 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "While Citalaw provides a robust framework for evaluating LLMs in legal scenarios, several limitations should be acknowledged to guide future extensions of this work.", + "bbox": [ + 112, + 109, + 487, + 173 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "First, the datasets used in CitaLaw are primarily sourced from the Chinese legal system, which may limit the benchmark's applicability to other jurisdictions. However, by incorporating both law articles and precedent cases to align with the principles of civil and common law systems, CitaLaw demonstrates strong potential for adaptation to diverse legal contexts.", + "bbox": [ + 110, + 174, + 487, + 303 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Second, the syllogism-based evaluation framework simplifies legal reasoning into three key components: the major premise (law articles or precedent cases), the minor premise (case circumstances and actions), and the conclusion (legal decision). While this structured approach is effective for systematic evaluation, real-world legal reasoning may encompass additional complexities.", + "bbox": [ + 112, + 303, + 489, + 432 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "9 Ethical Considerations", + "text_level": 1, + "bbox": [ + 112, + 444, + 346, + 458 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Data Privacy and Confidentiality. 
The legal datasets used in CitaLaw include law articles, precedent cases, user questions, and golden responses. These documents were sourced from publicly available databases, ensuring compliance with data privacy and confidentiality standards. We carefully reviewed the datasets to ensure that no personally identifiable information (PII) or sensitive details about individuals were inadvertently included.", + "bbox": [ + 112, + 469, + 487, + 629 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Alignment with Legal Standards. Legal AI systems must align with the ethical and professional standards of the legal domain. Our work emphasizes the need for syllogism-based reasoning to ensure logical consistency and adherence to legal principles.", + "bbox": [ + 112, + 631, + 487, + 727 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Transparency and Explainability. Legal reasoning must be transparent and interpretable, particularly when used in sensitive or high-stakes domains. The metrics proposed in CitaLaw, including syllogism-based evaluation, aim to improve explainability by breaking down the reasoning process into logical components.", + "bbox": [ + 112, + 728, + 487, + 840 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Responsibility in System Deployment. Citalaw is intended as a research benchmark and should not be directly deployed in high-stakes legal decision-making without human oversight. While the benchmark aims to enhance the trustworthiness", + "bbox": [ + 112, + 841, + 487, + 920 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "of LLM-generated responses, legal professionals should always verify the citations and legal interpretations provided by such systems. 
Misuse of automated systems without adequate validation could lead to inaccurate legal advice or unintended consequences in legal proceedings.", + "bbox": [ + 507, + 84, + 882, + 181 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "10 Acknowledgements", + "text_level": 1, + "bbox": [ + 509, + 193, + 721, + 209 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "This work was funded by the National Key R&D Program of China (2023YFA1008704), the National Natural Science Foundation of China (62472426). Supported by fund for building world-class universities (disciplines) of Renmin University of China. Work partially done at Beijing Key Laboratory of Research on Large Models and Intelligent Governance, and Engineering Research Center of Next-Generation Intelligent Search and Recommendation, MOE. Supported by the Beijing Social Science Foundation Planning Project (Grant No. 24GLC041), the Fundamental Research Funds for the Central Universities in UIBE (Grant No. 24QN06, 24PYTS22).", + "bbox": [ + 507, + 218, + 884, + 443 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 510, + 470, + 608, + 483 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Abdelrahman Abdallah, Bhawna Piryani, and Adam Jatowt. 2023. Exploring the state of the art in legal qa systems. Journal of Big Data, 10(1):127.", + "AI@Meta. 2024. Llama 3 model card.", + "Andrew Blair-Stanek, Nils Holzenberger, and Benjamin Van Durme. 2023. Can gpt-3 perform statutory reasoning? In Proceedings of the Nineteenth International Conference on Artificial Intelligence and Law, pages 22-31.", + "Jacob Cohen. 1960. A coefficient of agreement for nominal scales. Educational and psychological measurement, 20(1):37-46.", + "Yongfu Dai, Duanyu Feng, Jimin Huang, Haochen Jia, Qianqian Xie, Yifang Zhang, Weiguang Han, Wei Tian, and Hao Wang. 2023. 
Laiw: A chinese legal large language models benchmark (a technical report). arXiv preprint arXiv:2310.05620.", + "Haolin Deng, Chang Wang, Xin Li, Dezhang Yuan, Junlang Zhan, Tianhua Zhou, Jin Ma, Jun Gao, and Ruifeng Xu. 2024. Websites: Attributed query-focused summarization on chinese web search results with citations. arXiv preprint arXiv:2403.01774.", + "Jacob Devlin. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805.", + "Zhiwei Fei, Xiaoyu Shen, Dawei Zhu, Fengzhe Zhou, Zhuo Han, Songyang Zhang, Kai Chen, Zongwen" + ], + "bbox": [ + 509, + 492, + 884, + 921 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "11191", + "bbox": [ + 477, + 927, + 522, + 940 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Shen, and Jidong Ge. 2023. Lawbench: Benchmarking legal knowledge of large language models. arXiv preprint arXiv:2309.16289.", + "Luyu Gao, Zhuyun Dai, Panupong Pasupat, Anthony Chen, Arun Tejasvi Chaganty, Yicheng Fan, Vincent Zhao, Ni Lao, Hongrae Lee, Da-Cheng Juan, et al. 2023a. Rarr: Researching and revising what language models say, using language models. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 16477-16508.", + "Tianyu Gao, Howard Yen, Jiatong Yu, and Danqi Chen. 2023b. Enabling large language models to generate text with citations. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 6465-6488.", + "Wanwei He, Jiabao Wen, Lei Zhang, Hao Cheng, Bowen Qin, Yunshui Li, Feng Jiang, Junying Chen, Benyou Wang, and Min Yang. 2023. Hanfei-1.0. https://github.com/siat-nlp/HanFei.", + "Matthew Honnibal, Ines Montani, Sofie Van Landeghem, and Adriane Boyd. 2020. 
spaCy: Industrial-strength Natural Language Processing in Python.", + "Dongfang Li, Zetian Sun, Xinshuo Hu, Zhenyu Liu, Ziyang Chen, Baotian Hu, Aiguo Wu, and Min Zhang. 2023. A survey of large language models attribution. arXiv preprint arXiv:2311.03731.", + "Haitao Li, You Chen, Qingyao Ai, Yueyue Wu, Ruizhe Zhang, and Yiqun Liu. 2024. Lexeval: A comprehensive chinese legal benchmark for evaluating large language models. Preprint, arXiv:2409.20288.", + "Chin-Yew Lin. 2004. Rouge: A package for automatic evaluation of summaries. In Text summarization branches out, pages 74-81.", + "Hongcheng Liu, Yusheng Liao, Yutong Meng, and Yuhao Wang. 2023. Xiezhi: Chinese law large language model. https://github.com/LiuHC0428/LAW_GPT.", + "Luyao Ma, Yating Zhang, Tianyi Wang, Xiaozhong Liu, Wei Ye, Changlong Sun, and Shikun Zhang. 2021a. Legal judgment prediction with multi-stage case representation learning in the real court setting. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 993-1002.", + "Yixiao Ma, Yunqiu Shao, Yueyue Wu, Yiqun Liu, Ruizhe Zhang, Min Zhang, and Shaoping Ma. 2021b. Lecard: A legal case retrieval dataset for chinese law system. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 2342-2348.", + "Chaitanya Malaviya, Subin Lee, Sihao Chen, Elizabeth Sieber, Mark Yatskar, and Dan Roth. 2024. Expertqa: Expert-curated questions and attributed answers. In Proceedings of the 2024 Conference of the North" + ], + "bbox": [ + 115, + 85, + 487, + 919 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 3025-3045.", + "Krishna Pillutla, Swabha Swayamdipta, Rowan Zellers, John Thickstun, Sean Welleck, Yejin Choi, and Zaid Harchaoui. 2021. 
Mauve: Measuring the gap between neural text and human text using divergence frontiers. Advances in Neural Information Processing Systems, 34:4816-4828.", + "N Reimers. 2019. Sentence-bert: Sentence embeddings using siamese bert-networks. arXiv preprint arXiv:1908.10084.", + "Jaromir Savelka, Kevin D Ashley, Morgan A Gray, Hannes Westermann, and Huihui Xu. 2023. Explaining legal concepts with augmented large language models (gpt-4). arXiv preprint arXiv:2306.09525.", + "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumont, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, et al. 2020. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 conference on empirical methods in natural language processing: system demonstrations, pages 38-45.", + "Shiguang Wu, Zhongkun Liu, Zhen Zhang, Zheng Chen, Wentao Deng, Wenhao Zhang, Jiyuan Yang, Zhitao Yao, Yougang Lyu, Xin Xin, Shen Gao, Pengjie Ren, Zhaochun Ren, and Zhumin Chen. 2023a. fuzi.mingcha. https://github.com/irlab-sdu/fuzi.mingcha.", + "Yiquan Wu, Yuhang Liu, Yifei Liu, Ang Li, Siying Zhou, and Kun Kuang. wisdominterrogatory. Available at GitHub.", + "Yiquan Wu, Siying Zhou, Yifei Liu, Weiming Lu, Xiaozhong Liu, Yating Zhang, Changlong Sun, Fei Wu, and Kun Kuang. 2023b. Precedent-enhanced legal judgment prediction with llm and domain-model collaboration. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 12060-12075.", + "Shitao Xiao, Zheng Liu, Peitian Zhang, Niklas Muennighoff, Defu Lian, and Jian-Yun Nie. 2023. C-pack: Packaged resources to advance general chinese embedding. 
arXiv preprint arXiv:2309.07597.", + "An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu" + ], + "bbox": [ + 510, + 85, + 880, + 919 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "11192", + "bbox": [ + 477, + 928, + 524, + 940 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Cui, Zhenru Zhang, and Zhihao Fan. 2024. Qwen2 technical report. arXiv preprint arXiv:2407.10671.", + "Fangyi Yu, Lee Quartey, and Frank Schilder. 2022a. Legal prompting: Teaching a language model to think like a lawyer. arXiv preprint arXiv:2212.01326.", + "Weijie Yu, Zhongxiang Sun, Jun Xu, Zhenhua Dong, Xu Chen, Hongteng Xu, and Ji-Rong Wen. 2022b. Explainable legal case matching via inverse optimal transport-based rationale extraction. In Proceedings of the 45th international ACM SIGIR conference on research and development in information retrieval, pages 657-668.", + "Shengbin Yue, Wei Chen, Siyuan Wang, Bingxuan Li, Chenchen Shen, Shujun Liu, Yuxuan Zhou, Yao Xiao, Song Yun, Xuanjing Huang, and Zhongyu Wei. 2023. Disc-lawllm: Fine-tuning large language models for intelligent legal services. Preprint, arXiv:2309.11325.", + "Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q Weinberger, and Yoav Artzi. 2019. Bertscore: Evaluating text generation with bert. 
arXiv preprint arXiv:1904.09675.", + "Haoxi Zhong, Chaojun Xiao, Cunchao Tu, Tianyang Zhang, Zhiyuan Liu, and Maosong Sun. 2020. Jecqa: a legal-domain question answering dataset. In Proceedings of the AAAI conference on artificial intelligence, volume 34, pages 9701-9708.", + "Haoxi Zhong, Zhengyan Zhang, Zhiyuan Liu, and Maosong Sun. 2019. Open chinese language pretrained model zoo. Technical report.", + "Zhi Zhou, Jiang-Xin Shi, Peng-Xiao Song, Xiao-Wen Yang, Yi-Xuan Jin, Lan-Zhe Guo, and Yu-Feng Li. 2024. Lawgpt: A chinese legal knowledge-enhanced large language model. Preprint, arXiv:2406.04614." + ], + "bbox": [ + 115, + 85, + 487, + 602 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11193", + "bbox": [ + 477, + 927, + 524, + 940 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "A The Used Prompts", + "text_level": 1, + "bbox": [ + 114, + 83, + 312, + 99 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Figure 5 illustrates the prompts used in this paper, including $p_1$ , $p_2$ , $p_3$ in Eq. 1, Eq. 2 and Eq. 3.", + "bbox": [ + 112, + 114, + 489, + 148 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "B More Details of Evaluated Models and Datasets", + "text_level": 1, + "bbox": [ + 114, + 164, + 482, + 197 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "For the Legal LLMs, we choose (1) fuzi.mingcha (6B) (Wu et al., 2023a): It leverages unsupervised judicial corpora for training and uses syllogistic reasoning judgment data for fine-tuning. (2) LexiLaw7 (6B): It specifically utilizes legal articles and legal reference books for training. (3) Tailing8 (7B): It uses judicial text validation data, information extraction data, and judgment data for training. (4) DISC-LawLLM (13B) (Yue et al., 2023): In addition to fine-tuning with pairs, it also uses triplet data for fine-tuning to enhance the model's ability to leverage external knowledge. 
(5) zhihai (7B) (Wu et al.): It utilizes ChatGPT to modify the existing dataset and then performs secondary pre-training. (6) LawGPT_zh (6B) (Liu et al., 2023): It primarily uses scenario-based dialogues and knowledge-based question-answering data for fine-tuning based on LoRA. (7) HanFei (7B) (He et al., 2023): It is the first fully parameter-trained legal LLM in China. Because in the main experiment, CGG has the best overall performance, for the legal LLMs, we generate responses using CGG.", + "bbox": [ + 115, + 212, + 489, + 565 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Table 4 and Table 5 are the website URLs and corresponding licenses of the evaluated models and datasets.", + "bbox": [ + 112, + 568, + 487, + 617 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "C More Details on Implementation", + "text_level": 1, + "bbox": [ + 114, + 634, + 433, + 653 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Considering the length of legal texts and the input window for the LLMs is limited, all experiments in this paper are conducted using a zero-shot setting. We use the Chinese-performing-well Qwen2-1.5B (Yang et al., 2024)9 to complete the MAUVE calculations. For RGUGE, We use version 1.0.1 of ROUGE for calculation. For BERTScore, we use bert-base-chinese (Devlin, 2018)10 to compute it. Regarding sentence-BERT, we employ paraphrase-multilingual-MiniLM-L12-v2 (Reimers, 2019)11.", + "bbox": [ + 112, + 665, + 489, + 827 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "D Human Evaluation", + "text_level": 1, + "bbox": [ + 509, + 83, + 714, + 98 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We hired four legal annotators from a Chinese university, all of whom have legal education backgrounds and are familiar with the cases in the dataset they need to annotate. 
We explained to the annotators that the data they annotated would be used for scientific research and paid them a reasonable remuneration based on local conditions. They are all graduate students from the judicial field, with practical experience in the legal profession. Two are male, two are female, aged between 24 and 30, and all have over five years of judicial theory study. Two annotators were responsible for the first stage of annotation, while the other two were responsible for the second stage, with all working together on the annotation process.", + "bbox": [ + 507, + 108, + 884, + 349 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Table 6 shows a detailed description of each level used to evaluate the agreement of the NLI model with human evaluations.", + "bbox": [ + 507, + 350, + 882, + 397 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "E Different Retrieval Models", + "text_level": 1, + "bbox": [ + 507, + 410, + 778, + 425 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Tables 7 and 8 present the performance of different retrieval models—Criminal-BERT, CivilBERT, and BGE—on each metric for the CGG method across the two datasets. It can be observed that when Llama3 and Qwen2 are used as LLMs, BGE achieves the best performance as the retrieval model. Comparing the two datasets, on the Layperson dataset, where the questions are more general, Criminal-BERT and Civil-BERT, which focus on legal cases, perform relatively poorly. 
In contrast, on the Practitioner dataset, despite no structural or training improvements, Criminal-BERT and CivilBERT achieve results comparable to BGE, highlighting the importance of legal knowledge in judicial QA tasks.", + "bbox": [ + 507, + 435, + 884, + 676 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The differences between the two datasets also underscore the significance of selecting an appropriate retrieval model.", + "bbox": [ + 507, + 677, + 882, + 724 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "F Different NLI Models", + "text_level": 1, + "bbox": [ + 507, + 736, + 732, + 752 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Figures 6 (a) and (b) show the entailment scores given by four legal LLMs as NLI models under different methods (CloseBook, CGG, ARG-Q, ARGQA) and metrics(CitaLaw, CitaS, CitaB, and CitaC) when Qwen is used as the LLM. Similar conclusions to those in Section 6.5 can be drawn.", + "bbox": [ + 507, + 762, + 884, + 858 + ], + "page_idx": 11 + }, + { + "type": "page_footnote", + "text": "7https://github.com/CSHaitao/LexiLaw", + "bbox": [ + 136, + 843, + 378, + 856 + ], + "page_idx": 11 + }, + { + "type": "page_footnote", + "text": "$^{8}$ https://github.com/DUTIR-LegalIntelligence/Tailing", + "bbox": [ + 136, + 856, + 463, + 870 + ], + "page_idx": 11 + }, + { + "type": "page_footnote", + "text": "9https://huggingface.co/Qwen/Qwen2-1.5B", + "bbox": [ + 136, + 870, + 401, + 882 + ], + "page_idx": 11 + }, + { + "type": "page_footnote", + "text": "10https://huggingface.co/google-bert/bert-base-chinese", + "bbox": [ + 136, + 882, + 463, + 895 + ], + "page_idx": 11 + }, + { + "type": "page_footnote", + "text": "11 https://huggingface.co/sentence-", + "bbox": [ + 136, + 895, + 344, + 907 + ], + "page_idx": 11 + }, + { + "type": "page_footnote", + "text": "transformers/paraphrase-multilingual-MiniLM-L12-v2", + "bbox": [ + 115, + 908, + 448, + 920 + ], + "page_idx": 11 + }, + { + "type": "page_number", + 
"text": "11194", + "bbox": [ + 477, + 927, + 524, + 940 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Answer the question based on the provided law article and cite it appropriately. Only output the answer and citations, without including any additional content. When citing the law article, use [A1] at the end of the relevant sentence.", + "bbox": [ + 129, + 87, + 443, + 165 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Below is the provided law article: Law article [A1]: {Law article 1}", + "bbox": [ + 132, + 177, + 339, + 205 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Question: {Question} Answer:", + "bbox": [ + 132, + 217, + 268, + 241 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "(a) Layperson", + "text_level": 1, + "bbox": [ + 243, + 244, + 336, + 256 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Refine the text based on the references and only output the refined text.", + "bbox": [ + 132, + 262, + 426, + 288 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Text: {Answer} \nReference: {References}", + "bbox": [ + 132, + 300, + 285, + 326 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Refined text (only output the Refined text, without any additional content):", + "bbox": [ + 132, + 338, + 442, + 365 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "(c) Response Refinement", + "text_level": 1, + "bbox": [ + 206, + 370, + 371, + 382 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Answer the question based on the provided documents and cite them appropriately.", + "bbox": [ + 468, + 87, + 855, + 114 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Only output the answer and citations, without including any additional content.", + "bbox": [ + 468, + 115, + 835, + 139 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "When citing precedent cases, use [C1], [C2], or [C3] at the end of the sentence. 
When citing the law article, use [A1].", + "bbox": [ + 468, + 140, + 853, + 166 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Below are the provided documents: \nPrecedent case [C1]: {Precedent case 1}", + "bbox": [ + 468, + 178, + 719, + 204 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Precedent case [C2]: {Precedent case 2}", + "bbox": [ + 470, + 205, + 719, + 216 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Precedent case [C3]: {Precedent case 3} \nLaw article [A1]: {Law article 1}", + "bbox": [ + 470, + 217, + 719, + 243 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Question: {Question} Answer:", + "bbox": [ + 468, + 255, + 603, + 279 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "(b) Practitioner", + "text_level": 1, + "bbox": [ + 615, + 284, + 719, + 297 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Answer the question. Only output the answer without any additional content.", + "bbox": [ + 468, + 303, + 806, + 329 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Question: {Question} Answer:", + "bbox": [ + 468, + 341, + 603, + 365 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "(d) Without Reference", + "text_level": 1, + "bbox": [ + 594, + 370, + 739, + 382 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/96a30d23176633519f60baaa92417ea8b03345e190eef6af9094729a1d7ef0ce.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
TypeLLMURLLicence
Open domainQwen2-7B-Instructhttps://huggingface.co/Qwen/Qwen2-7B-InstructApache-2.0 license
Llam3-8B-Instructhttps://github.com/meta-llama/llama3META LLAMA 3 COMMUNITY License
Legal Domainfuzi.mingchahttps://github.com/irlab-sdu/fuzi.mingchaApache-2.0 license
DISC-LawLLMhttps://github.com/FudanDISC/DISC-LawLLMApache-2.0 license
LawGPT_zhhttps://github.com/LiuHC0428/LAW-GPT
Hanfeihttps://github.com/siat-nlp/HanFeiApache-2.0 license
Tailinghttps://github.com/DUTIR-LegalIntelligence/Tailing
LexiLawhttps://github.com/CSHaitao/LexiLawMIT license
zhihaihttps://github.com/zhihaiLLM/wisdomInterrogatoryApache-2.0 license
", + "bbox": [ + 154, + 453, + 842, + 563 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Table 4: The LLM source URLs and licenses used by CitaLaw. The parts where the license is listed as empty indicate that the author has not provided a License.", + "bbox": [ + 112, + 571, + 882, + 600 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/1b8aaf2ed2a7460c159e4afcce3fcd90678b2d5fe400a1644ae0c81d1b896487.jpg", + "image_caption": [ + "Figure 5: Prompts used in this paper. (a) The prompt $p_1$ is used to retrieve one law article in the Layperson dataset. (b) The prompt $p_1$ is used to retrieve one law article and three precedent cases in the Practitioner dataset. (c) The prompt $p_3$ is used to refine the LLM's answer based on references. (d) The prompt $p_2$ is used for LLM responses without references." + ], + "image_footnote": [], + "bbox": [ + 122, + 639, + 478, + 731 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/1871806c373c10877618a242a1d2f56ced23ad1f3ac4caa451cbf99ee3a98601.jpg", + "image_caption": [ + "(a) Metrics for CGG method with Layperson dataset.", + "(b) Methods for CitaLaw metric with Practitioner dataset.", + "Figure 6: The performance of different NLI models when the LLM is Qwen." + ], + "image_footnote": [], + "bbox": [ + 122, + 752, + 478, + 841 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "11195", + "bbox": [ + 477, + 927, + 524, + 940 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/73a65b8872e18e177946d159ac17775e763b3dd786f8015bae458aa6daedd73a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
TypeDatasetURLLicence
QuestionLayperson Practitionerhttps://github.com/open-compass/LawBenchApache-2.0 license
https://github.com/CSHaitao/LexEvalMIT License
CorpusLeCaRDhttps://github.com/myx666/LeCaRDMIT License
ELAMhttps://github.com/ruc-wjyu/IOT-MatchMIT License
CAIL2021-sfzyhttps://github.com/china-ai-law-challenge/CAIL2021
LJP-MSJudg fuzi.mingchahttps://github.com/mly-nlp/LJP-MSJudge
DISC-LawLLMhttps://github.com/irlab-sdu/fuzi.mingchaApache-2.0 license
LawGPT_zhhttps://github.com/FudanDISC/DISC-LawLLMApache-2.0 license
Hanfeihttps://github.com/LiuHC0428/LAW-GPT
https://github.com/siat-nlp/HanFeiApache-2.0 license
", + "bbox": [ + 194, + 118, + 801, + 250 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/080385d2bcacab2925a1b61996cc0e63dbd2fddf493e74f5c86d6f067e0c4776.jpg", + "table_caption": [ + "Table 5: The dataset source URLs and licenses used by CitaLaw. The parts where the license is listed as empty indicate that the author has not provided a License." + ], + "table_footnote": [], + "table_body": "
ScoreDescription
1No Entailment: The former does not entail the latter at all, with no logical connection between the two.
2Weak Entailment: A partial entailment where the former somewhat relates to the latter, but the connection is weak and not fully conclusive.
3Moderate Entailment: A moderate degree of entailment, meaning the former generally leads to the latter in most cases, but exceptions exist.
4Strong Entailment: A strong logical relationship where the former can derive the latter in the vast majority of cases.
5Complete Entailment: The former fully entails the latter in all cases, with an unambiguous and definitive logical connection between them.
", + "bbox": [ + 119, + 373, + 878, + 451 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/027878eda94a6faf8c54569893beefb2f3a5cb08aa3267780c477ccced102680.jpg", + "table_caption": [ + "Table 6: Scoring Criteria for Human Evaluation of Entailment." + ], + "table_footnote": [], + "table_body": "
MetricFluencyCorrectnessCitationAll
CategoryRetrieverMauveRouge-1Rouge-2Rouge-LBERT-FCorrectcCorrectaCorrectdCitaLawAvg
Llama3 (Llam3-8B-Instruct)Criminal37.4418.072.1813.1561.7164.0363.5664.3680.3444.98
Civil56.1618.272.3413.4461.9063.2263.8963.3580.9747.06
BGE61.0123.976.0517.9165.9467.2977.3174.9586.7053.46
Qwen2 (Qwen2-7B-Instruct)Criminal55.2621.094.5314.3264.7363.1064.8965.8561.6046.15
Civil52.4420.484.1613.8164.4561.7964.9465.6259.8845.29
BGE75.1022.264.7715.4165.2867.5078.6277.8277.5953.82
", + "bbox": [ + 119, + 555, + 875, + 655 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/14d673f55be52d60b789d60dd518ee6ad7c5133dc3ce3a80ce4cb29596d4df1e.jpg", + "table_caption": [ + "Table 7: Performance comparisons on retrieval models in the Layperson dataset when the method is CGG. The best performance is indicated in bold." + ], + "table_footnote": [], + "table_body": "
MetricFluencyCorrectnessCitationAll
CategoryRetrieverMauveRouge-1Rouge-2Rouge-LBERT-FCorrectcCorrectaCorrectdCitaLawCitaCCitaaCitaDAvg
Llama3 (Llam3-8B-Instruct)Criminal34.2525.797.8619.4265.0366.2776.3076.8270.5966.4170.0969.4754.03
Civil39.8426.398.0720.0265.2765.4175.7875.7369.2167.5269.5469.1654.33
BGE36.3726.157.8419.5565.6067.1976.3677.7373.5868.2367.8767.6554.51
Qwen2 (Qwen2-7B-Instruct)Criminal32.4931.7911.0923.9369.7972.0080.8181.5368.4268.4271.8671.5456.97
Civil33.3731.6711.0623.8469.6373.3580.5781.2769.1166.4170.0969.4756.65
BGE39.6631.0110.7523.4369.0673.4980.1181.1170.3767.8269.5370.0157.20
", + "bbox": [ + 117, + 764, + 878, + 850 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table 8: Performance comparisons on retrieval models in the Practitioner dataset when the method is CGG. The best performance is indicated in bold.", + "bbox": [ + 112, + 860, + 882, + 890 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "11196", + "bbox": [ + 475, + 927, + 524, + 940 + ], + "page_idx": 13 + } +] \ No newline at end of file diff --git a/2025/CitaLaw_ Enhancing LLM with Citations in Legal Domain/81aca763-e861-40de-ad3f-640af6cf3d30_model.json b/2025/CitaLaw_ Enhancing LLM with Citations in Legal Domain/81aca763-e861-40de-ad3f-640af6cf3d30_model.json new file mode 100644 index 0000000000000000000000000000000000000000..736c1a6d7158732a2da9f0ed04bc8ebb0f920bea --- /dev/null +++ b/2025/CitaLaw_ Enhancing LLM with Citations in Legal Domain/81aca763-e861-40de-ad3f-640af6cf3d30_model.json @@ -0,0 +1,2956 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.194, + 0.091, + 0.806, + 0.112 + ], + "angle": 0, + "content": "CitaLaw: Enhancing LLM with Citations in Legal Domain" + }, + { + "type": "text", + "bbox": [ + 0.289, + 0.139, + 0.713, + 0.157 + ], + "angle": 0, + "content": "Kepu Zhang\\(^{1}\\), Weijie Yu\\(^{2*}\\), Sunhao Dai\\(^{1}\\), Jun Xu\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.213, + 0.157, + 0.786, + 0.174 + ], + "angle": 0, + "content": "1Gaoling School of Artificial Intelligence, Renmin University of China" + }, + { + "type": "text", + "bbox": [ + 0.282, + 0.174, + 0.716, + 0.189 + ], + "angle": 0, + "content": "\\(^{2}\\) University of International Business and Economics" + }, + { + "type": "text", + "bbox": [ + 0.321, + 0.191, + 0.678, + 0.207 + ], + "angle": 0, + "content": "kepuzhang@ruc.edu.cn, yu@uibep.edu.cn" + }, + { + "type": "title", + "bbox": [ + 0.261, + 0.261, + 0.341, + 0.277 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.29, + 0.462, + 0.603 + ], + 
"angle": 0, + "content": "In this paper, we propose Citalaw, the first benchmark designed to evaluate LLMs' ability to produce legally sound responses with appropriate citations. Citalaw features a diverse set of legal questions for both laypersons and practitioners, paired with a comprehensive corpus of law articles and precedent cases as a reference pool. This framework enables LLM-based systems to retrieve supporting citations from the reference corpus and align these citations with the corresponding sentences in their responses. Moreover, we introduce syllogism-inspired evaluation methods to assess the legal alignment between retrieved references and LLM-generated responses, as well as their consistency with user questions. Extensive experiments on 2 open-domain and 7 legal-specific LLMs demonstrate that integrating legal references substantially enhances response quality. Furthermore, our proposed syllogism-based evaluation method exhibits strong agreement with human judgments." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.616, + 0.262, + 0.631 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.642, + 0.49, + 0.85 + ], + "angle": 0, + "content": "Generating responses supported by citations, such as relevant law articles and precedent cases, is essential for ensuring the trustworthiness of large language models (LLMs) in legal tasks. For laypersons seeking legal advice (Fei et al., 2023), LLM-generated responses grounded in citations provide verifiable information, fostering trust in the system. Conversely, for legal practitioners such as lawyers and judges, citations serve as supportive evidence that aids in analyzing complex cases, validating legal arguments, and ensuring decisions align with established legal principles (Li et al., 2024; Zhong et al., 2020; Abdallah et al., 2023)." 
+ }, + { + "type": "text", + "bbox": [ + 0.114, + 0.852, + 0.49, + 0.899 + ], + "angle": 0, + "content": "Recently, a growing body of benchmark research (Gao et al., 2023a; Li et al., 2023) has focused on enabling LLMs to provide citations for the" + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.258, + 0.88, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.553, + 0.399, + 0.84, + 0.413 + ], + "angle": 0, + "content": "Figure 1: The framework of our CitaLaw." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.436, + 0.885, + 0.565 + ], + "angle": 0, + "content": "statements they generate. For instance, ALCE (Gao et al., 2023b) introduces a benchmark designed to evaluate the ability of LLMs to generate citation-supported outputs, aiming to improve factual accuracy. WebCiteS (Deng et al., 2024) provides a curated database of manually annotated summaries and citations to enhance performance in text summarization and citation generation." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.569, + 0.885, + 0.922 + ], + "angle": 0, + "content": "While these studies have made notable progress in general domains, they face significant challenges when applied to the legal domain. First, laypersons and legal practitioners interact with LLMs differently and have distinct expectations for citations. Laypersons typically seek legal advice and rely on citations to verify the accuracy of LLM responses, whereas legal practitioners pose more complex queries, using LLMs for legal reasoning, with citations serving as supportive evidence. Existing studies fail to address these differences, leading to unsatisfactory performance in real-world applications. Second, existing methods often fall short in providing the diverse references required in legal contexts, such as law articles and precedent cases. Law articles establish the foundational legal framework, while precedent cases offer concrete examples and interpretive guidance. 
These two types of references inherently align with the distinct characteristics of civil and common law systems. Third, traditional citation evaluation measures, such as ROUGE (Lin, 2004), rely on surface-level similar" + }, + { + "type": "page_footnote", + "bbox": [ + 0.136, + 0.907, + 0.287, + 0.921 + ], + "angle": 0, + "content": "* Corresponding author" + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.928, + 0.526, + 0.941 + ], + "angle": 0, + "content": "11183" + }, + { + "type": "footer", + "bbox": [ + 0.221, + 0.946, + 0.779, + 0.959 + ], + "angle": 0, + "content": "Findings of the Association for Computational Linguistics: ACL 2025, pages 11183-11196" + }, + { + "type": "footer", + "bbox": [ + 0.27, + 0.96, + 0.729, + 0.973 + ], + "angle": 0, + "content": "July 27 - August 1, 2025 ©2025 Association for Computational Linguistics" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.123, + 0.082, + 0.874, + 0.306 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.317, + 0.883, + 0.361 + ], + "angle": 0, + "content": "Figure 2: Examples from the two subsets of CitaLaw, with text in red, blue, and yellow representing the three dimensions of the syllogism: major premise, minor premise (circumstances, illegal acts), and conclusion (legal decisions), respectively. [A] and [C] denote citations to relevant law articles and precedent cases, respectively." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.375, + 0.489, + 0.456 + ], + "angle": 0, + "content": "ities and are often insufficient to assess the alignment between references and LLM-generated responses. In the legal domain, effective evaluation requires a deeper understanding of logical and semantic relationships." 
+ }, + { + "type": "text", + "bbox": [ + 0.113, + 0.46, + 0.489, + 0.556 + ], + "angle": 0, + "content": "To overcome the above challenges, we propose CitaLaw, the first benchmark tailored to evaluate LLMs' capabilities in generating legally grounded responses supported by accurate and context-aware citations. As shown in Figure 1, CitaLaw incorporates four distinct legal-specific features:" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.56, + 0.489, + 0.64 + ], + "angle": 0, + "content": "(1) CitaLaw has two subsets tailored for laypersons and practitioners, with examples in Figure 2. Laypersons typically ask shorter, conversational questions, while practitioners often pose specialized, detailed questions." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.645, + 0.489, + 0.805 + ], + "angle": 0, + "content": "(2) CitaLaw includes a retrieval corpus comprising two commonly used references: law articles, which provide clear and concise guidelines for addressing user questions, and precedent cases, which offer legal reasoning and support for judicial decisions. Recognizing the distinct needs of laypersons and practitioners, we provide only law articles for laypersons to ensure clarity, while practitioners have access to both law articles and precedent cases to support more complex legal reasoning." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.809, + 0.489, + 0.921 + ], + "angle": 0, + "content": "(3) In addition to traditional global-level metrics such as MAUVE (Pillutla et al., 2021), we propose a syllogism-based evaluation method to assess both the response correctness and the citation quality. This method provides a more granular evaluation by focusing on three key dimensions: circumstances, illegal acts, and legal decisions." 
+ }, + { + "type": "list", + "bbox": [ + 0.113, + 0.56, + 0.489, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.375, + 0.885, + 0.552 + ], + "angle": 0, + "content": "(4) We consider two types of response generation methods. The first type, Citation-Guided Generation (CGG), involves generating responses by incorporating retrieved references during generation. The second type, Answer Refinement Generation (ARG), refines the LLMs' initial response (CloseBook) by retrieving and incorporating reference information. This category includes ARG-Q, which retrieves citations using only the user query, and ARG-QA, which retrieves citations using both the user query and the LLM's initial response." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.553, + 0.884, + 0.793 + ], + "angle": 0, + "content": "Extensive experiments on two open-domain and seven legal-specific LLMs reveal the following key insights: 1) Incorporating legal references into the LLM significantly improves the quality of responses; 2) Including references as part of the LLM's input consistently outperforms answer-refinement methods; 3) Leveraging references to refine the LLM's responses yields better alignment of responses and references. 4) For fine-tuning LLMs in legal scenarios, incorporating law articles, syllogistic reasoning, and full-scale fine-tuning achieves promising performance. 5) Open-domain LLMs surprisingly outperform legal-specific LLMs in certain scenarios; 6) Human evaluations show a strong correlation with our syllogism-based methods." 
+ }, + { + "type": "text", + "bbox": [ + 0.528, + 0.796, + 0.866, + 0.811 + ], + "angle": 0, + "content": "In summary, our contributions are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.826, + 0.884, + 0.922 + ], + "angle": 0, + "content": "- To the best of our knowledge, CitaLaw is the first benchmark designed to evaluate the capability of LLMs to generate legally grounded responses with accurate and context-aware citations. CitaLaw includes questions tailored to both laypersons and practitioners, paired" + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.928, + 0.526, + 0.941 + ], + "angle": 0, + "content": "11184" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.15, + 0.085, + 0.488, + 0.117 + ], + "angle": 0, + "content": "with a citation corpus comprising law articles and precedent cases." + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.129, + 0.487, + 0.224 + ], + "angle": 0, + "content": "- We propose a two-level evaluation framework that combines global-level metrics with a syllogism-based reasoning approach. Additionally, we explore two mainstream methods for legal response generation: citation-guided and answer refinement." + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.239, + 0.488, + 0.366 + ], + "angle": 0, + "content": "- Through extensive experiments on two open-domain and seven legal-specific LLMs, we demonstrate the effectiveness of integrating legal references into response generation and validate our syllogism-based evaluation method. Additionally, we provide actionable insights for the practical deployment of LLMs in legal scenarios." + }, + { + "type": "list", + "bbox": [ + 0.137, + 0.129, + 0.488, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.38, + 0.271, + 0.395 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.406, + 0.49, + 0.679 + ], + "angle": 0, + "content": "LLM for Legal Task. 
A amount of work has explored applying LLMs to legal tasks (Savelka et al., 2023; Wu et al., 2023b; Yu et al., 2022a; Blair-Stanek et al., 2023). Building LLMs tailored for legal scenarios is a popular direction (Yue et al., 2023; Wu et al., 2023a; He et al., 2023). There are also some benchmarks that explore the capabilities of LLMs in legal tasks. LawBench (Fei et al., 2023) evaluates LLMs' legal knowledge across three cognitive aspects. LAiW (Dai et al., 2023) assesses LLMs' legal reasoning abilities based on legal practice logic. LexEval (Li et al., 2024) evaluates LLMs' legal capabilities based on a new legal cognitive ability classification system. However, none of them have considered enhancing the trustworthiness of LLMs in legal scenarios by generating outputs with citations." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.68, + 0.49, + 0.922 + ], + "angle": 0, + "content": "Citation in LLM. Attribution (Li et al., 2023) in LLMs refers to providing supporting evidence for the answers generated by the model, presented in the form of citations. ALCE (Gao et al., 2023b) is an automated benchmark for evaluating LLMs' ability to generate outputs with citations, aimed at improving the factual accuracy of the generated responses. WebCiteS (Deng et al., 2024) provides a database containing 7,000 manually annotated summaries and citations to enhance LLMs' capabilities in summarization and citation. RARR (Gao et al., 2023a) enhances LLM outputs by automatically adding citations, and modifying the responses. ExpertQA (Malaviya et al., 2024) verifies and modifies citations through expert review to ensure re" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.085, + 0.882, + 0.117 + ], + "angle": 0, + "content": "liability. In contrast to the above works, CitaLaw focuses specifically on citation in legal scenarios." 
+ }, + { + "type": "title", + "bbox": [ + 0.509, + 0.13, + 0.866, + 0.146 + ], + "angle": 0, + "content": "3 Task Setup and Dataset Construction" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.158, + 0.885, + 0.366 + ], + "angle": 0, + "content": "Suppose we have a legal corpus \\(D\\), which consists of either a collection of precedent cases \\((D_{l})\\) or law articles \\((D_{c})\\). Given a user question \\(x\\) posed by either a layperson or a practitioner, the LLM-based system is tasked with retrieving supportive citations from \\(D\\) and generating a legally grounded response \\(y\\). The response \\(y\\) comprises a list of \\(n\\) sentences, i.e., \\(y = [s_1,\\dots ,s_n]\\), where each sentence \\(s_i\\) refers to at most one corresponding citation. As illustrated in Figure 2, the system is further required to attach each citation to its relevant sentence, with \"[A]\" and \"[C]\" denoting references to law articles and precedent cases, respectively." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.368, + 0.884, + 0.414 + ], + "angle": 0, + "content": "To enable the evaluation of this task, we construct the specialized dataset (Table 1 shows the statistics) as follows:" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.416, + 0.884, + 0.528 + ], + "angle": 0, + "content": "To simulate the behavior of laypersons, we include questions that are more conversational, lack detailed case descriptions, and are relatively short in length. We use the consultation section from LawBench (Fei et al., 2023), which collects user queries from the Hualv website1 and answers provided by lawyers or legal consulting firms." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.53, + 0.884, + 0.706 + ], + "angle": 0, + "content": "To simulate the behavior of legal practitioners, we include questions that are more professional, often accompanied by detailed case descriptions, and generally longer. 
For this purpose, we use the open-ended question section from LexEval (Li et al., 2024), which consists of subjective questions from the National Uniform Legal Profession Qualification Examination. These questions are particularly challenging for LLMs, requiring them to understand the case fully and apply legal knowledge accurately to generate answers." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.708, + 0.885, + 0.884 + ], + "angle": 0, + "content": "In terms of the corpus, we construct a comprehensive corpus from multiple sources, including law articles and precedent cases. Specifically, for law articles, we collect approximately 50,000 documents from LexiLaw\\(^2\\), covering areas such as Civil Law, Criminal Law, and judicial interpretations. For precedent cases, we include both criminal and civil cases. Criminal cases are sourced from the LeCaRD legal retrieval dataset (Ma et al., 2021b), ELAM (Yu et al., 2022b), and civil cases from the CAIL legal summary" + }, + { + "type": "page_footnote", + "bbox": [ + 0.533, + 0.894, + 0.644, + 0.907 + ], + "angle": 0, + "content": "\\(^{1}\\)www.66law.com" + }, + { + "type": "page_footnote", + "bbox": [ + 0.533, + 0.907, + 0.773, + 0.921 + ], + "angle": 0, + "content": "\\(^{2}\\)https://github.com/CSHaitao/LexiLaw" + }, + { + "type": "list", + "bbox": [ + 0.533, + 0.894, + 0.773, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.928, + 0.526, + 0.941 + ], + "angle": 0, + "content": "11185" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.119, + 0.083, + 0.498, + 0.134 + ], + "angle": 0, + "content": "
Dataset#QLenQLenAQ Type
Layperson50057.62107.40Question
Practitioner500618.96193.46Case + Question
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.144, + 0.49, + 0.202 + ], + "angle": 0, + "content": "Table 1: Dataset statistics. #Q indicates the number of questions, \\(\\mathrm{Len}_Q\\) and \\(\\mathrm{Len}_A\\) denote the average lengths of questions and gold answers, and Q Type refers to the question type." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.223, + 0.49, + 0.416 + ], + "angle": 0, + "content": "dataset, LJP-MSJudge (Ma et al., 2021a), and the pre-training data of fuzi.mingcha (Wu et al., 2023a). As a supplement to precedent cases, we also incorporate question-and-answer pairs from fine-tuning datasets of legal LLMs as part of the precedent cases. These QA pairs are collected from DISC-LawLLM (Yue et al., 2023), LawGPT_zh (Liu et al., 2023), and HanFei (He et al., 2023). In total, the constructed corpus contains approximately 500,000 documents, ensuring sufficient coverage of both law articles and precedent cases to support diverse legal tasks." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.427, + 0.219, + 0.441 + ], + "angle": 0, + "content": "4 Method" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.452, + 0.329, + 0.468 + ], + "angle": 0, + "content": "4.1 Response Generation" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.472, + 0.469, + 0.488 + ], + "angle": 0, + "content": "We consider two types of methods in this study." 
+ }, + { + "type": "text", + "bbox": [ + 0.113, + 0.489, + 0.487, + 0.537 + ], + "angle": 0, + "content": "Citation-Guided Generation (CGG) produces response \\( y_{cgg} \\) given a user question \\( x \\) by referring retrieved relevant document(s) \\( D_R \\):" + }, + { + "type": "equation", + "bbox": [ + 0.204, + 0.545, + 0.487, + 0.561 + ], + "angle": 0, + "content": "\\[\ny _ {\\mathrm {c g g}} = f _ {\\mathrm {L L M}} \\left(x, D _ {R}, p _ {1}\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.569, + 0.489, + 0.616 + ], + "angle": 0, + "content": "where \\( f_{\\mathrm{LLM}} \\) denotes a open-domain or a legal specific LLM; \\( p_1 \\) is the direct generation prompt. All prompt settings are detailed in Appendix A." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.617, + 0.489, + 0.697 + ], + "angle": 0, + "content": "Answer Refinement Generation (ARG) is a two-stage method that generates the final response \\( y_{\\mathrm{arg}} \\) by refining the LLM's initial response \\( y_{\\mathrm{init}} \\) through the retrieval and incorporation of reference information. This process can be formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.221, + 0.706, + 0.487, + 0.721 + ], + "angle": 0, + "content": "\\[\ny _ {\\text {i n i t}} = f _ {\\mathrm {L L M}} (x, p _ {2}), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.73, + 0.49, + 0.794 + ], + "angle": 0, + "content": "where \\( p_2 \\) is the prompt instructing the LLM to directly generate an initial response without reference information. We refer to this step as CloseBook. 
The initial response \\( y_{\\mathrm{init}} \\) is then refined as:" + }, + { + "type": "equation", + "bbox": [ + 0.195, + 0.802, + 0.487, + 0.818 + ], + "angle": 0, + "content": "\\[\ny _ {\\text {a r g}} = f _ {\\mathrm {L L M}} \\left(y _ {\\text {i n i t}}, D _ {R}, p _ {3}\\right), \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.826, + 0.487, + 0.857 + ], + "angle": 0, + "content": "where \\(p_3\\) is the prompt guiding the LLM to refine the \\(y_{\\mathrm{init}}\\) using the retrieved documents \\(D_R\\)." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.858, + 0.49, + 0.922 + ], + "angle": 0, + "content": "Laypersons and practitioners interact with LLMs differently and have distinct expectations for citations. When \\( x \\) is submitted by a layperson, the corresponding \\( D_{R} \\) consists of relevant law articles. In" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.085, + 0.883, + 0.15 + ], + "angle": 0, + "content": "contrast, when \\( x \\) is submitted by a practitioner, the corresponding \\( D_R \\) includes both relevant law articles and precedent cases. The process for retrieving \\( D_R \\) from \\( D \\) is detailed in the next subsection." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.161, + 0.698, + 0.175 + ], + "angle": 0, + "content": "4.2 Citation Retrieval" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.182, + 0.884, + 0.36 + ], + "angle": 0, + "content": "We explore state-of-the-art open-domain dense retriever BGE (Xiao et al., 2023), along with two legal-specific dense retrievers, CriminalBERT (Zhong et al., 2019) and Civil-BERT (Zhong et al., 2019). We also investigate two types of retrieval queries: \\( x \\) (the user question alone, ARG-Q) and \\( [x; y_{\\mathrm{init}}] \\) (the concatenation of the user query \\( x \\) and the initial response \\( y_{\\mathrm{init}} \\), where \\( [] \\) denotes the concatenation operation, ARG-QA). 
The impact of different retrieval models on performance will be analyzed in the experiments." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.372, + 0.719, + 0.386 + ], + "angle": 0, + "content": "4.3 Citation Attachment" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.393, + 0.884, + 0.472 + ], + "angle": 0, + "content": "Building on the retrieved citations, this subsection outlines the process of attaching these law articles or precedents to specific sentences in the LLM-generated responses. This process involves answering two key questions:" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.474, + 0.884, + 0.666 + ], + "angle": 0, + "content": "What kind of sentences can be associated with citations? We utilize co-occurring words and legal entity extraction to identify sentences that explicitly reference legal concepts, actions, or terms relevant to the retrieved citations. Specifically, we construct a pool of legal terminologies using THUOCL3 and LaWGPT (Zhou et al., 2024). A sentence is considered eligible if it contains any of the terminologies from this pool. Additionally, we use SpaCy (Honnibal et al., 2020) to extract legal entities from each sentence. If a sentence includes legal entities, it is also deemed eligible for citation attachment." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.668, + 0.884, + 0.764 + ], + "angle": 0, + "content": "How are citations attached to the identified sentences? If a sentence is deemed eligible for citation attachment, we associate it with retrieved citations as follows. 
For the laypersons, the retrieved law article \\( c_{l} \\in D_{l} \\) is attached to the most relevant sentence \\( s_{k} \\in y \\):" + }, + { + "type": "equation", + "bbox": [ + 0.522, + 0.777, + 0.883, + 0.816 + ], + "angle": 0, + "content": "\\[\nC _ {\\text {L a y}} = \\left\\{\\left(s _ {k}, c _ {l}\\right) \\mid s _ {k} = \\underset {s _ {i} \\in y} {\\arg \\max } \\operatorname {s i m} \\left(s _ {i}, c _ {l}\\right) \\right\\}, \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.818, + 0.884, + 0.898 + ], + "angle": 0, + "content": "where \\((s_k, c_l)\\) represents attaching the reference \\(c_l\\) to the sentence \\(s_i\\), and \\(\\mathrm{sim}(\\cdot)\\) is computed using sentence-BERT (Reimers, 2019). We set \\(|C_{\\mathrm{Lay}}| = 1\\) because, typically, a layperson's query pertains to only one specific legal article. For practitioners," + }, + { + "type": "page_footnote", + "bbox": [ + 0.53, + 0.906, + 0.761, + 0.921 + ], + "angle": 0, + "content": "3https://github.com/thunlp/THUOCL" + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.928, + 0.526, + 0.941 + ], + "angle": 0, + "content": "11186" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.49, + 0.149 + ], + "angle": 0, + "content": "we attach the retrieved law article in the same way as for laypersons. 
Additionally, we associate the retrieved precedent cases \\( c_{c} \\in D_{c} \\) with each \\( s_{i} \\in y \\), which is formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.122, + 0.157, + 0.49, + 0.215 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} C _ {\\text {P r a}} = \\left\\{\\left(s _ {k}, c _ {l}\\right) \\mid s _ {k} = \\underset {s _ {i} \\in y} {\\arg \\max } \\operatorname {s i m} \\left(s _ {i}, c _ {l}\\right) \\right\\} (5) \\\\ \\cup \\{(s _ {i}, c _ {c}) |, c _ {c} = \\underset {c _ {j} \\in D _ {c}} {\\arg \\max } \\operatorname {s i m} (s _ {i}, c _ {j}) \\}, (5) \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.225, + 0.489, + 0.259 + ], + "angle": 0, + "content": "where \\( |D_c| = 3 \\), meaning each response \\( y \\) can be associated with up to three precedents4." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.269, + 0.245, + 0.284 + ], + "angle": 0, + "content": "5 Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.295, + 0.49, + 0.374 + ], + "angle": 0, + "content": "CitaLaw provides a comprehensive evaluation framework incorporating metrics for fluency, correctness, and citation quality. This framework is divided into two levels of analysis: global level and the proposed syllogism level." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.375, + 0.49, + 0.569 + ], + "angle": 0, + "content": "Syllogism, a foundational framework in legal reasoning, comprises three key components: the major premise, the minor premise, and the conclusion. In our legal context, these correspond to the relevant law article or precedent case (major premise), the factual circumstances and actions of a specific case (minor premise), and the resulting legal decision (conclusion). By integrating this syllogistic framework, CitaLaw goes beyond surface-level correctness to evaluate the logical coherence and alignment of LLM-generated responses with established legal principles." 
+ }, + { + "type": "title", + "bbox": [ + 0.114, + 0.578, + 0.379, + 0.594 + ], + "angle": 0, + "content": "5.1 Fluency (Style Consistency)" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.599, + 0.49, + 0.777 + ], + "angle": 0, + "content": "To ensure the LLM-generated responses align with the user's requirements, the system must adapt its style based on the user's background. For laypersons, responses should avoid excessive technical jargon to ensure accessibility and comprehension. Conversely, responses for legal practitioners should adopt a formal and professional tone to maintain credibility and utility. To achieve this aim, we concatenate the user query and the LLM-generated response and apply MAUVE (Pillutla et al., 2021) to assess their style consistency." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.787, + 0.255, + 0.801 + ], + "angle": 0, + "content": "5.2 Correctness" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.807, + 0.49, + 0.888 + ], + "angle": 0, + "content": "At the global level, we use established metrics ROUGE (Lin, 2004) and BERTScore (Zhang et al., 2019). ROUGE measures word-level overlap between the generated and labeled responses, with scores reported for ROUGE-1, ROUGE-2, and" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.885, + 0.166 + ], + "angle": 0, + "content": "ROUGE-L. BERTScore captures semantic similarity between the generated and labeled responses, and we report the F-score (BERT-F) for evaluation. These metrics assess the overall correctness of LLM-generated responses." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.168, + 0.886, + 0.345 + ], + "angle": 0, + "content": "At the syllogism level, we leverage the Qwen2 (Yang et al., 2024) to extract key components, including the circumstances, illegal acts, and legal decisions. 
We use sentence-BERT (Reimers, 2019) to measure the alignment between the labeled responses and the generated outputs across these dimensions, resulting in \\(\\mathrm{Correct}_{\\mathrm{c}}\\), \\(\\mathrm{Correct}_{\\mathrm{a}}\\), and \\(\\mathrm{Correct}_{\\mathrm{d}}\\). This syllogism-level evaluation allows us to assess the logical coherence of the responses, ensuring that they align with the underlying legal reasoning principles." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.362, + 0.685, + 0.378 + ], + "angle": 0, + "content": "5.3 Citation Quality" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.388, + 0.885, + 0.644 + ], + "angle": 0, + "content": "As previously discussed, we assume that a question submitted by laypersons typically corresponds to a specific law article. Therefore, at the global level, we evaluate the citation quality of the retrieved law article (premise) by measuring its entailment with the associated sentence in the LLM's response (hypothesis). Specifically, we use an NLI model to compute \\( \\text{Cita}_{\\text{Law}} \\), which quantifies the degree to which the law article entails the attached sentence. This metric reflects how effectively the response aligns with the cited law article. We employ DISC-LawLLM (Yue et al., 2023) as the NLI model due to its strong agreement with human evaluations (as discussed in Sec. 6.3) and its superior performance compared to other NLI models (as detailed in Sec. 6.5)." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.647, + 0.886, + 0.84 + ], + "angle": 0, + "content": "At the syllogism level, we evaluate the quality of precedent case citations by examining three key components: circumstances, illegal acts, and legal decisions. After extracting these elements from both the retrieved cases and the associated sentence in the LLM's response, we utilize DISC-LawLLM to assess the entailment for each component. 
This evaluation yields three distinct scores: \\( \\text{Cita}_{\\text{c}} \\) for circumstances, \\( \\text{Cita}_{\\text{a}} \\) for illegal acts, and \\( \\text{Cita}_{\\text{d}} \\) for legal decisions, providing a more detailed and nuanced assessment of citation quality within the syllogism framework." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.859, + 0.656, + 0.876 + ], + "angle": 0, + "content": "6 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.89, + 0.884, + 0.922 + ], + "angle": 0, + "content": "We conduct extensive experiments on our CitaLaw using the proposed two-level evaluation methods." + }, + { + "type": "page_footnote", + "bbox": [ + 0.113, + 0.895, + 0.488, + 0.922 + ], + "angle": 0, + "content": "4 Considering the input window size of LLMs, we set up to retrieve 3 precedent cases." + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.928, + 0.526, + 0.941 + ], + "angle": 0, + "content": "11187" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.119, + 0.083, + 0.88, + 0.276 + ], + "angle": 0, + "content": "
MetricFluencyCorrectnessCitationAll
CategoryModelMauveRouge-1Rouge-2Rouge-LBERT-FCorrecteCorrectaCorrectdCitaLawAvg
Llama3 (Llam3-8B-Instruct)CloseBook22.6316.471.9513.3458.4673.0568.2466.8767.3843.15
CGG61.0123.976.0517.9165.9467.2977.3174.9586.7053.46
ARG-Q61.2723.175.6517.8364.2369.0475.4574.4779.1052.24
ARG-QA51.8323.736.9618.5364.8471.3774.8174.6680.8051.95
Qwen2 (Qwen2-7B-Instruct)CloseBook21.0415.292.2711.3158.3970.8971.7169.9372.3543.69
CGG75.1022.264.7715.4165.2867.5078.6277.8277.5953.82
ARG-Q66.5520.864.5015.4264.5966.9677.8275.6681.4852.65
ARG-QA66.8021.734.7816.3464.8569.3176.3575.0582.8353.11
Legal LLM (CGG)DISC-LawLLM72.7022.464.1415.4865.0665.2178.5576.1783.4653.69
fuzi.mingcha56.5824.545.7017.4865.8663.2879.5677.9481.6452.51
LexiLaw71.8924.966.2518.9165.6868.8978.1276.7282.4254.87
Tailing13.9515.934.1312.8959.4772.0069.1168.3882.6744.28
zhihai37.5020.984.5913.6964.5467.7577.6876.9977.1648.99
LawGPT_zh51.6023.335.2816.1765.1463.7279.4377.5286.1852.04
Hanfei51.1223.955.1918.7665.1270.8375.0174.2176.9751.24
" + }, + { + "type": "table_caption", + "bbox": [ + 0.146, + 0.287, + 0.848, + 0.302 + ], + "angle": 0, + "content": "Table 2: Performance comparisons on the Layperson dataset. The best performance is indicated in bold." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.327, + 0.335, + 0.344 + ], + "angle": 0, + "content": "6.1 Experimental Settings" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.348, + 0.317, + 0.362 + ], + "angle": 0, + "content": "6.1.1 Evaluated Models" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.367, + 0.49, + 0.576 + ], + "angle": 0, + "content": "We selected two categories of LLMs for testing: The legal LLMs include (1) fuzi.mingcha (6B) (Wu et al., 2023a), (2) LexiLaw5 (6B), (3) Tailing6 (7B), (4) DISC-LawLLM (13B) (Yue et al., 2023), (5) zhihai (7B) (Wu et al.), (6) LawGPT_zh (6B) (Liu et al., 2023), (7) HanFei (7B) (He et al., 2023). The open-domain LLMs include Qwen2 (7B) (Yang et al., 2024) and Llama3 (8B) (AI@Meta, 2024). For these models, we tested all methods mentioned in Sec. 4, including: (1) CloseBook, (2) CGG, (3) ARG-Q and (4) ARG-QA. For the legal LLMs, we generate responses using CGG. Appendix B has the details." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.585, + 0.36, + 0.6 + ], + "angle": 0, + "content": "6.1.2 Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.604, + 0.49, + 0.716 + ], + "angle": 0, + "content": "Our implementation is based on the Huggingface Transformers library (Wolf et al., 2020) with PyTorch. We use bge-base-zh-v1.5 (Xiao et al., 2023) as the retrieval model and conduct all experiments on Nvidia A6000 GPUs. Additional details are provided in Appendix C and https://github.com/ke-01/CitaLaw." 
+ }, + { + "type": "title", + "bbox": [ + 0.114, + 0.726, + 0.266, + 0.74 + ], + "angle": 0, + "content": "6.2 Main Results" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.747, + 0.488, + 0.795 + ], + "angle": 0, + "content": "The results on the Layperson and Practitioner datasets are presented in Table 2 and Table 3. We analyze the results from three perspectives:" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.804, + 0.45, + 0.819 + ], + "angle": 0, + "content": "6.2.1 Performance of Open-Domain LLM" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.823, + 0.489, + 0.887 + ], + "angle": 0, + "content": "Legal references improve the response quality. Compared to CloseBook, the overall performance in CGG, ARG-Q, and ARG-QA has improved. This indicates that incorporating references into the" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.327, + 0.884, + 0.392 + ], + "angle": 0, + "content": "LLM helps it better understand both the question and the required direction for the answer, thereby enhancing performance in terms of style consistency, correctness, and citation quality." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.395, + 0.884, + 0.539 + ], + "angle": 0, + "content": "CGG achieves better response quality. We observe that CGG achieves optimal performance, especially response correctness, suggesting that incorporating legal references into the LLM input is more effective than refining the LLM's response. This is because including legal knowledge as input allows the LLM to consider relevant context when generating replies, whereas refining the response might lead to excessive alterations." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.544, + 0.884, + 0.688 + ], + "angle": 0, + "content": "ARG improves the alignment of responses and references. We can observe that ARG outperforms CGG in citation-related metrics overall. 
This is because CGG merely incorporates reference information as input, which may lead the model to overlook some reference details during the generation process. In contrast, ARG modifies the answer based on the references after generation, making it easier to ensure the completeness of citations." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.693, + 0.884, + 0.789 + ], + "angle": 0, + "content": "Chinese data fine-tuning can bring benefits. Both the Layperson and Practitioner datasets are Chinese datasets. Qwen2 (Fine-tuning on more Chinese data) achieved better performance than Llama3, demonstrating the benefits of using Chinese data for fine-tuning." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.794, + 0.884, + 0.921 + ], + "angle": 0, + "content": "CloseBook tends to state circumstances. CloseBook performs better in terms of correctness regarding circumstances compared to the other dimensions. This suggests that when judicial knowledge references are not used, the LLM is more likely to repeat the circumstances itself, rather than providing an appropriate response to the illegal acts and the legal decision." + }, + { + "type": "page_footnote", + "bbox": [ + 0.136, + 0.893, + 0.378, + 0.907 + ], + "angle": 0, + "content": "5https://github.com/CSHaitao/LexiLaw" + }, + { + "type": "page_footnote", + "bbox": [ + 0.136, + 0.907, + 0.463, + 0.921 + ], + "angle": 0, + "content": "\\(^{6}\\)https://github.com/DUTIR-LegalIntelligence/Tailing" + }, + { + "type": "list", + "bbox": [ + 0.136, + 0.893, + 0.463, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.928, + 0.526, + 0.941 + ], + "angle": 0, + "content": "11188" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.119, + 0.08, + 0.88, + 0.246 + ], + "angle": 0, + "content": "
MetricFluencyCorrectnessCitationAll
CategoryModelMauveRouge-1Rouge-2Rouge-LBERT-F\\( Correct_c \\)\\( Correct_a \\)\\( Correct_d \\)\\( Cita_{Law} \\)\\( Cita_c \\)\\( Cita_a \\)\\( Cita_d \\)Avg
Llama3 (Llam3-8B-Instruct)CloseBook23.8123.057.2919.2362.8376.3071.0570.3263.4966.9568.8365.4651.55
CGG36.3726.157.8419.5565.6067.1976.3677.7373.5868.2367.8767.6554.51
ARG-Q42.6520.395.0715.7562.8270.4973.6772.0068.6169.4870.5168.3453.31
ARG-QA36.9418.644.5614.6361.5071.0772.3870.3269.4068.9570.4269.5152.36
Qwen2 (Qwen2-7B-Instruct)CloseBook61.9130.4410.5423.5367.5574.3579.8478.5268.5568.0370.3069.7158.61
CGG39.6631.0110.7523.4369.0673.4980.1181.1170.3767.8269.5370.0157.20
ARG-Q41.0220.575.1415.6263.3167.8474.7173.9473.0168.9673.2073.6454.25
ARG-QA21.9716.673.0612.4760.7067.4971.1670.8871.7669.0171.0471.3350.63
Legal LLM (CGG )DISC-LawLLM38.1121.376.7516.9660.8473.4272.1471.7963.9267.4268.2265.4552.20
fuzi.mingcha66.5528.959.5122.6967.0670.7376.6677.4765.9266.9469.2868.6957.54
LexiLaw57.7429.018.9323.8365.6370.3676.6775.9765.2866.9368.8968.0356.44
Tailing50.1626.529.1622.4465.3575.9673.8370.3064.6566.9467.5666.0954.91
zhihai26.2921.386.0015.5364.4765.5976.3877.3767.9366.3063.1759.8250.85
LawGPT_zh47.1029.168.9222.5567.6469.4879.3780.2366.9068.3867.5568.9456.35
HanFei75.7232.9812.4626.9168.7273.2578.6378.1167.0367.4568.6367.7359.80
" + }, + { + "type": "table_caption", + "bbox": [ + 0.143, + 0.257, + 0.851, + 0.271 + ], + "angle": 0, + "content": "Table 3: Performance comparisons on the Practitioner dataset. The best performance is indicated in bold." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.287, + 0.391, + 0.302 + ], + "angle": 0, + "content": "6.2.2 Performance of Legal LLM" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.307, + 0.488, + 0.419 + ], + "angle": 0, + "content": "Law article training achieves gains. In the Layperson dataset, LexiLaw achieves optimal performance overall. This is because the questions in the Layperson dataset often require only law articles to provide answers clearly, and LexiLaw's training explicitly used law articles, allowing it to effectively handle such questions." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.42, + 0.49, + 0.501 + ], + "angle": 0, + "content": "Full-parameter training offers advantages. Hanfei achieves the best results in the Practitioner dataset, as it is a fully parameter-trained legal LLM. Full-parameter fine-tuning allows it to effectively simulate a legal expert, thus performing well." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.502, + 0.49, + 0.566 + ], + "angle": 0, + "content": "Syllogistic reasoning is useful. fuzi.mingcha performs well on syllogism evaluation metrics, particularly on the Layperson dataset. This is due to its fine-tuning of syllogism judgment data." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.577, + 0.453, + 0.592 + ], + "angle": 0, + "content": "6.2.3 Open Domain LLM vs. Legal LLM" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.597, + 0.49, + 0.725 + ], + "angle": 0, + "content": "Impact of LLM Backbone. We can observe that some legal LLMs perform worse than open-domain LLMs. This is because Qwen2 and Llama3 are the latest open-domain LLMs, and their overall capabilities have significantly improved. 
In contrast, most legal LLMs are built on earlier generations of LLMs, which have weaker base models, leading to poorer overall performance." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.727, + 0.49, + 0.84 + ], + "angle": 0, + "content": "Effectiveness of legal knowledge. Overall, the upper limit of legal LLMs is higher than that of open-domain LLMs. This is because legal LLMs, after extensive training on legal knowledge, have developed strong capabilities in solving legal issues. As a result, even though their base models are outdated, they can still perform effectively." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.852, + 0.312, + 0.866 + ], + "angle": 0, + "content": "6.3 Human Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.874, + 0.49, + 0.922 + ], + "angle": 0, + "content": "In this section, we compared the syllogism-level metric with human evaluation. Details of legal human annotators can be found in Appendix D." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.287, + 0.883, + 0.351 + ], + "angle": 0, + "content": "The syllogism-level evaluation of citation quality is divided into two stages: Stage 1: Extracting key components. Stage 2: Assessing the entailment using an NLI model." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.353, + 0.885, + 0.512 + ], + "angle": 0, + "content": "Stage 1: We randomly selected 50 questions each from the Layperson and Practitioner datasets. After splitting the cases into individual clauses, annotators were provided with the full case and its clauses. They do a three-class classification of each clause. The Qwen2's annotations were then compared with human annotations. The Cohen's kappa coefficient (Cohen, 1960) of 0.7876 indicates substantial agreement (0.61-0.80) between the model's and human annotators' labels." 
+ }, + { + "type": "text", + "bbox": [ + 0.508, + 0.514, + 0.884, + 0.756 + ], + "angle": 0, + "content": "Stage 2: We randomly selected 50 questions from the Practitioner dataset and used Qwen2 to extract key components of pairs of responses and citations. Annotators assessed the degree to which the citations entailed the corresponding response components using a 5-point scale (1: low, 5: high), with descriptions provided in Appendix D. The entailment probabilities given by DISC-LawLLM, which range from 0 to 1, were scaled to the same 1-5 range by multiplying by 5 and rounding. We then compared the scaled model outputs with the human evaluations and calculated Cohen's kappa coefficient. The kappa score of 0.6923 again indicates substantial agreement (0.61-0.80) between the model and human judgments." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.77, + 0.848, + 0.785 + ], + "angle": 0, + "content": "6.4 Effects on Different Retrieval Models" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.793, + 0.883, + 0.922 + ], + "angle": 0, + "content": "We selected BGE as the retrieval model in the main experiment. In this section, we explore the impact of using different retrieval models. Specifically, we evaluate Criminal-BERT (Zhong et al., 2019) and Civil-BERT (Zhong et al., 2019), two legal domain models based on BERT, fine-tuned on large-scale criminal and civil law documents, respectively. We replaced the retrieval model and tested the CGG" + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.928, + 0.526, + 0.941 + ], + "angle": 0, + "content": "11189" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.121, + 0.084, + 0.475, + 0.187 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.114, + 0.199, + 0.49, + 0.242 + ], + "angle": 0, + "content": "Figure 3: Performance of different retrieval models. Lay is short for Layperson dataset and Pra is short for Practitioner dataset." 
+ }, + { + "type": "image", + "bbox": [ + 0.122, + 0.248, + 0.48, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.131, + 0.346, + 0.471, + 0.36 + ], + "angle": 0, + "content": "(a) Methods for CitaLaw metric with Layperson dataset." + }, + { + "type": "image", + "bbox": [ + 0.122, + 0.361, + 0.48, + 0.454 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.136, + 0.459, + 0.466, + 0.472 + ], + "angle": 0, + "content": "(b) Metrics for CGG method with Practitioner dataset." + }, + { + "type": "image_caption", + "bbox": [ + 0.114, + 0.483, + 0.487, + 0.511 + ], + "angle": 0, + "content": "Figure 4: The performance of different NLI models when the LLM is Llama." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.527, + 0.489, + 0.576 + ], + "angle": 0, + "content": "method on the Layperson dataset. The average results across all metrics are shown in Figure 3, with detailed metric results provided in Appendix E." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.578, + 0.49, + 0.835 + ], + "angle": 0, + "content": "As shown, on the Layperson dataset, BGE significantly outperforms the other two models. This is because the dataset consists of questions from laypersons, which are more everyday in nature. In contrast, the two legal BERT models, having been trained extensively on legal cases, show a distributional mismatch with open-domain data, leading to poorer performance. On the Practitioner dataset, which features professional legal questions, BGE still achieves the best performance. This can be attributed to its extensive training on diverse data, likely including some legal data, and its use of more advanced model architectures and techniques. However, the two legal BERT models perform comparably to BGE, showcasing the benefits of their specialized training on legal data." 
+ }, + { + "type": "title", + "bbox": [ + 0.114, + 0.85, + 0.414, + 0.865 + ], + "angle": 0, + "content": "6.5 Effects on Different NLI Models" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.874, + 0.49, + 0.922 + ], + "angle": 0, + "content": "We opted to use legal LLMs as the NLI model in our experiments, as they support longer input lengths and incorporate substantial legal knowl" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.885, + 0.198 + ], + "angle": 0, + "content": "edge. In Section 6.3, we verified that DISC-LawLLM and human achieved good consistency. In this section, we explore the performance of several legal LLMs in the NLI task. Besides DISC-LawLLM, we evaluated LexiLaw, LawGPT_zh, and Hanfei, which demonstrated strong performance in the main experiments." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.199, + 0.885, + 0.327 + ], + "angle": 0, + "content": "In Figures 4 (a), we examined the ability of four legal LLMs to evaluate Llama across the Close-Book, CGG, ARG-Q, and ARG-QA methods using the CitaLaw metric on the Layperson dataset. In Figures 4 (b), we investigated the performance of four legal LLMs in evaluating the CGG method applied to Llama across the metrics CitaLaw, CitaC, Cita a, and Cita d on the Practitioner dataset." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.33, + 0.885, + 0.603 + ], + "angle": 0, + "content": "We can observe that Hanfei provides lower entailment scores across both datasets. This is because it is a fully parameter-tuned legal LLM, which results in a diminished capability to handle the general task of entailment reasoning. Additionally, we found that on the Practitioner dataset, other legal LLMs achieved results closer to those of DISC-LawLLM, while on the Layperson dataset, the performance gap was significantly larger. This is because the Practitioner dataset is more judicially oriented, aligning with the knowledge seen during the fine-tuning of legal LLMs. 
In contrast, due to limited training on general-purpose data, other legal LLMs struggle to accurately determine entailment relationships in the Layperson dataset. Similar conclusions can be drawn when the LLM is Qwen in Appendix F." + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.62, + 0.64, + 0.635 + ], + "angle": 0, + "content": "7 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.648, + 0.885, + 0.922 + ], + "angle": 0, + "content": "We introduce CitaLaw, a benchmark designed to explore LLMs to generate responses with citations in legal scenarios, thus improving the trustworthiness of LLMs. CitaLaw includes two categories of questions: laypersons and practitioners. For laypersons, CitaLaw provides law articles as citations to help them understand the LLM's response clearly. For practitioners, both law articles and precedent cases are provided as citations, better supporting their needs for complex reasoning. CitaLaw offers global-level and syllogism-level metrics and supports the integration of citations into LLM inputs to guide generation or using citations to refine LLM's response. We conducted extensive experiments on 7 legal-domain LLMs and 2 popular open-domain LLMs, providing valuable insights for the deployment of LLMs in legal scenarios." + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.928, + 0.526, + 0.941 + ], + "angle": 0, + "content": "11190" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.114, + 0.085, + 0.251, + 0.099 + ], + "angle": 0, + "content": "8 Limitations" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.11, + 0.488, + 0.174 + ], + "angle": 0, + "content": "While Citalaw provides a robust framework for evaluating LLMs in legal scenarios, several limitations should be acknowledged to guide future extensions of this work." 
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.175, + 0.489, + 0.304 + ], + "angle": 0, + "content": "First, the datasets used in CitaLaw are primarily sourced from the Chinese legal system, which may limit the benchmark's applicability to other jurisdictions. However, by incorporating both law articles and precedent cases to align with the principles of civil and common law systems, CitaLaw demonstrates strong potential for adaptation to diverse legal contexts." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.304, + 0.49, + 0.433 + ], + "angle": 0, + "content": "Second, the syllogism-based evaluation framework simplifies legal reasoning into three key components: the major premise (law articles or precedent cases), the minor premise (case circumstances and actions), and the conclusion (legal decision). While this structured approach is effective for systematic evaluation, real-world legal reasoning may encompass additional complexities." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.445, + 0.347, + 0.459 + ], + "angle": 0, + "content": "9 Ethical Considerations" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.47, + 0.489, + 0.63 + ], + "angle": 0, + "content": "Data Privacy and Confidentiality. The legal datasets used in CitaLaw include law articles, precedent cases, user questions, and golden responses. These documents were sourced from publicly available databases, ensuring compliance with data privacy and confidentiality standards. We carefully reviewed the datasets to ensure that no personally identifiable information (PII) or sensitive details about individuals were inadvertently included." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.632, + 0.489, + 0.728 + ], + "angle": 0, + "content": "Alignment with Legal Standards. Legal AI systems must align with the ethical and professional standards of the legal domain. 
Our work emphasizes the need for syllogism-based reasoning to ensure logical consistency and adherence to legal principles." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.729, + 0.489, + 0.841 + ], + "angle": 0, + "content": "Transparency and Explainability. Legal reasoning must be transparent and interpretable, particularly when used in sensitive or high-stakes domains. The metrics proposed in CitaLaw, including syllogism-based evaluation, aim to improve explainability by breaking down the reasoning process into logical components." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.842, + 0.489, + 0.921 + ], + "angle": 0, + "content": "Responsibility in System Deployment. Citalaw is intended as a research benchmark and should not be directly deployed in high-stakes legal decision-making without human oversight. While the benchmark aims to enhance the trustworthiness" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.883, + 0.182 + ], + "angle": 0, + "content": "of LLM-generated responses, legal professionals should always verify the citations and legal interpretations provided by such systems. Misuse of automated systems without adequate validation could lead to inaccurate legal advice or unintended consequences in legal proceedings." + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.194, + 0.722, + 0.21 + ], + "angle": 0, + "content": "10 Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.219, + 0.885, + 0.444 + ], + "angle": 0, + "content": "This work was funded by the National Key R&D Program of China (2023YFA1008704), the National Natural Science Foundation of China (62472426). Supported by fund for building world-class universities (disciplines) of Renmin University of China. Work partially done at Beijing Key Laboratory of Research on Large Models and Intelligent Governance, and Engineering Research Center of Next-Generation Intelligent Search and Recommendation, MOE. 
Supported by the Beijing Social Science Foundation Planning Project (Grant No. 24GLC041), the Fundamental Research Funds for the Central Universities in UIBE (Grant No. 24QN06, 24PYTS22)." + }, + { + "type": "title", + "bbox": [ + 0.511, + 0.471, + 0.61, + 0.485 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.493, + 0.883, + 0.535 + ], + "angle": 0, + "content": "Abdelrahman Abdallah, Bhawna Piryani, and Adam Jatowt. 2023. Exploring the state of the art in legal qa systems. Journal of Big Data, 10(1):127." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.543, + 0.774, + 0.557 + ], + "angle": 0, + "content": "AI@Meta. 2024. Llama 3 model card." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.567, + 0.885, + 0.634 + ], + "angle": 0, + "content": "Andrew Blair-Stanek, Nils Holzenberger, and Benjamin Van Durme. 2023. Can gpt-3 perform statutory reasoning? In Proceedings of the Nineteenth International Conference on Artificial Intelligence and Law, pages 22-31." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.643, + 0.885, + 0.683 + ], + "angle": 0, + "content": "Jacob Cohen. 1960. A coefficient of agreement for nominal scales. Educational and psychological measurement, 20(1):37-46." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.693, + 0.885, + 0.759 + ], + "angle": 0, + "content": "Yongfu Dai, Duanyu Feng, Jimin Huang, Haochen Jia, Qianqian Xie, Yifang Zhang, Weiguang Han, Wei Tian, and Hao Wang. 2023. Laiw: A chinese legal large language models benchmark (a technical report). arXiv preprint arXiv:2310.05620." + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.768, + 0.885, + 0.835 + ], + "angle": 0, + "content": "Haolin Deng, Chang Wang, Xin Li, Dezhang Yuan, Junlang Zhan, Tianhua Zhou, Jin Ma, Jun Gao, and Ruifeng Xu. 2024. Websites: Attributed query-focused summarization on chinese web search results with citations. arXiv preprint arXiv:2403.01774." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.844, + 0.885, + 0.884 + ], + "angle": 0, + "content": "Jacob Devlin. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.894, + 0.885, + 0.922 + ], + "angle": 0, + "content": "Zhiwei Fei, Xiaoyu Shen, Dawei Zhu, Fengzhe Zhou, Zhuo Han, Songyang Zhang, Kai Chen, Zongwen" + }, + { + "type": "list", + "bbox": [ + 0.51, + 0.493, + 0.885, + 0.922 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.928, + 0.524, + 0.941 + ], + "angle": 0, + "content": "11191" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.135, + 0.086, + 0.489, + 0.126 + ], + "angle": 0, + "content": "Shen, and Jidong Ge. 2023. Lawbench: Benchmarking legal knowledge of large language models. arXiv preprint arXiv:2309.16289." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.136, + 0.488, + 0.241 + ], + "angle": 0, + "content": "Luyu Gao, Zhuyun Dai, Panupong Pasupat, Anthony Chen, Arun Tejasvi Chaganty, Yicheng Fan, Vincent Zhao, Ni Lao, Hongrae Lee, Da-Cheng Juan, et al. 2023a. Rarr: Researching and revising what language models say, using language models. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 16477-16508." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.251, + 0.488, + 0.316 + ], + "angle": 0, + "content": "Tianyu Gao, Howard Yen, Jiatong Yu, and Danqi Chen. 2023b. Enabling large language models to generate text with citations. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 6465-6488." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.327, + 0.487, + 0.379 + ], + "angle": 0, + "content": "Wanwei He, Jiabao Wen, Lei Zhang, Hao Cheng, Bowen Qin, Yunshui Li, Feng Jiang, Junying Chen, Benyou Wang, and Min Yang. 2023. 
Hanfei-1.0. https://github.com/siat-nlp/HanFei." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.389, + 0.488, + 0.429 + ], + "angle": 0, + "content": "Matthew Honnibal, Ines Montani, Sofie Van Landeghem, and Adriane Boyd. 2020. spaCy: Industrial-strength Natural Language Processing in Python." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.44, + 0.487, + 0.491 + ], + "angle": 0, + "content": "Dongfang Li, Zetian Sun, Xinshuo Hu, Zhenyu Liu, Ziyang Chen, Baotian Hu, Aiguo Wu, and Min Zhang. 2023. A survey of large language models attribution. arXiv preprint arXiv:2311.03731." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.502, + 0.487, + 0.554 + ], + "angle": 0, + "content": "Haitao Li, You Chen, Qingyao Ai, Yueyue Wu, Ruizhe Zhang, and Yiqun Liu. 2024. Lexeval: A comprehensive chinese legal benchmark for evaluating large language models. Preprint, arXiv:2409.20288." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.565, + 0.486, + 0.605 + ], + "angle": 0, + "content": "Chin-Yew Lin. 2004. Rouge: A package for automatic evaluation of summaries. In Text summarization branches out, pages 74-81." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.615, + 0.487, + 0.666 + ], + "angle": 0, + "content": "Hongcheng Liu, Yusheng Liao, Yutong Meng, and Yuhao Wang. 2023. Xiezhi: Chinese law large language model. https://github.com/LiuHC0428/LAW_GPT." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.677, + 0.487, + 0.769 + ], + "angle": 0, + "content": "Luyao Ma, Yating Zhang, Tianyi Wang, Xiaozhong Liu, Wei Ye, Changlong Sun, and Shikun Zhang. 2021a. Legal judgment prediction with multi-stage case representation learning in the real court setting. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 993-1002." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.779, + 0.487, + 0.858 + ], + "angle": 0, + "content": "Yixiao Ma, Yunqiu Shao, Yueyue Wu, Yiqun Liu, Ruizhe Zhang, Min Zhang, and Shaoping Ma. 2021b. Lecard: A legal case retrieval dataset for chinese law system. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 2342-2348." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.868, + 0.487, + 0.92 + ], + "angle": 0, + "content": "Chaitanya Malaviya, Subin Lee, Sihao Chen, Elizabeth Sieber, Mark Yatskar, and Dan Roth. 2024. Expertqa: Expert-curated questions and attributed answers. In Proceedings of the 2024 Conference of the North" + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.086, + 0.489, + 0.92 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.529, + 0.086, + 0.882, + 0.126 + ], + "angle": 0, + "content": "American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 3025-3045." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.136, + 0.882, + 0.214 + ], + "angle": 0, + "content": "Krishna Pillutla, Swabha Swayamdipta, Rowan Zellers, John Thickstun, Sean Welleck, Yejin Choi, and Zaid Harchaoui. 2021. Mauve: Measuring the gap between neural text and human text using divergence frontiers. Advances in Neural Information Processing Systems, 34:4816-4828." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.224, + 0.882, + 0.262 + ], + "angle": 0, + "content": "N Reimers. 2019. Sentence-bert: Sentence embeddings using siamese bert-networks. arXiv preprint arXiv:1908.10084." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.273, + 0.882, + 0.326 + ], + "angle": 0, + "content": "Jaromir Savelka, Kevin D Ashley, Morgan A Gray, Hannes Westermann, and Huihui Xu. 2023. Explaining legal concepts with augmented large language models (gpt-4). arXiv preprint arXiv:2306.09525." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.335, + 0.882, + 0.428 + ], + "angle": 0, + "content": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumont, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, et al. 2020. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 conference on empirical methods in natural language processing: system demonstrations, pages 38-45." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.436, + 0.882, + 0.515 + ], + "angle": 0, + "content": "Shiguang Wu, Zhongkun Liu, Zhen Zhang, Zheng Chen, Wentao Deng, Wenhao Zhang, Jiyuan Yang, Zhitao Yao, Yougang Lyu, Xin Xin, Shen Gao, Pengjie Ren, Zhaochun Ren, and Zhumin Chen. 2023a. fuzi.mingcha. https://github.com/irlab-sdu/fuzi.mingcha." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.525, + 0.882, + 0.563 + ], + "angle": 0, + "content": "Yiquan Wu, Yuhang Liu, Yifei Liu, Ang Li, Siying Zhou, and Kun Kuang. wisdominterrogatory. Available at GitHub." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.574, + 0.882, + 0.666 + ], + "angle": 0, + "content": "Yiquan Wu, Siying Zhou, Yifei Liu, Weiming Lu, Xiaozhong Liu, Yating Zhang, Changlong Sun, Fei Wu, and Kun Kuang. 2023b. Precedent-enhanced legal judgment prediction with llm and domain-model collaboration. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 12060-12075." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.676, + 0.882, + 0.728 + ], + "angle": 0, + "content": "Shitao Xiao, Zheng Liu, Peitian Zhang, Niklas Muennighoff, Defu Lian, and Jian-Yun Nie. 2023. C-pack: Packaged resources to advance general chinese embedding. arXiv preprint arXiv:2309.07597." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.738, + 0.882, + 0.92 + ], + "angle": 0, + "content": "An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu" + }, + { + "type": "list", + "bbox": [ + 0.512, + 0.086, + 0.882, + 0.92 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.929, + 0.525, + 0.941 + ], + "angle": 0, + "content": "11192" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.135, + 0.086, + 0.489, + 0.113 + ], + "angle": 0, + "content": "Cui, Zhenru Zhang, and Zhihao Fan. 2024. Qwen2 technical report. arXiv preprint arXiv:2407.10671." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.123, + 0.489, + 0.163 + ], + "angle": 0, + "content": "Fangyi Yu, Lee Quartey, and Frank Schilder. 2022a. Legal prompting: Teaching a language model to think like a lawyer. arXiv preprint arXiv:2212.01326." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.173, + 0.489, + 0.264 + ], + "angle": 0, + "content": "Weijie Yu, Zhongxiang Sun, Jun Xu, Zhenhua Dong, Xu Chen, Hongteng Xu, and Ji-Rong Wen. 2022b. Explainable legal case matching via inverse optimal transport-based rationale extraction. In Proceedings of the 45th international ACM SIGIR conference on research and development in information retrieval, pages 657-668." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.274, + 0.489, + 0.352 + ], + "angle": 0, + "content": "Shengbin Yue, Wei Chen, Siyuan Wang, Bingxuan Li, Chenchen Shen, Shujun Liu, Yuxuan Zhou, Yao Xiao, Song Yun, Xuanjing Huang, and Zhongyu Wei. 2023. Disc-lawllm: Fine-tuning large language models for intelligent legal services. Preprint, arXiv:2309.11325." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.363, + 0.489, + 0.414 + ], + "angle": 0, + "content": "Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q Weinberger, and Yoav Artzi. 2019. Bertscore: Evaluating text generation with bert. arXiv preprint arXiv:1904.09675." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.426, + 0.489, + 0.491 + ], + "angle": 0, + "content": "Haoxi Zhong, Chaojun Xiao, Cunchao Tu, Tianyang Zhang, Zhiyuan Liu, and Maosong Sun. 2020. Jecqa: a legal-domain question answering dataset. In Proceedings of the AAAI conference on artificial intelligence, volume 34, pages 9701-9708." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.501, + 0.489, + 0.542 + ], + "angle": 0, + "content": "Haoxi Zhong, Zhengyan Zhang, Zhiyuan Liu, and Maosong Sun. 2019. Open chinese language pretrained model zoo. Technical report." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.551, + 0.489, + 0.604 + ], + "angle": 0, + "content": "Zhi Zhou, Jiang-Xin Shi, Peng-Xiao Song, Xiao-Wen Yang, Yi-Xuan Jin, Lan-Zhe Guo, and Yu-Feng Li. 2024. Lawgpt: A chinese legal knowledge-enhanced large language model. Preprint, arXiv:2406.04614." 
+ }, + { + "type": "list", + "bbox": [ + 0.117, + 0.086, + 0.489, + 0.604 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.928, + 0.525, + 0.941 + ], + "angle": 0, + "content": "11193" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.115, + 0.084, + 0.314, + 0.101 + ], + "angle": 0, + "content": "A The Used Prompts" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.115, + 0.49, + 0.149 + ], + "angle": 0, + "content": "Figure 5 illustrates the prompts used in this paper, including \\( p_1 \\), \\( p_2 \\), \\( p_3 \\) in Eq. 1, Eq. 2 and Eq. 3." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.165, + 0.484, + 0.198 + ], + "angle": 0, + "content": "B More Details of Evaluated Models and Datasets" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.213, + 0.49, + 0.567 + ], + "angle": 0, + "content": "For the Legal LLMs, we choose (1) fuzi.mingcha (6B) (Wu et al., 2023a): It leverages unsupervised judicial corpora for training and uses syllogistic reasoning judgment data for fine-tuning. (2) LexiLaw7 (6B): It specifically utilizes legal articles and legal reference books for training. (3) Tailing8 (7B): It uses judicial text validation data, information extraction data, and judgment data for training. (4) DISC-LawLLM (13B) (Yue et al., 2023): In addition to fine-tuning with pairs, it also uses triplet data for fine-tuning to enhance the model's ability to leverage external knowledge. (5) zhihai (7B) (Wu et al.): It utilizes ChatGPT to modify the existing dataset and then performs secondary pre-training. (6) LawGPT_zh (6B) (Liu et al., 2023): It primarily uses scenario-based dialogues and knowledge-based question-answering data for fine-tuning based on LoRA. (7) HanFei (7B) (He et al., 2023): It is the first fully parameter-trained legal LLM in China. Because in the main experiment, CGG has the best overall performance, for the legal LLMs, we generate responses using CGG." 
+ }, + { + "type": "text", + "bbox": [ + 0.114, + 0.569, + 0.489, + 0.618 + ], + "angle": 0, + "content": "Table 4 and Table 5 are the website URLs and corresponding licenses of the evaluated models and datasets." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.636, + 0.434, + 0.654 + ], + "angle": 0, + "content": "C More Details on Implementation" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.667, + 0.49, + 0.828 + ], + "angle": 0, + "content": "Considering the length of legal texts and the input window for the LLMs is limited, all experiments in this paper are conducted using a zero-shot setting. We use the Chinese-performing-well Qwen2-1.5B (Yang et al., 2024)9 to complete the MAUVE calculations. For RGUGE, We use version 1.0.1 of ROUGE for calculation. For BERTScore, we use bert-base-chinese (Devlin, 2018)10 to compute it. Regarding sentence-BERT, we employ paraphrase-multilingual-MiniLM-L12-v2 (Reimers, 2019)11." + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.084, + 0.715, + 0.099 + ], + "angle": 0, + "content": "D Human Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.109, + 0.885, + 0.35 + ], + "angle": 0, + "content": "We hired four legal annotators from a Chinese university, all of whom have legal education backgrounds and are familiar with the cases in the dataset they need to annotate. We explained to the annotators that the data they annotated would be used for scientific research and paid them a reasonable remuneration based on local conditions. They are all graduate students from the judicial field, with practical experience in the legal profession. Two are male, two are female, aged between 24 and 30, and all have over five years of judicial theory study. Two annotators were responsible for the first stage of annotation, while the other two were responsible for the second stage, with all working together on the annotation process." 
+ }, + { + "type": "text", + "bbox": [ + 0.508, + 0.351, + 0.884, + 0.398 + ], + "angle": 0, + "content": "Table 6 shows a detailed description of each level used to evaluate the agreement of the NLI model with human evaluations." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.411, + 0.779, + 0.426 + ], + "angle": 0, + "content": "E Different Retrieval Models" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.436, + 0.885, + 0.677 + ], + "angle": 0, + "content": "Tables 7 and 8 present the performance of different retrieval models—Criminal-BERT, CivilBERT, and BGE—on each metric for the CGG method across the two datasets. It can be observed that when Llama3 and Qwen2 are used as LLMs, BGE achieves the best performance as the retrieval model. Comparing the two datasets, on the Layperson dataset, where the questions are more general, Criminal-BERT and Civil-BERT, which focus on legal cases, perform relatively poorly. In contrast, on the Practitioner dataset, despite no structural or training improvements, Criminal-BERT and CivilBERT achieve results comparable to BGE, highlighting the importance of legal knowledge in judicial QA tasks." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.678, + 0.884, + 0.725 + ], + "angle": 0, + "content": "The differences between the two datasets also underscore the significance of selecting an appropriate retrieval model." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.737, + 0.734, + 0.753 + ], + "angle": 0, + "content": "F Different NLI Models" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.763, + 0.885, + 0.859 + ], + "angle": 0, + "content": "Figures 6 (a) and (b) show the entailment scores given by four legal LLMs as NLI models under different methods (CloseBook, CGG, ARG-Q, ARGQA) and metrics(CitaLaw, CitaS, CitaB, and CitaC) when Qwen is used as the LLM. Similar conclusions to those in Section 6.5 can be drawn." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.137, + 0.844, + 0.379, + 0.857 + ], + "angle": 0, + "content": "7https://github.com/CSHaitao/LexiLaw" + }, + { + "type": "page_footnote", + "bbox": [ + 0.137, + 0.857, + 0.464, + 0.871 + ], + "angle": 0, + "content": "\\(^{8}\\)https://github.com/DUTIR-LegalIntelligence/Tailing" + }, + { + "type": "page_footnote", + "bbox": [ + 0.137, + 0.871, + 0.403, + 0.883 + ], + "angle": 0, + "content": "9https://huggingface.co/Qwen/Qwen2-1.5B" + }, + { + "type": "page_footnote", + "bbox": [ + 0.137, + 0.883, + 0.464, + 0.896 + ], + "angle": 0, + "content": "10https://huggingface.co/google-bert/bert-base-chinese" + }, + { + "type": "page_footnote", + "bbox": [ + 0.137, + 0.896, + 0.346, + 0.908 + ], + "angle": 0, + "content": "11 https://huggingface.co/sentence-" + }, + { + "type": "page_footnote", + "bbox": [ + 0.117, + 0.909, + 0.45, + 0.921 + ], + "angle": 0, + "content": "transformers/paraphrase-multilingual-MiniLM-L12-v2" + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.844, + 0.464, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.928, + 0.526, + 0.941 + ], + "angle": 0, + "content": "11194" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.131, + 0.089, + 0.444, + 0.166 + ], + "angle": 0, + "content": "Answer the question based on the provided law article and cite it appropriately. Only output the answer and citations, without including any additional content. When citing the law article, use [A1] at the end of the relevant sentence." 
+ }, + { + "type": "text", + "bbox": [ + 0.133, + 0.178, + 0.341, + 0.206 + ], + "angle": 0, + "content": "Below is the provided law article: Law article [A1]: {Law article 1}" + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.218, + 0.269, + 0.242 + ], + "angle": 0, + "content": "Question: {Question} Answer:" + }, + { + "type": "title", + "bbox": [ + 0.244, + 0.246, + 0.337, + 0.258 + ], + "angle": 0, + "content": "(a) Layperson" + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.263, + 0.427, + 0.29 + ], + "angle": 0, + "content": "Refine the text based on the references and only output the refined text." + }, + { + "type": "text", + "bbox": [ + 0.134, + 0.302, + 0.287, + 0.328 + ], + "angle": 0, + "content": "Text: {Answer} \nReference: {References}" + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.34, + 0.443, + 0.366 + ], + "angle": 0, + "content": "Refined text (only output the Refined text, without any additional content):" + }, + { + "type": "title", + "bbox": [ + 0.208, + 0.371, + 0.372, + 0.384 + ], + "angle": 0, + "content": "(c) Response Refinement" + }, + { + "type": "text", + "bbox": [ + 0.47, + 0.089, + 0.856, + 0.115 + ], + "angle": 0, + "content": "Answer the question based on the provided documents and cite them appropriately." + }, + { + "type": "text", + "bbox": [ + 0.47, + 0.116, + 0.836, + 0.14 + ], + "angle": 0, + "content": "Only output the answer and citations, without including any additional content." + }, + { + "type": "text", + "bbox": [ + 0.47, + 0.141, + 0.855, + 0.167 + ], + "angle": 0, + "content": "When citing precedent cases, use [C1], [C2], or [C3] at the end of the sentence. When citing the law article, use [A1]." 
+ }, + { + "type": "text", + "bbox": [ + 0.47, + 0.179, + 0.721, + 0.205 + ], + "angle": 0, + "content": "Below are the provided documents: \nPrecedent case [C1]: {Precedent case 1}" + }, + { + "type": "text", + "bbox": [ + 0.472, + 0.206, + 0.72, + 0.217 + ], + "angle": 0, + "content": "Precedent case [C2]: {Precedent case 2}" + }, + { + "type": "text", + "bbox": [ + 0.472, + 0.218, + 0.72, + 0.244 + ], + "angle": 0, + "content": "Precedent case [C3]: {Precedent case 3} \nLaw article [A1]: {Law article 1}" + }, + { + "type": "text", + "bbox": [ + 0.47, + 0.256, + 0.605, + 0.28 + ], + "angle": 0, + "content": "Question: {Question} Answer:" + }, + { + "type": "title", + "bbox": [ + 0.616, + 0.285, + 0.72, + 0.298 + ], + "angle": 0, + "content": "(b) Practitioner" + }, + { + "type": "text", + "bbox": [ + 0.47, + 0.304, + 0.807, + 0.33 + ], + "angle": 0, + "content": "Answer the question. Only output the answer without any additional content." + }, + { + "type": "text", + "bbox": [ + 0.47, + 0.342, + 0.605, + 0.366 + ], + "angle": 0, + "content": "Question: {Question} Answer:" + }, + { + "type": "title", + "bbox": [ + 0.595, + 0.371, + 0.741, + 0.384 + ], + "angle": 0, + "content": "(d) Without Reference" + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.394, + 0.884, + 0.449 + ], + "angle": 0, + "content": "Figure 5: Prompts used in this paper. (a) The prompt \\( p_1 \\) is used to retrieve one law article in the Layperson dataset. (b) The prompt \\( p_1 \\) is used to retrieve one law article and three precedent cases in the Practitioner dataset. (c) The prompt \\( p_3 \\) is used to refine the LLM's answer based on references. (d) The prompt \\( p_2 \\) is used for LLM responses without references." + }, + { + "type": "table", + "bbox": [ + 0.156, + 0.454, + 0.843, + 0.564 + ], + "angle": 0, + "content": "
TypeLLMURLLicence
Open domainQwen2-7B-Instructhttps://huggingface.co/Qwen/Qwen2-7B-InstructApache-2.0 license
Llam3-8B-Instructhttps://github.com/meta-llama/llama3META LLAMA 3 COMMUNITY License
Legal Domainfuzi.mingchahttps://github.com/irlab-sdu/fuzi.mingchaApache-2.0 license
DISC-LawLLMhttps://github.com/FudanDISC/DISC-LawLLMApache-2.0 license
LawGPT_zhhttps://github.com/LiuHC0428/LAW-GPT
Hanfeihttps://github.com/siat-nlp/HanFeiApache-2.0 license
Tailinghttps://github.com/DUTIR-LegalIntelligence/Tailing
LexiLawhttps://github.com/CSHaitao/LexiLawMIT license
zhihaihttps://github.com/zhihaiLLM/wisdomInterrogatoryApache-2.0 license
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.572, + 0.884, + 0.601 + ], + "angle": 0, + "content": "Table 4: The LLM source URLs and licenses used by CitaLaw. The parts where the license is listed as empty indicate that the author has not provided a License." + }, + { + "type": "image", + "bbox": [ + 0.123, + 0.64, + 0.48, + 0.732 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.139, + 0.738, + 0.462, + 0.751 + ], + "angle": 0, + "content": "(a) Metrics for CGG method with Layperson dataset." + }, + { + "type": "image", + "bbox": [ + 0.123, + 0.753, + 0.48, + 0.843 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.127, + 0.849, + 0.474, + 0.862 + ], + "angle": 0, + "content": "(b) Methods for CitaLaw metric with Practitioner dataset." + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.873, + 0.487, + 0.903 + ], + "angle": 0, + "content": "Figure 6: The performance of different NLI models when the LLM is Qwen." + }, + { + "type": "page_number", + "bbox": [ + 0.478, + 0.928, + 0.525, + 0.941 + ], + "angle": 0, + "content": "11195" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.195, + 0.12, + 0.803, + 0.252 + ], + "angle": 0, + "content": "
TypeDatasetURLLicence
QuestionLayperson Practitionerhttps://github.com/open-compass/LawBenchApache-2.0 license
https://github.com/CSHaitao/LexEvalMIT License
CorpusLeCaRDhttps://github.com/myx666/LeCaRDMIT License
ELAMhttps://github.com/ruc-wjyu/IOT-MatchMIT License
CAIL2021-sfzyhttps://github.com/china-ai-law-challenge/CAIL2021
LJP-MSJudg fuzi.mingchahttps://github.com/mly-nlp/LJP-MSJudge
DISC-LawLLMhttps://github.com/irlab-sdu/fuzi.mingchaApache-2.0 license
LawGPT_zhhttps://github.com/FudanDISC/DISC-LawLLMApache-2.0 license
Hanfeihttps://github.com/LiuHC0428/LAW-GPT
https://github.com/siat-nlp/HanFeiApache-2.0 license
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.261, + 0.884, + 0.292 + ], + "angle": 0, + "content": "Table 5: The dataset source URLs and licenses used by CitaLaw. The parts where the license is listed as empty indicate that the author has not provided a License." + }, + { + "type": "table", + "bbox": [ + 0.12, + 0.374, + 0.88, + 0.452 + ], + "angle": 0, + "content": "
ScoreDescription
1No Entailment: The former does not entail the latter at all, with no logical connection between the two.
2Weak Entailment: A partial entailment where the former somewhat relates to the latter, but the connection is weak and not fully conclusive.
3Moderate Entailment: A moderate degree of entailment, meaning the former generally leads to the latter in most cases, but exceptions exist.
4Strong Entailment: A strong logical relationship where the former can derive the latter in the vast majority of cases.
5Complete Entailment: The former fully entails the latter in all cases, with an unambiguous and definitive logical connection between them.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.285, + 0.462, + 0.712, + 0.476 + ], + "angle": 0, + "content": "Table 6: Scoring Criteria for Human Evaluation of Entailment." + }, + { + "type": "table", + "bbox": [ + 0.12, + 0.556, + 0.876, + 0.656 + ], + "angle": 0, + "content": "
MetricFluencyCorrectnessCitationAll
CategoryRetrieverMauveRouge-1Rouge-2Rouge-LBERT-FCorrectcCorrectaCorrectdCitaLawAvg
Llama3 (Llam3-8B-Instruct)Criminal37.4418.072.1813.1561.7164.0363.5664.3680.3444.98
Civil56.1618.272.3413.4461.9063.2263.8963.3580.9747.06
BGE61.0123.976.0517.9165.9467.2977.3174.9586.7053.46
Qwen2 (Qwen2-7B-Instruct)Criminal55.2621.094.5314.3264.7363.1064.8965.8561.6046.15
Civil52.4420.484.1613.8164.4561.7964.9465.6259.8845.29
BGE75.1022.264.7715.4165.2867.5078.6277.8277.5953.82
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.667, + 0.884, + 0.697 + ], + "angle": 0, + "content": "Table 7: Performance comparisons on retrieval models in the Layperson dataset when the method is CGG. The best performance is indicated in bold." + }, + { + "type": "table", + "bbox": [ + 0.119, + 0.765, + 0.879, + 0.851 + ], + "angle": 0, + "content": "
MetricFluencyCorrectnessCitationAll
CategoryRetrieverMauveRouge-1Rouge-2Rouge-LBERT-FCorrectcCorrectaCorrectdCitaLawCitaCCitaaCitaDAvg
Llama3 (Llam3-8B-Instruct)Criminal34.2525.797.8619.4265.0366.2776.3076.8270.5966.4170.0969.4754.03
Civil39.8426.398.0720.0265.2765.4175.7875.7369.2167.5269.5469.1654.33
BGE36.3726.157.8419.5565.6067.1976.3677.7373.5868.2367.8767.6554.51
Qwen2 (Qwen2-7B-Instruct)Criminal32.4931.7911.0923.9369.7972.0080.8181.5368.4268.4271.8671.5456.97
Civil33.3731.6711.0623.8469.6373.3580.5781.2769.1166.4170.0969.4756.65
BGE39.6631.0110.7523.4369.0673.4980.1181.1170.3767.8269.5370.0157.20
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.861, + 0.884, + 0.891 + ], + "angle": 0, + "content": "Table 8: Performance comparisons on retrieval models in the Practitioner dataset when the method is CGG. The best performance is indicated in bold." + }, + { + "type": "page_number", + "bbox": [ + 0.477, + 0.928, + 0.526, + 0.941 + ], + "angle": 0, + "content": "11196" + } + ] +] \ No newline at end of file diff --git a/2025/CitaLaw_ Enhancing LLM with Citations in Legal Domain/81aca763-e861-40de-ad3f-640af6cf3d30_origin.pdf b/2025/CitaLaw_ Enhancing LLM with Citations in Legal Domain/81aca763-e861-40de-ad3f-640af6cf3d30_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..634f7c340c8b8ad1ef525455cb185b7b5841b45d --- /dev/null +++ b/2025/CitaLaw_ Enhancing LLM with Citations in Legal Domain/81aca763-e861-40de-ad3f-640af6cf3d30_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a196eb6bb662138c7cea0c8cb70ee8e33d451cfcd962746ef4cd95da7bfa062 +size 666158 diff --git a/2025/CitaLaw_ Enhancing LLM with Citations in Legal Domain/full.md b/2025/CitaLaw_ Enhancing LLM with Citations in Legal Domain/full.md new file mode 100644 index 0000000000000000000000000000000000000000..03d2223c6524982e429e02c308f56557aa94aa0b --- /dev/null +++ b/2025/CitaLaw_ Enhancing LLM with Citations in Legal Domain/full.md @@ -0,0 +1,421 @@ +# CitaLaw: Enhancing LLM with Citations in Legal Domain + +Kepu Zhang $^{1}$ , Weijie Yu $^{2*}$ , Sunhao Dai $^{1}$ , Jun Xu $^{1}$ + +1Gaoling School of Artificial Intelligence, Renmin University of China + +$^{2}$ University of International Business and Economics + +kepuzhang@ruc.edu.cn, yu@uibep.edu.cn + +# Abstract + +In this paper, we propose Citalaw, the first benchmark designed to evaluate LLMs' ability to produce legally sound responses with appropriate citations. 
Citalaw features a diverse set of legal questions for both laypersons and practitioners, paired with a comprehensive corpus of law articles and precedent cases as a reference pool. This framework enables LLM-based systems to retrieve supporting citations from the reference corpus and align these citations with the corresponding sentences in their responses. Moreover, we introduce syllogism-inspired evaluation methods to assess the legal alignment between retrieved references and LLM-generated responses, as well as their consistency with user questions. Extensive experiments on 2 open-domain and 7 legal-specific LLMs demonstrate that integrating legal references substantially enhances response quality. Furthermore, our proposed syllogism-based evaluation method exhibits strong agreement with human judgments. + +# 1 Introduction + +Generating responses supported by citations, such as relevant law articles and precedent cases, is essential for ensuring the trustworthiness of large language models (LLMs) in legal tasks. For laypersons seeking legal advice (Fei et al., 2023), LLM-generated responses grounded in citations provide verifiable information, fostering trust in the system. Conversely, for legal practitioners such as lawyers and judges, citations serve as supportive evidence that aids in analyzing complex cases, validating legal arguments, and ensuring decisions align with established legal principles (Li et al., 2024; Zhong et al., 2020; Abdallah et al., 2023). + +Recently, a growing body of benchmark research (Gao et al., 2023a; Li et al., 2023) has focused on enabling LLMs to provide citations for the + +![](images/8b18c275924d2d7c798f99f411d1b1dd24b37d58177b2cd07259e65b86050300.jpg) +Figure 1: The framework of our CitaLaw. + +statements they generate. For instance, ALCE (Gao et al., 2023b) introduces a benchmark designed to evaluate the ability of LLMs to generate citation-supported outputs, aiming to improve factual accuracy. 
WebCiteS (Deng et al., 2024) provides a curated database of manually annotated summaries and citations to enhance performance in text summarization and citation generation. + +While these studies have made notable progress in general domains, they face significant challenges when applied to the legal domain. First, laypersons and legal practitioners interact with LLMs differently and have distinct expectations for citations. Laypersons typically seek legal advice and rely on citations to verify the accuracy of LLM responses, whereas legal practitioners pose more complex queries, using LLMs for legal reasoning, with citations serving as supportive evidence. Existing studies fail to address these differences, leading to unsatisfactory performance in real-world applications. Second, existing methods often fall short in providing the diverse references required in legal contexts, such as law articles and precedent cases. Law articles establish the foundational legal framework, while precedent cases offer concrete examples and interpretive guidance. These two types of references inherently align with the distinct characteristics of civil and common law systems. Third, traditional citation evaluation measures, such as ROUGE (Lin, 2004), rely on surface-level similar + +![](images/b0903bfb6decf7c8b09ba1de2359ba276ce2f70217a00854a97e5ed3d8807b8a.jpg) +Figure 2: Examples from the two subsets of CitaLaw, with text in red, blue, and yellow representing the three dimensions of the syllogism: major premise, minor premise (circumstances, illegal acts), and conclusion (legal decisions), respectively. [A] and [C] denote citations to relevant law articles and precedent cases, respectively. + +ities and are often insufficient to assess the alignment between references and LLM-generated responses. In the legal domain, effective evaluation requires a deeper understanding of logical and semantic relationships. 
+ +To overcome the above challenges, we propose CitaLaw, the first benchmark tailored to evaluate LLMs' capabilities in generating legally grounded responses supported by accurate and context-aware citations. As shown in Figure 1, CitaLaw incorporates four distinct legal-specific features: + +(1) CitaLaw has two subsets tailored for laypersons and practitioners, with examples in Figure 2. Laypersons typically ask shorter, conversational questions, while practitioners often pose specialized, detailed questions. +(2) CitaLaw includes a retrieval corpus comprising two commonly used references: law articles, which provide clear and concise guidelines for addressing user questions, and precedent cases, which offer legal reasoning and support for judicial decisions. Recognizing the distinct needs of laypersons and practitioners, we provide only law articles for laypersons to ensure clarity, while practitioners have access to both law articles and precedent cases to support more complex legal reasoning. +(3) In addition to traditional global-level metrics such as MAUVE (Pillutla et al., 2021), we propose a syllogism-based evaluation method to assess both the response correctness and the citation quality. This method provides a more granular evaluation by focusing on three key dimensions: circumstances, illegal acts, and legal decisions. + +(4) We consider two types of response generation methods. The first type, Citation-Guided Generation (CGG), involves generating responses by incorporating retrieved references during generation. The second type, Answer Refinement Generation (ARG), refines the LLMs' initial response (CloseBook) by retrieving and incorporating reference information. This category includes ARG-Q, which retrieves citations using only the user query, and ARG-QA, which retrieves citations using both the user query and the LLM's initial response. 
+ +Extensive experiments on two open-domain and seven legal-specific LLMs reveal the following key insights: 1) Incorporating legal references into the LLM significantly improves the quality of responses; 2) Including references as part of the LLM's input consistently outperforms answer-refinement methods; 3) Leveraging references to refine the LLM's responses yields better alignment of responses and references. 4) For fine-tuning LLMs in legal scenarios, incorporating law articles, syllogistic reasoning, and full-scale fine-tuning achieves promising performance. 5) Open-domain LLMs surprisingly outperform legal-specific LLMs in certain scenarios; 6) Human evaluations show a strong correlation with our syllogism-based methods. + +In summary, our contributions are as follows: + +- To the best of our knowledge, CitaLaw is the first benchmark designed to evaluate the capability of LLMs to generate legally grounded responses with accurate and context-aware citations. CitaLaw includes questions tailored to both laypersons and practitioners, paired + +with a citation corpus comprising law articles and precedent cases. + +- We propose a two-level evaluation framework that combines global-level metrics with a syllogism-based reasoning approach. Additionally, we explore two mainstream methods for legal response generation: citation-guided and answer refinement. +- Through extensive experiments on two open-domain and seven legal-specific LLMs, we demonstrate the effectiveness of integrating legal references into response generation and validate our syllogism-based evaluation method. Additionally, we provide actionable insights for the practical deployment of LLMs in legal scenarios. + +# 2 Related Work + +LLM for Legal Task. A amount of work has explored applying LLMs to legal tasks (Savelka et al., 2023; Wu et al., 2023b; Yu et al., 2022a; Blair-Stanek et al., 2023). 
Building LLMs tailored for legal scenarios is a popular direction (Yue et al., 2023; Wu et al., 2023a; He et al., 2023). There are also some benchmarks that explore the capabilities of LLMs in legal tasks. LawBench (Fei et al., 2023) evaluates LLMs' legal knowledge across three cognitive aspects. LAiW (Dai et al., 2023) assesses LLMs' legal reasoning abilities based on legal practice logic. LexEval (Li et al., 2024) evaluates LLMs' legal capabilities based on a new legal cognitive ability classification system. However, none of them have considered enhancing the trustworthiness of LLMs in legal scenarios by generating outputs with citations. + +Citation in LLM. Attribution (Li et al., 2023) in LLMs refers to providing supporting evidence for the answers generated by the model, presented in the form of citations. ALCE (Gao et al., 2023b) is an automated benchmark for evaluating LLMs' ability to generate outputs with citations, aimed at improving the factual accuracy of the generated responses. WebCiteS (Deng et al., 2024) provides a database containing 7,000 manually annotated summaries and citations to enhance LLMs' capabilities in summarization and citation. RARR (Gao et al., 2023a) enhances LLM outputs by automatically adding citations, and modifying the responses. ExpertQA (Malaviya et al., 2024) verifies and modifies citations through expert review to ensure re + +liability. In contrast to the above works, CitaLaw focuses specifically on citation in legal scenarios. + +# 3 Task Setup and Dataset Construction + +Suppose we have a legal corpus $D$ , which consists of either a collection of precedent cases $(D_{l})$ or law articles $(D_{c})$ . Given a user question $x$ posed by either a layperson or a practitioner, the LLM-based system is tasked with retrieving supportive citations from $D$ and generating a legally grounded response $y$ . 
The response $y$ comprises a list of $n$ sentences, i.e., $y = [s_1,\dots ,s_n]$ , where each sentence $s_i$ refers to at most one corresponding citation. As illustrated in Figure 2, the system is further required to attach each citation to its relevant sentence, with "[A]" and "[C]" denoting references to law articles and precedent cases, respectively. + +To enable the evaluation of this task, we construct the specialized dataset (Table 1 shows the statistics) as follows: + +To simulate the behavior of laypersons, we include questions that are more conversational, lack detailed case descriptions, and are relatively short in length. We use the consultation section from LawBench (Fei et al., 2023), which collects user queries from the Hualv website1 and answers provided by lawyers or legal consulting firms. + +To simulate the behavior of legal practitioners, we include questions that are more professional, often accompanied by detailed case descriptions, and generally longer. For this purpose, we use the open-ended question section from LexEval (Li et al., 2024), which consists of subjective questions from the National Uniform Legal Profession Qualification Examination. These questions are particularly challenging for LLMs, requiring them to understand the case fully and apply legal knowledge accurately to generate answers. + +In terms of the corpus, we construct a comprehensive corpus from multiple sources, including law articles and precedent cases. Specifically, for law articles, we collect approximately 50,000 documents from LexiLaw $^2$ , covering areas such as Civil Law, Criminal Law, and judicial interpretations. For precedent cases, we include both criminal and civil cases. Criminal cases are sourced from the LeCaRD legal retrieval dataset (Ma et al., 2021b), ELAM (Yu et al., 2022b), and civil cases from the CAIL legal summary + +
Dataset#Q\( Len_Q \)\( Len_A \)Q Type
Layperson50057.62107.40Question
Practitioner500618.96193.46Case + Question
+ +Table 1: Dataset statistics. #Q indicates the number of questions, $\mathrm{Len}_Q$ and $\mathrm{Len}_A$ denote the average lengths of questions and gold answers, and Q Type refers to the question type. + +dataset, LJP-MSJudge (Ma et al., 2021a), and the pre-training data of fuzi.mingcha (Wu et al., 2023a). As a supplement to precedent cases, we also incorporate question-and-answer pairs from fine-tuning datasets of legal LLMs as part of the precedent cases. These QA pairs are collected from DISC-LawLLM (Yue et al., 2023), LawGPT_zh (Liu et al., 2023), and HanFei (He et al., 2023). In total, the constructed corpus contains approximately 500,000 documents, ensuring sufficient coverage of both law articles and precedent cases to support diverse legal tasks. + +# 4 Method + +# 4.1 Response Generation + +We consider two types of methods in this study. + +Citation-Guided Generation (CGG) produces response $y_{cgg}$ given a user question $x$ by referring retrieved relevant document(s) $D_R$ : + +$$ +y _ {\mathrm {c g g}} = f _ {\mathrm {L L M}} \left(x, D _ {R}, p _ {1}\right), \tag {1} +$$ + +where $f_{\mathrm{LLM}}$ denotes a open-domain or a legal specific LLM; $p_1$ is the direct generation prompt. All prompt settings are detailed in Appendix A. + +Answer Refinement Generation (ARG) is a two-stage method that generates the final response $y_{\mathrm{arg}}$ by refining the LLM's initial response $y_{\mathrm{init}}$ through the retrieval and incorporation of reference information. This process can be formulated as: + +$$ +y _ {\text {i n i t}} = f _ {\mathrm {L L M}} (x, p _ {2}), \tag {2} +$$ + +where $p_2$ is the prompt instructing the LLM to directly generate an initial response without reference information. We refer to this step as CloseBook. 
The initial response $y_{\mathrm{init}}$ is then refined as: + +$$ +y _ {\text {a r g}} = f _ {\mathrm {L L M}} \left(y _ {\text {i n i t}}, D _ {R}, p _ {3}\right), \tag {3} +$$ + +where $p_3$ is the prompt guiding the LLM to refine the $y_{\mathrm{init}}$ using the retrieved documents $D_R$ . + +Laypersons and practitioners interact with LLMs differently and have distinct expectations for citations. When $x$ is submitted by a layperson, the corresponding $D_{R}$ consists of relevant law articles. In + +contrast, when $x$ is submitted by a practitioner, the corresponding $D_R$ includes both relevant law articles and precedent cases. The process for retrieving $D_R$ from $D$ is detailed in the next subsection. + +# 4.2 Citation Retrieval + +We explore state-of-the-art open-domain dense retriever BGE (Xiao et al., 2023), along with two legal-specific dense retrievers, CriminalBERT (Zhong et al., 2019) and Civil-BERT (Zhong et al., 2019). We also investigate two types of retrieval queries: $x$ (the user question alone, ARG-Q) and $[x; y_{\mathrm{init}}]$ (the concatenation of the user query $x$ and the initial response $y_{\mathrm{init}}$ , where $[]$ denotes the concatenation operation, ARG-QA). The impact of different retrieval models on performance will be analyzed in the experiments. + +# 4.3 Citation Attachment + +Building on the retrieved citations, this subsection outlines the process of attaching these law articles or precedents to specific sentences in the LLM-generated responses. This process involves answering two key questions: + +What kind of sentences can be associated with citations? We utilize co-occurring words and legal entity extraction to identify sentences that explicitly reference legal concepts, actions, or terms relevant to the retrieved citations. Specifically, we construct a pool of legal terminologies using THUOCL3 and LaWGPT (Zhou et al., 2024). A sentence is considered eligible if it contains any of the terminologies from this pool. 
Additionally, we use SpaCy (Honnibal et al., 2020) to extract legal entities from each sentence. If a sentence includes legal entities, it is also deemed eligible for citation attachment. + +How are citations attached to the identified sentences? If a sentence is deemed eligible for citation attachment, we associate it with retrieved citations as follows. For the laypersons, the retrieved law article $c_{l} \in D_{l}$ is attached to the most relevant sentence $s_{k} \in y$ : + +$$ +C _ {\text {L a y}} = \left\{\left(s _ {k}, c _ {l}\right) \mid s _ {k} = \underset {s _ {i} \in y} {\arg \max } \operatorname {s i m} \left(s _ {i}, c _ {l}\right) \right\}, \tag {4} +$$ + +where $(s_k, c_l)$ represents attaching the reference $c_l$ to the sentence $s_i$ , and $\mathrm{sim}(\cdot)$ is computed using sentence-BERT (Reimers, 2019). We set $|C_{\mathrm{Lay}}| = 1$ because, typically, a layperson's query pertains to only one specific legal article. For practitioners, + +we attach the retrieved law article in the same way as for laypersons. Additionally, we associate the retrieved precedent cases $c_{c} \in D_{c}$ with each $s_{i} \in y$ , which is formulated as: + +$$ +\begin{array}{l} C _ {\text {P r a}} = \left\{\left(s _ {k}, c _ {l}\right) \mid s _ {k} = \underset {s _ {i} \in y} {\arg \max } \operatorname {s i m} \left(s _ {i}, c _ {l}\right) \right\} (5) \\ \cup \{(s _ {i}, c _ {c}) |, c _ {c} = \underset {c _ {j} \in D _ {c}} {\arg \max } \operatorname {s i m} (s _ {i}, c _ {j}) \}, (5) \\ \end{array} +$$ + +where $|D_c| = 3$ , meaning each response $y$ can be associated with up to three precedents4. + +# 5 Evaluation + +CitaLaw provides a comprehensive evaluation framework incorporating metrics for fluency, correctness, and citation quality. This framework is divided into two levels of analysis: global level and the proposed syllogism level. 
+ +Syllogism, a foundational framework in legal reasoning, comprises three key components: the major premise, the minor premise, and the conclusion. In our legal context, these correspond to the relevant law article or precedent case (major premise), the factual circumstances and actions of a specific case (minor premise), and the resulting legal decision (conclusion). By integrating this syllogistic framework, CitaLaw goes beyond surface-level correctness to evaluate the logical coherence and alignment of LLM-generated responses with established legal principles. + +# 5.1 Fluency (Style Consistency) + +To ensure the LLM-generated responses align with the user's requirements, the system must adapt its style based on the user's background. For laypersons, responses should avoid excessive technical jargon to ensure accessibility and comprehension. Conversely, responses for legal practitioners should adopt a formal and professional tone to maintain credibility and utility. To achieve this aim, we concatenate the user query and the LLM-generated response and apply MAUVE (Pillutla et al., 2021) to assess their style consistency. + +# 5.2 Correctness + +At the global level, we use established metrics ROUGE (Lin, 2004) and BERTScore (Zhang et al., 2019). ROUGE measures word-level overlap between the generated and labeled responses, with scores reported for ROUGE-1, ROUGE-2, and + +ROUGE-L. BERTScore captures semantic similarity between the generated and labeled responses, and we report the F-score (BERT-F) for evaluation. These metrics assess the overall correctness of LLM-generated responses. + +At the syllogism level, we leverage the Qwen2 (Yang et al., 2024) to extract key components, including the circumstances, illegal acts, and legal decisions. 
We use sentence-BERT (Reimers, 2019) to measure the alignment between the labeled responses and the generated outputs across these dimensions, resulting in $\mathrm{Correct}_{\mathrm{c}}$ , $\mathrm{Correct}_{\mathrm{a}}$ , and $\mathrm{Correct}_{\mathrm{d}}$ . This syllogism-level evaluation allows us to assess the logical coherence of the responses, ensuring that they align with the underlying legal reasoning principles. + +# 5.3 Citation Quality + +As previously discussed, we assume that a question submitted by laypersons typically corresponds to a specific law article. Therefore, at the global level, we evaluate the citation quality of the retrieved law article (premise) by measuring its entailment with the associated sentence in the LLM's response (hypothesis). Specifically, we use an NLI model to compute $\text{Cita}_{\text{Law}}$ , which quantifies the degree to which the law article entails the attached sentence. This metric reflects how effectively the response aligns with the cited law article. We employ DISC-LawLLM (Yue et al., 2023) as the NLI model due to its strong agreement with human evaluations (as discussed in Sec. 6.3) and its superior performance compared to other NLI models (as detailed in Sec. 6.5). + +At the syllogism level, we evaluate the quality of precedent case citations by examining three key components: circumstances, illegal acts, and legal decisions. After extracting these elements from both the retrieved cases and the associated sentence in the LLM's response, we utilize DISC-LawLLM to assess the entailment for each component. This evaluation yields three distinct scores: $\text{Cita}_{\text{c}}$ for circumstances, $\text{Cita}_{\text{a}}$ for illegal acts, and $\text{Cita}_{\text{d}}$ for legal decisions, providing a more detailed and nuanced assessment of citation quality within the syllogism framework. + +# 6 Experiments + +We conduct extensive experiments on our CitaLaw using the proposed two-level evaluation methods. + +
MetricFluencyCorrectnessCitationAll
CategoryModelMauveRouge-1Rouge-2Rouge-LBERT-F\( Correct_c \)\( Correct_a \)\( Correct_d \)\( Cita_{Law} \)Avg
Llama3 (Llama3-8B-Instruct)CloseBook22.6316.471.9513.3458.4673.0568.2466.8767.3843.15
CGG61.0123.976.0517.9165.9467.2977.3174.9586.7053.46
ARG-Q61.2723.175.6517.8364.2369.0475.4574.4779.1052.24
ARG-QA51.8323.736.9618.5364.8471.3774.8174.6680.8051.95
Qwen2 (Qwen2-7B-Instruct)CloseBook21.0415.292.2711.3158.3970.8971.7169.9372.3543.69
CGG75.1022.264.7715.4165.2867.5078.6277.8277.5953.82
ARG-Q66.5520.864.5015.4264.5966.9677.8275.6681.4852.65
ARG-QA66.8021.734.7816.3464.8569.3176.3575.0582.8353.11
Legal LLM (CGG)DISC-LawLLM72.7022.464.1415.4865.0665.2178.5576.1783.4653.69
fuzi.mingcha56.5824.545.7017.4865.8663.2879.5677.9481.6452.51
LexiLaw71.8924.966.2518.9165.6868.8978.1276.7282.4254.87
Tailing13.9515.934.1312.8959.4772.0069.1168.3882.6744.28
zhihai37.5020.984.5913.6964.5467.7577.6876.9977.1648.99
LawGPT_zh51.6023.335.2816.1765.1463.7279.4377.5286.1852.04
HanFei51.1223.955.1918.7665.1270.8375.0174.2176.9751.24
+ +Table 2: Performance comparisons on the Layperson dataset. The best performance is indicated in bold. + +# 6.1 Experimental Settings + +# 6.1.1 Evaluated Models + +We selected two categories of LLMs for testing: The legal LLMs include (1) fuzi.mingcha (6B) (Wu et al., 2023a), (2) LexiLaw5 (6B), (3) Tailing6 (7B), (4) DISC-LawLLM (13B) (Yue et al., 2023), (5) zhihai (7B) (Wu et al.), (6) LawGPT_zh (6B) (Liu et al., 2023), (7) HanFei (7B) (He et al., 2023). The open-domain LLMs include Qwen2 (7B) (Yang et al., 2024) and Llama3 (8B) (AI@Meta, 2024). For these models, we tested all methods mentioned in Sec. 4, including: (1) CloseBook, (2) CGG, (3) ARG-Q and (4) ARG-QA. For the legal LLMs, we generate responses using CGG. Appendix B has the details. + +# 6.1.2 Implementation Details + +Our implementation is based on the Huggingface Transformers library (Wolf et al., 2020) with PyTorch. We use bge-base-zh-v1.5 (Xiao et al., 2023) as the retrieval model and conduct all experiments on Nvidia A6000 GPUs. Additional details are provided in Appendix C and https://github.com/ke-01/CitaLaw. + +# 6.2 Main Results + +The results on the Layperson and Practitioner datasets are presented in Table 2 and Table 3. We analyze the results from three perspectives: + +# 6.2.1 Performance of Open-Domain LLM + +Legal references improve the response quality. Compared to CloseBook, the overall performance in CGG, ARG-Q, and ARG-QA has improved. This indicates that incorporating references into the + +LLM helps it better understand both the question and the required direction for the answer, thereby enhancing performance in terms of style consistency, correctness, and citation quality. + +CGG achieves better response quality. We observe that CGG achieves optimal performance, especially response correctness, suggesting that incorporating legal references into the LLM input is more effective than refining the LLM's response. 
This is because including legal knowledge as input allows the LLM to consider relevant context when generating replies, whereas refining the response might lead to excessive alterations. + +ARG improves the alignment of responses and references. We can observe that ARG outperforms CGG in citation-related metrics overall. This is because CGG merely incorporates reference information as input, which may lead the model to overlook some reference details during the generation process. In contrast, ARG modifies the answer based on the references after generation, making it easier to ensure the completeness of citations. + +Chinese data fine-tuning can bring benefits. Both the Layperson and Practitioner datasets are Chinese datasets. Qwen2 (Fine-tuning on more Chinese data) achieved better performance than Llama3, demonstrating the benefits of using Chinese data for fine-tuning. + +CloseBook tends to state circumstances. CloseBook performs better in terms of correctness regarding circumstances compared to the other dimensions. This suggests that when judicial knowledge references are not used, the LLM is more likely to repeat the circumstances itself, rather than providing an appropriate response to the illegal acts and the legal decision. + +
MetricFluencyCorrectnessCitationAll
CategoryModelMauveRouge-1Rouge-2Rouge-LBERT-F\( Correct_c \)\( Correct_a \)\( Correct_d \)\( Cita_{Law} \)\( Cita_c \)\( Cita_a \)\( Cita_d \)Avg
Llama3 (Llama3-8B-Instruct)CloseBook23.8123.057.2919.2362.8376.3071.0570.3263.4966.9568.8365.4651.55
CGG36.3726.157.8419.5565.6067.1976.3677.7373.5868.2367.8767.6554.51
ARG-Q42.6520.395.0715.7562.8270.4973.6772.0068.6169.4870.5168.3453.31
ARG-QA36.9418.644.5614.6361.5071.0772.3870.3269.4068.9570.4269.5152.36
Qwen2 (Qwen2-7B-Instruct)CloseBook61.9130.4410.5423.5367.5574.3579.8478.5268.5568.0370.3069.7158.61
CGG39.6631.0110.7523.4369.0673.4980.1181.1170.3767.8269.5370.0157.20
ARG-Q41.0220.575.1415.6263.3167.8474.7173.9473.0168.9673.2073.6454.25
ARG-QA21.9716.673.0612.4760.7067.4971.1670.8871.7669.0171.0471.3350.63
Legal LLM (CGG )DISC-LawLLM38.1121.376.7516.9660.8473.4272.1471.7963.9267.4268.2265.4552.20
fuzi.mingcha66.5528.959.5122.6967.0670.7376.6677.4765.9266.9469.2868.6957.54
LexiLaw57.7429.018.9323.8365.6370.3676.6775.9765.2866.9368.8968.0356.44
Tailing50.1626.529.1622.4465.3575.9673.8370.3064.6566.9467.5666.0954.91
zhihai26.2921.386.0015.5364.4765.5976.3877.3767.9366.3063.1759.8250.85
LawGPT_zh47.1029.168.9222.5567.6469.4879.3780.2366.9068.3867.5568.9456.35
HanFei75.7232.9812.4626.9168.7273.2578.6378.1167.0367.4568.6367.7359.80
+ +Table 3: Performance comparisons on the Practitioner dataset. The best performance is indicated in bold. + +# 6.2.2 Performance of Legal LLM + +Law article training achieves gains. In the Layperson dataset, LexiLaw achieves optimal performance overall. This is because the questions in the Layperson dataset often require only law articles to provide answers clearly, and LexiLaw's training explicitly used law articles, allowing it to effectively handle such questions. + +Full-parameter training offers advantages. Hanfei achieves the best results in the Practitioner dataset, as it is a fully parameter-trained legal LLM. Full-parameter fine-tuning allows it to effectively simulate a legal expert, thus performing well. + +Syllogistic reasoning is useful. fuzi.mingcha performs well on syllogism evaluation metrics, particularly on the Layperson dataset. This is due to its fine-tuning of syllogism judgment data. + +# 6.2.3 Open Domain LLM vs. Legal LLM + +Impact of LLM Backbone. We can observe that some legal LLMs perform worse than open-domain LLMs. This is because Qwen2 and Llama3 are the latest open-domain LLMs, and their overall capabilities have significantly improved. In contrast, most legal LLMs are built on earlier generations of LLMs, which have weaker base models, leading to poorer overall performance. + +Effectiveness of legal knowledge. Overall, the upper limit of legal LLMs is higher than that of open-domain LLMs. This is because legal LLMs, after extensive training on legal knowledge, have developed strong capabilities in solving legal issues. As a result, even though their base models are outdated, they can still perform effectively. + +# 6.3 Human Evaluation + +In this section, we compared the syllogism-level metric with human evaluation. Details of legal human annotators can be found in Appendix D. + +The syllogism-level evaluation of citation quality is divided into two stages: Stage 1: Extracting key components. 
Stage 2: Assessing the entailment using an NLI model. + +Stage 1: We randomly selected 50 questions each from the Layperson and Practitioner datasets. After splitting the cases into individual clauses, annotators were provided with the full case and its clauses. They do a three-class classification of each clause. The Qwen2's annotations were then compared with human annotations. The Cohen's kappa coefficient (Cohen, 1960) of 0.7876 indicates substantial agreement (0.61-0.80) between the model's and human annotators' labels. + +Stage 2: We randomly selected 50 questions from the Practitioner dataset and used Qwen2 to extract key components of pairs of responses and citations. Annotators assessed the degree to which the citations entailed the corresponding response components using a 5-point scale (1: low, 5: high), with descriptions provided in Appendix D. The entailment probabilities given by DISC-LawLLM, which range from 0 to 1, were scaled to the same 1-5 range by multiplying by 5 and rounding. We then compared the scaled model outputs with the human evaluations and calculated Cohen's kappa coefficient. The kappa score of 0.6923 again indicates substantial agreement (0.61-0.80) between the model and human judgments. + +# 6.4 Effects on Different Retrieval Models + +We selected BGE as the retrieval model in the main experiment. In this section, we explore the impact of using different retrieval models. Specifically, we evaluate Criminal-BERT (Zhong et al., 2019) and Civil-BERT (Zhong et al., 2019), two legal domain models based on BERT, fine-tuned on large-scale criminal and civil law documents, respectively. We replaced the retrieval model and tested the CGG + +![](images/bb02b0dcacfe9e2343a7e3dce9d3c1df14239b907d3877ece25c69828cb44528.jpg) +Figure 3: Performance of different retrieval models. Lay is short for Layperson dataset and Pra is short for Practitioner dataset. 
+ +![](images/1fbc1d82ad26023059c2cbac91cbdf2370eca84e69adf8ce3919ae5ef0ed0727.jpg) + +![](images/2710aec7dc9ffc3bb3faa79cb8e962183333b9a0069341b63d3806ae4b8136a3.jpg) +(a) Methods for CitaLaw metric with Layperson dataset. +(b) Metrics for CGG method with Practitioner dataset. +Figure 4: The performance of different NLI models when the LLM is Llama. + +method on the Layperson dataset. The average results across all metrics are shown in Figure 3, with detailed metric results provided in Appendix E. + +As shown, on the Layperson dataset, BGE significantly outperforms the other two models. This is because the dataset consists of questions from laypersons, which are more everyday in nature. In contrast, the two legal BERT models, having been trained extensively on legal cases, show a distributional mismatch with open-domain data, leading to poorer performance. On the Practitioner dataset, which features professional legal questions, BGE still achieves the best performance. This can be attributed to its extensive training on diverse data, likely including some legal data, and its use of more advanced model architectures and techniques. However, the two legal BERT models perform comparably to BGE, showcasing the benefits of their specialized training on legal data. + +# 6.5 Effects on Different NLI Models + +We opted to use legal LLMs as the NLI model in our experiments, as they support longer input lengths and incorporate substantial legal knowl + +edge. In Section 6.3, we verified that DISC-LawLLM and human achieved good consistency. In this section, we explore the performance of several legal LLMs in the NLI task. Besides DISC-LawLLM, we evaluated LexiLaw, LawGPT_zh, and Hanfei, which demonstrated strong performance in the main experiments. + +In Figures 4 (a), we examined the ability of four legal LLMs to evaluate Llama across the Close-Book, CGG, ARG-Q, and ARG-QA methods using the CitaLaw metric on the Layperson dataset. 
In Figures 4 (b), we investigated the performance of four legal LLMs in evaluating the CGG method applied to Llama across the metrics CitaLaw, CitaC, Cita a, and Cita d on the Practitioner dataset. + +We can observe that Hanfei provides lower entailment scores across both datasets. This is because it is a fully parameter-tuned legal LLM, which results in a diminished capability to handle the general task of entailment reasoning. Additionally, we found that on the Practitioner dataset, other legal LLMs achieved results closer to those of DISC-LawLLM, while on the Layperson dataset, the performance gap was significantly larger. This is because the Practitioner dataset is more judicially oriented, aligning with the knowledge seen during the fine-tuning of legal LLMs. In contrast, due to limited training on general-purpose data, other legal LLMs struggle to accurately determine entailment relationships in the Layperson dataset. Similar conclusions can be drawn when the LLM is Qwen in Appendix F. + +# 7 Conclusion + +We introduce CitaLaw, a benchmark designed to explore LLMs to generate responses with citations in legal scenarios, thus improving the trustworthiness of LLMs. CitaLaw includes two categories of questions: laypersons and practitioners. For laypersons, CitaLaw provides law articles as citations to help them understand the LLM's response clearly. For practitioners, both law articles and precedent cases are provided as citations, better supporting their needs for complex reasoning. CitaLaw offers global-level and syllogism-level metrics and supports the integration of citations into LLM inputs to guide generation or using citations to refine LLM's response. We conducted extensive experiments on 7 legal-domain LLMs and 2 popular open-domain LLMs, providing valuable insights for the deployment of LLMs in legal scenarios. 
+ +# 8 Limitations + +While CitaLaw provides a robust framework for evaluating LLMs in legal scenarios, several limitations should be acknowledged to guide future extensions of this work. + +First, the datasets used in CitaLaw are primarily sourced from the Chinese legal system, which may limit the benchmark's applicability to other jurisdictions. However, by incorporating both law articles and precedent cases to align with the principles of civil and common law systems, CitaLaw demonstrates strong potential for adaptation to diverse legal contexts. + +Second, the syllogism-based evaluation framework simplifies legal reasoning into three key components: the major premise (law articles or precedent cases), the minor premise (case circumstances and actions), and the conclusion (legal decision). While this structured approach is effective for systematic evaluation, real-world legal reasoning may encompass additional complexities. + +# 9 Ethical Considerations + +Data Privacy and Confidentiality. The legal datasets used in CitaLaw include law articles, precedent cases, user questions, and golden responses. These documents were sourced from publicly available databases, ensuring compliance with data privacy and confidentiality standards. We carefully reviewed the datasets to ensure that no personally identifiable information (PII) or sensitive details about individuals were inadvertently included. + +Alignment with Legal Standards. Legal AI systems must align with the ethical and professional standards of the legal domain. Our work emphasizes the need for syllogism-based reasoning to ensure logical consistency and adherence to legal principles. + +Transparency and Explainability. Legal reasoning must be transparent and interpretable, particularly when used in sensitive or high-stakes domains. The metrics proposed in CitaLaw, including syllogism-based evaluation, aim to improve explainability by breaking down the reasoning process into logical components.
+ +Responsibility in System Deployment. CitaLaw is intended as a research benchmark and should not be directly deployed in high-stakes legal decision-making without human oversight. While the benchmark aims to enhance the trustworthiness + +of LLM-generated responses, legal professionals should always verify the citations and legal interpretations provided by such systems. Misuse of automated systems without adequate validation could lead to inaccurate legal advice or unintended consequences in legal proceedings. + +# 10 Acknowledgements + +This work was funded by the National Key R&D Program of China (2023YFA1008704), the National Natural Science Foundation of China (62472426). Supported by fund for building world-class universities (disciplines) of Renmin University of China. Work partially done at Beijing Key Laboratory of Research on Large Models and Intelligent Governance, and Engineering Research Center of Next-Generation Intelligent Search and Recommendation, MOE. Supported by the Beijing Social Science Foundation Planning Project (Grant No. 24GLC041), the Fundamental Research Funds for the Central Universities in UIBE (Grant No. 24QN06, 24PYTS22). + +# References + +Abdelrahman Abdallah, Bhawna Piryani, and Adam Jatowt. 2023. Exploring the state of the art in legal qa systems. Journal of Big Data, 10(1):127. +AI@Meta. 2024. Llama 3 model card. +Andrew Blair-Stanek, Nils Holzenberger, and Benjamin Van Durme. 2023. Can gpt-3 perform statutory reasoning? In Proceedings of the Nineteenth International Conference on Artificial Intelligence and Law, pages 22-31. +Jacob Cohen. 1960. A coefficient of agreement for nominal scales. Educational and psychological measurement, 20(1):37-46. +Yongfu Dai, Duanyu Feng, Jimin Huang, Haochen Jia, Qianqian Xie, Yifang Zhang, Weiguang Han, Wei Tian, and Hao Wang. 2023. Laiw: A chinese legal large language models benchmark (a technical report). arXiv preprint arXiv:2310.05620.
+Haolin Deng, Chang Wang, Xin Li, Dezhang Yuan, Junlang Zhan, Tianhua Zhou, Jin Ma, Jun Gao, and Ruifeng Xu. 2024. Websites: Attributed query-focused summarization on chinese web search results with citations. arXiv preprint arXiv:2403.01774. +Jacob Devlin. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805. +Zhiwei Fei, Xiaoyu Shen, Dawei Zhu, Fengzhe Zhou, Zhuo Han, Songyang Zhang, Kai Chen, Zongwen + +Shen, and Jidong Ge. 2023. Lawbench: Benchmarking legal knowledge of large language models. arXiv preprint arXiv:2309.16289. +Luyu Gao, Zhuyun Dai, Panupong Pasupat, Anthony Chen, Arun Tejasvi Chaganty, Yicheng Fan, Vincent Zhao, Ni Lao, Hongrae Lee, Da-Cheng Juan, et al. 2023a. Rarr: Researching and revising what language models say, using language models. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 16477-16508. +Tianyu Gao, Howard Yen, Jiatong Yu, and Danqi Chen. 2023b. Enabling large language models to generate text with citations. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 6465-6488. +Wanwei He, Jiabao Wen, Lei Zhang, Hao Cheng, Bowen Qin, Yunshui Li, Feng Jiang, Junying Chen, Benyou Wang, and Min Yang. 2023. Hanfei-1.0. https://github.com/siat-nlp/HanFei. +Matthew Honnibal, Ines Montani, Sofie Van Landeghem, and Adriane Boyd. 2020. spaCy: Industrial-strength Natural Language Processing in Python. +Dongfang Li, Zetian Sun, Xinshuo Hu, Zhenyu Liu, Ziyang Chen, Baotian Hu, Aiguo Wu, and Min Zhang. 2023. A survey of large language models attribution. arXiv preprint arXiv:2311.03731. +Haitao Li, You Chen, Qingyao Ai, Yueyue Wu, Ruizhe Zhang, and Yiqun Liu. 2024. Lexeval: A comprehensive chinese legal benchmark for evaluating large language models. Preprint, arXiv:2409.20288. +Chin-Yew Lin. 2004. Rouge: A package for automatic evaluation of summaries. 
In Text summarization branches out, pages 74-81. +Hongcheng Liu, Yusheng Liao, Yutong Meng, and Yuhao Wang. 2023. Xiezhi: Chinese law large language model. https://github.com/LiuHC0428/LAW_GPT. +Luyao Ma, Yating Zhang, Tianyi Wang, Xiaozhong Liu, Wei Ye, Changlong Sun, and Shikun Zhang. 2021a. Legal judgment prediction with multi-stage case representation learning in the real court setting. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 993-1002. +Yixiao Ma, Yunqiu Shao, Yueyue Wu, Yiqun Liu, Ruizhe Zhang, Min Zhang, and Shaoping Ma. 2021b. Lecard: A legal case retrieval dataset for chinese law system. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 2342-2348. +Chaitanya Malaviya, Subin Lee, Sihao Chen, Elizabeth Sieber, Mark Yatskar, and Dan Roth. 2024. Expertqa: Expert-curated questions and attributed answers. In Proceedings of the 2024 Conference of the North + +American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 3025-3045. +Krishna Pillutla, Swabha Swayamdipta, Rowan Zellers, John Thickstun, Sean Welleck, Yejin Choi, and Zaid Harchaoui. 2021. Mauve: Measuring the gap between neural text and human text using divergence frontiers. Advances in Neural Information Processing Systems, 34:4816-4828. +N Reimers. 2019. Sentence-bert: Sentence embeddings using siamese bert-networks. arXiv preprint arXiv:1908.10084. +Jaromir Savelka, Kevin D Ashley, Morgan A Gray, Hannes Westermann, and Huihui Xu. 2023. Explaining legal concepts with augmented large language models (gpt-4). arXiv preprint arXiv:2306.09525. +Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumont, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, et al. 2020. Transformers: State-of-the-art natural language processing. 
In Proceedings of the 2020 conference on empirical methods in natural language processing: system demonstrations, pages 38-45. +Shiguang Wu, Zhongkun Liu, Zhen Zhang, Zheng Chen, Wentao Deng, Wenhao Zhang, Jiyuan Yang, Zhitao Yao, Yougang Lyu, Xin Xin, Shen Gao, Pengjie Ren, Zhaochun Ren, and Zhumin Chen. 2023a. fuzi.mingcha. https://github.com/irlab-sdu/fuzi.mingcha. +Yiquan Wu, Yuhang Liu, Yifei Liu, Ang Li, Siying Zhou, and Kun Kuang. wisdominterrogatory. Available at GitHub. +Yiquan Wu, Siying Zhou, Yifei Liu, Weiming Lu, Xiaozhong Liu, Yating Zhang, Changlong Sun, Fei Wu, and Kun Kuang. 2023b. Precedent-enhanced legal judgment prediction with llm and domain-model collaboration. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 12060-12075. +Shitao Xiao, Zheng Liu, Peitian Zhang, Niklas Muennighoff, Defu Lian, and Jian-Yun Nie. 2023. C-pack: Packaged resources to advance general chinese embedding. arXiv preprint arXiv:2309.07597. +An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu + +Cui, Zhenru Zhang, and Zhihao Fan. 2024. Qwen2 technical report. arXiv preprint arXiv:2407.10671. +Fangyi Yu, Lee Quartey, and Frank Schilder. 2022a. Legal prompting: Teaching a language model to think like a lawyer. arXiv preprint arXiv:2212.01326. 
+Weijie Yu, Zhongxiang Sun, Jun Xu, Zhenhua Dong, Xu Chen, Hongteng Xu, and Ji-Rong Wen. 2022b. Explainable legal case matching via inverse optimal transport-based rationale extraction. In Proceedings of the 45th international ACM SIGIR conference on research and development in information retrieval, pages 657-668. +Shengbin Yue, Wei Chen, Siyuan Wang, Bingxuan Li, Chenchen Shen, Shujun Liu, Yuxuan Zhou, Yao Xiao, Song Yun, Xuanjing Huang, and Zhongyu Wei. 2023. Disc-lawllm: Fine-tuning large language models for intelligent legal services. Preprint, arXiv:2309.11325. +Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q Weinberger, and Yoav Artzi. 2019. Bertscore: Evaluating text generation with bert. arXiv preprint arXiv:1904.09675. +Haoxi Zhong, Chaojun Xiao, Cunchao Tu, Tianyang Zhang, Zhiyuan Liu, and Maosong Sun. 2020. Jecqa: a legal-domain question answering dataset. In Proceedings of the AAAI conference on artificial intelligence, volume 34, pages 9701-9708. +Haoxi Zhong, Zhengyan Zhang, Zhiyuan Liu, and Maosong Sun. 2019. Open chinese language pretrained model zoo. Technical report. +Zhi Zhou, Jiang-Xin Shi, Peng-Xiao Song, Xiao-Wen Yang, Yi-Xuan Jin, Lan-Zhe Guo, and Yu-Feng Li. 2024. Lawgpt: A chinese legal knowledge-enhanced large language model. Preprint, arXiv:2406.04614. + +# A The Used Prompts + +Figure 5 illustrates the prompts used in this paper, including $p_1$ , $p_2$ , $p_3$ in Eq. 1, Eq. 2 and Eq. 3. + +# B More Details of Evaluated Models and Datasets + +For the Legal LLMs, we choose (1) fuzi.mingcha (6B) (Wu et al., 2023a): It leverages unsupervised judicial corpora for training and uses syllogistic reasoning judgment data for fine-tuning. (2) LexiLaw7 (6B): It specifically utilizes legal articles and legal reference books for training. (3) Tailing8 (7B): It uses judicial text validation data, information extraction data, and judgment data for training. 
(4) DISC-LawLLM (13B) (Yue et al., 2023): In addition to fine-tuning with pairs, it also uses triplet data for fine-tuning to enhance the model's ability to leverage external knowledge. (5) zhihai (7B) (Wu et al.): It utilizes ChatGPT to modify the existing dataset and then performs secondary pre-training. (6) LawGPT_zh (6B) (Liu et al., 2023): It primarily uses scenario-based dialogues and knowledge-based question-answering data for fine-tuning based on LoRA. (7) HanFei (7B) (He et al., 2023): It is the first fully parameter-trained legal LLM in China. Because in the main experiment, CGG has the best overall performance, for the legal LLMs, we generate responses using CGG. + +Table 4 and Table 5 are the website URLs and corresponding licenses of the evaluated models and datasets. + +# C More Details on Implementation + +Considering the length of legal texts and the input window for the LLMs is limited, all experiments in this paper are conducted using a zero-shot setting. We use the Chinese-performing-well Qwen2-1.5B (Yang et al., 2024)9 to complete the MAUVE calculations. For ROUGE, we use version 1.0.1 of ROUGE for calculation. For BERTScore, we use bert-base-chinese (Devlin, 2018)10 to compute it. Regarding sentence-BERT, we employ paraphrase-multilingual-MiniLM-L12-v2 (Reimers, 2019)11. + +# D Human Evaluation + +We hired four legal annotators from a Chinese university, all of whom have legal education backgrounds and are familiar with the cases in the dataset they need to annotate. We explained to the annotators that the data they annotated would be used for scientific research and paid them a reasonable remuneration based on local conditions. They are all graduate students from the judicial field, with practical experience in the legal profession. Two are male, two are female, aged between 24 and 30, and all have over five years of judicial theory study.
Two annotators were responsible for the first stage of annotation, while the other two were responsible for the second stage, with all working together on the annotation process. + +Table 6 shows a detailed description of each level used to evaluate the agreement of the NLI model with human evaluations. + +# E Different Retrieval Models + +Tables 7 and 8 present the performance of different retrieval models—Criminal-BERT, Civil-BERT, and BGE—on each metric for the CGG method across the two datasets. It can be observed that when Llama3 and Qwen2 are used as LLMs, BGE achieves the best performance as the retrieval model. Comparing the two datasets, on the Layperson dataset, where the questions are more general, Criminal-BERT and Civil-BERT, which focus on legal cases, perform relatively poorly. In contrast, on the Practitioner dataset, despite no structural or training improvements, Criminal-BERT and Civil-BERT achieve results comparable to BGE, highlighting the importance of legal knowledge in judicial QA tasks. + +The differences between the two datasets also underscore the significance of selecting an appropriate retrieval model. + +# F Different NLI Models + +Figures 6 (a) and (b) show the entailment scores given by four legal LLMs as NLI models under different methods (CloseBook, CGG, ARG-Q, ARG-QA) and metrics (CitaLaw, CitaS, CitaB, and CitaC) when Qwen is used as the LLM. Similar conclusions to those in Section 6.5 can be drawn. + +Answer the question based on the provided law article and cite it appropriately. Only output the answer and citations, without including any additional content. When citing the law article, use [A1] at the end of the relevant sentence. + +Below is the provided law article: Law article [A1]: {Law article 1} + +Question: {Question} Answer: + +# (a) Layperson + +Refine the text based on the references and only output the refined text.
+ +Text: {Answer} +Reference: {References} + +Refined text (only output the Refined text, without any additional content): + +# (c) Response Refinement + +Answer the question based on the provided documents and cite them appropriately. + +Only output the answer and citations, without including any additional content. + +When citing precedent cases, use [C1], [C2], or [C3] at the end of the sentence. When citing the law article, use [A1]. + +Below are the provided documents: +Precedent case [C1]: {Precedent case 1} + +Precedent case [C2]: {Precedent case 2} + +Precedent case [C3]: {Precedent case 3} +Law article [A1]: {Law article 1} + +Question: {Question} Answer: + +# (b) Practitioner + +Answer the question. Only output the answer without any additional content. + +Question: {Question} Answer: + +# (d) Without Reference + +
TypeLLMURLLicence
Open domainQwen2-7B-Instructhttps://huggingface.co/Qwen/Qwen2-7B-InstructApache-2.0 license
Llam3-8B-Instructhttps://github.com/meta-llama/llama3META LLAMA 3 COMMUNITY License
Legal Domainfuzi.mingchahttps://github.com/irlab-sdu/fuzi.mingchaApache-2.0 license
DISC-LawLLMhttps://github.com/FudanDISC/DISC-LawLLMApache-2.0 license
LawGPT_zhhttps://github.com/LiuHC0428/LAW-GPT
Hanfeihttps://github.com/siat-nlp/HanFeiApache-2.0 license
Tailinghttps://github.com/DUTIR-LegalIntelligence/Tailing
LexiLawhttps://github.com/CSHaitao/LexiLawMIT license
zhihaihttps://github.com/zhihaiLLM/wisdomInterrogatoryApache-2.0 license
+ +Table 4: The LLM source URLs and licenses used by CitaLaw. The parts where the license is listed as empty indicate that the author has not provided a License. + +![](images/1b8aaf2ed2a7460c159e4afcce3fcd90678b2d5fe400a1644ae0c81d1b896487.jpg) +Figure 5: Prompts used in this paper. (a) The prompt $p_1$ is used to retrieve one law article in the Layperson dataset. (b) The prompt $p_1$ is used to retrieve one law article and three precedent cases in the Practitioner dataset. (c) The prompt $p_3$ is used to refine the LLM's answer based on references. (d) The prompt $p_2$ is used for LLM responses without references. + +![](images/1871806c373c10877618a242a1d2f56ced23ad1f3ac4caa451cbf99ee3a98601.jpg) +(a) Metrics for CGG method with Layperson dataset. +(b) Methods for CitaLaw metric with Practitioner dataset. +Figure 6: The performance of different NLI models when the LLM is Qwen. + +
TypeDatasetURLLicence
QuestionLayperson Practitionerhttps://github.com/open-compass/LawBenchApache-2.0 license
https://github.com/CSHaitao/LexEvalMIT License
CorpusLeCaRDhttps://github.com/myx666/LeCaRDMIT License
ELAMhttps://github.com/ruc-wjyu/IOT-MatchMIT License
CAIL2021-sfzyhttps://github.com/china-ai-law-challenge/CAIL2021
LJP-MSJudg fuzi.mingchahttps://github.com/mly-nlp/LJP-MSJudge
DISC-LawLLMhttps://github.com/irlab-sdu/fuzi.mingchaApache-2.0 license
LawGPT_zhhttps://github.com/FudanDISC/DISC-LawLLMApache-2.0 license
Hanfeihttps://github.com/LiuHC0428/LAW-GPT
https://github.com/siat-nlp/HanFeiApache-2.0 license
+ +Table 5: The dataset source URLs and licenses used by CitaLaw. The parts where the license is listed as empty indicate that the author has not provided a License. + +
ScoreDescription
1No Entailment: The former does not entail the latter at all, with no logical connection between the two.
2Weak Entailment: A partial entailment where the former somewhat relates to the latter, but the connection is weak and not fully conclusive.
3Moderate Entailment: A moderate degree of entailment, meaning the former generally leads to the latter in most cases, but exceptions exist.
4Strong Entailment: A strong logical relationship where the former can derive the latter in the vast majority of cases.
5Complete Entailment: The former fully entails the latter in all cases, with an unambiguous and definitive logical connection between them.
+ +Table 6: Scoring Criteria for Human Evaluation of Entailment. + +
MetricFluencyCorrectnessCitationAll
CategoryRetrieverMauveRouge-1Rouge-2Rouge-LBERT-FCorrectcCorrectaCorrectdCitaLawAvg
Llama3 (Llam3-8B-Instruct)Criminal37.4418.072.1813.1561.7164.0363.5664.3680.3444.98
Civil56.1618.272.3413.4461.9063.2263.8963.3580.9747.06
BGE61.0123.976.0517.9165.9467.2977.3174.9586.7053.46
Qwen2 (Qwen2-7B-Instruct)Criminal55.2621.094.5314.3264.7363.1064.8965.8561.6046.15
Civil52.4420.484.1613.8164.4561.7964.9465.6259.8845.29
BGE75.1022.264.7715.4165.2867.5078.6277.8277.5953.82
+ +Table 7: Performance comparisons on retrieval models in the Layperson dataset when the method is CGG. The best performance is indicated in bold. + +
MetricFluencyCorrectnessCitationAll
CategoryRetrieverMauveRouge-1Rouge-2Rouge-LBERT-FCorrectcCorrectaCorrectdCitaLawCitaCCitaaCitaDAvg
Llama3 (Llam3-8B-Instruct)Criminal34.2525.797.8619.4265.0366.2776.3076.8270.5966.4170.0969.4754.03
Civil39.8426.398.0720.0265.2765.4175.7875.7369.2167.5269.5469.1654.33
BGE36.3726.157.8419.5565.6067.1976.3677.7373.5868.2367.8767.6554.51
Qwen2 (Qwen2-7B-Instruct)Criminal32.4931.7911.0923.9369.7972.0080.8181.5368.4268.4271.8671.5456.97
Civil33.3731.6711.0623.8469.6373.3580.5781.2769.1166.4170.0969.4756.65
BGE39.6631.0110.7523.4369.0673.4980.1181.1170.3767.8269.5370.0157.20
+ +Table 8: Performance comparisons on retrieval models in the Practitioner dataset when the method is CGG. The best performance is indicated in bold. \ No newline at end of file diff --git a/2025/CitaLaw_ Enhancing LLM with Citations in Legal Domain/images.zip b/2025/CitaLaw_ Enhancing LLM with Citations in Legal Domain/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..2878ffde720d00e65c45073b45e02c3a068ac1f1 --- /dev/null +++ b/2025/CitaLaw_ Enhancing LLM with Citations in Legal Domain/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ef3d44807091a05db9f73e2dbb186e7fc266aaeb98f3004f93dfbbdfbad3727 +size 785144 diff --git a/2025/CitaLaw_ Enhancing LLM with Citations in Legal Domain/layout.json b/2025/CitaLaw_ Enhancing LLM with Citations in Legal Domain/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..591c8bc3e3d01949906922956b1a36482eac9769 --- /dev/null +++ b/2025/CitaLaw_ Enhancing LLM with Citations in Legal Domain/layout.json @@ -0,0 +1,10258 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 115, + 76, + 479, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 76, + 479, + 94 + ], + "spans": [ + { + "bbox": [ + 115, + 76, + 479, + 94 + ], + "type": "text", + "content": "CitaLaw: Enhancing LLM with Citations in Legal Domain" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 171, + 116, + 424, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 171, + 116, + 424, + 132 + ], + "spans": [ + { + "bbox": [ + 171, + 116, + 424, + 132 + ], + "type": "text", + "content": "Kepu Zhang" + }, + { + "bbox": [ + 171, + 116, + 424, + 132 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 171, + 116, + 424, + 132 + ], + "type": "text", + "content": ", Weijie Yu" + }, + { + "bbox": [ + 171, + 116, + 424, + 132 + ], + "type": "inline_equation", + "content": "^{2*}" + }, + { + "bbox": [ + 171, 
+ 116, + 424, + 132 + ], + "type": "text", + "content": ", Sunhao Dai" + }, + { + "bbox": [ + 171, + 116, + 424, + 132 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 171, + 116, + 424, + 132 + ], + "type": "text", + "content": ", Jun Xu" + }, + { + "bbox": [ + 171, + 116, + 424, + 132 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 126, + 132, + 467, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 132, + 467, + 146 + ], + "spans": [ + { + "bbox": [ + 126, + 132, + 467, + 146 + ], + "type": "text", + "content": "1Gaoling School of Artificial Intelligence, Renmin University of China" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 167, + 146, + 426, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 146, + 426, + 158 + ], + "spans": [ + { + "bbox": [ + 167, + 146, + 426, + 158 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 167, + 146, + 426, + 158 + ], + "type": "text", + "content": " University of International Business and Economics" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 190, + 160, + 403, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 190, + 160, + 403, + 174 + ], + "spans": [ + { + "bbox": [ + 190, + 160, + 403, + 174 + ], + "type": "text", + "content": "kepuzhang@ruc.edu.cn, yu@uibep.edu.cn" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "spans": [ + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 84, + 243, + 274, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 243, + 274, + 507 + ], + "spans": [ + { + "bbox": [ + 84, + 243, + 274, + 507 + ], + "type": "text", + "content": "In this 
paper, we propose Citalaw, the first benchmark designed to evaluate LLMs' ability to produce legally sound responses with appropriate citations. Citalaw features a diverse set of legal questions for both laypersons and practitioners, paired with a comprehensive corpus of law articles and precedent cases as a reference pool. This framework enables LLM-based systems to retrieve supporting citations from the reference corpus and align these citations with the corresponding sentences in their responses. Moreover, we introduce syllogism-inspired evaluation methods to assess the legal alignment between retrieved references and LLM-generated responses, as well as their consistency with user questions. Extensive experiments on 2 open-domain and 7 legal-specific LLMs demonstrate that integrating legal references substantially enhances response quality. Furthermore, our proposed syllogism-based evaluation method exhibits strong agreement with human judgments." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 518, + 155, + 530 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 518, + 155, + 530 + ], + "spans": [ + { + "bbox": [ + 68, + 518, + 155, + 530 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 539, + 291, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 539, + 291, + 714 + ], + "spans": [ + { + "bbox": [ + 67, + 539, + 291, + 714 + ], + "type": "text", + "content": "Generating responses supported by citations, such as relevant law articles and precedent cases, is essential for ensuring the trustworthiness of large language models (LLMs) in legal tasks. For laypersons seeking legal advice (Fei et al., 2023), LLM-generated responses grounded in citations provide verifiable information, fostering trust in the system. 
Conversely, for legal practitioners such as lawyers and judges, citations serve as supportive evidence that aids in analyzing complex cases, validating legal arguments, and ensuring decisions align with established legal principles (Li et al., 2024; Zhong et al., 2020; Abdallah et al., 2023)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 716, + 291, + 756 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 716, + 291, + 756 + ], + "spans": [ + { + "bbox": [ + 67, + 716, + 291, + 756 + ], + "type": "text", + "content": "Recently, a growing body of benchmark research (Gao et al., 2023a; Li et al., 2023) has focused on enabling LLMs to provide citations for the" + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 307, + 216, + 523, + 327 + ], + "blocks": [ + { + "bbox": [ + 307, + 216, + 523, + 327 + ], + "lines": [ + { + "bbox": [ + 307, + 216, + 523, + 327 + ], + "spans": [ + { + "bbox": [ + 307, + 216, + 523, + 327 + ], + "type": "image", + "image_path": "8b18c275924d2d7c798f99f411d1b1dd24b37d58177b2cd07259e65b86050300.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 329, + 335, + 499, + 347 + ], + "lines": [ + { + "bbox": [ + 329, + 335, + 499, + 347 + ], + "spans": [ + { + "bbox": [ + 329, + 335, + 499, + 347 + ], + "type": "text", + "content": "Figure 1: The framework of our CitaLaw." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 366, + 526, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 366, + 526, + 475 + ], + "spans": [ + { + "bbox": [ + 302, + 366, + 526, + 475 + ], + "type": "text", + "content": "statements they generate. For instance, ALCE (Gao et al., 2023b) introduces a benchmark designed to evaluate the ability of LLMs to generate citation-supported outputs, aiming to improve factual accuracy. 
WebCiteS (Deng et al., 2024) provides a curated database of manually annotated summaries and citations to enhance performance in text summarization and citation generation." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 478, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 478, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 478, + 526, + 775 + ], + "type": "text", + "content": "While these studies have made notable progress in general domains, they face significant challenges when applied to the legal domain. First, laypersons and legal practitioners interact with LLMs differently and have distinct expectations for citations. Laypersons typically seek legal advice and rely on citations to verify the accuracy of LLM responses, whereas legal practitioners pose more complex queries, using LLMs for legal reasoning, with citations serving as supportive evidence. Existing studies fail to address these differences, leading to unsatisfactory performance in real-world applications. Second, existing methods often fall short in providing the diverse references required in legal contexts, such as law articles and precedent cases. Law articles establish the foundational legal framework, while precedent cases offer concrete examples and interpretive guidance. These two types of references inherently align with the distinct characteristics of civil and common law systems. 
Third, traditional citation evaluation measures, such as ROUGE (Lin, 2004), rely on surface-level similar" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 80, + 762, + 170, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 762, + 170, + 774 + ], + "spans": [ + { + "bbox": [ + 80, + 762, + 170, + 774 + ], + "type": "text", + "content": "* Corresponding author" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "text", + "content": "11183" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 131, + 795, + 463, + 806 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 795, + 463, + 806 + ], + "spans": [ + { + "bbox": [ + 131, + 795, + 463, + 806 + ], + "type": "text", + "content": "Findings of the Association for Computational Linguistics: ACL 2025, pages 11183-11196" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 160, + 807, + 433, + 818 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 807, + 433, + 818 + ], + "spans": [ + { + "bbox": [ + 160, + 807, + 433, + 818 + ], + "type": "text", + "content": "July 27 - August 1, 2025 ©2025 Association for Computational Linguistics" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 73, + 68, + 520, + 257 + ], + "blocks": [ + { + "bbox": [ + 73, + 68, + 520, + 257 + ], + "lines": [ + { + "bbox": [ + 73, + 68, + 520, + 257 + ], + "spans": [ + { + "bbox": [ + 73, + 68, + 520, + 257 + ], + "type": "image", + "image_path": "b0903bfb6decf7c8b09ba1de2359ba276ce2f70217a00854a97e5ed3d8807b8a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 266, + 
525, + 303 + ], + "lines": [ + { + "bbox": [ + 67, + 266, + 525, + 303 + ], + "spans": [ + { + "bbox": [ + 67, + 266, + 525, + 303 + ], + "type": "text", + "content": "Figure 2: Examples from the two subsets of CitaLaw, with text in red, blue, and yellow representing the three dimensions of the syllogism: major premise, minor premise (circumstances, illegal acts), and conclusion (legal decisions), respectively. [A] and [C] denote citations to relevant law articles and precedent cases, respectively." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 315, + 290, + 383 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 315, + 290, + 383 + ], + "spans": [ + { + "bbox": [ + 67, + 315, + 290, + 383 + ], + "type": "text", + "content": "ities and are often insufficient to assess the alignment between references and LLM-generated responses. In the legal domain, effective evaluation requires a deeper understanding of logical and semantic relationships." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 386, + 290, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 386, + 290, + 467 + ], + "spans": [ + { + "bbox": [ + 67, + 386, + 290, + 467 + ], + "type": "text", + "content": "To overcome the above challenges, we propose CitaLaw, the first benchmark tailored to evaluate LLMs' capabilities in generating legally grounded responses supported by accurate and context-aware citations. 
As shown in Figure 1, CitaLaw incorporates four distinct legal-specific features:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 470, + 290, + 774 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 67, + 470, + 290, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 470, + 290, + 538 + ], + "spans": [ + { + "bbox": [ + 67, + 470, + 290, + 538 + ], + "type": "text", + "content": "(1) CitaLaw has two subsets tailored for laypersons and practitioners, with examples in Figure 2. Laypersons typically ask shorter, conversational questions, while practitioners often pose specialized, detailed questions." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 542, + 290, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 542, + 290, + 677 + ], + "spans": [ + { + "bbox": [ + 67, + 542, + 290, + 677 + ], + "type": "text", + "content": "(2) CitaLaw includes a retrieval corpus comprising two commonly used references: law articles, which provide clear and concise guidelines for addressing user questions, and precedent cases, which offer legal reasoning and support for judicial decisions. Recognizing the distinct needs of laypersons and practitioners, we provide only law articles for laypersons to ensure clarity, while practitioners have access to both law articles and precedent cases to support more complex legal reasoning." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 680, + 290, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 680, + 290, + 774 + ], + "spans": [ + { + "bbox": [ + 67, + 680, + 290, + 774 + ], + "type": "text", + "content": "(3) In addition to traditional global-level metrics such as MAUVE (Pillutla et al., 2021), we propose a syllogism-based evaluation method to assess both the response correctness and the citation quality. 
This method provides a more granular evaluation by focusing on three key dimensions: circumstances, illegal acts, and legal decisions." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 302, + 315, + 526, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 315, + 526, + 464 + ], + "spans": [ + { + "bbox": [ + 302, + 315, + 526, + 464 + ], + "type": "text", + "content": "(4) We consider two types of response generation methods. The first type, Citation-Guided Generation (CGG), involves generating responses by incorporating retrieved references during generation. The second type, Answer Refinement Generation (ARG), refines the LLMs' initial response (CloseBook) by retrieving and incorporating reference information. This category includes ARG-Q, which retrieves citations using only the user query, and ARG-QA, which retrieves citations using both the user query and the LLM's initial response." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 465, + 525, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 465, + 525, + 666 + ], + "spans": [ + { + "bbox": [ + 302, + 465, + 525, + 666 + ], + "type": "text", + "content": "Extensive experiments on two open-domain and seven legal-specific LLMs reveal the following key insights: 1) Incorporating legal references into the LLM significantly improves the quality of responses; 2) Including references as part of the LLM's input consistently outperforms answer-refinement methods; 3) Leveraging references to refine the LLM's responses yields better alignment of responses and references. 4) For fine-tuning LLMs in legal scenarios, incorporating law articles, syllogistic reasoning, and full-scale fine-tuning achieves promising performance. 5) Open-domain LLMs surprisingly outperform legal-specific LLMs in certain scenarios; 6) Human evaluations show a strong correlation with our syllogism-based methods." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 669, + 515, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 669, + 515, + 682 + ], + "spans": [ + { + "bbox": [ + 314, + 669, + 515, + 682 + ], + "type": "text", + "content": "In summary, our contributions are as follows:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 316, + 694, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 694, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 316, + 694, + 525, + 775 + ], + "type": "text", + "content": "- To the best of our knowledge, CitaLaw is the first benchmark designed to evaluate the capability of LLMs to generate legally grounded responses with accurate and context-aware citations. CitaLaw includes questions tailored to both laypersons and practitioners, paired" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "text", + "content": "11184" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 89, + 71, + 290, + 98 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 71, + 290, + 98 + ], + "spans": [ + { + "bbox": [ + 89, + 71, + 290, + 98 + ], + "type": "text", + "content": "with a citation corpus comprising law articles and precedent cases." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 81, + 108, + 290, + 307 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 81, + 108, + 289, + 188 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 108, + 289, + 188 + ], + "spans": [ + { + "bbox": [ + 81, + 108, + 289, + 188 + ], + "type": "text", + "content": "- We propose a two-level evaluation framework that combines global-level metrics with a syllogism-based reasoning approach. Additionally, we explore two mainstream methods for legal response generation: citation-guided and answer refinement." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 81, + 200, + 290, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 200, + 290, + 307 + ], + "spans": [ + { + "bbox": [ + 81, + 200, + 290, + 307 + ], + "type": "text", + "content": "- Through extensive experiments on two open-domain and seven legal-specific LLMs, we demonstrate the effectiveness of integrating legal references into response generation and validate our syllogism-based evaluation method. Additionally, we provide actionable insights for the practical deployment of LLMs in legal scenarios." + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 68, + 319, + 161, + 332 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 319, + 161, + 332 + ], + "spans": [ + { + "bbox": [ + 68, + 319, + 161, + 332 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 341, + 291, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 341, + 291, + 571 + ], + "spans": [ + { + "bbox": [ + 67, + 341, + 291, + 571 + ], + "type": "text", + "content": "LLM for Legal Task. A amount of work has explored applying LLMs to legal tasks (Savelka et al., 2023; Wu et al., 2023b; Yu et al., 2022a; Blair-Stanek et al., 2023). 
Building LLMs tailored for legal scenarios is a popular direction (Yue et al., 2023; Wu et al., 2023a; He et al., 2023). There are also some benchmarks that explore the capabilities of LLMs in legal tasks. LawBench (Fei et al., 2023) evaluates LLMs' legal knowledge across three cognitive aspects. LAiW (Dai et al., 2023) assesses LLMs' legal reasoning abilities based on legal practice logic. LexEval (Li et al., 2024) evaluates LLMs' legal capabilities based on a new legal cognitive ability classification system. However, none of them have considered enhancing the trustworthiness of LLMs in legal scenarios by generating outputs with citations." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 571, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 571, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 571, + 291, + 775 + ], + "type": "text", + "content": "Citation in LLM. Attribution (Li et al., 2023) in LLMs refers to providing supporting evidence for the answers generated by the model, presented in the form of citations. ALCE (Gao et al., 2023b) is an automated benchmark for evaluating LLMs' ability to generate outputs with citations, aimed at improving the factual accuracy of the generated responses. WebCiteS (Deng et al., 2024) provides a database containing 7,000 manually annotated summaries and citations to enhance LLMs' capabilities in summarization and citation. RARR (Gao et al., 2023a) enhances LLM outputs by automatically adding citations, and modifying the responses. ExpertQA (Malaviya et al., 2024) verifies and modifies citations through expert review to ensure re" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 71, + 524, + 98 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 524, + 98 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 524, + 98 + ], + "type": "text", + "content": "liability. 
In contrast to the above works, CitaLaw focuses specifically on citation in legal scenarios." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 109, + 515, + 122 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 109, + 515, + 122 + ], + "spans": [ + { + "bbox": [ + 302, + 109, + 515, + 122 + ], + "type": "text", + "content": "3 Task Setup and Dataset Construction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 132, + 526, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 132, + 526, + 307 + ], + "spans": [ + { + "bbox": [ + 302, + 132, + 526, + 307 + ], + "type": "text", + "content": "Suppose we have a legal corpus " + }, + { + "bbox": [ + 302, + 132, + 526, + 307 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 302, + 132, + 526, + 307 + ], + "type": "text", + "content": ", which consists of either a collection of precedent cases " + }, + { + "bbox": [ + 302, + 132, + 526, + 307 + ], + "type": "inline_equation", + "content": "(D_{l})" + }, + { + "bbox": [ + 302, + 132, + 526, + 307 + ], + "type": "text", + "content": " or law articles " + }, + { + "bbox": [ + 302, + 132, + 526, + 307 + ], + "type": "inline_equation", + "content": "(D_{c})" + }, + { + "bbox": [ + 302, + 132, + 526, + 307 + ], + "type": "text", + "content": ". 
Given a user question " + }, + { + "bbox": [ + 302, + 132, + 526, + 307 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 302, + 132, + 526, + 307 + ], + "type": "text", + "content": " posed by either a layperson or a practitioner, the LLM-based system is tasked with retrieving supportive citations from " + }, + { + "bbox": [ + 302, + 132, + 526, + 307 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 302, + 132, + 526, + 307 + ], + "type": "text", + "content": " and generating a legally grounded response " + }, + { + "bbox": [ + 302, + 132, + 526, + 307 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 302, + 132, + 526, + 307 + ], + "type": "text", + "content": ". The response " + }, + { + "bbox": [ + 302, + 132, + 526, + 307 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 302, + 132, + 526, + 307 + ], + "type": "text", + "content": " comprises a list of " + }, + { + "bbox": [ + 302, + 132, + 526, + 307 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 302, + 132, + 526, + 307 + ], + "type": "text", + "content": " sentences, i.e., " + }, + { + "bbox": [ + 302, + 132, + 526, + 307 + ], + "type": "inline_equation", + "content": "y = [s_1,\\dots ,s_n]" + }, + { + "bbox": [ + 302, + 132, + 526, + 307 + ], + "type": "text", + "content": ", where each sentence " + }, + { + "bbox": [ + 302, + 132, + 526, + 307 + ], + "type": "inline_equation", + "content": "s_i" + }, + { + "bbox": [ + 302, + 132, + 526, + 307 + ], + "type": "text", + "content": " refers to at most one corresponding citation. As illustrated in Figure 2, the system is further required to attach each citation to its relevant sentence, with \"[A]\" and \"[C]\" denoting references to law articles and precedent cases, respectively." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 309, + 525, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 309, + 525, + 348 + ], + "spans": [ + { + "bbox": [ + 302, + 309, + 525, + 348 + ], + "type": "text", + "content": "To enable the evaluation of this task, we construct the specialized dataset (Table 1 shows the statistics) as follows:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 349, + 525, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 349, + 525, + 444 + ], + "spans": [ + { + "bbox": [ + 302, + 349, + 525, + 444 + ], + "type": "text", + "content": "To simulate the behavior of laypersons, we include questions that are more conversational, lack detailed case descriptions, and are relatively short in length. We use the consultation section from LawBench (Fei et al., 2023), which collects user queries from the Hualv website1 and answers provided by lawyers or legal consulting firms." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 445, + 525, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 445, + 525, + 593 + ], + "spans": [ + { + "bbox": [ + 302, + 445, + 525, + 593 + ], + "type": "text", + "content": "To simulate the behavior of legal practitioners, we include questions that are more professional, often accompanied by detailed case descriptions, and generally longer. For this purpose, we use the open-ended question section from LexEval (Li et al., 2024), which consists of subjective questions from the National Uniform Legal Profession Qualification Examination. These questions are particularly challenging for LLMs, requiring them to understand the case fully and apply legal knowledge accurately to generate answers." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 595, + 526, + 743 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 595, + 526, + 743 + ], + "spans": [ + { + "bbox": [ + 302, + 595, + 526, + 743 + ], + "type": "text", + "content": "In terms of the corpus, we construct a comprehensive corpus from multiple sources, including law articles and precedent cases. Specifically, for law articles, we collect approximately 50,000 documents from LexiLaw" + }, + { + "bbox": [ + 302, + 595, + 526, + 743 + ], + "type": "inline_equation", + "content": "^2" + }, + { + "bbox": [ + 302, + 595, + 526, + 743 + ], + "type": "text", + "content": ", covering areas such as Civil Law, Criminal Law, and judicial interpretations. For precedent cases, we include both criminal and civil cases. Criminal cases are sourced from the LeCaRD legal retrieval dataset (Ma et al., 2021b), ELAM (Yu et al., 2022b), and civil cases from the CAIL legal summary" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 317, + 751, + 383, + 762 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 751, + 383, + 762 + ], + "spans": [ + { + "bbox": [ + 317, + 751, + 383, + 762 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 317, + 751, + 383, + 762 + ], + "type": "text", + "content": "www.66law.com" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 317, + 762, + 459, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 762, + 459, + 774 + ], + "spans": [ + { + "bbox": [ + 317, + 762, + 459, + 774 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 317, + 762, + 459, + 774 + ], + "type": "text", + "content": "https://github.com/CSHaitao/LexiLaw" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + 
"spans": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "text", + "content": "11185" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 70, + 69, + 296, + 112 + ], + "blocks": [ + { + "bbox": [ + 70, + 69, + 296, + 112 + ], + "lines": [ + { + "bbox": [ + 70, + 69, + 296, + 112 + ], + "spans": [ + { + "bbox": [ + 70, + 69, + 296, + 112 + ], + "type": "table", + "html": "
Dataset#QLenQLenAQ Type
Layperson50057.62107.40Question
Practitioner500618.96193.46Case + Question
", + "image_path": "1538c5a5008ee0e70ddbaa813d5a4617650be79590a5b11ce02b1f164dafec90.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 121, + 291, + 169 + ], + "lines": [ + { + "bbox": [ + 67, + 121, + 291, + 169 + ], + "spans": [ + { + "bbox": [ + 67, + 121, + 291, + 169 + ], + "type": "text", + "content": "Table 1: Dataset statistics. #Q indicates the number of questions, " + }, + { + "bbox": [ + 67, + 121, + 291, + 169 + ], + "type": "inline_equation", + "content": "\\mathrm{Len}_Q" + }, + { + "bbox": [ + 67, + 121, + 291, + 169 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 121, + 291, + 169 + ], + "type": "inline_equation", + "content": "\\mathrm{Len}_A" + }, + { + "bbox": [ + 67, + 121, + 291, + 169 + ], + "type": "text", + "content": " denote the average lengths of questions and gold answers, and Q Type refers to the question type." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 187, + 291, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 187, + 291, + 349 + ], + "spans": [ + { + "bbox": [ + 67, + 187, + 291, + 349 + ], + "type": "text", + "content": "dataset, LJP-MSJudge (Ma et al., 2021a), and the pre-training data of fuzi.mingcha (Wu et al., 2023a). As a supplement to precedent cases, we also incorporate question-and-answer pairs from fine-tuning datasets of legal LLMs as part of the precedent cases. These QA pairs are collected from DISC-LawLLM (Yue et al., 2023), LawGPT_zh (Liu et al., 2023), and HanFei (He et al., 2023). In total, the constructed corpus contains approximately 500,000 documents, ensuring sufficient coverage of both law articles and precedent cases to support diverse legal tasks." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 359, + 130, + 370 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 359, + 130, + 370 + ], + "spans": [ + { + "bbox": [ + 67, + 359, + 130, + 370 + ], + "type": "text", + "content": "4 Method" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 380, + 195, + 393 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 380, + 195, + 393 + ], + "spans": [ + { + "bbox": [ + 67, + 380, + 195, + 393 + ], + "type": "text", + "content": "4.1 Response Generation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 396, + 279, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 396, + 279, + 410 + ], + "spans": [ + { + "bbox": [ + 67, + 396, + 279, + 410 + ], + "type": "text", + "content": "We consider two types of methods in this study." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 411, + 289, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 411, + 289, + 451 + ], + "spans": [ + { + "bbox": [ + 67, + 411, + 289, + 451 + ], + "type": "text", + "content": "Citation-Guided Generation (CGG) produces response " + }, + { + "bbox": [ + 67, + 411, + 289, + 451 + ], + "type": "inline_equation", + "content": "y_{cgg}" + }, + { + "bbox": [ + 67, + 411, + 289, + 451 + ], + "type": "text", + "content": " given a user question " + }, + { + "bbox": [ + 67, + 411, + 289, + 451 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 67, + 411, + 289, + 451 + ], + "type": "text", + "content": " by referring retrieved relevant document(s) " + }, + { + "bbox": [ + 67, + 411, + 289, + 451 + ], + "type": "inline_equation", + "content": "D_R" + }, + { + "bbox": [ + 67, + 411, + 289, + 451 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 121, + 458, + 289, + 471 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 
458, + 289, + 471 + ], + "spans": [ + { + "bbox": [ + 121, + 458, + 289, + 471 + ], + "type": "interline_equation", + "content": "y _ {\\mathrm {c g g}} = f _ {\\mathrm {L L M}} \\left(x, D _ {R}, p _ {1}\\right), \\tag {1}", + "image_path": "d80c057c523508241be65d736d0ce0290fbeaa36a0bc578d64a01130521fb468.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 478, + 290, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 478, + 290, + 518 + ], + "spans": [ + { + "bbox": [ + 67, + 478, + 290, + 518 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 478, + 290, + 518 + ], + "type": "inline_equation", + "content": "f_{\\mathrm{LLM}}" + }, + { + "bbox": [ + 67, + 478, + 290, + 518 + ], + "type": "text", + "content": " denotes a open-domain or a legal specific LLM; " + }, + { + "bbox": [ + 67, + 478, + 290, + 518 + ], + "type": "inline_equation", + "content": "p_1" + }, + { + "bbox": [ + 67, + 478, + 290, + 518 + ], + "type": "text", + "content": " is the direct generation prompt. All prompt settings are detailed in Appendix A." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 518, + 290, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 518, + 290, + 586 + ], + "spans": [ + { + "bbox": [ + 67, + 518, + 290, + 586 + ], + "type": "text", + "content": "Answer Refinement Generation (ARG) is a two-stage method that generates the final response " + }, + { + "bbox": [ + 67, + 518, + 290, + 586 + ], + "type": "inline_equation", + "content": "y_{\\mathrm{arg}}" + }, + { + "bbox": [ + 67, + 518, + 290, + 586 + ], + "type": "text", + "content": " by refining the LLM's initial response " + }, + { + "bbox": [ + 67, + 518, + 290, + 586 + ], + "type": "inline_equation", + "content": "y_{\\mathrm{init}}" + }, + { + "bbox": [ + 67, + 518, + 290, + 586 + ], + "type": "text", + "content": " through the retrieval and incorporation of reference information. 
This process can be formulated as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 131, + 593, + 289, + 606 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 593, + 289, + 606 + ], + "spans": [ + { + "bbox": [ + 131, + 593, + 289, + 606 + ], + "type": "interline_equation", + "content": "y _ {\\text {i n i t}} = f _ {\\mathrm {L L M}} (x, p _ {2}), \\tag {2}", + "image_path": "8b3c396de93316103d9aae72a033b8cedd4830335725f208eaf4db9271f1a0ed.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 613, + 291, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 613, + 291, + 667 + ], + "spans": [ + { + "bbox": [ + 67, + 613, + 291, + 667 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 613, + 291, + 667 + ], + "type": "inline_equation", + "content": "p_2" + }, + { + "bbox": [ + 67, + 613, + 291, + 667 + ], + "type": "text", + "content": " is the prompt instructing the LLM to directly generate an initial response without reference information. We refer to this step as CloseBook. 
The initial response " + }, + { + "bbox": [ + 67, + 613, + 291, + 667 + ], + "type": "inline_equation", + "content": "y_{\\mathrm{init}}" + }, + { + "bbox": [ + 67, + 613, + 291, + 667 + ], + "type": "text", + "content": " is then refined as:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 116, + 674, + 289, + 687 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 674, + 289, + 687 + ], + "spans": [ + { + "bbox": [ + 116, + 674, + 289, + 687 + ], + "type": "interline_equation", + "content": "y _ {\\text {a r g}} = f _ {\\mathrm {L L M}} \\left(y _ {\\text {i n i t}}, D _ {R}, p _ {3}\\right), \\tag {3}", + "image_path": "974742aa3f78d94e57a96891eab29ac4810d84d06f9b25079544bdd792a92c45.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 694, + 289, + 720 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 694, + 289, + 720 + ], + "spans": [ + { + "bbox": [ + 67, + 694, + 289, + 720 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 694, + 289, + 720 + ], + "type": "inline_equation", + "content": "p_3" + }, + { + "bbox": [ + 67, + 694, + 289, + 720 + ], + "type": "text", + "content": " is the prompt guiding the LLM to refine the " + }, + { + "bbox": [ + 67, + 694, + 289, + 720 + ], + "type": "inline_equation", + "content": "y_{\\mathrm{init}}" + }, + { + "bbox": [ + 67, + 694, + 289, + 720 + ], + "type": "text", + "content": " using the retrieved documents " + }, + { + "bbox": [ + 67, + 694, + 289, + 720 + ], + "type": "inline_equation", + "content": "D_R" + }, + { + "bbox": [ + 67, + 694, + 289, + 720 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 67, + 721, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 721, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 721, + 291, + 775 + ], + "type": "text", + "content": "Laypersons and practitioners interact with LLMs differently and have distinct expectations for citations. When " + }, + { + "bbox": [ + 67, + 721, + 291, + 775 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 67, + 721, + 291, + 775 + ], + "type": "text", + "content": " is submitted by a layperson, the corresponding " + }, + { + "bbox": [ + 67, + 721, + 291, + 775 + ], + "type": "inline_equation", + "content": "D_{R}" + }, + { + "bbox": [ + 67, + 721, + 291, + 775 + ], + "type": "text", + "content": " consists of relevant law articles. In" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "type": "text", + "content": "contrast, when " + }, + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "type": "text", + "content": " is submitted by a practitioner, the corresponding " + }, + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "type": "inline_equation", + "content": "D_R" + }, + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "type": "text", + "content": " includes both relevant law articles and precedent cases. 
The process for retrieving " + }, + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "type": "inline_equation", + "content": "D_R" + }, + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 302, + 71, + 525, + 126 + ], + "type": "text", + "content": " is detailed in the next subsection." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 135, + 415, + 147 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 135, + 415, + 147 + ], + "spans": [ + { + "bbox": [ + 302, + 135, + 415, + 147 + ], + "type": "text", + "content": "4.2 Citation Retrieval" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 302, + 153, + 525, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 153, + 525, + 302 + ], + "spans": [ + { + "bbox": [ + 302, + 153, + 525, + 302 + ], + "type": "text", + "content": "We explore state-of-the-art open-domain dense retriever BGE (Xiao et al., 2023), along with two legal-specific dense retrievers, CriminalBERT (Zhong et al., 2019) and Civil-BERT (Zhong et al., 2019). 
We also investigate two types of retrieval queries: " + }, + { + "bbox": [ + 302, + 153, + 525, + 302 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 302, + 153, + 525, + 302 + ], + "type": "text", + "content": " (the user question alone, ARG-Q) and " + }, + { + "bbox": [ + 302, + 153, + 525, + 302 + ], + "type": "inline_equation", + "content": "[x; y_{\\mathrm{init}}]" + }, + { + "bbox": [ + 302, + 153, + 525, + 302 + ], + "type": "text", + "content": " (the concatenation of the user query " + }, + { + "bbox": [ + 302, + 153, + 525, + 302 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 302, + 153, + 525, + 302 + ], + "type": "text", + "content": " and the initial response " + }, + { + "bbox": [ + 302, + 153, + 525, + 302 + ], + "type": "inline_equation", + "content": "y_{\\mathrm{init}}" + }, + { + "bbox": [ + 302, + 153, + 525, + 302 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 302, + 153, + 525, + 302 + ], + "type": "inline_equation", + "content": "[]" + }, + { + "bbox": [ + 302, + 153, + 525, + 302 + ], + "type": "text", + "content": " denotes the concatenation operation, ARG-QA). The impact of different retrieval models on performance will be analyzed in the experiments." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 302, + 312, + 427, + 324 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 312, + 427, + 324 + ], + "spans": [ + { + "bbox": [ + 302, + 312, + 427, + 324 + ], + "type": "text", + "content": "4.3 Citation Attachment" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 302, + 330, + 525, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 330, + 525, + 396 + ], + "spans": [ + { + "bbox": [ + 302, + 330, + 525, + 396 + ], + "type": "text", + "content": "Building on the retrieved citations, this subsection outlines the process of attaching these law articles or precedents to specific sentences in the LLM-generated responses. This process involves answering two key questions:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 302, + 398, + 525, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 398, + 525, + 560 + ], + "spans": [ + { + "bbox": [ + 302, + 398, + 525, + 560 + ], + "type": "text", + "content": "What kind of sentences can be associated with citations? We utilize co-occurring words and legal entity extraction to identify sentences that explicitly reference legal concepts, actions, or terms relevant to the retrieved citations. Specifically, we construct a pool of legal terminologies using THUOCL3 and LaWGPT (Zhou et al., 2024). A sentence is considered eligible if it contains any of the terminologies from this pool. Additionally, we use SpaCy (Honnibal et al., 2020) to extract legal entities from each sentence. If a sentence includes legal entities, it is also deemed eligible for citation attachment." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 302, + 561, + 525, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 561, + 525, + 642 + ], + "spans": [ + { + "bbox": [ + 302, + 561, + 525, + 642 + ], + "type": "text", + "content": "How are citations attached to the identified sentences? If a sentence is deemed eligible for citation attachment, we associate it with retrieved citations as follows. For the laypersons, the retrieved law article " + }, + { + "bbox": [ + 302, + 561, + 525, + 642 + ], + "type": "inline_equation", + "content": "c_{l} \\in D_{l}" + }, + { + "bbox": [ + 302, + 561, + 525, + 642 + ], + "type": "text", + "content": " is attached to the most relevant sentence " + }, + { + "bbox": [ + 302, + 561, + 525, + 642 + ], + "type": "inline_equation", + "content": "s_{k} \\in y" + }, + { + "bbox": [ + 302, + 561, + 525, + 642 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 653, + 525, + 686 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 653, + 525, + 686 + ], + "spans": [ + { + "bbox": [ + 310, + 653, + 525, + 686 + ], + "type": "interline_equation", + "content": "C _ {\\text {L a y}} = \\left\\{\\left(s _ {k}, c _ {l}\\right) \\mid s _ {k} = \\underset {s _ {i} \\in y} {\\arg \\max } \\operatorname {s i m} \\left(s _ {i}, c _ {l}\\right) \\right\\}, \\tag {4}", + "image_path": "1dc83bb4e2c74c9c8bc7843d63d50845a676f8ebea2b84caf79f6e3cfa74b077.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 302, + 687, + 525, + 755 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 687, + 525, + 755 + ], + "spans": [ + { + "bbox": [ + 302, + 687, + 525, + 755 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 302, + 687, + 525, + 755 + ], + "type": "inline_equation", + "content": "(s_k, c_l)" + }, + { + "bbox": [ + 302, + 687, + 525, + 755 + ], + "type": "text", + "content": " represents 
attaching the reference " + }, + { + "bbox": [ + 302, + 687, + 525, + 755 + ], + "type": "inline_equation", + "content": "c_l" + }, + { + "bbox": [ + 302, + 687, + 525, + 755 + ], + "type": "text", + "content": " to the sentence " + }, + { + "bbox": [ + 302, + 687, + 525, + 755 + ], + "type": "inline_equation", + "content": "s_i" + }, + { + "bbox": [ + 302, + 687, + 525, + 755 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 302, + 687, + 525, + 755 + ], + "type": "inline_equation", + "content": "\\mathrm{sim}(\\cdot)" + }, + { + "bbox": [ + 302, + 687, + 525, + 755 + ], + "type": "text", + "content": " is computed using sentence-BERT (Reimers, 2019). We set " + }, + { + "bbox": [ + 302, + 687, + 525, + 755 + ], + "type": "inline_equation", + "content": "|C_{\\mathrm{Lay}}| = 1" + }, + { + "bbox": [ + 302, + 687, + 525, + 755 + ], + "type": "text", + "content": " because, typically, a layperson's query pertains to only one specific legal article. For practitioners," + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 315, + 761, + 452, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 761, + 452, + 774 + ], + "spans": [ + { + "bbox": [ + 315, + 761, + 452, + 774 + ], + "type": "text", + "content": "3https://github.com/thunlp/THUOCL" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "text", + "content": "11186" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 291, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 291, + 125 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 291, + 125 + ], + "type": "text", + "content": "we attach the retrieved 
law article in the same way as for laypersons. Additionally, we associate the retrieved precedent cases " + }, + { + "bbox": [ + 67, + 71, + 291, + 125 + ], + "type": "inline_equation", + "content": "c_{c} \\in D_{c}" + }, + { + "bbox": [ + 67, + 71, + 291, + 125 + ], + "type": "text", + "content": " with each " + }, + { + "bbox": [ + 67, + 71, + 291, + 125 + ], + "type": "inline_equation", + "content": "s_{i} \\in y" + }, + { + "bbox": [ + 67, + 71, + 291, + 125 + ], + "type": "text", + "content": ", which is formulated as:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 72, + 132, + 291, + 180 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 132, + 291, + 180 + ], + "spans": [ + { + "bbox": [ + 72, + 132, + 291, + 180 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} C _ {\\text {P r a}} = \\left\\{\\left(s _ {k}, c _ {l}\\right) \\mid s _ {k} = \\underset {s _ {i} \\in y} {\\arg \\max } \\operatorname {s i m} \\left(s _ {i}, c _ {l}\\right) \\right\\} (5) \\\\ \\cup \\{(s _ {i}, c _ {c}) |, c _ {c} = \\underset {c _ {j} \\in D _ {c}} {\\arg \\max } \\operatorname {s i m} (s _ {i}, c _ {j}) \\}, (5) \\\\ \\end{array}", + "image_path": "eff66cfc4d0d1aa940c5f6dd2ac63fe835845a05885ea7b4d0a6945d679f2603.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 189, + 290, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 189, + 290, + 217 + ], + "spans": [ + { + "bbox": [ + 67, + 189, + 290, + 217 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 189, + 290, + 217 + ], + "type": "inline_equation", + "content": "|D_c| = 3" + }, + { + "bbox": [ + 67, + 189, + 290, + 217 + ], + "type": "text", + "content": ", meaning each response " + }, + { + "bbox": [ + 67, + 189, + 290, + 217 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 67, + 189, + 290, + 217 + ], + "type": "text", + "content": " can be associated with up to three 
precedents4." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 226, + 145, + 238 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 226, + 145, + 238 + ], + "spans": [ + { + "bbox": [ + 67, + 226, + 145, + 238 + ], + "type": "text", + "content": "5 Evaluation" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 248, + 291, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 248, + 291, + 314 + ], + "spans": [ + { + "bbox": [ + 67, + 248, + 291, + 314 + ], + "type": "text", + "content": "CitaLaw provides a comprehensive evaluation framework incorporating metrics for fluency, correctness, and citation quality. This framework is divided into two levels of analysis: global level and the proposed syllogism level." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 315, + 291, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 315, + 291, + 478 + ], + "spans": [ + { + "bbox": [ + 67, + 315, + 291, + 478 + ], + "type": "text", + "content": "Syllogism, a foundational framework in legal reasoning, comprises three key components: the major premise, the minor premise, and the conclusion. In our legal context, these correspond to the relevant law article or precedent case (major premise), the factual circumstances and actions of a specific case (minor premise), and the resulting legal decision (conclusion). By integrating this syllogistic framework, CitaLaw goes beyond surface-level correctness to evaluate the logical coherence and alignment of LLM-generated responses with established legal principles." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 486, + 225, + 499 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 486, + 225, + 499 + ], + "spans": [ + { + "bbox": [ + 67, + 486, + 225, + 499 + ], + "type": "text", + "content": "5.1 Fluency (Style Consistency)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 503, + 291, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 503, + 291, + 653 + ], + "spans": [ + { + "bbox": [ + 67, + 503, + 291, + 653 + ], + "type": "text", + "content": "To ensure the LLM-generated responses align with the user's requirements, the system must adapt its style based on the user's background. For laypersons, responses should avoid excessive technical jargon to ensure accessibility and comprehension. Conversely, responses for legal practitioners should adopt a formal and professional tone to maintain credibility and utility. To achieve this aim, we concatenate the user query and the LLM-generated response and apply MAUVE (Pillutla et al., 2021) to assess their style consistency." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 661, + 151, + 673 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 661, + 151, + 673 + ], + "spans": [ + { + "bbox": [ + 67, + 661, + 151, + 673 + ], + "type": "text", + "content": "5.2 Correctness" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 678, + 291, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 678, + 291, + 746 + ], + "spans": [ + { + "bbox": [ + 67, + 678, + 291, + 746 + ], + "type": "text", + "content": "At the global level, we use established metrics ROUGE (Lin, 2004) and BERTScore (Zhang et al., 2019). 
ROUGE measures word-level overlap between the generated and labeled responses, with scores reported for ROUGE-1, ROUGE-2, and" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 71, + 526, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 526, + 139 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 526, + 139 + ], + "type": "text", + "content": "ROUGE-L. BERTScore captures semantic similarity between the generated and labeled responses, and we report the F-score (BERT-F) for evaluation. These metrics assess the overall correctness of LLM-generated responses." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 141, + 527, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 141, + 527, + 290 + ], + "spans": [ + { + "bbox": [ + 302, + 141, + 527, + 290 + ], + "type": "text", + "content": "At the syllogism level, we leverage the Qwen2 (Yang et al., 2024) to extract key components, including the circumstances, illegal acts, and legal decisions. We use sentence-BERT (Reimers, 2019) to measure the alignment between the labeled responses and the generated outputs across these dimensions, resulting in " + }, + { + "bbox": [ + 302, + 141, + 527, + 290 + ], + "type": "inline_equation", + "content": "\\mathrm{Correct}_{\\mathrm{c}}" + }, + { + "bbox": [ + 302, + 141, + 527, + 290 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 302, + 141, + 527, + 290 + ], + "type": "inline_equation", + "content": "\\mathrm{Correct}_{\\mathrm{a}}" + }, + { + "bbox": [ + 302, + 141, + 527, + 290 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 302, + 141, + 527, + 290 + ], + "type": "inline_equation", + "content": "\\mathrm{Correct}_{\\mathrm{d}}" + }, + { + "bbox": [ + 302, + 141, + 527, + 290 + ], + "type": "text", + "content": ". 
This syllogism-level evaluation allows us to assess the logical coherence of the responses, ensuring that they align with the underlying legal reasoning principles." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 304, + 407, + 317 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 304, + 407, + 317 + ], + "spans": [ + { + "bbox": [ + 302, + 304, + 407, + 317 + ], + "type": "text", + "content": "5.3 Citation Quality" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 326, + 526, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 326, + 526, + 541 + ], + "spans": [ + { + "bbox": [ + 302, + 326, + 526, + 541 + ], + "type": "text", + "content": "As previously discussed, we assume that a question submitted by laypersons typically corresponds to a specific law article. Therefore, at the global level, we evaluate the citation quality of the retrieved law article (premise) by measuring its entailment with the associated sentence in the LLM's response (hypothesis). Specifically, we use an NLI model to compute " + }, + { + "bbox": [ + 302, + 326, + 526, + 541 + ], + "type": "inline_equation", + "content": "\\text{Cita}_{\\text{Law}}" + }, + { + "bbox": [ + 302, + 326, + 526, + 541 + ], + "type": "text", + "content": ", which quantifies the degree to which the law article entails the attached sentence. This metric reflects how effectively the response aligns with the cited law article. We employ DISC-LawLLM (Yue et al., 2023) as the NLI model due to its strong agreement with human evaluations (as discussed in Sec. 6.3) and its superior performance compared to other NLI models (as detailed in Sec. 6.5)." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 544, + 527, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 544, + 527, + 706 + ], + "spans": [ + { + "bbox": [ + 302, + 544, + 527, + 706 + ], + "type": "text", + "content": "At the syllogism level, we evaluate the quality of precedent case citations by examining three key components: circumstances, illegal acts, and legal decisions. After extracting these elements from both the retrieved cases and the associated sentence in the LLM's response, we utilize DISC-LawLLM to assess the entailment for each component. This evaluation yields three distinct scores: " + }, + { + "bbox": [ + 302, + 544, + 527, + 706 + ], + "type": "inline_equation", + "content": "\\text{Cita}_{\\text{c}}" + }, + { + "bbox": [ + 302, + 544, + 527, + 706 + ], + "type": "text", + "content": " for circumstances, " + }, + { + "bbox": [ + 302, + 544, + 527, + 706 + ], + "type": "inline_equation", + "content": "\\text{Cita}_{\\text{a}}" + }, + { + "bbox": [ + 302, + 544, + 527, + 706 + ], + "type": "text", + "content": " for illegal acts, and " + }, + { + "bbox": [ + 302, + 544, + 527, + 706 + ], + "type": "inline_equation", + "content": "\\text{Cita}_{\\text{d}}" + }, + { + "bbox": [ + 302, + 544, + 527, + 706 + ], + "type": "text", + "content": " for legal decisions, providing a more detailed and nuanced assessment of citation quality within the syllogism framework." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 722, + 390, + 736 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 722, + 390, + 736 + ], + "spans": [ + { + "bbox": [ + 302, + 722, + 390, + 736 + ], + "type": "text", + "content": "6 Experiments" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 748, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 748, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 748, + 525, + 775 + ], + "type": "text", + "content": "We conduct extensive experiments on our CitaLaw using the proposed two-level evaluation methods." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 67, + 752, + 290, + 775 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 752, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 752, + 290, + 775 + ], + "type": "text", + "content": "4 Considering the input window size of LLMs, we set up to retrieve 3 precedent cases." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "text", + "content": "11187" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 70, + 69, + 523, + 232 + ], + "blocks": [ + { + "bbox": [ + 70, + 69, + 523, + 232 + ], + "lines": [ + { + "bbox": [ + 70, + 69, + 523, + 232 + ], + "spans": [ + { + "bbox": [ + 70, + 69, + 523, + 232 + ], + "type": "table", + "html": "
MetricFluencyCorrectnessCitationAll
CategoryModelMauveRouge-1Rouge-2Rouge-LBERT-FCorrecteCorrectaCorrectdCitaLawAvg
Llama3 (Llam3-8B-Instruct)CloseBook22.6316.471.9513.3458.4673.0568.2466.8767.3843.15
CGG61.0123.976.0517.9165.9467.2977.3174.9586.7053.46
ARG-Q61.2723.175.6517.8364.2369.0475.4574.4779.1052.24
ARG-QA51.8323.736.9618.5364.8471.3774.8174.6680.8051.95
Qwen2 (Qwen2-7B-Instruct)CloseBook21.0415.292.2711.3158.3970.8971.7169.9372.3543.69
CGG75.1022.264.7715.4165.2867.5078.6277.8277.5953.82
ARG-Q66.5520.864.5015.4264.5966.9677.8275.6681.4852.65
ARG-QA66.8021.734.7816.3464.8569.3176.3575.0582.8353.11
Legal LLM (CGG)DISC-LawLLM72.7022.464.1415.4865.0665.2178.5576.1783.4653.69
fuzi.mingcha56.5824.545.7017.4865.8663.2879.5677.9481.6452.51
LexiLaw71.8924.966.2518.9165.6868.8978.1276.7282.4254.87
Tailing13.9515.934.1312.8959.4772.0069.1168.3882.6744.28
zhihai37.5020.984.5913.6964.5467.7577.6876.9977.1648.99
LawGPT_zh51.6023.335.2816.1765.1463.7279.4377.5286.1852.04
Hanfei51.1223.955.1918.7665.1270.8375.0174.2176.9751.24
", + "image_path": "2d46d4ac9531fb6d2b19fb2e82f5e1e1272b4f917c051d82cde4acedf12f3f1f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 86, + 241, + 504, + 253 + ], + "lines": [ + { + "bbox": [ + 86, + 241, + 504, + 253 + ], + "spans": [ + { + "bbox": [ + 86, + 241, + 504, + 253 + ], + "type": "text", + "content": "Table 2: Performance comparisons on the Layperson dataset. The best performance is indicated in bold." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 275, + 199, + 289 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 275, + 199, + 289 + ], + "spans": [ + { + "bbox": [ + 67, + 275, + 199, + 289 + ], + "type": "text", + "content": "6.1 Experimental Settings" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 292, + 188, + 304 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 292, + 188, + 304 + ], + "spans": [ + { + "bbox": [ + 67, + 292, + 188, + 304 + ], + "type": "text", + "content": "6.1.1 Evaluated Models" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 308, + 291, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 308, + 291, + 484 + ], + "spans": [ + { + "bbox": [ + 67, + 308, + 291, + 484 + ], + "type": "text", + "content": "We selected two categories of LLMs for testing: The legal LLMs include (1) fuzi.mingcha (6B) (Wu et al., 2023a), (2) LexiLaw5 (6B), (3) Tailing6 (7B), (4) DISC-LawLLM (13B) (Yue et al., 2023), (5) zhihai (7B) (Wu et al.), (6) LawGPT_zh (6B) (Liu et al., 2023), (7) HanFei (7B) (He et al., 2023). The open-domain LLMs include Qwen2 (7B) (Yang et al., 2024) and Llama3 (8B) (AI@Meta, 2024). For these models, we tested all methods mentioned in Sec. 4, including: (1) CloseBook, (2) CGG, (3) ARG-Q and (4) ARG-QA. For the legal LLMs, we generate responses using CGG. Appendix B has the details." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 491, + 214, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 491, + 214, + 504 + ], + "spans": [ + { + "bbox": [ + 67, + 491, + 214, + 504 + ], + "type": "text", + "content": "6.1.2 Implementation Details" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 507, + 291, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 507, + 291, + 602 + ], + "spans": [ + { + "bbox": [ + 67, + 507, + 291, + 602 + ], + "type": "text", + "content": "Our implementation is based on the Huggingface Transformers library (Wolf et al., 2020) with PyTorch. We use bge-base-zh-v1.5 (Xiao et al., 2023) as the retrieval model and conduct all experiments on Nvidia A6000 GPUs. Additional details are provided in Appendix C and https://github.com/ke-01/CitaLaw." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 610, + 158, + 622 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 610, + 158, + 622 + ], + "spans": [ + { + "bbox": [ + 67, + 610, + 158, + 622 + ], + "type": "text", + "content": "6.2 Main Results" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 628, + 290, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 628, + 290, + 668 + ], + "spans": [ + { + "bbox": [ + 67, + 628, + 290, + 668 + ], + "type": "text", + "content": "The results on the Layperson and Practitioner datasets are presented in Table 2 and Table 3. 
We analyze the results from three perspectives:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 676, + 267, + 688 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 676, + 267, + 688 + ], + "spans": [ + { + "bbox": [ + 67, + 676, + 267, + 688 + ], + "type": "text", + "content": "6.2.1 Performance of Open-Domain LLM" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 692, + 290, + 745 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 692, + 290, + 745 + ], + "spans": [ + { + "bbox": [ + 67, + 692, + 290, + 745 + ], + "type": "text", + "content": "Legal references improve the response quality. Compared to CloseBook, the overall performance in CGG, ARG-Q, and ARG-QA has improved. This indicates that incorporating references into the" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 275, + 525, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 275, + 525, + 329 + ], + "spans": [ + { + "bbox": [ + 302, + 275, + 525, + 329 + ], + "type": "text", + "content": "LLM helps it better understand both the question and the required direction for the answer, thereby enhancing performance in terms of style consistency, correctness, and citation quality." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 332, + 525, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 332, + 525, + 453 + ], + "spans": [ + { + "bbox": [ + 302, + 332, + 525, + 453 + ], + "type": "text", + "content": "CGG achieves better response quality. We observe that CGG achieves optimal performance, especially response correctness, suggesting that incorporating legal references into the LLM input is more effective than refining the LLM's response. This is because including legal knowledge as input allows the LLM to consider relevant context when generating replies, whereas refining the response might lead to excessive alterations." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 457, + 525, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 457, + 525, + 578 + ], + "spans": [ + { + "bbox": [ + 302, + 457, + 525, + 578 + ], + "type": "text", + "content": "ARG improves the alignment of responses and references. We can observe that ARG outperforms CGG in citation-related metrics overall. This is because CGG merely incorporates reference information as input, which may lead the model to overlook some reference details during the generation process. In contrast, ARG modifies the answer based on the references after generation, making it easier to ensure the completeness of citations." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 582, + 525, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 582, + 525, + 663 + ], + "spans": [ + { + "bbox": [ + 302, + 582, + 525, + 663 + ], + "type": "text", + "content": "Chinese data fine-tuning can bring benefits. Both the Layperson and Practitioner datasets are Chinese datasets. Qwen2 (Fine-tuning on more Chinese data) achieved better performance than Llama3, demonstrating the benefits of using Chinese data for fine-tuning." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 667, + 525, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 667, + 525, + 774 + ], + "spans": [ + { + "bbox": [ + 302, + 667, + 525, + 774 + ], + "type": "text", + "content": "CloseBook tends to state circumstances. CloseBook performs better in terms of correctness regarding circumstances compared to the other dimensions. This suggests that when judicial knowledge references are not used, the LLM is more likely to repeat the circumstances itself, rather than providing an appropriate response to the illegal acts and the legal decision." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 80, + 751, + 224, + 762 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 751, + 224, + 762 + ], + "spans": [ + { + "bbox": [ + 80, + 751, + 224, + 762 + ], + "type": "text", + "content": "5https://github.com/CSHaitao/LexiLaw" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 80, + 762, + 275, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 762, + 275, + 774 + ], + "spans": [ + { + "bbox": [ + 80, + 762, + 275, + 774 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 80, + 762, + 275, + 774 + ], + "type": "text", + "content": "https://github.com/DUTIR-LegalIntelligence/Tailing" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "text", + "content": "11188" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 70, + 67, + 523, + 206 + ], + "blocks": [ + { + "bbox": [ + 70, + 67, + 523, + 206 + ], + "lines": [ + { + "bbox": [ + 70, + 67, + 523, + 206 + ], + "spans": [ + { + "bbox": [ + 70, + 67, + 523, + 206 + ], + "type": "table", + "html": "
MetricFluencyCorrectnessCitationAll
CategoryModelMauveRouge-1Rouge-2Rouge-LBERT-F\\( Correct_c \\)\\( Correct_a \\)\\( Correct_d \\)\\( Cita_{Law} \\)\\( Cita_c \\)\\( Cita_a \\)\\( Cita_d \\)Avg
Llama3 (Llam3-8B-Instruct)CloseBook23.8123.057.2919.2362.8376.3071.0570.3263.4966.9568.8365.4651.55
CGG36.3726.157.8419.5565.6067.1976.3677.7373.5868.2367.8767.6554.51
ARG-Q42.6520.395.0715.7562.8270.4973.6772.0068.6169.4870.5168.3453.31
ARG-QA36.9418.644.5614.6361.5071.0772.3870.3269.4068.9570.4269.5152.36
Qwen2 (Qwen2-7B-Instruct)CloseBook61.9130.4410.5423.5367.5574.3579.8478.5268.5568.0370.3069.7158.61
CGG39.6631.0110.7523.4369.0673.4980.1181.1170.3767.8269.5370.0157.20
ARG-Q41.0220.575.1415.6263.3167.8474.7173.9473.0168.9673.2073.6454.25
ARG-QA21.9716.673.0612.4760.7067.4971.1670.8871.7669.0171.0471.3350.63
Legal LLM (CGG )DISC-LawLLM38.1121.376.7516.9660.8473.4272.1471.7963.9267.4268.2265.4552.20
fuzi.mingcha66.5528.959.5122.6967.0670.7376.6677.4765.9266.9469.2868.6957.54
LexiLaw57.7429.018.9323.8365.6370.3676.6775.9765.2866.9368.8968.0356.44
Tailing50.1626.529.1622.4465.3575.9673.8370.3064.6566.9467.5666.0954.91
zhihai26.2921.386.0015.5364.4765.5976.3877.3767.9366.3063.1759.8250.85
LawGPT_zh47.1029.168.9222.5567.6469.4879.3780.2366.9068.3867.5568.9456.35
HanFei75.7232.9812.4626.9168.7273.2578.6378.1167.0367.4568.6367.7359.80
", + "image_path": "a066d02470012f39beeccbe312d971a870db0dcd6faf2866a40a68706574cc97.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 85, + 216, + 506, + 227 + ], + "lines": [ + { + "bbox": [ + 85, + 216, + 506, + 227 + ], + "spans": [ + { + "bbox": [ + 85, + 216, + 506, + 227 + ], + "type": "text", + "content": "Table 3: Performance comparisons on the Practitioner dataset. The best performance is indicated in bold." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 241, + 232, + 253 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 241, + 232, + 253 + ], + "spans": [ + { + "bbox": [ + 67, + 241, + 232, + 253 + ], + "type": "text", + "content": "6.2.2 Performance of Legal LLM" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 258, + 290, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 258, + 290, + 352 + ], + "spans": [ + { + "bbox": [ + 67, + 258, + 290, + 352 + ], + "type": "text", + "content": "Law article training achieves gains. In the Layperson dataset, LexiLaw achieves optimal performance overall. This is because the questions in the Layperson dataset often require only law articles to provide answers clearly, and LexiLaw's training explicitly used law articles, allowing it to effectively handle such questions." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 353, + 291, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 353, + 291, + 421 + ], + "spans": [ + { + "bbox": [ + 67, + 353, + 291, + 421 + ], + "type": "text", + "content": "Full-parameter training offers advantages. Hanfei achieves the best results in the Practitioner dataset, as it is a fully parameter-trained legal LLM. Full-parameter fine-tuning allows it to effectively simulate a legal expert, thus performing well." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 422, + 291, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 422, + 291, + 476 + ], + "spans": [ + { + "bbox": [ + 67, + 422, + 291, + 476 + ], + "type": "text", + "content": "Syllogistic reasoning is useful. fuzi.mingcha performs well on syllogism evaluation metrics, particularly on the Layperson dataset. This is due to its fine-tuning of syllogism judgment data." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 485, + 269, + 497 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 485, + 269, + 497 + ], + "spans": [ + { + "bbox": [ + 67, + 485, + 269, + 497 + ], + "type": "text", + "content": "6.2.3 Open Domain LLM vs. Legal LLM" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 502, + 291, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 502, + 291, + 609 + ], + "spans": [ + { + "bbox": [ + 67, + 502, + 291, + 609 + ], + "type": "text", + "content": "Impact of LLM Backbone. We can observe that some legal LLMs perform worse than open-domain LLMs. This is because Qwen2 and Llama3 are the latest open-domain LLMs, and their overall capabilities have significantly improved. In contrast, most legal LLMs are built on earlier generations of LLMs, which have weaker base models, leading to poorer overall performance." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 611, + 291, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 611, + 291, + 706 + ], + "spans": [ + { + "bbox": [ + 67, + 611, + 291, + 706 + ], + "type": "text", + "content": "Effectiveness of legal knowledge. Overall, the upper limit of legal LLMs is higher than that of open-domain LLMs. This is because legal LLMs, after extensive training on legal knowledge, have developed strong capabilities in solving legal issues. As a result, even though their base models are outdated, they can still perform effectively." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 716, + 185, + 728 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 716, + 185, + 728 + ], + "spans": [ + { + "bbox": [ + 67, + 716, + 185, + 728 + ], + "type": "text", + "content": "6.3 Human Evaluation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 735, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 735, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 735, + 291, + 775 + ], + "type": "text", + "content": "In this section, we compared the syllogism-level metric with human evaluation. Details of legal human annotators can be found in Appendix D." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 241, + 525, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 241, + 525, + 295 + ], + "spans": [ + { + "bbox": [ + 302, + 241, + 525, + 295 + ], + "type": "text", + "content": "The syllogism-level evaluation of citation quality is divided into two stages: Stage 1: Extracting key components. Stage 2: Assessing the entailment using an NLI model." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 296, + 526, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 296, + 526, + 430 + ], + "spans": [ + { + "bbox": [ + 302, + 296, + 526, + 430 + ], + "type": "text", + "content": "Stage 1: We randomly selected 50 questions each from the Layperson and Practitioner datasets. After splitting the cases into individual clauses, annotators were provided with the full case and its clauses. They do a three-class classification of each clause. The Qwen2's annotations were then compared with human annotations. The Cohen's kappa coefficient (Cohen, 1960) of 0.7876 indicates substantial agreement (0.61-0.80) between the model's and human annotators' labels." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 432, + 525, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 432, + 525, + 635 + ], + "spans": [ + { + "bbox": [ + 302, + 432, + 525, + 635 + ], + "type": "text", + "content": "Stage 2: We randomly selected 50 questions from the Practitioner dataset and used Qwen2 to extract key components of pairs of responses and citations. Annotators assessed the degree to which the citations entailed the corresponding response components using a 5-point scale (1: low, 5: high), with descriptions provided in Appendix D. The entailment probabilities given by DISC-LawLLM, which range from 0 to 1, were scaled to the same 1-5 range by multiplying by 5 and rounding. We then compared the scaled model outputs with the human evaluations and calculated Cohen's kappa coefficient. The kappa score of 0.6923 again indicates substantial agreement (0.61-0.80) between the model and human judgments." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 647, + 504, + 660 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 647, + 504, + 660 + ], + "spans": [ + { + "bbox": [ + 302, + 647, + 504, + 660 + ], + "type": "text", + "content": "6.4 Effects on Different Retrieval Models" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 666, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 666, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 666, + 525, + 775 + ], + "type": "text", + "content": "We selected BGE as the retrieval model in the main experiment. In this section, we explore the impact of using different retrieval models. Specifically, we evaluate Criminal-BERT (Zhong et al., 2019) and Civil-BERT (Zhong et al., 2019), two legal domain models based on BERT, fine-tuned on large-scale criminal and civil law documents, respectively. 
We replaced the retrieval model and tested the CGG" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "text", + "content": "11189" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 71, + 70, + 282, + 157 + ], + "blocks": [ + { + "bbox": [ + 71, + 70, + 282, + 157 + ], + "lines": [ + { + "bbox": [ + 71, + 70, + 282, + 157 + ], + "spans": [ + { + "bbox": [ + 71, + 70, + 282, + 157 + ], + "type": "image", + "image_path": "bb02b0dcacfe9e2343a7e3dce9d3c1df14239b907d3877ece25c69828cb44528.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 167, + 291, + 203 + ], + "lines": [ + { + "bbox": [ + 67, + 167, + 291, + 203 + ], + "spans": [ + { + "bbox": [ + 67, + 167, + 291, + 203 + ], + "type": "text", + "content": "Figure 3: Performance of different retrieval models. Lay is short for Layperson dataset and Pra is short for Practitioner dataset." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 72, + 208, + 285, + 285 + ], + "blocks": [ + { + "bbox": [ + 72, + 208, + 285, + 285 + ], + "lines": [ + { + "bbox": [ + 72, + 208, + 285, + 285 + ], + "spans": [ + { + "bbox": [ + 72, + 208, + 285, + 285 + ], + "type": "image", + "image_path": "1fbc1d82ad26023059c2cbac91cbdf2370eca84e69adf8ce3919ae5ef0ed0727.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 72, + 303, + 285, + 381 + ], + "blocks": [ + { + "bbox": [ + 77, + 290, + 280, + 302 + ], + "lines": [ + { + "bbox": [ + 77, + 290, + 280, + 302 + ], + "spans": [ + { + "bbox": [ + 77, + 290, + 280, + 302 + ], + "type": "text", + "content": "(a) Methods for CitaLaw metric with Layperson dataset." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 72, + 303, + 285, + 381 + ], + "lines": [ + { + "bbox": [ + 72, + 303, + 285, + 381 + ], + "spans": [ + { + "bbox": [ + 72, + 303, + 285, + 381 + ], + "type": "image", + "image_path": "2710aec7dc9ffc3bb3faa79cb8e962183333b9a0069341b63d3806ae4b8136a3.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 80, + 386, + 277, + 396 + ], + "lines": [ + { + "bbox": [ + 80, + 386, + 277, + 396 + ], + "spans": [ + { + "bbox": [ + 80, + 386, + 277, + 396 + ], + "type": "text", + "content": "(b) Metrics for CGG method with Practitioner dataset." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 67, + 406, + 289, + 429 + ], + "lines": [ + { + "bbox": [ + 67, + 406, + 289, + 429 + ], + "spans": [ + { + "bbox": [ + 67, + 406, + 289, + 429 + ], + "type": "text", + "content": "Figure 4: The performance of different NLI models when the LLM is Llama." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 443, + 290, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 443, + 290, + 484 + ], + "spans": [ + { + "bbox": [ + 67, + 443, + 290, + 484 + ], + "type": "text", + "content": "method on the Layperson dataset. The average results across all metrics are shown in Figure 3, with detailed metric results provided in Appendix E." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 486, + 291, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 486, + 291, + 702 + ], + "spans": [ + { + "bbox": [ + 67, + 486, + 291, + 702 + ], + "type": "text", + "content": "As shown, on the Layperson dataset, BGE significantly outperforms the other two models. This is because the dataset consists of questions from laypersons, which are more everyday in nature. In contrast, the two legal BERT models, having been trained extensively on legal cases, show a distributional mismatch with open-domain data, leading to poorer performance. On the Practitioner dataset, which features professional legal questions, BGE still achieves the best performance. This can be attributed to its extensive training on diverse data, likely including some legal data, and its use of more advanced model architectures and techniques. However, the two legal BERT models perform comparably to BGE, showcasing the benefits of their specialized training on legal data." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 714, + 246, + 727 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 714, + 246, + 727 + ], + "spans": [ + { + "bbox": [ + 67, + 714, + 246, + 727 + ], + "type": "text", + "content": "6.5 Effects on Different NLI Models" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 735, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 735, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 735, + 291, + 775 + ], + "type": "text", + "content": "We opted to use legal LLMs as the NLI model in our experiments, as they support longer input lengths and incorporate substantial legal knowl" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 71, + 526, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 526, + 166 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 526, + 166 + ], + "type": "text", + "content": "edge. In Section 6.3, we verified that DISC-LawLLM and human achieved good consistency. In this section, we explore the performance of several legal LLMs in the NLI task. Besides DISC-LawLLM, we evaluated LexiLaw, LawGPT_zh, and Hanfei, which demonstrated strong performance in the main experiments." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 167, + 526, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 167, + 526, + 275 + ], + "spans": [ + { + "bbox": [ + 302, + 167, + 526, + 275 + ], + "type": "text", + "content": "In Figures 4 (a), we examined the ability of four legal LLMs to evaluate Llama across the Close-Book, CGG, ARG-Q, and ARG-QA methods using the CitaLaw metric on the Layperson dataset. In Figures 4 (b), we investigated the performance of four legal LLMs in evaluating the CGG method applied to Llama across the metrics CitaLaw, CitaC, Cita a, and Cita d on the Practitioner dataset." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 277, + 526, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 277, + 526, + 507 + ], + "spans": [ + { + "bbox": [ + 302, + 277, + 526, + 507 + ], + "type": "text", + "content": "We can observe that Hanfei provides lower entailment scores across both datasets. This is because it is a fully parameter-tuned legal LLM, which results in a diminished capability to handle the general task of entailment reasoning. Additionally, we found that on the Practitioner dataset, other legal LLMs achieved results closer to those of DISC-LawLLM, while on the Layperson dataset, the performance gap was significantly larger. This is because the Practitioner dataset is more judicially oriented, aligning with the knowledge seen during the fine-tuning of legal LLMs. In contrast, due to limited training on general-purpose data, other legal LLMs struggle to accurately determine entailment relationships in the Layperson dataset. Similar conclusions can be drawn when the LLM is Qwen in Appendix F." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 303, + 521, + 380, + 534 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 521, + 380, + 534 + ], + "spans": [ + { + "bbox": [ + 303, + 521, + 380, + 534 + ], + "type": "text", + "content": "7 Conclusion" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 544, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 544, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 544, + 526, + 775 + ], + "type": "text", + "content": "We introduce CitaLaw, a benchmark designed to explore LLMs to generate responses with citations in legal scenarios, thus improving the trustworthiness of LLMs. CitaLaw includes two categories of questions: laypersons and practitioners. For laypersons, CitaLaw provides law articles as citations to help them understand the LLM's response clearly. 
For practitioners, both law articles and precedent cases are provided as citations, better supporting their needs for complex reasoning. CitaLaw offers global-level and syllogism-level metrics and supports the integration of citations into LLM inputs to guide generation or using citations to refine LLM's response. We conducted extensive experiments on 7 legal-domain LLMs and 2 popular open-domain LLMs, providing valuable insights for the deployment of LLMs in legal scenarios." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "text", + "content": "11190" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 149, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 149, + 83 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 149, + 83 + ], + "type": "text", + "content": "8 Limitations" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 92, + 290, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 92, + 290, + 146 + ], + "spans": [ + { + "bbox": [ + 67, + 92, + 290, + 146 + ], + "type": "text", + "content": "While Citalaw provides a robust framework for evaluating LLMs in legal scenarios, several limitations should be acknowledged to guide future extensions of this work." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 66, + 147, + 290, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 147, + 290, + 255 + ], + "spans": [ + { + "bbox": [ + 66, + 147, + 290, + 255 + ], + "type": "text", + "content": "First, the datasets used in CitaLaw are primarily sourced from the Chinese legal system, which may limit the benchmark's applicability to other jurisdictions. However, by incorporating both law articles and precedent cases to align with the principles of civil and common law systems, CitaLaw demonstrates strong potential for adaptation to diverse legal contexts." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 255, + 291, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 255, + 291, + 364 + ], + "spans": [ + { + "bbox": [ + 67, + 255, + 291, + 364 + ], + "type": "text", + "content": "Second, the syllogism-based evaluation framework simplifies legal reasoning into three key components: the major premise (law articles or precedent cases), the minor premise (case circumstances and actions), and the conclusion (legal decision). While this structured approach is effective for systematic evaluation, real-world legal reasoning may encompass additional complexities." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 374, + 206, + 386 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 374, + 206, + 386 + ], + "spans": [ + { + "bbox": [ + 67, + 374, + 206, + 386 + ], + "type": "text", + "content": "9 Ethical Considerations" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 395, + 290, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 395, + 290, + 529 + ], + "spans": [ + { + "bbox": [ + 67, + 395, + 290, + 529 + ], + "type": "text", + "content": "Data Privacy and Confidentiality. The legal datasets used in CitaLaw include law articles, precedent cases, user questions, and golden responses. 
These documents were sourced from publicly available databases, ensuring compliance with data privacy and confidentiality standards. We carefully reviewed the datasets to ensure that no personally identifiable information (PII) or sensitive details about individuals were inadvertently included." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 531, + 290, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 531, + 290, + 612 + ], + "spans": [ + { + "bbox": [ + 67, + 531, + 290, + 612 + ], + "type": "text", + "content": "Alignment with Legal Standards. Legal AI systems must align with the ethical and professional standards of the legal domain. Our work emphasizes the need for syllogism-based reasoning to ensure logical consistency and adherence to legal principles." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 613, + 290, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 613, + 290, + 707 + ], + "spans": [ + { + "bbox": [ + 67, + 613, + 290, + 707 + ], + "type": "text", + "content": "Transparency and Explainability. Legal reasoning must be transparent and interpretable, particularly when used in sensitive or high-stakes domains. The metrics proposed in CitaLaw, including syllogism-based evaluation, aim to improve explainability by breaking down the reasoning process into logical components." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 708, + 290, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 708, + 290, + 774 + ], + "spans": [ + { + "bbox": [ + 67, + 708, + 290, + 774 + ], + "type": "text", + "content": "Responsibility in System Deployment. Citalaw is intended as a research benchmark and should not be directly deployed in high-stakes legal decision-making without human oversight. 
While the benchmark aims to enhance the trustworthiness" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 71, + 525, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 525, + 153 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 525, + 153 + ], + "type": "text", + "content": "of LLM-generated responses, legal professionals should always verify the citations and legal interpretations provided by such systems. Misuse of automated systems without adequate validation could lead to inaccurate legal advice or unintended consequences in legal proceedings." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 303, + 163, + 429, + 176 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 163, + 429, + 176 + ], + "spans": [ + { + "bbox": [ + 303, + 163, + 429, + 176 + ], + "type": "text", + "content": "10 Acknowledgements" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 184, + 526, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 184, + 526, + 373 + ], + "spans": [ + { + "bbox": [ + 302, + 184, + 526, + 373 + ], + "type": "text", + "content": "This work was funded by the National Key R&D Program of China (2023YFA1008704), the National Natural Science Foundation of China (62472426). Supported by fund for building world-class universities (disciplines) of Renmin University of China. Work partially done at Beijing Key Laboratory of Research on Large Models and Intelligent Governance, and Engineering Research Center of Next-Generation Intelligent Search and Recommendation, MOE. Supported by the Beijing Social Science Foundation Planning Project (Grant No. 24GLC041), the Fundamental Research Funds for the Central Universities in UIBE (Grant No. 24QN06, 24PYTS22)." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 396, + 362, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 396, + 362, + 407 + ], + "spans": [ + { + "bbox": [ + 304, + 396, + 362, + 407 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 303, + 414, + 526, + 775 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 304, + 414, + 525, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 414, + 525, + 449 + ], + "spans": [ + { + "bbox": [ + 304, + 414, + 525, + 449 + ], + "type": "text", + "content": "Abdelrahman Abdallah, Bhawna Piryani, and Adam Jatowt. 2023. Exploring the state of the art in legal qa systems. Journal of Big Data, 10(1):127." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 456, + 460, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 456, + 460, + 468 + ], + "spans": [ + { + "bbox": [ + 304, + 456, + 460, + 468 + ], + "type": "text", + "content": "AI@Meta. 2024. Llama 3 model card." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 476, + 526, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 476, + 526, + 533 + ], + "spans": [ + { + "bbox": [ + 304, + 476, + 526, + 533 + ], + "type": "text", + "content": "Andrew Blair-Stanek, Nils Holzenberger, and Benjamin Van Durme. 2023. Can gpt-3 perform statutory reasoning? In Proceedings of the Nineteenth International Conference on Artificial Intelligence and Law, pages 22-31." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 303, + 540, + 526, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 540, + 526, + 574 + ], + "spans": [ + { + "bbox": [ + 303, + 540, + 526, + 574 + ], + "type": "text", + "content": "Jacob Cohen. 1960. A coefficient of agreement for nominal scales. 
Educational and psychological measurement, 20(1):37-46." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 582, + 526, + 638 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 582, + 526, + 638 + ], + "spans": [ + { + "bbox": [ + 304, + 582, + 526, + 638 + ], + "type": "text", + "content": "Yongfu Dai, Duanyu Feng, Jimin Huang, Haochen Jia, Qianqian Xie, Yifang Zhang, Weiguang Han, Wei Tian, and Hao Wang. 2023. Laiw: A chinese legal large language models benchmark (a technical report). arXiv preprint arXiv:2310.05620." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 645, + 526, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 645, + 526, + 702 + ], + "spans": [ + { + "bbox": [ + 304, + 645, + 526, + 702 + ], + "type": "text", + "content": "Haolin Deng, Chang Wang, Xin Li, Dezhang Yuan, Junlang Zhan, Tianhua Zhou, Jin Ma, Jun Gao, and Ruifeng Xu. 2024. Websites: Attributed query-focused summarization on chinese web search results with citations. arXiv preprint arXiv:2403.01774." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 303, + 709, + 526, + 743 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 709, + 526, + 743 + ], + "spans": [ + { + "bbox": [ + 303, + 709, + 526, + 743 + ], + "type": "text", + "content": "Jacob Devlin. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 303, + 751, + 526, + 775 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 751, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 303, + 751, + 526, + 775 + ], + "type": "text", + "content": "Zhiwei Fei, Xiaoyu Shen, Dawei Zhu, Fengzhe Zhou, Zhuo Han, Songyang Zhang, Kai Chen, Zongwen" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 284, + 780, + 311, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 311, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 780, + 311, + 791 + ], + "type": "text", + "content": "11191" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 290, + 773 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 80, + 72, + 290, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 72, + 290, + 105 + ], + "spans": [ + { + "bbox": [ + 80, + 72, + 290, + 105 + ], + "type": "text", + "content": "Shen, and Jidong Ge. 2023. Lawbench: Benchmarking legal knowledge of large language models. arXiv preprint arXiv:2309.16289." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 114, + 290, + 202 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 114, + 290, + 202 + ], + "spans": [ + { + "bbox": [ + 69, + 114, + 290, + 202 + ], + "type": "text", + "content": "Luyu Gao, Zhuyun Dai, Panupong Pasupat, Anthony Chen, Arun Tejasvi Chaganty, Yicheng Fan, Vincent Zhao, Ni Lao, Hongrae Lee, Da-Cheng Juan, et al. 2023a. Rarr: Researching and revising what language models say, using language models. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 16477-16508." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 211, + 290, + 265 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 211, + 290, + 265 + ], + "spans": [ + { + "bbox": [ + 69, + 211, + 290, + 265 + ], + "type": "text", + "content": "Tianyu Gao, Howard Yen, Jiatong Yu, and Danqi Chen. 2023b. Enabling large language models to generate text with citations. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 6465-6488." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 275, + 289, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 275, + 289, + 318 + ], + "spans": [ + { + "bbox": [ + 69, + 275, + 289, + 318 + ], + "type": "text", + "content": "Wanwei He, Jiabao Wen, Lei Zhang, Hao Cheng, Bowen Qin, Yunshui Li, Feng Jiang, Junying Chen, Benyou Wang, and Min Yang. 2023. Hanfei-1.0. https://github.com/siat-nlp/HanFei." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 327, + 290, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 327, + 290, + 360 + ], + "spans": [ + { + "bbox": [ + 69, + 327, + 290, + 360 + ], + "type": "text", + "content": "Matthew Honnibal, Ines Montani, Sofie Van Landeghem, and Adriane Boyd. 2020. spaCy: Industrial-strength Natural Language Processing in Python." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 370, + 289, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 370, + 289, + 412 + ], + "spans": [ + { + "bbox": [ + 69, + 370, + 289, + 412 + ], + "type": "text", + "content": "Dongfang Li, Zetian Sun, Xinshuo Hu, Zhenyu Liu, Ziyang Chen, Baotian Hu, Aiguo Wu, and Min Zhang. 2023. A survey of large language models attribution. arXiv preprint arXiv:2311.03731." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 422, + 289, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 422, + 289, + 465 + ], + "spans": [ + { + "bbox": [ + 69, + 422, + 289, + 465 + ], + "type": "text", + "content": "Haitao Li, You Chen, Qingyao Ai, Yueyue Wu, Ruizhe Zhang, and Yiqun Liu. 2024. Lexeval: A comprehensive chinese legal benchmark for evaluating large language models. Preprint, arXiv:2409.20288." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 475, + 289, + 508 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 475, + 289, + 508 + ], + "spans": [ + { + "bbox": [ + 69, + 475, + 289, + 508 + ], + "type": "text", + "content": "Chin-Yew Lin. 2004. Rouge: A package for automatic evaluation of summaries. In Text summarization branches out, pages 74-81." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 517, + 289, + 560 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 517, + 289, + 560 + ], + "spans": [ + { + "bbox": [ + 69, + 517, + 289, + 560 + ], + "type": "text", + "content": "Hongcheng Liu, Yusheng Liao, Yutong Meng, and Yuhao Wang. 2023. Xiezhi: Chinese law large language model. https://github.com/LiuHC0428/LAW_GPT." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 569, + 289, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 569, + 289, + 646 + ], + "spans": [ + { + "bbox": [ + 69, + 569, + 289, + 646 + ], + "type": "text", + "content": "Luyao Ma, Yating Zhang, Tianyi Wang, Xiaozhong Liu, Wei Ye, Changlong Sun, and Shikun Zhang. 2021a. Legal judgment prediction with multi-stage case representation learning in the real court setting. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 993-1002." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 655, + 289, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 655, + 289, + 721 + ], + "spans": [ + { + "bbox": [ + 69, + 655, + 289, + 721 + ], + "type": "text", + "content": "Yixiao Ma, Yunqiu Shao, Yueyue Wu, Yiqun Liu, Ruizhe Zhang, Min Zhang, and Shaoping Ma. 2021b. Lecard: A legal case retrieval dataset for chinese law system. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 2342-2348." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 729, + 289, + 773 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 729, + 289, + 773 + ], + "spans": [ + { + "bbox": [ + 69, + 729, + 289, + 773 + ], + "type": "text", + "content": "Chaitanya Malaviya, Subin Lee, Sihao Chen, Elizabeth Sieber, Mark Yatskar, and Dan Roth. 2024. Expertqa: Expert-curated questions and attributed answers. In Proceedings of the 2024 Conference of the North" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 524, + 773 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 314, + 72, + 524, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 524, + 105 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 524, + 105 + ], + "type": "text", + "content": "American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 3025-3045." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 114, + 524, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 114, + 524, + 179 + ], + "spans": [ + { + "bbox": [ + 304, + 114, + 524, + 179 + ], + "type": "text", + "content": "Krishna Pillutla, Swabha Swayamdipta, Rowan Zellers, John Thickstun, Sean Welleck, Yejin Choi, and Zaid Harchaoui. 2021. 
Mauve: Measuring the gap between neural text and human text using divergence frontiers. Advances in Neural Information Processing Systems, 34:4816-4828." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 188, + 524, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 188, + 524, + 220 + ], + "spans": [ + { + "bbox": [ + 304, + 188, + 524, + 220 + ], + "type": "text", + "content": "N Reimers. 2019. Sentence-bert: Sentence embeddings using siamese bert-networks. arXiv preprint arXiv:1908.10084." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 229, + 524, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 229, + 524, + 274 + ], + "spans": [ + { + "bbox": [ + 304, + 229, + 524, + 274 + ], + "type": "text", + "content": "Jaromir Savelka, Kevin D Ashley, Morgan A Gray, Hannes Westermann, and Huihui Xu. 2023. Explaining legal concepts with augmented large language models (gpt-4). arXiv preprint arXiv:2306.09525." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 281, + 524, + 359 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 281, + 524, + 359 + ], + "spans": [ + { + "bbox": [ + 304, + 281, + 524, + 359 + ], + "type": "text", + "content": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumont, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, et al. 2020. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 conference on empirical methods in natural language processing: system demonstrations, pages 38-45." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 366, + 524, + 433 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 366, + 524, + 433 + ], + "spans": [ + { + "bbox": [ + 304, + 366, + 524, + 433 + ], + "type": "text", + "content": "Shiguang Wu, Zhongkun Liu, Zhen Zhang, Zheng Chen, Wentao Deng, Wenhao Zhang, Jiyuan Yang, Zhitao Yao, Yougang Lyu, Xin Xin, Shen Gao, Pengjie Ren, Zhaochun Ren, and Zhumin Chen. 2023a. fuzi.mingcha. https://github.com/irlab-sdu/fuzi.mingcha." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 441, + 524, + 473 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 441, + 524, + 473 + ], + "spans": [ + { + "bbox": [ + 304, + 441, + 524, + 473 + ], + "type": "text", + "content": "Yiquan Wu, Yuhang Liu, Yifei Liu, Ang Li, Siying Zhou, and Kun Kuang. wisdominterrogatory. Available at GitHub." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 482, + 524, + 560 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 482, + 524, + 560 + ], + "spans": [ + { + "bbox": [ + 304, + 482, + 524, + 560 + ], + "type": "text", + "content": "Yiquan Wu, Siying Zhou, Yifei Liu, Weiming Lu, Xiaozhong Liu, Yating Zhang, Changlong Sun, Fei Wu, and Kun Kuang. 2023b. Precedent-enhanced legal judgment prediction with llm and domain-model collaboration. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 12060-12075." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 568, + 524, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 568, + 524, + 612 + ], + "spans": [ + { + "bbox": [ + 304, + 568, + 524, + 612 + ], + "type": "text", + "content": "Shitao Xiao, Zheng Liu, Peitian Zhang, Niklas Muennighoff, Defu Lian, and Jian-Yun Nie. 2023. C-pack: Packaged resources to advance general chinese embedding. arXiv preprint arXiv:2309.07597." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 620, + 524, + 773 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 620, + 524, + 773 + ], + "spans": [ + { + "bbox": [ + 304, + 620, + 524, + 773 + ], + "type": "text", + "content": "An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu" + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 284, + 781, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 781, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 781, + 312, + 791 + ], + "type": "text", + "content": "11192" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 290, + 507 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 80, + 72, + 290, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 72, + 290, + 95 + ], + "spans": [ + { + "bbox": [ + 80, + 72, + 290, + 95 + ], + "type": "text", + "content": "Cui, Zhenru Zhang, and Zhihao Fan. 2024. Qwen2 technical report. arXiv preprint arXiv:2407.10671." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 103, + 290, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 103, + 290, + 137 + ], + "spans": [ + { + "bbox": [ + 69, + 103, + 290, + 137 + ], + "type": "text", + "content": "Fangyi Yu, Lee Quartey, and Frank Schilder. 2022a. Legal prompting: Teaching a language model to think like a lawyer. arXiv preprint arXiv:2212.01326." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 145, + 290, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 145, + 290, + 222 + ], + "spans": [ + { + "bbox": [ + 69, + 145, + 290, + 222 + ], + "type": "text", + "content": "Weijie Yu, Zhongxiang Sun, Jun Xu, Zhenhua Dong, Xu Chen, Hongteng Xu, and Ji-Rong Wen. 2022b. Explainable legal case matching via inverse optimal transport-based rationale extraction. In Proceedings of the 45th international ACM SIGIR conference on research and development in information retrieval, pages 657-668." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 230, + 290, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 230, + 290, + 296 + ], + "spans": [ + { + "bbox": [ + 69, + 230, + 290, + 296 + ], + "type": "text", + "content": "Shengbin Yue, Wei Chen, Siyuan Wang, Bingxuan Li, Chenchen Shen, Shujun Liu, Yuxuan Zhou, Yao Xiao, Song Yun, Xuanjing Huang, and Zhongyu Wei. 2023. Disc-lawllm: Fine-tuning large language models for intelligent legal services. Preprint, arXiv:2309.11325." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 305, + 290, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 305, + 290, + 348 + ], + "spans": [ + { + "bbox": [ + 69, + 305, + 290, + 348 + ], + "type": "text", + "content": "Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q Weinberger, and Yoav Artzi. 2019. Bertscore: Evaluating text generation with bert. arXiv preprint arXiv:1904.09675." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 358, + 290, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 358, + 290, + 412 + ], + "spans": [ + { + "bbox": [ + 69, + 358, + 290, + 412 + ], + "type": "text", + "content": "Haoxi Zhong, Chaojun Xiao, Cunchao Tu, Tianyang Zhang, Zhiyuan Liu, and Maosong Sun. 2020. Jecqa: a legal-domain question answering dataset. In Proceedings of the AAAI conference on artificial intelligence, volume 34, pages 9701-9708." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 421, + 290, + 455 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 421, + 290, + 455 + ], + "spans": [ + { + "bbox": [ + 69, + 421, + 290, + 455 + ], + "type": "text", + "content": "Haoxi Zhong, Zhengyan Zhang, Zhiyuan Liu, and Maosong Sun. 2019. Open chinese language pretrained model zoo. Technical report." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 463, + 290, + 507 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 463, + 290, + 507 + ], + "spans": [ + { + "bbox": [ + 69, + 463, + 290, + 507 + ], + "type": "text", + "content": "Zhi Zhou, Jiang-Xin Shi, Peng-Xiao Song, Xiao-Wen Yang, Yi-Xuan Jin, Lan-Zhe Guo, and Yu-Feng Li. 2024. Lawgpt: A chinese legal knowledge-enhanced large language model. Preprint, arXiv:2406.04614." 
+ } + ] + } + ], + "index": 7 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "text", + "content": "11193" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 70, + 186, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 70, + 186, + 84 + ], + "spans": [ + { + "bbox": [ + 68, + 70, + 186, + 84 + ], + "type": "text", + "content": "A The Used Prompts" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 96, + 291, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 96, + 291, + 125 + ], + "spans": [ + { + "bbox": [ + 67, + 96, + 291, + 125 + ], + "type": "text", + "content": "Figure 5 illustrates the prompts used in this paper, including " + }, + { + "bbox": [ + 67, + 96, + 291, + 125 + ], + "type": "inline_equation", + "content": "p_1" + }, + { + "bbox": [ + 67, + 96, + 291, + 125 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 96, + 291, + 125 + ], + "type": "inline_equation", + "content": "p_2" + }, + { + "bbox": [ + 67, + 96, + 291, + 125 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 96, + 291, + 125 + ], + "type": "inline_equation", + "content": "p_3" + }, + { + "bbox": [ + 67, + 96, + 291, + 125 + ], + "type": "text", + "content": " in Eq. 1, Eq. 2 and Eq. 3." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 138, + 287, + 166 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 138, + 287, + 166 + ], + "spans": [ + { + "bbox": [ + 68, + 138, + 287, + 166 + ], + "type": "text", + "content": "B More Details of Evaluated Models and Datasets" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 179, + 291, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 179, + 291, + 476 + ], + "spans": [ + { + "bbox": [ + 69, + 179, + 291, + 476 + ], + "type": "text", + "content": "For the Legal LLMs, we choose (1) fuzi.mingcha (6B) (Wu et al., 2023a): It leverages unsupervised judicial corpora for training and uses syllogistic reasoning judgment data for fine-tuning. (2) LexiLaw7 (6B): It specifically utilizes legal articles and legal reference books for training. (3) Tailing8 (7B): It uses judicial text validation data, information extraction data, and judgment data for training. (4) DISC-LawLLM (13B) (Yue et al., 2023): In addition to fine-tuning with pairs, it also uses triplet data for fine-tuning to enhance the model's ability to leverage external knowledge. (5) zhihai (7B) (Wu et al.): It utilizes ChatGPT to modify the existing dataset and then performs secondary pre-training. (6) LawGPT_zh (6B) (Liu et al., 2023): It primarily uses scenario-based dialogues and knowledge-based question-answering data for fine-tuning based on LoRA. (7) HanFei (7B) (He et al., 2023): It is the first fully parameter-trained legal LLM in China. Because in the main experiment, CGG has the best overall performance, for the legal LLMs, we generate responses using CGG." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 478, + 290, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 478, + 290, + 519 + ], + "spans": [ + { + "bbox": [ + 67, + 478, + 290, + 519 + ], + "type": "text", + "content": "Table 4 and Table 5 are the website URLs and corresponding licenses of the evaluated models and datasets." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 534, + 258, + 550 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 534, + 258, + 550 + ], + "spans": [ + { + "bbox": [ + 68, + 534, + 258, + 550 + ], + "type": "text", + "content": "C More Details on Implementation" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 560, + 291, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 560, + 291, + 696 + ], + "spans": [ + { + "bbox": [ + 67, + 560, + 291, + 696 + ], + "type": "text", + "content": "Considering the length of legal texts and the input window for the LLMs is limited, all experiments in this paper are conducted using a zero-shot setting. We use the Chinese-performing-well Qwen2-1.5B (Yang et al., 2024)9 to complete the MAUVE calculations. For RGUGE, We use version 1.0.1 of ROUGE for calculation. For BERTScore, we use bert-base-chinese (Devlin, 2018)10 to compute it. Regarding sentence-BERT, we employ paraphrase-multilingual-MiniLM-L12-v2 (Reimers, 2019)11." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 303, + 70, + 425, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 70, + 425, + 83 + ], + "spans": [ + { + "bbox": [ + 303, + 70, + 425, + 83 + ], + "type": "text", + "content": "D Human Evaluation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 91, + 526, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 91, + 526, + 294 + ], + "spans": [ + { + "bbox": [ + 302, + 91, + 526, + 294 + ], + "type": "text", + "content": "We hired four legal annotators from a Chinese university, all of whom have legal education backgrounds and are familiar with the cases in the dataset they need to annotate. We explained to the annotators that the data they annotated would be used for scientific research and paid them a reasonable remuneration based on local conditions. They are all graduate students from the judicial field, with practical experience in the legal profession. Two are male, two are female, aged between 24 and 30, and all have over five years of judicial theory study. Two annotators were responsible for the first stage of annotation, while the other two were responsible for the second stage, with all working together on the annotation process." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 295, + 525, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 295, + 525, + 334 + ], + "spans": [ + { + "bbox": [ + 302, + 295, + 525, + 334 + ], + "type": "text", + "content": "Table 6 shows a detailed description of each level used to evaluate the agreement of the NLI model with human evaluations." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 345, + 463, + 358 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 345, + 463, + 358 + ], + "spans": [ + { + "bbox": [ + 302, + 345, + 463, + 358 + ], + "type": "text", + "content": "E Different Retrieval Models" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 366, + 526, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 366, + 526, + 569 + ], + "spans": [ + { + "bbox": [ + 302, + 366, + 526, + 569 + ], + "type": "text", + "content": "Tables 7 and 8 present the performance of different retrieval models—Criminal-BERT, CivilBERT, and BGE—on each metric for the CGG method across the two datasets. It can be observed that when Llama3 and Qwen2 are used as LLMs, BGE achieves the best performance as the retrieval model. Comparing the two datasets, on the Layperson dataset, where the questions are more general, Criminal-BERT and Civil-BERT, which focus on legal cases, perform relatively poorly. In contrast, on the Practitioner dataset, despite no structural or training improvements, Criminal-BERT and CivilBERT achieve results comparable to BGE, highlighting the importance of legal knowledge in judicial QA tasks." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 570, + 525, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 570, + 525, + 609 + ], + "spans": [ + { + "bbox": [ + 302, + 570, + 525, + 609 + ], + "type": "text", + "content": "The differences between the two datasets also underscore the significance of selecting an appropriate retrieval model." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 619, + 436, + 633 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 619, + 436, + 633 + ], + "spans": [ + { + "bbox": [ + 302, + 619, + 436, + 633 + ], + "type": "text", + "content": "F Different NLI Models" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 641, + 526, + 722 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 641, + 526, + 722 + ], + "spans": [ + { + "bbox": [ + 302, + 641, + 526, + 722 + ], + "type": "text", + "content": "Figures 6 (a) and (b) show the entailment scores given by four legal LLMs as NLI models under different methods (CloseBook, CGG, ARG-Q, ARGQA) and metrics(CitaLaw, CitaS, CitaB, and CitaC) when Qwen is used as the LLM. Similar conclusions to those in Section 6.5 can be drawn." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 81, + 709, + 225, + 720 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 709, + 225, + 720 + ], + "spans": [ + { + "bbox": [ + 81, + 709, + 225, + 720 + ], + "type": "text", + "content": "7https://github.com/CSHaitao/LexiLaw" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 81, + 720, + 276, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 720, + 276, + 732 + ], + "spans": [ + { + "bbox": [ + 81, + 720, + 276, + 732 + ], + "type": "inline_equation", + "content": "^{8}" + }, + { + "bbox": [ + 81, + 720, + 276, + 732 + ], + "type": "text", + "content": "https://github.com/DUTIR-LegalIntelligence/Tailing" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 81, + 732, + 239, + 742 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 732, + 239, + 742 + ], + "spans": [ + { + "bbox": [ + 81, + 732, + 239, + 742 + ], + "type": "text", + "content": "9https://huggingface.co/Qwen/Qwen2-1.5B" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 81, + 742, + 
276, + 753 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 742, + 276, + 753 + ], + "spans": [ + { + "bbox": [ + 81, + 742, + 276, + 753 + ], + "type": "text", + "content": "10https://huggingface.co/google-bert/bert-base-chinese" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 81, + 753, + 205, + 763 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 753, + 205, + 763 + ], + "spans": [ + { + "bbox": [ + 81, + 753, + 205, + 763 + ], + "type": "text", + "content": "11 https://huggingface.co/sentence-" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 69, + 764, + 267, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 764, + 267, + 774 + ], + "spans": [ + { + "bbox": [ + 69, + 764, + 267, + 774 + ], + "type": "text", + "content": "transformers/paraphrase-multilingual-MiniLM-L12-v2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "text", + "content": "11194" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 77, + 74, + 264, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 74, + 264, + 139 + ], + "spans": [ + { + "bbox": [ + 77, + 74, + 264, + 139 + ], + "type": "text", + "content": "Answer the question based on the provided law article and cite it appropriately. Only output the answer and citations, without including any additional content. When citing the law article, use [A1] at the end of the relevant sentence." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 79, + 149, + 202, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 149, + 202, + 173 + ], + "spans": [ + { + "bbox": [ + 79, + 149, + 202, + 173 + ], + "type": "text", + "content": "Below is the provided law article: Law article [A1]: {Law article 1}" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 79, + 183, + 160, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 183, + 160, + 203 + ], + "spans": [ + { + "bbox": [ + 79, + 183, + 160, + 203 + ], + "type": "text", + "content": "Question: {Question} Answer:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 145, + 206, + 200, + 216 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 206, + 200, + 216 + ], + "spans": [ + { + "bbox": [ + 145, + 206, + 200, + 216 + ], + "type": "text", + "content": "(a) Layperson" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 79, + 221, + 254, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 221, + 254, + 243 + ], + "spans": [ + { + "bbox": [ + 79, + 221, + 254, + 243 + ], + "type": "text", + "content": "Refine the text based on the references and only output the refined text." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 79, + 253, + 170, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 253, + 170, + 275 + ], + "spans": [ + { + "bbox": [ + 79, + 253, + 170, + 275 + ], + "type": "text", + "content": "Text: {Answer} \nReference: {References}" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 79, + 285, + 263, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 285, + 263, + 307 + ], + "spans": [ + { + "bbox": [ + 79, + 285, + 263, + 307 + ], + "type": "text", + "content": "Refined text (only output the Refined text, without any additional content):" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 123, + 312, + 221, + 322 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 312, + 221, + 322 + ], + "spans": [ + { + "bbox": [ + 123, + 312, + 221, + 322 + ], + "type": "text", + "content": "(c) Response Refinement" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 279, + 74, + 509, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 279, + 74, + 509, + 96 + ], + "spans": [ + { + "bbox": [ + 279, + 74, + 509, + 96 + ], + "type": "text", + "content": "Answer the question based on the provided documents and cite them appropriately." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 279, + 97, + 497, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 279, + 97, + 497, + 117 + ], + "spans": [ + { + "bbox": [ + 279, + 97, + 497, + 117 + ], + "type": "text", + "content": "Only output the answer and citations, without including any additional content." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 279, + 118, + 508, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 279, + 118, + 508, + 140 + ], + "spans": [ + { + "bbox": [ + 279, + 118, + 508, + 140 + ], + "type": "text", + "content": "When citing precedent cases, use [C1], [C2], or [C3] at the end of the sentence. 
When citing the law article, use [A1]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 279, + 150, + 428, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 279, + 150, + 428, + 172 + ], + "spans": [ + { + "bbox": [ + 279, + 150, + 428, + 172 + ], + "type": "text", + "content": "Below are the provided documents: \nPrecedent case [C1]: {Precedent case 1}" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 280, + 173, + 428, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 173, + 428, + 182 + ], + "spans": [ + { + "bbox": [ + 280, + 173, + 428, + 182 + ], + "type": "text", + "content": "Precedent case [C2]: {Precedent case 2}" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 280, + 183, + 428, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 183, + 428, + 205 + ], + "spans": [ + { + "bbox": [ + 280, + 183, + 428, + 205 + ], + "type": "text", + "content": "Precedent case [C3]: {Precedent case 3} \nLaw article [A1]: {Law article 1}" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 279, + 215, + 359, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 279, + 215, + 359, + 235 + ], + "spans": [ + { + "bbox": [ + 279, + 215, + 359, + 235 + ], + "type": "text", + "content": "Question: {Question} Answer:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 366, + 239, + 428, + 250 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 366, + 239, + 428, + 250 + ], + "spans": [ + { + "bbox": [ + 366, + 239, + 428, + 250 + ], + "type": "text", + "content": "(b) Practitioner" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 279, + 255, + 480, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 279, + 255, + 480, + 277 + ], + "spans": [ + { + "bbox": [ + 279, + 255, + 480, + 277 + ], + "type": "text", + "content": "Answer the question. Only output the answer without any additional content." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 279, + 287, + 359, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 279, + 287, + 359, + 307 + ], + "spans": [ + { + "bbox": [ + 279, + 287, + 359, + 307 + ], + "type": "text", + "content": "Question: {Question} Answer:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 354, + 312, + 440, + 322 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 354, + 312, + 440, + 322 + ], + "spans": [ + { + "bbox": [ + 354, + 312, + 440, + 322 + ], + "type": "text", + "content": "(d) Without Reference" + } + ] + } + ], + "index": 18 + }, + { + "type": "table", + "bbox": [ + 92, + 381, + 501, + 474 + ], + "blocks": [ + { + "bbox": [ + 92, + 381, + 501, + 474 + ], + "lines": [ + { + "bbox": [ + 92, + 381, + 501, + 474 + ], + "spans": [ + { + "bbox": [ + 92, + 381, + 501, + 474 + ], + "type": "table", + "html": "
TypeLLMURLLicence
Open domainQwen2-7B-Instructhttps://huggingface.co/Qwen/Qwen2-7B-InstructApache-2.0 license
Llam3-8B-Instructhttps://github.com/meta-llama/llama3META LLAMA 3 COMMUNITY License
Legal Domainfuzi.mingchahttps://github.com/irlab-sdu/fuzi.mingchaApache-2.0 license
DISC-LawLLMhttps://github.com/FudanDISC/DISC-LawLLMApache-2.0 license
LawGPT_zhhttps://github.com/LiuHC0428/LAW-GPT
Hanfeihttps://github.com/siat-nlp/HanFeiApache-2.0 license
Tailinghttps://github.com/DUTIR-LegalIntelligence/Tailing
LexiLawhttps://github.com/CSHaitao/LexiLawMIT license
zhihaihttps://github.com/zhihaiLLM/wisdomInterrogatoryApache-2.0 license
", + "image_path": "96a30d23176633519f60baaa92417ea8b03345e190eef6af9094729a1d7ef0ce.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "table_body" + } + ], + "index": 20 + }, + { + "bbox": [ + 67, + 481, + 525, + 505 + ], + "lines": [ + { + "bbox": [ + 67, + 481, + 525, + 505 + ], + "spans": [ + { + "bbox": [ + 67, + 481, + 525, + 505 + ], + "type": "text", + "content": "Table 4: The LLM source URLs and licenses used by CitaLaw. The parts where the license is listed as empty indicate that the author has not provided a License." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 73, + 538, + 285, + 615 + ], + "blocks": [ + { + "bbox": [ + 67, + 331, + 525, + 377 + ], + "lines": [ + { + "bbox": [ + 67, + 331, + 525, + 377 + ], + "spans": [ + { + "bbox": [ + 67, + 331, + 525, + 377 + ], + "type": "text", + "content": "Figure 5: Prompts used in this paper. (a) The prompt " + }, + { + "bbox": [ + 67, + 331, + 525, + 377 + ], + "type": "inline_equation", + "content": "p_1" + }, + { + "bbox": [ + 67, + 331, + 525, + 377 + ], + "type": "text", + "content": " is used to retrieve one law article in the Layperson dataset. (b) The prompt " + }, + { + "bbox": [ + 67, + 331, + 525, + 377 + ], + "type": "inline_equation", + "content": "p_1" + }, + { + "bbox": [ + 67, + 331, + 525, + 377 + ], + "type": "text", + "content": " is used to retrieve one law article and three precedent cases in the Practitioner dataset. (c) The prompt " + }, + { + "bbox": [ + 67, + 331, + 525, + 377 + ], + "type": "inline_equation", + "content": "p_3" + }, + { + "bbox": [ + 67, + 331, + 525, + 377 + ], + "type": "text", + "content": " is used to refine the LLM's answer based on references. (d) The prompt " + }, + { + "bbox": [ + 67, + 331, + 525, + 377 + ], + "type": "inline_equation", + "content": "p_2" + }, + { + "bbox": [ + 67, + 331, + 525, + 377 + ], + "type": "text", + "content": " is used for LLM responses without references." 
+ } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 73, + 538, + 285, + 615 + ], + "lines": [ + { + "bbox": [ + 73, + 538, + 285, + 615 + ], + "spans": [ + { + "bbox": [ + 73, + 538, + 285, + 615 + ], + "type": "image", + "image_path": "1b8aaf2ed2a7460c159e4afcce3fcd90678b2d5fe400a1644ae0c81d1b896487.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 73, + 633, + 285, + 708 + ], + "blocks": [ + { + "bbox": [ + 82, + 620, + 274, + 631 + ], + "lines": [ + { + "bbox": [ + 82, + 620, + 274, + 631 + ], + "spans": [ + { + "bbox": [ + 82, + 620, + 274, + 631 + ], + "type": "text", + "content": "(a) Metrics for CGG method with Layperson dataset." + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 73, + 633, + 285, + 708 + ], + "lines": [ + { + "bbox": [ + 73, + 633, + 285, + 708 + ], + "spans": [ + { + "bbox": [ + 73, + 633, + 285, + 708 + ], + "type": "image", + "image_path": "1871806c373c10877618a242a1d2f56ced23ad1f3ac4caa451cbf99ee3a98601.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 75, + 714, + 282, + 724 + ], + "lines": [ + { + "bbox": [ + 75, + 714, + 282, + 724 + ], + "spans": [ + { + "bbox": [ + 75, + 714, + 282, + 724 + ], + "type": "text", + "content": "(b) Methods for CitaLaw metric with Practitioner dataset." + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 67, + 734, + 289, + 759 + ], + "lines": [ + { + "bbox": [ + 67, + 734, + 289, + 759 + ], + "spans": [ + { + "bbox": [ + 67, + 734, + 289, + 759 + ], + "type": "text", + "content": "Figure 6: The performance of different NLI models when the LLM is Qwen." 
+ } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 284, + 780, + 312, + 791 + ], + "type": "text", + "content": "11195" + } + ] + } + ], + "index": 27 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 116, + 100, + 477, + 211 + ], + "blocks": [ + { + "bbox": [ + 116, + 100, + 477, + 211 + ], + "lines": [ + { + "bbox": [ + 116, + 100, + 477, + 211 + ], + "spans": [ + { + "bbox": [ + 116, + 100, + 477, + 211 + ], + "type": "table", + "html": "
TypeDatasetURLLicence
QuestionLayperson Practitionerhttps://github.com/open-compass/LawBenchApache-2.0 license
https://github.com/CSHaitao/LexEvalMIT License
CorpusLeCaRDhttps://github.com/myx666/LeCaRDMIT License
ELAMhttps://github.com/ruc-wjyu/IOT-MatchMIT License
CAIL2021-sfzyhttps://github.com/china-ai-law-challenge/CAIL2021
LJP-MSJudg fuzi.mingchahttps://github.com/mly-nlp/LJP-MSJudge
DISC-LawLLMhttps://github.com/irlab-sdu/fuzi.mingchaApache-2.0 license
LawGPT_zhhttps://github.com/FudanDISC/DISC-LawLLMApache-2.0 license
Hanfeihttps://github.com/LiuHC0428/LAW-GPT
https://github.com/siat-nlp/HanFeiApache-2.0 license
", + "image_path": "73a65b8872e18e177946d159ac17775e763b3dd786f8015bae458aa6daedd73a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 71, + 314, + 523, + 380 + ], + "blocks": [ + { + "bbox": [ + 67, + 219, + 525, + 245 + ], + "lines": [ + { + "bbox": [ + 67, + 219, + 525, + 245 + ], + "spans": [ + { + "bbox": [ + 67, + 219, + 525, + 245 + ], + "type": "text", + "content": "Table 5: The dataset source URLs and licenses used by CitaLaw. The parts where the license is listed as empty indicate that the author has not provided a License." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 71, + 314, + 523, + 380 + ], + "lines": [ + { + "bbox": [ + 71, + 314, + 523, + 380 + ], + "spans": [ + { + "bbox": [ + 71, + 314, + 523, + 380 + ], + "type": "table", + "html": "
ScoreDescription
1No Entailment: The former does not entail the latter at all, with no logical connection between the two.
2Weak Entailment: A partial entailment where the former somewhat relates to the latter, but the connection is weak and not fully conclusive.
3Moderate Entailment: A moderate degree of entailment, meaning the former generally leads to the latter in most cases, but exceptions exist.
4Strong Entailment: A strong logical relationship where the former can derive the latter in the vast majority of cases.
5Complete Entailment: The former fully entails the latter in all cases, with an unambiguous and definitive logical connection between them.
", + "image_path": "080385d2bcacab2925a1b61996cc0e63dbd2fddf493e74f5c86d6f067e0c4776.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 71, + 467, + 521, + 551 + ], + "blocks": [ + { + "bbox": [ + 169, + 388, + 423, + 400 + ], + "lines": [ + { + "bbox": [ + 169, + 388, + 423, + 400 + ], + "spans": [ + { + "bbox": [ + 169, + 388, + 423, + 400 + ], + "type": "text", + "content": "Table 6: Scoring Criteria for Human Evaluation of Entailment." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 71, + 467, + 521, + 551 + ], + "lines": [ + { + "bbox": [ + 71, + 467, + 521, + 551 + ], + "spans": [ + { + "bbox": [ + 71, + 467, + 521, + 551 + ], + "type": "table", + "html": "
MetricFluencyCorrectnessCitationAll
CategoryRetrieverMauveRouge-1Rouge-2Rouge-LBERT-FCorrectcCorrectaCorrectdCitaLawAvg
Llama3 (Llam3-8B-Instruct)Criminal37.4418.072.1813.1561.7164.0363.5664.3680.3444.98
Civil56.1618.272.3413.4461.9063.2263.8963.3580.9747.06
BGE61.0123.976.0517.9165.9467.2977.3174.9586.7053.46
Qwen2 (Qwen2-7B-Instruct)Criminal55.2621.094.5314.3264.7363.1064.8965.8561.6046.15
Civil52.4420.484.1613.8164.4561.7964.9465.6259.8845.29
BGE75.1022.264.7715.4165.2867.5078.6277.8277.5953.82
", + "image_path": "027878eda94a6faf8c54569893beefb2f3a5cb08aa3267780c477ccced102680.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 70, + 643, + 523, + 715 + ], + "blocks": [ + { + "bbox": [ + 67, + 560, + 525, + 586 + ], + "lines": [ + { + "bbox": [ + 67, + 560, + 525, + 586 + ], + "spans": [ + { + "bbox": [ + 67, + 560, + 525, + 586 + ], + "type": "text", + "content": "Table 7: Performance comparisons on retrieval models in the Layperson dataset when the method is CGG. The best performance is indicated in bold." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 70, + 643, + 523, + 715 + ], + "lines": [ + { + "bbox": [ + 70, + 643, + 523, + 715 + ], + "spans": [ + { + "bbox": [ + 70, + 643, + 523, + 715 + ], + "type": "table", + "html": "
MetricFluencyCorrectnessCitationAll
CategoryRetrieverMauveRouge-1Rouge-2Rouge-LBERT-FCorrectcCorrectaCorrectdCitaLawCitaCCitaaCitaDAvg
Llama3 (Llam3-8B-Instruct)Criminal34.2525.797.8619.4265.0366.2776.3076.8270.5966.4170.0969.4754.03
Civil39.8426.398.0720.0265.2765.4175.7875.7369.2167.5269.5469.1654.33
BGE36.3726.157.8419.5565.6067.1976.3677.7373.5868.2367.8767.6554.51
Qwen2 (Qwen2-7B-Instruct)Criminal32.4931.7911.0923.9369.7972.0080.8181.5368.4268.4271.8671.5456.97
Civil33.3731.6711.0623.8469.6373.3580.5781.2769.1166.4170.0969.4756.65
BGE39.6631.0110.7523.4369.0673.4980.1181.1170.3767.8269.5370.0157.20
", + "image_path": "14d673f55be52d60b789d60dd518ee6ad7c5133dc3ce3a80ce4cb29596d4df1e.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 724, + 525, + 749 + ], + "lines": [ + { + "bbox": [ + 67, + 724, + 525, + 749 + ], + "spans": [ + { + "bbox": [ + 67, + 724, + 525, + 749 + ], + "type": "text", + "content": "Table 8: Performance comparisons on retrieval models in the Practitioner dataset when the method is CGG. The best performance is indicated in bold." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "spans": [ + { + "bbox": [ + 283, + 780, + 312, + 791 + ], + "type": "text", + "content": "11196" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 13 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/2025/ClaimPKG_ Enhancing Claim Verification via Pseudo-Subgraph Generation with Lightweight Specialized LLM/08384e56-2d5c-4ecb-b64c-ecb74bcfc53b_content_list.json b/2025/ClaimPKG_ Enhancing Claim Verification via Pseudo-Subgraph Generation with Lightweight Specialized LLM/08384e56-2d5c-4ecb-b64c-ecb74bcfc53b_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..60ebc533b8d1e9fb53d78e874342796e93c1a152 --- /dev/null +++ b/2025/ClaimPKG_ Enhancing Claim Verification via Pseudo-Subgraph Generation with Lightweight Specialized LLM/08384e56-2d5c-4ecb-b64c-ecb74bcfc53b_content_list.json @@ -0,0 +1,3003 @@ +[ + { + "type": "text", + "text": "ClaimPKG: Enhancing Claim Verification via Pseudo-Subgraph Generation with Lightweight Specialized LLM", + "text_level": 1, + "bbox": [ + 166, + 89, + 831, + 130 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Hoang Pham*, Thanh-Do Nguyen*, 
Khac-Hoai Nam Bui†", + "bbox": [ + 253, + 151, + 747, + 168 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Viettel Artificial Intelligence and Data Services Center,", + "bbox": [ + 273, + 168, + 722, + 185 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Viettel Group, Vietnam", + "bbox": [ + 401, + 186, + 593, + 202 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{hoangpv4, dont15, nambkh} @ viettel.com.vn", + "bbox": [ + 310, + 203, + 685, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 260, + 260, + 339, + 275 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Integrating knowledge graphs (KGs) to enhance the reasoning capabilities of large language models (LLMs) is an emerging research challenge in claim verification. While KGs provide structured, semantically rich representations well-suited for reasoning, most existing verification methods rely on unstructured text corpora, limiting their ability to effectively leverage KGs. Additionally, despite possessing strong reasoning abilities, modern LLMs struggle with multi-step modular pipelines and reasoning over KGs without adaptation. To address these challenges, we propose ClaimPKG1, an end-to-end framework that seamlessly integrates LLM reasoning with structured knowledge from KGs. Specifically, the main idea of ClaimPKG is to employ a lightweight, specialized LLM to represent the input claim as pseudo-subgraphs, guiding a dedicated subgraph retrieval module to identify relevant KG subgraphs. These retrieved subgraphs are then processed by a general-purpose LLM to produce the final verdict and justification. Extensive experiments on the FactKG dataset demonstrate that ClaimPKG achieves state-of-the-art performance, outperforming strong baselines in this research field by $9\\% - 12\\%$ accuracy points across multiple categories. 
Furthermore, ClaimPKG exhibits zero-shot generalizability to unstructured datasets such as HoVer and FEVERIOUS, effectively combining structured knowledge from KGs with LLM reasoning across various LLM backbones.", + "bbox": [ + 141, + 292, + 460, + 762 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 114, + 778, + 258, + 793 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In today's rapidly evolving information landscape, distinguishing fact from misinformation is becoming more challenging, especially with the rise of AI-generated content. Robust claim verification", + "bbox": [ + 112, + 806, + 489, + 870 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/1f79aa990ca6454f337d823732bf436f1207f9d509390dd3c4d4aaae0fda0a94.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 547, + 258, + 845, + 381 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/101c2011dbb6cd268f3b14346c625c2dd2859bef56228ac675c763c1aa6f5077.jpg", + "image_caption": [ + "c) Our Method - ClaimPKG", + "Figure 1: Different claim verification paradigms: (a) Unstructured Text-based methods focusing on claim decomposition and sequential reasoning over text, (b) KG-based methods facing challenges in entity resolution and structured reasoning, and (c) ClaimPKG's unified framework with specialized modules for pseudosubgraph generation, retrieval, and general reasoning." + ], + "image_footnote": [], + "bbox": [ + 547, + 382, + 843, + 445 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "systems, leveraging NLP methods to automatically assess the veracity of claims (Glockner et al., 2022a,b; Thorne and Vlachos, 2018), are essential to ensure information reliability. 
Effective methods require not only accuracy but also transparency, necessitating strong reasoning to identify evidence and provide clear justifications (Pan et al., 2023).", + "bbox": [ + 507, + 595, + 884, + 708 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Most existing verification approaches focus on unstructured text corpora, using techniques like chain-of-thought (CoT) reasoning (Wei et al., 2022) to break down claims for verification. Approaches like ProgramFC (Pan et al., 2023) and FOLK (Wang and Shu, 2023) employ modular pipelines to verify claims against text-based knowledge bases (Figure 1(a)). However, the inherent limitations of text representation pose challenges. Specifically, ambiguous entity references and complex multi-hop relationships make it difficult to perform rigorous verification against unstructured text.", + "bbox": [ + 507, + 709, + 882, + 902 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In contrast, Knowledge Graphs (KGs) provide", + "bbox": [ + 526, + 904, + 880, + 921 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal contribution.", + "bbox": [ + 134, + 881, + 262, + 892 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "† Corresponding author.", + "bbox": [ + 137, + 894, + 280, + 906 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$^{1}$ https://github.com/HoangHoang1408/ClaimPKG", + "bbox": [ + 137, + 906, + 440, + 920 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "5271", + "bbox": [ + 480, + 927, + 517, + 940 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "Findings of the Association for Computational Linguistics: ACL 2025, pages 5271-5290", + "bbox": [ + 228, + 945, + 768, + 958 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "July 27 - August 1, 2025 ©2025 Association for Computational Linguistics", + "bbox": [ + 268, + 959, + 727, + 972 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": 
"structured relationships for effective reasoning (Luo et al., 2024; Sun et al., 2024), yet their use in claim verification remains limited. Existing KG-based approaches (Figure 1(b)) (Kim et al., 2023b; Zhou et al., 2019; Kim et al., 2023a) lack end-to-end solutions, often requiring pre-extracted entities via modules like entity or relation extraction. Meanwhile, despite excelling at general reasoning, LLMs struggle with KG-specific tasks like entity resolution and multi-hop reasoning (Cao et al., 2021; Aly et al., 2021), suggesting the need for a system combining LLM capabilities with KG-based inference.", + "bbox": [ + 110, + 84, + 492, + 275 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Overall, solving claim verification problems is hindered by following major limitations: (1) Entity Ambiguity: Systems must accurately disambiguate entities within claims to identify relevant evidence (Aly et al., 2021); (2) Multihop Reasoning: Complex claims often require reasoning across multiple evidence from different sources (Pan et al., 2023; Wang and Shu, 2023); and (3) Limited integration of KGs and LLMs: Current approaches are underexploring the potential of combining the application of structured representation with strong inference capabilities of LLMs (Kim et al., 2023a).", + "bbox": [ + 110, + 277, + 490, + 470 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address these challenges, we propose ClaimPKG (Claim Verification using Pseudo-Subgraph in Knowledge Graphs), a novel end-to-end framework that synergizes the adaptability and generalization strengths of LLMs with the structured and rigorous representation of KGs to enable robust and transparent claim verification. 
As specified in Figure 1(c), ClaimPKG operates through three phases: (1) Pseudo-Subgraphs Generation: A KG-specialized lightweight LLM generates pseudo subgraphs as the representations of input claims under a Trie-based KG-Entity Constraint, ensuring the correctness of extracted entities; (2) Subgraphs Retrieval: A retrieval algorithm considers generated pseudo subgraphs as queries to identify actual relevant KG subgraphs as evidence; and (3) General Reasoning: A general-purpose LLM reasons over the retrieved KG subgraphs to produce the verdict and human-readable justifications. Through extensive experiments on the FactKG dataset, ClaimPKG achieves state-of-the-art performance, demonstrating its effectiveness over various claim types with a small number of training samples. Furthermore, its zero-shot generalizability to unstructured datasets (HoVer, FEVEROUS) highlights its robustness.", + "bbox": [ + 110, + 470, + 490, + 889 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our contributions can be summarized as follows: (1) We introduce ClaimPKG, a holistic framework", + "bbox": [ + 112, + 889, + 489, + 921 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "that integrates LLMs and KGs for accurate and interpretable claim verification, handling various types of claims in a unified manner; (2) We develop a lightweight specialized LLM with its according decoding algorithm for pseudo-subgraph generation and pair it with general-purpose LLMs to achieve robust reasoning; and (3) We validate the effectiveness of ClaimPKG through extensive experiments, achieving state-of-the-art performance on structure-based datasets and generalizing to unstructure-based datasets.", + "bbox": [ + 507, + 84, + 885, + 261 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 507, + 278, + 665, + 293 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Claim Verification Approaches. 
Claim verification systems utilize knowledge bases that can be categorized into unstructured and structured formats. In the unstructured domain, text-based verification methods predominate, with systems designed to verify claims against textual evidence, as demonstrated in the FEVER dataset (Thorne et al., 2018). Recent advances have focused on handling specialized verification scenarios, including ambiguous question-answer pairs (Park et al., 2022), detecting factual changes (Schuster et al., 2021), and processing multiple documents concurrently (Jiang et al., 2020). For structured verification, research has primarily focused on tables and graphs, with early work developing specialized architectures: graph neural networks for knowledge graph processing (Zhou et al., 2020), table-specific transformers (Herzig et al., 2020), and tree-structured decoders for hierarchical data (Wang et al., 2020).", + "bbox": [ + 507, + 306, + 885, + 611 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Claim Verification over Knowledge Graphs (KGs). The emergence of Large Language Models (LLMs) has simplified direct reasoning over textual corpora for claim verification, as demonstrated by ProgramFC (Pan et al., 2023) and FOLK (Wang and Shu, 2023). However, structured data sources like tables and graphs can provide more grounded and robust verification results (Kim et al., 2023b). Knowledge graphs are particularly advantageous as they enable explicit representation of reasoning processes through logical rules over nodes and edges. FactKG (Kim et al., 2023b) established a foundation in this direction by introducing a comprehensive dataset for evaluating modern verification methods. KG-GPT (Kim et al., 2023a) followed this work by demonstrating performance gains through a pipeline that performs sentence decomposition, subgraph retrieval, and logical inference. 
Additionally, while not directly addressing", + "bbox": [ + 507, + 615, + 885, + 922 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "5272", + "bbox": [ + 480, + 927, + 521, + 940 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "claim verification, StructGPT (Jiang et al., 2023) and RoG (Luo et al., 2024) achieved promising results in related tasks (e.g., Knowledge Base Question Answering) by collecting relevant evidence, such as subgraphs in KGs, then leveraging LLMs for complex reasoning in particular scenarios.", + "bbox": [ + 112, + 84, + 489, + 181 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Preliminary", + "text_level": 1, + "bbox": [ + 112, + 192, + 253, + 208 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Knowledge Graph: Knowledge Graph (KG) $\\mathcal{G}$ represents facts as triplets of format $t = (e,r,e')$ , where entities $e,e'\\in \\mathcal{E}$ are connected by a relation $r\\in \\mathcal{R}$ ; $r$ can also be referred as $r(e,e')$ .", + "bbox": [ + 112, + 216, + 487, + 281 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Claim Verification: Given a claim $c$ , a verification model $\\mathcal{F}$ determines its veracity as Supported or Refuted based on an external knowledge base $\\kappa$ , while also providing a justification $j$ to explain the predicted label. This work specifically considers the scenario where $\\kappa$ is structured as a Knowledge Graph $\\mathcal{G}$ , enabling reasoning over graph knowledge to infer $v$ and $j$ . Formally, the verification process is defined as: $(v,j) = \\mathcal{F}(c,\\mathcal{G})$ .", + "bbox": [ + 112, + 284, + 485, + 429 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Trie-based Constrained Decoding: A Trie (Wikipedia, 2025b) indexes predefined token sequences, where each root-to-node path represents a prefix. 
During LLM generation, this structure restricts token selection to only valid Trie paths, ensuring reliable output.", + "bbox": [ + 112, + 432, + 489, + 530 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4 ClaimPKG", + "text_level": 1, + "bbox": [ + 112, + 539, + 245, + 555 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4.1 Formulation of ClaimPKG", + "text_level": 1, + "bbox": [ + 112, + 565, + 369, + 580 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We formulate the ClaimPKG framework using a probabilistic approach. Given a claim $c$ and a prebuilt KG $\\mathcal{G}$ , our objective is to model the distribution $p_{\\theta}(v,j|c,\\mathcal{G})$ , where $v$ denotes the verdict and $j$ the justification. However, direct computation for this distribution is infeasible as reasoning over the entire KG is not practical given its large size. To address this, we propose to select $S_{c}$ , a subgraph of $\\mathcal{G}$ relevant to $c$ containing necessary information to derive our target distribution. Treating $S_{c}$ as a latent variable, $p_{\\theta}(v,j|c,\\mathcal{G})$ is decomposed as:", + "bbox": [ + 112, + 586, + 487, + 764 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\np _ {\\theta} (v, j \\mid c, \\mathcal {G}) = \\sum_ {\\mathcal {S} _ {c}} p _ {\\theta} (v, j \\mid c, \\mathcal {S} _ {c}) p _ {\\theta} (\\mathcal {S} _ {c} \\mid c, \\mathcal {G}) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 127, + 772, + 487, + 800 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $p_{\\theta}(\\mathcal{S}_c|c,\\mathcal{G})$ models the subgraph selection, and $p_{\\theta}(v,j|c,\\mathcal{S}_c)$ models the generator of the verdict and justification given $\\mathcal{S}_c$ . 
However, direct computation of $p_{\\theta}(\\mathcal{S}_c|c,\\mathcal{G})$ is challenging due to modality mismatch between the input $c$ (text) and the target $\\mathcal{S}_c$ (graph structure), hindering the employment of retrieval methods for $\\mathcal{S}_c$ . To bridge this", + "bbox": [ + 112, + 807, + 489, + 921 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "gap, we decompose the subgraph selection into:", + "bbox": [ + 507, + 84, + 865, + 99 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\np _ {\\theta} \\left(\\mathcal {S} _ {c} | c, \\mathcal {G}\\right) = \\sum_ {\\mathcal {P} _ {c}} p _ {\\theta} \\left(\\mathcal {S} _ {c} \\mid \\mathcal {P} _ {c}, \\mathcal {G}\\right) p _ {\\theta} \\left(\\mathcal {P} _ {c} | c, \\mathcal {G}\\right) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 527, + 105, + 882, + 133 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $p_{\\theta}(\\mathcal{P}_c|c,\\mathcal{G})$ models the generation of the graph representation $\\mathcal{P}_c$ , which we refer as \"pseudo subgraph\", from a textual claim $c$ , and $p_{\\theta}(\\mathcal{S}_c|\\mathcal{P}_c,\\mathcal{G})$ models the distribution over relevant subgraphs $\\mathcal{S}_c$ given $\\mathcal{P}_c$ . While equations 1 and 2 establish our theoretical framework for ClaimPKG, computing exact probabilities by summing over all possible $(\\mathcal{S}_c,\\mathcal{P}_c)$ pairs is intractable. 
Addressing this we propose two approximations: (1) We infer the veracity using only the most relevant subgraph $\\mathcal{S}_c^*$ :", + "bbox": [ + 507, + 140, + 882, + 300 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\left(v ^ {*}, j ^ {*}\\right) \\sim p _ {\\theta} (v, j | c, \\mathcal {S} _ {c} ^ {*}) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 601, + 307, + 882, + 326 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "(2) We assume each generated pseudo-subgraph is reasonable with a high probability, allowing us to approximate the subgraph selection in 2 as:", + "bbox": [ + 507, + 332, + 882, + 379 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {S} _ {c} ^ {(i)} = \\arg \\max p _ {\\theta} \\left(\\mathcal {S} _ {c} | \\mathcal {P} _ {c} ^ {(i)}, \\mathcal {G}\\right) \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 578, + 385, + 882, + 406 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "with $\\mathcal{P}_c^{(i)}$ is the $ith$ pseudo-graph generation. We then construct $\\mathcal{S}_c^*$ by aggregating multiple sampled subgraphs, specifically $\\mathcal{S}_c^* = \\bigcup \\mathcal{S}_c^{(i)}$ .", + "bbox": [ + 507, + 413, + 880, + 464 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "These approximations lead ClaimPKG to comprise 3 key modules as depicted in Figure 2: (1) Pseudo Subgraph Generation to generate graph representations $\\mathcal{P}_c$ 's given claim $c$ ; (2) Subgraph Retrieval to retrieve relevant evidence subgraph $S_c^*$ ; and (3) General Reasoning to generate final verdict $v$ and justification $j$ . 
The inference procedure is described as follows:", + "bbox": [ + 507, + 464, + 882, + 592 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Inference Procedure of ClaimPKG", + "text_level": 1, + "bbox": [ + 521, + 604, + 798, + 618 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Preprocessing: Index the KG $\\mathcal{G}$ into an Entity. TriE for effective entity lookup.", + "bbox": [ + 519, + 624, + 870, + 656 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Pseudo Subgraph Generation: Generate multiple graph representations (pseudo subgraphs) $\\mathbb{P}_c = \\{\\mathcal{P}_c^{(i)}\\}_{i=1}^N$ from claim $c$ , using a specialized LLM with beam search and Entity-Trie constraints.", + "2. Subgraph Retrieval: Use each pseudo graph in $\\mathbb{P}_c$ for querying the most respective relevant subgraph $S_{c}^{(i)}$ in the KG $\\mathcal{G}$ , resulting in a set of $\\{S_c^{(i)}\\}_{i = 1}^N$ following Equation 4, then aggregate them to form $S_{c}^{*} = \\bigcup_{i = 1}^{N}S_{c}^{(i)}$ .", + "3. General Reasoning: Employ a general-purpose LLM to reason veracity $(v^{*},j^{*})\\sim p_{\\theta}(v,j|c,\\mathcal{S}_{c}^{*})$ following Equation 3." + ], + "bbox": [ + 519, + 657, + 872, + 873 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The subsequent sections provide details about each component in the ClaimPKG framework.", + "bbox": [ + 507, + 889, + 880, + 920 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "5273", + "bbox": [ + 480, + 927, + 519, + 940 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/8f8c020c8c78d4277712169bc006bc9507ecf8d5ded3239cae38ce9a08b55ea3.jpg", + "image_caption": [ + "Figure 2: Illustration of the ClaimPKG for claim verification. 
The framework consists of three key modules: (1) Pseudo-subgraph Generation, constructing representative subgraphs; (2) Subgraph Retrieval, selecting the most pertinent KG subgraphs; and (3) General Reasoning, integrating them for accurate and interpretable verification." + ], + "image_footnote": [], + "bbox": [ + 119, + 80, + 873, + 369 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2 Pseudo Subgraph Generation", + "text_level": 1, + "bbox": [ + 112, + 445, + 391, + 462 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The first step to effectively verify a claim is to understand its content thoroughly and represent it in a format compatible with the KG. Since evidence comes from KG, representing claims in the graph format is crucial, which captures hypothetical relations among entities in an effective way that enables effective comparisons with KG subgraphs for evidence retrieval. However, this process faces two main challenges: (1) handling ambiguity resolution and multi-hop reasoning, and (2) ensuring accurate entity extraction from the claim.", + "bbox": [ + 112, + 467, + 487, + 643 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Specialized LLM. To address the first challenge, the Pseudo Subgraph Generation module employs a lightweight model optimized for processing input claims. Following (Li et al., 2013; Miwa and Bansal, 2016), the model is trained to jointly extract entities and their corresponding relations from a claim $c$ . Specifically, from $c$ the model constructs a pseudo subgraph $\\mathcal{P}_c$ comprising triplets in the form of head_entity||relation||tail-entity (illustrated in Figure 2). 
To ensure the generated subgraph can identify entities requiring ambiguity resolution and multi-hop reasoning, we employ a specialized annotation mechanism: when the claim references an entity indirectly—either without explicit naming or through relations to other entities—we denote it as unknown_i, with the index i to keep track of different entities. This", + "bbox": [ + 112, + 646, + 489, + 921 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "notation effectively signals the need for further disambiguation and reasoning within the KG in subsequent steps. Training details enabling this annotation strategy are presented in Appendix B.1.", + "bbox": [ + 507, + 445, + 882, + 511 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Trie-Constrained Decoding. For the second challenge, we develop a constrained decoding algorithm with an Entity Trie inspired by (Cao et al., 2021). We construct a trie $\\mathcal{T}$ from the KG's entity set $\\mathcal{E} = \\{e_1,e_2,\\ldots \\}$ . The specialized LLM generates entities using special tokens $\\langle e\\rangle$ and $\\langle /e\\rangle$ to mark entity boundaries. When $\\langle e\\rangle$ is generated, the decoding process restricts token selection based on $\\mathcal{T}$ until $\\langle /e\\rangle$ is produced, ensuring all generated entities exist in the KG. Outside such boundaries, the model generates relations by sampling from an unconstrained original token distribution. This mechanism ensures entity reliability while preserving flexible relation extraction (Edge et al., 2024).", + "bbox": [ + 507, + 514, + 882, + 739 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Multiple Representations. 
In order to capture different semantic views of a claim, we employ beam search along with the described sampling strategy, which is proved to improve the coverage of extracted triplets (table 8), resulting in multiple representations $\\mathbb{P}_c = \\{\\mathcal{P}_c^{(i)}\\}_{i = 1}^N$ for an input claim.", + "bbox": [ + 507, + 743, + 882, + 843 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In summary, each of the claim's graph representations satisfies following properties: (1) effectively capture the underlying graph structure of that claim, and (2) correctly align with the KG's entities.", + "bbox": [ + 507, + 857, + 882, + 921 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "5274", + "bbox": [ + 480, + 927, + 519, + 940 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.3 Subgraph Retrieval", + "text_level": 1, + "bbox": [ + 112, + 84, + 315, + 99 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The second component of ClaimPKG involves retrieving relevant KG subgraphs as evidence by using a dedicated algorithm that matches the pseudosubgraphs $\\mathcal{P}_c$ 's from the previous step to actual subgraphs in the KG. We present the high-level description of our algorithm here, while its complete formulation is detailed in Appendix D. We categorize triplets in a $\\mathcal{P}_c$ into: (1) Incomplete triplets, where either the head or tail entity is marked as unknown, and (2) Complete triplets, where both head and tail entities are explicitly identified.", + "bbox": [ + 112, + 105, + 489, + 282 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Relation Scoring Function: We define a function $\\operatorname{Sim}(r_1, r_2)$ to quantify the similarity between two relations, where a higher score indicates greater similarity. 
This function can be instantiated via various mechanisms (e.g., embedding similarity, re-ranking, fuzzy matching, etc.).", + "bbox": [ + 112, + 285, + 489, + 381 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Incomplete Triplets Retrieval: Our goal is to identify evidence (actual triplets in the KG) to inform us about entities marked as unknown and their respective relations with explicit entities in the pseudo-subgraphs. First, for a $\\mathcal{P}_c$ , we group triplets sharing the same unknown entity $u$ into a group $g$ (e.g., in Figure 2, triplets associated with unknown_0 are grouped together). Subsequently, for each group $g$ characterized by the unknown entity $u$ , we denote: $\\mathcal{E}_u = \\{e_{u1}, \\ldots, e_{un}\\}$ as entities directly connected to $u$ in the pseudo-subgraph $\\mathcal{P}_c$ and $\\mathcal{R}_u = \\{r_{u1}, \\ldots, r_{un}\\}$ as relations from $u$ to corresponding entities in $\\mathcal{E}_c$ . In $g$ , for each explicit entity $e_{ui} \\in \\mathcal{E}_u$ , we first retrieve candidate set $C_{ui} = \\{e_{i1}^c, \\ldots, e_{im}^c\\}$ containing all entities connected to $e_{ui}$ in the KG, then collect all candidate sets into $\\mathcal{C}_u = \\{C_{u1}, \\ldots, C_{un}\\}$ .", + "bbox": [ + 112, + 385, + 489, + 658 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To determine the best candidates for resolving $u$ , we propose an Entity Scoring mechanism, which is based on two assumptions: (1) since $u$ has pseudo relations with all entities in $\\mathcal{E}_u$ , a candidate $e^c$ connected to more entities in $\\mathcal{E}_u$ is more likely to resolve $u$ ; and (2) because every information related to $e_{ui}$ and $u$ is crucial to verify the initial claim, each candidate set $C_{ui}$ must contribute to the final verification. 
Note that an entity can appear in multiple candidate sets, hence we compute a \"global\" score for each $e_{ij}^c$ in a candidate set $C_{ui}$ :", + "bbox": [ + 112, + 659, + 489, + 838 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {s c o r e} \\left(e _ {i j} ^ {c}\\right) = \\sum_ {r} ^ {R _ {i j} ^ {u}} \\operatorname {S i m} \\left(r _ {u i}, r\\right) \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 848, + 487, + 873 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "with $R_{ij}^{u} = \\bigcup_{i = 1}^{\\left|\\mathcal{E}_{u}\\right|}\\{r(e_{ui},e_{ij}^{c})\\mid$ if $e_{ij}^{c}\\in C_{ui}\\}$ , the set of all relations across candidate sets appearing", + "bbox": [ + 112, + 885, + 487, + 921 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "in $\\mathcal{C}_u$ that connect $e_{ij}^c$ with an $e_{ui}$ . Subsequently, to construct the set $T_{u}$ of most relevant triplets to a group $g$ , we employ a ranking function as follows:", + "bbox": [ + 507, + 84, + 884, + 133 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nT _ {u} = \\bigcup_ {i = 1} ^ {| C _ {u} |} \\underset {\\text {t r i p l e t}, k _ {1}} {\\arg \\max } \\left\\{\\pi_ {i j} \\mid j \\leq \\left| C _ {u i} \\right| \\right\\} \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 561, + 139, + 882, + 184 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "with $\\pi_{ij}$ is simply $score(e_{ij}^{c})$ and (triplet, $k_{1}$ ) denotes the selection of top $k_{1}$ triplets $(e_{ui}, r, e^{c})$ having the highest global scores from each set in $\\mathcal{C}_{u}$ .", + "bbox": [ + 507, + 190, + 884, + 239 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "While equation 5 ensures candidates appearing in multiple candidate sets and having high similar scores are prioritized, equation 6 ensures every entity in $\\mathcal{E}_u$ has at least $k_{1}$ triplets, both of which make use of assumptions (1) 
and (2).", + "bbox": [ + 507, + 239, + 882, + 319 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Complete Triplets Retrieval: For each triplet $(e_1, r, e_2)$ in a $\\mathcal{P}_c$ , we first find top $k_2$ similar relations between $e_1$ and $e_2$ in the KG $\\mathcal{G}$ using the Sim function. If no direct connection exists (e.g., \"103 Colmore Row\" and \"Vedat Tek\" as shown in figure 2), the triplet is decomposed into two: $(e_1, r, \\text{unknown}_0)$ and $(\\text{unknown}_0, r, e_2)$ . These are then handled via Incomplete Triplets Retrieval.", + "bbox": [ + 507, + 323, + 882, + 451 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Subgraph Union: In summary, for an input claim $c$ , multiple pseudo-graphs are generated, containing complete and incomplete triplets. These triplets undergo processing to handle shared unknown entities and identified entities that are not connected in the KG $\\mathcal{G}$ , and are used to query $\\mathcal{G}$ for relevant triplets. All retrieved evidence triplets are aggregated into a final subgraph $S_{c}^{*}$ , serving as the evidence for the final component of ClaimPKG.", + "bbox": [ + 507, + 455, + 882, + 599 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.4 General Reasoning", + "text_level": 1, + "bbox": [ + 507, + 609, + 705, + 626 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The General Reasoning module concludes the ClaimPKG framework by determining claim veracity through reasoning over input claim $c$ and retrieved evidence subgraph $S_{c}^{*}$ . As complex tasks, especially claim verification, require deliberate chain-of-thought reasoning (Jiang et al., 2020; Wang et al., 2023), we use a general-purpose LLM to analyze $c$ and $S_{c}^{*}$ . Using carefully designed prompts (Figure 6), the module generates a natural language justification $j$ and verdict $v$ . 
Expanded from equation 3, this step is formalized as:", + "bbox": [ + 507, + 631, + 882, + 807 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\np _ {\\theta} (v, j | c, \\mathcal {S} _ {c} ^ {*}) = p _ {\\theta} (v | c, j, \\mathcal {S} _ {c} ^ {*}) p _ {\\theta} (j | c, \\mathcal {S} _ {c} ^ {*}) \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 529, + 815, + 882, + 834 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $p(j|c, S_c^*)$ produces the justification and $p(v|c, j, S_c^*)$ determines veracity. This model-agnostic design enables integration with state-of-the-art LLMs (e.g., Llama, Qwen and GPT4) for zero-shot reasoning.", + "bbox": [ + 507, + 841, + 882, + 921 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5275", + "bbox": [ + 480, + 927, + 519, + 940 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5 Experiments", + "text_level": 1, + "bbox": [ + 112, + 84, + 260, + 99 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1 Experimental Setup", + "text_level": 1, + "bbox": [ + 112, + 110, + 317, + 126 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Datasets. Our primary benchmark is the FactKG dataset (Kim et al., 2023b), designed for claim verification over the DBpedia KG (Lehmann et al., 2015). It consists of 108K claims grounded in DBpedia and labelled as either SUPPORTED or REFUTED. The claims span five distinct categories: One-hop, Conjunction, Existence, Multi-hop, and Negation, each posing unique challenges. For evaluation, we randomly sample 2K claims from the test set, ensuring balanced representation across categories under computational efficiency. To assess the generalizability of ClaimPKG beyond structured benchmarks, we also evaluate HoVer (Jiang et al., 2020) and FEVERIOUS (Aly et al., 2021), two widely-used unstructured-based benchmarks requiring multi-hop reasoning and evidence aggregation from Wikipedia. 
Additional statistics of datasets are provided in Appendix A.", + "bbox": [ + 112, + 131, + 489, + 420 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Metrics. We use Accuracy as the primary metric along with Entity Correctness to measure if the claim's extracted entity is valid in KG. Additionally, for the FactKG dev set, we report Claim Structure Coverage, which quantifies the proportion of triplets from the original claim's graph structure successfully reconstructed by our pipeline. We refer readers to Appendix C for more details.", + "bbox": [ + 112, + 423, + 489, + 549 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Annotation. For brevity, we use Llama-3B, Llama-70B, and Qwen-72B to refer to Llama-3.2-3B, Llama-3.3-70B, and Qwen2.5-72B respectively. The * symbol denotes models fine-tuned for pseudo subgraph generation. Full model names are used when necessary.", + "bbox": [ + 112, + 551, + 489, + 646 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Baselines. We compare ClaimPKG with recent KG-based claim verification methods: Zero-shot CoT (Wei et al., 2022) prompts LLMs to generate rationales and verdicts without accessing the KG; GEAR (Zhou et al., 2019), originally designed for text-based verification, employs graph-based evidence aggregation with multiple aggregators to capture multi-evidence dependencies, using BERT for language representation and adapted for KG settings following (Kim et al., 2023b); and KG-GPT (Kim et al., 2023a), a pioneer work that combines LLMs and KGs through a structured pipeline of Sentence Segmentation, Graph Retrieval, and Logic Inference. Notably, unlike baselines which receive pre-identified claim entities along with the claim as the input, our method processes entities in an end-to-end pipeline.", + "bbox": [ + 112, + 646, + 490, + 921 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Implementation. 
For a comprehensive evaluation, we evaluate baselines on three model series: Llama 3 (Meta, 2024), Qwen 2.5 (Qwen, 2024), and GPT4o-mini (OpenAI, 2024). In ClaimPKG, we configure the Specialized LLM to generate multiple pseudo-subgraphs using a beam size of 5. For the Subgraph Retrieval algorithm, we adopt an embedding-based approach leveraging BGE-LargeEN-v1.5 (Xiao et al., 2023) to compute dot-product similarity for the Relation Scoring Function. We set the primary hyperparameters to $k_{1} = 3$ and $k_{2} = 1$ . Detailed justification is provided in Appendix C.",
        "bbox": [
            507,
            84,
            885,
            278
        ],
        "page_idx": 5
    },
    {
        "type": "text",
        "text": "5.2 Results and Analysis",
        "text_level": 1,
        "bbox": [
            507,
            294,
            719,
            310
        ],
        "page_idx": 5
    },
    {
        "type": "text",
        "text": "We present the main experimental results in this section and additional findings in Appendix C.",
        "bbox": [
            507,
            319,
            880,
            350
        ],
        "page_idx": 5
    },
    {
        "type": "text",
        "text": "(RQ1): How Does ClaimPKG Perform Against the Baselines? Table 1 compares the accuracy $(\\%)$ of ClaimPKG with baselines across claim categories of the FactKG. Key observations include:",
        "bbox": [
            507,
            355,
            884,
            418
        ],
        "page_idx": 5
    },
    {
        "type": "text",
        "text": "(1) Direct inference using LLMs with CoT reasoning significantly underperforms compared to evidence-based methods, with the best average score reaching only $69.07\\%$ , highlighting that despite LLM advancements, evidence retrieval remains crucial. (2) KG-GPT integrates knowledge graphs with LLMs but its best average score achieves only $74.70\\%$ (Llama-70B Few-shot), falling short of GEAR's fine-tuned model at $76.65\\%$ . This suggests that while LLMs excel at language tasks, they require specific adaptation for KG processing. 
(3) ClaimPKG, with the strongest configuration $(\\text{Llama}-3\\text{B}^{*} + \\text{Llama}-70\\text{B})$ and constrained by Entity-Trie for valid KG entity generation, achieves a 12-point improvement over KG-GPT and 9 points over GEAR. It particularly excels in multi-hop reasoning, demonstrating strong performance across Llama-3 and Qwen-2.5 backbones through effective structured evidence retrieval and KG integration.", + "bbox": [ + 507, + 419, + 885, + 740 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "(RQ2): How Do Different Components Affect Performance? To evaluate the impact of each component in ClaimPKG, we conduct ablation studies of the following components, maintaining Llama-3B* as the Specialized LLM and Llama-70B as the General LLM.", + "bbox": [ + 507, + 744, + 884, + 839 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Entity-Trie Constraint. We remove the Entity-Trie constraint to assess its necessity. Compared to the full setup, this reduces the entity extraction correctness from $100\\%$ to $87.5\\%$ , and overall performance from $84.64\\%$ to $82.72\\%$ .", + "bbox": [ + 507, + 841, + 884, + 919 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "5276", + "bbox": [ + 480, + 927, + 521, + 940 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/61bfcdebf0f230d60799ac75fe83febfdd5cbc6f7c63931a562659166c79107f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodEntity CorrectnessNegationExistenceConjunctionMulti-hopOne-hopAverage
Direct Inference With CoT - w/o Evidence Retrieval
GPT-4o-mini (Zero-shot CoT)-61.9159.4569.5160.8770.8364.51
Qwen-72B (Zero-shot CoT)-62.9162.2074.0462.3275.9867.49
Llama-70B (Zero-shot CoT)-64.3464.6272.4765.5878.3269.07
Baseline Comparison - w/ Evidence Retrieval
GEAR (Finetuned BERT)Known in Prior79.7279.1978.6368.3977.3476.65
KG-GPT (Llama-70B Few-shot)Known in Prior70.9165.0686.6458.8792.0274.70
KG-GPT (Qwen-72B Few-shot)Known in Prior67.3160.0889.1458.1990.8773.12
ClaimPKG (Llama-3B* + GPT-4o-mini)100.0%85.1072.6484.2372.2691.0181.05
ClaimPKG (Llama-3B* + Qwen-72B)100.0%85.2786.9084.0278.7191.2085.22
ClaimPKG (Llama-3B* + Llama-70B)100.0%84.5884.2085.6878.4990.2684.64
Ablation Results (Llama-3B* + Llama-70B) - w/ Evidence Retrieval
ClaimPKG (w/o Trie Constraint)87.50%82.5083.2483.8276.1388.0182.74
ClaimPKG (Few-shot Specialized LLM)86.52%77.9981.8977.8068.8281.6577.63
ClaimPKG (w/o Incomplete Retrieval)100.0%68.8051.2567.8461.2976.2265.08
", + "bbox": [ + 114, + 80, + 884, + 305 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Specialized LLM. When replacing the specialized LLM with few-shot prompting strategy using Llama-70B, a much larger general-purpose LLM, entity correctness further declines to $86.52\\%$ , leading overall performance to drop to $77.63\\%$ . These results demonstrate that even with examples, general-purpose LLMs struggle to produce outputs with desired graph structure correctly, emphasizing the importance of the specialized LLM in generating pseudo subgraphs.", + "bbox": [ + 110, + 367, + 489, + 527 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Incomplete Retrieval. Removing the Incomplete Triplet Retrieval function, which forces the retrieval algorithm to only query evidence using complete triplets, causes a significant average performance drop of nearly $20\\%$ compared to the full setup, showing the complete graph structure of input claims is essential for optimal performance.", + "bbox": [ + 112, + 527, + 489, + 640 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "(RQ3): Robustness and Generalization of ClaimPKG? To assess ClaimPKG's robustness, we vary model backbones, examine zero-shot generalizability, analyze the effect of training data size, and conduct error analysis.", + "bbox": [ + 112, + 644, + 489, + 722 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Model Backbones. We evaluate different LLM architectures for both Specialized and General LLMs (Table 2). For General LLMs, we test various model sizes (7B to 70B parameters) using retrieved KG triplets as input. 
For Specialized LLMs, we experiment with different small fine-tuned backbones and few-shot prompt templates (Figure 7), while keeping Llama-3.3-70B as the fixed General LLM.", + "bbox": [ + 112, + 727, + 489, + 854 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Results in Table 2 show larger General LLMs (GPT-4o-Mini, Llama-3.3-70B) outperform smaller ones (Qwen-2.5-7B, Llama-3.1-8B) by up to 8 points, highlighting model capacity's role in ag", + "bbox": [ + 112, + 857, + 489, + 921 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/9ac66200dbc3401fe34010e7a5000f1776a14812c0614b39e7ff8f7d53fd5cb1.jpg", + "table_caption": [ + "Table 1: Performance (accuracy %) comparison of ClaimPKG with baselines on 5 claim categories of FactKG dataset and their average scores." + ], + "table_footnote": [], + "table_body": "
ComponentStrategyBackboneAverage
General LLMZero-shotLlama 3.1 - 8B77.08
Llama 3.3 - 70B84.64
GPT4o - Mini81.05
Qwen 2.5 - 7B80.22
Qwen 2.5 - 72B85.22
Specialized LLMFinetuneLlama 3 - 3B84.64
Qwen 2.5 - 3B82.32
Llama 3 - 1B83.91
Qwen 2.5 - 1.5B82.20
Few-shotLlama 3.3 - 70B77.63
Qwen 2.5 - 72B77.10
", + "bbox": [ + 529, + 363, + 863, + 537 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2: Performance on Different Backbones.", + "bbox": [ + 534, + 546, + 853, + 560 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "gregating subgraph evidence. Notably, a fine-tuned 1B Specialized LLM outperforms the general 70B counterpart, demonstrating fine-tuning's effectiveness to process graph data. This supports the need to combine powerful General LLMs with adapted Specialized LLMs for optimal performance.", + "bbox": [ + 507, + 586, + 884, + 682 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/08437af0052f12721d4cbbff83bb6f61ca3eb2e52371af20b92557139d77614a.jpg", + "table_caption": [ + "Zero-shot Generalizability. To assess" + ], + "table_footnote": [], + "table_body": "
BenchmarkLlama 3Qwen 2.5
HoVer (Zero-shot CoT)66.665.3
HoVer (Support-Predicted)70.7 (14.3%)69.4 (15.7%)
FEVEROUS (Zero-shot CoT)81.180.9
FEVEROUS (Support-Predicted)83.8 (12.5%)83.6 (12.9%)
", + "bbox": [ + 510, + 709, + 884, + 789 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3: Zero-shot transferred performance on other unstructure-based benchmarks on the Support-Predicted samples along with Support Predicted rates.", + "bbox": [ + 507, + 797, + 882, + 841 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "ClaimPKG's zero-shot generalizability, we test transfer to HoVer (Jiang et al., 2020) and FEVEROUS (Aly et al., 2021) datasets. Using DBpedia (Lehmann et al., 2015) as the knowledge", + "bbox": [ + 507, + 857, + 884, + 921 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "5277", + "bbox": [ + 480, + 927, + 519, + 940 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "source, we evaluate with trained Specialized LLMs (Llama-3.2-3B and Qwen-2.5-3B) while keeping Llama-3.3-70B as the General LLM. Since external datasets may contain claims outside DBpedia's coverage, making it difficult to distinguish between knowledge gaps and actual verification failures of ClaimPKG for Refuted cases, we analyze only samples predicted as Supported. As shown in Table 3, ClaimPKG predicts Supported for only $12.5\\% - 15.7\\%$ of samples, indicating limited knowledge overlap with DBpedia. However, on these samples, ClaimPKG outperforms Llama-3.3-70B's zero-shot CoT inference by $4\\%$ accuracy on both datasets, demonstrating robust transfer to reasoning patterns in unseen data.", + "bbox": [ + 112, + 84, + 492, + 325 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Training Data Size. To assess the impact of train", + "bbox": [ + 112, + 326, + 489, + 343 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/1f8ce5c2eb5971e4f38babdf748e06ca7c28c9b4b01a81101d541f6b5a409692.jpg", + "image_caption": [ + "Figure 3: Varying Specialized LLM's training data." 
+ ], + "image_footnote": [], + "bbox": [ + 127, + 354, + 473, + 512 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ing data on the Specialized LLM, we vary the number of training samples from 0.1K to 10K, using two configurations: Llama-3.2-3B and Qwen-2.5-3B as the specialized LLM and keep the General LLM to be Llama-3.3-70B. We evaluate performance based on two metrics: average accuracy on the test set and claim structure coverage on the dev set. As shown in Figure 3, the Specialized LLMs achieve satisfactory accuracy (Llama-3.2-3B: $79.35\\%$ , Qwen-2.5-3B: $77.62\\%$ ) with just 100 training samples, demonstrating efficiency and low training costs for KG adaptation. While both structure coverage and accuracy improve up to 5K samples, coverage plateaus thereafter, and accuracy begins to decline, indicating overfitting where excessive training data reduces generalizability.", + "bbox": [ + 112, + 552, + 489, + 810 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.3 Interpretability and Error Analysis", + "text_level": 1, + "bbox": [ + 112, + 820, + 438, + 835 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ClaimPKG can improve claim verification performance while enhancing interpretability. Representative outputs of ClaimPKG (Figure 12, Appendix E) illustrate its ability to capture claim structure and provide well-grounded justifications. Notably,", + "bbox": [ + 112, + 840, + 489, + 921 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "when refuting claims, it explicitly presents contradicting evidence, ensuring transparent reasoning. 
To further assess reliability, we conducted a human analysis of 200 incorrect predictions from FactKG, categorizing errors (Figure 13, Appendix E) into: Claim Structure Errors: fail to capture the underlying claim structure; Retrieval Errors: fail to retrieve necessary evidence required for claim verification; and Reasoning Errors: incorrect logical inferences of the general LLM to judge the verdict.", + "bbox": [ + 507, + 84, + 884, + 244 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Specifically, there are 0 (0%) Claim Structure Errors, 57 (28.5%) Retrieval Errors, and 143 (71.5%) Reasoning Errors. These results suggest that, with chances (multiple beams) to generate pseudosubgraphs, the Specialized LLM can effectively capture the structural representation of claims. However, the general-purpose LLM, despite its strong reasoning capabilities, still struggles with certain complex reasoning scenarios that require specific handling. Moreover, retrieval errors highlight cases where additional implicit reasoning is necessary, as we hypothesize that direct subgraph retrieval failed to provide a comprehensive picture of the required evidence. These highlight future improvements, focusing on enhancing retrieval inference and refining reasoning for complex claim verification over structured knowledge.", + "bbox": [ + 507, + 246, + 884, + 520 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.4 Scalability of ClaimPKG", + "text_level": 1, + "bbox": [ + 507, + 533, + 752, + 549 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ClaimPKG maintains scalability and adaptability within dynamic knowledge environments. After training the Specialized LLM on a domain (e.g., Wikipedia), the system remains decoupled from the underlying Knowledge Graph (KG). Only the Entity-Trie component interfaces directly with the data. 
Consequently, when the KG undergoes updates, ClaimPKG requires merely an update of the corresponding entities within the Entity-Trie, ensuring an efficient adaptation process.",
        "bbox": [
            507,
            556,
            882,
            717
        ],
        "page_idx": 7
    },
    {
        "type": "text",
        "text": "6 Conclusion",
        "text_level": 1,
        "bbox": [
            507,
            732,
            640,
            747
        ],
        "page_idx": 7
    },
    {
        "type": "text",
        "text": "In this work, we present ClaimPKG, a novel claim verification framework combining the structure of Knowledge Graphs with the adaptability and reasoning of Large Language Models. Through Pseudosubgraph Generation, Subgraph Retrieval, and General Reasoning, it addresses limitations while ensuring transparency. Extensive experiments show state-of-the-art performance and generalizability across datasets, making ClaimPKG a step toward reliable and explainable misinformation detection.",
        "bbox": [
            507,
            760,
            882,
            921
        ],
        "page_idx": 7
    },
    {
        "type": "page_number",
        "text": "5278",
        "bbox": [
            480,
            927,
            519,
            940
        ],
        "page_idx": 7
    },
    {
        "type": "text",
        "text": "Limitations",
        "text_level": 1,
        "bbox": [
            114,
            84,
            220,
            99
        ],
        "page_idx": 8
    },
    {
        "type": "text",
        "text": "Despite their advanced reasoning capabilities, LLMs are prone to errors and biases, necessitating careful deployment, particularly in fact-checking systems where incorrect or biased outputs could contribute to misinformation. Addressing these biases remains an ongoing research challenge, requiring effective mechanisms for detection, control, and mitigation. Additionally, real-world claim verification often requires inferring implicit reasoning, where further related knowledge for a problem is necessary, and making improvements in pipeline components to handle this type of information is crucial. 
Another limitation is the performance decline observed when the Specialized LLM is trained on an excessive number of examples, highlighting the need for future research into regularization strategies. Further improvements should also focus on the general reasoning module to infer missing knowledge more effectively and enhance intricate and nuanced claim verification cases over structured knowledge.", + "bbox": [ + 115, + 110, + 489, + 447 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 115, + 474, + 213, + 489 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Rami Aly, Zhijiang Guo, Michael Sejr Schlichtkrull, James Thorne, Andreas Vlachos, Christos Christodoulopoulos, Oana Cocarascu, and Arpit Mittal. 2021. FEVEROUS: fact extraction and verification over unstructured and structured information. In Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December 2021, virtual.", + "Nicola De Cao, Gautier Izacard, Sebastian Riedel, and Fabio Petroni. 2021. Autoregressive entity retrieval. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net.", + "Darren Edge, Ha Trinh, Newman Cheng, Joshua Bradley, Alex Chao, Apurva Mody, Steven Truitt, and Jonathan Larson. 2024. From local to global: A graph RAG approach to query-focused summarization. CoRR, abs/2404.16130.", + "Max Glockner, Yufang Hou, and Iryna Gurevych. 2022a. Missing counter-evidence renders NLP fact-checking unrealistic for misinformation. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, EMNLP 2022, Abu Dhabi, United Arab Emirates, December 7-11, 2022, pages 5916-5936. Association for Computational Linguistics.", + "Max Glockner, Yufang Hou, and Iryna Gurevych. 2022b. 
Missing counter-evidence renders NLP fact-checking" + ], + "bbox": [ + 115, + 497, + 489, + 920 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "unrealistic for misinformation. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, EMNLP 2022, Abu Dhabi, United Arab Emirates, December 7-11, 2022, pages 5916-5936. Association for Computational Linguistics.", + "Jonathan Herzig, Pawel Krzysztof Nowak, Thomas Müller, Francesco Piccinno, and Julian Eisenschlos. 2020. TaPas: Weakly supervised table parsing via pre-training. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4320-4333, Online. Association for Computational Linguistics.", + "Jinhao Jiang, Kun Zhou, Zican Dong, Keming Ye, Xin Zhao, and Ji-Rong Wen. 2023. StructGPT: A general framework for large language model to reason over structured data. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 9237-9251, Singapore. Association for Computational Linguistics.", + "Yichen Jiang, Shikha Bordia, Zheng Zhong, Charles Dognin, Maneesh Kumar Singh, and Mohit Bansal. 2020. Hover: A dataset for many-hop fact extraction and claim verification. In Findings of the Association for Computational Linguistics: EMNLP 2020, Online Event, 16-20 November 2020, volume EMNLP 2020 of Findings of ACL, pages 3441-3460. Association for Computational Linguistics.", + "Jiho Kim, Yeonsu Kwon, Yohan Jo, and Edward Choi. 2023a. KG-GPT: A general framework for reasoning on knowledge graphs using large language models. In Findings of the Association for Computational Linguistics: EMNLP 2023, Singapore, December 6-10, 2023, pages 9410-9421. Association for Computational Linguistics.", + "Jiho Kim, Sungjin Park, Yeonsu Kwon, Yohan Jo, James Thorne, and Edward Choi. 2023b. Factkg: Fact verification via reasoning on knowledge graphs. 
In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2023, Toronto, Canada, July 9-14, 2023, pages 16190-16206. Association for Computational Linguistics.", + "Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, and Ion Stoica. 2023. Efficient memory management for large language model serving with pagedattention. In Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles.", + "Jens Lehmann, Robert Isele, Max Jakob, Anja Jentzsch, Dimitris Kontokostas, Pablo N. Mendes, Sebastian Hellmann, Mohamed Morsey, Patrick van Kleef, Soren Auer, and Christian Bizer. 2015. Dbpedia - A large-scale, multilingual knowledge base extracted from wikipedia. Semantic Web, 6(2):167-195.", + "Qi Li, Heng Ji, and Liang Huang. 2013. Joint event extraction via structured prediction with global features." + ], + "bbox": [ + 510, + 85, + 884, + 920 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "5279", + "bbox": [ + 480, + 927, + 519, + 940 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics, ACL 2013, 4-9 August 2013, Sofia, Bulgaria, Volume 1: Long Papers, pages 73-82. The Association for Computer Linguistics.", + "Ilya Loshchilov and Frank Hutter. 2019. Decoupled weight decay regularization. In 7th International Conference on Learning Representations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenReview.net.", + "Linhao Luo, Yuan-Fang Li, Gholamreza Haffari, and Shirui Pan. 2024. Reasoning on graphs: Faithful and interpretable large language model reasoning. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net.", + "Meta. 2024. Build the future of ai with meta llama 3, 2024.", + "Makoto Miwa and Mohit Bansal. 2016. 
End-to-end relation extraction using LSTMs on sequences and tree structures. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1105-1116, Berlin, Germany. Association for Computational Linguistics.", + "OpenAI. 2024. Hello gpt-4o, 2024a.", + "Liangming Pan, Xiaobao Wu, Xinyuan Lu, Anh Tuan Luu, William Yang Wang, Min-Yen Kan, and Preslav Nakov. 2023. Fact-checking complex claims with program-guided reasoning. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2023, Toronto, Canada, July 9-14, 2023, pages 6981-7004. Association for Computational Linguistics.", + "Jungsoo Park, Sewon Min, Jaewoo Kang, Luke Zettle-moyer, and Hannaneh Hajishirzi. 2022. FaVIQ: FAct verification from information-seeking questions. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 5154-5166, Dublin, Ireland. Association for Computational Linguistics.", + "Qwen. 2024. Qwen2.5: A party of foundation models.", + "Tal Schuster, Adam Fisch, and Regina Barzilay. 2021. Get your vitamin C! robust fact verification with contrastive evidence. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 624-643, Online. Association for Computational Linguistics.", + "Jiashuo Sun, Chengjin Xu, Lumingyuan Tang, Saizhuo Wang, Chen Lin, Yeyun Gong, Lionel M. Ni, Heung-Yeung Shum, and Jian Guo. 2024. Think-on-graph: Deep and responsible reasoning of large language model on knowledge graph. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. Open-Review.net." + ], + "bbox": [ + 115, + 85, + 487, + 919 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "James Thorne and Andreas Vlachos. 2018. 
Automated fact checking: Task formulations, methods and future directions. In Proceedings of the 27th International Conference on Computational Linguistics, COLING 2018, Santa Fe, New Mexico, USA, August 20-26, 2018, pages 3346-3359. Association for Computational Linguistics.", + "James Thorne, Andreas Vlachos, Christos Christodoulopoulos, and Arpit Mittal. 2018. FEVER: a large-scale dataset for fact extraction and verification. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2018, New Orleans, Louisiana, USA, June 1-6, 2018, Volume 1 (Long Papers), pages 809-819. Association for Computational Linguistics.", + "Bailin Wang, Richard Shin, Xiaodong Liu, Oleksandr Polozov, and Matthew Richardson. 2020. RAT-SQL: Relation-aware schema encoding and linking for text-to-SQL parsers. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7567-7578, Online. Association for Computational Linguistics.", + "Haoran Wang and Kai Shu. 2023. Explainable claim verification via knowledge-grounded reasoning with large language models. In Findings of the Association for Computational Linguistics: EMNLP 2023, Singapore, December 6-10, 2023, pages 6288-6304. Association for Computational Linguistics.", + "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V. Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. 2023. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net.", + "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed H. Chi, Quoc V. Le, and Denny Zhou. 2022. Chain-of-thought prompting elicits reasoning in large language models. 
In Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2022, NeurIPS 2022, New Orleans, LA, USA, November 28 - December 9, 2022.", + "Wikipedia. 2025a. Levenshtein distance — Wikipedia, The Free Encyclopedia. Accessed: 14-February-2025.", + "Wikipedia. 2025b. Trie — Wikipedia, The Free Encyclopedia. [Online; accessed 9-February-2025].", + "Shitao Xiao, Zheng Liu, Peitian Zhang, and Niklas Muennighoff. 2023. C-pack: Packaged resources to advance general chinese embedding. Preprint, arXiv:2309.07597.", + "Jie Zhou, Ganqu Cui, Shengding Hu, Zhengyan Zhang, Cheng Yang, Zhiyuan Liu, Lifeng Wang, Changcheng Li, and Maosong Sun. 2020. Graph" + ], + "bbox": [ + 510, + 85, + 882, + 920 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "5280", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "neural networks: A review of methods and applications. AI Open, 1:57-81.", + "bbox": [ + 132, + 85, + 487, + 112 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Jie Zhou, Xu Han, Cheng Yang, Zhiyuan Liu, Lifeng Wang, Changcheng Li, and Maosong Sun. 2019. GEAR: graph-based evidence aggregating and reasoning for fact verification. In Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019, Florence, Italy, July 28- August 2, 2019, Volume 1: Long Papers, pages 892-901. Association for Computational Linguistics.", + "bbox": [ + 114, + 121, + 489, + 227 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "A Benchmark Datasets", + "text_level": 1, + "bbox": [ + 114, + 239, + 332, + 254 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/b55d997587224b02783b02b3b9127c03b061270dccee78a9fdf5a9ef8d856b89.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetSplitSupportRefuteNEITotal
FactKGTrain4272343644-86367
Dev64266840-132666
Test43984643-9041
Total5354755127-108674
HoverTrain110237148-18171
Dev20002000-4000
Test20002000-4000
Total1502311148-26171
FEVER OUSTrain4183527215224171291
Dev390834815017890
Test3372297315007845
Total4911533669424287026
", + "bbox": [ + 142, + 273, + 458, + 454 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/a843bca345b9237b3cc9f99db24289489479dff382b844d530adf3e14e806541.jpg", + "table_caption": [ + "Table 4: Basic statistics of Hover, FEVERIOUS, and FactKG Datasets" + ], + "table_footnote": [], + "table_body": "
TypeWrittenColloquialTotal
ModelPresup
One-hop2,10615,9341,58019,530
Conjunction20,58715,90860237,097
Existence2804,0604,8329,172
Multi-hop10,23916,42060327,262
Negation1,34012,4661,80715,613
Total34,46264,7889,424108,674
", + "bbox": [ + 137, + 521, + 463, + 648 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Table 5: Dataset statistics of FACTKG for claim types.", + "bbox": [ + 114, + 659, + 485, + 674 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "FEVEROUS. (Aly et al., 2021) FEVEROUS is a fact verification dataset comprising 87,026 verified claims sourced from Wikipedia (Table 4). Each claim is accompanied by evidence in the form of sentences and/or cells from tables, along with a label indicating whether the evidence supports, refutes, or does not provide enough information to verify the claim. The dataset includes metadata like annotator actions and challenge types, designed to minimize biases. It is used for tasks that involve verifying claims against both unstructured (textual) and structured (tabular) information.", + "bbox": [ + 112, + 692, + 487, + 884 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "HoVer. (Jiang et al., 2020) HoVer is a dataset containing 26,171 samples, designed for open-domain,", + "bbox": [ + 112, + 889, + 489, + 921 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "multi-hop fact extraction and claim verification, using the Wikipedia corpus. Claims in HoVer are adapted from question-answer pairs and require the extraction of facts from multiple (up to four) Wikipedia articles to determine if the claim is supported or not supported. The complexity of HoVer, particularly in the 3/4-hop claims, is further amplified because these claims are often expressed across multiple sentences, which introduces challenges related to long-range dependencies, such as accurately resolving coreferences.", + "bbox": [ + 507, + 84, + 884, + 261 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "FactKG. (Kim et al., 2023b) FactKG is a challenging fact verification dataset comprised of 108,674 samples, designed to rigorously test models' abilities to reason over structured knowledge represented in a knowledge graph. 
Its difficulty arises from a combination of factors. First, it demands proficiency in five distinct reasoning types: one-hop (single relationship), conjunction (combining multiple relationships), existence (verifying entity/relationship presence), multi-hop (traversing multiple relationships), and, crucially, negation (reasoning about the absence of relationships). Second, FactKG incorporates linguistic diversity, encompassing both formal, written-style claims and more challenging colloquial expressions, requiring models to handle paraphrasing, idiomatic language, and less direct wording. Third, instead of unstructured text, FactKG utilizes the DBpedia knowledge graph (derived from Wikipedia), necessitating that models correctly link entities and relations mentioned in the claim to the graph's nodes and edges, and perform complex path-based reasoning, especially for multi-hop claims. The addition of a weakly semantic knowledge source, and cross-style evaluation to assess generalizability, further contributes to the difficulty of this dataset. These features collectively make FactKG significantly more complex than datasets relying solely on unstructured text for verification. Detailed statistics of this dataset can be found in table 5. Readers can refer to table 4 for the overall basic statistics of all employed datasets for ClaimPKG.", + "bbox": [ + 507, + 263, + 884, + 781 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "B Implementation Details", + "text_level": 1, + "bbox": [ + 509, + 804, + 749, + 822 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We conducted all experiments on a DGX server with 8 NVIDIA A100 GPUs. The General LLM is hosted within the vLLM framework (Kwon et al., 2023). 
Below, we detail the training process of the Specialized LLM.", + "bbox": [ + 507, + 841, + 882, + 921 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "5281", + "bbox": [ + 480, + 928, + 517, + 940 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "B.1 Specialized LLM Training Data Annotation", + "text_level": 1, + "bbox": [ + 114, + 84, + 413, + 115 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "To tailor the specialized model for improved comprehension and processing of KG-specific data, we construct a dedicated dataset for training, leveraging the provided version of FactKG (Kim et al., 2023b) (illustrated in Figure 4). The annotation process consists of the following steps:", + "bbox": [ + 112, + 122, + 489, + 219 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Claim: A musical artist, whose music is Post-metal, played with the band Twilight and performs for Mamiffer.", + "bbox": [ + 121, + 235, + 478, + 259 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Entities: [Mamiffer, Post-metal, Twilight_(band)] Evidence:", + "bbox": [ + 124, + 260, + 431, + 282 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Twilight_(band), (associatedMusicalArtist, associatedBand), Mamiffer)", + "- Twilight_(band), (associatedMusicalArtist, genre), Postmetal" + ], + "bbox": [ + 122, + 284, + 478, + 331 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Figure 4: Provided data of FactKG", + "bbox": [ + 179, + 354, + 418, + 368 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Preprocessing: All entities and relations from FactKG, including the train, development, and test datasets, as well as the DBPedia KG, are normalized by splitting concatenated words to ensure consistency.", + "bbox": [ + 112, + 395, + 489, + 475 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Graph Construction: Using the provided evidence information from FactKG, we 
observe that while evidence may not explicitly exist in the graph, it accurately captures the underlying structure of the claim. Accordingly, for triplets with relation paths exceeding one hop, we decompose them into multiple triplets while introducing a placeholder entity, denoted as \"unknown_[index]\", to preserve structural integrity. This placeholder represents an ambiguous or missing entity that requires identification. For instance, the triplet: \"Twilight_(band), (~associatedMusicalArtist, associatedBand), Mamiffer\" is transformed into the following triplets: \"Twilight_(band), associatedBand, unknown_1\" and \"unknown_1\", associatedMusicalArtist, Mamiffer\". Additionally, entities present in the Entities set but absent from the graph are also introduced as unknown_[index]. To further enhance graph completeness, GPT-4 is employed to verify whether entities from the Entities set are explicitly mentioned in the claim. This ensures that relevant entities are either linked to existing nodes or added as placeholders. The automatic entity verification process is conducted using a prompt template, as shown in Figure 8. Additionally, the symbol \"\\~\"", + "bbox": [ + 112, + 486, + 489, + 920 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "is retained to denote inverse relations. 
Random shuffle among constructed triplets but preserving the sequential order of “unknown” entity is applied to improve the robustness of the model being trained.", + "bbox": [ + 507, + 84, + 882, + 162 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Generated Pseudo-Subgraph: The transformed claim results in the pseudo-subgraph illustrated in Figure 5.", + "bbox": [ + 507, + 173, + 882, + 221 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Pseudo Subgraph Label:", + "bbox": [ + 517, + 237, + 680, + 250 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Twilight (band), associated musical artist, unknown_0", + "- unknown_0, associated band, Mamiffer", + "- unknown_0, genre, Post-metal" + ], + "bbox": [ + 515, + 250, + 858, + 286 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Figure 5: Pseudo-Subgraph label as the output of the data annotation process.", + "bbox": [ + 507, + 310, + 882, + 338 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "B.2 Training and Hyperparameter Settings of the Specialized LLM", + "text_level": 1, + "bbox": [ + 509, + 365, + 882, + 397 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/7d37f1c7c7d6b89c99c8673233300e4d431d4973cce33808f4428a06a932ca3e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ParameterValue
BackboneLlama-3-Base
Qwen-2.5-Base
Learning Rate1e-5
Training Epoch1
Training Steps128
OptimizerAdamW
", + "bbox": [ + 564, + 413, + 826, + 539 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Table 6: Hyperparameters of the Specialized LLM in ClaimPKG.", + "bbox": [ + 507, + 549, + 882, + 577 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The training configurations for the Specialized LLM are summarized in Table 6. The model training is based on the Base version of Llama-3 (Llama3.2-1B, Llama-3.2-3B, Llama-3.1-8B) and Qwen 2.5 (Qwen-2.5-1.5B, Qwen-2.5-3B, Qwen-2.5-7B). These base models are selected to preserve their inherent linguistic capabilities while facilitating optimal adaptation to domain-specific tasks during fine-tuning. The training process employs the annotated dataset described in Section B.1 and is conducted over one single epoch using the AdamW (Loshchilov and Hutter, 2019) optimizer. This strategy enables the generation of multiple variants of the Specialized LLM, ensuring task-specific adaptation while maintaining robust generalization across diverse linguistic structures.", + "bbox": [ + 507, + 594, + 884, + 852 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "C Additional Experimental Results", + "text_level": 1, + "bbox": [ + 507, + 864, + 831, + 879 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In this section, we present additional experimental results through a systematic analysis on the FactKG", + "bbox": [ + 507, + 889, + 882, + 921 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "5282", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "development set with 2000 randomly sampled data points across claim categories. First, we provide a more detailed explanation of the evaluation metrics used. Second, we examine the performance of the specialized LLM by varying the beam size and backbone model size. 
Third, we analyze the Subgraph Retrieval by adjusting the hyperparameters $k_{1}$ and $k_{2}$ as explained in the 4.3, which influence the diversity and correctness of the retrieved subgraphs.", + "bbox": [ + 112, + 84, + 492, + 247 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "C.1 Metrics", + "text_level": 1, + "bbox": [ + 112, + 256, + 226, + 269 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The specialized LLM's generation of pseudosubgraphs plays a crucial role in ClaimPKG's performance. We evaluated the specialized LLM's performance using four metrics: claim structure coverage (coverage), entity correctness (correctness), unique triplet count, and average end-to-end accuracy. While the final metric is straightforward, the three former metrics can be described as follows:", + "bbox": [ + 112, + 277, + 489, + 405 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "(1) Structure coverage quantifies the alignment between the LLM-generated pseudo-graph and the reference claim graph in the FactKG dataset. Specifically, for a generated graph $P$ and reference graph $Q$ , coverage is computed as:", + "bbox": [ + 112, + 409, + 489, + 491 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\nc o v e r a g e (P, Q) = \\frac {\\# (P . t r i p l e t s \\cap Q . t r i p l e t s)}{\\# (Q . t r i p l e t s)}\n$$\n", + "text_format": "latex", + "bbox": [ + 112, + 500, + 485, + 535 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "(2) Entity correctness quantifies the correctness of a claim's extracted entities, i.e., whether these entities exist in the KG. Specifically, for a generated graph $P$ and a knowledge graph $\\mathcal{G}$ , correctness is computed as:", + "bbox": [ + 112, + 544, + 489, + 625 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {c o r r e c t n e s s} (P, \\mathcal {G}) = \\frac {\\# (P . e n i t i e s \\cap \\mathcal {G} . e n t i t i e s)}{\\# (P . 
e n t i t i e s)}\n$$\n", + "text_format": "latex", + "bbox": [ + 112, + 636, + 495, + 670 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "(3) Unique triplet count measures the diversity of generated graph structures, with higher counts potentially enabling better subgraph retrieval through increased coverage of possible relationships.", + "bbox": [ + 112, + 680, + 489, + 744 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "C.2 Different Beam Sizes of the Specialized LLM", + "text_level": 1, + "bbox": [ + 112, + 755, + 470, + 785 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "To evaluate the LLM's decoding strategy across different beam sizes, we utilized three average accuracy, structure coverage and unique triplet count as metrics. Table 7 details the impact of the number of beam sizes on the previously mentioned metrics on the FactKG dev set. Both Llama and Qwen models demonstrate consistent improvements in average performance and claim structure coverage", + "bbox": [ + 112, + 791, + 489, + 921 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/1f537c4a4fe0d1d8f1cdfa130bbd1c1ae8bc7b9c0e025ff93aa1d1f0bb180cb6.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
BackboneBeam SizeAverage AccuracyStructure CoverageUnique Triplets
Llama-3BBeam 179.7876.514.48
Beam 381.8081.276.44
Beam 582.0483.028.39
Beam 1082.3384.6113.83
Qwen-3BBeam 178.8477.953.82
Beam 380.7682.665.16
Beam 581.4183.586.73
Beam 1082.1984.629.58
", + "bbox": [ + 510, + 82, + 884, + 233 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/50ffcfd1ce834cb1fb16fb167f2106725eaed26e8420039ebfc0fc3271131bc6.jpg", + "table_caption": [ + "Table 7: Performance metrics for different models on FactKG dev set." + ], + "table_footnote": [], + "table_body": "
Beam SizeGen Graph (s)Retrieve (s)Reason (s)
beam 11.020.242.19
beam 32.160.382.22
beam 53.520.502.33
beam 1035.181.012.88
", + "bbox": [ + 510, + 287, + 882, + 373 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Table 8: Computing time for different beam sizes on FactKG dev set.", + "bbox": [ + 507, + 382, + 882, + 411 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "as beam size increases from 1 to 10. At beam size 10, Llama achieves $84.61\\%$ coverage while Qwen reaches $84.62\\%$ , showing comparable performance at higher beam sizes. The unique triplet count shows more pronounced growth with larger beam sizes, with Llama generating 13.83 unique triplets and Qwen 9.58 triplets at beam size 10.", + "bbox": [ + 505, + 437, + 882, + 549 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "However, table 8 shows this improved performance comes with significant computational overhead. Table 8 details on the time taken for generating pseudo-graphs, retrieving sub-graphs and reasoning with retrieved evidence. Most notably, while the time required for retrieving sub-graphs and reasoning with evidence only increase marginally as the beam size increase, this figure for pseudo-graph generation increases dramatically as the beam size goes to 10, from 1.02s at beam size 1 to 35.18s at beam size 10 - a $34.5 \\times$ increase. Based on this measurement, in our official framework we select beam size $= 5$ to balance the performance gain and computational costs.", + "bbox": [ + 507, + 551, + 882, + 776 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "C.3 Different Model Sizes of the Specialized LLM", + "text_level": 1, + "bbox": [ + 507, + 787, + 870, + 816 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "To evaluate how model size affects performance, we compare different variants of Llama and Qwen models ranging from 1B to 8B parameters. 
Table 9 presents the performance on the FactKG dev set across three key metrics: average performance, structure coverage, and unique triplets generated,", + "bbox": [ + 507, + 824, + 884, + 921 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "5283", + "bbox": [ + 480, + 927, + 519, + 940 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "which was explained previously.", + "bbox": [ + 112, + 84, + 356, + 99 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/4c6128eb3c9daad179b5f54f1a032ab8bf7e5de180c8216a7b8f4717c2aaa9a2.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
BackboneAverage AccuracyStructure CoverageUnique Triplets
Llama - 1B80.2678.988.97
Llama - 3B82.0483.028.39
Llama - 8B82.6382.849.34
Qwen - 1.5B80.4881.346.58
Qwen - 3B81.4183.586.73
Qwen - 7B81.7982.887.05
", + "bbox": [ + 132, + 112, + 468, + 246 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "For both model families, we observe improvements in performance as model size increases, though with different patterns. The Llama family shows more notable gains, with average performance increasing from $80.26\\%$ (1B) to $82.63\\%$ (8B), while Qwen demonstrates more modest improvements from $80.48\\%$ (1.5B) to $81.79\\%$ (7B). Structure coverage peaks with the 3B variants for both families - Llama-3B achieving $83.02\\%$ and Qwen-3B reaching $83.58\\%$ . The models keep the increasing trend in their triplet generation patterns: Llama maintains relatively stable unique triplet counts (8.39 - 9.34) across sizes, while the figures for Qwen are (6.58 - 7.05) as the model size increases.", + "bbox": [ + 112, + 302, + 489, + 542 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Overall, scaling to larger models shows slight improvements while increasing computational requirements. Based on these results, we select 3B variants of both model families in our official implementation, which offer an optimal balance of performance and model size, with Llama-3B and Qwen-3B showing comparable effectiveness across all metrics.", + "bbox": [ + 112, + 544, + 489, + 671 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "C.4 Different Hyperparameters of Subgraph Retrieval", + "text_level": 1, + "bbox": [ + 112, + 684, + 482, + 715 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/667e6e99399c7756b9a7db1758e3d845a66740b40ed72846c0076e18439f73fc.jpg", + "table_caption": [ + "Table 9: Performance metrics for different models on the FactKG dev set." + ], + "table_footnote": [], + "table_body": "
Hyper ParamsAverage AccuracyUnique Triplets
k1=5;k2=382.0011.42
k1=3;k2=182.048.39
k1=1;k2=181.873.58
", + "bbox": [ + 169, + 732, + 431, + 816 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "To assess the impact of different hyperparameters in the subgraph retrieval algorithm on overall", + "bbox": [ + 112, + 889, + 489, + 921 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "performance, we systematically vary these hyperparameters while keeping the specialized LLM and general LLM fixed as Llama-3.2-3B and Llama-3.3-70B, respectively. Table 10 presents the performance across two key metrics: average accuracy and the number of unique triplets generated.", + "bbox": [ + 507, + 84, + 884, + 180 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "The results indicate that increasing $k_{1}$ and $k_{2}$ leads to a higher number of unique triplets, suggesting greater diversity in retrieved claims. However, this increase does not consistently translate to overall performance gains, which fall in the range of 81.87 - 82.00. Notably, performance peaks at $k_{1} = 3$ and $k_{2} = 1$ , suggesting that a more focused retrieval strategy is sufficient to achieve optimal performance, whereas excessively high $k$ values may introduce noise or irrelevant information. Based on these results, we select $k_{1} = 3$ and $k_{2} = 1$ in our official implementation, which balancing between information discovery and computing required.", + "bbox": [ + 507, + 181, + 884, + 406 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "C.5 Different Methods for Relation Scoring Function", + "text_level": 1, + "bbox": [ + 507, + 416, + 868, + 448 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/8610827d03ca8d49eee8ca854a131213d4b26795f9a93f91e26eb477e672e274.jpg", + "table_caption": [ + "Table 10: Performance of different subgraph retrieval configurations $k_{1}$ and $k_{2}$ with Llama-3.2-3B + Llama-3.3-70B on the FactKG dev set." + ], + "table_footnote": [], + "table_body": "
MethodAverage Accuracy
Embedding Based84.64
Rerank Based84.73
Fuzzy Matching82.19
Exact Matching81.57
", + "bbox": [ + 547, + 463, + 845, + 554 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table 11: Performance of different scoring approach of the Subgraph Retrieval on the FactKG test set", + "bbox": [ + 507, + 563, + 882, + 593 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "To assess the impact of different scoring mechanisms on performance, we vary the scoring function and evaluate the test set of FactKG while fix the Specialized LLM and the General LLM. Specifically, we explore multiple strategies for the Relation Scoring Function (Sim), as described in Section 4.3, incorporating diverse techniques such as embedding-based retrieval, reranking, fuzzy text matching (Wikipedia, 2025a), and exact matching.", + "bbox": [ + 507, + 612, + 884, + 756 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "For embedding-based and reranking approaches, we employ state-of-the-art pre-trained models, namely BGE-Large-EN-v1.5² and BGE-Reranker-Large³, as provided by (Xiao et al., 2023). Experimental results indicate that deep learning-based methods, such as embedding and reranking, achieve superior performance, with accuracy scores of 84.64 and 84.56, respectively. In contrast,", + "bbox": [ + 507, + 757, + 885, + 885 + ], + "page_idx": 13 + }, + { + "type": "page_footnote", + "text": "$^{2}$ https://huggingface.co/BAAI/bge-large-en-v1.5", + "bbox": [ + 529, + 892, + 828, + 906 + ], + "page_idx": 13 + }, + { + "type": "page_footnote", + "text": "3https://huggingface.co/BAAI/bge-reranker-large", + "bbox": [ + 529, + 906, + 831, + 920 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "5284", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "text-matching-based methods yield lower accuracy, with fuzzy matching and exact matching scoring 82.19 and 81.57, respectively. 
These findings highlight the effectiveness of deep learning-based approaches.", + "bbox": [ + 112, + 84, + 489, + 164 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We recommend embedding-based retrieval as it enables pre-indexing of corpus relations. This allows precomputation of relation embeddings and requires encoding only the query relation for new Pseudo Subgraphs, eliminating the need to re-encode existing knowledge graph relations during inference.", + "bbox": [ + 112, + 165, + 489, + 275 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "D Algorithm Details", + "text_level": 1, + "bbox": [ + 112, + 288, + 309, + 305 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The detailed implementation of the Entity Trie-constrained decoding algorithm is provided as the pseudo-code in Algorithm 1 and the Algorithm 2 details the implementation of the Subgraph Retrieval.", + "bbox": [ + 112, + 313, + 489, + 394 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "E Case Study", + "text_level": 1, + "bbox": [ + 112, + 405, + 250, + 422 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We present the case study results of ClaimPKG on the FactKG dataset in Tables 12 and 13. Each table includes the claim $c$ , pseudo-subgraphs $P_{s}$ , retrieved subgraphs $S_{c}$ , final justification $j$ , and verdict $v$ . Table 12 showcases correctly predicted examples, demonstrating ClaimPKG's ability to accurately capture claim structures and generate well-grounded justifications. Conversely, Table 13 highlights incorrectly predicted cases of two error types as detailed in Section 5.3. The first two examples illustrate Reasoning Errors, while the third represents a Retrieval Error. 
These insights serve as a foundation for future improvements, emphasizing key areas for future refinement.", + "bbox": [ + 112, + 430, + 489, + 653 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "F Prompt Templates", + "text_level": 1, + "bbox": [ + 112, + 667, + 310, + 684 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "For better reproducibility, we present all prompt templates in the appendix. Below is a quick reference list outlining the prompt templates and their usages:", + "bbox": [ + 112, + 692, + 489, + 756 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Figure 6: Prompt the General LLM to reason on the input claim and retrieved subgraphs to produce justification and final verdict.", + "Figure 7: Few-shot prompts the General LLM to generate a Pseudo Subgraph with provided examples.", + "- Figure 8: Annotate the inside and outside entities of the input claim for the training dataset." + ], + "bbox": [ + 136, + 764, + 487, + 910 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "5285", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 14 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Algorithm 1: LLM Decoding with Entity-Trie Constraint" + ], + "code_body": "Input:Specialized LLM, Input claim $c$ Entity TriE T \nOutput:Pseudo-Subgraph P \nInitialize: $\\mathcal{P}\\gets \\emptyset$ // Initialize pseudo subgraph \n $h_0\\gets$ InitializeHiddenStates(); constrained $\\leftarrow$ False; \nFunction ConstrainedDecoding(LLM,c,T): \nwhile True do \n $p_t,h_t\\gets LLM(\\mathcal{P},c,h_{t - 1})$ // Compute token probabilities and update hidden states if constrained then \nprefix $\\leftarrow$ ExtractPrefix(P); // Retrieve tokens from last unclosed to the last allowed $\\leftarrow$ T.lookup(prefix);// Retrieve allowed tokens from valid continuations in T $p_t\\gets$ MaskProb $(p_t,$ allowed); // Impose probabilities of invalid tokens to be 0 
\nnew_token $\\leftarrow$ arg max $p_t$ . // Select new token for P \n $\\mathcal{P}\\gets \\mathcal{P}\\cup \\{\\text{new_token}\\}$ . if new_token $= = < e>$ then $\\sqsubset$ constrained $\\leftarrow$ True; if new_token $= = < / e>$ then $\\sqsubset$ constrained $\\leftarrow$ False; if new_token $= = EOS$ then $\\sqsubset$ break; \nreturn P", + "guess_lang": "txt", + "bbox": [ + 129, + 143, + 855, + 424 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "GENERAL REASONING", + "text_level": 1, + "bbox": [ + 127, + 526, + 299, + 538 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Task: Verify whether the fact in the given sentence is true or false based on the provided graph triplets. Use only the information in the triplets for verification.", + "bbox": [ + 127, + 550, + 868, + 587 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The triplets provided represent all relevant knowledge that can be retrieved.", + "- If the fact is a negation and the triplets do not include the fact, consider the fact as true.", + "- Ignore questions and verify only the factual assertion within them. For example, in the question \"When was Daniel Martínez (politician) a leader of Montevideo?\", focusing on verifying the assertion \"Daniel Martínez (politician) a leader of Montevideo\".", + "- Interpret the “ $\\sim$ ” symbol in triplets as indicating a reverse relationship. For example: “A $\\sim$ south of B” means “B is north of A”." 
+ ], + "bbox": [ + 124, + 598, + 870, + 681 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Response Format:", + "text_level": 1, + "bbox": [ + 127, + 692, + 247, + 703 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Provide your response in the following JSON format without any additional explanations: \n{ \"rationale\": \"A concise explanation for your decision\", \"verdict\": \"true/false as the JSON value\" }", + "bbox": [ + 126, + 705, + 670, + 764 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Triplets:", + "text_level": 1, + "bbox": [ + 127, + 775, + 184, + 788 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "{triplets}", + "bbox": [ + 127, + 788, + 196, + 800 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Claim:", + "text_level": 1, + "bbox": [ + 127, + 810, + 174, + 822 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "{claim}", + "bbox": [ + 127, + 822, + 189, + 835 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Figure 6: Prompt template for the general LLM to perform reasoning", + "bbox": [ + 263, + 862, + 732, + 877 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "5286", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 15 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 2: Subgraph Retrieval" + ], + "code_body": "Input: Knowledge graph $\\mathcal{G}$ Pseudo Subgraph List $P_{c}$ Top $k_{1}$ Candidate Unknown Entities, Top $k_{2}$ Complete Triplets \nOutput:Combined subgraph $S_{c}$ \nFunction SubgraphRetrieval $(\\mathcal{G},\\mathcal{P}_c,k_1,k_2)$ .. \n $S\\gets \\emptyset$ . \nforeach $\\mathcal{P}\\in \\mathcal{P}_c$ do \n $S\\gets S\\cup$ RetrieveSingleSubgraph $(\\mathcal{G},\\mathcal{P},k_1,k_2)$ // Process each pseudo subgraph \nreturn JoinSubgraphs $(S)$ // Combine subgraphs \nFunction RetrieveSingleSubgraph $(\\mathcal{G},\\mathcal{P},k_1,k_2)$ .. 
\n $(T_{comp},T_{inc})\\leftarrow$ CategorizeTriplets( $\\mathcal{P}$ );//Split into complete/incomplete triplets \n $S_{inc}\\gets$ RetrieveIncomplete $(\\mathcal{G},T_{inc},k_1)$ . \n $S_{comp}\\gets$ RetrieveComplete $(\\mathcal{G},T_{comp},k_1,k_2)$ . \nreturn $S_{inc}\\cup S_{comp}$ \nFunction RetrieveIncomplete $(\\mathcal{G},T_{inc},k_1)$ .. \n $S\\gets \\emptyset$ . \n $G\\gets$ GroupTripletsByUnknown $(T_{inc})$ //Group by unknown entity \nforeach $g\\in G$ do \n $(E_u,R_u)\\leftarrow$ ExtractPseudoStructure $(g)$ //Extract entities and relations associated to unknown entity \n $C\\gets \\emptyset$ . \nforeach $(e,r)\\in (E_u,R_u)$ do \n $(C_e,\\mathrm{scores})\\leftarrow$ GetCandidatesAndScores $(G,e,r)$ . \n $C\\gets C\\cup \\{(C_e,\\mathrm{scores})\\}$ . \n $C =$ AggregateGlobalScore(C); //Aggregate candidate scores globally $C^{*}\\gets$ RankTopKCandidates $(C,k_{1})$ //Select top- $k_{1}$ candidates \n $S\\gets S\\cup$ GetTriplets $(C^{*},g)$ . \nreturn $S$ \nFunction GetCandidatesAndScores $(G,e,r)$ .. \n $R_{act}\\gets$ RetrieveActualConnectedRelations $(G,e)$ . \n $E_{act}\\gets$ RetrieveActualConnectedEntities $(G,e)$ . \n $r\\_ score s\\gets$ RelationScore(r, $R_{act}$ . \n $S\\gets \\emptyset$ . \nforeach $e^{\\prime}\\in E_{act}$ do \n $s\\gets$ MaxRelatedRelationScores(e',r Scores); \n $S\\gets S\\cup \\{(e^{\\prime},s)\\}$ . \nreturn $S$ // Score connected entities \nFunction AggregateGlobalScore $(C)$ .. \n//Calculate new scores and reassign for each $C\\_ e$ \nforeach $(C_e,\\mathrm{scores})\\in C$ do \nforeach $(c,s)\\in (C_e,\\mathrm{scores})$ do \n $s\\gets$ Sum([s'(c) for $(C',s')$ in $C$ if $c\\in C')$ \nreturn $C$ . \nFunction RankTopKCandidates $(C,k_1)$ .. \n $C^{*}\\gets \\emptyset$ . \nforeach $(C_e,\\mathrm{scores})\\in C$ do \n $C_e^*\\gets$ TopKCandidates $(C_e,\\mathrm{scores},k_1)$ . \n $C^{*}\\gets C^{*}\\cup C_{e}^{*}$ . 
\nreturn $C^{*}$ //Rank candidates per unknown entity \nFunction RetrieveComplete $(G,T_{comp},k_1,k_2)$ .. \n $S\\gets \\emptyset$ . \nforeach $(h,r,t)\\in T_{comp}$ do if ExistConnection $(h,t)$ then $T\\gets$ GetTripletsWithHeadAndTail $(G,h,t)$ scores $\\leftarrow \\emptyset$ . foreach $t'\\in T$ do scores $\\leftarrow$ scores U RelationScore(r, $t'$ .relation); $S\\gets S\\cup$ TopK(T,scores, $k_{2}$ ); //Rank connected triplets else $S_{1}\\gets$ RetrieveIncomplete(G,[(unknown,r,t)], $k_{1}$ ) $S_{2}\\gets$ RetrieveIncomplete(G,[(h,r,unknown)], $k_{1}$ ) $S\\gets S\\cup (S_1\\cup S_2)$ // Handle missing connection", + "bbox": [ + 129, + 105, + 855, + 909 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "5287", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/62617df14bd514789f8d69ce3158412a62ad76bde340f8c42a83081a3750257c.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Claim cPseudo-Subgraphs PcRetrieved Subgraphs ScJustification j; Verdict v
The 11th Mississippi Infantry Monument is not in Gettysburg, Pennsylvania.P1: 11th Mississippi Infantry Monument | location | Gettysburg, Pennsylvania(11th Mississippi Infantry Monument, municipality, Gettysburg, Pennsylvania)Justification: The triplets provided include '11th Mississippi Infantry Monument', 'municipality', 'Gettysburg, Pennsylvania' and '11th Mississippi Infantry Monument', 'state', 'Pennsylvania', indicating the monument's location as Gettysburg, Pennsylvania.
P2: Gettysburg, Pennsylvania | -location | 11th Mississippi Infantry Monument(11th Mississippi Infantry Monument, website, 11th mississippianemy monument)
11th Mississippi Infantry Monument | location | unknown_0(U.S. Route 140, location, Gettysburg)
P3: 11th Mississippi Infantry Monument | location | Gettysburg(Pennsylvania Route 34, location, Gettysburg)
11th Mississippi Infantry Monument | location | Unknown_0(U.S. Route 30 in Pennsylvania, location, Gettysburg)
P4: 11th Mississippi Infantry Monument | location | Pennsylvania(11th Mississippi Infantry Monument, state, Pennsylvania)
11th Mississippi Infantry Monument | is part of military conflict | Gettysburg(11th Mississippi Infantry Monument, region, Adams County, Pennsylvania)
11th Mississippi Infantry Monument | location | Pennsylvania(11th Mississippi Infantry Monument, country, United States)
11th Mississippi Infantry Monument | location | Unknown_0(11th Mississippi Infantry Monument, category, Contributing property)
P5: 11th Mississippi Infantry Monument | location | Gettysburg, Pennsylvania(Lutheran Theological Seminary at Gettysburg, city, Gettysburg)
(11th Mississippi Infantry Monument, range, Semi-nary Ridge)Verdict: False
It was released in Hardcover by J.V. JonesP1: Hardcover | -media type | unknown_0(Hardcover, -media type, A Fortress of Grey Ice)Justification: The triplets show that 'A Fortress of Grey Ice' is a book by 'J.V. Jones' and it is of 'Hardcover' media type, which supports the claim.
unknown_0 | author | J.V. Jones(Hardcover, -media type, A Clockwork Orange)
P2: unknown_0 | author | J.V. Jones(Hardcover, -media type, The Bell Curve)
unknown_0 | media type | Hardcover(A Fortress of Grey Ice, author, J.V. Jones)
unknown_0 | -genre | unknown_1(A Cavern of Black Ice, author, J.V. Jones)
P3: J.V. Jones | -author | unknown_0(The Barbed Coil, author, J.V. Jones)
unknown_0 | media type | Hardcover(Hardcover, -media type, The Andromeda Strain)
P4: Hardcover | -media type | J.V. Jones(1963, -date of birth, J.V. Jones)
J.V. Jones | -author | unknown_0(Liverpool, England, -place of birth, J.V. Jones)
unknown_0 | previous work | unknown_1
P5: Hardcover | -media type | unknown_0
unknown_0 | author | J.V. Jones
A person was born in the County of Ty-rol and later died in Austria which is led by Doris Bures.P1: Austria | leader | Doris Bures(Austria, leader, Doris Bures)Justification: The triplets show that Kurt Schuschnigg, Alfons Gorbach, and Josef Hellensteiner were born in the County of Tyrol and died in Austria. Additionally, Doris Bures is the leader of Austria.
Austria | -death place | unknown_0(Austria, -death place, Hans Hörberg)
unknown_0 | death place | County of Tyrol(Austria, -death place, Edgar Meyer (painter) )
P2: Austria | leader name | Doris Bures(Austria, -death place, Rakhat Aliyev)
Austria | -death place | unknown_0(Ahn Hörberg, death place, County of Tyrol)
unknown_0 | death place | Austria(Edgar Meyer (painter), death place, County of Tyrol)
unknown_0 | successor | Doris Bures(Friedrich Hasenöhr, death place, County of Tyrol)
P3: County of Tyrol | -birth place | unknown_0(County of Tyrol, -birth place, Kurt Schuschnigg)
unknown_0 | death place | Austria(Sky, death place, Alfons Gorbach)
unknown_0 | successor | Doris Bures(County of Tyrol, -birth place, Josef Hellensteiner)
P4: Doris Bures | -leader | Austria(Kurt Schuschnigg, death place, Austria)
Austria | -death place | unknown_0(Alfons Gorbach, death place, Austria)
unknown_0 | birth place | County of Tyrol(Josef Hellensteiner, death place, Austria)
P5: Austria | leader | Doris Bures(Barbara Prammer, successor, Doris Bures)
Austria | -death place | unknown_0(Werner Faymann, successor, Doris Bures)
unknown_0 | birth place | County of Tyrol(Alois Stöger, -successor, Doris Bures)
The New Britain Transit is the operat-ing organisation of Agra Airport which is located in India and has the location identifier VIAG.P1: Agra Airport | location | New Britain Transit(Agra Airport, location, India)Justification: The operator of Agra Airport is listed as 'Indian Air Force' and 'Airports Authority of India', not 'New Britain Transit', which operates 'Connecticut Transit New Britain and Bris-tol'
Agra Airport | location | India(Agra Airport, location, Uttar Pradesh)
Agra Airport | faa | VIAG(Agra Airport, location, Agra)
P2: Agra Airport | location | India(Connecticut Transit New Britain and Bristol, oper-ator, New Britain Transit)
Agra Airport | operator | New Britain Transit(Agra Airport, cao location identifier, VIAG)
Agra Airport | faa | VIAG(Agra Airport, operator, Airports Authority of India)
Agra Airport | faa | VIAG(Agra Airport, elevation, 167.94)Verdict: False
P3: Agra Airport | location | India
Agra Airport | operator | New Britain Transit
Agra Airport | location | India
Agra Airport | faa | VIAG
", + "bbox": [ + 114, + 130, + 884, + 843 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 12: Case study of ClaimPKG on the FactKG dataset with correctly predicted samples.", + "bbox": [ + 186, + 853, + 808, + 868 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "5288", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/217bb3327d615bb082f158c5d98c1b78a369f72661768f72a57d65a2e27c788b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Claim cPseudo-Subgraphs PcRetrieved Subgraphs ScJustification j; Verdict v
When did Indian Air Force operate Agra Airport?P1: Agra Airport || operator || Indian Air Force(Agra Airport, operator, Indian Air Force)Justification: The triplets do not provide information about the specific time period when the Indian Air Force operated Agra Airport. However, they do confirm that the Indian Air Force is the operator of Agra Airport, implying that the operation is either current or has occurred at some point, but the exact timing is not specified. Verdict: False
P2: Indian Air Force || operator || Agra Airport(Agra Airport, runway length, 2743.0)
Agra Airport || ~runway designation || unknown_0(Agra Airport, runway length, 1818.0)
Agra Airport || ~runway designation || Indian Air Force(Agra Airport, location, India)
Agra Airport || ~location || unknown_0(Agra Airport, location, Uttar Pradesh)
unknown_0 || operator || Indian Air Force(Agra Airport, location, Agra)
P4: Agra Airport || operator || Indian Air Force(Bhuj Airport, operator, Indian Air Force)
P5: Agra Airport || operator || Indian Air Force(Bhuj Rudra Mata Air Force Base, operator, Indian Air Force)
(Car Nicobar Air Force Base, operator, Indian Air Force)
I imagined that Lady Anne Monson not lived in the Cape Verde whose capital was Winchester.P1: Cape Verde || capital || Winchester(Cape Verde, capital, Praia)Justification: The capital of Cape Verde is Praia, not Winchester. Winchester is the capital of Wessex and the Kingdom of England. Lady Anne Monson's residence is listed as India, and her birthplace is listed as Dar-lington and the Kingdom of England, with no mention of Cape Verde as her residence. Verdict: True
Cape Verde || ~residence || Lady Anne Monson(Cape Verde, currency, Cape Verdean esculo)
P2: Lady Anne Monson || residence || Cape Verde(Cape Verde, ~headquarter, Movement for Democracy (Cape Verde))
Cape Verde || capital || Winchester(Wessex, capital, Winchester)
P3: Anne Monzon || residence || Cape Verde(Kingdom of England, capital, Winchester)
Cape Verde || capital || Winchester(River Itchen, Hampshire, city, Winchester)
P4: Cape Verde || capital || Winchester(Cape Verde, ~birth place, Aristides Pereira)
Cape Verde || ~residence || Lady Anne Monson(Cape Verde, ~birth place, Pedro Pires)
P5: Cape Verde || capital || Winchester(Cape Verde, ~birth place, José Maria Neves)
Cape Verde || ~death place || Lady Anne Monson(India, ~residence, Lady Anne Monson)
(Kingdom of England, ~birth place, Lady Anne Monson)
(Darlington, ~birth place, Lady Anne Monson)
(Anne Monzon, ~cinematography, "Wont Last a Day Without You")
(Anne Monzon, ~cinematography, Pop Class)
(Anne Monzon, ~cinematography, Namets!)
(Cape Verde, ~death place, Corsino Fortes)
(Cape Verde, ~death place, Cesária Évora)
(Cape Verde, ~death place, Baltasar Lopes da Silva)
(1776-02-18, ~death date, Lady Anne Monson)
(1776, ~death year, Lady Anne Monson)
The operating organization for Agra Airport is the Indian Air Force who deploy the HAL Light Combat Helicopter and the Boeing C17 Globemaster III transport aircraft.P1: Agra Airport || operator || Indian Air Force(Agra Airport, operator, Indian Air Force)Justification: The triplets confirm Agra Airport is operated by the Indian Air Force, the Indian Air Force deploys the HAL Light Combat Helicopter, and the Boeing C-17 Globemaster III is indeed a transport aircraft, but there is no direct information linking the Indian Air Force at Agra Airport to the deployment of the Boeing C-17 Globemaster III. However, given the information provided, we can infer the involvement of the Indian Air Force with the mentioned aircraft types, albeit indirectly for the Boeing C-17 Globemaster III. Verdict: False
Agra Airport || ~location || Boeing C-17 Globemaster III(Agra Airport, location, Uttar Pradesh)
Agra Airport || ~location || Boeing C-17 Globemaster III(Agra Airport, location, Agra)
Agra Airport || ~location || Boeing C-17 Globemaster III(2012 in spaceflight, site, Boeing C-17 Globemaster III)
Agra Airport || ~location || Boeing C-17 Globemaster III(2013 in spaceflight, site, Boeing C-17 Globemaster III)
Agra Airport || ~location || Boeing C-17 Globemaster III(2009 in spaceflight (July-December), site, Boeing C-17 Globemaster III)
Agra Airport || ~location || Boeing C-17 Globemaster III(Hindustan Aeronautics, manufacturer, HAL Light Combat Helicopter)
Agra Airport || ~location || Boeing C-17 Globemaster III(Boeing C-17 Globemaster III, aircraft transport, United States Air Force)
Agra Airport || operator || Indian Air Force(Boeing C-17 Globemaster III, aircraft transport, Royal Air Force)
Agra Airport || runway length || Boeing C-17 Globemaster III(Boeing C-17 Globemaster III, aircraft transport, Royal Australian Air Force)
Agra Airport || ~location || HAL Light Combat Helicopter(2743.0, runway length, Agra Airport)
Agra Airport || ~city || HAL Light Combat Helicopter(1818.0, runway length, Agra Airport)
Agra Airport || ~city || Boeing C-17 Globemaster III(HAL Light Combat Helicopter, aircraft helicopter, Indian Air Force)
(Aircraft, icoa location identifier, VIAG)
(Airlift, type, Boeing C-17 Globemaster III)
(United States, origin, Boeing C-17 Globemaster III)
(In service, status, Boeing C-17 Globemaster III)
", + "bbox": [ + 114, + 114, + 882, + 860 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Table 13: Case study of ClaimPKG on the FactKG dataset with incorrectly predicted samples.", + "bbox": [ + 179, + 870, + 815, + 885 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "5289", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "FEWSHOT PSEUDO SUBGRAPH GENERATION", + "text_level": 1, + "bbox": [ + 127, + 114, + 463, + 126 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Task: Generate a reference graph to verify the following claim. Only return the subgraphs following the format of provided examples and do NOT include other unnecessary information.", + "bbox": [ + 126, + 137, + 870, + 162 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Here are some examples:", + "text_level": 1, + "bbox": [ + 127, + 174, + 290, + 186 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Claim: Akeem Priestley played for club RoPS and currently plays for the Orange County Blues FC, which is managed by Oliver Wyss.", + "bbox": [ + 126, + 197, + 870, + 222 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Subgraphs:", + "text_level": 1, + "bbox": [ + 127, + 222, + 201, + 234 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Orange County Blues FC || manager || Oliver Wyss \nOrange County Blues FC || clubs || Akeem Priestley \nAkeem Priestley || team || RoPS", + "bbox": [ + 126, + 234, + 547, + 269 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Claim: He is a Rhythm and Blues singer from Errata, Mississippi!", + "bbox": [ + 127, + 280, + 532, + 293 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Subgraphs:", + "text_level": 1, + "bbox": [ + 127, + 293, + 203, + 305 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": " || genre || unknown_0 \nunknown_0 || birth place || Errata, Mississippi \nunknown_0 || background || unknown_1", + 
"bbox": [ + 126, + 305, + 457, + 340 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Claim: Arròs negro is a traditional dish from Spain, and from the Catalonia region, which is led by the Maria Norrfalk.", + "bbox": [ + 126, + 351, + 870, + 375 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Subgraphs:", + "text_level": 1, + "bbox": [ + 127, + 376, + 203, + 388 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "$<\\mathrm{e}>$ Arròs negro
|| country || Spain \n $<\\mathrm{e}>$ Arròs negro || region || Catalonia \n $<\\mathrm{e}>$ Catalonia || leader name || Maria Norrfalk", + "bbox": [ + 126, + 388, + 482, + 423 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Claim: Well, Jason Sherlock did not have a nickname!", + "bbox": [ + 127, + 434, + 460, + 445 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Subgraphs:", + "text_level": 1, + "bbox": [ + 127, + 447, + 203, + 458 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "$<\\mathrm{e}>$ Jason Sherlock | | nickname | | unknown_0", + "bbox": [ + 126, + 458, + 428, + 470 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Claim: Garlic is the main ingredient of Ajoblanco, which is from Andalusia.", + "bbox": [ + 127, + 481, + 594, + 494 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Subgraphs:", + "text_level": 1, + "bbox": [ + 127, + 494, + 203, + 505 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "$< \\mathrm{e}>$ Ajoblanco || region || Andalusia \n $< \\mathrm{e}>$ Ajoblanco || ingredient || Garlic", + "bbox": [ + 126, + 505, + 421, + 529 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "....More examples ....", + "bbox": [ + 127, + 541, + 268, + 552 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Claim: {{claim}}", + "bbox": [ + 127, + 564, + 240, + 576 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Subgraphs:", + "bbox": [ + 127, + 577, + 203, + 589 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Figure 7: Prompt template for the general LLM to generate pseudo subgraphs", + "bbox": [ + 233, + 625, + 759, + 640 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "ANNOTATE IN AND OUT ENTITIES", + "text_level": 1, + "bbox": [ + 127, + 706, + 379, + 718 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Task: Specify if the following entities are mentioned in the claim or not.", + "bbox": [ + 126, + 730, + 568, + 743 + ], + 
"page_idx": 19 + }, + { + "type": "text", + "text": "Respond correctly in the following JSON format and do not output anything else: { \"in Entities\": [list of entities that are in the claim], \"out Entities\": [list of entities that are not in the claim] } Do not change the entity names from the list of provided entities.", + "bbox": [ + 126, + 743, + 621, + 815 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Claim: {{claim}}", + "bbox": [ + 127, + 825, + 240, + 838 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Entities: {{entities}}", + "bbox": [ + 127, + 838, + 260, + 850 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Figure 8: Prompt template to annotate inside and outside entity of the claim.", + "bbox": [ + 238, + 876, + 754, + 891 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "5290", + "bbox": [ + 480, + 928, + 519, + 940 + ], + "page_idx": 19 + } +] \ No newline at end of file diff --git a/2025/ClaimPKG_ Enhancing Claim Verification via Pseudo-Subgraph Generation with Lightweight Specialized LLM/08384e56-2d5c-4ecb-b64c-ecb74bcfc53b_model.json b/2025/ClaimPKG_ Enhancing Claim Verification via Pseudo-Subgraph Generation with Lightweight Specialized LLM/08384e56-2d5c-4ecb-b64c-ecb74bcfc53b_model.json new file mode 100644 index 0000000000000000000000000000000000000000..987a01f554b8991e9d90d91891725aa95ae09c9a --- /dev/null +++ b/2025/ClaimPKG_ Enhancing Claim Verification via Pseudo-Subgraph Generation with Lightweight Specialized LLM/08384e56-2d5c-4ecb-b64c-ecb74bcfc53b_model.json @@ -0,0 +1,3551 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.167, + 0.09, + 0.833, + 0.131 + ], + "angle": 0, + "content": "ClaimPKG: Enhancing Claim Verification via Pseudo-Subgraph Generation with Lightweight Specialized LLM" + }, + { + "type": "text", + "bbox": [ + 0.255, + 0.152, + 0.748, + 0.169 + ], + "angle": 0, + "content": "Hoang Pham*, Thanh-Do Nguyen*, Khac-Hoai Nam Bui†" + }, + { + "type": "text", + "bbox": [ 
+ 0.275, + 0.17, + 0.723, + 0.186 + ], + "angle": 0, + "content": "Viettel Artificial Intelligence and Data Services Center," + }, + { + "type": "text", + "bbox": [ + 0.403, + 0.187, + 0.594, + 0.203 + ], + "angle": 0, + "content": "Viettel Group, Vietnam" + }, + { + "type": "text", + "bbox": [ + 0.312, + 0.204, + 0.687, + 0.22 + ], + "angle": 0, + "content": "{hoangpv4, dont15, nambkh} @ viettel.com.vn" + }, + { + "type": "title", + "bbox": [ + 0.261, + 0.261, + 0.341, + 0.276 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.293, + 0.461, + 0.763 + ], + "angle": 0, + "content": "Integrating knowledge graphs (KGs) to enhance the reasoning capabilities of large language models (LLMs) is an emerging research challenge in claim verification. While KGs provide structured, semantically rich representations well-suited for reasoning, most existing verification methods rely on unstructured text corpora, limiting their ability to effectively leverage KGs. Additionally, despite possessing strong reasoning abilities, modern LLMs struggle with multi-step modular pipelines and reasoning over KGs without adaptation. To address these challenges, we propose ClaimPKG1, an end-to-end framework that seamlessly integrates LLM reasoning with structured knowledge from KGs. Specifically, the main idea of ClaimPKG is to employ a lightweight, specialized LLM to represent the input claim as pseudo-subgraphs, guiding a dedicated subgraph retrieval module to identify relevant KG subgraphs. These retrieved subgraphs are then processed by a general-purpose LLM to produce the final verdict and justification. Extensive experiments on the FactKG dataset demonstrate that ClaimPKG achieves state-of-the-art performance, outperforming strong baselines in this research field by \\(9\\% - 12\\%\\) accuracy points across multiple categories. 
Furthermore, ClaimPKG exhibits zero-shot generalizability to unstructured datasets such as HoVer and FEVERIOUS, effectively combining structured knowledge from KGs with LLM reasoning across various LLM backbones." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.779, + 0.26, + 0.794 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.807, + 0.49, + 0.871 + ], + "angle": 0, + "content": "In today's rapidly evolving information landscape, distinguishing fact from misinformation is becoming more challenging, especially with the rise of AI-generated content. Robust claim verification" + }, + { + "type": "image", + "bbox": [ + 0.549, + 0.259, + 0.847, + 0.382 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.549, + 0.383, + 0.844, + 0.446 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.631, + 0.447, + 0.751, + 0.456 + ], + "angle": 0, + "content": "c) Our Method - ClaimPKG" + }, + { + "type": "image_caption", + "bbox": [ + 0.509, + 0.467, + 0.885, + 0.567 + ], + "angle": 0, + "content": "Figure 1: Different claim verification paradigms: (a) Unstructured Text-based methods focusing on claim decomposition and sequential reasoning over text, (b) KG-based methods facing challenges in entity resolution and structured reasoning, and (c) ClaimPKG's unified framework with specialized modules for pseudosubgraph generation, retrieval, and general reasoning." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.596, + 0.885, + 0.709 + ], + "angle": 0, + "content": "systems, leveraging NLP methods to automatically assess the veracity of claims (Glockner et al., 2022a,b; Thorne and Vlachos, 2018), are essential to ensure information reliability. Effective methods require not only accuracy but also transparency, necessitating strong reasoning to identify evidence and provide clear justifications (Pan et al., 2023)." 
+ }, + { + "type": "text", + "bbox": [ + 0.508, + 0.711, + 0.884, + 0.903 + ], + "angle": 0, + "content": "Most existing verification approaches focus on unstructured text corpora, using techniques like chain-of-thought (CoT) reasoning (Wei et al., 2022) to break down claims for verification. Approaches like ProgramFC (Pan et al., 2023) and FOLK (Wang and Shu, 2023) employ modular pipelines to verify claims against text-based knowledge bases (Figure 1(a)). However, the inherent limitations of text representation pose challenges. Specifically, ambiguous entity references and complex multi-hop relationships make it difficult to perform rigorous verification against unstructured text." + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.906, + 0.882, + 0.922 + ], + "angle": 0, + "content": "In contrast, Knowledge Graphs (KGs) provide" + }, + { + "type": "page_footnote", + "bbox": [ + 0.136, + 0.882, + 0.263, + 0.894 + ], + "angle": 0, + "content": "*Equal contribution." + }, + { + "type": "page_footnote", + "bbox": [ + 0.138, + 0.895, + 0.282, + 0.907 + ], + "angle": 0, + "content": "† Corresponding author." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.138, + 0.907, + 0.442, + 0.921 + ], + "angle": 0, + "content": "\\(^{1}\\)https://github.com/HoangHoang1408/ClaimPKG" + }, + { + "type": "list", + "bbox": [ + 0.136, + 0.882, + 0.442, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.928, + 0.519, + 0.941 + ], + "angle": 0, + "content": "5271" + }, + { + "type": "footer", + "bbox": [ + 0.229, + 0.946, + 0.769, + 0.959 + ], + "angle": 0, + "content": "Findings of the Association for Computational Linguistics: ACL 2025, pages 5271-5290" + }, + { + "type": "footer", + "bbox": [ + 0.269, + 0.96, + 0.729, + 0.973 + ], + "angle": 0, + "content": "July 27 - August 1, 2025 ©2025 Association for Computational Linguistics" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.112, + 0.085, + 0.493, + 0.277 + ], + "angle": 0, + "content": "structured relationships for effective reasoning (Luo et al., 2024; Sun et al., 2024), yet their use in claim verification remains limited. Existing KG-based approaches (Figure 1(b)) (Kim et al., 2023b; Zhou et al., 2019; Kim et al., 2023a) lack end-to-end solutions, often requiring pre-extracted entities via modules like entity or relation extraction. Meanwhile, despite excelling at general reasoning, LLMs struggle with KG-specific tasks like entity resolution and multi-hop reasoning (Cao et al., 2021; Aly et al., 2021), suggesting the need for a system combining LLM capabilities with KG-based inference." 
+ }, + { + "type": "text", + "bbox": [ + 0.112, + 0.278, + 0.492, + 0.472 + ], + "angle": 0, + "content": "Overall, solving claim verification problems is hindered by following major limitations: (1) Entity Ambiguity: Systems must accurately disambiguate entities within claims to identify relevant evidence (Aly et al., 2021); (2) Multihop Reasoning: Complex claims often require reasoning across multiple evidence from different sources (Pan et al., 2023; Wang and Shu, 2023); and (3) Limited integration of KGs and LLMs: Current approaches are underexploring the potential of combining the application of structured representation with strong inference capabilities of LLMs (Kim et al., 2023a)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.472, + 0.492, + 0.89 + ], + "angle": 0, + "content": "To address these challenges, we propose ClaimPKG (Claim Verification using Pseudo-Subgraph in Knowledge Graphs), a novel end-to-end framework that synergizes the adaptability and generalization strengths of LLMs with the structured and rigorous representation of KGs to enable robust and transparent claim verification. As specified in Figure 1(c), ClaimPKG operates through three phases: (1) Pseudo-Subgraphs Generation: A KG-specialized lightweight LLM generates pseudo subgraphs as the representations of input claims under a Trie-based KG-Entity Constraint, ensuring the correctness of extracted entities; (2) Subgraphs Retrieval: A retrieval algorithm considers generated pseudo subgraphs as queries to identify actual relevant KG subgraphs as evidence; and (3) General Reasoning: A general-purpose LLM reasons over the retrieved KG subgraphs to produce the verdict and human-readable justifications. Through extensive experiments on the FactKG dataset, ClaimPKG achieves state-of-the-art performance, demonstrating its effectiveness over various claim types with a small number of training samples. 
Furthermore, its zero-shot generalizability to unstructured datasets (HoVer, FEVEROUS) highlights its robustness." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.89, + 0.49, + 0.922 + ], + "angle": 0, + "content": "Our contributions can be summarized as follows: (1) We introduce ClaimPKG, a holistic framework" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.887, + 0.262 + ], + "angle": 0, + "content": "that integrates LLMs and KGs for accurate and interpretable claim verification, handling various types of claims in a unified manner; (2) We develop a lightweight specialized LLM with its according decoding algorithm for pseudo-subgraph generation and pair it with general-purpose LLMs to achieve robust reasoning; and (3) We validate the effectiveness of ClaimPKG through extensive experiments, achieving state-of-the-art performance on structure-based datasets and generalizing to unstructure-based datasets." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.279, + 0.667, + 0.294 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.307, + 0.886, + 0.612 + ], + "angle": 0, + "content": "Claim Verification Approaches. Claim verification systems utilize knowledge bases that can be categorized into unstructured and structured formats. In the unstructured domain, text-based verification methods predominate, with systems designed to verify claims against textual evidence, as demonstrated in the FEVER dataset (Thorne et al., 2018). Recent advances have focused on handling specialized verification scenarios, including ambiguous question-answer pairs (Park et al., 2022), detecting factual changes (Schuster et al., 2021), and processing multiple documents concurrently (Jiang et al., 2020). 
For structured verification, research has primarily focused on tables and graphs, with early work developing specialized architectures: graph neural networks for knowledge graph processing (Zhou et al., 2020), table-specific transformers (Herzig et al., 2020), and tree-structured decoders for hierarchical data (Wang et al., 2020)." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.616, + 0.886, + 0.923 + ], + "angle": 0, + "content": "Claim Verification over Knowledge Graphs (KGs). The emergence of Large Language Models (LLMs) has simplified direct reasoning over textual corpora for claim verification, as demonstrated by ProgramFC (Pan et al., 2023) and FOLK (Wang and Shu, 2023). However, structured data sources like tables and graphs can provide more grounded and robust verification results (Kim et al., 2023b). Knowledge graphs are particularly advantageous as they enable explicit representation of reasoning processes through logical rules over nodes and edges. FactKG (Kim et al., 2023b) established a foundation in this direction by introducing a comprehensive dataset for evaluating modern verification methods. KG-GPT (Kim et al., 2023a) followed this work by demonstrating performance gains through a pipeline that performs sentence decomposition, subgraph retrieval, and logical inference. Additionally, while not directly addressing" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.928, + 0.522, + 0.941 + ], + "angle": 0, + "content": "5272" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.49, + 0.182 + ], + "angle": 0, + "content": "claim verification, StructGPT (Jiang et al., 2023) and RoG (Luo et al., 2024) achieved promising results in related tasks (e.g., Knowledge Base Question Answering) by collecting relevant evidence, such as subgraphs in KGs, then leveraging LLMs for complex reasoning in particular scenarios." 
+ }, + { + "type": "title", + "bbox": [ + 0.114, + 0.193, + 0.254, + 0.209 + ], + "angle": 0, + "content": "3 Preliminary" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.217, + 0.489, + 0.282 + ], + "angle": 0, + "content": "Knowledge Graph: Knowledge Graph (KG) \\(\\mathcal{G}\\) represents facts as triplets of format \\(t = (e,r,e')\\), where entities \\(e,e'\\in \\mathcal{E}\\) are connected by a relation \\(r\\in \\mathcal{R}\\); \\(r\\) can also be referred as \\(r(e,e')\\)." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.285, + 0.487, + 0.43 + ], + "angle": 0, + "content": "Claim Verification: Given a claim \\(c\\), a verification model \\(\\mathcal{F}\\) determines its veracity as Supported or Refuted based on an external knowledge base \\(\\kappa\\), while also providing a justification \\(j\\) to explain the predicted label. This work specifically considers the scenario where \\(\\kappa\\) is structured as a Knowledge Graph \\(\\mathcal{G}\\), enabling reasoning over graph knowledge to infer \\(v\\) and \\(j\\). Formally, the verification process is defined as: \\((v,j) = \\mathcal{F}(c,\\mathcal{G})\\)." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.434, + 0.49, + 0.531 + ], + "angle": 0, + "content": "Trie-based Constrained Decoding: A Trie (Wikipedia, 2025b) indexes predefined token sequences, where each root-to-node path represents a prefix. During LLM generation, this structure restricts token selection to only valid Trie paths, ensuring reliable output." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.541, + 0.246, + 0.556 + ], + "angle": 0, + "content": "4 ClaimPKG" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.566, + 0.371, + 0.581 + ], + "angle": 0, + "content": "4.1 Formulation of ClaimPKG" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.587, + 0.489, + 0.765 + ], + "angle": 0, + "content": "We formulate the ClaimPKG framework using a probabilistic approach. 
Given a claim \\(c\\) and a prebuilt KG \\(\\mathcal{G}\\), our objective is to model the distribution \\(p_{\\theta}(v,j|c,\\mathcal{G})\\), where \\(v\\) denotes the verdict and \\(j\\) the justification. However, direct computation for this distribution is infeasible as reasoning over the entire KG is not practical given its large size. To address this, we propose to select \\(S_{c}\\), a subgraph of \\(\\mathcal{G}\\) relevant to \\(c\\) containing necessary information to derive our target distribution. Treating \\(S_{c}\\) as a latent variable, \\(p_{\\theta}(v,j|c,\\mathcal{G})\\) is decomposed as:" + }, + { + "type": "equation", + "bbox": [ + 0.129, + 0.773, + 0.488, + 0.801 + ], + "angle": 0, + "content": "\\[\np _ {\\theta} (v, j \\mid c, \\mathcal {G}) = \\sum_ {\\mathcal {S} _ {c}} p _ {\\theta} (v, j \\mid c, \\mathcal {S} _ {c}) p _ {\\theta} (\\mathcal {S} _ {c} \\mid c, \\mathcal {G}) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.808, + 0.49, + 0.922 + ], + "angle": 0, + "content": "where \\(p_{\\theta}(\\mathcal{S}_c|c,\\mathcal{G})\\) models the subgraph selection, and \\(p_{\\theta}(v,j|c,\\mathcal{S}_c)\\) models the generator of the verdict and justification given \\(\\mathcal{S}_c\\). However, direct computation of \\(p_{\\theta}(\\mathcal{S}_c|c,\\mathcal{G})\\) is challenging due to modality mismatch between the input \\(c\\) (text) and the target \\(\\mathcal{S}_c\\) (graph structure), hindering the employment of retrieval methods for \\(\\mathcal{S}_c\\). 
To bridge this" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.085, + 0.867, + 0.101 + ], + "angle": 0, + "content": "gap, we decompose the subgraph selection into:" + }, + { + "type": "equation", + "bbox": [ + 0.528, + 0.107, + 0.883, + 0.134 + ], + "angle": 0, + "content": "\\[\np _ {\\theta} \\left(\\mathcal {S} _ {c} | c, \\mathcal {G}\\right) = \\sum_ {\\mathcal {P} _ {c}} p _ {\\theta} \\left(\\mathcal {S} _ {c} \\mid \\mathcal {P} _ {c}, \\mathcal {G}\\right) p _ {\\theta} \\left(\\mathcal {P} _ {c} | c, \\mathcal {G}\\right) \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.141, + 0.884, + 0.302 + ], + "angle": 0, + "content": "where \\(p_{\\theta}(\\mathcal{P}_c|c,\\mathcal{G})\\) models the generation of the graph representation \\(\\mathcal{P}_c\\), which we refer as \"pseudo subgraph\", from a textual claim \\(c\\), and \\(p_{\\theta}(\\mathcal{S}_c|\\mathcal{P}_c,\\mathcal{G})\\) models the distribution over relevant subgraphs \\(\\mathcal{S}_c\\) given \\(\\mathcal{P}_c\\). While equations 1 and 2 establish our theoretical framework for ClaimPKG, computing exact probabilities by summing over all possible \\((\\mathcal{S}_c,\\mathcal{P}_c)\\) pairs is intractable. 
Addressing this we propose two approximations: (1) We infer the veracity using only the most relevant subgraph \\(\\mathcal{S}_c^*\\):" + }, + { + "type": "equation", + "bbox": [ + 0.603, + 0.309, + 0.883, + 0.327 + ], + "angle": 0, + "content": "\\[\n\\left(v ^ {*}, j ^ {*}\\right) \\sim p _ {\\theta} (v, j | c, \\mathcal {S} _ {c} ^ {*}) \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.333, + 0.883, + 0.38 + ], + "angle": 0, + "content": "(2) We assume each generated pseudo-subgraph is reasonable with a high probability, allowing us to approximate the subgraph selection in 2 as:" + }, + { + "type": "equation", + "bbox": [ + 0.579, + 0.386, + 0.883, + 0.407 + ], + "angle": 0, + "content": "\\[\n\\mathcal {S} _ {c} ^ {(i)} = \\arg \\max p _ {\\theta} \\left(\\mathcal {S} _ {c} | \\mathcal {P} _ {c} ^ {(i)}, \\mathcal {G}\\right) \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.414, + 0.882, + 0.466 + ], + "angle": 0, + "content": "with \\(\\mathcal{P}_c^{(i)}\\) is the \\(ith\\) pseudo-graph generation. We then construct \\(\\mathcal{S}_c^*\\) by aggregating multiple sampled subgraphs, specifically \\(\\mathcal{S}_c^* = \\bigcup \\mathcal{S}_c^{(i)}\\)." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.466, + 0.884, + 0.593 + ], + "angle": 0, + "content": "These approximations lead ClaimPKG to comprise 3 key modules as depicted in Figure 2: (1) Pseudo Subgraph Generation to generate graph representations \\(\\mathcal{P}_c\\)'s given claim \\(c\\); (2) Subgraph Retrieval to retrieve relevant evidence subgraph \\(S_c^*\\); and (3) General Reasoning to generate final verdict \\(v\\) and justification \\(j\\). 
The inference procedure is described as follows:" + }, + { + "type": "title", + "bbox": [ + 0.522, + 0.605, + 0.799, + 0.619 + ], + "angle": 0, + "content": "Inference Procedure of ClaimPKG" + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.625, + 0.871, + 0.657 + ], + "angle": 0, + "content": "Preprocessing: Index the KG \\(\\mathcal{G}\\) into an Entity. TriE for effective entity lookup." + }, + { + "type": "text", + "bbox": [ + 0.521, + 0.658, + 0.873, + 0.737 + ], + "angle": 0, + "content": "1. Pseudo Subgraph Generation: Generate multiple graph representations (pseudo subgraphs) \\(\\mathbb{P}_c = \\{\\mathcal{P}_c^{(i)}\\}_{i=1}^N\\) from claim \\(c\\), using a specialized LLM with beam search and Entity-Trie constraints." + }, + { + "type": "text", + "bbox": [ + 0.521, + 0.74, + 0.871, + 0.827 + ], + "angle": 0, + "content": "2. Subgraph Retrieval: Use each pseudo graph in \\(\\mathbb{P}_c\\) for querying the most respective relevant subgraph \\(S_{c}^{(i)}\\) in the KG \\(\\mathcal{G}\\), resulting in a set of \\(\\{S_c^{(i)}\\}_{i = 1}^N\\) following Equation 4, then aggregate them to form \\(S_{c}^{*} = \\bigcup_{i = 1}^{N}S_{c}^{(i)}\\)." + }, + { + "type": "text", + "bbox": [ + 0.521, + 0.827, + 0.871, + 0.875 + ], + "angle": 0, + "content": "3. General Reasoning: Employ a general-purpose LLM to reason veracity \\((v^{*},j^{*})\\sim p_{\\theta}(v,j|c,\\mathcal{S}_{c}^{*})\\) following Equation 3." + }, + { + "type": "list", + "bbox": [ + 0.521, + 0.658, + 0.873, + 0.875 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.89, + 0.882, + 0.921 + ], + "angle": 0, + "content": "The subsequent sections provide details about each component in the ClaimPKG framework." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.928, + 0.52, + 0.941 + ], + "angle": 0, + "content": "5273" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.121, + 0.081, + 0.875, + 0.37 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.378, + 0.883, + 0.422 + ], + "angle": 0, + "content": "Figure 2: Illustration of the ClaimPKG for claim verification. The framework consists of three key modules: (1) Pseudo-subgraph Generation, constructing representative subgraphs; (2) Subgraph Retrieval, selecting the most pertinent KG subgraphs; and (3) General Reasoning, integrating them for accurate and interpretable verification." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.447, + 0.393, + 0.463 + ], + "angle": 0, + "content": "4.2 Pseudo Subgraph Generation" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.468, + 0.489, + 0.644 + ], + "angle": 0, + "content": "The first step to effectively verify a claim is to understand its content thoroughly and represent it in a format compatible with the KG. Since evidence comes from KG, representing claims in the graph format is crucial, which captures hypothetical relations among entities in an effective way that enables effective comparisons with KG subgraphs for evidence retrieval. However, this process faces two main challenges: (1) handling ambiguity resolution and multi-hop reasoning, and (2) ensuring accurate entity extraction from the claim." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.648, + 0.49, + 0.922 + ], + "angle": 0, + "content": "Specialized LLM. To address the first challenge, the Pseudo Subgraph Generation module employs a lightweight model optimized for processing input claims. Following (Li et al., 2013; Miwa and Bansal, 2016), the model is trained to jointly extract entities and their corresponding relations from a claim \\( c \\). 
Specifically, from \\( c \\) the model constructs a pseudo subgraph \\( \\mathcal{P}_c \\) comprising triplets in the form of head_entity||relation||tail_entity (illustrated in Figure 2). To ensure the generated subgraph can identify entities requiring ambiguity resolution and multi-hop reasoning, we employ a specialized annotation mechanism: when the claim references an entity indirectly—either without explicit naming or through relations to other entities—we denote it as unknown_i, with the index i to keep track of different entities. This"
In order to capture different semantic views of a claim, we employ beam search along with the described sampling strategy, which has been shown to improve the coverage of extracted triplets (Table 8), resulting in multiple representations \\(\mathbb{P}_c = \{\mathcal{P}_c^{(i)}\}_{i = 1}^N\\) for an input claim."
+ }, + { + "type": "text", + "bbox": [ + 0.113, + 0.386, + 0.49, + 0.659 + ], + "angle": 0, + "content": "Incomplete Triplets Retrieval: Our goal is to identify evidence (actual triplets in the KG) to inform us about entities marked as unknown and their respective relations with explicit entities in the pseudo-subgraphs. First, for a \\(\\mathcal{P}_c\\), we group triplets sharing the same unknown entity \\(u\\) into a group \\(g\\) (e.g., in Figure 2, triplets associated with unknown_0 are grouped together). Subsequently, for each group \\(g\\) characterized by the unknown entity \\(u\\), we denote: \\(\\mathcal{E}_u = \\{e_{u1}, \\ldots, e_{un}\\}\\) as entities directly connected to \\(u\\) in the pseudo-subgraph \\(\\mathcal{P}_c\\) and \\(\\mathcal{R}_u = \\{r_{u1}, \\ldots, r_{un}\\}\\) as relations from \\(u\\) to corresponding entities in \\(\\mathcal{E}_c\\). In \\(g\\), for each explicit entity \\(e_{ui} \\in \\mathcal{E}_u\\), we first retrieve candidate set \\(C_{ui} = \\{e_{i1}^c, \\ldots, e_{im}^c\\}\\) containing all entities connected to \\(e_{ui}\\) in the KG, then collect all candidate sets into \\(\\mathcal{C}_u = \\{C_{u1}, \\ldots, C_{un}\\}\\)." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.661, + 0.49, + 0.839 + ], + "angle": 0, + "content": "To determine the best candidates for resolving \\( u \\), we propose an Entity Scoring mechanism, which is based on two assumptions: (1) since \\( u \\) has pseudo relations with all entities in \\( \\mathcal{E}_u \\), a candidate \\( e^c \\) connected to more entities in \\( \\mathcal{E}_u \\) is more likely to resolve \\( u \\); and (2) because every information related to \\( e_{ui} \\) and \\( u \\) is crucial to verify the initial claim, each candidate set \\( C_{ui} \\) must contribute to the final verification. 
Note that an entity can appear in multiple candidate sets, hence we compute a \"global\" score for each \\( e_{ij}^c \\) in a candidate set \\( C_{ui} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.183, + 0.85, + 0.488, + 0.874 + ], + "angle": 0, + "content": "\\[\n\\operatorname {s c o r e} \\left(e _ {i j} ^ {c}\\right) = \\sum_ {r} ^ {R _ {i j} ^ {u}} \\operatorname {S i m} \\left(r _ {u i}, r\\right) \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.886, + 0.488, + 0.922 + ], + "angle": 0, + "content": "with \\(R_{ij}^{u} = \\bigcup_{i = 1}^{\\left|\\mathcal{E}_{u}\\right|}\\{r(e_{ui},e_{ij}^{c})\\mid\\) if \\(e_{ij}^{c}\\in C_{ui}\\}\\) , the set of all relations across candidate sets appearing" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.885, + 0.134 + ], + "angle": 0, + "content": "in \\(\\mathcal{C}_u\\) that connect \\(e_{ij}^c\\) with an \\(e_{ui}\\). Subsequently, to construct the set \\(T_{u}\\) of most relevant triplets to a group \\(g\\), we employ a ranking function as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.562, + 0.14, + 0.884, + 0.185 + ], + "angle": 0, + "content": "\\[\nT _ {u} = \\bigcup_ {i = 1} ^ {| C _ {u} |} \\underset {\\text {t r i p l e t}, k _ {1}} {\\arg \\max } \\left\\{\\pi_ {i j} \\mid j \\leq \\left| C _ {u i} \\right| \\right\\} \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.191, + 0.885, + 0.24 + ], + "angle": 0, + "content": "with \\(\\pi_{ij}\\) is simply \\(score(e_{ij}^{c})\\) and (triplet, \\(k_{1}\\)) denotes the selection of top \\(k_{1}\\) triplets \\((e_{ui}, r, e^{c})\\) having the highest global scores from each set in \\(\\mathcal{C}_{u}\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.508, + 0.24, + 0.884, + 0.32 + ], + "angle": 0, + "content": "While equation 5 ensures candidates appearing in multiple candidate sets and having high similar scores are prioritized, equation 6 ensures every entity in \\(\\mathcal{E}_u\\) has at least \\(k_{1}\\) triplets, both of which make use of assumptions (1) and (2)." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.324, + 0.884, + 0.452 + ], + "angle": 0, + "content": "Complete Triplets Retrieval: For each triplet \\((e_1, r, e_2)\\) in a \\(\\mathcal{P}_c\\), we first find top \\(k_2\\) similar relations between \\(e_1\\) and \\(e_2\\) in the KG \\(\\mathcal{G}\\) using the Sim function. If no direct connection exists (e.g., \"103 Colmore Row\" and \"Vedat Tek\" as shown in figure 2), the triplet is decomposed into two: \\((e_1, r, \\text{unknown}_0)\\) and \\((\\text{unknown}_0, r, e_2)\\). These are then handled via Incomplete Triplets Retrieval." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.456, + 0.884, + 0.6 + ], + "angle": 0, + "content": "Subgraph Union: In summary, for an input claim \\(c\\), multiple pseudo-graphs are generated, containing complete and incomplete triplets. These triplets undergo processing to handle shared unknown entities and identified entities that are not connected in the KG \\(\\mathcal{G}\\), and are used to query \\(\\mathcal{G}\\) for relevant triplets. All retrieved evidence triplets are aggregated into a final subgraph \\(S_{c}^{*}\\), serving as the evidence for the final component of ClaimPKG." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.611, + 0.707, + 0.627 + ], + "angle": 0, + "content": "4.4 General Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.632, + 0.884, + 0.808 + ], + "angle": 0, + "content": "The General Reasoning module concludes the ClaimPKG framework by determining claim veracity through reasoning over input claim \\( c \\) and retrieved evidence subgraph \\( S_{c}^{*} \\). 
As complex tasks, especially claim verification, require deliberate chain-of-thought reasoning (Jiang et al., 2020; Wang et al., 2023), we use a general-purpose LLM to analyze \\( c \\) and \\( S_{c}^{*} \\). Using carefully designed prompts (Figure 6), the module generates a natural language justification \\( j \\) and verdict \\( v \\). Expanded from equation 3, this step is formalized as:" + }, + { + "type": "equation", + "bbox": [ + 0.53, + 0.816, + 0.883, + 0.835 + ], + "angle": 0, + "content": "\\[\np _ {\\theta} (v, j | c, \\mathcal {S} _ {c} ^ {*}) = p _ {\\theta} (v | c, j, \\mathcal {S} _ {c} ^ {*}) p _ {\\theta} (j | c, \\mathcal {S} _ {c} ^ {*}) \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.842, + 0.884, + 0.922 + ], + "angle": 0, + "content": "where \\(p(j|c, S_c^*)\\) produces the justification and \\(p(v|c, j, S_c^*)\\) determines veracity. This model-agnostic design enables integration with state-of-the-art LLMs (e.g., Llama, Qwen and GPT4) for zero-shot reasoning." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.928, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5275" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.114, + 0.085, + 0.262, + 0.101 + ], + "angle": 0, + "content": "5 Experiments" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.111, + 0.318, + 0.127 + ], + "angle": 0, + "content": "5.1 Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.133, + 0.49, + 0.422 + ], + "angle": 0, + "content": "Datasets. Our primary benchmark is the FactKG dataset (Kim et al., 2023b), designed for claim verification over the DBpedia KG (Lehmann et al., 2015). It consists of 108K claims grounded in DBpedia and labelled as either SUPPORTED or REFUTED. The claims span five distinct categories: One-hop, Conjunction, Existence, Multi-hop, and Negation, each posing unique challenges. 
For evaluation, we randomly sample 2K claims from the test set, ensuring balanced representation across categories while maintaining computational efficiency. To assess the generalizability of ClaimPKG beyond structured benchmarks, we also evaluate on HoVer (Jiang et al., 2020) and FEVEROUS (Aly et al., 2021), two widely-used unstructured-based benchmarks requiring multi-hop reasoning and evidence aggregation from Wikipedia. Additional statistics of datasets are provided in Appendix A."
We compare ClaimPKG with recent KG-based claim verification methods: Zero-shot CoT (Wei et al., 2022) prompts LLMs to generate rationales and verdicts without accessing the KG; GEAR (Zhou et al., 2019), originally designed for text-based verification, employs graph-based evidence aggregation with multiple aggregators to capture multi-evidence dependencies, using BERT for language representation and adapted for KG settings following (Kim et al., 2023b); and KG-GPT (Kim et al., 2023a), a pioneering work that combines LLMs and KGs through a structured pipeline of Sentence Segmentation, Graph Retrieval, and Logic Inference. Notably, unlike baselines which receive pre-identified claim entities along with the claim as the input, our method processes entities in an end-to-end pipeline."
Table 1 compares the accuracy \\((\%)\\) of ClaimPKG with baselines across claim categories of the FactKG dataset. Key observations include:"
+ }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.928, + 0.522, + 0.941 + ], + "angle": 0, + "content": "5276" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.115, + 0.082, + 0.885, + 0.306 + ], + "angle": 0, + "content": "
MethodEntity CorrectnessNegationExistenceConjunctionMulti-hopOne-hopAverage
Direct Inference With CoT - w/o Evidence Retrieval
GPT-4o-mini (Zero-shot CoT)-61.9159.4569.5160.8770.8364.51
Qwen-72B (Zero-shot CoT)-62.9162.2074.0462.3275.9867.49
Llama-70B (Zero-shot CoT)-64.3464.6272.4765.5878.3269.07
Baseline Comparison - w/ Evidence Retrieval
GEAR (Finetuned BERT)Known in Prior79.7279.1978.6368.3977.3476.65
KG-GPT (Llama-70B Few-shot)Known in Prior70.9165.0686.6458.8792.0274.70
KG-GPT (Qwen-72B Few-shot)Known in Prior67.3160.0889.1458.1990.8773.12
ClaimPKG (Llama-3B* + GPT-4o-mini)100.0%85.1072.6484.2372.2691.0181.05
ClaimPKG (Llama-3B* + Qwen-72B)100.0%85.2786.9084.0278.7191.2085.22
ClaimPKG (Llama-3B* + Llama-70B)100.0%84.5884.2085.6878.4990.2684.64
Ablation Results (Llama-3B* + Llama-70B) - w/ Evidence Retrieval
ClaimPKG (w/o Trie Constraint)87.50%82.5083.2483.8276.1388.0182.74
ClaimPKG (Few-shot Specialized LLM)86.52%77.9981.8977.8068.8281.6577.63
ClaimPKG (w/o Incomplete Retrieval)100.0%68.8051.2567.8461.2976.2265.08
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.313, + 0.884, + 0.343 + ], + "angle": 0, + "content": "Table 1: Performance (accuracy %) comparison of ClaimPKG with baselines on 5 claim categories of FactKG dataset and their average scores." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.368, + 0.49, + 0.528 + ], + "angle": 0, + "content": "Specialized LLM. When replacing the specialized LLM with few-shot prompting strategy using Llama-70B, a much larger general-purpose LLM, entity correctness further declines to \\(86.52\\%\\), leading overall performance to drop to \\(77.63\\%\\). These results demonstrate that even with examples, general-purpose LLMs struggle to produce outputs with desired graph structure correctly, emphasizing the importance of the specialized LLM in generating pseudo subgraphs." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.529, + 0.49, + 0.642 + ], + "angle": 0, + "content": "Incomplete Retrieval. Removing the Incomplete Triplet Retrieval function, which forces the retrieval algorithm to only query evidence using complete triplets, causes a significant average performance drop of nearly \\(20\\%\\) compared to the full setup, showing the complete graph structure of input claims is essential for optimal performance." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.645, + 0.49, + 0.724 + ], + "angle": 0, + "content": "(RQ3): Robustness and Generalization of ClaimPKG? To assess ClaimPKG's robustness, we vary model backbones, examine zero-shot generalizability, analyze the effect of training data size, and conduct error analysis." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.728, + 0.49, + 0.856 + ], + "angle": 0, + "content": "Model Backbones. We evaluate different LLM architectures for both Specialized and General LLMs (Table 2). For General LLMs, we test various model sizes (7B to 70B parameters) using retrieved KG triplets as input. 
For Specialized LLMs, we experiment with different small fine-tuned backbones and few-shot prompt templates (Figure 7), while keeping Llama-3.3-70B as the fixed General LLM." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.858, + 0.49, + 0.922 + ], + "angle": 0, + "content": "Results in Table 2 show larger General LLMs (GPT-4o-Mini, Llama-3.3-70B) outperform smaller ones (Qwen-2.5-7B, Llama-3.1-8B) by up to 8 points, highlighting model capacity's role in ag" + }, + { + "type": "table", + "bbox": [ + 0.53, + 0.365, + 0.865, + 0.538 + ], + "angle": 0, + "content": "
ComponentStrategyBackboneAverage
General LLMZero-shotLlama 3.1 - 8B77.08
Llama 3.3 - 70B84.64
GPT4o - Mini81.05
Qwen 2.5 - 7B80.22
Qwen 2.5 - 72B85.22
Specialized LLMFinetuneLlama 3 - 3B84.64
Qwen 2.5 - 3B82.32
Llama 3 - 1B83.91
Qwen 2.5 - 1.5B82.20
Few-shotLlama 3.3 - 70B77.63
Qwen 2.5 - 72B77.10
" + }, + { + "type": "table_caption", + "bbox": [ + 0.536, + 0.548, + 0.855, + 0.561 + ], + "angle": 0, + "content": "Table 2: Performance on Different Backbones." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.587, + 0.885, + 0.683 + ], + "angle": 0, + "content": "gregating subgraph evidence. Notably, a fine-tuned 1B Specialized LLM outperforms the general 70B counterpart, demonstrating fine-tuning's effectiveness to process graph data. This supports the need to combine powerful General LLMs with adapted Specialized LLMs for optimal performance." + }, + { + "type": "table_caption", + "bbox": [ + 0.51, + 0.685, + 0.882, + 0.7 + ], + "angle": 0, + "content": "Zero-shot Generalizability. To assess" + }, + { + "type": "table", + "bbox": [ + 0.512, + 0.711, + 0.885, + 0.79 + ], + "angle": 0, + "content": "
BenchmarkLlama 3Qwen 2.5
HoVer (Zero-shot CoT)66.665.3
HoVer (Support-Predicted)70.7 (14.3%)69.4 (15.7%)
FEVEROUS (Zero-shot CoT)81.180.9
FEVEROUS (Support-Predicted)83.8 (12.5%)83.6 (12.9%)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.508, + 0.799, + 0.884, + 0.843 + ], + "angle": 0, + "content": "Table 3: Zero-shot transferred performance on other unstructure-based benchmarks on the Support-Predicted samples along with Support Predicted rates." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.858, + 0.885, + 0.922 + ], + "angle": 0, + "content": "ClaimPKG's zero-shot generalizability, we test transfer to HoVer (Jiang et al., 2020) and FEVEROUS (Aly et al., 2021) datasets. Using DBpedia (Lehmann et al., 2015) as the knowledge" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.928, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5277" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.493, + 0.326 + ], + "angle": 0, + "content": "source, we evaluate with trained Specialized LLMs (Llama-3.2-3B and Qwen-2.5-3B) while keeping Llama-3.3-70B as the General LLM. Since external datasets may contain claims outside DBpedia's coverage, making it difficult to distinguish between knowledge gaps and actual verification failures of ClaimPKG for Refuted cases, we analyze only samples predicted as Supported. As shown in Table 3, ClaimPKG predicts Supported for only \\(12.5\\% - 15.7\\%\\) of samples, indicating limited knowledge overlap with DBpedia. However, on these samples, ClaimPKG outperforms Llama-3.3-70B's zero-shot CoT inference by \\(4\\%\\) accuracy on both datasets, demonstrating robust transfer to reasoning patterns in unseen data." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.328, + 0.49, + 0.344 + ], + "angle": 0, + "content": "Training Data Size. To assess the impact of train" + }, + { + "type": "image", + "bbox": [ + 0.128, + 0.355, + 0.475, + 0.513 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.126, + 0.522, + 0.475, + 0.537 + ], + "angle": 0, + "content": "Figure 3: Varying Specialized LLM's training data." 
+ }, + { + "type": "text", + "bbox": [ + 0.113, + 0.553, + 0.49, + 0.812 + ], + "angle": 0, + "content": "ing data on the Specialized LLM, we vary the number of training samples from 0.1K to 10K, using two configurations: Llama-3.2-3B and Qwen-2.5-3B as the specialized LLM and keep the General LLM to be Llama-3.3-70B. We evaluate performance based on two metrics: average accuracy on the test set and claim structure coverage on the dev set. As shown in Figure 3, the Specialized LLMs achieve satisfactory accuracy (Llama-3.2-3B: \\(79.35\\%\\), Qwen-2.5-3B: \\(77.62\\%\\)) with just 100 training samples, demonstrating efficiency and low training costs for KG adaptation. While both structure coverage and accuracy improve up to 5K samples, coverage plateaus thereafter, and accuracy begins to decline, indicating overfitting where excessive training data reduces generalizability." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.821, + 0.44, + 0.837 + ], + "angle": 0, + "content": "5.3 Interpretability and Error Analysis" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.841, + 0.49, + 0.922 + ], + "angle": 0, + "content": "ClaimPKG can improve claim verification performance while enhancing interpretability. Representative outputs of ClaimPKG (Figure 12, Appendix E) illustrate its ability to capture claim structure and provide well-grounded justifications. Notably," + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.885, + 0.246 + ], + "angle": 0, + "content": "when refuting claims, it explicitly presents contradicting evidence, ensuring transparent reasoning. 
To further assess reliability, we conducted a human analysis of 200 incorrect predictions from FactKG, categorizing errors (Figure 13, Appendix E) into: Claim Structure Errors: failure to capture the underlying claim structure; Retrieval Errors: failure to retrieve necessary evidence required for claim verification; and Reasoning Errors: incorrect logical inferences by the general LLM when judging the verdict."
Consequently, when the KG undergoes updates, ClaimPKG requires merely an update of the corresponding entities within the Entity-Trie, ensuring an efficient adaptation process." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.733, + 0.642, + 0.748 + ], + "angle": 0, + "content": "6 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.761, + 0.884, + 0.922 + ], + "angle": 0, + "content": "In this work, we present ClaimPKG, a novel claim verification combining the structure of Knowledge Graphs with the adaptability and reasoning of Large Language Models. Through Pseudosubgraph Generation, Subgraph Retrieval, and General Reasoning, it addresses limitations while ensuring transparency. Extensive experiments show state-of-the-art performance and generalizability across datasets, making ClaimPKG a step toward reliable and explainable misinformation detection." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.928, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5278" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.115, + 0.085, + 0.221, + 0.1 + ], + "angle": 0, + "content": "Limitations" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.111, + 0.49, + 0.448 + ], + "angle": 0, + "content": "Despite their advanced reasoning capabilities, LLMs are prone to errors and biases, necessitating careful deployment, particularly in fact-checking systems where incorrect or biased outputs could contribute to misinformation. Addressing these biases remains an ongoing research challenge, requiring effective mechanisms for detection, control, and mitigation. Additionally, real-world claim verification often requires inferring implicit reasoning, where further related knowledge for a problem is necessary, and making improvements in pipeline components to handle this type of information is crucial. 
Another limitation is the performance decline observed when the Specialized LLM is trained on an excessive number of examples, highlighting the need for future research into regularization strategies. Further improvements should also focus on the general reasoning module to infer missing knowledge more effectively and enhance intricate and nuanced claim verification cases over structured knowledge." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.475, + 0.214, + 0.49 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.498, + 0.49, + 0.616 + ], + "angle": 0, + "content": "Rami Aly, Zhijiang Guo, Michael Sejr Schlichtkrull, James Thorne, Andreas Vlachos, Christos Christodoulopoulos, Oana Cocarascu, and Arpit Mittal. 2021. FEVEROUS: fact extraction and verification over unstructured and structured information. In Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December 2021, virtual." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.627, + 0.49, + 0.693 + ], + "angle": 0, + "content": "Nicola De Cao, Gautier Izacard, Sebastian Riedel, and Fabio Petroni. 2021. Autoregressive entity retrieval. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.703, + 0.49, + 0.768 + ], + "angle": 0, + "content": "Darren Edge, Ha Trinh, Newman Cheng, Joshua Bradley, Alex Chao, Apurva Mody, Steven Truitt, and Jonathan Larson. 2024. From local to global: A graph RAG approach to query-focused summarization. CoRR, abs/2404.16130." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.779, + 0.49, + 0.883 + ], + "angle": 0, + "content": "Max Glockner, Yufang Hou, and Iryna Gurevych. 2022a. Missing counter-evidence renders NLP fact-checking unrealistic for misinformation. 
In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, EMNLP 2022, Abu Dhabi, United Arab Emirates, December 7-11, 2022, pages 5916-5936. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.894, + 0.489, + 0.921 + ], + "angle": 0, + "content": "Max Glockner, Yufang Hou, and Iryna Gurevych. 2022b. Missing counter-evidence renders NLP fact-checking" + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.498, + 0.49, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.529, + 0.086, + 0.885, + 0.165 + ], + "angle": 0, + "content": "unrealistic for misinformation. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, EMNLP 2022, Abu Dhabi, United Arab Emirates, December 7-11, 2022, pages 5916-5936. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.174, + 0.885, + 0.267 + ], + "angle": 0, + "content": "Jonathan Herzig, Pawel Krzysztof Nowak, Thomas Müller, Francesco Piccinno, and Julian Eisenschlos. 2020. TaPas: Weakly supervised table parsing via pre-training. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4320-4333, Online. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.275, + 0.885, + 0.368 + ], + "angle": 0, + "content": "Jinhao Jiang, Kun Zhou, Zican Dong, Keming Ye, Xin Zhao, and Ji-Rong Wen. 2023. StructGPT: A general framework for large language model to reason over structured data. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 9237-9251, Singapore. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.376, + 0.885, + 0.482 + ], + "angle": 0, + "content": "Yichen Jiang, Shikha Bordia, Zheng Zhong, Charles Dognin, Maneesh Kumar Singh, and Mohit Bansal. 2020. 
Hover: A dataset for many-hop fact extraction and claim verification. In Findings of the Association for Computational Linguistics: EMNLP 2020, Online Event, 16-20 November 2020, volume EMNLP 2020 of Findings of ACL, pages 3441-3460. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.49, + 0.885, + 0.582 + ], + "angle": 0, + "content": "Jiho Kim, Yeonsu Kwon, Yohan Jo, and Edward Choi. 2023a. KG-GPT: A general framework for reasoning on knowledge graphs using large language models. In Findings of the Association for Computational Linguistics: EMNLP 2023, Singapore, December 6-10, 2023, pages 9410-9421. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.591, + 0.885, + 0.696 + ], + "angle": 0, + "content": "Jiho Kim, Sungjin Park, Yeonsu Kwon, Yohan Jo, James Thorne, and Edward Choi. 2023b. Factkg: Fact verification via reasoning on knowledge graphs. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2023, Toronto, Canada, July 9-14, 2023, pages 16190-16206. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.705, + 0.885, + 0.798 + ], + "angle": 0, + "content": "Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, and Ion Stoica. 2023. Efficient memory management for large language model serving with pagedattention. In Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.806, + 0.885, + 0.885 + ], + "angle": 0, + "content": "Jens Lehmann, Robert Isele, Max Jakob, Anja Jentzsch, Dimitris Kontokostas, Pablo N. Mendes, Sebastian Hellmann, Mohamed Morsey, Patrick van Kleef, Soren Auer, and Christian Bizer. 2015. Dbpedia - A large-scale, multilingual knowledge base extracted from wikipedia. Semantic Web, 6(2):167-195." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.894, + 0.885, + 0.921 + ], + "angle": 0, + "content": "Qi Li, Heng Ji, and Liang Huang. 2013. Joint event extraction via structured prediction with global features." + }, + { + "type": "list", + "bbox": [ + 0.512, + 0.086, + 0.885, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.928, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5279" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.135, + 0.086, + 0.489, + 0.153 + ], + "angle": 0, + "content": "In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics, ACL 2013, 4-9 August 2013, Sofia, Bulgaria, Volume 1: Long Papers, pages 73-82. The Association for Computer Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.162, + 0.489, + 0.228 + ], + "angle": 0, + "content": "Ilya Loshchilov and Frank Hutter. 2019. Decoupled weight decay regularization. In 7th International Conference on Learning Representations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenReview.net." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.238, + 0.487, + 0.316 + ], + "angle": 0, + "content": "Linhao Luo, Yuan-Fang Li, Gholamreza Haffari, and Shirui Pan. 2024. Reasoning on graphs: Faithful and interpretable large language model reasoning. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.326, + 0.487, + 0.351 + ], + "angle": 0, + "content": "Meta. 2024. Build the future of ai with meta llama 3, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.362, + 0.489, + 0.442 + ], + "angle": 0, + "content": "Makoto Miwa and Mohit Bansal. 2016. End-to-end relation extraction using LSTMs on sequences and tree structures. 
In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1105-1116, Berlin, Germany. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.451, + 0.363, + 0.466 + ], + "angle": 0, + "content": "OpenAI. 2024. Hello gpt-4o, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.475, + 0.489, + 0.58 + ], + "angle": 0, + "content": "Liangming Pan, Xiaobao Wu, Xinyuan Lu, Anh Tuan Luu, William Yang Wang, Min-Yen Kan, and Preslav Nakov. 2023. Fact-checking complex claims with program-guided reasoning. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2023, Toronto, Canada, July 9-14, 2023, pages 6981-7004. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.589, + 0.489, + 0.681 + ], + "angle": 0, + "content": "Jungsoo Park, Sewon Min, Jaewoo Kang, Luke Zettle-moyer, and Hannaneh Hajishirzi. 2022. FaVIQ: FAct verification from information-seeking questions. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 5154-5166, Dublin, Ireland. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.69, + 0.483, + 0.705 + ], + "angle": 0, + "content": "Qwen. 2024. Qwen2.5: A party of foundation models." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.714, + 0.489, + 0.806 + ], + "angle": 0, + "content": "Tal Schuster, Adam Fisch, and Regina Barzilay. 2021. Get your vitamin C! robust fact verification with contrastive evidence. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 624-643, Online. Association for Computational Linguistics." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.816, + 0.489, + 0.92 + ], + "angle": 0, + "content": "Jiashuo Sun, Chengjin Xu, Lumingyuan Tang, Saizhuo Wang, Chen Lin, Yeyun Gong, Lionel M. Ni, Heung-Yeung Shum, and Jian Guo. 2024. Think-on-graph: Deep and responsible reasoning of large language model on knowledge graph. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. Open-Review.net." + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.086, + 0.489, + 0.92 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.086, + 0.883, + 0.178 + ], + "angle": 0, + "content": "James Thorne and Andreas Vlachos. 2018. Automated fact checking: Task formulations, methods and future directions. In Proceedings of the 27th International Conference on Computational Linguistics, COLING 2018, Santa Fe, New Mexico, USA, August 20-26, 2018, pages 3346-3359. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.187, + 0.883, + 0.319 + ], + "angle": 0, + "content": "James Thorne, Andreas Vlachos, Christos Christodoulopoulos, and Arpit Mittal. 2018. FEVER: a large-scale dataset for fact extraction and verification. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2018, New Orleans, Louisiana, USA, June 1-6, 2018, Volume 1 (Long Papers), pages 809-819. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.328, + 0.883, + 0.42 + ], + "angle": 0, + "content": "Bailin Wang, Richard Shin, Xiaodong Liu, Oleksandr Polozov, and Matthew Richardson. 2020. RAT-SQL: Relation-aware schema encoding and linking for text-to-SQL parsers. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7567-7578, Online. Association for Computational Linguistics." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.43, + 0.883, + 0.508 + ], + "angle": 0, + "content": "Haoran Wang and Kai Shu. 2023. Explainable claim verification via knowledge-grounded reasoning with large language models. In Findings of the Association for Computational Linguistics: EMNLP 2023, Singapore, December 6-10, 2023, pages 6288-6304. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.518, + 0.883, + 0.61 + ], + "angle": 0, + "content": "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V. Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. 2023. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.619, + 0.883, + 0.723 + ], + "angle": 0, + "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed H. Chi, Quoc V. Le, and Denny Zhou. 2022. Chain-of-thought prompting elicits reasoning in large language models. In Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2022, NeurIPS 2022, New Orleans, LA, USA, November 28 - December 9, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.733, + 0.883, + 0.772 + ], + "angle": 0, + "content": "Wikipedia. 2025a. Levenshtein distance — Wikipedia, The Free Encyclopedia. Accessed: 14-February-2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.783, + 0.883, + 0.81 + ], + "angle": 0, + "content": "Wikipedia. 2025b. Trie — Wikipedia, The Free Encyclopedia. [Online; accessed 9-February-2025]." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.819, + 0.883, + 0.871 + ], + "angle": 0, + "content": "Shitao Xiao, Zheng Liu, Peitian Zhang, and Niklas Muennighoff. 2023. C-pack: Packaged resources to advance general chinese embedding. 
Preprint, arXiv:2309.07597." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.881, + 0.883, + 0.921 + ], + "angle": 0, + "content": "Jie Zhou, Ganqu Cui, Shengding Hu, Zhengyan Zhang, Cheng Yang, Zhiyuan Liu, Lifeng Wang, Changcheng Li, and Maosong Sun. 2020. Graph" + }, + { + "type": "list", + "bbox": [ + 0.512, + 0.086, + 0.883, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5280" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.133, + 0.086, + 0.489, + 0.113 + ], + "angle": 0, + "content": "neural networks: A review of methods and applications. AI Open, 1:57-81." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.122, + 0.49, + 0.228 + ], + "angle": 0, + "content": "Jie Zhou, Xu Han, Cheng Yang, Zhiyuan Liu, Lifeng Wang, Changcheng Li, and Maosong Sun. 2019. GEAR: graph-based evidence aggregating and reasoning for fact verification. In Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019, Florence, Italy, July 28- August 2, 2019, Volume 1: Long Papers, pages 892-901. Association for Computational Linguistics." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.24, + 0.334, + 0.255 + ], + "angle": 0, + "content": "A Benchmark Datasets" + }, + { + "type": "table", + "bbox": [ + 0.143, + 0.274, + 0.46, + 0.455 + ], + "angle": 0, + "content": "
DatasetSplitSupportRefuteNEITotal
FactKGTrain4272343644-86367
Dev64266840-132666
Test43984643-9041
Total5354755127-108674
HoverTrain110237148-18171
Dev20002000-4000
Test20002000-4000
Total1502311148-26171
FEVEROUS
Dev390834815017890
Test3372297315007845
Total4911533669424287026
" + }, + { + "type": "table_caption", + "bbox": [ + 0.114, + 0.465, + 0.486, + 0.493 + ], + "angle": 0, + "content": "Table 4: Basic statistics of Hover, FEVERIOUS, and FactKG Datasets" + }, + { + "type": "table", + "bbox": [ + 0.139, + 0.522, + 0.465, + 0.649 + ], + "angle": 0, + "content": "
TypeWrittenColloquialTotal
ModelPresup
One-hop2,10615,9341,58019,530
Conjunction20,58715,90860237,097
Existence2804,0604,8329,172
Multi-hop10,23916,42060327,262
Negation1,34012,4661,80715,613
Total34,46264,7889,424108,674
" + }, + { + "type": "table_caption", + "bbox": [ + 0.115, + 0.66, + 0.486, + 0.675 + ], + "angle": 0, + "content": "Table 5: Dataset statistics of FACTKG for claim types." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.693, + 0.489, + 0.885 + ], + "angle": 0, + "content": "FEVEROUS. (Aly et al., 2021) FEVEROUS is a fact verification dataset comprising 87,026 verified claims sourced from Wikipedia (Table 4). Each claim is accompanied by evidence in the form of sentences and/or cells from tables, along with a label indicating whether the evidence supports, refutes, or does not provide enough information to verify the claim. The dataset includes metadata like annotator actions and challenge types, designed to minimize biases. It is used for tasks that involve verifying claims against both unstructured (textual) and structured (tabular) information." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.89, + 0.49, + 0.922 + ], + "angle": 0, + "content": "HoVer. (Jiang et al., 2020) HoVer is a dataset containing 26,171 samples, designed for open-domain," + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.885, + 0.262 + ], + "angle": 0, + "content": "multi-hop fact extraction and claim verification, using the Wikipedia corpus. Claims in HoVer are adapted from question-answer pairs and require the extraction of facts from multiple (up to four) Wikipedia articles to determine if the claim is supported or not supported. The complexity of HoVer, particularly in the 3/4-hop claims, is further amplified because these claims are often expressed across multiple sentences, which introduces challenges related to long-range dependencies, such as accurately resolving coreferences." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.265, + 0.885, + 0.782 + ], + "angle": 0, + "content": "FactKG. 
(Kim et al., 2023b) FactKG is a challenging fact verification dataset comprised of 108,674 samples, designed to rigorously test models' abilities to reason over structured knowledge represented in a knowledge graph. Its difficulty arises from a combination of factors. First, it demands proficiency in five distinct reasoning types: one-hop (single relationship), conjunction (combining multiple relationships), existence (verifying entity/relationship presence), multi-hop (traversing multiple relationships), and, crucially, negation (reasoning about the absence of relationships). Second, FactKG incorporates linguistic diversity, encompassing both formal, written-style claims and more challenging colloquial expressions, requiring models to handle paraphrasing, idiomatic language, and less direct wording. Third, instead of unstructured text, FactKG utilizes the DBpedia knowledge graph (derived from Wikipedia), necessitating that models correctly link entities and relations mentioned in the claim to the graph's nodes and edges, and perform complex path-based reasoning, especially for multi-hop claims. The addition of a weakly semantic knowledge source, and cross-style evaluation to assess generalizability, further contributes to the difficulty of this dataset. These features collectively make FactKG significantly more complex than datasets relying solely on unstructured text for verification. Detailed statistics of this dataset can be found in table 5. Readers can refer to table 4 for the overall basic statistics of all employed datasets for ClaimPKG." + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.806, + 0.75, + 0.823 + ], + "angle": 0, + "content": "B Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.842, + 0.884, + 0.922 + ], + "angle": 0, + "content": "We conducted all experiments on a DGX server with 8 NVIDIA A100 GPUs. The General LLM is hosted within the vLLM framework (Kwon et al., 2023). 
Below, we detail the training process of the Specialized LLM." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.519, + 0.941 + ], + "angle": 0, + "content": "5281" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.115, + 0.085, + 0.414, + 0.116 + ], + "angle": 0, + "content": "B.1 Specialized LLM Training Data Annotation" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.123, + 0.49, + 0.22 + ], + "angle": 0, + "content": "To tailor the specialized model for improved comprehension and processing of KG-specific data, we construct a dedicated dataset for training, leveraging the provided version of FactKG (Kim et al., 2023b) (illustrated in Figure 4). The annotation process consists of the following steps:" + }, + { + "type": "text", + "bbox": [ + 0.122, + 0.236, + 0.48, + 0.26 + ], + "angle": 0, + "content": "Claim: A musical artist, whose music is Post-metal, played with the band Twilight and performs for Mamiffer." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.261, + 0.432, + 0.283 + ], + "angle": 0, + "content": "Entities: [Mamiffer, Post-metal, Twilight_(band)] Evidence:" + }, + { + "type": "text", + "bbox": [ + 0.124, + 0.285, + 0.48, + 0.308 + ], + "angle": 0, + "content": "- Twilight_(band), (associatedMusicalArtist, associatedBand), Mamiffer)" + }, + { + "type": "text", + "bbox": [ + 0.124, + 0.308, + 0.48, + 0.332 + ], + "angle": 0, + "content": "- Twilight_(band), (associatedMusicalArtist, genre), Postmetal" + }, + { + "type": "list", + "bbox": [ + 0.124, + 0.285, + 0.48, + 0.332 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.181, + 0.355, + 0.42, + 0.369 + ], + "angle": 0, + "content": "Figure 4: Provided data of FactKG" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.397, + 0.49, + 0.476 + ], + "angle": 0, + "content": "Preprocessing: All entities and relations from FactKG, including the train, development, and test datasets, as well as the DBPedia KG, are normalized by splitting 
concatenated words to ensure consistency." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.487, + 0.49, + 0.921 + ], + "angle": 0, + "content": "Graph Construction: Using the provided evidence information from FactKG, we observe that while evidence may not explicitly exist in the graph, it accurately captures the underlying structure of the claim. Accordingly, for triplets with relation paths exceeding one hop, we decompose them into multiple triplets while introducing a placeholder entity, denoted as \"unknown_[index]\", to preserve structural integrity. This placeholder represents an ambiguous or missing entity that requires identification. For instance, the triplet: \"Twilight_(band), (~associatedMusicalArtist, associatedBand), Mamiffer\" is transformed into the following triplets: \"Twilight_(band), associatedBand, unknown_1\" and \"unknown_1\", associatedMusicalArtist, Mamiffer\". Additionally, entities present in the Entities set but absent from the graph are also introduced as unknown_[index]. To further enhance graph completeness, GPT-4 is employed to verify whether entities from the Entities set are explicitly mentioned in the claim. This ensures that relevant entities are either linked to existing nodes or added as placeholders. The automatic entity verification process is conducted using a prompt template, as shown in Figure 8. Additionally, the symbol \"\\~\"" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.883, + 0.164 + ], + "angle": 0, + "content": "is retained to denote inverse relations. Random shuffle among constructed triplets but preserving the sequential order of “unknown” entity is applied to improve the robustness of the model being trained." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.174, + 0.883, + 0.222 + ], + "angle": 0, + "content": "Generated Pseudo-Subgraph: The transformed claim results in the pseudo-subgraph illustrated in Figure 5." 
+ }, + { + "type": "text", + "bbox": [ + 0.518, + 0.238, + 0.682, + 0.251 + ], + "angle": 0, + "content": "Pseudo Subgraph Label:" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.252, + 0.86, + 0.263 + ], + "angle": 0, + "content": "- Twilight (band), associated musical artist, unknown_0" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.263, + 0.771, + 0.274 + ], + "angle": 0, + "content": "- unknown_0, associated band, Mamiffer" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.275, + 0.713, + 0.287 + ], + "angle": 0, + "content": "- unknown_0, genre, Post-metal" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.252, + 0.86, + 0.287 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.311, + 0.883, + 0.34 + ], + "angle": 0, + "content": "Figure 5: Pseudo-Subgraph label as the output of the data annotation process." + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.366, + 0.884, + 0.398 + ], + "angle": 0, + "content": "B.2 Training and Hyperparameter Settings of the Specialized LLM" + }, + { + "type": "table", + "bbox": [ + 0.566, + 0.414, + 0.828, + 0.541 + ], + "angle": 0, + "content": "
ParameterValue
BackboneLlama-3-Base
Qwen-2.5-Base
Learning Rate1e-5
Training Epoch1
Training Steps128
OptimizerAdamW
" + }, + { + "type": "table_caption", + "bbox": [ + 0.508, + 0.55, + 0.883, + 0.578 + ], + "angle": 0, + "content": "Table 6: Hyperparameters of the Specialized LLM in ClaimPKG." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.595, + 0.885, + 0.853 + ], + "angle": 0, + "content": "The training configurations for the Specialized LLM are summarized in Table 6. The model training is based on the Base version of Llama-3 (Llama3.2-1B, Llama-3.2-3B, Llama-3.1-8B) and Qwen 2.5 (Qwen-2.5-1.5B, Qwen-2.5-3B, Qwen-2.5-7B). These base models are selected to preserve their inherent linguistic capabilities while facilitating optimal adaptation to domain-specific tasks during fine-tuning. The training process employs the annotated dataset described in Section B.1 and is conducted over one single epoch using the AdamW (Loshchilov and Hutter, 2019) optimizer. This strategy enables the generation of multiple variants of the Specialized LLM, ensuring task-specific adaptation while maintaining robust generalization across diverse linguistic structures." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.865, + 0.833, + 0.881 + ], + "angle": 0, + "content": "C Additional Experimental Results" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.89, + 0.883, + 0.922 + ], + "angle": 0, + "content": "In this section, we present additional experimental results through a systematic analysis on the FactKG" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5282" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.493, + 0.248 + ], + "angle": 0, + "content": "development set with 2000 randomly sampled data points across claim categories. First, we provide a more detailed explanation of the evaluation metrics used. Second, we examine the performance of the specialized LLM by varying the beam size and backbone model size. 
Third, we analyze the Subgraph Retrieval by adjusting the hyperparameters \\( k_{1} \\) and \\( k_{2} \\) as explained in the 4.3, which influence the diversity and correctness of the retrieved subgraphs." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.257, + 0.227, + 0.271 + ], + "angle": 0, + "content": "C.1 Metrics" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.278, + 0.49, + 0.406 + ], + "angle": 0, + "content": "The specialized LLM's generation of pseudosubgraphs plays a crucial role in ClaimPKG's performance. We evaluated the specialized LLM's performance using four metrics: claim structure coverage (coverage), entity correctness (correctness), unique triplet count, and average end-to-end accuracy. While the final metric is straightforward, the three former metrics can be described as follows:" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.41, + 0.49, + 0.492 + ], + "angle": 0, + "content": "(1) Structure coverage quantifies the alignment between the LLM-generated pseudo-graph and the reference claim graph in the FactKG dataset. Specifically, for a generated graph \\(P\\) and reference graph \\(Q\\), coverage is computed as:" + }, + { + "type": "equation", + "bbox": [ + 0.114, + 0.501, + 0.487, + 0.536 + ], + "angle": 0, + "content": "\\[\nc o v e r a g e (P, Q) = \\frac {\\# (P . t r i p l e t s \\cap Q . t r i p l e t s)}{\\# (Q . t r i p l e t s)}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.545, + 0.49, + 0.626 + ], + "angle": 0, + "content": "(2) Entity correctness quantifies the correctness of a claim's extracted entities, i.e., whether these entities exist in the KG. Specifically, for a generated graph \\( P \\) and a knowledge graph \\( \\mathcal{G} \\), correctness is computed as:" + }, + { + "type": "equation", + "bbox": [ + 0.114, + 0.637, + 0.496, + 0.671 + ], + "angle": 0, + "content": "\\[\n\\operatorname {c o r r e c t n e s s} (P, \\mathcal {G}) = \\frac {\\# (P . e n i t i e s \\cap \\mathcal {G} . 
e n t i t i e s)}{\\# (P . e n t i t i e s)}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.681, + 0.49, + 0.745 + ], + "angle": 0, + "content": "(3) Unique triplet count measures the diversity of generated graph structures, with higher counts potentially enabling better subgraph retrieval through increased coverage of possible relationships." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.756, + 0.471, + 0.787 + ], + "angle": 0, + "content": "C.2 Different Beam Sizes of the Specialized LLM" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.793, + 0.49, + 0.922 + ], + "angle": 0, + "content": "To evaluate the LLM's decoding strategy across different beam sizes, we utilized three average accuracy, structure coverage and unique triplet count as metrics. Table 7 details the impact of the number of beam sizes on the previously mentioned metrics on the FactKG dev set. Both Llama and Qwen models demonstrate consistent improvements in average performance and claim structure coverage" + }, + { + "type": "table", + "bbox": [ + 0.512, + 0.083, + 0.885, + 0.234 + ], + "angle": 0, + "content": "
BackboneBeam SizeAverage AccuracyStructure CoverageUnique Triplets
Llama-3BBeam 179.7876.514.48
Beam 381.8081.276.44
Beam 582.0483.028.39
Beam 1082.3384.6113.83
Qwen-3BBeam 178.8477.953.82
Beam 380.7682.665.16
Beam 581.4183.586.73
Beam 1082.1984.629.58
" + }, + { + "type": "table_caption", + "bbox": [ + 0.509, + 0.244, + 0.882, + 0.272 + ], + "angle": 0, + "content": "Table 7: Performance metrics for different models on FactKG dev set." + }, + { + "type": "table", + "bbox": [ + 0.512, + 0.288, + 0.883, + 0.374 + ], + "angle": 0, + "content": "
Beam SizeGen Graph (s)Retrieve (s)Reason (s)
beam 11.020.242.19
beam 32.160.382.22
beam 53.520.502.33
beam 1035.181.012.88
" + }, + { + "type": "table_caption", + "bbox": [ + 0.509, + 0.383, + 0.883, + 0.412 + ], + "angle": 0, + "content": "Table 8: Computing time for different beam sizes on FactKG dev set." + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.438, + 0.884, + 0.55 + ], + "angle": 0, + "content": "as beam size increases from 1 to 10. At beam size 10, Llama achieves \\(84.61\\%\\) coverage while Qwen reaches \\(84.62\\%\\), showing comparable performance at higher beam sizes. The unique triplet count shows more pronounced growth with larger beam sizes, with Llama generating 13.83 unique triplets and Qwen 9.58 triplets at beam size 10." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.552, + 0.884, + 0.777 + ], + "angle": 0, + "content": "However, table 8 shows this improved performance comes with significant computational overhead. Table 8 details on the time taken for generating pseudo-graphs, retrieving sub-graphs and reasoning with retrieved evidence. Most notably, while the time required for retrieving sub-graphs and reasoning with evidence only increase marginally as the beam size increase, this figure for pseudo-graph generation increases dramatically as the beam size goes to 10, from 1.02s at beam size 1 to 35.18s at beam size 10 - a \\(34.5 \\times\\) increase. Based on this measurement, in our official framework we select beam size \\(= 5\\) to balance the performance gain and computational costs." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.788, + 0.871, + 0.818 + ], + "angle": 0, + "content": "C.3 Different Model Sizes of the Specialized LLM" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.825, + 0.885, + 0.922 + ], + "angle": 0, + "content": "To evaluate how model size affects performance, we compare different variants of Llama and Qwen models ranging from 1B to 8B parameters. 
Table 9 presents the performance on the FactKG dev set across three key metrics: average performance, structure coverage, and unique triplets generated," + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.928, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5283" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.114, + 0.085, + 0.357, + 0.101 + ], + "angle": 0, + "content": "which was explained previously." + }, + { + "type": "table", + "bbox": [ + 0.134, + 0.114, + 0.47, + 0.247 + ], + "angle": 0, + "content": "
BackboneAverage AccuracyStructure CoverageUnique Triplets
Llama - 1B80.2678.988.97
Llama - 3B82.0483.028.39
Llama - 8B82.6382.849.34
Qwen - 1.5B80.4881.346.58
Qwen - 3B81.4183.586.73
Qwen - 7B81.7982.887.05
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.257, + 0.486, + 0.286 + ], + "angle": 0, + "content": "Table 9: Performance metrics for different models on the FactKG dev set." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.303, + 0.49, + 0.543 + ], + "angle": 0, + "content": "For both model families, we observe improvements in performance as model size increases, though with different patterns. The Llama family shows more notable gains, with average performance increasing from \\(80.26\\%\\) (1B) to \\(82.63\\%\\) (8B), while Qwen demonstrates more modest improvements from \\(80.48\\%\\) (1.5B) to \\(81.79\\%\\) (7B). Structure coverage peaks with the 3B variants for both families - Llama-3B achieving \\(83.02\\%\\) and Qwen-3B reaching \\(83.58\\%\\). The models keep the increasing trend in their triplet generation patterns: Llama maintains relatively stable unique triplet counts (8.39 - 9.34) across sizes, while the figures for Qwen are (6.58 - 7.05) as the model size increases." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.545, + 0.49, + 0.673 + ], + "angle": 0, + "content": "Overall, scaling to larger models shows slight improvements while increasing computational requirements. Based on these results, we select 3B variants of both model families in our official implementation, which offer an optimal balance of performance and model size, with Llama-3B and Qwen-3B showing comparable effectiveness across all metrics." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.686, + 0.483, + 0.717 + ], + "angle": 0, + "content": "C.4 Different Hyperparameters of Subgraph Retrieval" + }, + { + "type": "table", + "bbox": [ + 0.171, + 0.733, + 0.433, + 0.817 + ], + "angle": 0, + "content": "
Hyper ParamsAverage AccuracyUnique Triplets
k1=5;k2=382.0011.42
k1=3;k2=182.048.39
k1=1;k2=181.873.58
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.826, + 0.49, + 0.87 + ], + "angle": 0, + "content": "Table 10: Performance of different subgraph retrieval configurations \\( k_{1} \\) and \\( k_{2} \\) with Llama-3.2-3B + Llama-3.3-70B on the FactKG dev set." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.89, + 0.49, + 0.922 + ], + "angle": 0, + "content": "To assess the impact of different hyperparameters in the subgraph retrieval algorithm on overall" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.885, + 0.181 + ], + "angle": 0, + "content": "performance, we systematically vary these hyperparameters while keeping the specialized LLM and general LLM fixed as Llama-3.2-3B and Llama-3.3-70B, respectively. Table 10 presents the performance across two key metrics: average accuracy and the number of unique triplets generated." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.182, + 0.885, + 0.407 + ], + "angle": 0, + "content": "The results indicate that increasing \\( k_{1} \\) and \\( k_{2} \\) leads to a higher number of unique triplets, suggesting greater diversity in retrieved claims. However, this increase does not consistently translate to overall performance gains, which fall in the range of 81.87 - 82.00. Notably, performance peaks at \\( k_{1} = 3 \\) and \\( k_{2} = 1 \\), suggesting that a more focused retrieval strategy is sufficient to achieve optimal performance, whereas excessively high \\( k \\) values may introduce noise or irrelevant information. Based on these results, we select \\( k_{1} = 3 \\) and \\( k_{2} = 1 \\) in our official implementation, which balancing between information discovery and computing required." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.417, + 0.869, + 0.449 + ], + "angle": 0, + "content": "C.5 Different Methods for Relation Scoring Function" + }, + { + "type": "table", + "bbox": [ + 0.548, + 0.464, + 0.847, + 0.555 + ], + "angle": 0, + "content": "
MethodAverage Accuracy
Embedding Based84.64
Rerank Based84.73
Fuzzy Matching82.19
Exact Matching81.57
" + }, + { + "type": "table_caption", + "bbox": [ + 0.508, + 0.564, + 0.883, + 0.594 + ], + "angle": 0, + "content": "Table 11: Performance of different scoring approach of the Subgraph Retrieval on the FactKG test set" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.613, + 0.885, + 0.757 + ], + "angle": 0, + "content": "To assess the impact of different scoring mechanisms on performance, we vary the scoring function and evaluate the test set of FactKG while fix the Specialized LLM and the General LLM. Specifically, we explore multiple strategies for the Relation Scoring Function (Sim), as described in Section 4.3, incorporating diverse techniques such as embedding-based retrieval, reranking, fuzzy text matching (Wikipedia, 2025a), and exact matching." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.758, + 0.886, + 0.886 + ], + "angle": 0, + "content": "For embedding-based and reranking approaches, we employ state-of-the-art pre-trained models, namely BGE-Large-EN-v1.5² and BGE-Reranker-Large³, as provided by (Xiao et al., 2023). Experimental results indicate that deep learning-based methods, such as embedding and reranking, achieve superior performance, with accuracy scores of 84.64 and 84.56, respectively. 
In contrast," + }, + { + "type": "page_footnote", + "bbox": [ + 0.53, + 0.894, + 0.83, + 0.907 + ], + "angle": 0, + "content": "\\(^{2}\\)https://huggingface.co/BAAI/bge-large-en-v1.5" + }, + { + "type": "page_footnote", + "bbox": [ + 0.53, + 0.907, + 0.833, + 0.921 + ], + "angle": 0, + "content": "3https://huggingface.co/BAAI/bge-reranker-large" + }, + { + "type": "list", + "bbox": [ + 0.53, + 0.894, + 0.833, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5284" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.49, + 0.165 + ], + "angle": 0, + "content": "text-matching-based methods yield lower accuracy, with fuzzy matching and exact matching scoring 82.19 and 81.57, respectively. These findings highlight the effectiveness of deep learning-based approaches." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.166, + 0.49, + 0.277 + ], + "angle": 0, + "content": "We recommend embedding-based retrieval as it enables pre-indexing of corpus relations. This allows precomputation of relation embeddings and requires encoding only the query relation for new Pseudo Subgraphs, eliminating the need to re-encode existing knowledge graph relations during inference." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.289, + 0.31, + 0.306 + ], + "angle": 0, + "content": "D Algorithm Details" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.314, + 0.49, + 0.395 + ], + "angle": 0, + "content": "The detailed implementation of the Entity Trie-constrained decoding algorithm is provided as the pseudo-code in Algorithm 1 and the Algorithm 2 details the implementation of the Subgraph Retrieval." 
+ }, + { + "type": "title", + "bbox": [ + 0.114, + 0.406, + 0.251, + 0.423 + ], + "angle": 0, + "content": "E Case Study" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.431, + 0.49, + 0.655 + ], + "angle": 0, + "content": "We present the case study results of ClaimPKG on the FactKG dataset in Tables 12 and 13. Each table includes the claim \\( c \\), pseudo-subgraphs \\( P_{s} \\), retrieved subgraphs \\( S_{c} \\), final justification \\( j \\), and verdict \\( v \\). Table 12 showcases correctly predicted examples, demonstrating ClaimPKG's ability to accurately capture claim structures and generate well-grounded justifications. Conversely, Table 13 highlights incorrectly predicted cases of two error types as detailed in Section 5.3. The first two examples illustrate Reasoning Errors, while the third represents a Retrieval Error. These insights serve as a foundation for future improvements, emphasizing key areas for future refinement." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.668, + 0.312, + 0.685 + ], + "angle": 0, + "content": "F Prompt Templates" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.693, + 0.49, + 0.757 + ], + "angle": 0, + "content": "For better reproducibility, we present all prompt templates in the appendix. Below is a quick reference list outlining the prompt templates and their usages:" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.765, + 0.486, + 0.811 + ], + "angle": 0, + "content": "- Figure 6: Prompt the General LLM to reason on the input claim and retrieved subgraphs to produce justification and final verdict." + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.823, + 0.486, + 0.869 + ], + "angle": 0, + "content": "Figure 7: Few-shot prompts the General LLM to generate a Pseudo Subgraph with provided examples." + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.881, + 0.488, + 0.911 + ], + "angle": 0, + "content": "- Figure 8: Annotate the inside and outside entities of the input claim for the training dataset." 
+ }, + { + "type": "list", + "bbox": [ + 0.137, + 0.765, + 0.488, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5285" + } + ], + [ + { + "type": "code_caption", + "bbox": [ + 0.125, + 0.128, + 0.553, + 0.143 + ], + "angle": 0, + "content": "Algorithm 1: LLM Decoding with Entity-Trie Constraint" + }, + { + "type": "code", + "bbox": [ + 0.13, + 0.145, + 0.856, + 0.425 + ], + "angle": 0, + "content": "Input:Specialized LLM, Input claim \\(c\\) Entity TriE T \nOutput:Pseudo-Subgraph P \nInitialize: \\(\\mathcal{P}\\gets \\emptyset\\) // Initialize pseudo subgraph \n\\(h_0\\gets\\) InitializeHiddenStates(); constrained \\(\\leftarrow\\) False; \nFunction ConstrainedDecoding(LLM,c,T): \nwhile True do \n\\(p_t,h_t\\gets LLM(\\mathcal{P},c,h_{t - 1})\\) // Compute token probabilities and update hidden states if constrained then \nprefix \\(\\leftarrow\\) ExtractPrefix(P); // Retrieve tokens from last unclosed to the last allowed \\(\\leftarrow\\) T.lookup(prefix);// Retrieve allowed tokens from valid continuations in T \\(p_t\\gets\\) MaskProb \\((p_t,\\) allowed); // Impose probabilities of invalid tokens to be 0 \nnew_token \\(\\leftarrow\\) arg max \\(p_t\\) . // Select new token for P \n\\(\\mathcal{P}\\gets \\mathcal{P}\\cup \\{\\text{new_token}\\}\\) . if new_token \\(= = < e>\\) then \\(\\sqsubset\\) constrained \\(\\leftarrow\\) True; if new_token \\(= = < / e>\\) then \\(\\sqsubset\\) constrained \\(\\leftarrow\\) False; if new_token \\(= = EOS\\) then \\(\\sqsubset\\) break; \nreturn P" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.527, + 0.3, + 0.539 + ], + "angle": 0, + "content": "GENERAL REASONING" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.551, + 0.87, + 0.588 + ], + "angle": 0, + "content": "Task: Verify whether the fact in the given sentence is true or false based on the provided graph triplets. 
Use only the information in the triplets for verification." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.599, + 0.593, + 0.611 + ], + "angle": 0, + "content": "- The triplets provided represent all relevant knowledge that can be retrieved." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.611, + 0.661, + 0.623 + ], + "angle": 0, + "content": "- If the fact is a negation and the triplets do not include the fact, consider the fact as true." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.623, + 0.871, + 0.657 + ], + "angle": 0, + "content": "- Ignore questions and verify only the factual assertion within them. For example, in the question \"When was Daniel Martínez (politician) a leader of Montevideo?\", focusing on verifying the assertion \"Daniel Martínez (politician) a leader of Montevideo\"." + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.658, + 0.87, + 0.682 + ], + "angle": 0, + "content": "- Interpret the “\\(\\sim\\)” symbol in triplets as indicating a reverse relationship. For example: “A \\(\\sim\\) south of B” means “B is north of A”." 
+ }, + { + "type": "list", + "bbox": [ + 0.126, + 0.599, + 0.871, + 0.682 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.693, + 0.248, + 0.705 + ], + "angle": 0, + "content": "Response Format:" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.706, + 0.672, + 0.765 + ], + "angle": 0, + "content": "Provide your response in the following JSON format without any additional explanations: \n{ \"rationale\": \"A concise explanation for your decision\", \"verdict\": \"true/false as the JSON value\" }" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.776, + 0.186, + 0.789 + ], + "angle": 0, + "content": "Triplets:" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.789, + 0.198, + 0.801 + ], + "angle": 0, + "content": "{triplets}" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.812, + 0.175, + 0.823 + ], + "angle": 0, + "content": "Claim:" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.824, + 0.19, + 0.837 + ], + "angle": 0, + "content": "{claim}" + }, + { + "type": "image_caption", + "bbox": [ + 0.264, + 0.863, + 0.733, + 0.878 + ], + "angle": 0, + "content": "Figure 6: Prompt template for the general LLM to perform reasoning" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5286" + } + ], + [ + { + "type": "code_caption", + "bbox": [ + 0.125, + 0.089, + 0.375, + 0.103 + ], + "angle": 0, + "content": "Algorithm 2: Subgraph Retrieval" + }, + { + "type": "algorithm", + "bbox": [ + 0.131, + 0.106, + 0.856, + 0.91 + ], + "angle": 0, + "content": "Input: Knowledge graph \\(\\mathcal{G}\\) Pseudo Subgraph List \\(P_{c}\\) Top \\(k_{1}\\) Candidate Unknown Entities, Top \\(k_{2}\\) Complete Triplets \nOutput:Combined subgraph \\(S_{c}\\) \nFunction SubgraphRetrieval \\((\\mathcal{G},\\mathcal{P}_c,k_1,k_2)\\) .. \n\\(S\\gets \\emptyset\\) . 
\nforeach \\(\\mathcal{P}\\in \\mathcal{P}_c\\) do \n\\(S\\gets S\\cup\\) RetrieveSingleSubgraph \\((\\mathcal{G},\\mathcal{P},k_1,k_2)\\) // Process each pseudo subgraph \nreturn JoinSubgraphs \\((S)\\) // Combine subgraphs \nFunction RetrieveSingleSubgraph \\((\\mathcal{G},\\mathcal{P},k_1,k_2)\\) .. \n\\((T_{comp},T_{inc})\\leftarrow\\) CategorizeTriplets( \\(\\mathcal{P}\\) );//Split into complete/incomplete triplets \n\\(S_{inc}\\gets\\) RetrieveIncomplete \\((\\mathcal{G},T_{inc},k_1)\\) . \n\\(S_{comp}\\gets\\) RetrieveComplete \\((\\mathcal{G},T_{comp},k_1,k_2)\\) . \nreturn \\(S_{inc}\\cup S_{comp}\\) \nFunction RetrieveIncomplete \\((\\mathcal{G},T_{inc},k_1)\\) .. \n\\(S\\gets \\emptyset\\) . \n\\(G\\gets\\) GroupTripletsByUnknown \\((T_{inc})\\) //Group by unknown entity \nforeach \\(g\\in G\\) do \n\\((E_u,R_u)\\leftarrow\\) ExtractPseudoStructure \\((g)\\) //Extract entities and relations associated to unknown entity \n\\(C\\gets \\emptyset\\) . \nforeach \\((e,r)\\in (E_u,R_u)\\) do \n\\((C_e,\\mathrm{scores})\\leftarrow\\) GetCandidatesAndScores \\((G,e,r)\\) . \n\\(C\\gets C\\cup \\{(C_e,\\mathrm{scores})\\}\\) . \n\\(C =\\) AggregateGlobalScore(C); //Aggregate candidate scores globally \\(C^{*}\\gets\\) RankTopKCandidates \\((C,k_{1})\\) //Select top- \\(k_{1}\\) candidates \n\\(S\\gets S\\cup\\) GetTriplets \\((C^{*},g)\\) . \nreturn \\(S\\) \nFunction GetCandidatesAndScores \\((G,e,r)\\) .. \n\\(R_{act}\\gets\\) RetrieveActualConnectedRelations \\((G,e)\\) . \n\\(E_{act}\\gets\\) RetrieveActualConnectedEntities \\((G,e)\\) . \n\\(r\\_ score s\\gets\\) RelationScore(r, \\(R_{act}\\) . \n\\(S\\gets \\emptyset\\) . \nforeach \\(e^{\\prime}\\in E_{act}\\) do \n\\(s\\gets\\) MaxRelatedRelationScores(e',r Scores); \n\\(S\\gets S\\cup \\{(e^{\\prime},s)\\}\\) . \nreturn \\(S\\) // Score connected entities \nFunction AggregateGlobalScore \\((C)\\) .. 
\n//Calculate new scores and reassign for each \\(C\\_ e\\) \nforeach \\((C_e,\\mathrm{scores})\\in C\\) do \nforeach \\((c,s)\\in (C_e,\\mathrm{scores})\\) do \n\\(s\\gets\\) Sum([s'(c) for \\((C',s')\\) in \\(C\\) if \\(c\\in C')\\) \nreturn \\(C\\) . \nFunction RankTopKCandidates \\((C,k_1)\\) .. \n\\(C^{*}\\gets \\emptyset\\) . \nforeach \\((C_e,\\mathrm{scores})\\in C\\) do \n\\(C_e^*\\gets\\) TopKCandidates \\((C_e,\\mathrm{scores},k_1)\\) . \n\\(C^{*}\\gets C^{*}\\cup C_{e}^{*}\\) . \nreturn \\(C^{*}\\) //Rank candidates per unknown entity \nFunction RetrieveComplete \\((G,T_{comp},k_1,k_2)\\) .. \n\\(S\\gets \\emptyset\\) . \nforeach \\((h,r,t)\\in T_{comp}\\) do if ExistConnection \\((h,t)\\) then \\(T\\gets\\) GetTripletsWithHeadAndTail \\((G,h,t)\\) scores \\(\\leftarrow \\emptyset\\) . foreach \\(t'\\in T\\) do scores \\(\\leftarrow\\) scores U RelationScore(r, \\(t'\\).relation); \\(S\\gets S\\cup\\) TopK(T,scores, \\(k_{2}\\) ); //Rank connected triplets else \\(S_{1}\\gets\\) RetrieveIncomplete(G,[(unknown,r,t)], \\(k_{1}\\)) \\(S_{2}\\gets\\) RetrieveIncomplete(G,[(h,r,unknown)], \\(k_{1}\\)) \\(S\\gets S\\cup (S_1\\cup S_2)\\) // Handle missing connection" + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.52, + 0.941 + ], + "angle": 0, + "content": "5287" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.115, + 0.131, + 0.885, + 0.844 + ], + "angle": 0, + "content": "
Claim cPseudo-Subgraphs PcRetrieved Subgraphs ScJustification j; Verdict v
The 11th Mississippi Infantry Monument is not in Gettysburg, Pennsylvania.P1: 11th Mississippi Infantry Monument | location | Gettysburg, Pennsylvania(11th Mississippi Infantry Monument, municipality, Gettysburg, Pennsylvania)Justification: The triplets provided include '11th Mississippi Infantry Monument', 'municipality', 'Gettysburg, Pennsylvania' and '11th Mississippi Infantry Monument', 'state', 'Pennsylvania', indicating the monument's location as Gettysburg, Pennsylvania.
P2: Gettysburg, Pennsylvania | -location | 11th Mississippi Infantry Monument(11th Mississippi Infantry Monument, website, 11th mississippianemy monument)
11th Mississippi Infantry Monument | location | unknown_0(U.S. Route 140, location, Gettysburg)
P3: 11th Mississippi Infantry Monument | location | Gettysburg(Pennsylvania Route 34, location, Gettysburg)
11th Mississippi Infantry Monument | location | Unknown_0(U.S. Route 30 in Pennsylvania, location, Gettysburg)
P4: 11th Mississippi Infantry Monument | location | Pennsylvania(11th Mississippi Infantry Monument, state, Pennsylvania)
11th Mississippi Infantry Monument | is part of military conflict | Gettysburg(11th Mississippi Infantry Monument, region, Adams County, Pennsylvania)
11th Mississippi Infantry Monument | location | Pennsylvania(11th Mississippi Infantry Monument, country, United States)
11th Mississippi Infantry Monument | location | Unknown_0(11th Mississippi Infantry Monument, category, Contributing property)
P5: 11th Mississippi Infantry Monument | location | Gettysburg, Pennsylvania(Lutheran Theological Seminary at Gettysburg, city, Gettysburg)
(11th Mississippi Infantry Monument, range, Semi-nary Ridge)Verdict: False
It was released in Hardcover by J.V. JonesP1: Hardcover | -media type | unknown_0(Hardcover, -media type, A Fortress of Grey Ice)Justification: The triplets show that 'A Fortress of Grey Ice' is a book by 'J.V. Jones' and it is of 'Hardcover' media type, which supports the claim.
unknown_0 | author | J.V. Jones(Hardcover, -media type, A Clockwork Orange)
P2: unknown_0 | author | J.V. Jones(Hardcover, -media type, The Bell Curve)
unknown_0 | media type | Hardcover(A Fortress of Grey Ice, author, J.V. Jones)
unknown_0 | -genre | unknown_1(A Cavern of Black Ice, author, J.V. Jones)
P3: J.V. Jones | -author | unknown_0(The Barbed Coil, author, J.V. Jones)
unknown_0 | media type | Hardcover(Hardcover, -media type, The Andromeda Strain)
P4: Hardcover | -media type | J.V. Jones(1963, -date of birth, J.V. Jones)
J.V. Jones | -author | unknown_0(Liverpool, England, -place of birth, J.V. Jones)
unknown_0 | previous work | unknown_1
P5: Hardcover | -media type | unknown_0
unknown_0 | author | J.V. Jones
A person was born in the County of Ty-rol and later died in Austria which is led by Doris Bures.P1: Austria | leader | Doris Bures(Austria, leader, Doris Bures)Justification: The triplets show that Kurt Schuschnigg, Alfons Gorbach, and Josef Hellensteiner were born in the County of Tyrol and died in Austria. Additionally, Doris Bures is the leader of Austria.
Austria | -death place | unknown_0(Austria, -death place, Hans Hörberg)
unknown_0 | death place | County of Tyrol(Austria, -death place, Edgar Meyer (painter) )
P2: Austria | leader name | Doris Bures(Austria, -death place, Rakhat Aliyev)
Austria | -death place | unknown_0(Ahn Hörberg, death place, County of Tyrol)
unknown_0 | death place | Austria(Edgar Meyer (painter), death place, County of Tyrol)
unknown_0 | successor | Doris Bures(Friedrich Hasenöhr, death place, County of Tyrol)
P3: County of Tyrol | -birth place | unknown_0(County of Tyrol, -birth place, Kurt Schuschnigg)
unknown_0 | death place | Austria(Sky, death place, Alfons Gorbach)
unknown_0 | successor | Doris Bures(County of Tyrol, -birth place, Josef Hellensteiner)
P4: Doris Bures | -leader | Austria(Kurt Schuschnigg, death place, Austria)
Austria | -death place | unknown_0(Alfons Gorbach, death place, Austria)
unknown_0 | birth place | County of Tyrol(Josef Hellensteiner, death place, Austria)
P5: Austria | leader | Doris Bures(Barbara Prammer, successor, Doris Bures)
Austria | -death place | unknown_0(Werner Faymann, successor, Doris Bures)
unknown_0 | birth place | County of Tyrol(Alois Stöger, -successor, Doris Bures)
The New Britain Transit is the operat-ing organisation of Agra Airport which is located in India and has the location identifier VIAG.P1: Agra Airport | location | New Britain Transit(Agra Airport, location, India)Justification: The operator of Agra Airport is listed as 'Indian Air Force' and 'Airports Authority of India', not 'New Britain Transit', which operates 'Connecticut Transit New Britain and Bris-tol'
Agra Airport | location | India(Agra Airport, location, Uttar Pradesh)
Agra Airport | faa | VIAG(Agra Airport, location, Agra)
P2: Agra Airport | location | India(Connecticut Transit New Britain and Bristol, oper-ator, New Britain Transit)
Agra Airport | operator | New Britain Transit(Agra Airport, cao location identifier, VIAG)
Agra Airport | faa | VIAG(Agra Airport, operator, Airports Authority of India)
Agra Airport | faa | VIAG(Agra Airport, elevation, 167.94)Verdict: False
P3: Agra Airport | location | India
Agra Airport | operator | New Britain Transit
Agra Airport | location | India
Agra Airport | faa | VIAG
" + }, + { + "type": "table_caption", + "bbox": [ + 0.187, + 0.854, + 0.809, + 0.869 + ], + "angle": 0, + "content": "Table 12: Case study of ClaimPKG on the FactKG dataset with correctly predicted samples." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.52, + 0.941 + ], + "angle": 0, + "content": "5288" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.115, + 0.115, + 0.884, + 0.862 + ], + "angle": 0, + "content": "
Claim cPseudo-Subgraphs PcRetrieved Subgraphs ScJustification j; Verdict v
When did Indian Air Force operate Agra Airport?P1: Agra Airport || operator || Indian Air Force(Agra Airport, operator, Indian Air Force)Justification: The triplets do not provide information about the specific time period when the Indian Air Force operated Agra Airport. However, they do confirm that the Indian Air Force is the operator of Agra Airport, implying that the operation is either current or has occurred at some point, but the exact timing is not specified. Verdict: False
P2: Indian Air Force || operator || Agra Airport(Agra Airport, runway length, 2743.0)
Agra Airport || ~runway designation || unknown_0(Agra Airport, runway length, 1818.0)
Agra Airport || ~runway designation || Indian Air Force(Agra Airport, location, India)
Agra Airport || ~location || unknown_0(Agra Airport, location, Uttar Pradesh)
unknown_0 || operator || Indian Air Force(Agra Airport, location, Agra)
P4: Agra Airport || operator || Indian Air Force(Bhuj Airport, operator, Indian Air Force)
P5: Agra Airport || operator || Indian Air Force(Bhuj Rudra Mata Air Force Base, operator, Indian Air Force)
(Car Nicobar Air Force Base, operator, Indian Air Force)
I imagined that Lady Anne Monson not lived in the Cape Verde whose capital was Winchester.P1: Cape Verde || capital || Winchester(Cape Verde, capital, Praia)Justification: The capital of Cape Verde is Praia, not Winchester. Winchester is the capital of Wessex and the Kingdom of England. Lady Anne Monson's residence is listed as India, and her birthplace is listed as Dar-lington and the Kingdom of England, with no mention of Cape Verde as her residence. Verdict: True
Cape Verde || ~residence || Lady Anne Monson(Cape Verde, currency, Cape Verdean esculo)
P2: Lady Anne Monson || residence || Cape Verde(Cape Verde, ~headquarter, Movement for Democracy (Cape Verde))
Cape Verde || capital || Winchester(Wessex, capital, Winchester)
P3: Anne Monzon || residence || Cape Verde(Kingdom of England, capital, Winchester)
Cape Verde || capital || Winchester(River Itchen, Hampshire, city, Winchester)
P4: Cape Verde || capital || Winchester(Cape Verde, ~birth place, Aristides Pereira)
Cape Verde || ~residence || Lady Anne Monson(Cape Verde, ~birth place, Pedro Pires)
P5: Cape Verde || capital || Winchester(Cape Verde, ~birth place, José Maria Neves)
Cape Verde || ~death place || Lady Anne Monson(India, ~residence, Lady Anne Monson)
(Kingdom of England, ~birth place, Lady Anne Monson)
(Darlington, ~birth place, Lady Anne Monson)
(Anne Monzon, ~cinematography, "Wont Last a Day Without You")
(Anne Monzon, ~cinematography, Pop Class)
(Anne Monzon, ~cinematography, Namets!)
(Cape Verde, ~death place, Corsino Fortes)
(Cape Verde, ~death place, Cesária Évora)
(Cape Verde, ~death place, Baltasar Lopes da Silva)
(1776-02-18, ~death date, Lady Anne Monson)
(1776, ~death year, Lady Anne Monson)
The operating organization for Agra Airport is the Indian Air Force who deploy the HAL Light Combat Helicopter and the Boeing C17 Globemaster III transport aircraft.P1: Agra Airport || operator || Indian Air Force(Agra Airport, operator, Indian Air Force)Justification: The triplets confirm Agra Airport is operated by the Indian Air Force, the Indian Air Force deploys the HAL Light Combat Helicopter, and the Boeing C-17 Globemaster III is indeed a transport aircraft, but there is no direct information linking the Indian Air Force at Agra Airport to the deployment of the Boeing C-17 Globemaster III. However, given the information provided, we can infer the involvement of the Indian Air Force with the mentioned aircraft types, albeit indirectly for the Boeing C-17 Globemaster III. Verdict: False
Agra Airport || ~location || Boeing C-17 Globemaster III(Agra Airport, location, Uttar Pradesh)
Agra Airport || ~location || Boeing C-17 Globemaster III(Agra Airport, location, Agra)
Agra Airport || ~location || Boeing C-17 Globemaster III(2012 in spaceflight, site, Boeing C-17 Globemaster III)
Agra Airport || ~location || Boeing C-17 Globemaster III(2013 in spaceflight, site, Boeing C-17 Globemaster III)
Agra Airport || ~location || Boeing C-17 Globemaster III(2009 in spaceflight (July-December), site, Boeing C-17 Globemaster III)
Agra Airport || ~location || Boeing C-17 Globemaster III(Hindustan Aeronautics, manufacturer, HAL Light Combat Helicopter)
Agra Airport || ~location || Boeing C-17 Globemaster III(Boeing C-17 Globemaster III, aircraft transport, United States Air Force)
Agra Airport || operator || Indian Air Force(Boeing C-17 Globemaster III, aircraft transport, Royal Air Force)
Agra Airport || runway length || Boeing C-17 Globemaster III(Boeing C-17 Globemaster III, aircraft transport, Royal Australian Air Force)
Agra Airport || ~location || HAL Light Combat Helicopter(2743.0, runway length, Agra Airport)
Agra Airport || ~city || HAL Light Combat Helicopter(1818.0, runway length, Agra Airport)
Agra Airport || ~city || Boeing C-17 Globemaster III(HAL Light Combat Helicopter, aircraft helicopter, Indian Air Force)
(Aircraft, icoa location identifier, VIAG)
(Airlift, type, Boeing C-17 Globemaster III)
(United States, origin, Boeing C-17 Globemaster III)
(In service, status, Boeing C-17 Globemaster III)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.18, + 0.871, + 0.816, + 0.886 + ], + "angle": 0, + "content": "Table 13: Case study of ClaimPKG on the FactKG dataset with incorrectly predicted samples." + }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.52, + 0.941 + ], + "angle": 0, + "content": "5289" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.128, + 0.115, + 0.465, + 0.127 + ], + "angle": 0, + "content": "FEWSHOT PSEUDO SUBGRAPH GENERATION" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.139, + 0.871, + 0.164 + ], + "angle": 0, + "content": "Task: Generate a reference graph to verify the following claim. Only return the subgraphs following the format of provided examples and do NOT include other unnecessary information." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.175, + 0.292, + 0.187 + ], + "angle": 0, + "content": "Here are some examples:" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.198, + 0.871, + 0.223 + ], + "angle": 0, + "content": "Claim: Akeem Priestley played for club RoPS and currently plays for the Orange County Blues FC, which is managed by Oliver Wyss." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.223, + 0.203, + 0.235 + ], + "angle": 0, + "content": "Subgraphs:" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.235, + 0.549, + 0.27 + ], + "angle": 0, + "content": "Orange County Blues FC || manager || Oliver Wyss \nOrange County Blues FC || clubs || Akeem Priestley \nAkeem Priestley || team || RoPS" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.281, + 0.534, + 0.294 + ], + "angle": 0, + "content": "Claim: He is a Rhythm and Blues singer from Errata, Mississippi!" 
+ }, + { + "type": "title", + "bbox": [ + 0.128, + 0.294, + 0.205, + 0.306 + ], + "angle": 0, + "content": "Subgraphs:" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.306, + 0.458, + 0.341 + ], + "angle": 0, + "content": " || genre || unknown_0 \nunknown_0 || birth place || Errata, Mississippi \nunknown_0 || background || unknown_1" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.352, + 0.871, + 0.376 + ], + "angle": 0, + "content": "Claim: Arròs negro is a traditional dish from Spain, and from the Catalonia region, which is led by the Maria Norrfalk." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.377, + 0.205, + 0.389 + ], + "angle": 0, + "content": "Subgraphs:" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.389, + 0.484, + 0.424 + ], + "angle": 0, + "content": "\\(<\\mathrm{e}>\\) Arròs negro
|| country || Spain \n\\(<\\mathrm{e}>\\) Arròs negro || region || Catalonia \n\\(<\\mathrm{e}>\\) Catalonia || leader name || Maria Norrfalk" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.435, + 0.462, + 0.447 + ], + "angle": 0, + "content": "Claim: Well, Jason Sherlock did not have a nickname!" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.448, + 0.205, + 0.459 + ], + "angle": 0, + "content": "Subgraphs:" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.459, + 0.429, + 0.471 + ], + "angle": 0, + "content": "\\(<\\mathrm{e}>\\) Jason Sherlock | | nickname | | unknown_0" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.482, + 0.596, + 0.495 + ], + "angle": 0, + "content": "Claim: Garlic is the main ingredient of Ajoblanco, which is from Andalusia." + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.495, + 0.205, + 0.506 + ], + "angle": 0, + "content": "Subgraphs:" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.506, + 0.422, + 0.53 + ], + "angle": 0, + "content": "\\(< \\mathrm{e}>\\) Ajoblanco || region || Andalusia \n\\(< \\mathrm{e}>\\) Ajoblanco || ingredient || Garlic" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.542, + 0.27, + 0.554 + ], + "angle": 0, + "content": "....More examples ...." 
+ }, + { + "type": "text", + "bbox": [ + 0.128, + 0.565, + 0.242, + 0.577 + ], + "angle": 0, + "content": "Claim: {{claim}}" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.578, + 0.205, + 0.59 + ], + "angle": 0, + "content": "Subgraphs:" + }, + { + "type": "image_caption", + "bbox": [ + 0.235, + 0.626, + 0.761, + 0.641 + ], + "angle": 0, + "content": "Figure 7: Prompt template for the general LLM to generate pseudo subgraphs" + }, + { + "type": "title", + "bbox": [ + 0.128, + 0.707, + 0.381, + 0.719 + ], + "angle": 0, + "content": "ANNOTATE IN AND OUT ENTITIES" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.731, + 0.569, + 0.744 + ], + "angle": 0, + "content": "Task: Specify if the following entities are mentioned in the claim or not." + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.744, + 0.623, + 0.816 + ], + "angle": 0, + "content": "Respond correctly in the following JSON format and do not output anything else: { \"in Entities\": [list of entities that are in the claim], \"out Entities\": [list of entities that are not in the claim] } Do not change the entity names from the list of provided entities." + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.826, + 0.242, + 0.839 + ], + "angle": 0, + "content": "Claim: {{claim}}" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.839, + 0.261, + 0.851 + ], + "angle": 0, + "content": "Entities: {{entities}}" + }, + { + "type": "image_caption", + "bbox": [ + 0.239, + 0.877, + 0.756, + 0.892 + ], + "angle": 0, + "content": "Figure 8: Prompt template to annotate inside and outside entity of the claim." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.481, + 0.929, + 0.521, + 0.941 + ], + "angle": 0, + "content": "5290" + } + ] +] \ No newline at end of file diff --git a/2025/ClaimPKG_ Enhancing Claim Verification via Pseudo-Subgraph Generation with Lightweight Specialized LLM/08384e56-2d5c-4ecb-b64c-ecb74bcfc53b_origin.pdf b/2025/ClaimPKG_ Enhancing Claim Verification via Pseudo-Subgraph Generation with Lightweight Specialized LLM/08384e56-2d5c-4ecb-b64c-ecb74bcfc53b_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b9adddd81ea36fe48c2a9df142e2693ee836f194 --- /dev/null +++ b/2025/ClaimPKG_ Enhancing Claim Verification via Pseudo-Subgraph Generation with Lightweight Specialized LLM/08384e56-2d5c-4ecb-b64c-ecb74bcfc53b_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5c0363cf7c4506bcf7f15bde91fbe35f1e9385dc77a1aec2b6a1b59c05cfca5 +size 2470739 diff --git a/2025/ClaimPKG_ Enhancing Claim Verification via Pseudo-Subgraph Generation with Lightweight Specialized LLM/full.md b/2025/ClaimPKG_ Enhancing Claim Verification via Pseudo-Subgraph Generation with Lightweight Specialized LLM/full.md new file mode 100644 index 0000000000000000000000000000000000000000..da8eaee2476e137d33f8c85f34cc208b002428b9 --- /dev/null +++ b/2025/ClaimPKG_ Enhancing Claim Verification via Pseudo-Subgraph Generation with Lightweight Specialized LLM/full.md @@ -0,0 +1,599 @@ +# ClaimPKG: Enhancing Claim Verification via Pseudo-Subgraph Generation with Lightweight Specialized LLM + +Hoang Pham*, Thanh-Do Nguyen*, Khac-Hoai Nam Bui† + +Viettel Artificial Intelligence and Data Services Center, + +Viettel Group, Vietnam + +{hoangpv4, dont15, nambkh} @ viettel.com.vn + +# Abstract + +Integrating knowledge graphs (KGs) to enhance the reasoning capabilities of large language models (LLMs) is an emerging research challenge in claim verification. 
While KGs provide structured, semantically rich representations well-suited for reasoning, most existing verification methods rely on unstructured text corpora, limiting their ability to effectively leverage KGs. Additionally, despite possessing strong reasoning abilities, modern LLMs struggle with multi-step modular pipelines and reasoning over KGs without adaptation. To address these challenges, we propose ClaimPKG1, an end-to-end framework that seamlessly integrates LLM reasoning with structured knowledge from KGs. Specifically, the main idea of ClaimPKG is to employ a lightweight, specialized LLM to represent the input claim as pseudo-subgraphs, guiding a dedicated subgraph retrieval module to identify relevant KG subgraphs. These retrieved subgraphs are then processed by a general-purpose LLM to produce the final verdict and justification. Extensive experiments on the FactKG dataset demonstrate that ClaimPKG achieves state-of-the-art performance, outperforming strong baselines in this research field by $9\% - 12\%$ accuracy points across multiple categories. Furthermore, ClaimPKG exhibits zero-shot generalizability to unstructured datasets such as HoVer and FEVEROUS, effectively combining structured knowledge from KGs with LLM reasoning across various LLM backbones. + +# 1 Introduction + +In today's rapidly evolving information landscape, distinguishing fact from misinformation is becoming more challenging, especially with the rise of AI-generated content.
Robust claim verification + +![](images/1f79aa990ca6454f337d823732bf436f1207f9d509390dd3c4d4aaae0fda0a94.jpg) + +![](images/101c2011dbb6cd268f3b14346c625c2dd2859bef56228ac675c763c1aa6f5077.jpg) +c) Our Method - ClaimPKG +Figure 1: Different claim verification paradigms: (a) Unstructured Text-based methods focusing on claim decomposition and sequential reasoning over text, (b) KG-based methods facing challenges in entity resolution and structured reasoning, and (c) ClaimPKG's unified framework with specialized modules for pseudosubgraph generation, retrieval, and general reasoning. + +systems, leveraging NLP methods to automatically assess the veracity of claims (Glockner et al., 2022a,b; Thorne and Vlachos, 2018), are essential to ensure information reliability. Effective methods require not only accuracy but also transparency, necessitating strong reasoning to identify evidence and provide clear justifications (Pan et al., 2023). + +Most existing verification approaches focus on unstructured text corpora, using techniques like chain-of-thought (CoT) reasoning (Wei et al., 2022) to break down claims for verification. Approaches like ProgramFC (Pan et al., 2023) and FOLK (Wang and Shu, 2023) employ modular pipelines to verify claims against text-based knowledge bases (Figure 1(a)). However, the inherent limitations of text representation pose challenges. Specifically, ambiguous entity references and complex multi-hop relationships make it difficult to perform rigorous verification against unstructured text. + +In contrast, Knowledge Graphs (KGs) provide + +structured relationships for effective reasoning (Luo et al., 2024; Sun et al., 2024), yet their use in claim verification remains limited. Existing KG-based approaches (Figure 1(b)) (Kim et al., 2023b; Zhou et al., 2019; Kim et al., 2023a) lack end-to-end solutions, often requiring pre-extracted entities via modules like entity or relation extraction. 
Meanwhile, despite excelling at general reasoning, LLMs struggle with KG-specific tasks like entity resolution and multi-hop reasoning (Cao et al., 2021; Aly et al., 2021), suggesting the need for a system combining LLM capabilities with KG-based inference. + +Overall, solving claim verification problems is hindered by following major limitations: (1) Entity Ambiguity: Systems must accurately disambiguate entities within claims to identify relevant evidence (Aly et al., 2021); (2) Multihop Reasoning: Complex claims often require reasoning across multiple evidence from different sources (Pan et al., 2023; Wang and Shu, 2023); and (3) Limited integration of KGs and LLMs: Current approaches are underexploring the potential of combining the application of structured representation with strong inference capabilities of LLMs (Kim et al., 2023a). + +To address these challenges, we propose ClaimPKG (Claim Verification using Pseudo-Subgraph in Knowledge Graphs), a novel end-to-end framework that synergizes the adaptability and generalization strengths of LLMs with the structured and rigorous representation of KGs to enable robust and transparent claim verification. As specified in Figure 1(c), ClaimPKG operates through three phases: (1) Pseudo-Subgraphs Generation: A KG-specialized lightweight LLM generates pseudo subgraphs as the representations of input claims under a Trie-based KG-Entity Constraint, ensuring the correctness of extracted entities; (2) Subgraphs Retrieval: A retrieval algorithm considers generated pseudo subgraphs as queries to identify actual relevant KG subgraphs as evidence; and (3) General Reasoning: A general-purpose LLM reasons over the retrieved KG subgraphs to produce the verdict and human-readable justifications. Through extensive experiments on the FactKG dataset, ClaimPKG achieves state-of-the-art performance, demonstrating its effectiveness over various claim types with a small number of training samples. 
Furthermore, its zero-shot generalizability to unstructured datasets (HoVer, FEVEROUS) highlights its robustness. + +Our contributions can be summarized as follows: (1) We introduce ClaimPKG, a holistic framework + +that integrates LLMs and KGs for accurate and interpretable claim verification, handling various types of claims in a unified manner; (2) We develop a lightweight specialized LLM with its according decoding algorithm for pseudo-subgraph generation and pair it with general-purpose LLMs to achieve robust reasoning; and (3) We validate the effectiveness of ClaimPKG through extensive experiments, achieving state-of-the-art performance on structure-based datasets and generalizing to unstructure-based datasets. + +# 2 Related Work + +Claim Verification Approaches. Claim verification systems utilize knowledge bases that can be categorized into unstructured and structured formats. In the unstructured domain, text-based verification methods predominate, with systems designed to verify claims against textual evidence, as demonstrated in the FEVER dataset (Thorne et al., 2018). Recent advances have focused on handling specialized verification scenarios, including ambiguous question-answer pairs (Park et al., 2022), detecting factual changes (Schuster et al., 2021), and processing multiple documents concurrently (Jiang et al., 2020). For structured verification, research has primarily focused on tables and graphs, with early work developing specialized architectures: graph neural networks for knowledge graph processing (Zhou et al., 2020), table-specific transformers (Herzig et al., 2020), and tree-structured decoders for hierarchical data (Wang et al., 2020). + +Claim Verification over Knowledge Graphs (KGs). The emergence of Large Language Models (LLMs) has simplified direct reasoning over textual corpora for claim verification, as demonstrated by ProgramFC (Pan et al., 2023) and FOLK (Wang and Shu, 2023). 
However, structured data sources like tables and graphs can provide more grounded and robust verification results (Kim et al., 2023b). Knowledge graphs are particularly advantageous as they enable explicit representation of reasoning processes through logical rules over nodes and edges. FactKG (Kim et al., 2023b) established a foundation in this direction by introducing a comprehensive dataset for evaluating modern verification methods. KG-GPT (Kim et al., 2023a) followed this work by demonstrating performance gains through a pipeline that performs sentence decomposition, subgraph retrieval, and logical inference. Additionally, while not directly addressing + +claim verification, StructGPT (Jiang et al., 2023) and RoG (Luo et al., 2024) achieved promising results in related tasks (e.g., Knowledge Base Question Answering) by collecting relevant evidence, such as subgraphs in KGs, then leveraging LLMs for complex reasoning in particular scenarios. + +# 3 Preliminary + +Knowledge Graph: Knowledge Graph (KG) $\mathcal{G}$ represents facts as triplets of format $t = (e,r,e')$ , where entities $e,e'\in \mathcal{E}$ are connected by a relation $r\in \mathcal{R}$ ; $r$ can also be referred as $r(e,e')$ . + +Claim Verification: Given a claim $c$ , a verification model $\mathcal{F}$ determines its veracity as Supported or Refuted based on an external knowledge base $\kappa$ , while also providing a justification $j$ to explain the predicted label. This work specifically considers the scenario where $\kappa$ is structured as a Knowledge Graph $\mathcal{G}$ , enabling reasoning over graph knowledge to infer $v$ and $j$ . Formally, the verification process is defined as: $(v,j) = \mathcal{F}(c,\mathcal{G})$ . + +Trie-based Constrained Decoding: A Trie (Wikipedia, 2025b) indexes predefined token sequences, where each root-to-node path represents a prefix. During LLM generation, this structure restricts token selection to only valid Trie paths, ensuring reliable output. 
+ +# 4 ClaimPKG + +# 4.1 Formulation of ClaimPKG + +We formulate the ClaimPKG framework using a probabilistic approach. Given a claim $c$ and a prebuilt KG $\mathcal{G}$ , our objective is to model the distribution $p_{\theta}(v,j|c,\mathcal{G})$ , where $v$ denotes the verdict and $j$ the justification. However, direct computation for this distribution is infeasible as reasoning over the entire KG is not practical given its large size. To address this, we propose to select $S_{c}$ , a subgraph of $\mathcal{G}$ relevant to $c$ containing necessary information to derive our target distribution. Treating $S_{c}$ as a latent variable, $p_{\theta}(v,j|c,\mathcal{G})$ is decomposed as: + +$$ +p _ {\theta} (v, j \mid c, \mathcal {G}) = \sum_ {\mathcal {S} _ {c}} p _ {\theta} (v, j \mid c, \mathcal {S} _ {c}) p _ {\theta} (\mathcal {S} _ {c} \mid c, \mathcal {G}) \tag {1} +$$ + +where $p_{\theta}(\mathcal{S}_c|c,\mathcal{G})$ models the subgraph selection, and $p_{\theta}(v,j|c,\mathcal{S}_c)$ models the generator of the verdict and justification given $\mathcal{S}_c$ . However, direct computation of $p_{\theta}(\mathcal{S}_c|c,\mathcal{G})$ is challenging due to modality mismatch between the input $c$ (text) and the target $\mathcal{S}_c$ (graph structure), hindering the employment of retrieval methods for $\mathcal{S}_c$ . To bridge this + +gap, we decompose the subgraph selection into: + +$$ +p _ {\theta} \left(\mathcal {S} _ {c} | c, \mathcal {G}\right) = \sum_ {\mathcal {P} _ {c}} p _ {\theta} \left(\mathcal {S} _ {c} \mid \mathcal {P} _ {c}, \mathcal {G}\right) p _ {\theta} \left(\mathcal {P} _ {c} | c, \mathcal {G}\right) \tag {2} +$$ + +where $p_{\theta}(\mathcal{P}_c|c,\mathcal{G})$ models the generation of the graph representation $\mathcal{P}_c$ , which we refer as "pseudo subgraph", from a textual claim $c$ , and $p_{\theta}(\mathcal{S}_c|\mathcal{P}_c,\mathcal{G})$ models the distribution over relevant subgraphs $\mathcal{S}_c$ given $\mathcal{P}_c$ . 
While equations 1 and 2 establish our theoretical framework for ClaimPKG, computing exact probabilities by summing over all possible $(\mathcal{S}_c,\mathcal{P}_c)$ pairs is intractable. Addressing this we propose two approximations: (1) We infer the veracity using only the most relevant subgraph $\mathcal{S}_c^*$ : + +$$ +\left(v ^ {*}, j ^ {*}\right) \sim p _ {\theta} (v, j | c, \mathcal {S} _ {c} ^ {*}) \tag {3} +$$ + +(2) We assume each generated pseudo-subgraph is reasonable with a high probability, allowing us to approximate the subgraph selection in 2 as: + +$$ +\mathcal {S} _ {c} ^ {(i)} = \arg \max p _ {\theta} \left(\mathcal {S} _ {c} | \mathcal {P} _ {c} ^ {(i)}, \mathcal {G}\right) \tag {4} +$$ + +with $\mathcal{P}_c^{(i)}$ is the $ith$ pseudo-graph generation. We then construct $\mathcal{S}_c^*$ by aggregating multiple sampled subgraphs, specifically $\mathcal{S}_c^* = \bigcup \mathcal{S}_c^{(i)}$ . + +These approximations lead ClaimPKG to comprise 3 key modules as depicted in Figure 2: (1) Pseudo Subgraph Generation to generate graph representations $\mathcal{P}_c$ 's given claim $c$ ; (2) Subgraph Retrieval to retrieve relevant evidence subgraph $S_c^*$ ; and (3) General Reasoning to generate final verdict $v$ and justification $j$ . The inference procedure is described as follows: + +# Inference Procedure of ClaimPKG + +Preprocessing: Index the KG $\mathcal{G}$ into an Entity. TriE for effective entity lookup. + +1. Pseudo Subgraph Generation: Generate multiple graph representations (pseudo subgraphs) $\mathbb{P}_c = \{\mathcal{P}_c^{(i)}\}_{i=1}^N$ from claim $c$ , using a specialized LLM with beam search and Entity-Trie constraints. +2. Subgraph Retrieval: Use each pseudo graph in $\mathbb{P}_c$ for querying the most respective relevant subgraph $S_{c}^{(i)}$ in the KG $\mathcal{G}$ , resulting in a set of $\{S_c^{(i)}\}_{i = 1}^N$ following Equation 4, then aggregate them to form $S_{c}^{*} = \bigcup_{i = 1}^{N}S_{c}^{(i)}$ . +3. 
General Reasoning: Employ a general-purpose LLM to reason veracity $(v^{*},j^{*})\sim p_{\theta}(v,j|c,\mathcal{S}_{c}^{*})$ following Equation 3. + +The subsequent sections provide details about each component in the ClaimPKG framework. + +![](images/8f8c020c8c78d4277712169bc006bc9507ecf8d5ded3239cae38ce9a08b55ea3.jpg) +Figure 2: Illustration of the ClaimPKG for claim verification. The framework consists of three key modules: (1) Pseudo-subgraph Generation, constructing representative subgraphs; (2) Subgraph Retrieval, selecting the most pertinent KG subgraphs; and (3) General Reasoning, integrating them for accurate and interpretable verification. + +# 4.2 Pseudo Subgraph Generation + +The first step to effectively verify a claim is to understand its content thoroughly and represent it in a format compatible with the KG. Since evidence comes from KG, representing claims in the graph format is crucial, which captures hypothetical relations among entities in an effective way that enables effective comparisons with KG subgraphs for evidence retrieval. However, this process faces two main challenges: (1) handling ambiguity resolution and multi-hop reasoning, and (2) ensuring accurate entity extraction from the claim. + +Specialized LLM. To address the first challenge, the Pseudo Subgraph Generation module employs a lightweight model optimized for processing input claims. Following (Li et al., 2013; Miwa and Bansal, 2016), the model is trained to jointly extract entities and their corresponding relations from a claim $c$ . Specifically, from $c$ the model constructs a pseudo subgraph $\mathcal{P}_c$ comprising triplets in the form of head_entity||relation||tail-entity (illustrated in Figure 2). 
To ensure the generated subgraph can identify entities requiring ambiguity resolution and multi-hop reasoning, we employ a specialized annotation mechanism: when the claim references an entity indirectly—either without explicit naming or through relations to other entities—we denote it as unknown_i, with the index i to keep track of different entities. This + +notation effectively signals the need for further disambiguation and reasoning within the KG in subsequent steps. Training details enabling this annotation strategy are presented in Appendix B.1. + +Trie-Constrained Decoding. For the second challenge, we develop a constrained decoding algorithm with an Entity Trie inspired by (Cao et al., 2021). We construct a trie $\mathcal{T}$ from the KG's entity set $\mathcal{E} = \{e_1,e_2,\ldots \}$ . The specialized LLM generates entities using special tokens $\langle e\rangle$ and $\langle /e\rangle$ to mark entity boundaries. When $\langle e\rangle$ is generated, the decoding process restricts token selection based on $\mathcal{T}$ until $\langle /e\rangle$ is produced, ensuring all generated entities exist in the KG. Outside such boundaries, the model generates relations by sampling from an unconstrained original token distribution. This mechanism ensures entity reliability while preserving flexible relation extraction (Edge et al., 2024). + +Multiple Representations. In order to capture different semantic views of a claim, we employ beam search along with the described sampling strategy, which is proved to improve the coverage of extracted triplets (table 8), resulting in multiple representations $\mathbb{P}_c = \{\mathcal{P}_c^{(i)}\}_{i = 1}^N$ for an input claim. + +In summary, each of the claim's graph representations satisfies following properties: (1) effectively capture the underlying graph structure of that claim, and (2) correctly align with the KG's entities. 
+ +# 4.3 Subgraph Retrieval + +The second component of ClaimPKG involves retrieving relevant KG subgraphs as evidence by using a dedicated algorithm that matches the pseudosubgraphs $\mathcal{P}_c$ 's from the previous step to actual subgraphs in the KG. We present the high-level description of our algorithm here, while its complete formulation is detailed in Appendix D. We categorize triplets in a $\mathcal{P}_c$ into: (1) Incomplete triplets, where either the head or tail entity is marked as unknown, and (2) Complete triplets, where both head and tail entities are explicitly identified. + +Relation Scoring Function: We define a function $\operatorname{Sim}(r_1, r_2)$ to quantify the similarity between two relations, where a higher score indicates greater similarity. This function can be instantiated via various mechanisms (e.g., embedding similarity, re-ranking, fuzzy matching, etc.). + +Incomplete Triplets Retrieval: Our goal is to identify evidence (actual triplets in the KG) to inform us about entities marked as unknown and their respective relations with explicit entities in the pseudo-subgraphs. First, for a $\mathcal{P}_c$ , we group triplets sharing the same unknown entity $u$ into a group $g$ (e.g., in Figure 2, triplets associated with unknown_0 are grouped together). Subsequently, for each group $g$ characterized by the unknown entity $u$ , we denote: $\mathcal{E}_u = \{e_{u1}, \ldots, e_{un}\}$ as entities directly connected to $u$ in the pseudo-subgraph $\mathcal{P}_c$ and $\mathcal{R}_u = \{r_{u1}, \ldots, r_{un}\}$ as relations from $u$ to corresponding entities in $\mathcal{E}_c$ . In $g$ , for each explicit entity $e_{ui} \in \mathcal{E}_u$ , we first retrieve candidate set $C_{ui} = \{e_{i1}^c, \ldots, e_{im}^c\}$ containing all entities connected to $e_{ui}$ in the KG, then collect all candidate sets into $\mathcal{C}_u = \{C_{u1}, \ldots, C_{un}\}$ . 
+ +To determine the best candidates for resolving $u$ , we propose an Entity Scoring mechanism, which is based on two assumptions: (1) since $u$ has pseudo relations with all entities in $\mathcal{E}_u$ , a candidate $e^c$ connected to more entities in $\mathcal{E}_u$ is more likely to resolve $u$ ; and (2) because every information related to $e_{ui}$ and $u$ is crucial to verify the initial claim, each candidate set $C_{ui}$ must contribute to the final verification. Note that an entity can appear in multiple candidate sets, hence we compute a "global" score for each $e_{ij}^c$ in a candidate set $C_{ui}$ : + +$$ +\operatorname {s c o r e} \left(e _ {i j} ^ {c}\right) = \sum_ {r} ^ {R _ {i j} ^ {u}} \operatorname {S i m} \left(r _ {u i}, r\right) \tag {5} +$$ + +with $R_{ij}^{u} = \bigcup_{i = 1}^{\left|\mathcal{E}_{u}\right|}\{r(e_{ui},e_{ij}^{c})\mid$ if $e_{ij}^{c}\in C_{ui}\}$ , the set of all relations across candidate sets appearing + +in $\mathcal{C}_u$ that connect $e_{ij}^c$ with an $e_{ui}$ . Subsequently, to construct the set $T_{u}$ of most relevant triplets to a group $g$ , we employ a ranking function as follows: + +$$ +T _ {u} = \bigcup_ {i = 1} ^ {| C _ {u} |} \underset {\text {t r i p l e t}, k _ {1}} {\arg \max } \left\{\pi_ {i j} \mid j \leq \left| C _ {u i} \right| \right\} \tag {6} +$$ + +with $\pi_{ij}$ is simply $score(e_{ij}^{c})$ and (triplet, $k_{1}$ ) denotes the selection of top $k_{1}$ triplets $(e_{ui}, r, e^{c})$ having the highest global scores from each set in $\mathcal{C}_{u}$ . + +While equation 5 ensures candidates appearing in multiple candidate sets and having high similar scores are prioritized, equation 6 ensures every entity in $\mathcal{E}_u$ has at least $k_{1}$ triplets, both of which make use of assumptions (1) and (2). + +Complete Triplets Retrieval: For each triplet $(e_1, r, e_2)$ in a $\mathcal{P}_c$ , we first find top $k_2$ similar relations between $e_1$ and $e_2$ in the KG $\mathcal{G}$ using the Sim function. 
If no direct connection exists (e.g., "103 Colmore Row" and "Vedat Tek" as shown in figure 2), the triplet is decomposed into two: $(e_1, r, \text{unknown}_0)$ and $(\text{unknown}_0, r, e_2)$ . These are then handled via Incomplete Triplets Retrieval. + +Subgraph Union: In summary, for an input claim $c$ , multiple pseudo-graphs are generated, containing complete and incomplete triplets. These triplets undergo processing to handle shared unknown entities and identified entities that are not connected in the KG $\mathcal{G}$ , and are used to query $\mathcal{G}$ for relevant triplets. All retrieved evidence triplets are aggregated into a final subgraph $S_{c}^{*}$ , serving as the evidence for the final component of ClaimPKG. + +# 4.4 General Reasoning + +The General Reasoning module concludes the ClaimPKG framework by determining claim veracity through reasoning over input claim $c$ and retrieved evidence subgraph $S_{c}^{*}$ . As complex tasks, especially claim verification, require deliberate chain-of-thought reasoning (Jiang et al., 2020; Wang et al., 2023), we use a general-purpose LLM to analyze $c$ and $S_{c}^{*}$ . Using carefully designed prompts (Figure 6), the module generates a natural language justification $j$ and verdict $v$ . Expanded from equation 3, this step is formalized as: + +$$ +p _ {\theta} (v, j | c, \mathcal {S} _ {c} ^ {*}) = p _ {\theta} (v | c, j, \mathcal {S} _ {c} ^ {*}) p _ {\theta} (j | c, \mathcal {S} _ {c} ^ {*}) \tag {7} +$$ + +where $p(j|c, S_c^*)$ produces the justification and $p(v|c, j, S_c^*)$ determines veracity. This model-agnostic design enables integration with state-of-the-art LLMs (e.g., Llama, Qwen and GPT4) for zero-shot reasoning. + +# 5 Experiments + +# 5.1 Experimental Setup + +Datasets. Our primary benchmark is the FactKG dataset (Kim et al., 2023b), designed for claim verification over the DBpedia KG (Lehmann et al., 2015). 
It consists of 108K claims grounded in DBpedia and labelled as either SUPPORTED or REFUTED. The claims span five distinct categories: One-hop, Conjunction, Existence, Multi-hop, and Negation, each posing unique challenges. For evaluation, we randomly sample 2K claims from the test set, ensuring balanced representation across categories under computational efficiency. To assess the generalizability of ClaimPKG beyond structured benchmarks, we also evaluate HoVer (Jiang et al., 2020) and FEVEROUS (Aly et al., 2021), two widely-used unstructured-based benchmarks requiring multi-hop reasoning and evidence aggregation from Wikipedia. Additional statistics of datasets are provided in Appendix A. + +Metrics. We use Accuracy as the primary metric along with Entity Correctness to measure if the claim's extracted entity is valid in KG. Additionally, for the FactKG dev set, we report Claim Structure Coverage, which quantifies the proportion of triplets from the original claim's graph structure successfully reconstructed by our pipeline. We refer readers to Appendix C for more details. + +Annotation. For brevity, we use Llama-3B, Llama-70B, and Qwen-72B to refer to Llama-3.2-3B, Llama-3.3-70B, and Qwen2.5-72B respectively. The * symbol denotes models fine-tuned for pseudo subgraph generation. Full model names are used when necessary. + +Baselines. We compare ClaimPKG with recent KG-based claim verification methods: Zero-shot CoT (Wei et al., 2022) prompts LLMs to generate rationales and verdicts without accessing the KG; GEAR (Zhou et al., 2019), originally designed for text-based verification, employs graph-based evidence aggregation with multiple aggregators to capture multi-evidence dependencies, using BERT for language representation and adapted for KG settings following (Kim et al., 2023b); and KG-GPT (Kim et al., 2023a), a pioneer work that combines LLMs and KGs through a structured pipeline of Sentence Segmentation, Graph Retrieval, and Logic Inference.
Notably, unlike baselines which receive pre-identified claim entities along with the claim as the input, our method processes entities in an end-to-end pipeline. + +Implementation. For a comprehensive evaluation, we evaluate baselines on three model series: Llama 3 (Meta, 2024), Qwen 2.5 (Qwen, 2024), and GPT4o-mini (OpenAI, 2024). In ClaimPKG, we configure the Specialized LLM to generate multiple pseudo-subgraphs using a beam size of 5. For the Subgraph Retrieval algorithm, we adopt an embedding-based approach leveraging BGE-LargeEN-v1.5 (Xiao et al., 2023) to compute dot-product similarity for the Relation Scoring Function, we set the primary hyperparameters to $k_{1} = 3$ and $k_{2} = 1$ . Detailed justification is provided in Appendix C. + +# 5.2 Results and Analysis + +We present the main experimental results in this section and additional findings in Appendix C. + +(RQ1): How Does ClaimPKG Perform Against the Baselines? Table 1 compares the accuracy $(\%)$ of ClaimPKG with baselines across claim categories of the FactKG. Key observations include: + +(1) Direct inference using LLMs with CoT reasoning significantly underperforms compared to evidence-based methods, with the best average score reaching only $69.07\%$ , highlighting that despite LLM advancements, evidence retrieval remains crucial. (2) KG-GPT integrates knowledge graphs with LLMs but its best average score achieves only $74.70\%$ (Llama-70B Few-shot), falling short of GEAR's fine-tuned model at $76.65\%$ . This suggests that while LLMs excel at language tasks, they require specific adaptation for KG processing. (3) ClaimPKG, with the strongest configuration $(\text{Llama}-3\text{B}^{*} + \text{Llama}-70\text{B})$ and constrained by Entity-Trie for valid KG entity generation, achieves a 12-point improvement over KG-GPT and 9 points over GEAR. 
It particularly excels in multi-hop reasoning, demonstrating strong performance across Llama-3 and Qwen-2.5 backbones through effective structured evidence retrieval and KG integration. + +(RQ2): How Do Different Components Affect Performance? To evaluate the impact of each component in ClaimPKG, we conduct ablation studies of the following components, maintaining Llama-3B* as the Specialized LLM and Llama-70B as the General LLM. + +Entity-Trie Constraint. We remove the Entity-Trie constraint to assess its necessity. Compared to the full setup, this reduces the entity extraction correctness from $100\%$ to $87.5\%$ , and overall performance from $84.64\%$ to $82.72\%$ . + +
MethodEntity CorrectnessNegationExistenceConjunctionMulti-hopOne-hopAverage
Direct Inference With CoT - w/o Evidence Retrieval
GPT-4o-mini (Zero-shot CoT)-61.9159.4569.5160.8770.8364.51
Qwen-72B (Zero-shot CoT)-62.9162.2074.0462.3275.9867.49
Llama-70B (Zero-shot CoT)-64.3464.6272.4765.5878.3269.07
Baseline Comparison - w/ Evidence Retrieval
GEAR (Finetuned BERT)Known in Prior79.7279.1978.6368.3977.3476.65
KG-GPT (Llama-70B Few-shot)Known in Prior70.9165.0686.6458.8792.0274.70
KG-GPT (Qwen-72B Few-shot)Known in Prior67.3160.0889.1458.1990.8773.12
ClaimPKG (Llama-3B* + GPT-4o-mini)100.0%85.1072.6484.2372.2691.0181.05
ClaimPKG (Llama-3B* + Qwen-72B)100.0%85.2786.9084.0278.7191.2085.22
ClaimPKG (Llama-3B* + Llama-70B)100.0%84.5884.2085.6878.4990.2684.64
Ablation Results (Llama-3B* + Llama-70B) - w/ Evidence Retrieval
ClaimPKG (w/o Trie Constraint)87.50%82.5083.2483.8276.1388.0182.74
ClaimPKG (Few-shot Specialized LLM)86.52%77.9981.8977.8068.8281.6577.63
ClaimPKG (w/o Incomplete Retrieval)100.0%68.8051.2567.8461.2976.2265.08
+ +Specialized LLM. When replacing the specialized LLM with few-shot prompting strategy using Llama-70B, a much larger general-purpose LLM, entity correctness further declines to $86.52\%$ , leading overall performance to drop to $77.63\%$ . These results demonstrate that even with examples, general-purpose LLMs struggle to produce outputs with desired graph structure correctly, emphasizing the importance of the specialized LLM in generating pseudo subgraphs. + +Incomplete Retrieval. Removing the Incomplete Triplet Retrieval function, which forces the retrieval algorithm to only query evidence using complete triplets, causes a significant average performance drop of nearly $20\%$ compared to the full setup, showing the complete graph structure of input claims is essential for optimal performance. + +(RQ3): Robustness and Generalization of ClaimPKG? To assess ClaimPKG's robustness, we vary model backbones, examine zero-shot generalizability, analyze the effect of training data size, and conduct error analysis. + +Model Backbones. We evaluate different LLM architectures for both Specialized and General LLMs (Table 2). For General LLMs, we test various model sizes (7B to 70B parameters) using retrieved KG triplets as input. For Specialized LLMs, we experiment with different small fine-tuned backbones and few-shot prompt templates (Figure 7), while keeping Llama-3.3-70B as the fixed General LLM. + +Results in Table 2 show larger General LLMs (GPT-4o-Mini, Llama-3.3-70B) outperform smaller ones (Qwen-2.5-7B, Llama-3.1-8B) by up to 8 points, highlighting model capacity's role in ag + +Table 1: Performance (accuracy %) comparison of ClaimPKG with baselines on 5 claim categories of FactKG dataset and their average scores. + +
Component | Strategy | Backbone | Average
General LLM | Zero-shot | Llama 3.1 - 8B | 77.08
 | | Llama 3.3 - 70B | 84.64
 | | GPT4o - Mini | 81.05
 | | Qwen 2.5 - 7B | 80.22
 | | Qwen 2.5 - 72B | 85.22
Specialized LLM | Finetune | Llama 3 - 3B | 84.64
 | | Qwen 2.5 - 3B | 82.32
 | | Llama 3 - 1B | 83.91
 | | Qwen 2.5 - 1.5B | 82.20
 | Few-shot | Llama 3.3 - 70B | 77.63
 | | Qwen 2.5 - 72B | 77.10
+ +Table 2: Performance on Different Backbones. + +gregating subgraph evidence. Notably, a fine-tuned 1B Specialized LLM outperforms the general 70B counterpart, demonstrating fine-tuning's effectiveness to process graph data. This supports the need to combine powerful General LLMs with adapted Specialized LLMs for optimal performance. + +Zero-shot Generalizability. To assess + +
Benchmark | Llama 3 | Qwen 2.5
HoVer (Zero-shot CoT) | 66.6 | 65.3
HoVer (Support-Predicted) | 70.7 (14.3%) | 69.4 (15.7%)
FEVEROUS (Zero-shot CoT) | 81.1 | 80.9
FEVEROUS (Support-Predicted) | 83.8 (12.5%) | 83.6 (12.9%)
+ +Table 3: Zero-shot transferred performance on other unstructure-based benchmarks on the Support-Predicted samples along with Support Predicted rates. + +ClaimPKG's zero-shot generalizability, we test transfer to HoVer (Jiang et al., 2020) and FEVEROUS (Aly et al., 2021) datasets. Using DBpedia (Lehmann et al., 2015) as the knowledge + +source, we evaluate with trained Specialized LLMs (Llama-3.2-3B and Qwen-2.5-3B) while keeping Llama-3.3-70B as the General LLM. Since external datasets may contain claims outside DBpedia's coverage, making it difficult to distinguish between knowledge gaps and actual verification failures of ClaimPKG for Refuted cases, we analyze only samples predicted as Supported. As shown in Table 3, ClaimPKG predicts Supported for only $12.5\% - 15.7\%$ of samples, indicating limited knowledge overlap with DBpedia. However, on these samples, ClaimPKG outperforms Llama-3.3-70B's zero-shot CoT inference by $4\%$ accuracy on both datasets, demonstrating robust transfer to reasoning patterns in unseen data. + +Training Data Size. To assess the impact of train + +![](images/1f8ce5c2eb5971e4f38babdf748e06ca7c28c9b4b01a81101d541f6b5a409692.jpg) +Figure 3: Varying Specialized LLM's training data. + +ing data on the Specialized LLM, we vary the number of training samples from 0.1K to 10K, using two configurations: Llama-3.2-3B and Qwen-2.5-3B as the specialized LLM and keep the General LLM to be Llama-3.3-70B. We evaluate performance based on two metrics: average accuracy on the test set and claim structure coverage on the dev set. As shown in Figure 3, the Specialized LLMs achieve satisfactory accuracy (Llama-3.2-3B: $79.35\%$ , Qwen-2.5-3B: $77.62\%$ ) with just 100 training samples, demonstrating efficiency and low training costs for KG adaptation. 
While both structure coverage and accuracy improve up to 5K samples, coverage plateaus thereafter, and accuracy begins to decline, indicating overfitting where excessive training data reduces generalizability. + +# 5.3 Interpretability and Error Analysis + +ClaimPKG can improve claim verification performance while enhancing interpretability. Representative outputs of ClaimPKG (Figure 12, Appendix E) illustrate its ability to capture claim structure and provide well-grounded justifications. Notably, + +when refuting claims, it explicitly presents contradicting evidence, ensuring transparent reasoning. To further assess reliability, we conducted a human analysis of 200 incorrect predictions from FactKG, categorizing errors (Figure 13, Appendix E) into: Claim Structure Errors: fail to capture the underlying claim structure; Retrieval Errors: fail to retrieve necessary evidence required for claim verification; and Reasoning Errors: incorrect logical inferences of the general LLM to judge the verdict. + +Specifically, there are 0 (0%) Claim Structure Errors, 57 (28.5%) Retrieval Errors, and 143 (71.5%) Reasoning Errors. These results suggest that, with chances (multiple beams) to generate pseudosubgraphs, the Specialized LLM can effectively capture the structural representation of claims. However, the general-purpose LLM, despite its strong reasoning capabilities, still struggles with certain complex reasoning scenarios that require specific handling. Moreover, retrieval errors highlight cases where additional implicit reasoning is necessary, as we hypothesize that direct subgraph retrieval failed to provide a comprehensive picture of the required evidence. These highlight future improvements, focusing on enhancing retrieval inference and refining reasoning for complex claim verification over structured knowledge. + +# 5.4 Scalability of ClaimPKG + +ClaimPKG maintains scalability and adaptability within dynamic knowledge environments. 
After training the Specialized LLM on a domain (e.g., Wikipedia), the system remains decoupled from the underlying Knowledge Graph (KG). Only the Entity-Trie component interfaces directly with the data. Consequently, when the KG undergoes updates, ClaimPKG requires merely an update of the corresponding entities within the Entity-Trie, ensuring an efficient adaptation process. + +# 6 Conclusion + +In this work, we present ClaimPKG, a novel claim verification combining the structure of Knowledge Graphs with the adaptability and reasoning of Large Language Models. Through Pseudosubgraph Generation, Subgraph Retrieval, and General Reasoning, it addresses limitations while ensuring transparency. Extensive experiments show state-of-the-art performance and generalizability across datasets, making ClaimPKG a step toward reliable and explainable misinformation detection. + +# Limitations + +Despite their advanced reasoning capabilities, LLMs are prone to errors and biases, necessitating careful deployment, particularly in fact-checking systems where incorrect or biased outputs could contribute to misinformation. Addressing these biases remains an ongoing research challenge, requiring effective mechanisms for detection, control, and mitigation. Additionally, real-world claim verification often requires inferring implicit reasoning, where further related knowledge for a problem is necessary, and making improvements in pipeline components to handle this type of information is crucial. Another limitation is the performance decline observed when the Specialized LLM is trained on an excessive number of examples, highlighting the need for future research into regularization strategies. Further improvements should also focus on the general reasoning module to infer missing knowledge more effectively and enhance intricate and nuanced claim verification cases over structured knowledge. 
+ +# References + +Rami Aly, Zhijiang Guo, Michael Sejr Schlichtkrull, James Thorne, Andreas Vlachos, Christos Christodoulopoulos, Oana Cocarascu, and Arpit Mittal. 2021. FEVEROUS: fact extraction and verification over unstructured and structured information. In Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December 2021, virtual. +Nicola De Cao, Gautier Izacard, Sebastian Riedel, and Fabio Petroni. 2021. Autoregressive entity retrieval. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net. +Darren Edge, Ha Trinh, Newman Cheng, Joshua Bradley, Alex Chao, Apurva Mody, Steven Truitt, and Jonathan Larson. 2024. From local to global: A graph RAG approach to query-focused summarization. CoRR, abs/2404.16130. +Max Glockner, Yufang Hou, and Iryna Gurevych. 2022a. Missing counter-evidence renders NLP fact-checking unrealistic for misinformation. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, EMNLP 2022, Abu Dhabi, United Arab Emirates, December 7-11, 2022, pages 5916-5936. Association for Computational Linguistics. +Max Glockner, Yufang Hou, and Iryna Gurevych. 2022b. Missing counter-evidence renders NLP fact-checking + +unrealistic for misinformation. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, EMNLP 2022, Abu Dhabi, United Arab Emirates, December 7-11, 2022, pages 5916-5936. Association for Computational Linguistics. +Jonathan Herzig, Pawel Krzysztof Nowak, Thomas Müller, Francesco Piccinno, and Julian Eisenschlos. 2020. TaPas: Weakly supervised table parsing via pre-training. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4320-4333, Online. Association for Computational Linguistics. +Jinhao Jiang, Kun Zhou, Zican Dong, Keming Ye, Xin Zhao, and Ji-Rong Wen. 2023. 
StructGPT: A general framework for large language model to reason over structured data. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 9237-9251, Singapore. Association for Computational Linguistics. +Yichen Jiang, Shikha Bordia, Zheng Zhong, Charles Dognin, Maneesh Kumar Singh, and Mohit Bansal. 2020. Hover: A dataset for many-hop fact extraction and claim verification. In Findings of the Association for Computational Linguistics: EMNLP 2020, Online Event, 16-20 November 2020, volume EMNLP 2020 of Findings of ACL, pages 3441-3460. Association for Computational Linguistics. +Jiho Kim, Yeonsu Kwon, Yohan Jo, and Edward Choi. 2023a. KG-GPT: A general framework for reasoning on knowledge graphs using large language models. In Findings of the Association for Computational Linguistics: EMNLP 2023, Singapore, December 6-10, 2023, pages 9410-9421. Association for Computational Linguistics. +Jiho Kim, Sungjin Park, Yeonsu Kwon, Yohan Jo, James Thorne, and Edward Choi. 2023b. Factkg: Fact verification via reasoning on knowledge graphs. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2023, Toronto, Canada, July 9-14, 2023, pages 16190-16206. Association for Computational Linguistics. +Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, and Ion Stoica. 2023. Efficient memory management for large language model serving with pagedattention. In Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles. +Jens Lehmann, Robert Isele, Max Jakob, Anja Jentzsch, Dimitris Kontokostas, Pablo N. Mendes, Sebastian Hellmann, Mohamed Morsey, Patrick van Kleef, Soren Auer, and Christian Bizer. 2015. Dbpedia - A large-scale, multilingual knowledge base extracted from wikipedia. Semantic Web, 6(2):167-195. +Qi Li, Heng Ji, and Liang Huang. 2013. 
Joint event extraction via structured prediction with global features. + +In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics, ACL 2013, 4-9 August 2013, Sofia, Bulgaria, Volume 1: Long Papers, pages 73-82. The Association for Computer Linguistics. +Ilya Loshchilov and Frank Hutter. 2019. Decoupled weight decay regularization. In 7th International Conference on Learning Representations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenReview.net. +Linhao Luo, Yuan-Fang Li, Gholamreza Haffari, and Shirui Pan. 2024. Reasoning on graphs: Faithful and interpretable large language model reasoning. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net. +Meta. 2024. Build the future of ai with meta llama 3, 2024. +Makoto Miwa and Mohit Bansal. 2016. End-to-end relation extraction using LSTMs on sequences and tree structures. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1105-1116, Berlin, Germany. Association for Computational Linguistics. +OpenAI. 2024. Hello gpt-4o, 2024a. +Liangming Pan, Xiaobao Wu, Xinyuan Lu, Anh Tuan Luu, William Yang Wang, Min-Yen Kan, and Preslav Nakov. 2023. Fact-checking complex claims with program-guided reasoning. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2023, Toronto, Canada, July 9-14, 2023, pages 6981-7004. Association for Computational Linguistics. +Jungsoo Park, Sewon Min, Jaewoo Kang, Luke Zettle-moyer, and Hannaneh Hajishirzi. 2022. FaVIQ: FAct verification from information-seeking questions. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 5154-5166, Dublin, Ireland. Association for Computational Linguistics. +Qwen. 2024. Qwen2.5: A party of foundation models. 
+Tal Schuster, Adam Fisch, and Regina Barzilay. 2021. Get your vitamin C! robust fact verification with contrastive evidence. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 624-643, Online. Association for Computational Linguistics. +Jiashuo Sun, Chengjin Xu, Lumingyuan Tang, Saizhuo Wang, Chen Lin, Yeyun Gong, Lionel M. Ni, Heung-Yeung Shum, and Jian Guo. 2024. Think-on-graph: Deep and responsible reasoning of large language model on knowledge graph. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. Open-Review.net. + +James Thorne and Andreas Vlachos. 2018. Automated fact checking: Task formulations, methods and future directions. In Proceedings of the 27th International Conference on Computational Linguistics, COLING 2018, Santa Fe, New Mexico, USA, August 20-26, 2018, pages 3346-3359. Association for Computational Linguistics. +James Thorne, Andreas Vlachos, Christos Christodoulopoulos, and Arpit Mittal. 2018. FEVER: a large-scale dataset for fact extraction and verification. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2018, New Orleans, Louisiana, USA, June 1-6, 2018, Volume 1 (Long Papers), pages 809-819. Association for Computational Linguistics. +Bailin Wang, Richard Shin, Xiaodong Liu, Oleksandr Polozov, and Matthew Richardson. 2020. RAT-SQL: Relation-aware schema encoding and linking for text-to-SQL parsers. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7567-7578, Online. Association for Computational Linguistics. +Haoran Wang and Kai Shu. 2023. Explainable claim verification via knowledge-grounded reasoning with large language models. 
In Findings of the Association for Computational Linguistics: EMNLP 2023, Singapore, December 6-10, 2023, pages 6288-6304. Association for Computational Linguistics. +Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V. Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. 2023. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net. +Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed H. Chi, Quoc V. Le, and Denny Zhou. 2022. Chain-of-thought prompting elicits reasoning in large language models. In Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2022, NeurIPS 2022, New Orleans, LA, USA, November 28 - December 9, 2022. +Wikipedia. 2025a. Levenshtein distance — Wikipedia, The Free Encyclopedia. Accessed: 14-February-2025. +Wikipedia. 2025b. Trie — Wikipedia, The Free Encyclopedia. [Online; accessed 9-February-2025]. +Shitao Xiao, Zheng Liu, Peitian Zhang, and Niklas Muennighoff. 2023. C-pack: Packaged resources to advance general chinese embedding. Preprint, arXiv:2309.07597. +Jie Zhou, Ganqu Cui, Shengding Hu, Zhengyan Zhang, Cheng Yang, Zhiyuan Liu, Lifeng Wang, Changcheng Li, and Maosong Sun. 2020. Graph + +neural networks: A review of methods and applications. AI Open, 1:57-81. + +Jie Zhou, Xu Han, Cheng Yang, Zhiyuan Liu, Lifeng Wang, Changcheng Li, and Maosong Sun. 2019. GEAR: graph-based evidence aggregating and reasoning for fact verification. In Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019, Florence, Italy, July 28- August 2, 2019, Volume 1: Long Papers, pages 892-901. Association for Computational Linguistics. + +# A Benchmark Datasets + +
Dataset | Split | Support | Refute | NEI | Total
FactKG | Train | 42723 | 43644 | - | 86367
FactKG | Dev | 6426 | 6840 | - | 13266
FactKG | Test | 4398 | 4643 | - | 9041
FactKG | Total | 53547 | 55127 | - | 108674
HoVer | Train | 11023 | 7148 | - | 18171
HoVer | Dev | 2000 | 2000 | - | 4000
HoVer | Test | 2000 | 2000 | - | 4000
HoVer | Total | 15023 | 11148 | - | 26171
FEVEROUS | Train | 41835 | 27215 | 2241 | 71291
FEVEROUS | Dev | 3908 | 3481 | 501 | 7890
FEVEROUS | Test | 3372 | 2973 | 1500 | 7845
FEVEROUS | Total | 49115 | 33669 | 4242 | 87026
+ +Table 4: Basic statistics of Hover, FEVERIOUS, and FactKG Datasets + +
Type | Written | Colloquial (Model) | Colloquial (Presup) | Total
One-hop | 2,016 | 15,934 | 1,580 | 19,530
Conjunction | 20,587 | 15,908 | 602 | 37,097
Existence | 280 | 4,060 | 4,832 | 9,172
Multi-hop | 10,239 | 16,420 | 603 | 27,262
Negation | 1,340 | 12,466 | 1,807 | 15,613
Total | 34,462 | 64,788 | 9,424 | 108,674
+ +Table 5: Dataset statistics of FACTKG for claim types. + +FEVEROUS. (Aly et al., 2021) FEVEROUS is a fact verification dataset comprising 87,026 verified claims sourced from Wikipedia (Table 4). Each claim is accompanied by evidence in the form of sentences and/or cells from tables, along with a label indicating whether the evidence supports, refutes, or does not provide enough information to verify the claim. The dataset includes metadata like annotator actions and challenge types, designed to minimize biases. It is used for tasks that involve verifying claims against both unstructured (textual) and structured (tabular) information. + +HoVer. (Jiang et al., 2020) HoVer is a dataset containing 26,171 samples, designed for open-domain, + +multi-hop fact extraction and claim verification, using the Wikipedia corpus. Claims in HoVer are adapted from question-answer pairs and require the extraction of facts from multiple (up to four) Wikipedia articles to determine if the claim is supported or not supported. The complexity of HoVer, particularly in the 3/4-hop claims, is further amplified because these claims are often expressed across multiple sentences, which introduces challenges related to long-range dependencies, such as accurately resolving coreferences. + +FactKG. (Kim et al., 2023b) FactKG is a challenging fact verification dataset comprised of 108,674 samples, designed to rigorously test models' abilities to reason over structured knowledge represented in a knowledge graph. Its difficulty arises from a combination of factors. First, it demands proficiency in five distinct reasoning types: one-hop (single relationship), conjunction (combining multiple relationships), existence (verifying entity/relationship presence), multi-hop (traversing multiple relationships), and, crucially, negation (reasoning about the absence of relationships). 
Second, FactKG incorporates linguistic diversity, encompassing both formal, written-style claims and more challenging colloquial expressions, requiring models to handle paraphrasing, idiomatic language, and less direct wording. Third, instead of unstructured text, FactKG utilizes the DBpedia knowledge graph (derived from Wikipedia), necessitating that models correctly link entities and relations mentioned in the claim to the graph's nodes and edges, and perform complex path-based reasoning, especially for multi-hop claims. The addition of a weakly semantic knowledge source, and cross-style evaluation to assess generalizability, further contributes to the difficulty of this dataset. These features collectively make FactKG significantly more complex than datasets relying solely on unstructured text for verification. Detailed statistics of this dataset can be found in table 5. Readers can refer to table 4 for the overall basic statistics of all employed datasets for ClaimPKG. + +# B Implementation Details + +We conducted all experiments on a DGX server with 8 NVIDIA A100 GPUs. The General LLM is hosted within the vLLM framework (Kwon et al., 2023). Below, we detail the training process of the Specialized LLM. + +# B.1 Specialized LLM Training Data Annotation + +To tailor the specialized model for improved comprehension and processing of KG-specific data, we construct a dedicated dataset for training, leveraging the provided version of FactKG (Kim et al., 2023b) (illustrated in Figure 4). The annotation process consists of the following steps: + +Claim: A musical artist, whose music is Post-metal, played with the band Twilight and performs for Mamiffer. 
+ +Entities: [Mamiffer, Post-metal, Twilight_(band)] Evidence: + +- Twilight_(band), (associatedMusicalArtist, associatedBand), Mamiffer) +- Twilight_(band), (associatedMusicalArtist, genre), Postmetal + +Figure 4: Provided data of FactKG + +Preprocessing: All entities and relations from FactKG, including the train, development, and test datasets, as well as the DBPedia KG, are normalized by splitting concatenated words to ensure consistency. + +Graph Construction: Using the provided evidence information from FactKG, we observe that while evidence may not explicitly exist in the graph, it accurately captures the underlying structure of the claim. Accordingly, for triplets with relation paths exceeding one hop, we decompose them into multiple triplets while introducing a placeholder entity, denoted as "unknown_[index]", to preserve structural integrity. This placeholder represents an ambiguous or missing entity that requires identification. For instance, the triplet: "Twilight_(band), (~associatedMusicalArtist, associatedBand), Mamiffer" is transformed into the following triplets: "Twilight_(band), associatedBand, unknown_1" and "unknown_1", associatedMusicalArtist, Mamiffer". Additionally, entities present in the Entities set but absent from the graph are also introduced as unknown_[index]. To further enhance graph completeness, GPT-4 is employed to verify whether entities from the Entities set are explicitly mentioned in the claim. This ensures that relevant entities are either linked to existing nodes or added as placeholders. The automatic entity verification process is conducted using a prompt template, as shown in Figure 8. Additionally, the symbol "\~" + +is retained to denote inverse relations. Random shuffle among constructed triplets but preserving the sequential order of “unknown” entity is applied to improve the robustness of the model being trained. 
+ +Generated Pseudo-Subgraph: The transformed claim results in the pseudo-subgraph illustrated in Figure 5. + +Pseudo Subgraph Label: + +- Twilight (band), associated musical artist, unknown_0 +- unknown_0, associated band, Mamiffer +- unknown_0, genre, Post-metal + +Figure 5: Pseudo-Subgraph label as the output of the data annotation process. + +# B.2 Training and Hyperparameter Settings of the Specialized LLM + +
Parameter | Value
Backbone | Llama-3-Base
 | Qwen-2.5-Base
Learning Rate | 1e-5
Training Epoch | 1
Training Steps | 128
Optimizer | AdamW
+ +Table 6: Hyperparameters of the Specialized LLM in ClaimPKG. + +The training configurations for the Specialized LLM are summarized in Table 6. The model training is based on the Base version of Llama-3 (Llama3.2-1B, Llama-3.2-3B, Llama-3.1-8B) and Qwen 2.5 (Qwen-2.5-1.5B, Qwen-2.5-3B, Qwen-2.5-7B). These base models are selected to preserve their inherent linguistic capabilities while facilitating optimal adaptation to domain-specific tasks during fine-tuning. The training process employs the annotated dataset described in Section B.1 and is conducted over one single epoch using the AdamW (Loshchilov and Hutter, 2019) optimizer. This strategy enables the generation of multiple variants of the Specialized LLM, ensuring task-specific adaptation while maintaining robust generalization across diverse linguistic structures. + +# C Additional Experimental Results + +In this section, we present additional experimental results through a systematic analysis on the FactKG + +development set with 2000 randomly sampled data points across claim categories. First, we provide a more detailed explanation of the evaluation metrics used. Second, we examine the performance of the specialized LLM by varying the beam size and backbone model size. Third, we analyze the Subgraph Retrieval by adjusting the hyperparameters $k_{1}$ and $k_{2}$ as explained in the 4.3, which influence the diversity and correctness of the retrieved subgraphs. + +# C.1 Metrics + +The specialized LLM's generation of pseudosubgraphs plays a crucial role in ClaimPKG's performance. We evaluated the specialized LLM's performance using four metrics: claim structure coverage (coverage), entity correctness (correctness), unique triplet count, and average end-to-end accuracy. While the final metric is straightforward, the three former metrics can be described as follows: + +(1) Structure coverage quantifies the alignment between the LLM-generated pseudo-graph and the reference claim graph in the FactKG dataset. 
Specifically, for a generated graph $P$ and reference graph $Q$ , coverage is computed as: + +$$ +c o v e r a g e (P, Q) = \frac {\# (P . t r i p l e t s \cap Q . t r i p l e t s)}{\# (Q . t r i p l e t s)} +$$ + +(2) Entity correctness quantifies the correctness of a claim's extracted entities, i.e., whether these entities exist in the KG. Specifically, for a generated graph $P$ and a knowledge graph $\mathcal{G}$ , correctness is computed as: + +$$ +\operatorname {c o r r e c t n e s s} (P, \mathcal {G}) = \frac {\# (P . e n i t i e s \cap \mathcal {G} . e n t i t i e s)}{\# (P . e n t i t i e s)} +$$ + +(3) Unique triplet count measures the diversity of generated graph structures, with higher counts potentially enabling better subgraph retrieval through increased coverage of possible relationships. + +# C.2 Different Beam Sizes of the Specialized LLM + +To evaluate the LLM's decoding strategy across different beam sizes, we utilized three average accuracy, structure coverage and unique triplet count as metrics. Table 7 details the impact of the number of beam sizes on the previously mentioned metrics on the FactKG dev set. Both Llama and Qwen models demonstrate consistent improvements in average performance and claim structure coverage + +
Backbone | Beam Size | Average Accuracy | Structure Coverage | Unique Triplets
Llama-3B | Beam 1 | 79.78 | 76.51 | 4.48
 | Beam 3 | 81.80 | 81.27 | 6.44
 | Beam 5 | 82.04 | 83.02 | 8.39
 | Beam 10 | 82.33 | 84.61 | 13.83
Qwen-3B | Beam 1 | 78.84 | 77.95 | 3.82
 | Beam 3 | 80.76 | 82.66 | 5.16
 | Beam 5 | 81.41 | 83.58 | 6.73
 | Beam 10 | 82.19 | 84.62 | 9.58
+ +Table 7: Performance metrics for different models on FactKG dev set. + +
Beam Size | Gen Graph (s) | Retrieve (s) | Reason (s)
beam 1 | 1.02 | 0.24 | 2.19
beam 3 | 2.16 | 0.38 | 2.22
beam 5 | 3.52 | 0.50 | 2.33
beam 10 | 35.18 | 1.01 | 2.88
+ +Table 8: Computing time for different beam sizes on FactKG dev set. + +as beam size increases from 1 to 10. At beam size 10, Llama achieves $84.61\%$ coverage while Qwen reaches $84.62\%$ , showing comparable performance at higher beam sizes. The unique triplet count shows more pronounced growth with larger beam sizes, with Llama generating 13.83 unique triplets and Qwen 9.58 triplets at beam size 10. + +However, table 8 shows this improved performance comes with significant computational overhead. Table 8 details on the time taken for generating pseudo-graphs, retrieving sub-graphs and reasoning with retrieved evidence. Most notably, while the time required for retrieving sub-graphs and reasoning with evidence only increase marginally as the beam size increase, this figure for pseudo-graph generation increases dramatically as the beam size goes to 10, from 1.02s at beam size 1 to 35.18s at beam size 10 - a $34.5 \times$ increase. Based on this measurement, in our official framework we select beam size $= 5$ to balance the performance gain and computational costs. + +# C.3 Different Model Sizes of the Specialized LLM + +To evaluate how model size affects performance, we compare different variants of Llama and Qwen models ranging from 1B to 8B parameters. Table 9 presents the performance on the FactKG dev set across three key metrics: average performance, structure coverage, and unique triplets generated, + +which was explained previously. + +
Backbone | Average Accuracy | Structure Coverage | Unique Triplets
Llama - 1B | 80.26 | 78.98 | 8.97
Llama - 3B | 82.04 | 83.02 | 8.39
Llama - 8B | 82.63 | 82.84 | 9.34
Qwen - 1.5B | 80.48 | 81.34 | 6.58
Qwen - 3B | 81.41 | 83.58 | 6.73
Qwen - 7B | 81.79 | 82.88 | 7.05
+ +For both model families, we observe improvements in performance as model size increases, though with different patterns. The Llama family shows more notable gains, with average performance increasing from $80.26\%$ (1B) to $82.63\%$ (8B), while Qwen demonstrates more modest improvements from $80.48\%$ (1.5B) to $81.79\%$ (7B). Structure coverage peaks with the 3B variants for both families - Llama-3B achieving $83.02\%$ and Qwen-3B reaching $83.58\%$ . The models keep the increasing trend in their triplet generation patterns: Llama maintains relatively stable unique triplet counts (8.39 - 9.34) across sizes, while the figures for Qwen are (6.58 - 7.05) as the model size increases. + +Overall, scaling to larger models shows slight improvements while increasing computational requirements. Based on these results, we select 3B variants of both model families in our official implementation, which offer an optimal balance of performance and model size, with Llama-3B and Qwen-3B showing comparable effectiveness across all metrics. + +# C.4 Different Hyperparameters of Subgraph Retrieval + +Table 9: Performance metrics for different models on the FactKG dev set. + +
Hyper Params | Average Accuracy | Unique Triplets
k1=5; k2=3 | 82.00 | 11.42
k1=3; k2=1 | 82.04 | 8.39
k1=1; k2=1 | 81.87 | 3.58
+ +To assess the impact of different hyperparameters in the subgraph retrieval algorithm on overall + +performance, we systematically vary these hyperparameters while keeping the specialized LLM and general LLM fixed as Llama-3.2-3B and Llama-3.3-70B, respectively. Table 10 presents the performance across two key metrics: average accuracy and the number of unique triplets generated. + +The results indicate that increasing $k_{1}$ and $k_{2}$ leads to a higher number of unique triplets, suggesting greater diversity in retrieved claims. However, this increase does not consistently translate to overall performance gains, which fall in the range of 81.87 - 82.00. Notably, performance peaks at $k_{1} = 3$ and $k_{2} = 1$ , suggesting that a more focused retrieval strategy is sufficient to achieve optimal performance, whereas excessively high $k$ values may introduce noise or irrelevant information. Based on these results, we select $k_{1} = 3$ and $k_{2} = 1$ in our official implementation, which balancing between information discovery and computing required. + +# C.5 Different Methods for Relation Scoring Function + +Table 10: Performance of different subgraph retrieval configurations $k_{1}$ and $k_{2}$ with Llama-3.2-3B + Llama-3.3-70B on the FactKG dev set. + +
Method | Average Accuracy
Embedding Based | 84.64
Rerank Based | 84.73
Fuzzy Matching | 82.19
Exact Matching | 81.57
+ +Table 11: Performance of different scoring approach of the Subgraph Retrieval on the FactKG test set + +To assess the impact of different scoring mechanisms on performance, we vary the scoring function and evaluate the test set of FactKG while fix the Specialized LLM and the General LLM. Specifically, we explore multiple strategies for the Relation Scoring Function (Sim), as described in Section 4.3, incorporating diverse techniques such as embedding-based retrieval, reranking, fuzzy text matching (Wikipedia, 2025a), and exact matching. + +For embedding-based and reranking approaches, we employ state-of-the-art pre-trained models, namely BGE-Large-EN-v1.5² and BGE-Reranker-Large³, as provided by (Xiao et al., 2023). Experimental results indicate that deep learning-based methods, such as embedding and reranking, achieve superior performance, with accuracy scores of 84.64 and 84.56, respectively. In contrast, + +text-matching-based methods yield lower accuracy, with fuzzy matching and exact matching scoring 82.19 and 81.57, respectively. These findings highlight the effectiveness of deep learning-based approaches. + +We recommend embedding-based retrieval as it enables pre-indexing of corpus relations. This allows precomputation of relation embeddings and requires encoding only the query relation for new Pseudo Subgraphs, eliminating the need to re-encode existing knowledge graph relations during inference. + +# D Algorithm Details + +The detailed implementation of the Entity Trie-constrained decoding algorithm is provided as the pseudo-code in Algorithm 1 and the Algorithm 2 details the implementation of the Subgraph Retrieval. + +# E Case Study + +We present the case study results of ClaimPKG on the FactKG dataset in Tables 12 and 13. Each table includes the claim $c$ , pseudo-subgraphs $P_{s}$ , retrieved subgraphs $S_{c}$ , final justification $j$ , and verdict $v$ . 
Table 12 showcases correctly predicted examples, demonstrating ClaimPKG's ability to accurately capture claim structures and generate well-grounded justifications. Conversely, Table 13 highlights incorrectly predicted cases of two error types as detailed in Section 5.3. The first two examples illustrate Reasoning Errors, while the third represents a Retrieval Error. These insights serve as a foundation for future improvements, emphasizing key areas for future refinement. + +# F Prompt Templates + +For better reproducibility, we present all prompt templates in the appendix. Below is a quick reference list outlining the prompt templates and their usages: + +- Figure 6: Prompt the General LLM to reason on the input claim and retrieved subgraphs to produce justification and final verdict. +Figure 7: Few-shot prompts the General LLM to generate a Pseudo Subgraph with provided examples. +- Figure 8: Annotate the inside and outside entities of the input claim for the training dataset. + +Algorithm 1: LLM Decoding with Entity-Trie Constraint +```txt +Input:Specialized LLM, Input claim $c$ Entity TriE T +Output:Pseudo-Subgraph P +Initialize: $\mathcal{P}\gets \emptyset$ // Initialize pseudo subgraph + $h_0\gets$ InitializeHiddenStates(); constrained $\leftarrow$ False; +Function ConstrainedDecoding(LLM,c,T): +while True do + $p_t,h_t\gets LLM(\mathcal{P},c,h_{t - 1})$ // Compute token probabilities and update hidden states if constrained then +prefix $\leftarrow$ ExtractPrefix(P); // Retrieve tokens from last unclosed to the last allowed $\leftarrow$ T.lookup(prefix);// Retrieve allowed tokens from valid continuations in T $p_t\gets$ MaskProb $(p_t,$ allowed); // Impose probabilities of invalid tokens to be 0 +new_token $\leftarrow$ arg max $p_t$ . // Select new token for P + $\mathcal{P}\gets \mathcal{P}\cup \{\text{new_token}\}$ . 
if new_token $= = < e>$ then $\sqsubset$ constrained $\leftarrow$ True; if new_token $= = < / e>$ then $\sqsubset$ constrained $\leftarrow$ False; if new_token $= = EOS$ then $\sqsubset$ break; +return P +``` + +# GENERAL REASONING + +Task: Verify whether the fact in the given sentence is true or false based on the provided graph triplets. Use only the information in the triplets for verification. + +- The triplets provided represent all relevant knowledge that can be retrieved. +- If the fact is a negation and the triplets do not include the fact, consider the fact as true. +- Ignore questions and verify only the factual assertion within them. For example, in the question "When was Daniel Martínez (politician) a leader of Montevideo?", focusing on verifying the assertion "Daniel Martínez (politician) a leader of Montevideo". +- Interpret the “ $\sim$ ” symbol in triplets as indicating a reverse relationship. For example: “A $\sim$ south of B” means “B is north of A”. + +# Response Format: + +Provide your response in the following JSON format without any additional explanations: +{ "rationale": "A concise explanation for your decision", "verdict": "true/false as the JSON value" } + +# Triplets: + +{triplets} + +# Claim: + +{claim} + +Figure 6: Prompt template for the general LLM to perform reasoning + +Algorithm 2: Subgraph Retrieval +Input: Knowledge graph $\mathcal{G}$ Pseudo Subgraph List $P_{c}$ Top $k_{1}$ Candidate Unknown Entities, Top $k_{2}$ Complete Triplets +Output:Combined subgraph $S_{c}$ +Function SubgraphRetrieval $(\mathcal{G},\mathcal{P}_c,k_1,k_2)$ .. + $S\gets \emptyset$ . +foreach $\mathcal{P}\in \mathcal{P}_c$ do + $S\gets S\cup$ RetrieveSingleSubgraph $(\mathcal{G},\mathcal{P},k_1,k_2)$ // Process each pseudo subgraph +return JoinSubgraphs $(S)$ // Combine subgraphs +Function RetrieveSingleSubgraph $(\mathcal{G},\mathcal{P},k_1,k_2)$ .. 
+ $(T_{comp},T_{inc})\leftarrow$ CategorizeTriplets( $\mathcal{P}$ );//Split into complete/incomplete triplets + $S_{inc}\gets$ RetrieveIncomplete $(\mathcal{G},T_{inc},k_1)$ . + $S_{comp}\gets$ RetrieveComplete $(\mathcal{G},T_{comp},k_1,k_2)$ . +return $S_{inc}\cup S_{comp}$ +Function RetrieveIncomplete $(\mathcal{G},T_{inc},k_1)$ .. + $S\gets \emptyset$ . + $G\gets$ GroupTripletsByUnknown $(T_{inc})$ //Group by unknown entity +foreach $g\in G$ do + $(E_u,R_u)\leftarrow$ ExtractPseudoStructure $(g)$ //Extract entities and relations associated to unknown entity + $C\gets \emptyset$ . +foreach $(e,r)\in (E_u,R_u)$ do + $(C_e,\mathrm{scores})\leftarrow$ GetCandidatesAndScores $(G,e,r)$ . + $C\gets C\cup \{(C_e,\mathrm{scores})\}$ . + $C =$ AggregateGlobalScore(C); //Aggregate candidate scores globally $C^{*}\gets$ RankTopKCandidates $(C,k_{1})$ //Select top- $k_{1}$ candidates + $S\gets S\cup$ GetTriplets $(C^{*},g)$ . +return $S$ +Function GetCandidatesAndScores $(G,e,r)$ .. + $R_{act}\gets$ RetrieveActualConnectedRelations $(G,e)$ . + $E_{act}\gets$ RetrieveActualConnectedEntities $(G,e)$ . + $r\_ score s\gets$ RelationScore(r, $R_{act}$ . + $S\gets \emptyset$ . +foreach $e^{\prime}\in E_{act}$ do + $s\gets$ MaxRelatedRelationScores(e',r Scores); + $S\gets S\cup \{(e^{\prime},s)\}$ . +return $S$ // Score connected entities +Function AggregateGlobalScore $(C)$ .. +//Calculate new scores and reassign for each $C\_ e$ +foreach $(C_e,\mathrm{scores})\in C$ do +foreach $(c,s)\in (C_e,\mathrm{scores})$ do + $s\gets$ Sum([s'(c) for $(C',s')$ in $C$ if $c\in C')$ +return $C$ . +Function RankTopKCandidates $(C,k_1)$ .. + $C^{*}\gets \emptyset$ . +foreach $(C_e,\mathrm{scores})\in C$ do + $C_e^*\gets$ TopKCandidates $(C_e,\mathrm{scores},k_1)$ . + $C^{*}\gets C^{*}\cup C_{e}^{*}$ . +return $C^{*}$ //Rank candidates per unknown entity +Function RetrieveComplete $(G,T_{comp},k_1,k_2)$ .. + $S\gets \emptyset$ . 
+foreach $(h,r,t)\in T_{comp}$ do if ExistConnection $(h,t)$ then $T\gets$ GetTripletsWithHeadAndTail $(G,h,t)$ scores $\leftarrow \emptyset$ . foreach $t'\in T$ do scores $\leftarrow$ scores U RelationScore(r, $t'$ .relation); $S\gets S\cup$ TopK(T,scores, $k_{2}$ ); //Rank connected triplets else $S_{1}\gets$ RetrieveIncomplete(G,[(unknown,r,t)], $k_{1}$ ) $S_{2}\gets$ RetrieveIncomplete(G,[(h,r,unknown)], $k_{1}$ ) $S\gets S\cup (S_1\cup S_2)$ // Handle missing connection + +
Claim cPseudo-Subgraphs PcRetrieved Subgraphs ScJustification j; Verdict v
The 11th Mississippi Infantry Monument is not in Gettysburg, Pennsylvania.P1: 11th Mississippi Infantry Monument | location | Gettysburg, Pennsylvania(11th Mississippi Infantry Monument, municipality, Gettysburg, Pennsylvania)Justification: The triplets provided include '11th Mississippi Infantry Monument', 'municipality', 'Gettysburg, Pennsylvania' and '11th Mississippi Infantry Monument', 'state', 'Pennsylvania', indicating the monument's location as Gettysburg, Pennsylvania.
P2: Gettysburg, Pennsylvania | -location | 11th Mississippi Infantry Monument(11th Mississippi Infantry Monument, website, 11th mississippianemy monument)
11th Mississippi Infantry Monument | location | unknown_0(U.S. Route 140, location, Gettysburg)
P3: 11th Mississippi Infantry Monument | location | Gettysburg(Pennsylvania Route 34, location, Gettysburg)
11th Mississippi Infantry Monument | location | Unknown_0(U.S. Route 30 in Pennsylvania, location, Gettysburg)
P4: 11th Mississippi Infantry Monument | location | Pennsylvania(11th Mississippi Infantry Monument, state, Pennsylvania)
11th Mississippi Infantry Monument | is part of military conflict | Gettysburg(11th Mississippi Infantry Monument, region, Adams County, Pennsylvania)
11th Mississippi Infantry Monument | location | Pennsylvania(11th Mississippi Infantry Monument, country, United States)
11th Mississippi Infantry Monument | location | Unknown_0(11th Mississippi Infantry Monument, category, Contributing property)
P5: 11th Mississippi Infantry Monument | location | Gettysburg, Pennsylvania(Lutheran Theological Seminary at Gettysburg, city, Gettysburg)
(11th Mississippi Infantry Monument, range, Semi-nary Ridge)Verdict: False
It was released in Hardcover by J.V. JonesP1: Hardcover | -media type | unknown_0(Hardcover, -media type, A Fortress of Grey Ice)Justification: The triplets show that 'A Fortress of Grey Ice' is a book by 'J.V. Jones' and it is of 'Hardcover' media type, which supports the claim.
unknown_0 | author | J.V. Jones(Hardcover, -media type, A Clockwork Orange)
P2: unknown_0 | author | J.V. Jones(Hardcover, -media type, The Bell Curve)
unknown_0 | media type | Hardcover(A Fortress of Grey Ice, author, J.V. Jones)
unknown_0 | -genre | unknown_1(A Cavern of Black Ice, author, J.V. Jones)
P3: J.V. Jones | -author | unknown_0(The Barbed Coil, author, J.V. Jones)
unknown_0 | media type | Hardcover(Hardcover, -media type, The Andromeda Strain)
P4: Hardcover | -media type | J.V. Jones(1963, -date of birth, J.V. Jones)
J.V. Jones | -author | unknown_0(Liverpool, England, -place of birth, J.V. Jones)
unknown_0 | previous work | unknown_1
P5: Hardcover | -media type | unknown_0
unknown_0 | author | J.V. Jones
A person was born in the County of Ty-rol and later died in Austria which is led by Doris Bures.P1: Austria | leader | Doris Bures(Austria, leader, Doris Bures)Justification: The triplets show that Kurt Schuschnigg, Alfons Gorbach, and Josef Hellensteiner were born in the County of Tyrol and died in Austria. Additionally, Doris Bures is the leader of Austria.
Austria | -death place | unknown_0(Austria, -death place, Hans Hörberg)
unknown_0 | death place | County of Tyrol(Austria, -death place, Edgar Meyer (painter) )
P2: Austria | leader name | Doris Bures(Austria, -death place, Rakhat Aliyev)
Austria | -death place | unknown_0(Hans Hörberg, death place, County of Tyrol)
unknown_0 | death place | Austria(Edgar Meyer (painter), death place, County of Tyrol)
unknown_0 | successor | Doris Bures(Friedrich Hasenöhr, death place, County of Tyrol)
P3: County of Tyrol | -birth place | unknown_0(County of Tyrol, -birth place, Kurt Schuschnigg)
unknown_0 | death place | Austria(Sky, death place, Alfons Gorbach)
unknown_0 | successor | Doris Bures(County of Tyrol, -birth place, Josef Hellensteiner)
P4: Doris Bures | -leader | Austria(Kurt Schuschnigg, death place, Austria)
Austria | -death place | unknown_0(Alfons Gorbach, death place, Austria)
unknown_0 | birth place | County of Tyrol(Josef Hellensteiner, death place, Austria)
P5: Austria | leader | Doris Bures(Barbara Prammer, successor, Doris Bures)
Austria | -death place | unknown_0(Werner Faymann, successor, Doris Bures)
unknown_0 | birth place | County of Tyrol(Alois Stöger, -successor, Doris Bures)
The New Britain Transit is the operat-ing organisation of Agra Airport which is located in India and has the location identifier VIAG.P1: Agra Airport | location | New Britain Transit(Agra Airport, location, India)Justification: The operator of Agra Airport is listed as 'Indian Air Force' and 'Airports Authority of India', not 'New Britain Transit', which operates 'Connecticut Transit New Britain and Bris-tol'
Agra Airport | location | India(Agra Airport, location, Uttar Pradesh)
Agra Airport | faa | VIAG(Agra Airport, location, Agra)
P2: Agra Airport | location | India(Connecticut Transit New Britain and Bristol, oper-ator, New Britain Transit)
Agra Airport | operator | New Britain Transit(Agra Airport, icao location identifier, VIAG)
Agra Airport | faa | VIAG(Agra Airport, operator, Airports Authority of India)
Agra Airport | faa | VIAG(Agra Airport, elevation, 167.94)Verdict: False
P3: Agra Airport | location | India
Agra Airport | operator | New Britain Transit
Agra Airport | location | India
Agra Airport | faa | VIAG
+ +Table 12: Case study of ClaimPKG on the FactKG dataset with correctly predicted samples. + +
Claim cPseudo-Subgraphs PcRetrieved Subgraphs ScJustification j; Verdict v
When did Indian Air Force operate Agra Airport?P1: Agra Airport || operator || Indian Air Force(Agra Airport, operator, Indian Air Force)Justification: The triplets do not provide information about the specific time period when the Indian Air Force operated Agra Airport. However, they do confirm that the Indian Air Force is the operator of Agra Airport, implying that the operation is either current or has occurred at some point, but the exact timing is not specified. Verdict: False
P2: Indian Air Force || operator || Agra Airport(Agra Airport, runway length, 2743.0)
Agra Airport || ~runway designation || unknown_0(Agra Airport, runway length, 1818.0)
Agra Airport || ~runway designation || Indian Air Force(Agra Airport, location, India)
Agra Airport || ~location || unknown_0(Agra Airport, location, Uttar Pradesh)
unknown_0 || operator || Indian Air Force(Agra Airport, location, Agra)
P4: Agra Airport || operator || Indian Air Force(Bhuj Airport, operator, Indian Air Force)
P5: Agra Airport || operator || Indian Air Force(Bhuj Rudra Mata Air Force Base, operator, Indian Air Force)
(Car Nicobar Air Force Base, operator, Indian Air Force)
I imagined that Lady Anne Monson not lived in the Cape Verde whose capital was Winchester.P1: Cape Verde || capital || Winchester(Cape Verde, capital, Praia)Justification: The capital of Cape Verde is Praia, not Winchester. Winchester is the capital of Wessex and the Kingdom of England. Lady Anne Monson's residence is listed as India, and her birthplace is listed as Dar-lington and the Kingdom of England, with no mention of Cape Verde as her residence. Verdict: True
Cape Verde || ~residence || Lady Anne Monson(Cape Verde, currency, Cape Verdean escudo)
P2: Lady Anne Monson || residence || Cape Verde(Cape Verde, ~headquarter, Movement for Democracy (Cape Verde))
Cape Verde || capital || Winchester(Wessex, capital, Winchester)
P3: Anne Monzon || residence || Cape Verde(Kingdom of England, capital, Winchester)
Cape Verde || capital || Winchester(River Itchen, Hampshire, city, Winchester)
P4: Cape Verde || capital || Winchester(Cape Verde, ~birth place, Aristides Pereira)
Cape Verde || ~residence || Lady Anne Monson(Cape Verde, ~birth place, Pedro Pires)
P5: Cape Verde || capital || Winchester(Cape Verde, ~birth place, José Maria Neves)
Cape Verde || ~death place || Lady Anne Monson(India, ~residence, Lady Anne Monson)
(Kingdom of England, ~birth place, Lady Anne Monson)
(Darlington, ~birth place, Lady Anne Monson)
(Anne Monzon, ~cinematography, "Won't Last a Day Without You")
(Anne Monzon, ~cinematography, Pop Class)
(Anne Monzon, ~cinematography, Namets!)
(Cape Verde, ~death place, Corsino Fortes)
(Cape Verde, ~death place, Cesária Évora)
(Cape Verde, ~death place, Baltasar Lopes da Silva)
(1776-02-18, ~death date, Lady Anne Monson)
(1776, ~death year, Lady Anne Monson)
The operating organization for Agra Airport is the Indian Air Force who deploy the HAL Light Combat Helicopter and the Boeing C17 Globemaster III transport aircraft.P1: Agra Airport || operator || Indian Air Force(Agra Airport, operator, Indian Air Force)Justification: The triplets confirm Agra Airport is operated by the Indian Air Force, the Indian Air Force deploys the HAL Light Combat Helicopter, and the Boeing C-17 Globemaster III is indeed a transport aircraft, but there is no direct information linking the Indian Air Force at Agra Airport to the deployment of the Boeing C-17 Globemaster III. However, given the information provided, we can infer the involvement of the Indian Air Force with the mentioned aircraft types, albeit indirectly for the Boeing C-17 Globemaster III. Verdict: False
Agra Airport || ~location || Boeing C-17 Globemaster III(Agra Airport, location, Uttar Pradesh)
Agra Airport || ~location || Boeing C-17 Globemaster III(Agra Airport, location, Agra)
Agra Airport || ~location || Boeing C-17 Globemaster III(2012 in spaceflight, site, Boeing C-17 Globemaster III)
Agra Airport || ~location || Boeing C-17 Globemaster III(2013 in spaceflight, site, Boeing C-17 Globemaster III)
Agra Airport || ~location || Boeing C-17 Globemaster III(2009 in spaceflight (July-December), site, Boeing C-17 Globemaster III)
Agra Airport || ~location || Boeing C-17 Globemaster III(Hindustan Aeronautics, manufacturer, HAL Light Combat Helicopter)
Agra Airport || ~location || Boeing C-17 Globemaster III(Boeing C-17 Globemaster III, aircraft transport, United States Air Force)
Agra Airport || operator || Indian Air Force(Boeing C-17 Globemaster III, aircraft transport, Royal Air Force)
Agra Airport || runway length || Boeing C-17 Globemaster III(Boeing C-17 Globemaster III, aircraft transport, Royal Australian Air Force)
Agra Airport || ~location || HAL Light Combat Helicopter(2743.0, runway length, Agra Airport)
Agra Airport || ~city || HAL Light Combat Helicopter(1818.0, runway length, Agra Airport)
Agra Airport || ~city || Boeing C-17 Globemaster III(HAL Light Combat Helicopter, aircraft helicopter, Indian Air Force)
(Aircraft, icao location identifier, VIAG)
(Airlift, type, Boeing C-17 Globemaster III)
(United States, origin, Boeing C-17 Globemaster III)
(In service, status, Boeing C-17 Globemaster III)
+ +Table 13: Case study of ClaimPKG on the FactKG dataset with incorrectly predicted samples. + +# FEWSHOT PSEUDO SUBGRAPH GENERATION + +Task: Generate a reference graph to verify the following claim. Only return the subgraphs following the format of provided examples and do NOT include other unnecessary information. + +# Here are some examples: + +Claim: Akeem Priestley played for club RoPS and currently plays for the Orange County Blues FC, which is managed by Oliver Wyss. + +# Subgraphs: + +Orange County Blues FC || manager || Oliver Wyss +Orange County Blues FC || clubs || Akeem Priestley +Akeem Priestley || team || RoPS + +Claim: He is a Rhythm and Blues singer from Errata, Mississippi! + +# Subgraphs: + + || genre || unknown_0 +unknown_0 || birth place || Errata, Mississippi +unknown_0 || background || unknown_1 + +Claim: Arròs negro is a traditional dish from Spain, and from the Catalonia region, which is led by the Maria Norrfalk. + +# Subgraphs: + +$<\mathrm{e}>$ Arròs negro
|| country || Spain + $<\mathrm{e}>$ Arròs negro || region || Catalonia + $<\mathrm{e}>$ Catalonia || leader name || Maria Norrfalk + +Claim: Well, Jason Sherlock did not have a nickname! + +# Subgraphs: + +$<\mathrm{e}>$ Jason Sherlock | | nickname | | unknown_0 + +Claim: Garlic is the main ingredient of Ajoblanco, which is from Andalusia. + +# Subgraphs: + +$< \mathrm{e}>$ Ajoblanco || region || Andalusia + $< \mathrm{e}>$ Ajoblanco || ingredient || Garlic + +....More examples .... + +Claim: {{claim}} + +Subgraphs: + +Figure 7: Prompt template for the general LLM to generate pseudo subgraphs + +# ANNOTATE IN AND OUT ENTITIES + +Task: Specify if the following entities are mentioned in the claim or not. + +Respond correctly in the following JSON format and do not output anything else: { "in Entities": [list of entities that are in the claim], "out Entities": [list of entities that are not in the claim] } Do not change the entity names from the list of provided entities. + +Claim: {{claim}} + +Entities: {{entities}} + +Figure 8: Prompt template to annotate inside and outside entity of the claim. 
\ No newline at end of file diff --git a/2025/ClaimPKG_ Enhancing Claim Verification via Pseudo-Subgraph Generation with Lightweight Specialized LLM/images.zip b/2025/ClaimPKG_ Enhancing Claim Verification via Pseudo-Subgraph Generation with Lightweight Specialized LLM/images.zip new file mode 100644 index 0000000000000000000000000000000000000000..823464dac5252450a07ebc642a6ab991dbac566b --- /dev/null +++ b/2025/ClaimPKG_ Enhancing Claim Verification via Pseudo-Subgraph Generation with Lightweight Specialized LLM/images.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37303e865ae0c53f19996acb4d25cb5248742052b5f0e619c4f083b7f2e92d4b +size 1408124 diff --git a/2025/ClaimPKG_ Enhancing Claim Verification via Pseudo-Subgraph Generation with Lightweight Specialized LLM/layout.json b/2025/ClaimPKG_ Enhancing Claim Verification via Pseudo-Subgraph Generation with Lightweight Specialized LLM/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..632af5632f429f674eba295a967e2b612f61eac0 --- /dev/null +++ b/2025/ClaimPKG_ Enhancing Claim Verification via Pseudo-Subgraph Generation with Lightweight Specialized LLM/layout.json @@ -0,0 +1,16639 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 99, + 75, + 495, + 110 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 75, + 495, + 110 + ], + "spans": [ + { + "bbox": [ + 99, + 75, + 495, + 110 + ], + "type": "text", + "content": "ClaimPKG: Enhancing Claim Verification via Pseudo-Subgraph Generation with Lightweight Specialized LLM" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 151, + 127, + 445, + 142 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 127, + 445, + 142 + ], + "spans": [ + { + "bbox": [ + 151, + 127, + 445, + 142 + ], + "type": "text", + "content": "Hoang Pham*, Thanh-Do Nguyen*, Khac-Hoai Nam Bui†" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 163, + 142, + 430, + 156 + ], + "type": 
"text", + "angle": 0, + "lines": [ + { + "bbox": [ + 163, + 142, + 430, + 156 + ], + "spans": [ + { + "bbox": [ + 163, + 142, + 430, + 156 + ], + "type": "text", + "content": "Viettel Artificial Intelligence and Data Services Center," + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 239, + 157, + 353, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 157, + 353, + 170 + ], + "spans": [ + { + "bbox": [ + 239, + 157, + 353, + 170 + ], + "type": "text", + "content": "Viettel Group, Vietnam" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 185, + 171, + 408, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 185, + 171, + 408, + 185 + ], + "spans": [ + { + "bbox": [ + 185, + 171, + 408, + 185 + ], + "type": "text", + "content": "{hoangpv4, dont15, nambkh} @ viettel.com.vn" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "spans": [ + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 84, + 246, + 274, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 246, + 274, + 641 + ], + "spans": [ + { + "bbox": [ + 84, + 246, + 274, + 641 + ], + "type": "text", + "content": "Integrating knowledge graphs (KGs) to enhance the reasoning capabilities of large language models (LLMs) is an emerging research challenge in claim verification. While KGs provide structured, semantically rich representations well-suited for reasoning, most existing verification methods rely on unstructured text corpora, limiting their ability to effectively leverage KGs. Additionally, despite possessing strong reasoning abilities, modern LLMs struggle with multi-step modular pipelines and reasoning over KGs without adaptation. 
To address these challenges, we propose ClaimPKG1, an end-to-end framework that seamlessly integrates LLM reasoning with structured knowledge from KGs. Specifically, the main idea of ClaimPKG is to employ a lightweight, specialized LLM to represent the input claim as pseudo-subgraphs, guiding a dedicated subgraph retrieval module to identify relevant KG subgraphs. These retrieved subgraphs are then processed by a general-purpose LLM to produce the final verdict and justification. Extensive experiments on the FactKG dataset demonstrate that ClaimPKG achieves state-of-the-art performance, outperforming strong baselines in this research field by " + }, + { + "bbox": [ + 84, + 246, + 274, + 641 + ], + "type": "inline_equation", + "content": "9\\% - 12\\%" + }, + { + "bbox": [ + 84, + 246, + 274, + 641 + ], + "type": "text", + "content": " accuracy points across multiple categories. Furthermore, ClaimPKG exhibits zero-shot generalizability to unstructured datasets such as HoVer and FEVERIOUS, effectively combining structured knowledge from KGs with LLM reasoning across various LLM backbones." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 655, + 154, + 667 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 655, + 154, + 667 + ], + "spans": [ + { + "bbox": [ + 68, + 655, + 154, + 667 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 678, + 291, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 678, + 291, + 732 + ], + "spans": [ + { + "bbox": [ + 67, + 678, + 291, + 732 + ], + "type": "text", + "content": "In today's rapidly evolving information landscape, distinguishing fact from misinformation is becoming more challenging, especially with the rise of AI-generated content. 
Robust claim verification" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 326, + 217, + 503, + 321 + ], + "blocks": [ + { + "bbox": [ + 326, + 217, + 503, + 321 + ], + "lines": [ + { + "bbox": [ + 326, + 217, + 503, + 321 + ], + "spans": [ + { + "bbox": [ + 326, + 217, + 503, + 321 + ], + "type": "image", + "image_path": "1f79aa990ca6454f337d823732bf436f1207f9d509390dd3c4d4aaae0fda0a94.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 326, + 322, + 502, + 375 + ], + "blocks": [ + { + "bbox": [ + 326, + 322, + 502, + 375 + ], + "lines": [ + { + "bbox": [ + 326, + 322, + 502, + 375 + ], + "spans": [ + { + "bbox": [ + 326, + 322, + 502, + 375 + ], + "type": "image", + "image_path": "101c2011dbb6cd268f3b14346c625c2dd2859bef56228ac675c763c1aa6f5077.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 375, + 375, + 446, + 383 + ], + "lines": [ + { + "bbox": [ + 375, + 375, + 446, + 383 + ], + "spans": [ + { + "bbox": [ + 375, + 375, + 446, + 383 + ], + "type": "text", + "content": "c) Our Method - ClaimPKG" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 302, + 392, + 526, + 476 + ], + "lines": [ + { + "bbox": [ + 302, + 392, + 526, + 476 + ], + "spans": [ + { + "bbox": [ + 302, + 392, + 526, + 476 + ], + "type": "text", + "content": "Figure 1: Different claim verification paradigms: (a) Unstructured Text-based methods focusing on claim decomposition and sequential reasoning over text, (b) KG-based methods facing challenges in entity resolution and structured reasoning, and (c) ClaimPKG's unified framework with specialized modules for pseudosubgraph generation, retrieval, and general reasoning." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 501, + 526, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 501, + 526, + 596 + ], + "spans": [ + { + "bbox": [ + 302, + 501, + 526, + 596 + ], + "type": "text", + "content": "systems, leveraging NLP methods to automatically assess the veracity of claims (Glockner et al., 2022a,b; Thorne and Vlachos, 2018), are essential to ensure information reliability. Effective methods require not only accuracy but also transparency, necessitating strong reasoning to identify evidence and provide clear justifications (Pan et al., 2023)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 597, + 525, + 759 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 597, + 525, + 759 + ], + "spans": [ + { + "bbox": [ + 302, + 597, + 525, + 759 + ], + "type": "text", + "content": "Most existing verification approaches focus on unstructured text corpora, using techniques like chain-of-thought (CoT) reasoning (Wei et al., 2022) to break down claims for verification. Approaches like ProgramFC (Pan et al., 2023) and FOLK (Wang and Shu, 2023) employ modular pipelines to verify claims against text-based knowledge bases (Figure 1(a)). However, the inherent limitations of text representation pose challenges. Specifically, ambiguous entity references and complex multi-hop relationships make it difficult to perform rigorous verification against unstructured text." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 761, + 524, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 761, + 524, + 775 + ], + "spans": [ + { + "bbox": [ + 313, + 761, + 524, + 775 + ], + "type": "text", + "content": "In contrast, Knowledge Graphs (KGs) provide" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 80, + 741, + 156, + 751 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 741, + 156, + 751 + ], + "spans": [ + { + "bbox": [ + 80, + 741, + 156, + 751 + ], + "type": "text", + "content": "*Equal contribution." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 82, + 752, + 167, + 762 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 752, + 167, + 762 + ], + "spans": [ + { + "bbox": [ + 82, + 752, + 167, + 762 + ], + "type": "text", + "content": "† Corresponding author." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 82, + 762, + 262, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 762, + 262, + 774 + ], + "spans": [ + { + "bbox": [ + 82, + 762, + 262, + 774 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 82, + 762, + 262, + 774 + ], + "type": "text", + "content": "https://github.com/HoangHoang1408/ClaimPKG" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 286, + 780, + 308, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 308, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 308, + 791 + ], + "type": "text", + "content": "5271" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 136, + 795, + 457, + 806 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 795, + 457, + 806 + ], + "spans": [ + { + "bbox": [ + 136, + 795, + 457, + 806 + ], + "type": "text", + "content": "Findings of the Association for Computational Linguistics: ACL 2025, pages 
5271-5290" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 160, + 807, + 433, + 818 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 807, + 433, + 818 + ], + "spans": [ + { + "bbox": [ + 160, + 807, + 433, + 818 + ], + "type": "text", + "content": "July 27 - August 1, 2025 ©2025 Association for Computational Linguistics" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 66, + 71, + 293, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 71, + 293, + 232 + ], + "spans": [ + { + "bbox": [ + 66, + 71, + 293, + 232 + ], + "type": "text", + "content": "structured relationships for effective reasoning (Luo et al., 2024; Sun et al., 2024), yet their use in claim verification remains limited. Existing KG-based approaches (Figure 1(b)) (Kim et al., 2023b; Zhou et al., 2019; Kim et al., 2023a) lack end-to-end solutions, often requiring pre-extracted entities via modules like entity or relation extraction. Meanwhile, despite excelling at general reasoning, LLMs struggle with KG-specific tasks like entity resolution and multi-hop reasoning (Cao et al., 2021; Aly et al., 2021), suggesting the need for a system combining LLM capabilities with KG-based inference." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 233, + 292, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 233, + 292, + 396 + ], + "spans": [ + { + "bbox": [ + 66, + 233, + 292, + 396 + ], + "type": "text", + "content": "Overall, solving claim verification problems is hindered by following major limitations: (1) Entity Ambiguity: Systems must accurately disambiguate entities within claims to identify relevant evidence (Aly et al., 2021); (2) Multihop Reasoning: Complex claims often require reasoning across multiple evidence from different sources (Pan et al., 2023; Wang and Shu, 2023); and (3) Limited integration of KGs and LLMs: Current approaches are underexploring the potential of combining the application of structured representation with strong inference capabilities of LLMs (Kim et al., 2023a)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 66, + 396, + 292, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 396, + 292, + 748 + ], + "spans": [ + { + "bbox": [ + 66, + 396, + 292, + 748 + ], + "type": "text", + "content": "To address these challenges, we propose ClaimPKG (Claim Verification using Pseudo-Subgraph in Knowledge Graphs), a novel end-to-end framework that synergizes the adaptability and generalization strengths of LLMs with the structured and rigorous representation of KGs to enable robust and transparent claim verification. 
As specified in Figure 1(c), ClaimPKG operates through three phases: (1) Pseudo-Subgraphs Generation: A KG-specialized lightweight LLM generates pseudo subgraphs as the representations of input claims under a Trie-based KG-Entity Constraint, ensuring the correctness of extracted entities; (2) Subgraphs Retrieval: A retrieval algorithm considers generated pseudo subgraphs as queries to identify actual relevant KG subgraphs as evidence; and (3) General Reasoning: A general-purpose LLM reasons over the retrieved KG subgraphs to produce the verdict and human-readable justifications. Through extensive experiments on the FactKG dataset, ClaimPKG achieves state-of-the-art performance, demonstrating its effectiveness over various claim types with a small number of training samples. Furthermore, its zero-shot generalizability to unstructured datasets (HoVer, FEVEROUS) highlights its robustness." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "type": "text", + "content": "Our contributions can be summarized as follows: (1) We introduce ClaimPKG, a holistic framework" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 302, + 71, + 527, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 527, + 220 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 527, + 220 + ], + "type": "text", + "content": "that integrates LLMs and KGs for accurate and interpretable claim verification, handling various types of claims in a unified manner; (2) We develop a lightweight specialized LLM with its according decoding algorithm for pseudo-subgraph generation and pair it with general-purpose LLMs to achieve robust reasoning; and (3) We validate the effectiveness of ClaimPKG through extensive experiments, achieving state-of-the-art performance on structure-based datasets and 
generalizing to unstructure-based datasets." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 302, + 234, + 396, + 247 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 234, + 396, + 247 + ], + "spans": [ + { + "bbox": [ + 302, + 234, + 396, + 247 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 258, + 527, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 258, + 527, + 514 + ], + "spans": [ + { + "bbox": [ + 302, + 258, + 527, + 514 + ], + "type": "text", + "content": "Claim Verification Approaches. Claim verification systems utilize knowledge bases that can be categorized into unstructured and structured formats. In the unstructured domain, text-based verification methods predominate, with systems designed to verify claims against textual evidence, as demonstrated in the FEVER dataset (Thorne et al., 2018). Recent advances have focused on handling specialized verification scenarios, including ambiguous question-answer pairs (Park et al., 2022), detecting factual changes (Schuster et al., 2021), and processing multiple documents concurrently (Jiang et al., 2020). For structured verification, research has primarily focused on tables and graphs, with early work developing specialized architectures: graph neural networks for knowledge graph processing (Zhou et al., 2020), table-specific transformers (Herzig et al., 2020), and tree-structured decoders for hierarchical data (Wang et al., 2020)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 518, + 527, + 776 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 518, + 527, + 776 + ], + "spans": [ + { + "bbox": [ + 302, + 518, + 527, + 776 + ], + "type": "text", + "content": "Claim Verification over Knowledge Graphs (KGs). 
The emergence of Large Language Models (LLMs) has simplified direct reasoning over textual corpora for claim verification, as demonstrated by ProgramFC (Pan et al., 2023) and FOLK (Wang and Shu, 2023). However, structured data sources like tables and graphs can provide more grounded and robust verification results (Kim et al., 2023b). Knowledge graphs are particularly advantageous as they enable explicit representation of reasoning processes through logical rules over nodes and edges. FactKG (Kim et al., 2023b) established a foundation in this direction by introducing a comprehensive dataset for evaluating modern verification methods. KG-GPT (Kim et al., 2023a) followed this work by demonstrating performance gains through a pipeline that performs sentence decomposition, subgraph retrieval, and logical inference. Additionally, while not directly addressing" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 780, + 310, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 310, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 310, + 791 + ], + "type": "text", + "content": "5272" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 291, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 291, + 153 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 291, + 153 + ], + "type": "text", + "content": "claim verification, StructGPT (Jiang et al., 2023) and RoG (Luo et al., 2024) achieved promising results in related tasks (e.g., Knowledge Base Question Answering) by collecting relevant evidence, such as subgraphs in KGs, then leveraging LLMs for complex reasoning in particular scenarios." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 162, + 151, + 175 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 162, + 151, + 175 + ], + "spans": [ + { + "bbox": [ + 67, + 162, + 151, + 175 + ], + "type": "text", + "content": "3 Preliminary" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 182, + 290, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 182, + 290, + 237 + ], + "spans": [ + { + "bbox": [ + 67, + 182, + 290, + 237 + ], + "type": "text", + "content": "Knowledge Graph: Knowledge Graph (KG) " + }, + { + "bbox": [ + 67, + 182, + 290, + 237 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 67, + 182, + 290, + 237 + ], + "type": "text", + "content": " represents facts as triplets of format " + }, + { + "bbox": [ + 67, + 182, + 290, + 237 + ], + "type": "inline_equation", + "content": "t = (e,r,e')" + }, + { + "bbox": [ + 67, + 182, + 290, + 237 + ], + "type": "text", + "content": ", where entities " + }, + { + "bbox": [ + 67, + 182, + 290, + 237 + ], + "type": "inline_equation", + "content": "e,e'\\in \\mathcal{E}" + }, + { + "bbox": [ + 67, + 182, + 290, + 237 + ], + "type": "text", + "content": " are connected by a relation " + }, + { + "bbox": [ + 67, + 182, + 290, + 237 + ], + "type": "inline_equation", + "content": "r\\in \\mathcal{R}" + }, + { + "bbox": [ + 67, + 182, + 290, + 237 + ], + "type": "text", + "content": "; " + }, + { + "bbox": [ + 67, + 182, + 290, + 237 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 67, + 182, + 290, + 237 + ], + "type": "text", + "content": " can also be referred as " + }, + { + "bbox": [ + 67, + 182, + 290, + 237 + ], + "type": "inline_equation", + "content": "r(e,e')" + }, + { + "bbox": [ + 67, + 182, + 290, + 237 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 239, + 289, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 239, + 289, + 361 + ], + "spans": [ + { + "bbox": [ + 67, + 239, + 289, + 361 + ], + "type": "text", + "content": "Claim Verification: Given a claim " + }, + { + "bbox": [ + 67, + 239, + 289, + 361 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 67, + 239, + 289, + 361 + ], + "type": "text", + "content": ", a verification model " + }, + { + "bbox": [ + 67, + 239, + 289, + 361 + ], + "type": "inline_equation", + "content": "\\mathcal{F}" + }, + { + "bbox": [ + 67, + 239, + 289, + 361 + ], + "type": "text", + "content": " determines its veracity as Supported or Refuted based on an external knowledge base " + }, + { + "bbox": [ + 67, + 239, + 289, + 361 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 67, + 239, + 289, + 361 + ], + "type": "text", + "content": ", while also providing a justification " + }, + { + "bbox": [ + 67, + 239, + 289, + 361 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 67, + 239, + 289, + 361 + ], + "type": "text", + "content": " to explain the predicted label. 
This work specifically considers the scenario where " + }, + { + "bbox": [ + 67, + 239, + 289, + 361 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 67, + 239, + 289, + 361 + ], + "type": "text", + "content": " is structured as a Knowledge Graph " + }, + { + "bbox": [ + 67, + 239, + 289, + 361 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 67, + 239, + 289, + 361 + ], + "type": "text", + "content": ", enabling reasoning over graph knowledge to infer " + }, + { + "bbox": [ + 67, + 239, + 289, + 361 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 67, + 239, + 289, + 361 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 239, + 289, + 361 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 67, + 239, + 289, + 361 + ], + "type": "text", + "content": ". Formally, the verification process is defined as: " + }, + { + "bbox": [ + 67, + 239, + 289, + 361 + ], + "type": "inline_equation", + "content": "(v,j) = \\mathcal{F}(c,\\mathcal{G})" + }, + { + "bbox": [ + 67, + 239, + 289, + 361 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 364, + 291, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 364, + 291, + 446 + ], + "spans": [ + { + "bbox": [ + 67, + 364, + 291, + 446 + ], + "type": "text", + "content": "Trie-based Constrained Decoding: A Trie (Wikipedia, 2025b) indexes predefined token sequences, where each root-to-node path represents a prefix. During LLM generation, this structure restricts token selection to only valid Trie paths, ensuring reliable output." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 454, + 146, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 454, + 146, + 467 + ], + "spans": [ + { + "bbox": [ + 67, + 454, + 146, + 467 + ], + "type": "text", + "content": "4 ClaimPKG" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 476, + 220, + 488 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 476, + 220, + 488 + ], + "spans": [ + { + "bbox": [ + 67, + 476, + 220, + 488 + ], + "type": "text", + "content": "4.1 Formulation of ClaimPKG" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 493, + 290, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 493, + 290, + 643 + ], + "spans": [ + { + "bbox": [ + 67, + 493, + 290, + 643 + ], + "type": "text", + "content": "We formulate the ClaimPKG framework using a probabilistic approach. Given a claim " + }, + { + "bbox": [ + 67, + 493, + 290, + 643 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 67, + 493, + 290, + 643 + ], + "type": "text", + "content": " and a prebuilt KG " + }, + { + "bbox": [ + 67, + 493, + 290, + 643 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 67, + 493, + 290, + 643 + ], + "type": "text", + "content": ", our objective is to model the distribution " + }, + { + "bbox": [ + 67, + 493, + 290, + 643 + ], + "type": "inline_equation", + "content": "p_{\\theta}(v,j|c,\\mathcal{G})" + }, + { + "bbox": [ + 67, + 493, + 290, + 643 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 67, + 493, + 290, + 643 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 67, + 493, + 290, + 643 + ], + "type": "text", + "content": " denotes the verdict and " + }, + { + "bbox": [ + 67, + 493, + 290, + 643 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 67, + 493, + 290, + 643 + ], + "type": "text", + "content": " the 
justification. However, direct computation for this distribution is infeasible as reasoning over the entire KG is not practical given its large size. To address this, we propose to select " + }, + { + "bbox": [ + 67, + 493, + 290, + 643 + ], + "type": "inline_equation", + "content": "S_{c}" + }, + { + "bbox": [ + 67, + 493, + 290, + 643 + ], + "type": "text", + "content": ", a subgraph of " + }, + { + "bbox": [ + 67, + 493, + 290, + 643 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 67, + 493, + 290, + 643 + ], + "type": "text", + "content": " relevant to " + }, + { + "bbox": [ + 67, + 493, + 290, + 643 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 67, + 493, + 290, + 643 + ], + "type": "text", + "content": " containing necessary information to derive our target distribution. Treating " + }, + { + "bbox": [ + 67, + 493, + 290, + 643 + ], + "type": "inline_equation", + "content": "S_{c}" + }, + { + "bbox": [ + 67, + 493, + 290, + 643 + ], + "type": "text", + "content": " as a latent variable, " + }, + { + "bbox": [ + 67, + 493, + 290, + 643 + ], + "type": "inline_equation", + "content": "p_{\\theta}(v,j|c,\\mathcal{G})" + }, + { + "bbox": [ + 67, + 493, + 290, + 643 + ], + "type": "text", + "content": " is decomposed as:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 76, + 650, + 290, + 673 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 650, + 290, + 673 + ], + "spans": [ + { + "bbox": [ + 76, + 650, + 290, + 673 + ], + "type": "interline_equation", + "content": "p _ {\\theta} (v, j \\mid c, \\mathcal {G}) = \\sum_ {\\mathcal {S} _ {c}} p _ {\\theta} (v, j \\mid c, \\mathcal {S} _ {c}) p _ {\\theta} (\\mathcal {S} _ {c} \\mid c, \\mathcal {G}) \\tag {1}", + "image_path": "af7de3650b2f0e4b709884f331144520b78c91d473d241f076b57e29e57383a0.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 679, + 291, + 775 + ], + "type": "text", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 67, + 679, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 679, + 291, + 775 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 679, + 291, + 775 + ], + "type": "inline_equation", + "content": "p_{\\theta}(\\mathcal{S}_c|c,\\mathcal{G})" + }, + { + "bbox": [ + 67, + 679, + 291, + 775 + ], + "type": "text", + "content": " models the subgraph selection, and " + }, + { + "bbox": [ + 67, + 679, + 291, + 775 + ], + "type": "inline_equation", + "content": "p_{\\theta}(v,j|c,\\mathcal{S}_c)" + }, + { + "bbox": [ + 67, + 679, + 291, + 775 + ], + "type": "text", + "content": " models the generator of the verdict and justification given " + }, + { + "bbox": [ + 67, + 679, + 291, + 775 + ], + "type": "inline_equation", + "content": "\\mathcal{S}_c" + }, + { + "bbox": [ + 67, + 679, + 291, + 775 + ], + "type": "text", + "content": ". However, direct computation of " + }, + { + "bbox": [ + 67, + 679, + 291, + 775 + ], + "type": "inline_equation", + "content": "p_{\\theta}(\\mathcal{S}_c|c,\\mathcal{G})" + }, + { + "bbox": [ + 67, + 679, + 291, + 775 + ], + "type": "text", + "content": " is challenging due to modality mismatch between the input " + }, + { + "bbox": [ + 67, + 679, + 291, + 775 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 67, + 679, + 291, + 775 + ], + "type": "text", + "content": " (text) and the target " + }, + { + "bbox": [ + 67, + 679, + 291, + 775 + ], + "type": "inline_equation", + "content": "\\mathcal{S}_c" + }, + { + "bbox": [ + 67, + 679, + 291, + 775 + ], + "type": "text", + "content": " (graph structure), hindering the employment of retrieval methods for " + }, + { + "bbox": [ + 67, + 679, + 291, + 775 + ], + "type": "inline_equation", + "content": "\\mathcal{S}_c" + }, + { + "bbox": [ + 67, + 679, + 291, + 775 + ], + "type": "text", + "content": ". 
To bridge this" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 71, + 515, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 515, + 84 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 515, + 84 + ], + "type": "text", + "content": "gap, we decompose the subgraph selection into:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 314, + 89, + 525, + 112 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 89, + 525, + 112 + ], + "spans": [ + { + "bbox": [ + 314, + 89, + 525, + 112 + ], + "type": "interline_equation", + "content": "p _ {\\theta} \\left(\\mathcal {S} _ {c} | c, \\mathcal {G}\\right) = \\sum_ {\\mathcal {P} _ {c}} p _ {\\theta} \\left(\\mathcal {S} _ {c} \\mid \\mathcal {P} _ {c}, \\mathcal {G}\\right) p _ {\\theta} \\left(\\mathcal {P} _ {c} | c, \\mathcal {G}\\right) \\tag {2}", + "image_path": "c35acb93965d679242b7422355fa2024d7ec46ee4d70a5d7dfce4880f06a68f6.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 118, + 525, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 118, + 525, + 253 + ], + "spans": [ + { + "bbox": [ + 302, + 118, + 525, + 253 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 302, + 118, + 525, + 253 + ], + "type": "inline_equation", + "content": "p_{\\theta}(\\mathcal{P}_c|c,\\mathcal{G})" + }, + { + "bbox": [ + 302, + 118, + 525, + 253 + ], + "type": "text", + "content": " models the generation of the graph representation " + }, + { + "bbox": [ + 302, + 118, + 525, + 253 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_c" + }, + { + "bbox": [ + 302, + 118, + 525, + 253 + ], + "type": "text", + "content": ", which we refer as \"pseudo subgraph\", from a textual claim " + }, + { + "bbox": [ + 302, + 118, + 525, + 253 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 302, + 118, + 525, + 253 + ], + "type": "text", + "content": ", and " + }, + { + 
"bbox": [ + 302, + 118, + 525, + 253 + ], + "type": "inline_equation", + "content": "p_{\\theta}(\\mathcal{S}_c|\\mathcal{P}_c,\\mathcal{G})" + }, + { + "bbox": [ + 302, + 118, + 525, + 253 + ], + "type": "text", + "content": " models the distribution over relevant subgraphs " + }, + { + "bbox": [ + 302, + 118, + 525, + 253 + ], + "type": "inline_equation", + "content": "\\mathcal{S}_c" + }, + { + "bbox": [ + 302, + 118, + 525, + 253 + ], + "type": "text", + "content": " given " + }, + { + "bbox": [ + 302, + 118, + 525, + 253 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_c" + }, + { + "bbox": [ + 302, + 118, + 525, + 253 + ], + "type": "text", + "content": ". While equations 1 and 2 establish our theoretical framework for ClaimPKG, computing exact probabilities by summing over all possible " + }, + { + "bbox": [ + 302, + 118, + 525, + 253 + ], + "type": "inline_equation", + "content": "(\\mathcal{S}_c,\\mathcal{P}_c)" + }, + { + "bbox": [ + 302, + 118, + 525, + 253 + ], + "type": "text", + "content": " pairs is intractable. 
Addressing this we propose two approximations: (1) We infer the veracity using only the most relevant subgraph " + }, + { + "bbox": [ + 302, + 118, + 525, + 253 + ], + "type": "inline_equation", + "content": "\\mathcal{S}_c^*" + }, + { + "bbox": [ + 302, + 118, + 525, + 253 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 358, + 259, + 525, + 275 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 358, + 259, + 525, + 275 + ], + "spans": [ + { + "bbox": [ + 358, + 259, + 525, + 275 + ], + "type": "interline_equation", + "content": "\\left(v ^ {*}, j ^ {*}\\right) \\sim p _ {\\theta} (v, j | c, \\mathcal {S} _ {c} ^ {*}) \\tag {3}", + "image_path": "84747f29d4dff93efcf65bb1e2d4474f651e6eba473bb7edf319b80bd4e75fa3.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 280, + 525, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 280, + 525, + 319 + ], + "spans": [ + { + "bbox": [ + 302, + 280, + 525, + 319 + ], + "type": "text", + "content": "(2) We assume each generated pseudo-subgraph is reasonable with a high probability, allowing us to approximate the subgraph selection in 2 as:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 344, + 324, + 525, + 342 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 344, + 324, + 525, + 342 + ], + "spans": [ + { + "bbox": [ + 344, + 324, + 525, + 342 + ], + "type": "interline_equation", + "content": "\\mathcal {S} _ {c} ^ {(i)} = \\arg \\max p _ {\\theta} \\left(\\mathcal {S} _ {c} | \\mathcal {P} _ {c} ^ {(i)}, \\mathcal {G}\\right) \\tag {4}", + "image_path": "8b5d3ea7fc0856397bfa2e49717cac73b3796fc3f4acffa62c190eed09ac5aa0.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 348, + 524, + 391 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 348, + 524, + 391 + ], + "spans": [ + { + "bbox": [ + 302, + 348, + 524, + 391 + ], + "type": 
"text", + "content": "with " + }, + { + "bbox": [ + 302, + 348, + 524, + 391 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_c^{(i)}" + }, + { + "bbox": [ + 302, + 348, + 524, + 391 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 302, + 348, + 524, + 391 + ], + "type": "inline_equation", + "content": "ith" + }, + { + "bbox": [ + 302, + 348, + 524, + 391 + ], + "type": "text", + "content": " pseudo-graph generation. We then construct " + }, + { + "bbox": [ + 302, + 348, + 524, + 391 + ], + "type": "inline_equation", + "content": "\\mathcal{S}_c^*" + }, + { + "bbox": [ + 302, + 348, + 524, + 391 + ], + "type": "text", + "content": " by aggregating multiple sampled subgraphs, specifically " + }, + { + "bbox": [ + 302, + 348, + 524, + 391 + ], + "type": "inline_equation", + "content": "\\mathcal{S}_c^* = \\bigcup \\mathcal{S}_c^{(i)}" + }, + { + "bbox": [ + 302, + 348, + 524, + 391 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 302, + 391, + 525, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 391, + 525, + 498 + ], + "spans": [ + { + "bbox": [ + 302, + 391, + 525, + 498 + ], + "type": "text", + "content": "These approximations lead ClaimPKG to comprise 3 key modules as depicted in Figure 2: (1) Pseudo Subgraph Generation to generate graph representations " + }, + { + "bbox": [ + 302, + 391, + 525, + 498 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_c" + }, + { + "bbox": [ + 302, + 391, + 525, + 498 + ], + "type": "text", + "content": "'s given claim " + }, + { + "bbox": [ + 302, + 391, + 525, + 498 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 302, + 391, + 525, + 498 + ], + "type": "text", + "content": "; (2) Subgraph Retrieval to retrieve relevant evidence subgraph " + }, + { + "bbox": [ + 302, + 391, + 525, + 498 + ], + "type": "inline_equation", + "content": "S_c^*" + }, + { + "bbox": [ + 302, + 391, + 525, + 
498 + ], + "type": "text", + "content": "; and (3) General Reasoning to generate final verdict " + }, + { + "bbox": [ + 302, + 391, + 525, + 498 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 302, + 391, + 525, + 498 + ], + "type": "text", + "content": " and justification " + }, + { + "bbox": [ + 302, + 391, + 525, + 498 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 302, + 391, + 525, + 498 + ], + "type": "text", + "content": ". The inference procedure is described as follows:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 310, + 508, + 475, + 520 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 508, + 475, + 520 + ], + "spans": [ + { + "bbox": [ + 310, + 508, + 475, + 520 + ], + "type": "text", + "content": "Inference Procedure of ClaimPKG" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 309, + 525, + 518, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 525, + 518, + 552 + ], + "spans": [ + { + "bbox": [ + 309, + 525, + 518, + 552 + ], + "type": "text", + "content": "Preprocessing: Index the KG " + }, + { + "bbox": [ + 309, + 525, + 518, + 552 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 309, + 525, + 518, + 552 + ], + "type": "text", + "content": " into an Entity. TriE for effective entity lookup." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 309, + 553, + 519, + 735 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 309, + 553, + 519, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 553, + 519, + 619 + ], + "spans": [ + { + "bbox": [ + 309, + 553, + 519, + 619 + ], + "type": "text", + "content": "1. 
Pseudo Subgraph Generation: Generate multiple graph representations (pseudo subgraphs) " + }, + { + "bbox": [ + 309, + 553, + 519, + 619 + ], + "type": "inline_equation", + "content": "\\mathbb{P}_c = \\{\\mathcal{P}_c^{(i)}\\}_{i=1}^N" + }, + { + "bbox": [ + 309, + 553, + 519, + 619 + ], + "type": "text", + "content": " from claim " + }, + { + "bbox": [ + 309, + 553, + 519, + 619 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 309, + 553, + 519, + 619 + ], + "type": "text", + "content": ", using a specialized LLM with beam search and Entity-Trie constraints." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 309, + 622, + 518, + 695 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 622, + 518, + 695 + ], + "spans": [ + { + "bbox": [ + 309, + 622, + 518, + 695 + ], + "type": "text", + "content": "2. Subgraph Retrieval: Use each pseudo graph in " + }, + { + "bbox": [ + 309, + 622, + 518, + 695 + ], + "type": "inline_equation", + "content": "\\mathbb{P}_c" + }, + { + "bbox": [ + 309, + 622, + 518, + 695 + ], + "type": "text", + "content": " for querying the most respective relevant subgraph " + }, + { + "bbox": [ + 309, + 622, + 518, + 695 + ], + "type": "inline_equation", + "content": "S_{c}^{(i)}" + }, + { + "bbox": [ + 309, + 622, + 518, + 695 + ], + "type": "text", + "content": " in the KG " + }, + { + "bbox": [ + 309, + 622, + 518, + 695 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 309, + 622, + 518, + 695 + ], + "type": "text", + "content": ", resulting in a set of " + }, + { + "bbox": [ + 309, + 622, + 518, + 695 + ], + "type": "inline_equation", + "content": "\\{S_c^{(i)}\\}_{i = 1}^N" + }, + { + "bbox": [ + 309, + 622, + 518, + 695 + ], + "type": "text", + "content": " following Equation 4, then aggregate them to form " + }, + { + "bbox": [ + 309, + 622, + 518, + 695 + ], + "type": "inline_equation", + "content": "S_{c}^{*} = \\bigcup_{i = 1}^{N}S_{c}^{(i)}" + }, + 
{ + "bbox": [ + 309, + 622, + 518, + 695 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 309, + 695, + 518, + 735 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 695, + 518, + 735 + ], + "spans": [ + { + "bbox": [ + 309, + 695, + 518, + 735 + ], + "type": "text", + "content": "3. General Reasoning: Employ a general-purpose LLM to reason veracity " + }, + { + "bbox": [ + 309, + 695, + 518, + 735 + ], + "type": "inline_equation", + "content": "(v^{*},j^{*})\\sim p_{\\theta}(v,j|c,\\mathcal{S}_{c}^{*})" + }, + { + "bbox": [ + 309, + 695, + 518, + 735 + ], + "type": "text", + "content": " following Equation 3." + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 302, + 748, + 524, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 748, + 524, + 774 + ], + "spans": [ + { + "bbox": [ + 302, + 748, + 524, + 774 + ], + "type": "text", + "content": "The subsequent sections provide details about each component in the ClaimPKG framework." 
+ } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "text", + "content": "5273" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 71, + 68, + 520, + 311 + ], + "blocks": [ + { + "bbox": [ + 71, + 68, + 520, + 311 + ], + "lines": [ + { + "bbox": [ + 71, + 68, + 520, + 311 + ], + "spans": [ + { + "bbox": [ + 71, + 68, + 520, + 311 + ], + "type": "image", + "image_path": "8f8c020c8c78d4277712169bc006bc9507ecf8d5ded3239cae38ce9a08b55ea3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 317, + 525, + 354 + ], + "lines": [ + { + "bbox": [ + 67, + 317, + 525, + 354 + ], + "spans": [ + { + "bbox": [ + 67, + 317, + 525, + 354 + ], + "type": "text", + "content": "Figure 2: Illustration of the ClaimPKG for claim verification. The framework consists of three key modules: (1) Pseudo-subgraph Generation, constructing representative subgraphs; (2) Subgraph Retrieval, selecting the most pertinent KG subgraphs; and (3) General Reasoning, integrating them for accurate and interpretable verification." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 375, + 233, + 389 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 375, + 233, + 389 + ], + "spans": [ + { + "bbox": [ + 67, + 375, + 233, + 389 + ], + "type": "text", + "content": "4.2 Pseudo Subgraph Generation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 393, + 290, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 393, + 290, + 541 + ], + "spans": [ + { + "bbox": [ + 67, + 393, + 290, + 541 + ], + "type": "text", + "content": "The first step to effectively verify a claim is to understand its content thoroughly and represent it in a format compatible with the KG. Since evidence comes from KG, representing claims in the graph format is crucial, which captures hypothetical relations among entities in an effective way that enables effective comparisons with KG subgraphs for evidence retrieval. However, this process faces two main challenges: (1) handling ambiguity resolution and multi-hop reasoning, and (2) ensuring accurate entity extraction from the claim." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 544, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 544, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 544, + 291, + 775 + ], + "type": "text", + "content": "Specialized LLM. To address the first challenge, the Pseudo Subgraph Generation module employs a lightweight model optimized for processing input claims. Following (Li et al., 2013; Miwa and Bansal, 2016), the model is trained to jointly extract entities and their corresponding relations from a claim " + }, + { + "bbox": [ + 67, + 544, + 291, + 775 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 67, + 544, + 291, + 775 + ], + "type": "text", + "content": ". 
Specifically, from " + }, + { + "bbox": [ + 67, + 544, + 291, + 775 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 67, + 544, + 291, + 775 + ], + "type": "text", + "content": " the model constructs a pseudo subgraph " + }, + { + "bbox": [ + 67, + 544, + 291, + 775 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_c" + }, + { + "bbox": [ + 67, + 544, + 291, + 775 + ], + "type": "text", + "content": " comprising triplets in the form of head_entity||relation||tail-entity (illustrated in Figure 2). To ensure the generated subgraph can identify entities requiring ambiguity resolution and multi-hop reasoning, we employ a specialized annotation mechanism: when the claim references an entity indirectly—either without explicit naming or through relations to other entities—we denote it as unknown_i, with the index i to keep track of different entities. This" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 302, + 375, + 525, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 375, + 525, + 430 + ], + "spans": [ + { + "bbox": [ + 302, + 375, + 525, + 430 + ], + "type": "text", + "content": "notation effectively signals the need for further disambiguation and reasoning within the KG in subsequent steps. Training details enabling this annotation strategy are presented in Appendix B.1." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 433, + 525, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 433, + 525, + 622 + ], + "spans": [ + { + "bbox": [ + 302, + 433, + 525, + 622 + ], + "type": "text", + "content": "Trie-Constrained Decoding. For the second challenge, we develop a constrained decoding algorithm with an Entity Trie inspired by (Cao et al., 2021). 
We construct a trie " + }, + { + "bbox": [ + 302, + 433, + 525, + 622 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 302, + 433, + 525, + 622 + ], + "type": "text", + "content": " from the KG's entity set " + }, + { + "bbox": [ + 302, + 433, + 525, + 622 + ], + "type": "inline_equation", + "content": "\\mathcal{E} = \\{e_1,e_2,\\ldots \\}" + }, + { + "bbox": [ + 302, + 433, + 525, + 622 + ], + "type": "text", + "content": ". The specialized LLM generates entities using special tokens " + }, + { + "bbox": [ + 302, + 433, + 525, + 622 + ], + "type": "inline_equation", + "content": "\\langle e\\rangle" + }, + { + "bbox": [ + 302, + 433, + 525, + 622 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 302, + 433, + 525, + 622 + ], + "type": "inline_equation", + "content": "\\langle /e\\rangle" + }, + { + "bbox": [ + 302, + 433, + 525, + 622 + ], + "type": "text", + "content": " to mark entity boundaries. When " + }, + { + "bbox": [ + 302, + 433, + 525, + 622 + ], + "type": "inline_equation", + "content": "\\langle e\\rangle" + }, + { + "bbox": [ + 302, + 433, + 525, + 622 + ], + "type": "text", + "content": " is generated, the decoding process restricts token selection based on " + }, + { + "bbox": [ + 302, + 433, + 525, + 622 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 302, + 433, + 525, + 622 + ], + "type": "text", + "content": " until " + }, + { + "bbox": [ + 302, + 433, + 525, + 622 + ], + "type": "inline_equation", + "content": "\\langle /e\\rangle" + }, + { + "bbox": [ + 302, + 433, + 525, + 622 + ], + "type": "text", + "content": " is produced, ensuring all generated entities exist in the KG. Outside such boundaries, the model generates relations by sampling from an unconstrained original token distribution. This mechanism ensures entity reliability while preserving flexible relation extraction (Edge et al., 2024)." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 625, + 525, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 625, + 525, + 709 + ], + "spans": [ + { + "bbox": [ + 302, + 625, + 525, + 709 + ], + "type": "text", + "content": "Multiple Representations. In order to capture different semantic views of a claim, we employ beam search along with the described sampling strategy, which is proved to improve the coverage of extracted triplets (table 8), resulting in multiple representations " + }, + { + "bbox": [ + 302, + 625, + 525, + 709 + ], + "type": "inline_equation", + "content": "\\mathbb{P}_c = \\{\\mathcal{P}_c^{(i)}\\}_{i = 1}^N" + }, + { + "bbox": [ + 302, + 625, + 525, + 709 + ], + "type": "text", + "content": " for an input claim." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 721, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 721, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 721, + 525, + 775 + ], + "type": "text", + "content": "In summary, each of the claim's graph representations satisfies following properties: (1) effectively capture the underlying graph structure of that claim, and (2) correctly align with the KG's entities." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "text", + "content": "5274" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 188, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 188, + 84 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 188, + 84 + ], + "type": "text", + "content": "4.3 Subgraph Retrieval" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 89, + 291, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 89, + 291, + 238 + ], + "spans": [ + { + "bbox": [ + 67, + 89, + 291, + 238 + ], + "type": "text", + "content": "The second component of ClaimPKG involves retrieving relevant KG subgraphs as evidence by using a dedicated algorithm that matches the pseudosubgraphs " + }, + { + "bbox": [ + 67, + 89, + 291, + 238 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_c" + }, + { + "bbox": [ + 67, + 89, + 291, + 238 + ], + "type": "text", + "content": "'s from the previous step to actual subgraphs in the KG. We present the high-level description of our algorithm here, while its complete formulation is detailed in Appendix D. We categorize triplets in a " + }, + { + "bbox": [ + 67, + 89, + 291, + 238 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_c" + }, + { + "bbox": [ + 67, + 89, + 291, + 238 + ], + "type": "text", + "content": " into: (1) Incomplete triplets, where either the head or tail entity is marked as unknown, and (2) Complete triplets, where both head and tail entities are explicitly identified." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 240, + 291, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 240, + 291, + 321 + ], + "spans": [ + { + "bbox": [ + 67, + 240, + 291, + 321 + ], + "type": "text", + "content": "Relation Scoring Function: We define a function " + }, + { + "bbox": [ + 67, + 240, + 291, + 321 + ], + "type": "inline_equation", + "content": "\\operatorname{Sim}(r_1, r_2)" + }, + { + "bbox": [ + 67, + 240, + 291, + 321 + ], + "type": "text", + "content": " to quantify the similarity between two relations, where a higher score indicates greater similarity. This function can be instantiated via various mechanisms (e.g., embedding similarity, re-ranking, fuzzy matching, etc.)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "spans": [ + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "text", + "content": "Incomplete Triplets Retrieval: Our goal is to identify evidence (actual triplets in the KG) to inform us about entities marked as unknown and their respective relations with explicit entities in the pseudo-subgraphs. First, for a " + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_c" + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "text", + "content": ", we group triplets sharing the same unknown entity " + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "text", + "content": " into a group " + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "text", + "content": " (e.g., in Figure 2, triplets associated with unknown_0 are grouped together). 
Subsequently, for each group " + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "text", + "content": " characterized by the unknown entity " + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "text", + "content": ", we denote: " + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_u = \\{e_{u1}, \\ldots, e_{un}\\}" + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "text", + "content": " as entities directly connected to " + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "text", + "content": " in the pseudo-subgraph " + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_c" + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "inline_equation", + "content": "\\mathcal{R}_u = \\{r_{u1}, \\ldots, r_{un}\\}" + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "text", + "content": " as relations from " + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "text", + "content": " to corresponding entities in " + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_c" + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "text", + "content": ". 
In " + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "text", + "content": ", for each explicit entity " + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "inline_equation", + "content": "e_{ui} \\in \\mathcal{E}_u" + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "text", + "content": ", we first retrieve candidate set " + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "inline_equation", + "content": "C_{ui} = \\{e_{i1}^c, \\ldots, e_{im}^c\\}" + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "text", + "content": " containing all entities connected to " + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "inline_equation", + "content": "e_{ui}" + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "text", + "content": " in the KG, then collect all candidate sets into " + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "inline_equation", + "content": "\\mathcal{C}_u = \\{C_{u1}, \\ldots, C_{un}\\}" + }, + { + "bbox": [ + 67, + 324, + 291, + 554 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 555, + 291, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 555, + 291, + 705 + ], + "spans": [ + { + "bbox": [ + 67, + 555, + 291, + 705 + ], + "type": "text", + "content": "To determine the best candidates for resolving " + }, + { + "bbox": [ + 67, + 555, + 291, + 705 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 67, + 555, + 291, + 705 + ], + "type": "text", + "content": ", we propose an Entity Scoring mechanism, which is based on two assumptions: (1) since " + }, + { + "bbox": [ + 67, + 555, + 291, + 705 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 67, + 555, + 291, + 705 + ], + "type": "text", + "content": " has pseudo relations with all entities in " + }, + { + "bbox": [ + 67, + 555, + 291, + 705 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_u" + }, + { + "bbox": [ + 67, + 555, + 291, + 705 + ], + "type": "text", + "content": ", a candidate " + }, + { + "bbox": [ + 67, + 555, + 291, + 705 + ], + "type": "inline_equation", + "content": "e^c" + }, + { + "bbox": [ + 67, + 555, + 291, + 705 + ], + "type": "text", + "content": " connected to more entities in " + }, + { + "bbox": [ + 67, + 555, + 291, + 705 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_u" + }, + { + "bbox": [ + 67, + 555, + 291, + 705 + ], + "type": "text", + "content": " is more likely to resolve " + }, + { + "bbox": [ + 67, + 555, + 291, + 705 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 67, + 555, + 291, + 705 + ], + "type": "text", + "content": "; and (2) because every information related to " + }, + { + "bbox": [ + 67, + 555, + 291, + 705 + ], + "type": "inline_equation", + "content": "e_{ui}" + }, + { + "bbox": [ + 67, + 555, + 291, + 705 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 555, + 291, + 705 + ], + "type": "inline_equation", + "content": "u" + }, + { + 
"bbox": [ + 67, + 555, + 291, + 705 + ], + "type": "text", + "content": " is crucial to verify the initial claim, each candidate set " + }, + { + "bbox": [ + 67, + 555, + 291, + 705 + ], + "type": "inline_equation", + "content": "C_{ui}" + }, + { + "bbox": [ + 67, + 555, + 291, + 705 + ], + "type": "text", + "content": " must contribute to the final verification. Note that an entity can appear in multiple candidate sets, hence we compute a \"global\" score for each " + }, + { + "bbox": [ + 67, + 555, + 291, + 705 + ], + "type": "inline_equation", + "content": "e_{ij}^c" + }, + { + "bbox": [ + 67, + 555, + 291, + 705 + ], + "type": "text", + "content": " in a candidate set " + }, + { + "bbox": [ + 67, + 555, + 291, + 705 + ], + "type": "inline_equation", + "content": "C_{ui}" + }, + { + "bbox": [ + 67, + 555, + 291, + 705 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 108, + 714, + 290, + 735 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 714, + 290, + 735 + ], + "spans": [ + { + "bbox": [ + 108, + 714, + 290, + 735 + ], + "type": "interline_equation", + "content": "\\operatorname {s c o r e} \\left(e _ {i j} ^ {c}\\right) = \\sum_ {r} ^ {R _ {i j} ^ {u}} \\operatorname {S i m} \\left(r _ {u i}, r\\right) \\tag {5}", + "image_path": "97840d50964ca4e049569b715e9a7cc25f97de0587b2370b277cee00a289bf3f.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 745, + 290, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 745, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 745, + 290, + 775 + ], + "type": "text", + "content": "with " + }, + { + "bbox": [ + 67, + 745, + 290, + 775 + ], + "type": "inline_equation", + "content": "R_{ij}^{u} = \\bigcup_{i = 1}^{\\left|\\mathcal{E}_{u}\\right|}\\{r(e_{ui},e_{ij}^{c})\\mid" + }, + { + "bbox": [ + 67, + 745, + 290, + 775 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 67, + 745, + 
290, + 775 + ], + "type": "inline_equation", + "content": "e_{ij}^{c}\\in C_{ui}\\}" + }, + { + "bbox": [ + 67, + 745, + 290, + 775 + ], + "type": "text", + "content": " , the set of all relations across candidate sets appearing" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 71, + 526, + 112 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 526, + 112 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 526, + 112 + ], + "type": "text", + "content": "in " + }, + { + "bbox": [ + 302, + 71, + 526, + 112 + ], + "type": "inline_equation", + "content": "\\mathcal{C}_u" + }, + { + "bbox": [ + 302, + 71, + 526, + 112 + ], + "type": "text", + "content": " that connect " + }, + { + "bbox": [ + 302, + 71, + 526, + 112 + ], + "type": "inline_equation", + "content": "e_{ij}^c" + }, + { + "bbox": [ + 302, + 71, + 526, + 112 + ], + "type": "text", + "content": " with an " + }, + { + "bbox": [ + 302, + 71, + 526, + 112 + ], + "type": "inline_equation", + "content": "e_{ui}" + }, + { + "bbox": [ + 302, + 71, + 526, + 112 + ], + "type": "text", + "content": ". 
Subsequently, to construct the set " + }, + { + "bbox": [ + 302, + 71, + 526, + 112 + ], + "type": "inline_equation", + "content": "T_{u}" + }, + { + "bbox": [ + 302, + 71, + 526, + 112 + ], + "type": "text", + "content": " of most relevant triplets to a group " + }, + { + "bbox": [ + 302, + 71, + 526, + 112 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 302, + 71, + 526, + 112 + ], + "type": "text", + "content": ", we employ a ranking function as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 334, + 117, + 525, + 155 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 334, + 117, + 525, + 155 + ], + "spans": [ + { + "bbox": [ + 334, + 117, + 525, + 155 + ], + "type": "interline_equation", + "content": "T _ {u} = \\bigcup_ {i = 1} ^ {| C _ {u} |} \\underset {\\text {t r i p l e t}, k _ {1}} {\\arg \\max } \\left\\{\\pi_ {i j} \\mid j \\leq \\left| C _ {u i} \\right| \\right\\} \\tag {6}", + "image_path": "f3fb9a62baebec070348a13e7b5ed7660913ff520d5f9b2e89edadd1701f3251.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 160, + 526, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 160, + 526, + 201 + ], + "spans": [ + { + "bbox": [ + 302, + 160, + 526, + 201 + ], + "type": "text", + "content": "with " + }, + { + "bbox": [ + 302, + 160, + 526, + 201 + ], + "type": "inline_equation", + "content": "\\pi_{ij}" + }, + { + "bbox": [ + 302, + 160, + 526, + 201 + ], + "type": "text", + "content": " is simply " + }, + { + "bbox": [ + 302, + 160, + 526, + 201 + ], + "type": "inline_equation", + "content": "score(e_{ij}^{c})" + }, + { + "bbox": [ + 302, + 160, + 526, + 201 + ], + "type": "text", + "content": " and (triplet, " + }, + { + "bbox": [ + 302, + 160, + 526, + 201 + ], + "type": "inline_equation", + "content": "k_{1}" + }, + { + "bbox": [ + 302, + 160, + 526, + 201 + ], + "type": "text", + "content": ") denotes the selection of top " + }, + { + "bbox": 
[ + 302, + 160, + 526, + 201 + ], + "type": "inline_equation", + "content": "k_{1}" + }, + { + "bbox": [ + 302, + 160, + 526, + 201 + ], + "type": "text", + "content": " triplets " + }, + { + "bbox": [ + 302, + 160, + 526, + 201 + ], + "type": "inline_equation", + "content": "(e_{ui}, r, e^{c})" + }, + { + "bbox": [ + 302, + 160, + 526, + 201 + ], + "type": "text", + "content": " having the highest global scores from each set in " + }, + { + "bbox": [ + 302, + 160, + 526, + 201 + ], + "type": "inline_equation", + "content": "\\mathcal{C}_{u}" + }, + { + "bbox": [ + 302, + 160, + 526, + 201 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 201, + 525, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 201, + 525, + 269 + ], + "spans": [ + { + "bbox": [ + 302, + 201, + 525, + 269 + ], + "type": "text", + "content": "While equation 5 ensures candidates appearing in multiple candidate sets and having high similar scores are prioritized, equation 6 ensures every entity in " + }, + { + "bbox": [ + 302, + 201, + 525, + 269 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_u" + }, + { + "bbox": [ + 302, + 201, + 525, + 269 + ], + "type": "text", + "content": " has at least " + }, + { + "bbox": [ + 302, + 201, + 525, + 269 + ], + "type": "inline_equation", + "content": "k_{1}" + }, + { + "bbox": [ + 302, + 201, + 525, + 269 + ], + "type": "text", + "content": " triplets, both of which make use of assumptions (1) and (2)." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 272, + 525, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 272, + 525, + 380 + ], + "spans": [ + { + "bbox": [ + 302, + 272, + 525, + 380 + ], + "type": "text", + "content": "Complete Triplets Retrieval: For each triplet " + }, + { + "bbox": [ + 302, + 272, + 525, + 380 + ], + "type": "inline_equation", + "content": "(e_1, r, e_2)" + }, + { + "bbox": [ + 302, + 272, + 525, + 380 + ], + "type": "text", + "content": " in a " + }, + { + "bbox": [ + 302, + 272, + 525, + 380 + ], + "type": "inline_equation", + "content": "\\mathcal{P}_c" + }, + { + "bbox": [ + 302, + 272, + 525, + 380 + ], + "type": "text", + "content": ", we first find top " + }, + { + "bbox": [ + 302, + 272, + 525, + 380 + ], + "type": "inline_equation", + "content": "k_2" + }, + { + "bbox": [ + 302, + 272, + 525, + 380 + ], + "type": "text", + "content": " similar relations between " + }, + { + "bbox": [ + 302, + 272, + 525, + 380 + ], + "type": "inline_equation", + "content": "e_1" + }, + { + "bbox": [ + 302, + 272, + 525, + 380 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 302, + 272, + 525, + 380 + ], + "type": "inline_equation", + "content": "e_2" + }, + { + "bbox": [ + 302, + 272, + 525, + 380 + ], + "type": "text", + "content": " in the KG " + }, + { + "bbox": [ + 302, + 272, + 525, + 380 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 302, + 272, + 525, + 380 + ], + "type": "text", + "content": " using the Sim function. 
If no direct connection exists (e.g., \"103 Colmore Row\" and \"Vedat Tek\" as shown in figure 2), the triplet is decomposed into two: " + }, + { + "bbox": [ + 302, + 272, + 525, + 380 + ], + "type": "inline_equation", + "content": "(e_1, r, \\text{unknown}_0)" + }, + { + "bbox": [ + 302, + 272, + 525, + 380 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 302, + 272, + 525, + 380 + ], + "type": "inline_equation", + "content": "(\\text{unknown}_0, r, e_2)" + }, + { + "bbox": [ + 302, + 272, + 525, + 380 + ], + "type": "text", + "content": ". These are then handled via Incomplete Triplets Retrieval." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 383, + 525, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 383, + 525, + 504 + ], + "spans": [ + { + "bbox": [ + 302, + 383, + 525, + 504 + ], + "type": "text", + "content": "Subgraph Union: In summary, for an input claim " + }, + { + "bbox": [ + 302, + 383, + 525, + 504 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 302, + 383, + 525, + 504 + ], + "type": "text", + "content": ", multiple pseudo-graphs are generated, containing complete and incomplete triplets. These triplets undergo processing to handle shared unknown entities and identified entities that are not connected in the KG " + }, + { + "bbox": [ + 302, + 383, + 525, + 504 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 302, + 383, + 525, + 504 + ], + "type": "text", + "content": ", and are used to query " + }, + { + "bbox": [ + 302, + 383, + 525, + 504 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 302, + 383, + 525, + 504 + ], + "type": "text", + "content": " for relevant triplets. 
All retrieved evidence triplets are aggregated into a final subgraph " + }, + { + "bbox": [ + 302, + 383, + 525, + 504 + ], + "type": "inline_equation", + "content": "S_{c}^{*}" + }, + { + "bbox": [ + 302, + 383, + 525, + 504 + ], + "type": "text", + "content": ", serving as the evidence for the final component of ClaimPKG." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 513, + 420, + 527 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 513, + 420, + 527 + ], + "spans": [ + { + "bbox": [ + 302, + 513, + 420, + 527 + ], + "type": "text", + "content": "4.4 General Reasoning" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 531, + 525, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 531, + 525, + 679 + ], + "spans": [ + { + "bbox": [ + 302, + 531, + 525, + 679 + ], + "type": "text", + "content": "The General Reasoning module concludes the ClaimPKG framework by determining claim veracity through reasoning over input claim " + }, + { + "bbox": [ + 302, + 531, + 525, + 679 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 302, + 531, + 525, + 679 + ], + "type": "text", + "content": " and retrieved evidence subgraph " + }, + { + "bbox": [ + 302, + 531, + 525, + 679 + ], + "type": "inline_equation", + "content": "S_{c}^{*}" + }, + { + "bbox": [ + 302, + 531, + 525, + 679 + ], + "type": "text", + "content": ". As complex tasks, especially claim verification, require deliberate chain-of-thought reasoning (Jiang et al., 2020; Wang et al., 2023), we use a general-purpose LLM to analyze " + }, + { + "bbox": [ + 302, + 531, + 525, + 679 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 302, + 531, + 525, + 679 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 302, + 531, + 525, + 679 + ], + "type": "inline_equation", + "content": "S_{c}^{*}" + }, + { + "bbox": [ + 302, + 531, + 525, + 679 + ], + "type": "text", + "content": ". 
Using carefully designed prompts (Figure 6), the module generates a natural language justification " + }, + { + "bbox": [ + 302, + 531, + 525, + 679 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 302, + 531, + 525, + 679 + ], + "type": "text", + "content": " and verdict " + }, + { + "bbox": [ + 302, + 531, + 525, + 679 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 302, + 531, + 525, + 679 + ], + "type": "text", + "content": ". Expanded from equation 3, this step is formalized as:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 315, + 686, + 525, + 702 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 686, + 525, + 702 + ], + "spans": [ + { + "bbox": [ + 315, + 686, + 525, + 702 + ], + "type": "interline_equation", + "content": "p _ {\\theta} (v, j | c, \\mathcal {S} _ {c} ^ {*}) = p _ {\\theta} (v | c, j, \\mathcal {S} _ {c} ^ {*}) p _ {\\theta} (j | c, \\mathcal {S} _ {c} ^ {*}) \\tag {7}", + "image_path": "649b6fdac049c2f4ea633d4bd8fbf25737efb0e2527330a8484c181a11713f1b.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "inline_equation", + "content": "p(j|c, S_c^*)" + }, + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "text", + "content": " produces the justification and " + }, + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "inline_equation", + "content": "p(v|c, j, S_c^*)" + }, + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "text", + "content": " determines veracity. This model-agnostic design enables integration with state-of-the-art LLMs (e.g., Llama, Qwen and GPT4) for zero-shot reasoning." 
+ } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "text", + "content": "5275" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 155, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 155, + 84 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 155, + 84 + ], + "type": "text", + "content": "5 Experiments" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 93, + 189, + 106 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 93, + 189, + 106 + ], + "spans": [ + { + "bbox": [ + 67, + 93, + 189, + 106 + ], + "type": "text", + "content": "5.1 Experimental Setup" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 111, + 291, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 111, + 291, + 354 + ], + "spans": [ + { + "bbox": [ + 67, + 111, + 291, + 354 + ], + "type": "text", + "content": "Datasets. Our primary benchmark is the FactKG dataset (Kim et al., 2023b), designed for claim verification over the DBpedia KG (Lehmann et al., 2015). It consists of 108K claims grounded in DBpedia and labelled as either SUPPORTED or REFUTED. The claims span five distinct categories: One-hop, Conjunction, Existence, Multi-hop, and Negation, each posing unique challenges. For evaluation, we randomly sample 2K claims from the test set, ensuring balanced representation across categories under computational efficiency. 
To assess the generalizability of ClaimPKG beyond structured benchmarks, we also evaluate HoVer (Jiang et al., 2020) and FEVERIOUS (Aly et al., 2021), two widely-used unstructured-based benchmarks requiring multi-hop reasoning and evidence aggregation from Wikipedia. Additional statistics of datasets are provided in Appendix A." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 356, + 291, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 356, + 291, + 462 + ], + "spans": [ + { + "bbox": [ + 67, + 356, + 291, + 462 + ], + "type": "text", + "content": "Metrics. We use Accuracy as the primary metric along with Entity Correctness to measure if the claim's extracted entity is valid in KG. Additionally, for the FactKG dev set, we report Claim Structure Coverage, which quantifies the proportion of triplets from the original claim's graph structure successfully reconstructed by our pipeline. We refer readers to Appendix C for more details." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 464, + 291, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 464, + 291, + 544 + ], + "spans": [ + { + "bbox": [ + 67, + 464, + 291, + 544 + ], + "type": "text", + "content": "Annotation. For brevity, we use Llama-3B, Llama-70B, and Qwen-72B to refer to Llama-3.2-3B, Llama-3.3-70B, and Qwen2.5-72B respectively. The * symbol denotes models fine-tuned for pseudo subgraph generation. Full model names are used when necessary." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 544, + 292, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 544, + 292, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 544, + 292, + 775 + ], + "type": "text", + "content": "Baselines. 
We compare ClaimPKG with recent KG-based claim verification methods: Zero-shot CoT (Wei et al., 2022) prompts LLMs to generate rationales and verdicts without accessing the KG; GEAR (Zhou et al., 2019), originally designed for text-based verification, employs graph-based evidence aggregation with multiple aggregators to capture multi-evidence dependencies, using BERT for language representation and adapted for KG settings following (Kim et al., 2023b); and KG-GPT (Kim et al., 2023a), a pioneer work that combines LLMs and KGs through a structured pipeline of Sentence Segmentation, Graph Retrieval, and Logic Inference. Notably, unlike baselines which receive pre-identified claim entities along with the claim as the input, our method processes entities in an end-to-end pipeline." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 71, + 527, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 527, + 234 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 527, + 234 + ], + "type": "text", + "content": "Implementation. For a comprehensive evaluation, we evaluate baselines on three model series: Llama 3 (Meta, 2024), Qwen 2.5 (Qwen, 2024), and GPT4o-mini (OpenAI, 2024). In ClaimPKG, we configure the Specialized LLM to generate multiple pseudo-subgraphs using a beam size of 5. For the Subgraph Retrieval algorithm, we adopt an embedding-based approach leveraging BGE-LargeEN-v1.5 (Xiao et al., 2023) to compute dot-product similarity for the Relation Scoring Function, we set the primary hyperparameters to " + }, + { + "bbox": [ + 302, + 71, + 527, + 234 + ], + "type": "inline_equation", + "content": "k_{1} = 3" + }, + { + "bbox": [ + 302, + 71, + 527, + 234 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 302, + 71, + 527, + 234 + ], + "type": "inline_equation", + "content": "k_{2} = 1" + }, + { + "bbox": [ + 302, + 71, + 527, + 234 + ], + "type": "text", + "content": ". 
Detailed justification is provided in Appendix C." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 248, + 428, + 261 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 248, + 428, + 261 + ], + "spans": [ + { + "bbox": [ + 302, + 248, + 428, + 261 + ], + "type": "text", + "content": "5.2 Results and Analysis" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 269, + 524, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 269, + 524, + 295 + ], + "spans": [ + { + "bbox": [ + 302, + 269, + 524, + 295 + ], + "type": "text", + "content": "We present the main experimental results in this section and additional findings in Appendix C." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 299, + 526, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 299, + 526, + 352 + ], + "spans": [ + { + "bbox": [ + 302, + 299, + 526, + 352 + ], + "type": "text", + "content": "(RQ1): How Does ClaimPKG Perform Against the Baselines? Table 1 compares the accuracy " + }, + { + "bbox": [ + 302, + 299, + 526, + 352 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 302, + 299, + 526, + 352 + ], + "type": "text", + "content": " of ClaimPKG with baselines across claim categories of the FactKG. 
Key observations include:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 353, + 527, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 353, + 527, + 623 + ], + "spans": [ + { + "bbox": [ + 302, + 353, + 527, + 623 + ], + "type": "text", + "content": "(1) Direct inference using LLMs with CoT reasoning significantly underperforms compared to evidence-based methods, with the best average score reaching only " + }, + { + "bbox": [ + 302, + 353, + 527, + 623 + ], + "type": "inline_equation", + "content": "69.07\\%" + }, + { + "bbox": [ + 302, + 353, + 527, + 623 + ], + "type": "text", + "content": ", highlighting that despite LLM advancements, evidence retrieval remains crucial. (2) KG-GPT integrates knowledge graphs with LLMs but its best average score achieves only " + }, + { + "bbox": [ + 302, + 353, + 527, + 623 + ], + "type": "inline_equation", + "content": "74.70\\%" + }, + { + "bbox": [ + 302, + 353, + 527, + 623 + ], + "type": "text", + "content": " (Llama-70B Few-shot), falling short of GEAR's fine-tuned model at " + }, + { + "bbox": [ + 302, + 353, + 527, + 623 + ], + "type": "inline_equation", + "content": "76.65\\%" + }, + { + "bbox": [ + 302, + 353, + 527, + 623 + ], + "type": "text", + "content": ". This suggests that while LLMs excel at language tasks, they require specific adaptation for KG processing. (3) ClaimPKG, with the strongest configuration " + }, + { + "bbox": [ + 302, + 353, + 527, + 623 + ], + "type": "inline_equation", + "content": "(\\text{Llama}-3\\text{B}^{*} + \\text{Llama}-70\\text{B})" + }, + { + "bbox": [ + 302, + 353, + 527, + 623 + ], + "type": "text", + "content": " and constrained by Entity-Trie for valid KG entity generation, achieves a 12-point improvement over KG-GPT and 9 points over GEAR. It particularly excels in multi-hop reasoning, demonstrating strong performance across Llama-3 and Qwen-2.5 backbones through effective structured evidence retrieval and KG integration." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 626, + 526, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 626, + 526, + 706 + ], + "spans": [ + { + "bbox": [ + 302, + 626, + 526, + 706 + ], + "type": "text", + "content": "(RQ2): How Do Different Components Affect Performance? To evaluate the impact of each component in ClaimPKG, we conduct ablation studies of the following components, maintaining Llama-3B* as the Specialized LLM and Llama-70B as the General LLM." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 708, + 526, + 773 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 708, + 526, + 773 + ], + "spans": [ + { + "bbox": [ + 302, + 708, + 526, + 773 + ], + "type": "text", + "content": "Entity-Trie Constraint. We remove the Entity-Trie constraint to assess its necessity. Compared to the full setup, this reduces the entity extraction correctness from " + }, + { + "bbox": [ + 302, + 708, + 526, + 773 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 302, + 708, + 526, + 773 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 302, + 708, + 526, + 773 + ], + "type": "inline_equation", + "content": "87.5\\%" + }, + { + "bbox": [ + 302, + 708, + 526, + 773 + ], + "type": "text", + "content": ", and overall performance from " + }, + { + "bbox": [ + 302, + 708, + 526, + 773 + ], + "type": "inline_equation", + "content": "84.64\\%" + }, + { + "bbox": [ + 302, + 708, + 526, + 773 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 302, + 708, + 526, + 773 + ], + "type": "inline_equation", + "content": "82.72\\%" + }, + { + "bbox": [ + 302, + 708, + 526, + 773 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 780, + 310, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 310, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 310, + 791 + ], + "type": "text", + "content": "5276" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 68, + 68, + 526, + 257 + ], + "blocks": [ + { + "bbox": [ + 68, + 68, + 526, + 257 + ], + "lines": [ + { + "bbox": [ + 68, + 68, + 526, + 257 + ], + "spans": [ + { + "bbox": [ + 68, + 68, + 526, + 257 + ], + "type": "table", + "html": "
MethodEntity CorrectnessNegationExistenceConjunctionMulti-hopOne-hopAverage
Direct Inference With CoT - w/o Evidence Retrieval
GPT-4o-mini (Zero-shot CoT)-61.9159.4569.5160.8770.8364.51
Qwen-72B (Zero-shot CoT)-62.9162.2074.0462.3275.9867.49
Llama-70B (Zero-shot CoT)-64.3464.6272.4765.5878.3269.07
Baseline Comparison - w/ Evidence Retrieval
GEAR (Finetuned BERT)Known in Prior79.7279.1978.6368.3977.3476.65
KG-GPT (Llama-70B Few-shot)Known in Prior70.9165.0686.6458.8792.0274.70
KG-GPT (Qwen-72B Few-shot)Known in Prior67.3160.0889.1458.1990.8773.12
ClaimPKG (Llama-3B* + GPT-4o-mini)100.0%85.1072.6484.2372.2691.0181.05
ClaimPKG (Llama-3B* + Qwen-72B)100.0%85.2786.9084.0278.7191.2085.22
ClaimPKG (Llama-3B* + Llama-70B)100.0%84.5884.2085.6878.4990.2684.64
Ablation Results (Llama-3B* + Llama-70B) - w/ Evidence Retrieval
ClaimPKG (w/o Trie Constraint)87.50%82.5083.2483.8276.1388.0182.74
ClaimPKG (Few-shot Specialized LLM)86.52%77.9981.8977.8068.8281.6577.63
ClaimPKG (w/o Incomplete Retrieval)100.0%68.8051.2567.8461.2976.2265.08
", + "image_path": "61bfcdebf0f230d60799ac75fe83febfdd5cbc6f7c63931a562659166c79107f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 309, + 291, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 309, + 291, + 444 + ], + "spans": [ + { + "bbox": [ + 66, + 309, + 291, + 444 + ], + "type": "text", + "content": "Specialized LLM. When replacing the specialized LLM with few-shot prompting strategy using Llama-70B, a much larger general-purpose LLM, entity correctness further declines to " + }, + { + "bbox": [ + 66, + 309, + 291, + 444 + ], + "type": "inline_equation", + "content": "86.52\\%" + }, + { + "bbox": [ + 66, + 309, + 291, + 444 + ], + "type": "text", + "content": ", leading overall performance to drop to " + }, + { + "bbox": [ + 66, + 309, + 291, + 444 + ], + "type": "inline_equation", + "content": "77.63\\%" + }, + { + "bbox": [ + 66, + 309, + 291, + 444 + ], + "type": "text", + "content": ". These results demonstrate that even with examples, general-purpose LLMs struggle to produce outputs with desired graph structure correctly, emphasizing the importance of the specialized LLM in generating pseudo subgraphs." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 444, + 291, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 444, + 291, + 539 + ], + "spans": [ + { + "bbox": [ + 67, + 444, + 291, + 539 + ], + "type": "text", + "content": "Incomplete Retrieval. 
Removing the Incomplete Triplet Retrieval function, which forces the retrieval algorithm to only query evidence using complete triplets, causes a significant average performance drop of nearly " + }, + { + "bbox": [ + 67, + 444, + 291, + 539 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 67, + 444, + 291, + 539 + ], + "type": "text", + "content": " compared to the full setup, showing the complete graph structure of input claims is essential for optimal performance." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 542, + 291, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 542, + 291, + 608 + ], + "spans": [ + { + "bbox": [ + 67, + 542, + 291, + 608 + ], + "type": "text", + "content": "(RQ3): Robustness and Generalization of ClaimPKG? To assess ClaimPKG's robustness, we vary model backbones, examine zero-shot generalizability, analyze the effect of training data size, and conduct error analysis." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 612, + 291, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 612, + 291, + 719 + ], + "spans": [ + { + "bbox": [ + 67, + 612, + 291, + 719 + ], + "type": "text", + "content": "Model Backbones. We evaluate different LLM architectures for both Specialized and General LLMs (Table 2). For General LLMs, we test various model sizes (7B to 70B parameters) using retrieved KG triplets as input. For Specialized LLMs, we experiment with different small fine-tuned backbones and few-shot prompt templates (Figure 7), while keeping Llama-3.3-70B as the fixed General LLM." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 721, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 721, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 721, + 291, + 775 + ], + "type": "text", + "content": "Results in Table 2 show larger General LLMs (GPT-4o-Mini, Llama-3.3-70B) outperform smaller ones (Qwen-2.5-7B, Llama-3.1-8B) by up to 8 points, highlighting model capacity's role in ag" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 315, + 306, + 514, + 452 + ], + "blocks": [ + { + "bbox": [ + 67, + 263, + 525, + 288 + ], + "lines": [ + { + "bbox": [ + 67, + 263, + 525, + 288 + ], + "spans": [ + { + "bbox": [ + 67, + 263, + 525, + 288 + ], + "type": "text", + "content": "Table 1: Performance (accuracy %) comparison of ClaimPKG with baselines on 5 claim categories of FactKG dataset and their average scores." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 315, + 306, + 514, + 452 + ], + "lines": [ + { + "bbox": [ + 315, + 306, + 514, + 452 + ], + "spans": [ + { + "bbox": [ + 315, + 306, + 514, + 452 + ], + "type": "table", + "html": "
ComponentStrategyBackboneAverage
General LLMZero-shotLlama 3.1 - 8B77.08
Llama 3.3 - 70B84.64
GPT4o - Mini81.05
Qwen 2.5 - 7B80.22
Qwen 2.5 - 72B85.22
Specialized LLMFinetuneLlama 3 - 3B84.64
Qwen 2.5 - 3B82.32
Llama 3 - 1B83.91
Qwen 2.5 - 1.5B82.20
Few-shotLlama 3.3 - 70B77.63
Qwen 2.5 - 72B77.10
", + "image_path": "9ac66200dbc3401fe34010e7a5000f1776a14812c0614b39e7ff8f7d53fd5cb1.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 318, + 460, + 508, + 471 + ], + "lines": [ + { + "bbox": [ + 318, + 460, + 508, + 471 + ], + "spans": [ + { + "bbox": [ + 318, + 460, + 508, + 471 + ], + "type": "text", + "content": "Table 2: Performance on Different Backbones." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 302, + 493, + 526, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 493, + 526, + 574 + ], + "spans": [ + { + "bbox": [ + 302, + 493, + 526, + 574 + ], + "type": "text", + "content": "gregating subgraph evidence. Notably, a fine-tuned 1B Specialized LLM outperforms the general 70B counterpart, demonstrating fine-tuning's effectiveness to process graph data. This supports the need to combine powerful General LLMs with adapted Specialized LLMs for optimal performance." + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 304, + 597, + 526, + 664 + ], + "blocks": [ + { + "bbox": [ + 303, + 576, + 524, + 588 + ], + "lines": [ + { + "bbox": [ + 303, + 576, + 524, + 588 + ], + "spans": [ + { + "bbox": [ + 303, + 576, + 524, + 588 + ], + "type": "text", + "content": "Zero-shot Generalizability. To assess" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 304, + 597, + 526, + 664 + ], + "lines": [ + { + "bbox": [ + 304, + 597, + 526, + 664 + ], + "spans": [ + { + "bbox": [ + 304, + 597, + 526, + 664 + ], + "type": "table", + "html": "
BenchmarkLlama 3Qwen 2.5
HoVer (Zero-shot CoT)66.665.3
HoVer (Support-Predicted)70.7 (14.3%)69.4 (15.7%)
FEVEROUS (Zero-shot CoT)81.180.9
FEVEROUS (Support-Predicted)83.8 (12.5%)83.6 (12.9%)
", + "image_path": "08437af0052f12721d4cbbff83bb6f61ca3eb2e52371af20b92557139d77614a.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 671, + 525, + 708 + ], + "lines": [ + { + "bbox": [ + 302, + 671, + 525, + 708 + ], + "spans": [ + { + "bbox": [ + 302, + 671, + 525, + 708 + ], + "type": "text", + "content": "Table 3: Zero-shot transferred performance on other unstructure-based benchmarks on the Support-Predicted samples along with Support Predicted rates." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 302, + 721, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 721, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 721, + 526, + 775 + ], + "type": "text", + "content": "ClaimPKG's zero-shot generalizability, we test transfer to HoVer (Jiang et al., 2020) and FEVEROUS (Aly et al., 2021) datasets. Using DBpedia (Lehmann et al., 2015) as the knowledge" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "text", + "content": "5277" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 293, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 293, + 274 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 293, + 274 + ], + "type": "text", + "content": "source, we evaluate with trained Specialized LLMs (Llama-3.2-3B and Qwen-2.5-3B) while keeping Llama-3.3-70B as the General LLM. 
Since external datasets may contain claims outside DBpedia's coverage, making it difficult to distinguish between knowledge gaps and actual verification failures of ClaimPKG for Refuted cases, we analyze only samples predicted as Supported. As shown in Table 3, ClaimPKG predicts Supported for only " + }, + { + "bbox": [ + 67, + 71, + 293, + 274 + ], + "type": "inline_equation", + "content": "12.5\\% - 15.7\\%" + }, + { + "bbox": [ + 67, + 71, + 293, + 274 + ], + "type": "text", + "content": " of samples, indicating limited knowledge overlap with DBpedia. However, on these samples, ClaimPKG outperforms Llama-3.3-70B's zero-shot CoT inference by " + }, + { + "bbox": [ + 67, + 71, + 293, + 274 + ], + "type": "inline_equation", + "content": "4\\%" + }, + { + "bbox": [ + 67, + 71, + 293, + 274 + ], + "type": "text", + "content": " accuracy on both datasets, demonstrating robust transfer to reasoning patterns in unseen data." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 275, + 291, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 275, + 291, + 289 + ], + "spans": [ + { + "bbox": [ + 67, + 275, + 291, + 289 + ], + "type": "text", + "content": "Training Data Size. To assess the impact of train" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 76, + 298, + 282, + 431 + ], + "blocks": [ + { + "bbox": [ + 76, + 298, + 282, + 431 + ], + "lines": [ + { + "bbox": [ + 76, + 298, + 282, + 431 + ], + "spans": [ + { + "bbox": [ + 76, + 298, + 282, + 431 + ], + "type": "image", + "image_path": "1f8ce5c2eb5971e4f38babdf748e06ca7c28c9b4b01a81101d541f6b5a409692.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 74, + 439, + 282, + 451 + ], + "lines": [ + { + "bbox": [ + 74, + 439, + 282, + 451 + ], + "spans": [ + { + "bbox": [ + 74, + 439, + 282, + 451 + ], + "type": "text", + "content": "Figure 3: Varying Specialized LLM's training data." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 465, + 291, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 465, + 291, + 682 + ], + "spans": [ + { + "bbox": [ + 67, + 465, + 291, + 682 + ], + "type": "text", + "content": "ing data on the Specialized LLM, we vary the number of training samples from 0.1K to 10K, using two configurations: Llama-3.2-3B and Qwen-2.5-3B as the specialized LLM and keep the General LLM to be Llama-3.3-70B. We evaluate performance based on two metrics: average accuracy on the test set and claim structure coverage on the dev set. As shown in Figure 3, the Specialized LLMs achieve satisfactory accuracy (Llama-3.2-3B: " + }, + { + "bbox": [ + 67, + 465, + 291, + 682 + ], + "type": "inline_equation", + "content": "79.35\\%" + }, + { + "bbox": [ + 67, + 465, + 291, + 682 + ], + "type": "text", + "content": ", Qwen-2.5-3B: " + }, + { + "bbox": [ + 67, + 465, + 291, + 682 + ], + "type": "inline_equation", + "content": "77.62\\%" + }, + { + "bbox": [ + 67, + 465, + 291, + 682 + ], + "type": "text", + "content": ") with just 100 training samples, demonstrating efficiency and low training costs for KG adaptation. While both structure coverage and accuracy improve up to 5K samples, coverage plateaus thereafter, and accuracy begins to decline, indicating overfitting where excessive training data reduces generalizability." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 690, + 261, + 703 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 690, + 261, + 703 + ], + "spans": [ + { + "bbox": [ + 67, + 690, + 261, + 703 + ], + "type": "text", + "content": "5.3 Interpretability and Error Analysis" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 707, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 707, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 707, + 291, + 775 + ], + "type": "text", + "content": "ClaimPKG can improve claim verification performance while enhancing interpretability. Representative outputs of ClaimPKG (Figure 12, Appendix E) illustrate its ability to capture claim structure and provide well-grounded justifications. Notably," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 71, + 526, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 526, + 206 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 526, + 206 + ], + "type": "text", + "content": "when refuting claims, it explicitly presents contradicting evidence, ensuring transparent reasoning. To further assess reliability, we conducted a human analysis of 200 incorrect predictions from FactKG, categorizing errors (Figure 13, Appendix E) into: Claim Structure Errors: fail to capture the underlying claim structure; Retrieval Errors: fail to retrieve necessary evidence required for claim verification; and Reasoning Errors: incorrect logical inferences of the general LLM to judge the verdict." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 207, + 526, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 207, + 526, + 438 + ], + "spans": [ + { + "bbox": [ + 302, + 207, + 526, + 438 + ], + "type": "text", + "content": "Specifically, there are 0 (0%) Claim Structure Errors, 57 (28.5%) Retrieval Errors, and 143 (71.5%) Reasoning Errors. 
These results suggest that, with chances (multiple beams) to generate pseudosubgraphs, the Specialized LLM can effectively capture the structural representation of claims. However, the general-purpose LLM, despite its strong reasoning capabilities, still struggles with certain complex reasoning scenarios that require specific handling. Moreover, retrieval errors highlight cases where additional implicit reasoning is necessary, as we hypothesize that direct subgraph retrieval failed to provide a comprehensive picture of the required evidence. These highlight future improvements, focusing on enhancing retrieval inference and refining reasoning for complex claim verification over structured knowledge." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 449, + 448, + 462 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 449, + 448, + 462 + ], + "spans": [ + { + "bbox": [ + 302, + 449, + 448, + 462 + ], + "type": "text", + "content": "5.4 Scalability of ClaimPKG" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 468, + 525, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 468, + 525, + 603 + ], + "spans": [ + { + "bbox": [ + 302, + 468, + 525, + 603 + ], + "type": "text", + "content": "ClaimPKG maintains scalability and adaptability within dynamic knowledge environments. After training the Specialized LLM on a domain (e.g., Wikipedia), the system remains decoupled from the underlying Knowledge Graph (KG). Only the Entity-Trie component interfaces directly with the data. Consequently, when the KG undergoes updates, ClaimPKG requires merely an update of the corresponding entities within the Entity-Trie, ensuring an efficient adaptation process." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 616, + 381, + 629 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 616, + 381, + 629 + ], + "spans": [ + { + "bbox": [ + 302, + 616, + 381, + 629 + ], + "type": "text", + "content": "6 Conclusion" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 640, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 640, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 640, + 525, + 775 + ], + "type": "text", + "content": "In this work, we present ClaimPKG, a novel claim verification combining the structure of Knowledge Graphs with the adaptability and reasoning of Large Language Models. Through Pseudosubgraph Generation, Subgraph Retrieval, and General Reasoning, it addresses limitations while ensuring transparency. Extensive experiments show state-of-the-art performance and generalizability across datasets, making ClaimPKG a step toward reliable and explainable misinformation detection." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "text", + "content": "5278" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 71, + 131, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 71, + 131, + 84 + ], + "spans": [ + { + "bbox": [ + 68, + 71, + 131, + 84 + ], + "type": "text", + "content": "Limitations" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 93, + 291, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 93, + 291, + 376 + ], + "spans": [ + { + "bbox": [ + 69, + 93, + 291, + 376 + ], + "type": "text", + "content": "Despite their advanced reasoning capabilities, LLMs are prone to errors and biases, necessitating careful deployment, particularly in fact-checking systems where incorrect or biased outputs could contribute to misinformation. Addressing these biases remains an ongoing research challenge, requiring effective mechanisms for detection, control, and mitigation. Additionally, real-world claim verification often requires inferring implicit reasoning, where further related knowledge for a problem is necessary, and making improvements in pipeline components to handle this type of information is crucial. Another limitation is the performance decline observed when the Specialized LLM is trained on an excessive number of examples, highlighting the need for future research into regularization strategies. Further improvements should also focus on the general reasoning module to infer missing knowledge more effectively and enhance intricate and nuanced claim verification cases over structured knowledge." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 399, + 127, + 412 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 399, + 127, + 412 + ], + "spans": [ + { + "bbox": [ + 69, + 399, + 127, + 412 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 418, + 291, + 774 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 69, + 418, + 291, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 418, + 291, + 518 + ], + "spans": [ + { + "bbox": [ + 69, + 418, + 291, + 518 + ], + "type": "text", + "content": "Rami Aly, Zhijiang Guo, Michael Sejr Schlichtkrull, James Thorne, Andreas Vlachos, Christos Christodoulopoulos, Oana Cocarascu, and Arpit Mittal. 2021. FEVEROUS: fact extraction and verification over unstructured and structured information. In Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December 2021, virtual." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 527, + 291, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 527, + 291, + 582 + ], + "spans": [ + { + "bbox": [ + 69, + 527, + 291, + 582 + ], + "type": "text", + "content": "Nicola De Cao, Gautier Izacard, Sebastian Riedel, and Fabio Petroni. 2021. Autoregressive entity retrieval. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 591, + 291, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 591, + 291, + 645 + ], + "spans": [ + { + "bbox": [ + 69, + 591, + 291, + 645 + ], + "type": "text", + "content": "Darren Edge, Ha Trinh, Newman Cheng, Joshua Bradley, Alex Chao, Apurva Mody, Steven Truitt, and Jonathan Larson. 2024. 
From local to global: A graph RAG approach to query-focused summarization. CoRR, abs/2404.16130." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 655, + 291, + 742 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 655, + 291, + 742 + ], + "spans": [ + { + "bbox": [ + 69, + 655, + 291, + 742 + ], + "type": "text", + "content": "Max Glockner, Yufang Hou, and Iryna Gurevych. 2022a. Missing counter-evidence renders NLP fact-checking unrealistic for misinformation. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, EMNLP 2022, Abu Dhabi, United Arab Emirates, December 7-11, 2022, pages 5916-5936. Association for Computational Linguistics." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 751, + 290, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 751, + 290, + 774 + ], + "spans": [ + { + "bbox": [ + 69, + 751, + 290, + 774 + ], + "type": "text", + "content": "Max Glockner, Yufang Hou, and Iryna Gurevych. 2022b. Missing counter-evidence renders NLP fact-checking" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 526, + 774 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 314, + 72, + 526, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 526, + 138 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 526, + 138 + ], + "type": "text", + "content": "unrealistic for misinformation. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, EMNLP 2022, Abu Dhabi, United Arab Emirates, December 7-11, 2022, pages 5916-5936. Association for Computational Linguistics." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 304, + 146, + 526, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 146, + 526, + 224 + ], + "spans": [ + { + "bbox": [ + 304, + 146, + 526, + 224 + ], + "type": "text", + "content": "Jonathan Herzig, Pawel Krzysztof Nowak, Thomas Müller, Francesco Piccinno, and Julian Eisenschlos. 2020. TaPas: Weakly supervised table parsing via pre-training. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4320-4333, Online. Association for Computational Linguistics." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 231, + 526, + 309 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 231, + 526, + 309 + ], + "spans": [ + { + "bbox": [ + 304, + 231, + 526, + 309 + ], + "type": "text", + "content": "Jinhao Jiang, Kun Zhou, Zican Dong, Keming Ye, Xin Zhao, and Ji-Rong Wen. 2023. StructGPT: A general framework for large language model to reason over structured data. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 9237-9251, Singapore. Association for Computational Linguistics." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 316, + 526, + 405 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 316, + 526, + 405 + ], + "spans": [ + { + "bbox": [ + 304, + 316, + 526, + 405 + ], + "type": "text", + "content": "Yichen Jiang, Shikha Bordia, Zheng Zhong, Charles Dognin, Maneesh Kumar Singh, and Mohit Bansal. 2020. Hover: A dataset for many-hop fact extraction and claim verification. In Findings of the Association for Computational Linguistics: EMNLP 2020, Online Event, 16-20 November 2020, volume EMNLP 2020 of Findings of ACL, pages 3441-3460. Association for Computational Linguistics." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 412, + 526, + 489 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 412, + 526, + 489 + ], + "spans": [ + { + "bbox": [ + 304, + 412, + 526, + 489 + ], + "type": "text", + "content": "Jiho Kim, Yeonsu Kwon, Yohan Jo, and Edward Choi. 2023a. KG-GPT: A general framework for reasoning on knowledge graphs using large language models. In Findings of the Association for Computational Linguistics: EMNLP 2023, Singapore, December 6-10, 2023, pages 9410-9421. Association for Computational Linguistics." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 497, + 526, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 497, + 526, + 585 + ], + "spans": [ + { + "bbox": [ + 304, + 497, + 526, + 585 + ], + "type": "text", + "content": "Jiho Kim, Sungjin Park, Yeonsu Kwon, Yohan Jo, James Thorne, and Edward Choi. 2023b. Factkg: Fact verification via reasoning on knowledge graphs. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2023, Toronto, Canada, July 9-14, 2023, pages 16190-16206. Association for Computational Linguistics." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 592, + 526, + 671 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 592, + 526, + 671 + ], + "spans": [ + { + "bbox": [ + 304, + 592, + 526, + 671 + ], + "type": "text", + "content": "Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, and Ion Stoica. 2023. Efficient memory management for large language model serving with pagedattention. In Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 677, + 526, + 744 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 677, + 526, + 744 + ], + "spans": [ + { + "bbox": [ + 304, + 677, + 526, + 744 + ], + "type": "text", + "content": "Jens Lehmann, Robert Isele, Max Jakob, Anja Jentzsch, Dimitris Kontokostas, Pablo N. Mendes, Sebastian Hellmann, Mohamed Morsey, Patrick van Kleef, Soren Auer, and Christian Bizer. 2015. Dbpedia - A large-scale, multilingual knowledge base extracted from wikipedia. Semantic Web, 6(2):167-195." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 751, + 526, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 751, + 526, + 774 + ], + "spans": [ + { + "bbox": [ + 304, + 751, + 526, + 774 + ], + "type": "text", + "content": "Qi Li, Heng Ji, and Liang Huang. 2013. Joint event extraction via structured prediction with global features." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "text", + "content": "5279" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 290, + 773 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 80, + 72, + 290, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 72, + 290, + 128 + ], + "spans": [ + { + "bbox": [ + 80, + 72, + 290, + 128 + ], + "type": "text", + "content": "In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics, ACL 2013, 4-9 August 2013, Sofia, Bulgaria, Volume 1: Long Papers, pages 73-82. The Association for Computer Linguistics." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 136, + 290, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 136, + 290, + 191 + ], + "spans": [ + { + "bbox": [ + 69, + 136, + 290, + 191 + ], + "type": "text", + "content": "Ilya Loshchilov and Frank Hutter. 2019. Decoupled weight decay regularization. In 7th International Conference on Learning Representations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenReview.net." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 200, + 289, + 265 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 200, + 289, + 265 + ], + "spans": [ + { + "bbox": [ + 69, + 200, + 289, + 265 + ], + "type": "text", + "content": "Linhao Luo, Yuan-Fang Li, Gholamreza Haffari, and Shirui Pan. 2024. Reasoning on graphs: Faithful and interpretable large language model reasoning. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 274, + 289, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 274, + 289, + 295 + ], + "spans": [ + { + "bbox": [ + 69, + 274, + 289, + 295 + ], + "type": "text", + "content": "Meta. 2024. Build the future of ai with meta llama 3, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 304, + 290, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 304, + 290, + 371 + ], + "spans": [ + { + "bbox": [ + 69, + 304, + 290, + 371 + ], + "type": "text", + "content": "Makoto Miwa and Mohit Bansal. 2016. End-to-end relation extraction using LSTMs on sequences and tree structures. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1105-1116, Berlin, Germany. Association for Computational Linguistics." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 379, + 215, + 391 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 379, + 215, + 391 + ], + "spans": [ + { + "bbox": [ + 69, + 379, + 215, + 391 + ], + "type": "text", + "content": "OpenAI. 2024. Hello gpt-4o, 2024a." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 399, + 290, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 399, + 290, + 487 + ], + "spans": [ + { + "bbox": [ + 69, + 399, + 290, + 487 + ], + "type": "text", + "content": "Liangming Pan, Xiaobao Wu, Xinyuan Lu, Anh Tuan Luu, William Yang Wang, Min-Yen Kan, and Preslav Nakov. 2023. Fact-checking complex claims with program-guided reasoning. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2023, Toronto, Canada, July 9-14, 2023, pages 6981-7004. Association for Computational Linguistics." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 495, + 290, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 495, + 290, + 572 + ], + "spans": [ + { + "bbox": [ + 69, + 495, + 290, + 572 + ], + "type": "text", + "content": "Jungsoo Park, Sewon Min, Jaewoo Kang, Luke Zettle-moyer, and Hannaneh Hajishirzi. 2022. FaVIQ: FAct verification from information-seeking questions. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 5154-5166, Dublin, Ireland. Association for Computational Linguistics." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 580, + 287, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 580, + 287, + 592 + ], + "spans": [ + { + "bbox": [ + 69, + 580, + 287, + 592 + ], + "type": "text", + "content": "Qwen. 2024. Qwen2.5: A party of foundation models." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 600, + 290, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 600, + 290, + 677 + ], + "spans": [ + { + "bbox": [ + 69, + 600, + 290, + 677 + ], + "type": "text", + "content": "Tal Schuster, Adam Fisch, and Regina Barzilay. 2021. Get your vitamin C! robust fact verification with contrastive evidence. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 624-643, Online. Association for Computational Linguistics." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 686, + 290, + 773 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 686, + 290, + 773 + ], + "spans": [ + { + "bbox": [ + 69, + 686, + 290, + 773 + ], + "type": "text", + "content": "Jiashuo Sun, Chengjin Xu, Lumingyuan Tang, Saizhuo Wang, Chen Lin, Yeyun Gong, Lionel M. Ni, Heung-Yeung Shum, and Jian Guo. 2024. Think-on-graph: Deep and responsible reasoning of large language model on knowledge graph. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. Open-Review.net." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 525, + 774 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 304, + 72, + 525, + 149 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 525, + 149 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 525, + 149 + ], + "type": "text", + "content": "James Thorne and Andreas Vlachos. 2018. Automated fact checking: Task formulations, methods and future directions. In Proceedings of the 27th International Conference on Computational Linguistics, COLING 2018, Santa Fe, New Mexico, USA, August 20-26, 2018, pages 3346-3359. Association for Computational Linguistics." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 157, + 525, + 268 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 157, + 525, + 268 + ], + "spans": [ + { + "bbox": [ + 304, + 157, + 525, + 268 + ], + "type": "text", + "content": "James Thorne, Andreas Vlachos, Christos Christodoulopoulos, and Arpit Mittal. 2018. FEVER: a large-scale dataset for fact extraction and verification. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2018, New Orleans, Louisiana, USA, June 1-6, 2018, Volume 1 (Long Papers), pages 809-819. Association for Computational Linguistics." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 275, + 525, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 275, + 525, + 353 + ], + "spans": [ + { + "bbox": [ + 304, + 275, + 525, + 353 + ], + "type": "text", + "content": "Bailin Wang, Richard Shin, Xiaodong Liu, Oleksandr Polozov, and Matthew Richardson. 2020. RAT-SQL: Relation-aware schema encoding and linking for text-to-SQL parsers. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7567-7578, Online. Association for Computational Linguistics." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 361, + 525, + 427 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 361, + 525, + 427 + ], + "spans": [ + { + "bbox": [ + 304, + 361, + 525, + 427 + ], + "type": "text", + "content": "Haoran Wang and Kai Shu. 2023. Explainable claim verification via knowledge-grounded reasoning with large language models. In Findings of the Association for Computational Linguistics: EMNLP 2023, Singapore, December 6-10, 2023, pages 6288-6304. Association for Computational Linguistics." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 435, + 525, + 513 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 435, + 525, + 513 + ], + "spans": [ + { + "bbox": [ + 304, + 435, + 525, + 513 + ], + "type": "text", + "content": "Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc V. Le, Ed H. Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. 2023. Self-consistency improves chain of thought reasoning in language models. In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 520, + 525, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 520, + 525, + 608 + ], + "spans": [ + { + "bbox": [ + 304, + 520, + 525, + 608 + ], + "type": "text", + "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed H. Chi, Quoc V. Le, and Denny Zhou. 2022. Chain-of-thought prompting elicits reasoning in large language models. In Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2022, NeurIPS 2022, New Orleans, LA, USA, November 28 - December 9, 2022." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 616, + 525, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 616, + 525, + 649 + ], + "spans": [ + { + "bbox": [ + 304, + 616, + 525, + 649 + ], + "type": "text", + "content": "Wikipedia. 2025a. Levenshtein distance — Wikipedia, The Free Encyclopedia. Accessed: 14-February-2025." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 658, + 525, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 658, + 525, + 681 + ], + "spans": [ + { + "bbox": [ + 304, + 658, + 525, + 681 + ], + "type": "text", + "content": "Wikipedia. 2025b. Trie — Wikipedia, The Free Encyclopedia. 
[Online; accessed 9-February-2025]." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 688, + 525, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 688, + 525, + 732 + ], + "spans": [ + { + "bbox": [ + 304, + 688, + 525, + 732 + ], + "type": "text", + "content": "Shitao Xiao, Zheng Liu, Peitian Zhang, and Niklas Muennighoff. 2023. C-pack: Packaged resources to advance general chinese embedding. Preprint, arXiv:2309.07597." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 740, + 525, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 740, + 525, + 774 + ], + "spans": [ + { + "bbox": [ + 304, + 740, + 525, + 774 + ], + "type": "text", + "content": "Jie Zhou, Ganqu Cui, Shengding Hu, Zhengyan Zhang, Cheng Yang, Zhiyuan Liu, Lifeng Wang, Changcheng Li, and Maosong Sun. 2020. Graph" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5280" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 79, + 72, + 290, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 72, + 290, + 95 + ], + "spans": [ + { + "bbox": [ + 79, + 72, + 290, + 95 + ], + "type": "text", + "content": "neural networks: A review of methods and applications. AI Open, 1:57-81." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 102, + 291, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 102, + 291, + 191 + ], + "spans": [ + { + "bbox": [ + 68, + 102, + 291, + 191 + ], + "type": "text", + "content": "Jie Zhou, Xu Han, Cheng Yang, Zhiyuan Liu, Lifeng Wang, Changcheng Li, and Maosong Sun. 
2019. GEAR: graph-based evidence aggregating and reasoning for fact verification. In Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL 2019, Florence, Italy, July 28- August 2, 2019, Volume 1: Long Papers, pages 892-901. Association for Computational Linguistics." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 201, + 198, + 214 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 201, + 198, + 214 + ], + "spans": [ + { + "bbox": [ + 68, + 201, + 198, + 214 + ], + "type": "text", + "content": "A Benchmark Datasets" + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 85, + 230, + 273, + 382 + ], + "blocks": [ + { + "bbox": [ + 85, + 230, + 273, + 382 + ], + "lines": [ + { + "bbox": [ + 85, + 230, + 273, + 382 + ], + "spans": [ + { + "bbox": [ + 85, + 230, + 273, + 382 + ], + "type": "table", + "html": "
DatasetSplitSupportRefuteNEITotal
FactKGTrain4272343644-86367
Dev64266840-132666
Test43984643-9041
Total5354755127-108674
HoverTrain110237148-18171
Dev20002000-4000
Test20002000-4000
Total1502311148-26171
FEVER OUSTrain4183527215224171291
Dev390834815017890
Test3372297315007845
Total4911533669424287026
", + "image_path": "b55d997587224b02783b02b3b9127c03b061270dccee78a9fdf5a9ef8d856b89.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 82, + 439, + 276, + 545 + ], + "blocks": [ + { + "bbox": [ + 67, + 391, + 289, + 414 + ], + "lines": [ + { + "bbox": [ + 67, + 391, + 289, + 414 + ], + "spans": [ + { + "bbox": [ + 67, + 391, + 289, + 414 + ], + "type": "text", + "content": "Table 4: Basic statistics of Hover, FEVERIOUS, and FactKG Datasets" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 82, + 439, + 276, + 545 + ], + "lines": [ + { + "bbox": [ + 82, + 439, + 276, + 545 + ], + "spans": [ + { + "bbox": [ + 82, + 439, + 276, + 545 + ], + "type": "table", + "html": "
TypeWrittenColloquialTotal
ModelPresup
One-hop2,10615,9341,58019,530
Conjunction20,58715,90860237,097
Existence2804,0604,8329,172
Multi-hop10,23916,42060327,262
Negation1,34012,4661,80715,613
Total34,46264,7889,424108,674
", + "image_path": "a843bca345b9237b3cc9f99db24289489479dff382b844d530adf3e14e806541.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 555, + 289, + 567 + ], + "lines": [ + { + "bbox": [ + 68, + 555, + 289, + 567 + ], + "spans": [ + { + "bbox": [ + 68, + 555, + 289, + 567 + ], + "type": "text", + "content": "Table 5: Dataset statistics of FACTKG for claim types." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 582, + 290, + 744 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 582, + 290, + 744 + ], + "spans": [ + { + "bbox": [ + 67, + 582, + 290, + 744 + ], + "type": "text", + "content": "FEVEROUS. (Aly et al., 2021) FEVEROUS is a fact verification dataset comprising 87,026 verified claims sourced from Wikipedia (Table 4). Each claim is accompanied by evidence in the form of sentences and/or cells from tables, along with a label indicating whether the evidence supports, refutes, or does not provide enough information to verify the claim. The dataset includes metadata like annotator actions and challenge types, designed to minimize biases. It is used for tasks that involve verifying claims against both unstructured (textual) and structured (tabular) information." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "type": "text", + "content": "HoVer. 
(Jiang et al., 2020) HoVer is a dataset containing 26,171 samples, designed for open-domain," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 71, + 526, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 526, + 220 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 526, + 220 + ], + "type": "text", + "content": "multi-hop fact extraction and claim verification, using the Wikipedia corpus. Claims in HoVer are adapted from question-answer pairs and require the extraction of facts from multiple (up to four) Wikipedia articles to determine if the claim is supported or not supported. The complexity of HoVer, particularly in the 3/4-hop claims, is further amplified because these claims are often expressed across multiple sentences, which introduces challenges related to long-range dependencies, such as accurately resolving coreferences." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 222, + 526, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 222, + 526, + 657 + ], + "spans": [ + { + "bbox": [ + 302, + 222, + 526, + 657 + ], + "type": "text", + "content": "FactKG. (Kim et al., 2023b) FactKG is a challenging fact verification dataset comprised of 108,674 samples, designed to rigorously test models' abilities to reason over structured knowledge represented in a knowledge graph. Its difficulty arises from a combination of factors. First, it demands proficiency in five distinct reasoning types: one-hop (single relationship), conjunction (combining multiple relationships), existence (verifying entity/relationship presence), multi-hop (traversing multiple relationships), and, crucially, negation (reasoning about the absence of relationships). Second, FactKG incorporates linguistic diversity, encompassing both formal, written-style claims and more challenging colloquial expressions, requiring models to handle paraphrasing, idiomatic language, and less direct wording. 
Third, instead of unstructured text, FactKG utilizes the DBpedia knowledge graph (derived from Wikipedia), necessitating that models correctly link entities and relations mentioned in the claim to the graph's nodes and edges, and perform complex path-based reasoning, especially for multi-hop claims. The addition of a weakly semantic knowledge source, and cross-style evaluation to assess generalizability, further contributes to the difficulty of this dataset. These features collectively make FactKG significantly more complex than datasets relying solely on unstructured text for verification. Detailed statistics of this dataset can be found in table 5. Readers can refer to table 4 for the overall basic statistics of all employed datasets for ClaimPKG." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 303, + 677, + 446, + 692 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 677, + 446, + 692 + ], + "spans": [ + { + "bbox": [ + 303, + 677, + 446, + 692 + ], + "type": "text", + "content": "B Implementation Details" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "text", + "content": "We conducted all experiments on a DGX server with 8 NVIDIA A100 GPUs. The General LLM is hosted within the vLLM framework (Kwon et al., 2023). Below, we detail the training process of the Specialized LLM." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 308, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 308, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 308, + 791 + ], + "type": "text", + "content": "5281" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 71, + 246, + 97 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 71, + 246, + 97 + ], + "spans": [ + { + "bbox": [ + 68, + 71, + 246, + 97 + ], + "type": "text", + "content": "B.1 Specialized LLM Training Data Annotation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 103, + 291, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 103, + 291, + 185 + ], + "spans": [ + { + "bbox": [ + 67, + 103, + 291, + 185 + ], + "type": "text", + "content": "To tailor the specialized model for improved comprehension and processing of KG-specific data, we construct a dedicated dataset for training, leveraging the provided version of FactKG (Kim et al., 2023b) (illustrated in Figure 4). The annotation process consists of the following steps:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 72, + 198, + 285, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 198, + 285, + 218 + ], + "spans": [ + { + "bbox": [ + 72, + 198, + 285, + 218 + ], + "type": "text", + "content": "Claim: A musical artist, whose music is Post-metal, played with the band Twilight and performs for Mamiffer." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 74, + 219, + 257, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 219, + 257, + 238 + ], + "spans": [ + { + "bbox": [ + 74, + 219, + 257, + 238 + ], + "type": "text", + "content": "Entities: [Mamiffer, Post-metal, Twilight_(band)] Evidence:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 73, + 239, + 285, + 279 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 73, + 239, + 285, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 239, + 285, + 259 + ], + "spans": [ + { + "bbox": [ + 73, + 239, + 285, + 259 + ], + "type": "text", + "content": "- Twilight_(band), (associatedMusicalArtist, associatedBand), Mamiffer)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 73, + 259, + 285, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 259, + 285, + 279 + ], + "spans": [ + { + "bbox": [ + 73, + 259, + 285, + 279 + ], + "type": "text", + "content": "- Twilight_(band), (associatedMusicalArtist, genre), Postmetal" + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 107, + 298, + 249, + 310 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 298, + 249, + 310 + ], + "spans": [ + { + "bbox": [ + 107, + 298, + 249, + 310 + ], + "type": "text", + "content": "Figure 4: Provided data of FactKG" + } + ] + } + ], + "index": 7, + "type": "text" + }, + { + "bbox": [ + 67, + 333, + 291, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 333, + 291, + 400 + ], + "spans": [ + { + "bbox": [ + 67, + 333, + 291, + 400 + ], + "type": "text", + "content": "Preprocessing: All entities and relations from FactKG, including the train, development, and test datasets, as well as the DBPedia KG, are normalized by splitting concatenated words to ensure consistency." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 409, + 291, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 409, + 291, + 774 + ], + "spans": [ + { + "bbox": [ + 67, + 409, + 291, + 774 + ], + "type": "text", + "content": "Graph Construction: Using the provided evidence information from FactKG, we observe that while evidence may not explicitly exist in the graph, it accurately captures the underlying structure of the claim. Accordingly, for triplets with relation paths exceeding one hop, we decompose them into multiple triplets while introducing a placeholder entity, denoted as \"unknown_[index]\", to preserve structural integrity. This placeholder represents an ambiguous or missing entity that requires identification. For instance, the triplet: \"Twilight_(band), (~associatedMusicalArtist, associatedBand), Mamiffer\" is transformed into the following triplets: \"Twilight_(band), associatedBand, unknown_1\" and \"unknown_1\", associatedMusicalArtist, Mamiffer\". Additionally, entities present in the Entities set but absent from the graph are also introduced as unknown_[index]. To further enhance graph completeness, GPT-4 is employed to verify whether entities from the Entities set are explicitly mentioned in the claim. This ensures that relevant entities are either linked to existing nodes or added as placeholders. The automatic entity verification process is conducted using a prompt template, as shown in Figure 8. Additionally, the symbol \"\\~\"" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 71, + 525, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 525, + 137 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 525, + 137 + ], + "type": "text", + "content": "is retained to denote inverse relations. Random shuffle among constructed triplets but preserving the sequential order of “unknown” entity is applied to improve the robustness of the model being trained." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 146, + 525, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 146, + 525, + 186 + ], + "spans": [ + { + "bbox": [ + 302, + 146, + 525, + 186 + ], + "type": "text", + "content": "Generated Pseudo-Subgraph: The transformed claim results in the pseudo-subgraph illustrated in Figure 5." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 200, + 405, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 200, + 405, + 211 + ], + "spans": [ + { + "bbox": [ + 308, + 200, + 405, + 211 + ], + "type": "text", + "content": "Pseudo Subgraph Label:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 211, + 511, + 241 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 307, + 211, + 511, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 211, + 511, + 221 + ], + "spans": [ + { + "bbox": [ + 307, + 211, + 511, + 221 + ], + "type": "text", + "content": "- Twilight (band), associated musical artist, unknown_0" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 221, + 458, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 221, + 458, + 230 + ], + "spans": [ + { + "bbox": [ + 307, + 221, + 458, + 230 + ], + "type": "text", + "content": "- unknown_0, associated band, Mamiffer" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 231, + 424, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 231, + 424, + 241 + ], + "spans": [ + { + "bbox": [ + 307, + 231, + 424, + 241 + ], + "type": "text", + "content": "- unknown_0, genre, Post-metal" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 302, + 261, + 525, + 285 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 261, + 525, + 285 + ], + "spans": [ + { + "bbox": [ + 302, + 261, + 525, + 285 + ], + "type": "text", + "content": 
"Figure 5: Pseudo-Subgraph label as the output of the data annotation process." + } + ] + } + ], + "index": 17, + "type": "text" + }, + { + "bbox": [ + 303, + 307, + 525, + 334 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 307, + 525, + 334 + ], + "spans": [ + { + "bbox": [ + 303, + 307, + 525, + 334 + ], + "type": "text", + "content": "B.2 Training and Hyperparameter Settings of the Specialized LLM" + } + ] + } + ], + "index": 18 + }, + { + "type": "table", + "bbox": [ + 336, + 348, + 492, + 454 + ], + "blocks": [ + { + "bbox": [ + 336, + 348, + 492, + 454 + ], + "lines": [ + { + "bbox": [ + 336, + 348, + 492, + 454 + ], + "spans": [ + { + "bbox": [ + 336, + 348, + 492, + 454 + ], + "type": "table", + "html": "
ParameterValue
BackboneLlama-3-Base
Qwen-2.5-Base
Learning Rate1e-5
Training Epoch1
Training Steps128
OptimizerAdamW
", + "image_path": "7d37f1c7c7d6b89c99c8673233300e4d431d4973cce33808f4428a06a932ca3e.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "table_body" + } + ], + "index": 19 + }, + { + "bbox": [ + 302, + 462, + 525, + 486 + ], + "lines": [ + { + "bbox": [ + 302, + 462, + 525, + 486 + ], + "spans": [ + { + "bbox": [ + 302, + 462, + 525, + 486 + ], + "type": "text", + "content": "Table 6: Hyperparameters of the Specialized LLM in ClaimPKG." + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 302, + 500, + 526, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 500, + 526, + 717 + ], + "spans": [ + { + "bbox": [ + 302, + 500, + 526, + 717 + ], + "type": "text", + "content": "The training configurations for the Specialized LLM are summarized in Table 6. The model training is based on the Base version of Llama-3 (Llama3.2-1B, Llama-3.2-3B, Llama-3.1-8B) and Qwen 2.5 (Qwen-2.5-1.5B, Qwen-2.5-3B, Qwen-2.5-7B). These base models are selected to preserve their inherent linguistic capabilities while facilitating optimal adaptation to domain-specific tasks during fine-tuning. The training process employs the annotated dataset described in Section B.1 and is conducted over one single epoch using the AdamW (Loshchilov and Hutter, 2019) optimizer. This strategy enables the generation of multiple variants of the Specialized LLM, ensuring task-specific adaptation while maintaining robust generalization across diverse linguistic structures." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 302, + 727, + 495, + 740 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 727, + 495, + 740 + ], + "spans": [ + { + "bbox": [ + 302, + 727, + 495, + 740 + ], + "type": "text", + "content": "C Additional Experimental Results" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 302, + 748, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 748, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 748, + 525, + 775 + ], + "type": "text", + "content": "In this section, we present additional experimental results through a systematic analysis on the FactKG" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5282" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 293, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 293, + 208 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 293, + 208 + ], + "type": "text", + "content": "development set with 2000 randomly sampled data points across claim categories. First, we provide a more detailed explanation of the evaluation metrics used. Second, we examine the performance of the specialized LLM by varying the beam size and backbone model size. 
Third, we analyze the Subgraph Retrieval by adjusting the hyperparameters " + }, + { + "bbox": [ + 67, + 71, + 293, + 208 + ], + "type": "inline_equation", + "content": "k_{1}" + }, + { + "bbox": [ + 67, + 71, + 293, + 208 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 71, + 293, + 208 + ], + "type": "inline_equation", + "content": "k_{2}" + }, + { + "bbox": [ + 67, + 71, + 293, + 208 + ], + "type": "text", + "content": " as explained in the 4.3, which influence the diversity and correctness of the retrieved subgraphs." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 216, + 135, + 227 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 216, + 135, + 227 + ], + "spans": [ + { + "bbox": [ + 67, + 216, + 135, + 227 + ], + "type": "text", + "content": "C.1 Metrics" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 233, + 291, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 233, + 291, + 341 + ], + "spans": [ + { + "bbox": [ + 67, + 233, + 291, + 341 + ], + "type": "text", + "content": "The specialized LLM's generation of pseudosubgraphs plays a crucial role in ClaimPKG's performance. We evaluated the specialized LLM's performance using four metrics: claim structure coverage (coverage), entity correctness (correctness), unique triplet count, and average end-to-end accuracy. While the final metric is straightforward, the three former metrics can be described as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 344, + 291, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 344, + 291, + 413 + ], + "spans": [ + { + "bbox": [ + 67, + 344, + 291, + 413 + ], + "type": "text", + "content": "(1) Structure coverage quantifies the alignment between the LLM-generated pseudo-graph and the reference claim graph in the FactKG dataset. 
Specifically, for a generated graph " + }, + { + "bbox": [ + 67, + 344, + 291, + 413 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 67, + 344, + 291, + 413 + ], + "type": "text", + "content": " and reference graph " + }, + { + "bbox": [ + 67, + 344, + 291, + 413 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 67, + 344, + 291, + 413 + ], + "type": "text", + "content": ", coverage is computed as:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 421, + 289, + 450 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 421, + 289, + 450 + ], + "spans": [ + { + "bbox": [ + 67, + 421, + 289, + 450 + ], + "type": "interline_equation", + "content": "c o v e r a g e (P, Q) = \\frac {\\# (P . t r i p l e t s \\cap Q . t r i p l e t s)}{\\# (Q . t r i p l e t s)}", + "image_path": "94056b0ade32750f8a037a6e2629c730cacf78e9cc1fe514bc91a4381ffc9449.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 458, + 291, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 458, + 291, + 526 + ], + "spans": [ + { + "bbox": [ + 67, + 458, + 291, + 526 + ], + "type": "text", + "content": "(2) Entity correctness quantifies the correctness of a claim's extracted entities, i.e., whether these entities exist in the KG. 
Specifically, for a generated graph " + }, + { + "bbox": [ + 67, + 458, + 291, + 526 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 67, + 458, + 291, + 526 + ], + "type": "text", + "content": " and a knowledge graph " + }, + { + "bbox": [ + 67, + 458, + 291, + 526 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 67, + 458, + 291, + 526 + ], + "type": "text", + "content": ", correctness is computed as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 535, + 295, + 564 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 535, + 295, + 564 + ], + "spans": [ + { + "bbox": [ + 67, + 535, + 295, + 564 + ], + "type": "interline_equation", + "content": "\\operatorname {c o r r e c t n e s s} (P, \\mathcal {G}) = \\frac {\\# (P . e n i t i e s \\cap \\mathcal {G} . e n t i t i e s)}{\\# (P . e n t i t i e s)}", + "image_path": "d8c068816dec870ddf9f8ac61fbe6bea2c4f6fa49324f6db448970aec5e931b7.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 572, + 291, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 572, + 291, + 626 + ], + "spans": [ + { + "bbox": [ + 67, + 572, + 291, + 626 + ], + "type": "text", + "content": "(3) Unique triplet count measures the diversity of generated graph structures, with higher counts potentially enabling better subgraph retrieval through increased coverage of possible relationships." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 635, + 280, + 661 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 635, + 280, + 661 + ], + "spans": [ + { + "bbox": [ + 67, + 635, + 280, + 661 + ], + "type": "text", + "content": "C.2 Different Beam Sizes of the Specialized LLM" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 666, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 666, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 666, + 291, + 775 + ], + "type": "text", + "content": "To evaluate the LLM's decoding strategy across different beam sizes, we utilized three average accuracy, structure coverage and unique triplet count as metrics. Table 7 details the impact of the number of beam sizes on the previously mentioned metrics on the FactKG dev set. Both Llama and Qwen models demonstrate consistent improvements in average performance and claim structure coverage" + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 304, + 69, + 526, + 196 + ], + "blocks": [ + { + "bbox": [ + 304, + 69, + 526, + 196 + ], + "lines": [ + { + "bbox": [ + 304, + 69, + 526, + 196 + ], + "spans": [ + { + "bbox": [ + 304, + 69, + 526, + 196 + ], + "type": "table", + "html": "
BackboneBeam SizeAverage AccuracyStructure CoverageUnique Triplets
Llama-3BBeam 179.7876.514.48
Beam 381.8081.276.44
Beam 582.0483.028.39
Beam 1082.3384.6113.83
Qwen-3BBeam 178.8477.953.82
Beam 380.7682.665.16
Beam 581.4183.586.73
Beam 1082.1984.629.58
", + "image_path": "1f537c4a4fe0d1d8f1cdfa130bbd1c1ae8bc7b9c0e025ff93aa1d1f0bb180cb6.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 304, + 242, + 525, + 314 + ], + "blocks": [ + { + "bbox": [ + 302, + 205, + 524, + 228 + ], + "lines": [ + { + "bbox": [ + 302, + 205, + 524, + 228 + ], + "spans": [ + { + "bbox": [ + 302, + 205, + 524, + 228 + ], + "type": "text", + "content": "Table 7: Performance metrics for different models on FactKG dev set." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 304, + 242, + 525, + 314 + ], + "lines": [ + { + "bbox": [ + 304, + 242, + 525, + 314 + ], + "spans": [ + { + "bbox": [ + 304, + 242, + 525, + 314 + ], + "type": "table", + "html": "
Beam SizeGen Graph (s)Retrieve (s)Reason (s)
beam 11.020.242.19
beam 32.160.382.22
beam 53.520.502.33
beam 1035.181.012.88
", + "image_path": "50ffcfd1ce834cb1fb16fb167f2106725eaed26e8420039ebfc0fc3271131bc6.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 322, + 525, + 346 + ], + "lines": [ + { + "bbox": [ + 302, + 322, + 525, + 346 + ], + "spans": [ + { + "bbox": [ + 302, + 322, + 525, + 346 + ], + "type": "text", + "content": "Table 8: Computing time for different beam sizes on FactKG dev set." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 301, + 368, + 525, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 368, + 525, + 462 + ], + "spans": [ + { + "bbox": [ + 301, + 368, + 525, + 462 + ], + "type": "text", + "content": "as beam size increases from 1 to 10. At beam size 10, Llama achieves " + }, + { + "bbox": [ + 301, + 368, + 525, + 462 + ], + "type": "inline_equation", + "content": "84.61\\%" + }, + { + "bbox": [ + 301, + 368, + 525, + 462 + ], + "type": "text", + "content": " coverage while Qwen reaches " + }, + { + "bbox": [ + 301, + 368, + 525, + 462 + ], + "type": "inline_equation", + "content": "84.62\\%" + }, + { + "bbox": [ + 301, + 368, + 525, + 462 + ], + "type": "text", + "content": ", showing comparable performance at higher beam sizes. The unique triplet count shows more pronounced growth with larger beam sizes, with Llama generating 13.83 unique triplets and Qwen 9.58 triplets at beam size 10." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 464, + 525, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 464, + 525, + 653 + ], + "spans": [ + { + "bbox": [ + 302, + 464, + 525, + 653 + ], + "type": "text", + "content": "However, table 8 shows this improved performance comes with significant computational overhead. Table 8 details on the time taken for generating pseudo-graphs, retrieving sub-graphs and reasoning with retrieved evidence. 
Most notably, while the time required for retrieving sub-graphs and reasoning with evidence only increase marginally as the beam size increase, this figure for pseudo-graph generation increases dramatically as the beam size goes to 10, from 1.02s at beam size 1 to 35.18s at beam size 10 - a " + }, + { + "bbox": [ + 302, + 464, + 525, + 653 + ], + "type": "inline_equation", + "content": "34.5 \\times" + }, + { + "bbox": [ + 302, + 464, + 525, + 653 + ], + "type": "text", + "content": " increase. Based on this measurement, in our official framework we select beam size " + }, + { + "bbox": [ + 302, + 464, + 525, + 653 + ], + "type": "inline_equation", + "content": "= 5" + }, + { + "bbox": [ + 302, + 464, + 525, + 653 + ], + "type": "text", + "content": " to balance the performance gain and computational costs." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 662, + 518, + 687 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 662, + 518, + 687 + ], + "spans": [ + { + "bbox": [ + 302, + 662, + 518, + 687 + ], + "type": "text", + "content": "C.3 Different Model Sizes of the Specialized LLM" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 302, + 693, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 693, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 693, + 526, + 775 + ], + "type": "text", + "content": "To evaluate how model size affects performance, we compare different variants of Llama and Qwen models ranging from 1B to 8B parameters. 
Table 9 presents the performance on the FactKG dev set across three key metrics: average performance, structure coverage, and unique triplets generated," + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "text", + "content": "5283" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 212, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 212, + 84 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 212, + 84 + ], + "type": "text", + "content": "which was explained previously." + } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 79, + 95, + 279, + 207 + ], + "blocks": [ + { + "bbox": [ + 79, + 95, + 279, + 207 + ], + "lines": [ + { + "bbox": [ + 79, + 95, + 279, + 207 + ], + "spans": [ + { + "bbox": [ + 79, + 95, + 279, + 207 + ], + "type": "table", + "html": "
BackboneAverage AccuracyStructure CoverageUnique Triplets
Llama - 1B80.2678.988.97
Llama - 3B82.0483.028.39
Llama - 8B82.6382.849.34
Qwen - 1.5B80.4881.346.58
Qwen - 3B81.4183.586.73
Qwen - 7B81.7982.887.05
", + "image_path": "4c6128eb3c9daad179b5f54f1a032ab8bf7e5de180c8216a7b8f4717c2aaa9a2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 254, + 291, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 254, + 291, + 456 + ], + "spans": [ + { + "bbox": [ + 67, + 254, + 291, + 456 + ], + "type": "text", + "content": "For both model families, we observe improvements in performance as model size increases, though with different patterns. The Llama family shows more notable gains, with average performance increasing from " + }, + { + "bbox": [ + 67, + 254, + 291, + 456 + ], + "type": "inline_equation", + "content": "80.26\\%" + }, + { + "bbox": [ + 67, + 254, + 291, + 456 + ], + "type": "text", + "content": " (1B) to " + }, + { + "bbox": [ + 67, + 254, + 291, + 456 + ], + "type": "inline_equation", + "content": "82.63\\%" + }, + { + "bbox": [ + 67, + 254, + 291, + 456 + ], + "type": "text", + "content": " (8B), while Qwen demonstrates more modest improvements from " + }, + { + "bbox": [ + 67, + 254, + 291, + 456 + ], + "type": "inline_equation", + "content": "80.48\\%" + }, + { + "bbox": [ + 67, + 254, + 291, + 456 + ], + "type": "text", + "content": " (1.5B) to " + }, + { + "bbox": [ + 67, + 254, + 291, + 456 + ], + "type": "inline_equation", + "content": "81.79\\%" + }, + { + "bbox": [ + 67, + 254, + 291, + 456 + ], + "type": "text", + "content": " (7B). Structure coverage peaks with the 3B variants for both families - Llama-3B achieving " + }, + { + "bbox": [ + 67, + 254, + 291, + 456 + ], + "type": "inline_equation", + "content": "83.02\\%" + }, + { + "bbox": [ + 67, + 254, + 291, + 456 + ], + "type": "text", + "content": " and Qwen-3B reaching " + }, + { + "bbox": [ + 67, + 254, + 291, + 456 + ], + "type": "inline_equation", + "content": "83.58\\%" + }, + { + "bbox": [ + 67, + 254, + 291, + 456 + ], + "type": "text", + "content": ". 
The models keep the increasing trend in their triplet generation patterns: Llama maintains relatively stable unique triplet counts (8.39 - 9.34) across sizes, while the figures for Qwen are (6.58 - 7.05) as the model size increases." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 458, + 291, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 458, + 291, + 565 + ], + "spans": [ + { + "bbox": [ + 67, + 458, + 291, + 565 + ], + "type": "text", + "content": "Overall, scaling to larger models shows slight improvements while increasing computational requirements. Based on these results, we select 3B variants of both model families in our official implementation, which offer an optimal balance of performance and model size, with Llama-3B and Qwen-3B showing comparable effectiveness across all metrics." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 576, + 287, + 602 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 576, + 287, + 602 + ], + "spans": [ + { + "bbox": [ + 67, + 576, + 287, + 602 + ], + "type": "text", + "content": "C.4 Different Hyperparameters of Subgraph Retrieval" + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 101, + 616, + 257, + 687 + ], + "blocks": [ + { + "bbox": [ + 67, + 216, + 289, + 240 + ], + "lines": [ + { + "bbox": [ + 67, + 216, + 289, + 240 + ], + "spans": [ + { + "bbox": [ + 67, + 216, + 289, + 240 + ], + "type": "text", + "content": "Table 9: Performance metrics for different models on the FactKG dev set." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 101, + 616, + 257, + 687 + ], + "lines": [ + { + "bbox": [ + 101, + 616, + 257, + 687 + ], + "spans": [ + { + "bbox": [ + 101, + 616, + 257, + 687 + ], + "type": "table", + "html": "
Hyper ParamsAverage AccuracyUnique Triplets
k1=5;k2=382.0011.42
k1=3;k2=182.048.39
k1=1;k2=181.873.58
", + "image_path": "667e6e99399c7756b9a7db1758e3d845a66740b40ed72846c0076e18439f73fc.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 748, + 291, + 775 + ], + "type": "text", + "content": "To assess the impact of different hyperparameters in the subgraph retrieval algorithm on overall" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 71, + 526, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 526, + 152 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 526, + 152 + ], + "type": "text", + "content": "performance, we systematically vary these hyperparameters while keeping the specialized LLM and general LLM fixed as Llama-3.2-3B and Llama-3.3-70B, respectively. Table 10 presents the performance across two key metrics: average accuracy and the number of unique triplets generated." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 153, + 526, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 153, + 526, + 342 + ], + "spans": [ + { + "bbox": [ + 302, + 153, + 526, + 342 + ], + "type": "text", + "content": "The results indicate that increasing " + }, + { + "bbox": [ + 302, + 153, + 526, + 342 + ], + "type": "inline_equation", + "content": "k_{1}" + }, + { + "bbox": [ + 302, + 153, + 526, + 342 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 302, + 153, + 526, + 342 + ], + "type": "inline_equation", + "content": "k_{2}" + }, + { + "bbox": [ + 302, + 153, + 526, + 342 + ], + "type": "text", + "content": " leads to a higher number of unique triplets, suggesting greater diversity in retrieved claims. However, this increase does not consistently translate to overall performance gains, which fall in the range of 81.87 - 82.00. 
Notably, performance peaks at " + }, + { + "bbox": [ + 302, + 153, + 526, + 342 + ], + "type": "inline_equation", + "content": "k_{1} = 3" + }, + { + "bbox": [ + 302, + 153, + 526, + 342 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 302, + 153, + 526, + 342 + ], + "type": "inline_equation", + "content": "k_{2} = 1" + }, + { + "bbox": [ + 302, + 153, + 526, + 342 + ], + "type": "text", + "content": ", suggesting that a more focused retrieval strategy is sufficient to achieve optimal performance, whereas excessively high " + }, + { + "bbox": [ + 302, + 153, + 526, + 342 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 302, + 153, + 526, + 342 + ], + "type": "text", + "content": " values may introduce noise or irrelevant information. Based on these results, we select " + }, + { + "bbox": [ + 302, + 153, + 526, + 342 + ], + "type": "inline_equation", + "content": "k_{1} = 3" + }, + { + "bbox": [ + 302, + 153, + 526, + 342 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 302, + 153, + 526, + 342 + ], + "type": "inline_equation", + "content": "k_{2} = 1" + }, + { + "bbox": [ + 302, + 153, + 526, + 342 + ], + "type": "text", + "content": " in our official implementation, which balancing between information discovery and computing required." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 350, + 517, + 377 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 350, + 517, + 377 + ], + "spans": [ + { + "bbox": [ + 302, + 350, + 517, + 377 + ], + "type": "text", + "content": "C.5 Different Methods for Relation Scoring Function" + } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 326, + 390, + 503, + 466 + ], + "blocks": [ + { + "bbox": [ + 67, + 694, + 291, + 731 + ], + "lines": [ + { + "bbox": [ + 67, + 694, + 291, + 731 + ], + "spans": [ + { + "bbox": [ + 67, + 694, + 291, + 731 + ], + "type": "text", + "content": "Table 10: Performance of different subgraph retrieval configurations " + }, + { + "bbox": [ + 67, + 694, + 291, + 731 + ], + "type": "inline_equation", + "content": "k_{1}" + }, + { + "bbox": [ + 67, + 694, + 291, + 731 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 694, + 291, + 731 + ], + "type": "inline_equation", + "content": "k_{2}" + }, + { + "bbox": [ + 67, + 694, + 291, + 731 + ], + "type": "text", + "content": " with Llama-3.2-3B + Llama-3.3-70B on the FactKG dev set." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 326, + 390, + 503, + 466 + ], + "lines": [ + { + "bbox": [ + 326, + 390, + 503, + 466 + ], + "spans": [ + { + "bbox": [ + 326, + 390, + 503, + 466 + ], + "type": "table", + "html": "
MethodAverage Accuracy
Embedding Based84.64
Rerank Based84.73
Fuzzy Matching82.19
Exact Matching81.57
", + "image_path": "8610827d03ca8d49eee8ca854a131213d4b26795f9a93f91e26eb477e672e274.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 474, + 525, + 499 + ], + "lines": [ + { + "bbox": [ + 302, + 474, + 525, + 499 + ], + "spans": [ + { + "bbox": [ + 302, + 474, + 525, + 499 + ], + "type": "text", + "content": "Table 11: Performance of different scoring approach of the Subgraph Retrieval on the FactKG test set" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 302, + 515, + 526, + 636 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 515, + 526, + 636 + ], + "spans": [ + { + "bbox": [ + 302, + 515, + 526, + 636 + ], + "type": "text", + "content": "To assess the impact of different scoring mechanisms on performance, we vary the scoring function and evaluate the test set of FactKG while fix the Specialized LLM and the General LLM. Specifically, we explore multiple strategies for the Relation Scoring Function (Sim), as described in Section 4.3, incorporating diverse techniques such as embedding-based retrieval, reranking, fuzzy text matching (Wikipedia, 2025a), and exact matching." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 637, + 527, + 745 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 637, + 527, + 745 + ], + "spans": [ + { + "bbox": [ + 302, + 637, + 527, + 745 + ], + "type": "text", + "content": "For embedding-based and reranking approaches, we employ state-of-the-art pre-trained models, namely BGE-Large-EN-v1.5² and BGE-Reranker-Large³, as provided by (Xiao et al., 2023). Experimental results indicate that deep learning-based methods, such as embedding and reranking, achieve superior performance, with accuracy scores of 84.64 and 84.56, respectively. 
In contrast," + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 315, + 751, + 493, + 762 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 751, + 493, + 762 + ], + "spans": [ + { + "bbox": [ + 315, + 751, + 493, + 762 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 315, + 751, + 493, + 762 + ], + "type": "text", + "content": "https://huggingface.co/BAAI/bge-large-en-v1.5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 762, + 495, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 762, + 495, + 774 + ], + "spans": [ + { + "bbox": [ + 315, + 762, + 495, + 774 + ], + "type": "text", + "content": "3https://huggingface.co/BAAI/bge-reranker-large" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5284" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 291, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 291, + 138 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 291, + 138 + ], + "type": "text", + "content": "text-matching-based methods yield lower accuracy, with fuzzy matching and exact matching scoring 82.19 and 81.57, respectively. These findings highlight the effectiveness of deep learning-based approaches." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 139, + 291, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 139, + 291, + 232 + ], + "spans": [ + { + "bbox": [ + 67, + 139, + 291, + 232 + ], + "type": "text", + "content": "We recommend embedding-based retrieval as it enables pre-indexing of corpus relations. 
This allows precomputation of relation embeddings and requires encoding only the query relation for new Pseudo Subgraphs, eliminating the need to re-encode existing knowledge graph relations during inference." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 243, + 184, + 257 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 243, + 184, + 257 + ], + "spans": [ + { + "bbox": [ + 67, + 243, + 184, + 257 + ], + "type": "text", + "content": "D Algorithm Details" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 264, + 291, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 264, + 291, + 332 + ], + "spans": [ + { + "bbox": [ + 67, + 264, + 291, + 332 + ], + "type": "text", + "content": "The detailed implementation of the Entity Trie-constrained decoding algorithm is provided as the pseudo-code in Algorithm 1 and the Algorithm 2 details the implementation of the Subgraph Retrieval." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 341, + 149, + 355 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 341, + 149, + 355 + ], + "spans": [ + { + "bbox": [ + 67, + 341, + 149, + 355 + ], + "type": "text", + "content": "E Case Study" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 362, + 291, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 362, + 291, + 550 + ], + "spans": [ + { + "bbox": [ + 67, + 362, + 291, + 550 + ], + "type": "text", + "content": "We present the case study results of ClaimPKG on the FactKG dataset in Tables 12 and 13. 
Each table includes the claim " + }, + { + "bbox": [ + 67, + 362, + 291, + 550 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 67, + 362, + 291, + 550 + ], + "type": "text", + "content": ", pseudo-subgraphs " + }, + { + "bbox": [ + 67, + 362, + 291, + 550 + ], + "type": "inline_equation", + "content": "P_{s}" + }, + { + "bbox": [ + 67, + 362, + 291, + 550 + ], + "type": "text", + "content": ", retrieved subgraphs " + }, + { + "bbox": [ + 67, + 362, + 291, + 550 + ], + "type": "inline_equation", + "content": "S_{c}" + }, + { + "bbox": [ + 67, + 362, + 291, + 550 + ], + "type": "text", + "content": ", final justification " + }, + { + "bbox": [ + 67, + 362, + 291, + 550 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 67, + 362, + 291, + 550 + ], + "type": "text", + "content": ", and verdict " + }, + { + "bbox": [ + 67, + 362, + 291, + 550 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 67, + 362, + 291, + 550 + ], + "type": "text", + "content": ". Table 12 showcases correctly predicted examples, demonstrating ClaimPKG's ability to accurately capture claim structures and generate well-grounded justifications. Conversely, Table 13 highlights incorrectly predicted cases of two error types as detailed in Section 5.3. The first two examples illustrate Reasoning Errors, while the third represents a Retrieval Error. These insights serve as a foundation for future improvements, emphasizing key areas for future refinement." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 561, + 185, + 576 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 561, + 185, + 576 + ], + "spans": [ + { + "bbox": [ + 67, + 561, + 185, + 576 + ], + "type": "text", + "content": "F Prompt Templates" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 582, + 291, + 636 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 582, + 291, + 636 + ], + "spans": [ + { + "bbox": [ + 67, + 582, + 291, + 636 + ], + "type": "text", + "content": "For better reproducibility, we present all prompt templates in the appendix. Below is a quick reference list outlining the prompt templates and their usages:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 81, + 643, + 290, + 766 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 81, + 643, + 289, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 643, + 289, + 682 + ], + "spans": [ + { + "bbox": [ + 81, + 643, + 289, + 682 + ], + "type": "text", + "content": "- Figure 6: Prompt the General LLM to reason on the input claim and retrieved subgraphs to produce justification and final verdict." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 81, + 692, + 289, + 730 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 692, + 289, + 730 + ], + "spans": [ + { + "bbox": [ + 81, + 692, + 289, + 730 + ], + "type": "text", + "content": "Figure 7: Few-shot prompts the General LLM to generate a Pseudo Subgraph with provided examples." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 81, + 740, + 290, + 766 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 740, + 290, + 766 + ], + "spans": [ + { + "bbox": [ + 81, + 740, + 290, + 766 + ], + "type": "text", + "content": "- Figure 8: Annotate the inside and outside entities of the input claim for the training dataset." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5285" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 77, + 121, + 509, + 357 + ], + "blocks": [ + { + "bbox": [ + 74, + 107, + 329, + 120 + ], + "lines": [ + { + "bbox": [ + 74, + 107, + 329, + 120 + ], + "spans": [ + { + "bbox": [ + 74, + 107, + 329, + 120 + ], + "type": "text", + "content": "Algorithm 1: LLM Decoding with Entity-Trie Constraint" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "lines": [ + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "spans": [ + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "text", + "content": "Input:Specialized LLM, Input claim " + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "text", + "content": " Entity TriE T \nOutput:Pseudo-Subgraph P \nInitialize: " + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "inline_equation", + "content": "\\mathcal{P}\\gets \\emptyset" + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "text", + "content": " // Initialize pseudo subgraph \n" + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "inline_equation", + "content": "h_0\\gets" + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "text", + "content": " InitializeHiddenStates(); constrained " + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "inline_equation", + "content": "\\leftarrow" + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "text", 
+ "content": " False; \nFunction ConstrainedDecoding(LLM,c,T): \nwhile True do \n" + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "inline_equation", + "content": "p_t,h_t\\gets LLM(\\mathcal{P},c,h_{t - 1})" + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "text", + "content": " // Compute token probabilities and update hidden states if constrained then \nprefix " + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "inline_equation", + "content": "\\leftarrow" + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "text", + "content": " ExtractPrefix(P); // Retrieve tokens from last unclosed to the last allowed " + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "inline_equation", + "content": "\\leftarrow" + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "text", + "content": " T.lookup(prefix);// Retrieve allowed tokens from valid continuations in T " + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "inline_equation", + "content": "p_t\\gets" + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "text", + "content": " MaskProb " + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "inline_equation", + "content": "(p_t," + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "text", + "content": " allowed); // Impose probabilities of invalid tokens to be 0 \nnew_token " + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "inline_equation", + "content": "\\leftarrow" + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "text", + "content": " arg max " + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "inline_equation", + "content": "p_t" + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "text", + "content": " . 
// Select new token for P \n" + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "inline_equation", + "content": "\\mathcal{P}\\gets \\mathcal{P}\\cup \\{\\text{new_token}\\}" + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "text", + "content": " . if new_token " + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "inline_equation", + "content": "= = < e>" + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "text", + "content": " then " + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "inline_equation", + "content": "\\sqsubset" + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "text", + "content": " constrained " + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "inline_equation", + "content": "\\leftarrow" + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "text", + "content": " True; if new_token " + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "inline_equation", + "content": "= = < / e>" + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "text", + "content": " then " + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "inline_equation", + "content": "\\sqsubset" + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "text", + "content": " constrained " + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "inline_equation", + "content": "\\leftarrow" + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "text", + "content": " False; if new_token " + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "inline_equation", + "content": "= = EOS" + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "text", + "content": " then " + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "inline_equation", + "content": "\\sqsubset" + }, + { + "bbox": [ + 77, + 121, + 509, + 357 + ], + "type": "text", + "content": " break; \nreturn P" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" 
+ } + ], + "index": 1, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 76, + 443, + 178, + 453 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 443, + 178, + 453 + ], + "spans": [ + { + "bbox": [ + 76, + 443, + 178, + 453 + ], + "type": "text", + "content": "GENERAL REASONING" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 76, + 463, + 517, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 463, + 517, + 494 + ], + "spans": [ + { + "bbox": [ + 76, + 463, + 517, + 494 + ], + "type": "text", + "content": "Task: Verify whether the fact in the given sentence is true or false based on the provided graph triplets. Use only the information in the triplets for verification." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 74, + 503, + 518, + 573 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 74, + 503, + 352, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 503, + 352, + 513 + ], + "spans": [ + { + "bbox": [ + 74, + 503, + 352, + 513 + ], + "type": "text", + "content": "- The triplets provided represent all relevant knowledge that can be retrieved." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 74, + 513, + 393, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 513, + 393, + 523 + ], + "spans": [ + { + "bbox": [ + 74, + 513, + 393, + 523 + ], + "type": "text", + "content": "- If the fact is a negation and the triplets do not include the fact, consider the fact as true." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 74, + 523, + 518, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 523, + 518, + 552 + ], + "spans": [ + { + "bbox": [ + 74, + 523, + 518, + 552 + ], + "type": "text", + "content": "- Ignore questions and verify only the factual assertion within them. 
For example, in the question \"When was Daniel Martínez (politician) a leader of Montevideo?\", focusing on verifying the assertion \"Daniel Martínez (politician) a leader of Montevideo\"." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 74, + 553, + 517, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 553, + 517, + 573 + ], + "spans": [ + { + "bbox": [ + 74, + 553, + 517, + 573 + ], + "type": "text", + "content": "- Interpret the “" + }, + { + "bbox": [ + 74, + 553, + 517, + 573 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 74, + 553, + 517, + 573 + ], + "type": "text", + "content": "” symbol in triplets as indicating a reverse relationship. For example: “A " + }, + { + "bbox": [ + 74, + 553, + 517, + 573 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 74, + 553, + 517, + 573 + ], + "type": "text", + "content": " south of B” means “B is north of A”." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 76, + 582, + 147, + 592 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 582, + 147, + 592 + ], + "spans": [ + { + "bbox": [ + 76, + 582, + 147, + 592 + ], + "type": "text", + "content": "Response Format:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 75, + 593, + 399, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 593, + 399, + 643 + ], + "spans": [ + { + "bbox": [ + 75, + 593, + 399, + 643 + ], + "type": "text", + "content": "Provide your response in the following JSON format without any additional explanations: \n{ \"rationale\": \"A concise explanation for your decision\", \"verdict\": \"true/false as the JSON value\" }" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 76, + 652, + 110, + 663 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 652, + 110, + 663 + ], + "spans": [ + { + "bbox": [ + 76, + 652, + 110, + 663 + ], + "type": "text", 
+ "content": "Triplets:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 76, + 663, + 117, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 663, + 117, + 673 + ], + "spans": [ + { + "bbox": [ + 76, + 663, + 117, + 673 + ], + "type": "text", + "content": "{triplets}" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 76, + 682, + 104, + 692 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 682, + 104, + 692 + ], + "spans": [ + { + "bbox": [ + 76, + 682, + 104, + 692 + ], + "type": "text", + "content": "Claim:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 76, + 692, + 113, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 692, + 113, + 703 + ], + "spans": [ + { + "bbox": [ + 76, + 692, + 113, + 703 + ], + "type": "text", + "content": "{claim}" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 157, + 725, + 436, + 738 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 725, + 436, + 738 + ], + "spans": [ + { + "bbox": [ + 157, + 725, + 436, + 738 + ], + "type": "text", + "content": "Figure 6: Prompt template for the general LLM to perform reasoning" + } + ] + } + ], + "index": 15, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5286" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 77, + 89, + 509, + 765 + ], + "blocks": [ + { + "bbox": [ + 74, + 74, + 223, + 86 + ], + "lines": [ + { + "bbox": [ + 74, + 74, + 223, + 86 + ], + "spans": [ + { + "bbox": [ + 74, + 74, + 223, + 86 + ], + "type": "text", + "content": "Algorithm 2: Subgraph Retrieval" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_caption" 
+ }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "lines": [ + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "spans": [ + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": "Input: Knowledge graph " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "\\mathcal{G}" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " Pseudo Subgraph List " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "P_{c}" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " Top " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "k_{1}" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " Candidate Unknown Entities, Top " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "k_{2}" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " Complete Triplets \nOutput:Combined subgraph " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "S_{c}" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " \nFunction SubgraphRetrieval " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(\\mathcal{G},\\mathcal{P}_c,k_1,k_2)" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " .. \n" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "S\\gets \\emptyset" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " . 
\nforeach " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "\\mathcal{P}\\in \\mathcal{P}_c" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " do \n" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "S\\gets S\\cup" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " RetrieveSingleSubgraph " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(\\mathcal{G},\\mathcal{P},k_1,k_2)" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " // Process each pseudo subgraph \nreturn JoinSubgraphs " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(S)" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " // Combine subgraphs \nFunction RetrieveSingleSubgraph " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(\\mathcal{G},\\mathcal{P},k_1,k_2)" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " .. 
\n" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(T_{comp},T_{inc})\\leftarrow" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " CategorizeTriplets( " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "\\mathcal{P}" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " );//Split into complete/incomplete triplets \n" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "S_{inc}\\gets" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " RetrieveIncomplete " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(\\mathcal{G},T_{inc},k_1)" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " . \n" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "S_{comp}\\gets" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " RetrieveComplete " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(\\mathcal{G},T_{comp},k_1,k_2)" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " . \nreturn " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "S_{inc}\\cup S_{comp}" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " \nFunction RetrieveIncomplete " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(\\mathcal{G},T_{inc},k_1)" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " .. \n" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "S\\gets \\emptyset" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " . 
\n" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "G\\gets" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " GroupTripletsByUnknown " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(T_{inc})" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " //Group by unknown entity \nforeach " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "g\\in G" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " do \n" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(E_u,R_u)\\leftarrow" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " ExtractPseudoStructure " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(g)" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " //Extract entities and relations associated to unknown entity \n" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "C\\gets \\emptyset" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " . \nforeach " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(e,r)\\in (E_u,R_u)" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " do \n" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(C_e,\\mathrm{scores})\\leftarrow" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " GetCandidatesAndScores " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(G,e,r)" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " . 
\n" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "C\\gets C\\cup \\{(C_e,\\mathrm{scores})\\}" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " . \n" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "C =" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " AggregateGlobalScore(C); //Aggregate candidate scores globally " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "C^{*}\\gets" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " RankTopKCandidates " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(C,k_{1})" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " //Select top- " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "k_{1}" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " candidates \n" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "S\\gets S\\cup" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " GetTriplets " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(C^{*},g)" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " . \nreturn " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " \nFunction GetCandidatesAndScores " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(G,e,r)" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " .. 
\n" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "R_{act}\\gets" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " RetrieveActualConnectedRelations " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(G,e)" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " . \n" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "E_{act}\\gets" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " RetrieveActualConnectedEntities " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(G,e)" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " . \n" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "r\\_ score s\\gets" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " RelationScore(r, " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "R_{act}" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " . \n" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "S\\gets \\emptyset" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " . 
\nforeach " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "e^{\\prime}\\in E_{act}" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " do \n" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "s\\gets" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " MaxRelatedRelationScores(e',r Scores); \n" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "S\\gets S\\cup \\{(e^{\\prime},s)\\}" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " . \nreturn " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " // Score connected entities \nFunction AggregateGlobalScore " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(C)" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " .. 
\n//Calculate new scores and reassign for each " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "C\\_ e" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " \nforeach " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(C_e,\\mathrm{scores})\\in C" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " do \nforeach " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(c,s)\\in (C_e,\\mathrm{scores})" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " do \n" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "s\\gets" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " Sum([s'(c) for " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(C',s')" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " if " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "c\\in C')" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " \nreturn " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " . \nFunction RankTopKCandidates " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(C,k_1)" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " .. 
\n" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "C^{*}\\gets \\emptyset" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " . \nforeach " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(C_e,\\mathrm{scores})\\in C" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " do \n" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "C_e^*\\gets" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " TopKCandidates " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(C_e,\\mathrm{scores},k_1)" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " . \n" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "C^{*}\\gets C^{*}\\cup C_{e}^{*}" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " . \nreturn " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "C^{*}" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " //Rank candidates per unknown entity \nFunction RetrieveComplete " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(G,T_{comp},k_1,k_2)" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " .. \n" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "S\\gets \\emptyset" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " . 
\nforeach " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(h,r,t)\\in T_{comp}" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " do if ExistConnection " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(h,t)" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " then " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "T\\gets" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " GetTripletsWithHeadAndTail " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "(G,h,t)" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " scores " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "\\leftarrow \\emptyset" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " . 
foreach " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "t'\\in T" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " do scores " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "\\leftarrow" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " scores U RelationScore(r, " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "t'" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": ".relation); " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "S\\gets S\\cup" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " TopK(T,scores, " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "k_{2}" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " ); //Rank connected triplets else " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "S_{1}\\gets" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " RetrieveIncomplete(G,[(unknown,r,t)], " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "k_{1}" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": ") " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "S_{2}\\gets" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " RetrieveIncomplete(G,[(h,r,unknown)], " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "k_{1}" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": ") " + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "inline_equation", + "content": "S\\gets 
S\\cup (S_1\\cup S_2)" + }, + { + "bbox": [ + 77, + 89, + 509, + 765 + ], + "type": "text", + "content": " // Handle missing connection" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "algorithm" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5287" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 68, + 110, + 526, + 709 + ], + "blocks": [ + { + "bbox": [ + 68, + 110, + 526, + 709 + ], + "lines": [ + { + "bbox": [ + 68, + 110, + 526, + 709 + ], + "spans": [ + { + "bbox": [ + 68, + 110, + 526, + 709 + ], + "type": "table", + "html": "
Claim cPseudo-Subgraphs PcRetrieved Subgraphs ScJustification j; Verdict v
The 11th Mississippi Infantry Monument is not in Gettysburg, Pennsylvania.P1: 11th Mississippi Infantry Monument | location | Gettysburg, Pennsylvania(11th Mississippi Infantry Monument, municipality, Gettysburg, Pennsylvania)Justification: The triplets provided include '11th Mississippi Infantry Monument', 'municipality', 'Gettysburg, Pennsylvania' and '11th Mississippi Infantry Monument', 'state', 'Pennsylvania', indicating the monument's location as Gettysburg, Pennsylvania.
P2: Gettysburg, Pennsylvania | -location | 11th Mississippi Infantry Monument(11th Mississippi Infantry Monument, website, 11th mississippianemy monument)
11th Mississippi Infantry Monument | location | unknown_0(U.S. Route 140, location, Gettysburg)
P3: 11th Mississippi Infantry Monument | location | Gettysburg(Pennsylvania Route 34, location, Gettysburg)
11th Mississippi Infantry Monument | location | Unknown_0(U.S. Route 30 in Pennsylvania, location, Gettysburg)
P4: 11th Mississippi Infantry Monument | location | Pennsylvania(11th Mississippi Infantry Monument, state, Pennsylvania)
11th Mississippi Infantry Monument | is part of military conflict | Gettysburg(11th Mississippi Infantry Monument, region, Adams County, Pennsylvania)
11th Mississippi Infantry Monument | location | Pennsylvania(11th Mississippi Infantry Monument, country, United States)
11th Mississippi Infantry Monument | location | Unknown_0(11th Mississippi Infantry Monument, category, Contributing property)
P5: 11th Mississippi Infantry Monument | location | Gettysburg, Pennsylvania(Lutheran Theological Seminary at Gettysburg, city, Gettysburg)
(11th Mississippi Infantry Monument, range, Semi-nary Ridge)Verdict: False
It was released in Hardcover by J.V. JonesP1: Hardcover | -media type | unknown_0(Hardcover, -media type, A Fortress of Grey Ice)Justification: The triplets show that 'A Fortress of Grey Ice' is a book by 'J.V. Jones' and it is of 'Hardcover' media type, which supports the claim.
unknown_0 | author | J.V. Jones(Hardcover, -media type, A Clockwork Orange)
P2: unknown_0 | author | J.V. Jones(Hardcover, -media type, The Bell Curve)
unknown_0 | media type | Hardcover(A Fortress of Grey Ice, author, J.V. Jones)
unknown_0 | -genre | unknown_1(A Cavern of Black Ice, author, J.V. Jones)
P3: J.V. Jones | -author | unknown_0(The Barbed Coil, author, J.V. Jones)
unknown_0 | media type | Hardcover(Hardcover, -media type, The Andromeda Strain)
P4: Hardcover | -media type | J.V. Jones(1963, -date of birth, J.V. Jones)
J.V. Jones | -author | unknown_0(Liverpool, England, -place of birth, J.V. Jones)
unknown_0 | previous work | unknown_1
P5: Hardcover | -media type | unknown_0
unknown_0 | author | J.V. Jones
A person was born in the County of Ty-rol and later died in Austria which is led by Doris Bures.P1: Austria | leader | Doris Bures(Austria, leader, Doris Bures)Justification: The triplets show that Kurt Schuschnigg, Alfons Gorbach, and Josef Hellensteiner were born in the County of Tyrol and died in Austria. Additionally, Doris Bures is the leader of Austria.
Austria | -death place | unknown_0(Austria, -death place, Hans Hörberg)
unknown_0 | death place | County of Tyrol(Austria, -death place, Edgar Meyer (painter) )
P2: Austria | leader name | Doris Bures(Austria, -death place, Rakhat Aliyev)
Austria | -death place | unknown_0(Ahn Hörberg, death place, County of Tyrol)
unknown_0 | death place | Austria(Edgar Meyer (painter), death place, County of Tyrol)
unknown_0 | successor | Doris Bures(Friedrich Hasenöhr, death place, County of Tyrol)
P3: County of Tyrol | -birth place | unknown_0(County of Tyrol, -birth place, Kurt Schuschnigg)
unknown_0 | death place | Austria(Sky, death place, Alfons Gorbach)
unknown_0 | successor | Doris Bures(County of Tyrol, -birth place, Josef Hellensteiner)
P4: Doris Bures | -leader | Austria(Kurt Schuschnigg, death place, Austria)
Austria | -death place | unknown_0(Alfons Gorbach, death place, Austria)
unknown_0 | birth place | County of Tyrol(Josef Hellensteiner, death place, Austria)
P5: Austria | leader | Doris Bures(Barbara Prammer, successor, Doris Bures)
Austria | -death place | unknown_0(Werner Faymann, successor, Doris Bures)
unknown_0 | birth place | County of Tyrol(Alois Stöger, -successor, Doris Bures)
The New Britain Transit is the operat-ing organisation of Agra Airport which is located in India and has the location identifier VIAG.P1: Agra Airport | location | New Britain Transit(Agra Airport, location, India)Justification: The operator of Agra Airport is listed as 'Indian Air Force' and 'Airports Authority of India', not 'New Britain Transit', which operates 'Connecticut Transit New Britain and Bris-tol'
Agra Airport | location | India(Agra Airport, location, Uttar Pradesh)
Agra Airport | faa | VIAG(Agra Airport, location, Agra)
P2: Agra Airport | location | India(Connecticut Transit New Britain and Bristol, oper-ator, New Britain Transit)
Agra Airport | operator | New Britain Transit(Agra Airport, cao location identifier, VIAG)
Agra Airport | faa | VIAG(Agra Airport, operator, Airports Authority of India)
Agra Airport | faa | VIAG(Agra Airport, elevation, 167.94)Verdict: False
P3: Agra Airport | location | India
Agra Airport | operator | New Britain Transit
Agra Airport | location | India
Agra Airport | faa | VIAG
", + "image_path": "62617df14bd514789f8d69ce3158412a62ad76bde340f8c42a83081a3750257c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 111, + 718, + 481, + 730 + ], + "lines": [ + { + "bbox": [ + 111, + 718, + 481, + 730 + ], + "spans": [ + { + "bbox": [ + 111, + 718, + 481, + 730 + ], + "type": "text", + "content": "Table 12: Case study of ClaimPKG on the FactKG dataset with correctly predicted samples." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5288" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 68, + 96, + 525, + 724 + ], + "blocks": [ + { + "bbox": [ + 68, + 96, + 525, + 724 + ], + "lines": [ + { + "bbox": [ + 68, + 96, + 525, + 724 + ], + "spans": [ + { + "bbox": [ + 68, + 96, + 525, + 724 + ], + "type": "table", + "html": "
Claim cPseudo-Subgraphs PcRetrieved Subgraphs ScJustification j; Verdict v
When did Indian Air Force operate Agra Airport?P1: Agra Airport || operator || Indian Air Force(Agra Airport, operator, Indian Air Force)Justification: The triplets do not provide information about the specific time period when the Indian Air Force operated Agra Airport. However, they do confirm that the Indian Air Force is the operator of Agra Airport, implying that the operation is either current or has occurred at some point, but the exact timing is not specified. Verdict: False
P2: Indian Air Force || operator || Agra Airport(Agra Airport, runway length, 2743.0)
Agra Airport || ~runway designation || unknown_0(Agra Airport, runway length, 1818.0)
Agra Airport || ~runway designation || Indian Air Force(Agra Airport, location, India)
Agra Airport || ~location || unknown_0(Agra Airport, location, Uttar Pradesh)
unknown_0 || operator || Indian Air Force(Agra Airport, location, Agra)
P4: Agra Airport || operator || Indian Air Force(Bhuj Airport, operator, Indian Air Force)
P5: Agra Airport || operator || Indian Air Force(Bhuj Rudra Mata Air Force Base, operator, Indian Air Force)
(Car Nicobar Air Force Base, operator, Indian Air Force)
I imagined that Lady Anne Monson not lived in the Cape Verde whose capital was Winchester.P1: Cape Verde || capital || Winchester(Cape Verde, capital, Praia)Justification: The capital of Cape Verde is Praia, not Winchester. Winchester is the capital of Wessex and the Kingdom of England. Lady Anne Monson's residence is listed as India, and her birthplace is listed as Dar-lington and the Kingdom of England, with no mention of Cape Verde as her residence. Verdict: True
Cape Verde || ~residence || Lady Anne Monson(Cape Verde, currency, Cape Verdean esculo)
P2: Lady Anne Monson || residence || Cape Verde(Cape Verde, ~headquarter, Movement for Democracy (Cape Verde))
Cape Verde || capital || Winchester(Wessex, capital, Winchester)
P3: Anne Monzon || residence || Cape Verde(Kingdom of England, capital, Winchester)
Cape Verde || capital || Winchester(River Itchen, Hampshire, city, Winchester)
P4: Cape Verde || capital || Winchester(Cape Verde, ~birth place, Aristides Pereira)
Cape Verde || ~residence || Lady Anne Monson(Cape Verde, ~birth place, Pedro Pires)
P5: Cape Verde || capital || Winchester(Cape Verde, ~birth place, José Maria Neves)
Cape Verde || ~death place || Lady Anne Monson(India, ~residence, Lady Anne Monson)
(Kingdom of England, ~birth place, Lady Anne Monson)
(Darlington, ~birth place, Lady Anne Monson)
(Anne Monzon, ~cinematography, "Wont Last a Day Without You")
(Anne Monzon, ~cinematography, Pop Class)
(Anne Monzon, ~cinematography, Namets!)
(Cape Verde, ~death place, Corsino Fortes)
(Cape Verde, ~death place, Cesária Évora)
(Cape Verde, ~death place, Baltasar Lopes da Silva)
(1776-02-18, ~death date, Lady Anne Monson)
(1776, ~death year, Lady Anne Monson)
The operating organization for Agra Airport is the Indian Air Force who deploy the HAL Light Combat Helicopter and the Boeing C17 Globemaster III transport aircraft.P1: Agra Airport || operator || Indian Air Force(Agra Airport, operator, Indian Air Force)Justification: The triplets confirm Agra Airport is operated by the Indian Air Force, the Indian Air Force deploys the HAL Light Combat Helicopter, and the Boeing C-17 Globemaster III is indeed a transport aircraft, but there is no direct information linking the Indian Air Force at Agra Airport to the deployment of the Boeing C-17 Globemaster III. However, given the information provided, we can infer the involvement of the Indian Air Force with the mentioned aircraft types, albeit indirectly for the Boeing C-17 Globemaster III. Verdict: False
Agra Airport || ~location || Boeing C-17 Globemaster III(Agra Airport, location, Uttar Pradesh)
Agra Airport || ~location || Boeing C-17 Globemaster III(Agra Airport, location, Agra)
Agra Airport || ~location || Boeing C-17 Globemaster III(2012 in spaceflight, site, Boeing C-17 Globemaster III)
Agra Airport || ~location || Boeing C-17 Globemaster III(2013 in spaceflight, site, Boeing C-17 Globemaster III)
Agra Airport || ~location || Boeing C-17 Globemaster III(2009 in spaceflight (July-December), site, Boeing C-17 Globemaster III)
Agra Airport || ~location || Boeing C-17 Globemaster III(Hindustan Aeronautics, manufacturer, HAL Light Combat Helicopter)
Agra Airport || ~location || Boeing C-17 Globemaster III(Boeing C-17 Globemaster III, aircraft transport, United States Air Force)
Agra Airport || operator || Indian Air Force(Boeing C-17 Globemaster III, aircraft transport, Royal Air Force)
Agra Airport || runway length || Boeing C-17 Globemaster III(Boeing C-17 Globemaster III, aircraft transport, Royal Australian Air Force)
Agra Airport || ~location || HAL Light Combat Helicopter(2743.0, runway length, Agra Airport)
Agra Airport || ~city || HAL Light Combat Helicopter(1818.0, runway length, Agra Airport)
Agra Airport || ~city || Boeing C-17 Globemaster III(HAL Light Combat Helicopter, aircraft helicopter, Indian Air Force)
(Aircraft, icoa location identifier, VIAG)
(Airlift, type, Boeing C-17 Globemaster III)
(United States, origin, Boeing C-17 Globemaster III)
(In service, status, Boeing C-17 Globemaster III)
", + "image_path": "217bb3327d615bb082f158c5d98c1b78a369f72661768f72a57d65a2e27c788b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 107, + 732, + 485, + 745 + ], + "lines": [ + { + "bbox": [ + 107, + 732, + 485, + 745 + ], + "spans": [ + { + "bbox": [ + 107, + 732, + 485, + 745 + ], + "type": "text", + "content": "Table 13: Case study of ClaimPKG on the FactKG dataset with incorrectly predicted samples." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5289" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 76, + 96, + 276, + 106 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 96, + 276, + 106 + ], + "spans": [ + { + "bbox": [ + 76, + 96, + 276, + 106 + ], + "type": "text", + "content": "FEWSHOT PSEUDO SUBGRAPH GENERATION" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 75, + 116, + 518, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 116, + 518, + 137 + ], + "spans": [ + { + "bbox": [ + 75, + 116, + 518, + 137 + ], + "type": "text", + "content": "Task: Generate a reference graph to verify the following claim. Only return the subgraphs following the format of provided examples and do NOT include other unnecessary information." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 76, + 147, + 173, + 157 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 147, + 173, + 157 + ], + "spans": [ + { + "bbox": [ + 76, + 147, + 173, + 157 + ], + "type": "text", + "content": "Here are some examples:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 75, + 166, + 518, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 166, + 518, + 187 + ], + "spans": [ + { + "bbox": [ + 75, + 166, + 518, + 187 + ], + "type": "text", + "content": "Claim: Akeem Priestley played for club RoPS and currently plays for the Orange County Blues FC, which is managed by Oliver Wyss." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 76, + 187, + 120, + 197 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 187, + 120, + 197 + ], + "spans": [ + { + "bbox": [ + 76, + 187, + 120, + 197 + ], + "type": "text", + "content": "Subgraphs:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 75, + 197, + 326, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 197, + 326, + 227 + ], + "spans": [ + { + "bbox": [ + 75, + 197, + 326, + 227 + ], + "type": "text", + "content": "Orange County Blues FC || manager || Oliver Wyss \nOrange County Blues FC || clubs || Akeem Priestley \nAkeem Priestley || team || RoPS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 76, + 236, + 317, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 236, + 317, + 247 + ], + "spans": [ + { + "bbox": [ + 76, + 236, + 317, + 247 + ], + "type": "text", + "content": "Claim: He is a Rhythm and Blues singer from Errata, Mississippi!" 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 76, + 247, + 121, + 257 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 247, + 121, + 257 + ], + "spans": [ + { + "bbox": [ + 76, + 247, + 121, + 257 + ], + "type": "text", + "content": "Subgraphs:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 75, + 257, + 272, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 257, + 272, + 286 + ], + "spans": [ + { + "bbox": [ + 75, + 257, + 272, + 286 + ], + "type": "text", + "content": " || genre || unknown_0 \nunknown_0 || birth place || Errata, Mississippi \nunknown_0 || background || unknown_1" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 75, + 296, + 518, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 296, + 518, + 316 + ], + "spans": [ + { + "bbox": [ + 75, + 296, + 518, + 316 + ], + "type": "text", + "content": "Claim: Arròs negro is a traditional dish from Spain, and from the Catalonia region, which is led by the Maria Norrfalk." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 76, + 317, + 121, + 327 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 317, + 121, + 327 + ], + "spans": [ + { + "bbox": [ + 76, + 317, + 121, + 327 + ], + "type": "text", + "content": "Subgraphs:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 75, + 327, + 287, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 327, + 287, + 356 + ], + "spans": [ + { + "bbox": [ + 75, + 327, + 287, + 356 + ], + "type": "inline_equation", + "content": "<\\mathrm{e}>" + }, + { + "bbox": [ + 75, + 327, + 287, + 356 + ], + "type": "text", + "content": " Arròs negro
|| country || Spain \n" + }, + { + "bbox": [ + 75, + 327, + 287, + 356 + ], + "type": "inline_equation", + "content": "<\\mathrm{e}>" + }, + { + "bbox": [ + 75, + 327, + 287, + 356 + ], + "type": "text", + "content": " Arròs negro || region || Catalonia \n" + }, + { + "bbox": [ + 75, + 327, + 287, + 356 + ], + "type": "inline_equation", + "content": "<\\mathrm{e}>" + }, + { + "bbox": [ + 75, + 327, + 287, + 356 + ], + "type": "text", + "content": " Catalonia || leader name || Maria Norrfalk" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 76, + 365, + 274, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 365, + 274, + 375 + ], + "spans": [ + { + "bbox": [ + 76, + 365, + 274, + 375 + ], + "type": "text", + "content": "Claim: Well, Jason Sherlock did not have a nickname!" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 76, + 376, + 121, + 386 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 376, + 121, + 386 + ], + "spans": [ + { + "bbox": [ + 76, + 376, + 121, + 386 + ], + "type": "text", + "content": "Subgraphs:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 75, + 386, + 255, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 386, + 255, + 396 + ], + "spans": [ + { + "bbox": [ + 75, + 386, + 255, + 396 + ], + "type": "inline_equation", + "content": "<\\mathrm{e}>" + }, + { + "bbox": [ + 75, + 386, + 255, + 396 + ], + "type": "text", + "content": " Jason Sherlock | | nickname | | unknown_0" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 76, + 405, + 354, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 405, + 354, + 416 + ], + "spans": [ + { + "bbox": [ + 76, + 405, + 354, + 416 + ], + "type": "text", + "content": "Claim: Garlic is the main ingredient of Ajoblanco, which is from Andalusia." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 76, + 416, + 121, + 425 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 416, + 121, + 425 + ], + "spans": [ + { + "bbox": [ + 76, + 416, + 121, + 425 + ], + "type": "text", + "content": "Subgraphs:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 75, + 425, + 251, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 425, + 251, + 445 + ], + "spans": [ + { + "bbox": [ + 75, + 425, + 251, + 445 + ], + "type": "inline_equation", + "content": "< \\mathrm{e}>" + }, + { + "bbox": [ + 75, + 425, + 251, + 445 + ], + "type": "text", + "content": " Ajoblanco || region || Andalusia \n" + }, + { + "bbox": [ + 75, + 425, + 251, + 445 + ], + "type": "inline_equation", + "content": "< \\mathrm{e}>" + }, + { + "bbox": [ + 75, + 425, + 251, + 445 + ], + "type": "text", + "content": " Ajoblanco || ingredient || Garlic" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 76, + 455, + 160, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 455, + 160, + 465 + ], + "spans": [ + { + "bbox": [ + 76, + 455, + 160, + 465 + ], + "type": "text", + "content": "....More examples ...." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 76, + 475, + 143, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 475, + 143, + 485 + ], + "spans": [ + { + "bbox": [ + 76, + 475, + 143, + 485 + ], + "type": "text", + "content": "Claim: {{claim}}" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 76, + 486, + 121, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 486, + 121, + 496 + ], + "spans": [ + { + "bbox": [ + 76, + 486, + 121, + 496 + ], + "type": "text", + "content": "Subgraphs:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 139, + 526, + 452, + 539 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 526, + 452, + 539 + ], + "spans": [ + { + "bbox": [ + 139, + 526, + 452, + 539 + ], + "type": "text", + "content": "Figure 7: Prompt template for the general LLM to generate pseudo subgraphs" + } + ] + } + ], + "index": 21, + "type": "text" + }, + { + "bbox": [ + 76, + 594, + 226, + 604 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 594, + 226, + 604 + ], + "spans": [ + { + "bbox": [ + 76, + 594, + 226, + 604 + ], + "type": "text", + "content": "ANNOTATE IN AND OUT ENTITIES" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 75, + 614, + 338, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 614, + 338, + 625 + ], + "spans": [ + { + "bbox": [ + 75, + 614, + 338, + 625 + ], + "type": "text", + "content": "Task: Specify if the following entities are mentioned in the claim or not." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 75, + 625, + 370, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 625, + 370, + 686 + ], + "spans": [ + { + "bbox": [ + 75, + 625, + 370, + 686 + ], + "type": "text", + "content": "Respond correctly in the following JSON format and do not output anything else: { \"in Entities\": [list of entities that are in the claim], \"out Entities\": [list of entities that are not in the claim] } Do not change the entity names from the list of provided entities." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 76, + 694, + 143, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 694, + 143, + 705 + ], + "spans": [ + { + "bbox": [ + 76, + 694, + 143, + 705 + ], + "type": "text", + "content": "Claim: {{claim}}" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 76, + 705, + 155, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 705, + 155, + 715 + ], + "spans": [ + { + "bbox": [ + 76, + 705, + 155, + 715 + ], + "type": "text", + "content": "Entities: {{entities}}" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 142, + 737, + 449, + 750 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 737, + 449, + 750 + ], + "spans": [ + { + "bbox": [ + 142, + 737, + 449, + 750 + ], + "type": "text", + "content": "Figure 8: Prompt template to annotate inside and outside entity of the claim." + } + ] + } + ], + "index": 27, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "5290" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 19 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file