Add Batch 67e4236a-733e-4bdf-9c79-002d7c884233
This view is limited to 50 files because it contains too many changes. See raw diff.
- EMNLP/2025/Word Salad Chopper_ Reasoning Models Waste A Ton Of Decoding Budget On Useless Repetitions, Self-Knowingly/9c0f859b-b21f-4a78-a690-59b6603a292d_content_list.json +3 -0
- EMNLP/2025/Word Salad Chopper_ Reasoning Models Waste A Ton Of Decoding Budget On Useless Repetitions, Self-Knowingly/9c0f859b-b21f-4a78-a690-59b6603a292d_model.json +3 -0
- EMNLP/2025/Word Salad Chopper_ Reasoning Models Waste A Ton Of Decoding Budget On Useless Repetitions, Self-Knowingly/9c0f859b-b21f-4a78-a690-59b6603a292d_origin.pdf +3 -0
- EMNLP/2025/Word Salad Chopper_ Reasoning Models Waste A Ton Of Decoding Budget On Useless Repetitions, Self-Knowingly/full.md +404 -0
- EMNLP/2025/Word Salad Chopper_ Reasoning Models Waste A Ton Of Decoding Budget On Useless Repetitions, Self-Knowingly/images.zip +3 -0
- EMNLP/2025/Word Salad Chopper_ Reasoning Models Waste A Ton Of Decoding Budget On Useless Repetitions, Self-Knowingly/layout.json +3 -0
- EMNLP/2025/Words Like Knives_ Backstory-Personalized Modeling and Detection of Violent Communication/149075e0-6f30-4e89-bfcd-2d28eac29102_content_list.json +3 -0
- EMNLP/2025/Words Like Knives_ Backstory-Personalized Modeling and Detection of Violent Communication/149075e0-6f30-4e89-bfcd-2d28eac29102_model.json +3 -0
- EMNLP/2025/Words Like Knives_ Backstory-Personalized Modeling and Detection of Violent Communication/149075e0-6f30-4e89-bfcd-2d28eac29102_origin.pdf +3 -0
- EMNLP/2025/Words Like Knives_ Backstory-Personalized Modeling and Detection of Violent Communication/full.md +1012 -0
- EMNLP/2025/Words Like Knives_ Backstory-Personalized Modeling and Detection of Violent Communication/images.zip +3 -0
- EMNLP/2025/Words Like Knives_ Backstory-Personalized Modeling and Detection of Violent Communication/layout.json +3 -0
- EMNLP/2025/X-CoT_ Explainable Text-to-Video Retrieval via LLM-based Chain-of-Thought Reasoning/e9f42600-a626-4237-b1d0-179ae202bf7f_content_list.json +3 -0
- EMNLP/2025/X-CoT_ Explainable Text-to-Video Retrieval via LLM-based Chain-of-Thought Reasoning/e9f42600-a626-4237-b1d0-179ae202bf7f_model.json +3 -0
- EMNLP/2025/X-CoT_ Explainable Text-to-Video Retrieval via LLM-based Chain-of-Thought Reasoning/e9f42600-a626-4237-b1d0-179ae202bf7f_origin.pdf +3 -0
- EMNLP/2025/X-CoT_ Explainable Text-to-Video Retrieval via LLM-based Chain-of-Thought Reasoning/full.md +415 -0
- EMNLP/2025/X-CoT_ Explainable Text-to-Video Retrieval via LLM-based Chain-of-Thought Reasoning/images.zip +3 -0
- EMNLP/2025/X-CoT_ Explainable Text-to-Video Retrieval via LLM-based Chain-of-Thought Reasoning/layout.json +3 -0
- EMNLP/2025/X-FLoRA_ Cross-modal Federated Learning with Modality-expert LoRA for Medical VQA/a577bafd-b215-4394-989d-96df20b9938a_content_list.json +3 -0
- EMNLP/2025/X-FLoRA_ Cross-modal Federated Learning with Modality-expert LoRA for Medical VQA/a577bafd-b215-4394-989d-96df20b9938a_model.json +3 -0
- EMNLP/2025/X-FLoRA_ Cross-modal Federated Learning with Modality-expert LoRA for Medical VQA/a577bafd-b215-4394-989d-96df20b9938a_origin.pdf +3 -0
- EMNLP/2025/X-FLoRA_ Cross-modal Federated Learning with Modality-expert LoRA for Medical VQA/full.md +844 -0
- EMNLP/2025/X-FLoRA_ Cross-modal Federated Learning with Modality-expert LoRA for Medical VQA/images.zip +3 -0
- EMNLP/2025/X-FLoRA_ Cross-modal Federated Learning with Modality-expert LoRA for Medical VQA/layout.json +3 -0
- EMNLP/2025/XAutoLM_ Efficient Fine-Tuning of Language Models via Meta-Learning and AutoML/96a5267b-72c9-4e17-9a6a-a5d24f510b0d_content_list.json +3 -0
- EMNLP/2025/XAutoLM_ Efficient Fine-Tuning of Language Models via Meta-Learning and AutoML/96a5267b-72c9-4e17-9a6a-a5d24f510b0d_model.json +3 -0
- EMNLP/2025/XAutoLM_ Efficient Fine-Tuning of Language Models via Meta-Learning and AutoML/96a5267b-72c9-4e17-9a6a-a5d24f510b0d_origin.pdf +3 -0
- EMNLP/2025/XAutoLM_ Efficient Fine-Tuning of Language Models via Meta-Learning and AutoML/full.md +543 -0
- EMNLP/2025/XAutoLM_ Efficient Fine-Tuning of Language Models via Meta-Learning and AutoML/images.zip +3 -0
- EMNLP/2025/XAutoLM_ Efficient Fine-Tuning of Language Models via Meta-Learning and AutoML/layout.json +3 -0
- EMNLP/2025/XLQA_ A Benchmark for Locale-Aware Multilingual Open-Domain Question Answering/026822b8-600b-4f23-90f5-84fc96490f40_content_list.json +3 -0
- EMNLP/2025/XLQA_ A Benchmark for Locale-Aware Multilingual Open-Domain Question Answering/026822b8-600b-4f23-90f5-84fc96490f40_model.json +3 -0
- EMNLP/2025/XLQA_ A Benchmark for Locale-Aware Multilingual Open-Domain Question Answering/026822b8-600b-4f23-90f5-84fc96490f40_origin.pdf +3 -0
- EMNLP/2025/XLQA_ A Benchmark for Locale-Aware Multilingual Open-Domain Question Answering/full.md +448 -0
- EMNLP/2025/XLQA_ A Benchmark for Locale-Aware Multilingual Open-Domain Question Answering/images.zip +3 -0
- EMNLP/2025/XLQA_ A Benchmark for Locale-Aware Multilingual Open-Domain Question Answering/layout.json +3 -0
- EMNLP/2025/XQuant_ Achieving Ultra-Low Bit KV Cache Quantization with Cross-Layer Compression/5077594a-7692-4cd3-8041-fe76cc076a33_content_list.json +3 -0
- EMNLP/2025/XQuant_ Achieving Ultra-Low Bit KV Cache Quantization with Cross-Layer Compression/5077594a-7692-4cd3-8041-fe76cc076a33_model.json +3 -0
- EMNLP/2025/XQuant_ Achieving Ultra-Low Bit KV Cache Quantization with Cross-Layer Compression/5077594a-7692-4cd3-8041-fe76cc076a33_origin.pdf +3 -0
- EMNLP/2025/XQuant_ Achieving Ultra-Low Bit KV Cache Quantization with Cross-Layer Compression/full.md +575 -0
- EMNLP/2025/XQuant_ Achieving Ultra-Low Bit KV Cache Quantization with Cross-Layer Compression/images.zip +3 -0
- EMNLP/2025/XQuant_ Achieving Ultra-Low Bit KV Cache Quantization with Cross-Layer Compression/layout.json +3 -0
- EMNLP/2025/You Are What You Train_ Effects of Data Composition on Training Context-aware Machine Translation Models/ac268ef2-2462-4dbe-bc85-23aac535bec5_content_list.json +3 -0
- EMNLP/2025/You Are What You Train_ Effects of Data Composition on Training Context-aware Machine Translation Models/ac268ef2-2462-4dbe-bc85-23aac535bec5_model.json +3 -0
- EMNLP/2025/You Are What You Train_ Effects of Data Composition on Training Context-aware Machine Translation Models/ac268ef2-2462-4dbe-bc85-23aac535bec5_origin.pdf +3 -0
- EMNLP/2025/You Are What You Train_ Effects of Data Composition on Training Context-aware Machine Translation Models/full.md +0 -0
- EMNLP/2025/You Are What You Train_ Effects of Data Composition on Training Context-aware Machine Translation Models/images.zip +3 -0
- EMNLP/2025/You Are What You Train_ Effects of Data Composition on Training Context-aware Machine Translation Models/layout.json +3 -0
- EMNLP/2025/Your Language Model Can Secretly Write Like Humans_ Contrastive Paraphrase Attacks on LLM-Generated Text Detectors/3a804804-ac4a-4762-a147-7d8c694ee698_content_list.json +3 -0
- EMNLP/2025/Your Language Model Can Secretly Write Like Humans_ Contrastive Paraphrase Attacks on LLM-Generated Text Detectors/3a804804-ac4a-4762-a147-7d8c694ee698_model.json +3 -0
EMNLP/2025/Word Salad Chopper_ Reasoning Models Waste A Ton Of Decoding Budget On Useless Repetitions, Self-Knowingly/9c0f859b-b21f-4a78-a690-59b6603a292d_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0a1a64b7446549b50548480cf90410d5421700c1fd75565f4020681a54e7c8c8
size 87376
EMNLP/2025/Word Salad Chopper_ Reasoning Models Waste A Ton Of Decoding Budget On Useless Repetitions, Self-Knowingly/9c0f859b-b21f-4a78-a690-59b6603a292d_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ee10efeb71630fe0d580e9c2db09182d9c751bcb59c42dcdaa406fa8ef8c3bf5
size 105871
EMNLP/2025/Word Salad Chopper_ Reasoning Models Waste A Ton Of Decoding Budget On Useless Repetitions, Self-Knowingly/9c0f859b-b21f-4a78-a690-59b6603a292d_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ca4caf8c25789b9577579a011e28c06eba76e2345d6c6330a070ff70083eb390
size 452110
EMNLP/2025/Word Salad Chopper_ Reasoning Models Waste A Ton Of Decoding Budget On Useless Repetitions, Self-Knowingly/full.md
ADDED
@@ -0,0 +1,404 @@
# Word Salad Chopper: Reasoning Models Waste A Ton Of Decoding Budget On Useless Repetitions, Self-Knowingly
Wenya Xie$^{1*}$, Shaochen (Henry) Zhong$^{2*}$, Hoang Anh Duy Le$^{2}$, Zhaozhuo Xu$^{3}$, Jianwen Xie$^{4}$, Zirui Liu$^{1}$

$^{1}$University of Minnesota  $^{2}$Rice University  $^{3}$Stevens Institute of Technology  $^{4}$Lambda, Inc.
# Abstract
Large Reasoning Models (LRMs) are often bottlenecked by the high cost of output tokens. We show that a significant portion of these tokens are useless self-repetitions — what we call "word salad" — that exhaust the decoding budget without adding value. Interestingly, we observe that LRMs are self-aware when trapped in these loops: the hidden states of the $\langle\backslash n\backslash n\rangle$ tokens trailing each reasoning chunk exhibit patterns that allow us to detect word salad behavior on-the-fly via a single-layer linear classifier. Once detected, a simple chop followed by a straightforward regeneration prompt yields substantial length savings with minimal quality loss. Our work offers WordSaladChopper (WSC) — a lightweight, turnkey component for LRMs that is minimally invasive to the reasoning trajectory, as it only removes semantically redundant tokens. Given its low overhead, strong savings, and the lack of semantic value of word salad tokens, we believe it is not far-fetched to argue that WSC — or a similar component — is a must-have for all LRM applications with user experience in mind. Our code is publicly available at https://github.com/wenyaxie023/WordSaladChopper.
# 1 Introduction
Despite the drastic boost in performance over their non-reasoning counterparts, one innate issue of LRMs is that they essentially trade more decoded tokens for capabilities. However, a prolonged decoding section is among the most expensive operations a Large Language Model (LLM) can experience due to compute, memory, and scheduling challenges. For instance, OpenAI o3 charges \$10/\$40 per one million input/output tokens,$^{1}$ a striking $4\times$ difference between decoding and prefill. Despite the high cost of long thinking traces, a less well-known and rarely quantified fact (Li et al., 2025; Yeo et al., 2025) is that LRMs tend to waste an enormous amount of decoding budget simply by repeating themselves verbatim, with slight variations, or by engaging in endless enumeration of cases until all budget has been expensed (see examples in Appendix G) — we refer to such behavior as Word Salad, a term often used to mock public spokespersons for giving long-winded, jargon-filled responses that ultimately lack substance or clear meaning. The "Original" column in Table 1 shows that when answering GPQA-Diamond (Rein et al., 2024), $55\%+$ of the tokens generated by DeepSeek-R1-Distill models are marked as "word salad tokens," i.e., they add no value from a semantic standpoint.[2]
Table 1: Percentage of word salad tokens in answering GPQA-Diamond. $55\% +$ of the budget has been wasted.
<table><tr><td>Model</td><td>Original</td><td>After Chop</td></tr><tr><td>DeepSeek-R1-Distill-Qwen-1.5B</td><td>63.37</td><td>5.29</td></tr><tr><td>DeepSeek-R1-Distill-Qwen-7B</td><td>61.92</td><td>4.23</td></tr><tr><td>DeepSeek-R1-Distill-Llama-8B</td><td>56.60</td><td>5.60</td></tr></table>
Naturally, making such thinking sections shorter while preserving answer quality has become a major goal of the efficiency community. In fact, many works have emerged in a short period, forming a new subfield of long-to-short (L2S), with some of the most effective L2S methods often requiring training intervention (Sui et al., 2025; Wang et al., 2025a; Liu et al., 2025). While effective, with major parameter updates, such training-based L2S methods surely introduce a rather aggressive "invasion" into the original reasoning trajectory of LRMs, where the side effects remain largely unknown. Moreover, such methods typically do not stack well with one another, as different training recipes often demand intrinsically conflicting operations. Instead, in this work, we explore whether it is possible to advance efficient reasoning in a turnkey and minimally invasive manner, just by reducing the word salad behavior — as such salad tokens are likely universally agreed to be redundant, if not outright useless, from a semantic standpoint.

Figure 1: General workflow of WordSaladChopper. 1) Detect: We allow the reasoning model to generate freely, following its original reasoning flow. Meanwhile, we classify the hidden state of each chunk's trailing $\langle\backslash n\backslash n\rangle$ token using our trained linear classifier in an on-the-fly manner; 2) Chop: Once a chopping point is reached — in this case, defined as having two consecutive word salad chunks detected — we truncate the generation to the left of it; 3) Regenerate: We append a regeneration prompt with a constant budget, allowing the model to complete its answer on its own via $\langle\mathrm{eos}\rangle$ or until the new budget is fully expensed.
Surprisingly, we find that the model is actually self-aware when it is trapped in such "word salad" loops — specifically, the hidden states of $\langle \backslash n \backslash n \rangle$ tokens at the end of each reasoning chunk show distinguishable patterns when the model is trapped versus when it is not. Leveraging this observation, we train a lightweight linear classifier that runs on-the-fly to detect this word salad behavior. Once detected, a simple chop and regeneration prompt yields significant length savings with minimal quality loss — e.g., the chopping would immediately reduce up to 92% of word salad tokens in DeepSeek-R1-Distill-Qwen-7B when undergoing GPQA-Diamond (Table 1). In summary, our main contributions are as follows:
- **Comprehensive investigation of LRM word salad behavior.** To the best of our knowledge, we are the first to systematically study the general repetition phenomenon in LRM reasoning traces, identifying its key characteristics, its persistence, and its robustness to existing repetition penalties.
- **Empirical evidence that LRMs are self-aware when trapped in word salad loops.** We show that the hidden states of $\langle\backslash n\backslash n\rangle$ tokens carry distinct signals when the model is stuck in word salad loops versus when it is reasoning normally — revealing a hidden opportunity for detection and intervention.
- **A lightweight, turnkey, minimally invasive component for all LRM applications.** We propose a specially-trained linear classifier that runs on-the-fly without retraining or architectural modification on the LRM end. Once word salad behavior is detected, a chop-then-regenerate routine significantly reduces output length with minimal reasoning quality degradation.

In this work, we aim to deliver the following messages clearly and quickly: 1) Word salad is an overlooked but severe issue present across likely all LRMs. It offers no benefit yet consumes an atrocious amount of decoding budget; and 2) LRMs are self-aware of such behavior, so on-the-fly detection and intervention is possible. We believe any LRM-serving application should consider adopting our component — or something similar — as an almost-free-lunch solution for immediate cost savings and latency improvements. Due to page limitations and the lack of closely related prior art, we refer the reader to Appendix A for Related Work discussions.
# 2 Observations
In this section, we outline four empirically supported observations of LRM word salad behavior.
# 2.1 A Heavy Contributor of Long Thinking is Word Salad-like Self-Repetitions
Much of the contribution of our work depends on whether there truly exists a significant amount of word-salad-like self-repetition within LRMs' reasoning traces. Defining such behavior demands care, as LRMs typically do not exhibit strictly verbatim repetitions, rendering rule-based methods inapplicable. To achieve an accurate yet simple flagging, we employ an embedding model $E$. For a given trace $T$, we first split $T$ into chunks based on a common delimiter — in this case, $\langle\backslash n\backslash n\rangle$ — so we have $T = c_{1} \oplus c_{2} \oplus \dots \oplus c_{n}$, where $c_{i}$ represents the $i$-th chunk of $T$ and $\oplus$ represents concatenation. A chunk $c_{i}$ is considered a "word salad chunk" if $E(c_{i}, c_{j}) \geq \theta$ for some $j \in \{1, 2, \ldots, i-1\}$, where $\theta$ is a similarity threshold. Namely, $c_{i}$ is flagged as a word salad chunk if it is highly similar to a previous chunk $c_{j}$ within the thinking trace $T$, per the embedding model $E$. We consider all tokens within a word salad chunk to be word salad tokens.
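For concreteness, the flagging rule above can be written in a few lines. The sketch below is our own minimal illustration, assuming a sentence-transformers model as the embedding model $E$ and cosine similarity as the scoring function; the threshold and look-back window follow the values later given in Section 3.1.

```python
# Minimal sketch of the Section 2.1 flagging rule (illustrative, not the released code).
# Assumes sentence-transformers as E and cosine similarity as E(c_i, c_j).
from sentence_transformers import SentenceTransformer

def flag_word_salad_chunks(trace: str, theta: float = 0.99, max_lookback: int = 100):
    """Return one 0/1 label per chunk: 1 if the chunk repeats an earlier chunk."""
    chunks = [c for c in trace.split("\n\n") if c.strip()]
    model = SentenceTransformer("all-MiniLM-L6-v2")         # illustrative embedding backbone
    emb = model.encode(chunks, normalize_embeddings=True)   # unit-norm, so dot product = cosine
    labels = [0] * len(chunks)
    for i in range(1, len(chunks)):
        lo = max(0, i - max_lookback)        # only compare against recent chunks
        sims = emb[lo:i] @ emb[i]            # similarity of c_i to each earlier chunk c_j
        if sims.size and sims.max() >= theta:
            labels[i] = 1                    # word salad chunk
    return labels
```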
Table 2: Percentage of word salad chunks within reasoning traces. Results are presented as (temp $\tau = 0.0$, $\tau = 0.6$).
<table><tr><td>Model</td><td>GSM8K</td><td>MATH-500</td><td>AIME25</td><td>GPQA-Diamond</td></tr><tr><td>Qwen-1.5B</td><td>(51.2, 37.4)</td><td>(62.9, 10.6)</td><td>(77.5, 18.7)</td><td>(87.7, 42.4)</td></tr><tr><td>Qwen-7B</td><td>(23.9, 8.1)</td><td>(45.4, 10.9)</td><td>(52.1, 10.9)</td><td>(72.7, 25.3)</td></tr><tr><td>Llama-8B</td><td>(35.0, 8.3)</td><td>(53.1, 10.5)</td><td>(62.9, 13.6)</td><td>(60.1, 18.0)</td></tr></table>
Table 2 indicates that such word salad chunks indeed occupy a non-trivial presence in reasoning traces. We additionally note that, unless otherwise specified, all reported models are from the DeepSeek-R1-Distill series with temp $\tau = 0$.
# 2.2 Once Word Salad Happens, LRMs are Unlikely to Get Out on Their Own
One unique characteristic of word salad that results in a poor user experience is that once the model triggers word salad, it is unlikely to untrap itself. Thus, the model will most likely stay trapped in such word salad loops until the entire decoding budget has been expensed. We refer to the boundary where the model first becomes trapped as the chopping point (Table 3).
Table 3: Percentage of word salad chunks before / after the chopping point.
<table><tr><td>Model</td><td>GSM8K</td><td>MATH-500</td><td>AIME25</td><td>GPQA-Diamond</td></tr><tr><td colspan="5">τ = 0.0</td></tr><tr><td>Qwen-1.5B</td><td>2.08 / 98.00</td><td>9.48 / 94.91</td><td>11.68 / 99.05</td><td>17.19 / 96.93</td></tr><tr><td>Qwen-7B</td><td>1.21 / 98.30</td><td>6.59 / 89.63</td><td>10.03 / 81.82</td><td>13.13 / 95.63</td></tr><tr><td colspan="5">τ = 0.6</td></tr><tr><td>Qwen-1.5B</td><td>2.75 / 97.21</td><td>8.23 / 51.35</td><td>8.95 / 60.07</td><td>8.84 / 93.92</td></tr><tr><td>Qwen-7B</td><td>0.34 / 77.32</td><td>2.30 / 21.80</td><td>3.10 / 13.79</td><td>1.93 / 42.81</td></tr></table>
Needless to say, this presents a catastrophic issue for users, as an ideally much shorter thinking section is now padded to the maximum budget with useless repetitions. The user is essentially paying the maximum cost for a (likely) wrong answer, while enduring the longest possible end-to-end latency. In practice, we find that Qwen-1.5B often requires a much longer runtime than its 7B counterpart, for the exact reason that it maxes out its decoding budget far more often with word salad chunks. This goes against the main motivation for using a smaller LRM in the first place.
# 2.3 Such "Word Salad" Behavior is Hard to Address with Existing Means
The previous two observations demonstrated the prevalence and severity of word salad. However, this is only a real issue if it cannot be trivially addressed via existing detection methods or the various available repetition penalty designs. Given that our word salad detection, as described in Section 2.1, relies on an embedding model $E$ to compute pairwise chunk similarities, the pipeline itself naturally serves as a mechanism for identifying word salad behavior. However, this approach is far from efficient enough to be deployed on-the-fly, as it incurs a complexity of $\Theta(n^2)$ for $n$ chunks. Even with caching, each comparison requires fully passing a chunk through $E$, which is still too costly for on-the-fly use.
One alternative avenue is to employ existing decoding penalties, such as the repeat (Keskar et al., 2019), presence, and frequency penalties. Unfortunately, these penalties introduce considerable randomness into the correctness of LRMs, often for the worse. The results in Table 4 suggest they are too aggressive an invasion of the reasoning trajectory of LRMs, and therefore too volatile to be usable.
Table 4: Task performance w/ penalties ($\tau = 0.6$).
<table><tr><td>Decoding Setting</td><td>GSM8K</td><td>MATH-500</td><td>AIME25</td><td>GPQA-Diamond</td></tr><tr><td>Vanilla</td><td>89.76</td><td>90.80</td><td>37.92</td><td>43.43</td></tr><tr><td>Repeat Penalty</td><td>86.05</td><td>87.20</td><td>25.83</td><td>49.49</td></tr><tr><td>Presence Penalty</td><td>89.61</td><td>89.80</td><td>41.67</td><td>48.48</td></tr><tr><td>Frequency Penalty</td><td>78.54</td><td>43.80</td><td>13.33</td><td>36.87</td></tr></table>
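For reference, the penalty baselines in Table 4 correspond to standard sampling knobs exposed by common inference engines. The sketch below assumes a vLLM-style API; the penalty strengths are placeholder values of ours, since the exact settings are not specified here.

```python
# Illustrative setup of the decoding-penalty baselines (assumed vLLM API; placeholder strengths).
from vllm import LLM, SamplingParams

llm = LLM(model="deepseek-ai/DeepSeek-R1-Distill-Qwen-7B")

vanilla   = SamplingParams(temperature=0.6, max_tokens=32768)
repeat    = SamplingParams(temperature=0.6, max_tokens=32768, repetition_penalty=1.2)
presence  = SamplingParams(temperature=0.6, max_tokens=32768, presence_penalty=1.0)
frequency = SamplingParams(temperature=0.6, max_tokens=32768, frequency_penalty=1.0)

# e.g., run the presence-penalty baseline on one question
outputs = llm.generate(["<benchmark question goes here>"], presence)
```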
# 2.4 Models are Self-Aware When Trapped in Word Salad Loops
We, rather surprisingly, find that LRMs are self-aware when they are trapped in word salad loops. Specifically, we find it is possible to train a simple linear classifier — with the special data curation and training recipe detailed in Section 3.1 — to distinguish the hidden state of the trailing $\langle\backslash n\backslash n\rangle$ token of word salad chunks from that of benign reasoning chunks. The lightweight nature of this linear classifier opens the door to on-the-fly detection, where we can effectively intervene with different operations once a model is trapped in a word salad loop. Table 5 supports the effectiveness of this classifier.
Table 5: Classifier performance on word salad chunk detection with Qwen-7B. Results are reported as (Acc. / AUROC).
<table><tr><td>Temp</td><td>GSM8K</td><td>MATH-500</td><td>AIME25</td><td>GPQA-Diamond</td></tr><tr><td>τ = 0.0</td><td>92.72 / 98.63</td><td>92.31 / 95.95</td><td>89.77 / 95.84</td><td>93.52 / 97.89</td></tr><tr><td>τ = 0.6</td><td>91.42 / 96.22</td><td>88.14 / 95.26</td><td>77.96 / 80.15</td><td>93.80 / 96.96</td></tr></table>
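Operationally, "self-aware" here means that a trained linear probe can read the word salad signal directly off the trailing "\n\n" hidden state. A minimal sketch using the Hugging Face transformers API (a real deployment would hook into the decode loop rather than re-run a forward pass; `probe` stands in for the classifier trained in Section 3.1):

```python
# Sketch: score the final-block hidden state of the latest trailing "\n\n" token
# with a single-layer linear probe (the classifier of Section 3.1, assumed already trained).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

name = "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"
tok = AutoTokenizer.from_pretrained(name)
model = AutoModelForCausalLM.from_pretrained(name, torch_dtype=torch.bfloat16)

def word_salad_prob(partial_generation: str, probe: torch.nn.Linear) -> float:
    """Probability that the chunk ending at the last '\\n\\n' is a word salad chunk."""
    ids = tok(partial_generation, return_tensors="pt").input_ids
    with torch.no_grad():
        out = model(ids, output_hidden_states=True)
    h = out.hidden_states[-1][0, -1]            # last layer, last token ("\n\n")
    return torch.sigmoid(probe(h.float())).item()
```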
# 3 Proposed Method
# 3.1 Training a Lightweight Linear Classifier as the Word Salad Chunk Detector
Based on the observations from Sections 2.1 and 2.2, we know that chunks after the chopping point are primarily word salad chunks. Thus, it is practically sensible to mark all chunks after these chopping points as word salad chunks — even if some of them are not by the definition of Section 2.1 — as stopping generation at the chopping point is reasonable.
**Data Curation** Following this design principle, we collect 1,000 seed thinking traces by feeding the s1 (Muennighoff et al., 2025) questions to each model tested. Adopting the methodology of Section 2.1, we first split each thinking trace $T$ into $n$ chunks $T = c_1 \oplus c_2 \oplus \dots \oplus c_n$ using $\langle\backslash n\backslash n\rangle$. We then label chunk $c_i$ as a "word salad chunk" (label 1) if $E(c_i, c_j) \geq \theta$ for some $j < i$, where $\theta$ is a similarity threshold set to 0.99; otherwise, $c_i$ is labeled as a "benign reasoning chunk" (label 0). Additionally, to avoid undesired long-range dependencies (labeling a chunk as word salad because a much, much earlier chunk happens to be similar to it), we require $(i - j) \leq 100$. We then identify the chunk at the earliest chopping point $c_t$ within this labeled $T$, where $k - 1$ consecutive chunks starting from $c_t$ are all labeled as word salad chunks. Finally, we relabel all chunks before $c_t$ with label 0 and all chunks from $c_t$ onward with label 1.
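The curation step above amounts to two passes over each trace: similarity-based labeling, then relabeling everything from the earliest chopping point onward. A minimal sketch of the second pass (`labels` is the output of the Section 2.1 flagging; `run_len` corresponds to the consecutive-salad-chunk criterion, which is 2 in our deployed chopper):

```python
# Sketch of the chopping-point relabeling used to build the classifier training data.
from typing import List

def relabel_with_chopping_point(labels: List[int], run_len: int = 2) -> List[int]:
    """Relabel chunks: 0 before the earliest chopping point, 1 from it onward."""
    streak = 0
    for i, lab in enumerate(labels):
        streak = streak + 1 if lab == 1 else 0
        if streak >= run_len:                  # earliest run of `run_len` salad chunks found
            start = i - run_len + 1            # first chunk of that run is the chopping point c_t
            return [0] * start + [1] * (len(labels) - start)
    return labels                              # no chopping point: keep the pairwise labels
```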
**Training Recipe** With this relabeled data collected, we take the output of the final transformer block at each $\langle\backslash n\backslash n\rangle$ token, along with its binary label, to train a linear classifier consisting of a single fully-connected layer, as detailed in Appendix C. We emphasize that we essentially only "pretrain" this lightweight linear classifier once per model on our s1-curated data; all reasoning evaluation results are collected on unseen data with no finetuning involved.
# 3.2 Detect, Chop, then Regenerate
Due to space limits, we refer readers to Figure 1 for the WordSaladChopper workflow. As supporting evidence, Table 5 shows that the linear classifier is extremely accurate in detecting the word salad chunks; yet Table 6 demonstrates that the regeneration prompt helps recover the task accuracy lost from brute-force chopping.
Table 6: Original/Chopped/Regenerated Acc. for Qwen-7B at $\tau = 0.6$
<table><tr><td>GSM8K</td><td>MATH-500</td><td>AIME25</td><td>GPQA-Diamond</td></tr><tr><td>89.76 / 78.24 / 89.69</td><td>90.8 / 83.2 / 89.60</td><td>37.92 / 29.17 / 37.92</td><td>43.43 / 42.93 / 43.43</td></tr></table>
# 4 Experiments and Discussion
Table 7: End-to-end task performance of WSC w/ $\tau = 0$ in terms of task accuracy and length compression. (AIME25 is omitted here as the variance can be extreme w/ $\tau = 0$ , where only one pass of 30 questions is possible.)
<table><tr><td rowspan="2">Setting</td><td colspan="2">GSM8K</td><td colspan="2">MATH-500</td><td colspan="2">GPQA-Diamond</td></tr><tr><td>Acc.</td><td>Len.</td><td>Acc.</td><td>Len.</td><td>Acc.</td><td>Len.</td></tr><tr><td colspan="7">Qwen-1.5B</td></tr><tr><td>Original</td><td>82.03</td><td>1904</td><td>72.20</td><td>8126</td><td>32.83</td><td>23449</td></tr><tr><td>WSC (Ours)</td><td>82.64↑0.61</td><td>1082↓43.19%</td><td>72.60↑0.40</td><td>4253↓47.66%</td><td>31.82↓1.01</td><td>10004↓57.34%</td></tr><tr><td colspan="7">Qwen-7B</td></tr><tr><td>Original</td><td>89.99</td><td>758</td><td>87.60</td><td>4925</td><td>44.95</td><td>12974</td></tr><tr><td>WSC (Ours)</td><td>90.45↑0.46</td><td>567↓25.23%</td><td>86.80↑0.20</td><td>3399↓31.00%</td><td>42.42↓2.53</td><td>6027↓53.55%</td></tr><tr><td colspan="7">Llama-8B</td></tr><tr><td>Original</td><td>85.60</td><td>894</td><td>79.20</td><td>5556</td><td>38.89</td><td>11969</td></tr><tr><td>WSC (Ours)</td><td>85.67↑0.07</td><td>667↓25.40%</td><td>80.4↑1.20</td><td>3684↓33.69%</td><td>38.89↑0.00</td><td>7292↓39.07%</td></tr></table>
Table 8: End-to-end task performance of WSC w/ $\tau = 0.6$ . (AIME25 results are averaged over 8 passes.)
<table><tr><td rowspan="2">Setting</td><td colspan="2">GSM8K</td><td colspan="2">MATH-500</td><td colspan="2">AIME25</td><td colspan="2">GPQA-Diamond</td></tr><tr><td>Acc.</td><td>Len.</td><td>Acc.</td><td>Len.</td><td>Acc.</td><td>Len.</td><td>Acc.</td><td>Len.</td></tr><tr><td colspan="9">Qwen-1.5B</td></tr><tr><td>Original</td><td>82.56</td><td>1012</td><td>81.60</td><td>4485</td><td>21.67</td><td>16462</td><td>35.86</td><td>7790</td></tr><tr><td rowspan="2">WSC (Ours)</td><td>83.02</td><td>818</td><td>80.40</td><td>4065</td><td>21.67</td><td>13591</td><td>35.35</td><td>5708</td></tr><tr><td>↑0.46</td><td>↓19.20%</td><td>↓1.23</td><td>↓9.38%</td><td>↑0.00</td><td>↓17.44%</td><td>↓0.45</td><td>↓26.73%</td></tr><tr><td colspan="9">Qwen-7B</td></tr><tr><td>Original</td><td>89.76</td><td>565</td><td>90.80</td><td>3597</td><td>37.92</td><td>15305</td><td>43.43</td><td>6201</td></tr><tr><td rowspan="2">WSC (Ours)</td><td>89.99</td><td>545</td><td>90.40</td><td>3215</td><td>36.25</td><td>12239</td><td>43.43</td><td>5345</td></tr><tr><td>↑0.23</td><td>↓3.44%</td><td>↓0.40</td><td>↓10.62%</td><td>↓1.67</td><td>↓20.03%</td><td>↑0.00</td><td>↓13.81%</td></tr><tr><td colspan="9">Llama-8B</td></tr><tr><td>Original</td><td>85.75</td><td>650</td><td>83.60</td><td>3899</td><td>28.75</td><td>14358</td><td>44.44</td><td>7061</td></tr><tr><td rowspan="2">WSC (Ours)</td><td>85.67</td><td>650</td><td>83.8</td><td>3641</td><td>29.16</td><td>13768</td><td>44.44</td><td>6604</td></tr><tr><td>↓0.08</td><td>↓1.32%</td><td>↑0.20</td><td>↓6.60%</td><td>↑0.42</td><td>↓4.11%</td><td>↑0.00</td><td>↓6.46%</td></tr></table>
**Result Discussion** Tables 7 and 8 showcase the effectiveness of our method: WordSaladChopper yields reasoning benchmark performance similar to that of the original model, but with reduced length. We emphasize that this is achieved with negligible overhead, as once the linear classifier is trained, its inference consists of passing the hidden state of just one $\langle\backslash n\backslash n\rangle$ token per chunk. Given how lightweight this linear classifier is, its wall-clock runtime is orders of magnitude shorter than decoding a full chunk with an LRM, so the overhead is effectively hidden from an LRM inference perspective (see Appendix I for details).
# 5 Conclusion
Our work investigates the phenomenon of word salad behavior in LRMs and introduces a lightweight, turnkey, minimally invasive way to reduce such useless waste of decoding budget.
# Limitations
While our WordSaladChopper successfully curbs the onset of repetition and maintains answer completeness through fixed-budget regeneration, we observe that certain generations still lapse into repetitive loops even after the rescue regeneration phase. This suggests that future work will require more robust and adaptive interventions to effectively disengage the model from such failure modes.
We emphasize that our work does not present an end-to-end solution that addresses the general long-to-short task of efficient reasoning; rather, we intend to highlight the severity of word salad behaviors and present a new avenue for effective LRM control and usage. Our regeneration prompt is presented as the most straightforward way to accompany word salad reduction, and there can surely be more sophisticated ways to handle such post-chopping operations. For instance, one can explore the following strategies.
- Grant the model a small regeneration budget after the regeneration prompt (our approach in this work). So even if it repeats, it will max out soon.
- Continuously apply WordSaladChopper for more chopping and more regenerations.
- Force append an end-of-think token and compel the model to output an answer on the spot. This can be combined with strategies above — giving the model a limited regeneration budget, letting it keep thinking, chopping and regenerating if necessary; then, when the budget is nearly or fully expended, forcing it to conclude and provide a short answer.
We made the decision (of not exploring sophisticated end-to-end solutions) consciously because we truly believe a WSC-like component can be a must-have turnkey addition to any LRM serving system — as no one wants to waste decoding budget on useless repetitions. So, how it is integrated into different systems will naturally demand variations.
Further, it is our honest belief that many efficient reasoning methods appear effective partly because current reasoning evaluation benchmarks have much room for improvement. Should we develop more comprehensive evaluation suites (Gema et al., 2025; Huan et al., 2025) — which we surely will in the future — we expect to see many efficient reasoning methods fail, or behave much differently than their vanilla LRM counterparts. For this reason, we want to make our approach as faithful to the original reasoning trajectory of the LRM as possible, as this is failproof to benchmark deficiency. We therefore keep the operations after the chop simple and straightforward — as there is no useful "reasoning trajectory ground truth" to adhere to once the model is already trapped in a word salad loop.
Last, we want to highlight that since our Chopper requires model-specific training, it is possible that its performance may vary under different model-task combinations. We kindly ask our end users to exercise caution when adopting our method.
# Ethical Considerations
We do not believe our work raises issues requiring ethical review, though it does interfere with the original output of the model, and end users should treat its output with care.
# Acknowledgments
We gratefully acknowledge the support of Lambda, Inc. for providing the compute for this project. The work of Zhaozhuo Xu and Zirui Liu is supported by NSF 2450524. Zhaozhuo Xu is also supported by NSF 2451398. Wenya Xie is supported in part by the Data Science Initiative (DSI) Fellowship at the University of Minnesota.
# References
Pranjal Aggarwal and Sean Welleck. 2025. L1: Controlling how long a reasoning model thinks with reinforcement learning. arXiv preprint arXiv:2503.04697.
Simon A Aytes, Jinheon Baek, and Sung Ju Hwang. 2025. Sketch-of-thought: Efficient llm reasoning with adaptive cognitive-inspired sketching. arXiv preprint arXiv:2503.05179.
Yingqian Cui, Pengfei He, Jingying Zeng, Hui Liu, Xianfeng Tang, Zhenwei Dai, Yan Han, Chen Luo, Jing Huang, Zhen Li, Suhang Wang, Yue Xing, Jiliang Tang, and Qi He. 2025. Stepwise perplexity-guided refinement for efficient chain-of-thought reasoning in large language models. In Findings of the Association for Computational Linguistics: ACL 2025, pages 18581-18597, Vienna, Austria. Association for Computational Linguistics.
Aryo Pradipta Gema, Alexander Hagele, Runjin Chen, Andy Arditi, Jacob Goldman-Wetzler, Kit Fraser-Taliente, Henry Sleight, Linda Petrini, Julian Michael, Beatrice Alex, et al. 2025. Inverse scaling in test-time compute. arXiv preprint arXiv:2507.14417.
Aaron Grattafori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783.
Tingxu Han, Zhenting Wang, Chunrong Fang, Shiyu Zhao, Shiqing Ma, and Zhenyu Chen. 2025. Token-budget-aware LLM reasoning. In Findings of the Association for Computational Linguistics: ACL 2025, pages 24842-24855, Vienna, Austria. Association for Computational Linguistics.
Shibo Hao, Sainbayar Sukhbaatar, DiJia Su, Xian Li, Zhiting Hu, Jason Weston, and Yuandong Tian. 2024. Training large language models to reason in a continuous latent space. arXiv preprint arXiv:2412.06769.
Bairu Hou, Yang Zhang, Jiabao Ji, Yujuan Liu, Kaizhi Qian, Jacob Andreas, and Shiyu Chang. 2025. Thinkprune: Pruning long chain-of-thought of llms via reinforcement learning. arXiv preprint arXiv:2504.01296.
Maggie Huan, Yuetai Li, Tuney Zheng, Xiaoyu Xu, Seungone Kim, Minxin Du, Radha Poovendran, Graham Neubig, and Xiang Yue. 2025. Does math reasoning improve general llm capabilities? understanding transferability of llm reasoning. arXiv preprint arXiv:2507.00432.
Nitish Shirish Keskar, Bryan McCann, Lav R Varshney, Caiming Xiong, and Richard Socher. 2019. Ctrl: A conditional transformer language model for controllable generation. arXiv preprint arXiv:1909.05858.
Yiwei Li, Peiwen Yuan, Shaoxiong Feng, Boyuan Pan, Xinglin Wang, Bin Sun, Heda Wang, and Kan Li. 2024. Escape sky-high cost: Early-stopping self-consistency for multi-step reasoning. In The Twelfth International Conference on Learning Representations.
Yuetai Li, Xiang Yue, Zhangchen Xu, Fengqing Jiang, Luyao Niu, Bill Yuchen Lin, Bhaskar Ramasubramanian, and Radha Poovendran. 2025. Small models struggle to learn from strong reasoners. arXiv preprint arXiv:2502.12143.
Yue Liu, Jiaying Wu, Yufei He, Hongcheng Gao, Hongyu Chen, Baolong Bi, Jiaheng Zhang, Zhiqi Huang, and Bryan Hooi. 2025. Efficient inference for large reasoning models: A survey. arXiv preprint arXiv:2503.23077.
Mateo Mahaut and Francesca Franzon. 2025. Repetitions are not all alike: distinct mechanisms sustain repetition in language models. arXiv preprint arXiv:2504.01100.
Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. 2025. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393.
Tergel Munkhbat, Namgyu Ho, Seo Hyun Kim, Yongjin Yang, Yujin Kim, and Se-Young Yun. 2025. Self-training elicits concise reasoning in large language models. arXiv preprint arXiv:2502.20122.
Sania Nayab, Giulio Rossolini, Marco Simoni, Andrea Saracino, Giorgio Buttazzo, Nicolamaria Manes, and Fabrizio Giacomelli. 2024. Concise thoughts: Impact of output length on llm reasoning and cost. arXiv preprint arXiv:2407.19825.
David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R. Bowman. 2024. GPQA: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling.
Yang Sui, Yu-Neng Chuang, Guanchu Wang, Jiamu Zhang, Tianyi Zhang, Jiayi Yuan, Hongyi Liu, Andrew Wen, Shaochen Zhong, Hanjie Chen, et al. 2025. Stop overthinking: A survey on efficient reasoning for large language models. arXiv preprint arXiv:2503.16419.
Liang Wang, Nan Yang, Xiaolong Huang, Linjun Yang, Rangan Majumder, and Furu Wei. 2024. Improving text embeddings with large language models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 11897-11916, Bangkok, Thailand. Association for Computational Linguistics.
Rui Wang, Hongru Wang, Boyang Xue, Jianhui Pang, Shudong Liu, Yi Chen, Jiahao Qiu, Derek Fai Wong, Heng Ji, and Kam-Fai Wong. 2025a. Harnessing the reasoning economy: A survey of efficient reasoning for large language models. arXiv preprint arXiv:2503.24377.
Xinglin Wang, Shaoxiong Feng, Yiwei Li, Peiwen Yuan, Yueqi Zhang, Chuyi Tan, Boyuan Pan, Yao Hu, and Kan Li. 2025b. Make every penny count: Difficulty-adaptive self-consistency for cost-efficient reasoning. In Findings of the Association for Computational Linguistics: NAACL 2025, pages 6904-6917, Albuquerque, New Mexico. Association for Computational Linguistics.
Yue Wang, Qiuzhi Liu, Jiahao Xu, Tian Liang, Xingyu Chen, Zhiwei He, Linfeng Song, Dian Yu, Juntao Li, Zhuosheng Zhang, et al. 2025c. Thoughts are all over the place: On the underthinking of o1-like llms. arXiv preprint arXiv:2501.18585.
Heming Xia, Chak Tou Leong, Wenjie Wang, Yongqi Li, and Wenjie Li. 2025. Tokenskip: Controllable chain-of-thought compression in llms. arXiv preprint arXiv:2502.12067.
Yuchen Yan, Yongliang Shen, Yang Liu, Jin Jiang, Mengdi Zhang, Jian Shao, and Yueting Zhuang. 2025. InftyThink: Breaking the length limits of long-context reasoning in large language models. arXiv preprint arXiv:2503.06692.
An Yang, Anfeng Li, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Gao, Chengen Huang, Chenxu Lv, et al. 2025a. Qwen3 technical report. arXiv preprint arXiv:2505.09388.
Chenxu Yang, Qingyi Si, Yongjie Duan, Zheliang Zhu, Chenyu Zhu, Zheng Lin, Li Cao, and Weiping Wang. 2025b. Dynamic early exit in reasoning models. arXiv preprint arXiv:2504.15895.
Qwen An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Haoran Wei, Huan Lin, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxin Yang, Jingren Zhou, Junyang Lin, Kai Dang, Keming Lu, Keqin Bao, Kexin Yang, Le Yu, Mei Li, Mingfeng Xue, Pei Zhang, Qin Zhu, Rui Men, Runji Lin, Tianhao Li, Tingyu Xia, Xingzhang Ren, Xuancheng Ren, Yang Fan, Yang Su, Yi-Chao Zhang, Yunyang Wan, Yuqi Liu, Zeyu Cui, Zhenru Zhang, Zihan Qiu, Shanghaoran Quan, and Zekun Wang. 2024. Qwen2.5 technical report. ArXiv, abs/2412.15115.
Junchi Yao, Shu Yang, Jianhua Xu, Lijie Hu, Mengdi Li, and Di Wang. 2025. Understanding the repeat curse in large language models from a feature perspective. In Findings of the Association for Computational Linguistics: ACL 2025, pages 7787-7815, Vienna, Austria. Association for Computational Linguistics.
Edward Yeo, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. 2025. Demystifying long chain-of-thought reasoning in llms. arXiv preprint arXiv:2502.03373.
Anqi Zhang, Yulin Chen, Jane Pan, Chen Zhao, Aurojit Panda, Jinyang Li, and He He. 2025a. Reasoning models know when they're right: Probing hidden states for self-verification. arXiv preprint arXiv:2504.05419.
Peitian Zhang, Zheng Liu, Shitao Xiao, Ninglu Shao, Qiwei Ye, and Zhicheng Dou. 2025b. Long context compression with activation beacon. In The Thirteenth International Conference on Learning Representations.
# A Related Works
**Training-based Long to Short (L2S)** Large reasoning models often produce lengthy chains-of-thought due to the refinement of intermediate reasoning, inflating latency and cost. A series of post-training approaches (Yan et al., 2025; Munkhbat et al., 2025) teach models to reach correct answers with fewer tokens by constructing more concise training data. TokenSkip (Xia et al., 2025), SPIRIT-FT (Cui et al., 2025), Coconut (Hao et al., 2024), and CCoT (Nayab et al., 2024) shorten reasoning traces via finetuning or latent-space supervision. These methods are effective but require re/post-training and are sometimes tied to specific architectures. Reinforcement learning approaches (Aggarwal and Welleck, 2025; Hou et al., 2025) explicitly reward shorter generations to reduce response length. While effective, all of these approaches require additional finetuning (either on LRMs or from a non-thinking model) and cannot directly function on top of off-the-shelf LRMs.
Our main reservation about such approaches is that finetuning heavily perturbs the original reasoning trajectory of the LRM. Although most L2S literature claims minimal performance degradation, it is our honest belief that many efficient reasoning methods appear effective partly because current reasoning evaluation benchmarks have much room for improvement. Should we develop more comprehensive evaluation suites — which we surely will in the future — we expect to see many efficient reasoning methods fail, or behave much differently than their vanilla LRM counterparts (and there is nothing wrong with that; these are just the typical trade-offs and the healthy progression of science). For this very reason, we want to make our approach as faithful to the original reasoning trajectory of the LRM as possible, as this is failproof to benchmark deficiency. We therefore keep the operations after the chop simple and straightforward — as there is no useful "reasoning trajectory ground truth" to adhere to once the model is already trapped in a word salad loop.
**On-the-fly and training-free intervention** Rather than additional finetuning, many methods attempt lightweight control during inference. Prompt-based strategies like TALE (Han et al., 2025) and Sketch-of-Thought (Aytes et al., 2025) control generation budgets via prompt engineering, but they rely on accurate length estimation and often struggle with complex reasoning. Difficulty-aware budgeting approaches such as DSC (Wang et al., 2025b) and Dynasor (Nayab et al., 2024) dynamically allocate compute based on estimated query difficulty or model confidence. While they share similarities with WSC in adapting decoding, they operate at the query level, whereas WSC monitors intra-sequence reasoning dynamics.
A second line of work directly manipulates the decoding process. ESC (Li et al., 2024) dynamically stops the sampling process when a local observation window reaches a low-entropy state, while DEER (Yang et al., 2025b) exploits hidden-state transitions to plant new reasoning paths upon high provisional confidence. Zhang et al. (2025a) trains a linear probe on hidden states to predict correctness and halt decoding early. Additionally, some methods apply decoding-time penalties to discourage repetitive outputs, such as the repeat penalty (Keskar et al., 2019) and the frequency and presence penalties<sup>6</sup>. However, these methods can alter the model's original reasoning trajectory and may damage overall performance. In contrast, our method focuses on identifying the onset of repetitive behavior — an orthogonal dimension of redundancy — and intervenes only to prevent repetitious loops, thereby preserving the model's full reasoning capabilities.
**LRM repetition** We emphasize that repetition (and, by extension, overthinking) in LLMs/LRMs has received increasing attention; our work is certainly not the first to notice such repetition behaviors, as is evident from the long-standing repetition penalties highlighted in Section 2.3. Here, we feature several more recent studies regarding LRM repetition.
Wang et al. (2025c) provides a valuable analysis of overthinking behaviors and proposes a self-training-based finetuning approach to simplify reasoning trajectories. Its link to repetition appears mainly in their Section 2.3, where the authors observe that later solutions sometimes repeat earlier ones and therefore promote solution diversity. Ultimately, Wang et al. (2025c) is a typical L2S method that utilizes a compound finetuning approach to encourage several desirable reasoning behaviors (not just repetition reduction) by finetuning on the model's self-generated data. WSC differs from it by providing inference-time repetition detection with negligible overhead. To the best of our knowledge, no prior work offers on-the-fly detection of repetition in LRMs, and this lightweight capability makes WSC a turnkey drop-in for most reasoning pipelines, including Wang et al. (2025c).
Yao et al. (2025) leverages pretrained Sparse Autoencoders (SAEs) to pinpoint layer-specific "repetition features," then performs activation patching to damp those features and lower the repeat score. The method is not lightweight enough for true on-the-fly use, as one must load a pretrained SAE encoder and decoder for every steered layer, and each SAE block can be larger than the layer it modifies. Further, the patch is applied to every newly decoded token, thus risking divergence from the LRM's original reasoning path — a concern we have discussed above under the L2S paragraph, given today's limited reasoning benchmarks.
Last, Mahaut and Franzon (2025) is a phenomenological/diagnostic study that analyzes how repetition arises via attention-head patterns but proposes no application-focused solutions. Its relationship to WSC is rather tangential, but we feature it here as it may interest a broader audience.
# B Details of Regeneration
We conduct all generation experiments on $4 \times$ NVIDIA A100 80G GPUs. During the rescue regeneration stage, we use tensor_parallel = 4 to fully leverage model parallelism across the available GPUs.
# B.1 Initial Generation Settings
We allow the model to generate up to 32k tokens during the initial decoding phase. This is consistent across all models and tasks.
# B.2 Rescue Regeneration Settings
We apply a fixed token budget during the rescue regeneration stage. Table 9 summarizes the settings used in our experiments.

Table 9: Rescue regeneration budget (after chopping) for all experiments. (unit: # of tokens)

<table><tr><td>Model</td><td>GSM8K</td><td>MATH-500</td><td>AIME25</td><td>GPQA-Diamond</td></tr><tr><td colspan="5">τ = 0.0</td></tr><tr><td>Qwen-1.5B</td><td>4k</td><td>4k</td><td>NA</td><td>4k</td></tr><tr><td>Qwen-7B</td><td>4k</td><td>4k</td><td>NA</td><td>4k</td></tr><tr><td>Llama-8B</td><td>4k</td><td>4k</td><td>NA</td><td>4k</td></tr><tr><td colspan="5">τ = 0.6</td></tr><tr><td>Qwen-1.5B</td><td>4k</td><td>4k</td><td>8k</td><td>4k</td></tr><tr><td>Qwen-7B</td><td>4k</td><td>4k</td><td>8k</td><td>4k</td></tr><tr><td>Llama-8B</td><td>4k</td><td>4k</td><td>4k</td><td>4k</td></tr></table>
# Rescue Regeneration Prompt
I can find a clearer solution if I focus on the core problem.
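In practice, the rescue step simply appends this prompt to the chopped reasoning prefix and decodes again under the fixed budget of Table 9. A minimal sketch assuming a vLLM-style engine (the helper name and prompt formatting are ours):

```python
# Sketch of the rescue regeneration call: chopped prefix + rescue prompt + fixed budget.
from vllm import LLM, SamplingParams

RESCUE_PROMPT = "I can find a clearer solution if I focus on the core problem."

def rescue_regenerate(llm: LLM, chopped_prefix: str, budget: int = 4096) -> str:
    """Continue from the chopped reasoning prefix under a constant token budget."""
    params = SamplingParams(temperature=0.6, max_tokens=budget)
    prompt = chopped_prefix + "\n\n" + RESCUE_PROMPT + "\n\n"
    out = llm.generate([prompt], params)
    return out[0].outputs[0].text
```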
# C Training Details of Linear Classifier
We train a single-layer logistic classifier with its default hyper-parameters: Adam optimizer (learning rate $1 \times 10^{-2}$, weight decay 0), BCEWithLogitsLoss, and a mini-batch size of 8192. Training proceeds for 50 epochs, with all random seeds fixed at 41 for reproducibility. To mitigate label imbalance, we first rebalance the training set to a 1:1 ratio of positive to negative chunks, and (where minor residual skew remains) set pos_weight to the inverse class frequency.
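These hyper-parameters map onto a very small PyTorch loop. The sketch below is our own framing of the recipe rather than the released script; it assumes the $\langle\backslash n\backslash n\rangle$ hidden states and labels have already been collected into tensors X and y.

```python
# Sketch of the probe training recipe above: one linear layer, Adam (lr 1e-2, no weight
# decay), BCEWithLogitsLoss, mini-batches of 8192, 50 epochs, seed 41, optional pos_weight.
import torch
from torch.utils.data import DataLoader, TensorDataset

def train_probe(X: torch.Tensor, y: torch.Tensor, pos_weight: float = 1.0) -> torch.nn.Linear:
    torch.manual_seed(41)
    probe = torch.nn.Linear(X.shape[1], 1)
    opt = torch.optim.Adam(probe.parameters(), lr=1e-2, weight_decay=0.0)
    loss_fn = torch.nn.BCEWithLogitsLoss(pos_weight=torch.tensor([pos_weight]))
    loader = DataLoader(TensorDataset(X.float(), y.float()), batch_size=8192, shuffle=True)
    for _ in range(50):
        for xb, yb in loader:
            opt.zero_grad()
            loss_fn(probe(xb).squeeze(-1), yb).backward()
            opt.step()
    return probe
```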
# D Details of Chopper
At each chunk boundary (i.e., whenever the model emits a newline token; see Algorithm 1) we compute a repetition score $p_i$ and classify the current sentence as short or long based on its token count and the parameter len_threshold. We maintain two counters: long_streak for consecutive long sentences with $p_i > \text{thresh}$ and short_streak for consecutive short sentences with $p_i > \text{thresh}$. Whenever $p_i \leq \text{thresh}$ we reset the counters. We stop generation and trim all remaining sentences as soon as long_streak reaches streak_len or short_streak reaches short_streak_len. In our experiments we set thresh=0.5, streak_len=2, len_threshold=10, and short_streak_len=5.
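The counter logic reads more plainly as code; below is a small stateful sketch of the same decision rule with the defaults listed above.

```python
# Sketch of the chop decision: two streak counters over per-chunk repetition scores,
# using the defaults from this appendix (thresh=0.5, streak_len=2, len_threshold=10,
# short_streak_len=5).
class ChopDecider:
    def __init__(self, thresh=0.5, streak_len=2, len_threshold=10, short_streak_len=5):
        self.thresh, self.streak_len = thresh, streak_len
        self.len_threshold, self.short_streak_len = len_threshold, short_streak_len
        self.long_streak = self.short_streak = 0

    def update(self, p_i: float, chunk_len: int) -> bool:
        """Feed one chunk's score and length; return True when it is time to chop."""
        if p_i > self.thresh:
            if chunk_len >= self.len_threshold:
                self.long_streak, self.short_streak = self.long_streak + 1, 0
            else:
                self.short_streak, self.long_streak = self.short_streak + 1, 0
        else:
            self.long_streak = self.short_streak = 0
        return (self.long_streak >= self.streak_len
                or self.short_streak >= self.short_streak_len)
```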
# E Dataset Details
# E.1 Linear Classifier Training Corpus
s1K (Muennighoff et al., 2025) contains 1000 multi-domain, competition-style questions (math, science, logic, general reasoning) with chain-of-thought solutions. The dataset is released under the Apache 2.0 license.
# E.2 Evaluation Datasets
- GSM8K: 8792 grade-school word-problems (7473 train / 1319 test) that each require 2-8 arithmetic steps. We use the 1,319-item test set.
- MATH-500: A 500-problem test subset drawn from the 12,500-item MATH benchmark, which covers algebra, number theory, geometry, combinatorics, and precalculus with worked solutions. We use this 500-item test set.
- GPQA-Diamond: 198 multiple-choice graduate-level questions across physics, biology and chemistry designed to defeat information-retrieval baselines.
- AIME25 (2025): 30 free-response problems (AIME I + II 2025) requiring creative high-school competition math; answers are three-digit integers.
# E.3 Availability and Licensing of Artifacts
- Datasets:
  - s1K: Apache 2.0.
  - GSM8K: MIT.
  - MATH-500: MIT (inherits its parent benchmark's license).
  - GPQA-Diamond: MIT.
  - AIME25: MIT license for the JSON wrapper; original problem statements © 2025 MAA, redistributed here under academic fair use.
- Models:
  - DeepSeek-R1-Distill-Qwen-1.5B, DeepSeek-R1-Distill-Qwen-7B, and DeepSeek-R1-Distill-Llama-8B: All three checkpoints are released by DeepSeek under the MIT License, which permits commercial use, redistribution, and the creation of derivative works without additional approval. Although each model is distilled from its respective parent (Qwen-2.5 (Yang et al., 2024) or Llama-3.1 (Grattafori et al., 2024)), the redistributed weights themselves inherit the MIT terms.
- Code:
  - All custom scripts will be released under the MIT license.
All artifacts used in this work have been utilized in a manner consistent with their original intended use, as specified by their respective licenses. No proprietary or restricted data were included.
# F WordSaladChopper Algorithm
We present the pseudocode for WordSaladChopper in Algorithm 1.
|
| 272 |
+
|
| 273 |
+
# G Case Studies
|
| 274 |
+
|
| 275 |
+
We provide qualitative demonstrations of degeneration behaviors and our method's intervention
|
| 276 |
+
|
| 277 |
+
Algorithm 1 WordSaladChopper
|
| 278 |
+
1: Inputs: $M, C, P, R, L$ , params
|
| 279 |
+
2: ids ← serialize(P)
|
| 280 |
+
3: long_streak, short_streak ← 0, 0
|
| 281 |
+
4: last_nl_pos ← |ids| - 1
|
| 282 |
+
5: while |ids| < L do
|
| 283 |
+
6: logits, h ← M.forward(ids)
|
| 284 |
+
7: next_id ← sample(logits)
|
| 285 |
+
8: ids.append(next_id)
|
| 286 |
+
9: if next_id ∈ NEWLINE_TOKEN_IDS then
|
| 287 |
+
10: // repetition probability
|
| 288 |
+
11: p ← C(h)
|
| 289 |
+
12: chunk_len ← |ids| - last_nl_pos - 1
|
| 290 |
+
13: last_nl_pos ← |ids| - 1
|
| 291 |
+
14: is_rep ← (p > thresh)
|
| 292 |
+
15: if is_rep then
|
| 293 |
+
16: if chunk_len ≥ len_threshold then
|
| 294 |
+
17: long_streak ← long_streak + 1
|
| 295 |
+
18: short_streak ← 0
|
| 296 |
+
19: else
|
| 297 |
+
20: short_streak ← short_streak + 1
|
| 298 |
+
21: long_streak ← 0
|
| 299 |
+
22: end if
|
| 300 |
+
23: else
|
| 301 |
+
24: long_streak, short_streak ← 0, 0
|
| 302 |
+
25: end if
|
| 303 |
+
26: chop_now ← (long_streak ≥ streak_len)
|
| 304 |
+
27: or (short_streak ≥ short_streak_len)
|
| 305 |
+
28: if chop_now then
|
| 306 |
+
29: // CHOP
|
| 307 |
+
30: ids ← ids[:-(chunk_len + 1)]
|
| 308 |
+
31: // Append regeneration prompt
|
| 309 |
+
32: ids.append(tokenize(R))
|
| 310 |
+
33: return continue_until_eos(M, ids, L)
|
| 311 |
+
34: end if
|
| 312 |
+
35: end if
|
| 313 |
+
36: end while
|
| 314 |
+
37: return detokenize(ids)
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
Case 1: Semantic Loop from Unresolved Ambiguity (MATH-500 #462). The model begins with valid reasoning but then becomes trapped in a semantic loop — repeating the same confusion without resolution:
|
| 319 |
+
|
| 320 |
+
"But when I added step-by-step, I got 9997.\n\n
|
| 321 |
+
|
| 322 |
+
"But wait, 6270 + 3737 is 10,007, so why is the step-by-step adding 3000, 700, 30, and 7 giving me 9997\n\n
|
| 323 |
+
|
| 324 |
+
"But why does the step-by-step addition give me 9997?\n\n
|
| 325 |
+
|
| 326 |
+
"Wait, so 6270 + 3737 is 10,007...\n\n
|
| 327 |
+
|
| 328 |
+
WSC detects early signs of degeneration and chops at the third chunk, followed by a regeneration prompt. The regenerated continuation quickly resolves the problem with correct reasoning within a 4k budget.
|
| 329 |
+
|
| 330 |
+
Case 2: Endless Enumeration without Convergence (MATH-500 #110). The model attempts a brute-force enumeration without reaching a conclusion:
|
| 333 |
+
|
| 334 |
+
"For $k = 1$ ..
|
| 335 |
+
|
| 336 |
+
" $k = 12$ ..
|
| 337 |
+
|
| 338 |
+
" $k = 14$ .." (chopped here)
|
| 339 |
+
|
| 340 |
+
" $k = 27$: ..."
|
| 341 |
+
|
| 342 |
+
Here, WSC intervenes at chunk 318 to prevent further unbounded enumeration, ensuring the continuation remains within budget. This illustrates WSC's ability to detect degeneration early and prevent catastrophic repetition.
|
| 343 |
+
|
| 344 |
+
# H Discussion on Choice of Delimiter
|
| 345 |
+
|
| 346 |
+
A natural question concerns our use of "\n\n" as the segmentation point for reasoning traces. We provide both intuition and empirical evidence for this choice.
|
| 347 |
+
|
| 348 |
+
Rationale We opt for " $\backslash n\backslash n$ ” because it is (i) prevalent in the reasoning traces of Large Reasoning Models (LRMs), and (ii) carries minimal semantic meaning. In contrast, tokens such as "Wait" or "Alternatively" embed semantic cues that may bias downstream classifiers. While there is no universally agreed delimiter for LRMs due to their recency, choosing minimal or non-semantic trailing tokens as chunk representatives has long been a practice in NLP. For example, dense retrievers often use the <eos> token at the end of a passage as the vector representation of the whole passage (Wang et al., 2024), and efficiency works register special <beacon> tokens at chunk boundaries to encode chunk-level information (Zhang et al., 2025b). The " $\backslash n\backslash n$ ” token naturally fulfills both criteria (minimal / non-semantic + trailing), making it a strong candidate for our purposes.
|
| 349 |
+
|
| 350 |
+
Empirical Evidence We further observe that sentences with similar semantic content yield different classifier scores at their trailing "\n\n" tokens. As repetitions accumulate, later chunks become progressively easier for the classifier to identify as degenerate. Table 10 illustrates this progression: classifier scores (0-1, with higher scores indicating stronger repetition) increase sharply with more repetitions, making the trailing "\n\n" token well suited for on-the-fly repetition detection.
|
| 351 |
+
|
| 352 |
+
Takeaway These results demonstrate that " $\backslash n\backslash n$ " provides both a theoretically sound and empirically effective delimiter for identifying the onset of repetitive behavior in LRMs. It strikes a balance between being common in generation, semantically neutral, and progressively sensitive to degenerative repetition patterns.
|
| 353 |
+
|
| 354 |
+
Table 10: Classifier scores at the trailing "\n\n" token across repetitions (MATH-500 #462, DeepSeek-R1-Distill-Qwen-7B, Temp=0.6).
|
| 355 |
+
|
| 356 |
+
<table><tr><td>Chunk idx</td><td>Sentence</td><td>Score</td></tr><tr><td>209</td><td>"But when I added step-by-step, I got 9997.\n\n"</td><td>1.19e-10</td></tr><tr><td>255</td><td>"But when I did the step-by-step addition, I got 9997.\n\n"</td><td>3.69e-5</td></tr><tr><td>...</td><td>...</td><td>...</td></tr><tr><td>430</td><td>"Wait, so that must mean that 6270 + 3737 is 9997.\n\n"</td><td>1.000</td></tr></table>
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
# I Latency of On-the-fly Detector and its Integration Strategies
|
| 361 |
+
|
| 362 |
+
Takeaway Our linear classifier for word-salad detection can be integrated into LRM decoding with negligible to near-zero latency overhead. When implemented asynchronously (in parallel with the LLM forward pass), it introduces effectively no extra wall-clock latency. When implemented sequentially (LLM waits for the classifier at each $\backslash n\backslash n$ ), the overhead is bounded to roughly $0 - 0.4\%$ under our settings.
|
| 363 |
+
|
| 364 |
+
# I.1 Integration Strategies
|
| 365 |
+
|
| 366 |
+
Asynchronous (parallel) integration Once an "\n\n" token is generated, we extract its hidden state and run the linear classifier in parallel with the next LLM forward pass. Because a single LLM forward step is consistently slower than a single classifier forward, the classifier latency is fully hidden. This mode adds practically no additional latency.
|
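A minimal sketch of this parallel integration is shown below, assuming a `model` callable that returns logits plus the last hidden state and a lightweight `classifier` callable; the function and argument names are illustrative rather than the released implementation.

```python
from concurrent.futures import ThreadPoolExecutor

def decode_with_async_detector(model, classifier, ids, newline_ids, max_len):
    """model(ids) -> (logits, last_hidden); classifier(last_hidden) -> repetition probability."""
    executor = ThreadPoolExecutor(max_workers=1)
    pending = None                      # classifier future for the most recent "\n\n" chunk
    while len(ids) < max_len:
        logits, hidden = model(ids)     # the slow step (tens of ms per token)
        next_id = int(logits.argmax())  # greedy decoding for simplicity
        ids.append(next_id)
        if next_id in newline_ids:
            # Launch the ~5 ms classifier in the background; its latency is hidden
            # behind the next (slower) LLM forward step.
            pending = executor.submit(classifier, hidden)
        if pending is not None and pending.done():
            p_rep = pending.result()
            pending = None
            # ... feed p_rep into the streak logic of Appendix D / Algorithm 1 ...
    executor.shutdown(wait=False)
    return ids
```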
| 367 |
+
|
| 368 |
+
Sequential (wait-on-classifier) integration Alternatively, the LLM may wait for the classifier decision at each $\backslash n\backslash n$ before proceeding. In that case, the overhead equals one classifier forward per reasoning chunk. Based on the runtimes in Table 11 and an average chunk length of $\sim 32$ tokens on MATH-500, this corresponds to an estimated overhead of about $0.4\%$ per chunk for a 7B model.
|
| 369 |
+
|
| 370 |
+
# I.2 Empirical Runtime
|
| 371 |
+
|
| 372 |
+
We benchmark the latency of a one-token LLM forward pass versus a single classifier prediction using the hidden state of the trailing $\backslash n\backslash n$ . The classifier inference is consistently $\sim 5$ ms, significantly faster than an LLM forward step.
|
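The sketch below shows how such a micro-benchmark could be set up with Hugging Face Transformers; the stand-in linear probe, the prompt, and the timing loop are our assumptions, and a full forward pass is used as a simple proxy for a cached one-token step.

```python
import time
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

name = "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"          # checkpoint name as on the Hub
tok = AutoTokenizer.from_pretrained(name)
model = AutoModelForCausalLM.from_pretrained(name, torch_dtype=torch.bfloat16, device_map="auto")
probe = torch.nn.Linear(model.config.hidden_size, 1).to(model.device)  # stand-in linear classifier

ids = tok("Compute 6270 + 3737 step by step.", return_tensors="pt").input_ids.to(model.device)

def mean_ms(fn, n=5):
    times = []
    for _ in range(n):
        t0 = time.perf_counter()
        fn()                                  # for faithful GPU timing, add torch.cuda.synchronize()
        times.append(time.perf_counter() - t0)
    return 1000 * sum(times) / n

with torch.no_grad():
    out = model(ids, output_hidden_states=True)
    h_last = out.hidden_states[-1][:, -1, :].float()   # hidden state of the trailing token
    llm_ms = mean_ms(lambda: model(ids))               # proxy for one LLM forward step
    clf_ms = mean_ms(lambda: probe(h_last))            # one classifier prediction
print(f"LLM fwd ~{llm_ms:.1f} ms, classifier fwd ~{clf_ms:.2f} ms")
```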
| 373 |
+
|
| 374 |
+
# I.3 Overhead Analysis
|
| 375 |
+
|
| 376 |
+
Let $T_{\mathrm{LLM}}$ and $T_{\mathrm{clf}}$ denote the per-step runtime of the LLM and the classifier, respectively, and let $\bar{L}$ be the average chunk length (in tokens).
|
| 377 |
+
|
| 378 |
+
Table 11: Average runtime over 5 runs. "LLM Fwd" = one-token forward; "Clf Fwd" = one classifier prediction from the trailing hidden state.
|
| 379 |
+
|
| 380 |
+
<table><tr><td>Model</td><td>LLM Fwd (1 tok)</td><td>Clf Fwd (1 pred.)</td><td>Hidden Dim</td></tr><tr><td>DeepSeek-R1-Distill-Qwen-1.5B</td><td>31.52 ms</td><td>4.96 ms</td><td>1536</td></tr><tr><td>DeepSeek-R1-Distill-Qwen-7B</td><td>39.16 ms</td><td>4.95 ms</td><td>3584</td></tr><tr><td>DeepSeek-R1-Distill-Llama-8B</td><td>41.12 ms</td><td>4.95 ms</td><td>4096</td></tr></table>
|
| 381 |
+
|
| 382 |
+
Under the sequential mode, the per-chunk overhead ratio is
|
| 383 |
+
|
| 384 |
+
$$
|
| 385 |
+
\frac{T_{\mathrm{clf}}}{\bar{L} \cdot T_{\mathrm{LLM}}}.
|
| 386 |
+
$$
|
| 387 |
+
|
| 388 |
+
With $T_{\mathrm{LLM}} \approx 39.16 \, \mathrm{ms}$ , $T_{\mathrm{clf}} \approx 4.95 \, \mathrm{ms}$ , and $\bar{L} \approx 32$ , the estimated overhead is
|
| 389 |
+
|
| 390 |
+
$$
|
| 391 |
+
\frac{4.95}{32\times 39.16}\approx 0.004 = 0.4\% .
|
| 392 |
+
$$
|
| 393 |
+
|
| 394 |
+
This is a theoretical estimate rather than an end-to-end measurement.
|
| 395 |
+
|
| 396 |
+
# J Additional Results on Qwen3
|
| 397 |
+
|
| 398 |
+
Setup To assess generalization beyond DeepSeek-R1 models, we evaluate the WordSaladChopper (WSC) classifier on Qwen3-8B (Yang et al., 2025a) in thinking mode across three benchmarks (GSM8K, MATH-500, AIME25) and two decoding temperatures (0.0, 0.6). The classifier operates on the hidden state of the trailing "\n\n" token to detect repetitive ("word salad") chunks on the fly.
|
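For reference, a minimal sketch of this scoring mechanism is shown below; `probe` stands in for the trained linear classifier, and the chunk-boundary check is simplified for illustration.

```python
import torch

def score_chunks(model, tokenizer, text, probe):
    """Return {token position: repetition probability} for every token that decodes to a "\n\n" boundary."""
    enc = tokenizer(text, return_tensors="pt").to(model.device)
    with torch.no_grad():
        hidden = model(**enc, output_hidden_states=True).hidden_states[-1][0]  # (seq_len, dim)
    scores = {}
    for pos, tok_id in enumerate(enc.input_ids[0].tolist()):
        if "\n\n" in tokenizer.decode([tok_id]):             # simplified chunk-boundary check
            p = torch.sigmoid(probe(hidden[pos].float())).item()
            scores[pos] = p                                   # higher = more word-salad-like
    return scores
```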
| 399 |
+
|
| 400 |
+
Table 12: Classifier accuracy (%) for word-salad chunk detection on Qwen3-8B. Higher is better.
|
| 401 |
+
|
| 402 |
+
<table><tr><td>Temp</td><td>GSM8K</td><td>MATH-500</td><td>AIME’25</td></tr><tr><td>0.0</td><td>78.0</td><td>88.1</td><td>81.4</td></tr><tr><td>0.6</td><td>78.9</td><td>87.0</td><td>84.3</td></tr></table>
|
| 403 |
+
|
| 404 |
+
Findings As shown in Table 12, the classifier achieves robust accuracy on Qwen3-8B, averaging $\sim 83\%$ across datasets and temperatures. This is lower than on DeepSeek-R1-Distill-Qwen-7B (e.g., 92.72/92.31/89.77 at $\tau = 0.0$ ), but remains usable in practice since WSC triggers a chop only after multiple consecutive detections, and simple gating rules can further reduce unnecessary interventions in hybrid reasoning pipelines.
|
EMNLP/2025/Word Salad Chopper_ Reasoning Models Waste A Ton Of Decoding Budget On Useless Repetitions, Self-Knowingly/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f0e4516b1aa014e33434b61c5910e631b7fde59dae25123449605e526eaa17fd
|
| 3 |
+
size 309943
|
EMNLP/2025/Word Salad Chopper_ Reasoning Models Waste A Ton Of Decoding Budget On Useless Repetitions, Self-Knowingly/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d381974234a02d954a54e6434a1f595e9008e8259a1f99d5ed6efefcc8e34d0c
|
| 3 |
+
size 422195
|
EMNLP/2025/Words Like Knives_ Backstory-Personalized Modeling and Detection of Violent Communication/149075e0-6f30-4e89-bfcd-2d28eac29102_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6fd14ec3e2a28cc30cf2aa951f9304b53432f8b6fb493a995a297f98e8226ff9
|
| 3 |
+
size 181747
|
EMNLP/2025/Words Like Knives_ Backstory-Personalized Modeling and Detection of Violent Communication/149075e0-6f30-4e89-bfcd-2d28eac29102_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9998d0b87b2ec450a9b01a0a5b23bfeb4ad683f062a915b96705e1f97938c084
|
| 3 |
+
size 229893
|
EMNLP/2025/Words Like Knives_ Backstory-Personalized Modeling and Detection of Violent Communication/149075e0-6f30-4e89-bfcd-2d28eac29102_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8124698cbbbfe9441b416d9a3a6d99fc5861c9e6b39dfc3836f46c21852fd679
|
| 3 |
+
size 4876965
|
EMNLP/2025/Words Like Knives_ Backstory-Personalized Modeling and Detection of Violent Communication/full.md
ADDED
|
@@ -0,0 +1,1012 @@
|
| 1 |
+
# Words Like Knives: Backstory-Personalized Modeling and Detection of Violent Communication
|
| 2 |
+
|
| 3 |
+
Jocelyn Shen
|
| 4 |
+
|
| 5 |
+
Akhila Yerukola
|
| 6 |
+
|
| 7 |
+
Xuhui Zhou
|
| 8 |
+
|
| 9 |
+
Cynthia Breazeal
|
| 10 |
+
|
| 11 |
+
Maarten Sap
|
| 12 |
+
|
| 13 |
+
Hae Won Park
|
| 14 |
+
|
| 15 |
+
Massachusetts Institute of Technology, Cambridge, MA, USA
|
| 16 |
+
Carnegie Mellon University, Pittsburgh, PA, USA
|
| 17 |
+
$\spadesuit$ Allen Institute for Artificial Intelligence, Seattle, WA, USA
|
| 18 |
+
|
| 19 |
+
joceshen@mit.edu, ayerukol@andrew.cmu.edu, xuhuiz@andrew.cmu.edu
|
| 20 |
+
|
| 21 |
+
breazeal@mit.edu, msap2@andrew.cmu.edu, haewon@mit.edu
|
| 22 |
+
|
| 23 |
+
# Abstract
|
| 24 |
+
|
| 25 |
+
Conversational breakdowns in close relationships are deeply shaped by personal histories and emotional context, yet most NLP research treats conflict detection as a general task, overlooking the relational dynamics that influence how messages are perceived. In this work, we leverage nonviolent communication (NVC) theory to evaluate LLMs in detecting conversational breakdowns and assessing how relationship backstory influences both human and model perception of conflicts. Given the sensitivity and scarcity of real-world datasets featuring conflict between familiar social partners with rich personal backstories, we contribute the PERSONACONFLICTS CORPUS<sup>1</sup>, a dataset of $N = 5{,}772$ naturalistic simulated dialogues spanning diverse conflict scenarios between friends, family members, and romantic partners. Through a controlled human study, we annotate a subset of dialogues and obtain fine-grained labels of communication breakdown types on individual turns, and assess the impact of backstory on human and model perception of conflict in conversation. We find that the polarity of relationship backstories significantly shifted human perception of communication breakdowns and impressions of the social partners, yet models struggle to meaningfully leverage those backstories in the detection task. Additionally, we find that models consistently overestimate how positively a message will make a listener feel. Our findings underscore the critical role of personalization to relationship contexts in enabling LLMs to serve as effective mediators in human communication for authentic connection.
|
| 26 |
+
|
| 27 |
+
# 1 Introduction
|
| 28 |
+
|
| 29 |
+
"Words are windows (or they're walls)"
|
| 30 |
+
|
| 31 |
+
—Ruth Bebermeyer
|
| 32 |
+
|
| 33 |
+
# Conflict scenario:
|
| 34 |
+
|
| 35 |
+
"Lina, Mel's daughter expresses a need to go out and spend time with her friend. Mel denies this request."
|
| 36 |
+
|
| 37 |
+
# Evaluate:
|
| 38 |
+
|
| 39 |
+
Relationship:
|
| 40 |
+
|
| 41 |
+
Mel
|
| 42 |
+
|
| 43 |
+
parent-child
|
| 44 |
+
|
| 45 |
+

|
| 46 |
+
|
| 47 |
+
Mom, can I go to Maya's place this weekend? A few of us are getting together to watch movies and work on the group project.
|
| 48 |
+
|
| 49 |
+
No. You've already been out once this week. You should spend more time at home with me.
|
| 50 |
+
|
| 51 |
+

|
| 52 |
+
|
| 53 |
+

|
| 54 |
+
|
| 55 |
+
But I finished all my homework and it's not even late. I don't see why I can't go.
|
| 56 |
+
|
| 57 |
+
I just don't like how often you're running around. It's not good for girls your age to be out all the time.
|
| 58 |
+
|
| 59 |
+

|
| 60 |
+
|
| 61 |
+
I feel like you're always chasing fun instead of focusing on your future.
|
| 62 |
+
|
| 63 |
+

|
| 64 |
+
|
| 65 |
+

|
| 66 |
+
Figure 1: Conversation turns can be perceived as more or less problematic depending on relationship backstory.
|
| 67 |
+
|
| 68 |
+
Human communication is inherently contextual, especially in close relationships such as those between romantic partners, family members, or friends. In these settings, speakers and listeners share a rich history and tailor their messages based on past experiences, personal sensitivities, and relational dynamics (Isaacs and Clark, 1987; Wheatley et al., 2023; Pillemer, 1992). It is also in these intimate contexts where conversational breakdowns are most likely to occur—moments when language evokes hurt, misunderstanding, or conflict.
|
| 69 |
+
|
| 70 |
+
Crucially, whether or not a message constitutes a breakdown depends on how it is interpreted in light of the dyadic relationship (Zhou et al., 2023b; Schurz et al., 2021; Dvash and Shamay-Tsoory, 2014). For example, as shown in Figure 1, a mother telling her daughter "You should spend more time at home with me" may be perceived as manipulative and controlling in one context (Backstory B), or as a tender expression of grief in another (Backstory A).
|
| 71 |
+
|
| 72 |
+
The prevalence of such breakdowns has motivated the emergence of AI-mediated communication (AIMC) systems, which aim to reframe language to promote empathy and understanding in digital interactions (Sharma et al., 2023; Kambhatla et al., 2024; Argyle et al., 2023). While promising, most existing AIMC systems are developed for public, often anonymized contexts—peer support platforms or online debates—where speakers are strangers and little is known about each participant's background. As such, these systems tend to operate without modeling interpersonal histories or long-standing relationship dynamics. Yet, it is precisely in close relationships, where such histories run deep, that conversational breakdowns are most emotionally charged—and where mitigation may have the greatest impact (Gaelick et al., 1985; Fitness and Fletcher, 1993). It is also these settings where datasets are scarce or lacking entirely, given privacy concerns and the sensitivity of real world conflicts between familiar partners.
|
| 73 |
+
|
| 74 |
+
To address these gaps, we develop a framework that simulates and analyzes communication breakdowns in intimate conversations, taking into account the relationship context. Our approach draws on Nonviolent Communication (NVC) theory (Rosenberg and Chopra, 2015), a structured approach widely used in conflict resolution and therapeutic settings, to guide our evaluation of LLM's detecting harmful communication. First, we introduce PERSONACONFLICTS CORPUS, a dataset of $N = 5,772$ simulated conflict dialogues between familiar social partners, spanning across diverse scenarios. For each conversation, we generate two distinct backstories: a positive backstory, which frames one character's actions as more understandable or sympathetic, and a negative backstory, which portrays the same character in a more problematic or blameworthy light (Moon et al., 2024). Through human validation, we show that scenarios and backstories are largely believable.
|
| 75 |
+
|
| 76 |
+
With this dataset, we investigate the role of backstory in shaping perceptions of conversational conflict. We conduct a human study on a subset of 120 conversations (240 backstory variants), collecting fine-grained turn-level annotations on violent and nonviolent communication acts, as defined by NVC theory. Our study addresses two research questions:
|
| 77 |
+
|
| 78 |
+
- RQ1: How does relationship backstory influence human perception of conflict in conversation?
|
| 79 |
+
- RQ2: How does relationship backstory influence LLM detection of conversational breakdowns?
|
| 80 |
+
|
| 81 |
+
We show that backstory significantly impacts human perception of conflict at the turn-level and overall conversation dynamics. In contrast, we find that models often fail to adjust assessments of problematic turn detection and prediction of emotional impact on the listener based on the relationship backstory. Our findings underscore the need for more context-aware approaches in modeling communication, specifically demonstrating the value of backstory-personalized AI in mediating emotionally complex interpersonal exchanges.
|
| 82 |
+
|
| 83 |
+
# 2 Related Work
|
| 84 |
+
|
| 85 |
+
# 2.1 Conversational Breakdown Detection and Reframing
|
| 86 |
+
|
| 87 |
+
In the area of AIMC, text rewriting can be used to improve interpersonal outcomes like empathy or social connection by suggesting changes to the tone or style of a message at the right time (Hancock et al., 2020). To support such systems, prior tasks propose detecting conversational breakdowns between people, and reframing messages to be more empathetic.
|
| 88 |
+
|
| 89 |
+
A growing body of research has explored detecting breakdowns in complex, user-centered settings. These works detect empathy to automatically identify spaces for intervention (Hou et al., 2025; Guda et al., 2021). Such works draw on multimodal cues to predict relational affect (Javed et al., 2024) or use linguistic and pragmatic features to detect anti/pro - social features in conversation (Zhang et al., 2018; Bao et al., 2021; Kasianenko et al., 2024).
|
| 90 |
+
|
| 91 |
+
However, none of the aforementioned works explore how breakdowns between close social partners are tailored to the relationship context of the dyad. Our work addresses this gap, acknowledging that a single generalizable notion of conflict
|
| 92 |
+
|
| 93 |
+

|
| 94 |
+
|
| 95 |
+
Moral judgment
|
| 96 |
+
|
| 97 |
+
"The problem with you is that you're too selfish."
|
| 98 |
+
|
| 99 |
+

|
| 100 |
+
|
| 101 |
+
Observation
|
| 102 |
+
|
| 103 |
+
"When I saw that you didn't offer to help during dinner cleanup..."
|
| 104 |
+
|
| 105 |
+

|
| 106 |
+
|
| 107 |
+
Comparison
|
| 108 |
+
|
| 109 |
+
"It's not as bad as what they're going through."
|
| 110 |
+
|
| 111 |
+

|
| 112 |
+
|
| 113 |
+
Feeling
|
| 114 |
+
|
| 115 |
+
...and I feel concerned and a little unsure how to support you..."
|
| 116 |
+
|
| 117 |
+

|
| 118 |
+
Figure 2: We use Nonviolent Communication Theory to ground labels for communication types. Only violent communication types were injected into simulated conflict conversations. Both communication types were used for human annotation.
|
| 119 |
+
|
| 120 |
+
Deny responsibility
|
| 121 |
+
|
| 122 |
+
"If you didn't get mad first, I wouldn't be acting like this"
|
| 123 |
+
|
| 124 |
+

|
| 125 |
+
|
| 126 |
+
Need
|
| 127 |
+
|
| 128 |
+
...because I want to feel understood and speak without feeling judged."
|
| 129 |
+
|
| 130 |
+

|
| 131 |
+
|
| 132 |
+
Demand
|
| 133 |
+
|
| 134 |
+
"You need to apologize to me right now!"
|
| 135 |
+
|
| 136 |
+

|
| 137 |
+
|
| 138 |
+
Request
|
| 139 |
+
|
| 140 |
+
"Would you be open to talking about what happened and hearing how I felt?"
|
| 141 |
+
|
| 142 |
+

|
| 143 |
+
|
| 144 |
+
Punishment
|
| 145 |
+
|
| 146 |
+
"She got what she deserved, and that was coming to her..."
|
| 147 |
+
|
| 148 |
+

|
| 149 |
+
|
| 150 |
+
Empathy
|
| 151 |
+
|
| 152 |
+
"It sounds like you're feeling really overwhelmed—do you want to talk more about it?"
|
| 153 |
+
|
| 154 |
+
understanding might not exist even for the same dialogue context, and LLMs should take into account relationship backstory in the detection process.
|
| 155 |
+
|
| 156 |
+
# 2.2 Contextualized Language Understanding
|
| 157 |
+
|
| 158 |
+
Context is crucial in accurately interpreting and generating language, particularly when evaluating harm, intent, and appropriateness (Vidgen et al., 2021; Sap et al., 2020). This has been captured in work on pragmatics (Fried et al., 2023), modeling how context influences the meaning and interpretation of language (Yerukola et al., 2024), as well as defeasible inference, where reasoning is adjusted with new information provided to the model (Rudinger et al., 2020). More recently, works indicate how social and situational context affects the perceived offensiveness of a statement, showing that context can invert a statement's interpretation entirely (Zhou et al., 2023b). Beyond language understanding, prior works also indicate that ignoring context in tasks like stylistic rewriting can lead to generic rewrites and undermine human-alignment in evaluation (Yerukola et al., 2023). However, no prior works have explored how relationship contexts influence the perception of conflict in intimate interpersonal dialogues from both human and model perspectives, which we address in our work through the lens of personalization to relationship backstories.
|
| 159 |
+
|
| 160 |
+
# 3 Non-Violent Communication Framework
|
| 161 |
+
|
| 162 |
+
Rosenberg and Chopra (2015) introduced the theory of Nonviolent Communication (NVC), a framework for compassionate communication that has been shown to promote reconciliation through peaceful dialogue in educational settings and
|
| 163 |
+
|
| 164 |
+
war-torn zones (Pinto and Cunha, 2023). We draw on NVC theory to predict conversational breakdowns at the turn level. In particular, NVC delineates 5 life-alienating communication forms (see Figure 2 for full examples): (1) Moralistic Judgments label others as "good" or "bad". (2) Making Comparisons in ways that induce guilt or resentment. (3) Denial of Responsibility shifts blame onto external forces rather than owning one's choices. (4) Communicating Desires as Demands pressures the listener rather than encouraging cooperation. (5) "Deserve" Thinking and Punishment justifies retribution instead of addressing unmet needs.
|
| 165 |
+
|
| 166 |
+
In contrast to violent communication, nonviolent communication is expressed through the following core components: (1) Observation: Describing events neutrally without judgment. (2) Feeling: Expressing emotions without assigning blame. (3) Need: Clarifying underlying needs to foster understanding. (4) Request: Making concrete, actionable, and positive requests instead of demands. (5) Empathy/Understanding: Expressed concern or checks in on other's emotions.
|
| 167 |
+
|
| 168 |
+
In the NVC framework described above, we inject violent communication types into simulated conflict conversations but use both nonviolent and violent communication types to obtain fine-grained labels for problematic or constructive communication turns during human annotation.
|
| 169 |
+
|
| 170 |
+
# 4 PERSONACONFLICTS CORPUS
|
| 171 |
+
|
| 172 |
+
We introduce the PERSONACONFLICTS CORPUS, a dataset of realistic conflict and non-conflict scenarios simulated using LLMs (Figure 3). Collecting large-scale real datasets of private, authentic breakdowns is non trivial, as these conversations are rarely shared publicly (unlike online or social
|
| 173 |
+
|
| 174 |
+

|
| 175 |
+
1. Selecting Agents & Plausible Relationships
|
| 176 |
+
Figure 3: Overview of our simulation framework for generating conflict and non-conflict conversations and relationship backstories.
|
| 177 |
+
|
| 178 |
+
media disputes), much less with backstories of the relationship history between individuals. Further, steps must be taken to ensure privacy, obtain consent, and address ethical considerations. As such, we draw on recent work in backstory generation and persona alignment (Moon et al., 2024; Jiang et al., 2024; Hu and Collier, 2024) as well as multiagent simulation (Zhou et al., 2023a, 2025; Ahmad et al., 2025; Kim et al., 2024) to generate conflict-laden scenarios which inject violent communication practices in turns and generate a set of non-conflict conversations. Our simulation setup uses the gpt-4 model and all prompts are included in Appendix A. We discuss our human evaluation verifying soundness of the dataset in Section 5.
|
| 179 |
+
|
| 180 |
+
Characters and Relationships. First, we define a set of conflict scenarios and characters with familiar relationships using SOTOPIA, a social simulation framework that comes with LLM-powered social agents with different personas and relationships (Zhou et al., 2023a). We sampled from the 40 base agents with features like age, gender, and personality of the characters. To create diverse relationships, we focused on prompting with pairs of character profiles to determine if one of the following 3 relationship types was plausible: friends, partners, and family members, where family members included parent-child, grandparent-grandchild, siblings, and extended family relations. For example, some relationships only make sense when one character is significantly older than the second character (grandparent-grandchild relationship). Based on the character profiles, we derived 74 plausible rela
|
| 181 |
+
|
| 182 |
+
tionships across character dyads (balanced/approximately a third in each of the relationship types).
|
| 183 |
+
|
| 184 |
+
Conflict and Non-Conflict Scenarios. We inject character profiles and theory-grounded scenarios into the simulation setup to create conversational episodes with observable communication breakdowns (for conflict conversations), and also simulate a set of non-conflict conversations. We grounded scenarios in Rosenberg and Chopra (2015)'s basic human needs, which contains 7 high level categories (e.g. interdependence, autonomy, etc.) and 39 more specific human needs (e.g. interdependence $\rightarrow$ respect), and we curated conflict-inducing or nonconflict scenarios based on these specific needs.
|
| 185 |
+
|
| 186 |
+
Conversation Simulation. We simulate 10-15 turn long conversations between the two agents based on the conflict or non-conflict scenarios and their relationship. For conflict-laden conversations, we embed violent communication markers, prompting the model to generate realistic emotional dialogue with subtle, rather than completely overt conflict statements. Note that the model was provided with VC types and chose the conflict types most relevant at natural turns. Non-conflict conversations were not provided with VC types and were guided to be conflict neutral. Both conflict and non-conflict conversations were prompted to avoid repetition and overly formal language.
|
| 187 |
+
|
| 188 |
+
Backstory Generation. Finally, we generated 2 plausible relationship backstories for each conversation: a positive backstory, which paints a chosen
|
| 189 |
+
|
| 190 |
+
Social Scenario
|
| 191 |
+
Your Ratings
|
| 192 |
+
Figure 4: Example of turn-level annotation interface
|
| 193 |
+

|
| 194 |
+
Relationship: Jaxon and Emily have a siblings relationship, where Jaxon is the brother of Emily.
|
| 195 |
+
Relationship Backstory: While growing up, Jaxon was constantly overshadowed by his little sister Emily's achievements. From earning top grades to being loved and praised by everyone, Emily seemed to be perfect. Over time, Jaxon developed an unspoken grudge against Emily. He still loves her as family, but he can't help but feel a constant need to pull her down or find flaws in her work, to feel superior. Consequently, Jaxon developed a more aggressive and manipulative personality, often resorting to undermining her accomplishments under the guise of giving her 'real and honest criticism'. His harsh approach continued even when they grew up and started their respective careers.
|
| 196 |
+
Assume BOTH characters are aware of the backstory.
|
| 197 |
+
|
| 198 |
+
character in a less problematic light, and a negative backstory, which paints the same character in a more problematic light. In particular, certain scenarios and ways of conveying backstory can influence empathy towards a narrator (Shen et al., 2023; Gueorgueva et al., 2023; Shen et al., 2024). We prompt the model with examples of positive and negative scenarios that induce different understanding or affect towards the speaker. Backstory generation was conditioned on character profiles (Moon et al., 2024), and the model outputs the chosen character who is painted in a more positive or negative light to make polarity consistent.
|
| 199 |
+
|
| 200 |
+
# 5 Human Study and Annotation
|
| 201 |
+
|
| 202 |
+
To answer RQ1, how backstory influences people's perceptions of conflict, we conducted a human study to assess the impact of backstory variant on perception of conflict at the conversation level and turn level, as well as to evaluate the quality of our dataset and obtain fine-grained annotations of violent or non-violent communication types. Our validation approach is grounded in prior work on synthetic dialogue evaluation (Zhou et al., 2023a; Li et al., 2023; Zhan et al., 2023; Bao et al., 2023; Zhou et al., 2023b), which rely on human ratings of plausibility, naturalness, or coherence to validate generated conversations.
|
| 203 |
+
|
| 204 |
+
Procedure and Participants. We conducted a between-subjects study where participants are assigned to a positive or negative backstory. First, participants read the background of the characters
|
| 205 |
+
|
| 206 |
+
and the conversation and rated overall measures of the conversation. Then, they were asked to rate each turn of the dialogue (See Figure 4 for an example of our user interface). The average work time was 17.28 minutes, and workers were paid $3 for each HIT. Two independent workers completed each HIT for inter-annotator agreement calculation, resulting in a total of 480 annotations (3,474 turns rated across 120 conversations with 2 versions of backstory and 2 annotators per conversation). All annotation templates and discussion of quality controls are included in the Appendix.
|
| 207 |
+
|
| 208 |
+
We recruited 91 human annotators/participants from Mechanical Turk, with 55 participants in the negative backstory condition and 36 participants in the positive backstory condition. Participants were excluded from the other condition using MTurk qualifications to ensure clean between-subjects study design.
|
| 209 |
+
|
| 210 |
+
Measures. Numerous psychological literature indicates that personal experience influences how people empathize with one another (Pillemer, 1992; Fabi et al., 2019; Weisz and Zaki, 2018; Decety and Lamm, 2006) as well as how people justify intent or actions of a narrator (Keen, 2006; Gueorguieva et al., 2023). Furthermore, empathy, empathic concern, and sympathy are directly tied to relational or interaction quality (Morelli et al., 2015, 2017; Gould and MacNeil Gautreau, 2014). As such, for conversation-level measures, we assess (1) level of sympathy/personally relating to the character (Waldron and Kelley, 2005), (2) understandability of the character's way of communicating (McAdams, 2001), (3) positive or negative underlying intention towards the other character (4) whether the character was overall a problematic communicator or not, and finally (5) believability of the dialogue and backstory (Zhou et al., 2023a). For turn-level metrics, we gather (1) the extent to which a turn is problematic or not, which we define as potential harm towards the listener (2) fine-grained labels of NVC or VC communication types depending on problematic rating (3) how the turn will make the other character feel if they heard the statement (better/worse/the same).
|
| 211 |
+
|
| 212 |
+
# 5.1 Believability
|
| 213 |
+
|
| 214 |
+
For believability of our simulated conversations and backstories across 2 independent annotators, we find agreement of 0.68 using Free Marginal Kappa, which calculates inter-rater agreement
|
| 215 |
+
|
| 216 |
+

|
| 217 |
+
Figure 5: Distribution of believability scores for simulated dialogues
|
| 218 |
+
|
| 219 |
+
<table><tr><td>Condition</td><td>Metric</td><td>PPA</td><td>KA</td></tr><tr><td rowspan="2">NEG</td><td>Turn is problematic (4 point)</td><td>.80</td><td>.44</td></tr><tr><td>Emotional impact (3 point)</td><td>.79</td><td>.46</td></tr><tr><td rowspan="2">POS</td><td>Turn is problematic (4 point)</td><td>.78</td><td>.34</td></tr><tr><td>Emotional impact (3 point)</td><td>.78</td><td>.42</td></tr></table>
|
| 220 |
+
|
| 221 |
+
Table 1: Inter-annotator agreement across backstory conditions for turn-level annotations (PPA = pairwise percent agreement, KA = Krippendorff's Alpha).
|
| 222 |
+
|
| 223 |
+
when datasets are imbalanced. As shown in Figure 5, $87.8\%$ of annotators agree or strongly agree that conversations and backstories are believable. For example, participants mention realistic conversations based on the scenario, relationship or emotional tone of the dialogue: "Many siblings grow up with different personalities and sometimes one sibling is mature and the other isn't. This type of conflict can happen when both characters feel the need to be right." Another participant shared, "The pivot in the conversation feels a little awkward, but I could imagine people talking this way depending on their mood or mental state." For conversations that weren't believable, participants mentioned occasional divergences between the relationship and tone of the dialogue. For example, "With how emotionally charged the exchange was, I don't think the last response from Gwen would be realistic if it weren't sarcastic." In subsequent experiments, note that we filter only on believable stories to ensure validity of our results.
|
| 224 |
+
|
| 225 |
+
# 5.2 Inter-Annotator Agreement
|
| 226 |
+
|
| 227 |
+
Table 1 shows moderate agreement between annotators on whether a turn is problematic or not and whether a turn will make the other character feel better, the same, or worse. Overall, we generally observe that agreement scores are higher for the negative backstory condition, which we hypothesize can be due to variations in subjective interpretation or cognitive dissonance when a conflict occurs between a supposedly positive relationship.
|
| 228 |
+
|
| 229 |
+
# 6 Effect of Backstory Personalization on Human Participants
|
| 230 |
+
|
| 231 |
+
We quantitatively assess how positive vs negative backstory impacts human perception of conflict in dialogue. We use independent t-tests to compare outcome metrics, as we identify that data is normally distributed. Recall that our positive/negative backstories make a chosen character less or more problematic, respectively. To make the backstory polarity direction consistent, the results we report focus on changes in outcome metrics for the chosen character.
|
| 232 |
+
|
| 233 |
+
As shown in Figure 6, we found that providing relationship backstory significantly shifted participant perceptions of communication quality. Specifically, for negative backstories, characters were rated as more problematic both at the turn level $(t(1083) = 3.73$ , $p = 0.0002$ , Cohen's $d = 0.23)$ and at the overall conversation level $(t(232) = 4.18$ , $p < 0.0001$ , Cohen's $d = 0.55)$ . Additionally, with negative backstories, participants found the character's communication less understandable $(t(232) = -4.70$ , $p < 0.0001$ , Cohen's $d = -0.61)$ and interpreted their behavior as expressing more negative intent towards the other character $(t(232) = -4.95$ , $p < 0.0001$ , Cohen's $d = -0.65)$ . Notably, participants expressed significantly lower sympathy towards the character when a negative backstory was present $(t(232) = -7.05$ , $p < 0.0001$ , Cohen's $d = -0.92)$ , indicating a strong effect of relationship context on social judgments. These findings demonstrate that backstory personalization meaningfully influences how human raters interpret conflict.
|
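For illustration, a comparison of this form can be run with SciPy as sketched below; the data are randomly generated placeholders, not the study's ratings.

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
# Placeholder data: per-turn "problematic" ratings under each backstory condition.
neg = rng.normal(loc=2.6, scale=0.9, size=540)
pos = rng.normal(loc=2.4, scale=0.9, size=545)

t, p = stats.ttest_ind(neg, pos)                         # independent-samples t-test
pooled_sd = np.sqrt(((len(neg) - 1) * neg.var(ddof=1) +
                     (len(pos) - 1) * pos.var(ddof=1)) /
                    (len(neg) + len(pos) - 2))
cohens_d = (neg.mean() - pos.mean()) / pooled_sd         # standardized effect size
print(f"t({len(neg) + len(pos) - 2}) = {t:.2f}, p = {p:.4f}, d = {cohens_d:.2f}")
```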
| 234 |
+
|
| 235 |
+
Next, we perform mediation analysis using structural equation modeling to understand the effect sympathy towards a character has on perceived problematic-ness of that character's communication. We hypothesize that backstory variant can influence sympathy towards a character, and that higher sympathy will mitigate how problematic the character's utterances are. As shown in Figure 7, we find that sympathy mediates the relationship between backstory type and how problematic the character is perceived. Specifically, participants reported more sympathy toward characters with a positive backstory $(\beta_{1} = 0.31)$ , and greater sympathy was associated with lower ratings of problematic communication $(\beta_{2} = -0.46)$ .
|
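A simplified sketch of such a mediation analysis is shown below, using two OLS regressions on placeholder data; the paper uses structural equation modeling, so this only approximates the reported path coefficients, which appear here merely as simulation settings.

```python
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

rng = np.random.default_rng(0)
n = 234
backstory = rng.integers(0, 2, n)                          # 1 = positive backstory (placeholder)
sympathy = 0.31 * backstory + rng.normal(0, 1, n)          # simulated path a
problematic = -0.46 * sympathy + rng.normal(0, 1, n)       # simulated path b
df = pd.DataFrame(dict(backstory=backstory, sympathy=sympathy, problematic=problematic))

a = smf.ols("sympathy ~ backstory", df).fit()              # exposure -> mediator
b = smf.ols("problematic ~ sympathy + backstory", df).fit()  # mediator -> outcome, controlling exposure
indirect = a.params["backstory"] * b.params["sympathy"]    # indirect (mediated) effect
print(f"a = {a.params['backstory']:.2f}, b = {b.params['sympathy']:.2f}, indirect = {indirect:.2f}")
```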
| 236 |
+
|
| 237 |
+

|
| 238 |
+
Figure 6: Human study results comparing impact of neg/pos backstory on perception of conflict and characters.
|
| 239 |
+
|
| 240 |
+
<table><tr><td>Model</td><td>Backstory</td><td>Condition</td><td>F1 (Problematic)</td><td>F1 (Emotional impact)</td></tr><tr><td rowspan="6">GPT-4o</td><td rowspan="3">positive</td><td>turn</td><td>43.57</td><td>57.21</td></tr><tr><td>turn + convo</td><td>45.96**</td><td>56.30</td></tr><tr><td>turn + convo + backstory</td><td>48.07</td><td>55.08</td></tr><tr><td rowspan="3">negative</td><td>turn</td><td>41.59</td><td>59.02</td></tr><tr><td>turn + convo</td><td>42.98</td><td>58.81</td></tr><tr><td>turn + convo + backstory</td><td>45.56***</td><td>60.27**</td></tr><tr><td rowspan="6">LLaMA-4</td><td rowspan="3">positive</td><td>turn</td><td>42.66</td><td>49.53</td></tr><tr><td>turn + convo</td><td>48.08*</td><td>55.22***</td></tr><tr><td>turn + convo + backstory</td><td>49.38</td><td>54.82</td></tr><tr><td rowspan="3">negative</td><td>turn</td><td>43.83</td><td>52.54</td></tr><tr><td>turn + convo</td><td>52.75***</td><td>58.24***</td></tr><tr><td>turn + convo + backstory</td><td>37.38***</td><td>61.38*</td></tr><tr><td rowspan="6">Gemini-1.5-pro</td><td rowspan="3">positive</td><td>turn</td><td>42.73</td><td>55.26</td></tr><tr><td>turn + convo</td><td>45.99**</td><td>57.89**</td></tr><tr><td>turn + convo + backstory</td><td>46.54</td><td>58.64</td></tr><tr><td rowspan="3">negative</td><td>turn</td><td>42.11</td><td>57.86</td></tr><tr><td>turn + convo</td><td>45.33***</td><td>60.62**</td></tr><tr><td>turn + convo + backstory</td><td>37.37***</td><td>48.25**</td></tr></table>
|
| 241 |
+
|
| 242 |
+
Table 2: F1 scores (in percentage) for predicting turn problematicness and emotional impact across models, conditions, and backstory types. Bold indicates the highest score in a model/backstory group; underline indicates the second highest. Significance stars denote statistical difference from the prior condition: *p < 0.05, **p < 0.01, ***p < 0.001.
|
| 243 |
+
|
| 244 |
+

|
| 245 |
+
Figure 7: Sympathy mediates the relationship between backstory type and whether the character is a problematic communicator or not.
|
| 246 |
+
|
| 247 |
+
# 7 LLMs for Detecting Conversational Breakdowns
|
| 248 |
+
|
| 249 |
+
Finally, we design controlled experiments to test RQ2, how varying levels of context impact the way models perceive conflict in conversation.
|
| 250 |
+
|
| 251 |
+
# 7.1 Tasks and Method
|
| 252 |
+
|
| 253 |
+
We assess how well LLMs perform on 2 tasks: (1) PROBLEMATIC DETECTION – predicting whether a turn is problematic or not (4-point Likert) and (2)
|
| 254 |
+
|
| 255 |
+
EMOTIONAL IMPACT – predicting whether a turn will make the other character feel better, worse, or the same (3 classes). We vary context using the following 3 conditions:
|
| 256 |
+
|
| 257 |
+
- C1: provide turn to rate alone
|
| 258 |
+
- C2: provide turn + full conversation
|
| 259 |
+
- C3: provide turn + full conversation + relationship backstory
|
| 260 |
+
|
| 261 |
+
Our experiments are conducted across 3 models: GPT-4o, Llama-4-Maverick-17B-128E-Instruct-FP8, and Gemini-1.5-pro. All models use a temperature of 0 for reproducibility. For evaluation, we obtain human gold labels by aggregating across the 2 annotators for each task, taking the average of Likert ratings within each backstory condition (positive/negative). We compute the F1 score between model outputs and human ratings.
|
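A minimal sketch of this evaluation step is shown below; rounding the averaged Likert ratings to the nearest class and using weighted F1 averaging are our assumptions, since the averaging scheme is not spelled out above.

```python
import numpy as np
from sklearn.metrics import f1_score

human_avg = np.array([1.5, 3.0, 2.0, 4.0, 1.0])   # mean of the two annotators (placeholder)
gold = np.rint(human_avg).astype(int)             # round to the nearest Likert point
model_pred = np.array([2, 3, 1, 4, 1])            # model outputs on the same turns (placeholder)

print("F1 =", f1_score(gold, model_pred, average="weighted"))
```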
| 262 |
+
|
| 263 |
+
# 7.2 Results and Discussion
|
| 264 |
+
|
| 265 |
+
Table 2 reports model performance across varying context conditions and relationship backstories. Overall, models performed comparably on the task of PROBLEMATIC DETECTION, with F1 scores ranging from 37.6 to 52.7 for 4 classes. Across all three models, we observed significant improvements from C1 (turn only) to C2 (turn + full conversation) regardless of backstory type, suggesting that access to the broader conversational context helps LLMs better assess whether a message is problematic. However, we found no significant improvement when adding relationship backstory (from C2 to C3) in the positive backstory condition for any model. Even more surprisingly, for the negative backstory condition, both LLaMA and Gemini showed decreases in performance when backstory was introduced, despite the additional context. This may be due to models overcorrecting or misinterpreting emotionally complex backstory cues as indicators of justified behavior, thereby mislabeling harmful speech as less problematic.
|
| 266 |
+
|
| 267 |
+
On the EMOTIONAL IMPACT prediction task, models again showed similar performance trends, with F1 scores ranging from 48.5 to 61.4 for 3 classes. Across all models, positive backstory did not lead to significant gains, suggesting that positive backstories may not provide enough discriminative information to shift a model's understanding of how a message affects the listener. In contrast, negative backstory led to significant improvements in prediction for GPT-4o and LLaMA, indicating that models may find it easier to predict emotional harm when the speaker is portrayed more negatively. However, Gemini-1.5-pro showed the opposite pattern, with decreased performance when negative backstory was added.
|
| 268 |
+
|
| 269 |
+
These findings collectively highlight that while additional context generally helps models detect problematic turns, the benefit of backstory is asymmetric: it aids detection when the backstory aligns clearly with harm (in the negative case), but can introduce noise depending on the model's sensitivity to nuanced relational dynamics.
|
| 270 |
+
|
| 271 |
+
Bias and Error Analysis Delving deeper into model performance results, we evaluate whether models are biased towards over- or under-predicting how problematic a statement is, given a particular backstory. To this end, we run the Wilcoxon signed-rank test to compare model predictions against human annotations.
|
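The paired test described here can be sketched as follows on placeholder data; the over-prediction offset is simulated only to illustrate the direction of the reported effect.

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(1)
human = rng.integers(1, 5, size=300).astype(float)          # human problematic ratings (1-4, placeholder)
model = np.clip(human + rng.normal(0.25, 0.8, 300), 1, 4)   # model skewed toward over-prediction

stat, p = stats.wilcoxon(model, human)    # paired, non-parametric comparison
delta_m = (model - human).mean()          # signed mean difference (positive = over-prediction)
print(f"W = {stat:.1f}, p = {p:.4g}, dM = {delta_m:+.2f}")
```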
| 272 |
+
|
| 273 |
+
On the PROBLEMATIC DETECTION task, we observe significant overprediction in the negative backstory condition for LLaMA $(p < 0.0001$ , $\Delta M = +0.25)$ and Gemini $(p < 0.0001$ , $\Delta M = +0.10)$ , suggesting that when a character is portrayed more negatively, these models tend to label their speech as more problematic than humans do. In contrast, GPT-4o shows no significant difference from human ratings in the negative condition $(p = 0.45)$ , indicating better calibration. In the positive backstory condition, both GPT-4o and Gemini slightly underpredict problematic turns compared to humans $(p < 0.001$ , $\Delta M = -0.07$ for GPT-4o; $p = 0.001$ , $\Delta M = -0.04)$ , while LLaMA significantly overpredicts problematic statements $(p < 0.001$ , $\Delta M = +0.11)$ .
|
| 274 |
+
|
| 275 |
+
On the EMOTIONAL IMPACT task, all models tend to overpredict emotional positivity, especially in the positive backstory condition: GPT-4o ( $p < 0.0001$ , $\Delta M = +0.12$ ), Gemini ( $p < 0.0001$ , $\Delta M = +0.08$ ), and LLaMA ( $p < 0.0001$ , $\Delta M = +0.08$ ). Interestingly, only Gemini reverses this pattern in the negative backstory condition, significantly underpredicting emotional positivity ( $p < 0.0001$ , $\Delta M = -0.07$ ), while GPT-4o and LLaMA continue to slightly overpredict how good the listener would feel.
|
| 276 |
+
|
| 277 |
+
These results suggest that while GPT-4o is the most consistent with human perception across both tasks, LLaMA is prone to strong overestimation in both problematic detection and emotional response. Gemini's behavior is more sensitive to backstory polarity, with notable shifts in prediction direction depending on whether a speaker is portrayed sympathetically or not; however, these shifts are more extreme than human annotators' ratings. Overall, our findings indicate that models might not be effectively leveraging relationship backstories to tailor understanding of conversational dynamics, in alignment with human perception. To further delve into these results, we include qualitative examples across models in Appendix E.
|
| 278 |
+
|
| 279 |
+
# 8 Conclusion
|
| 280 |
+
|
| 281 |
+
In this work, we introduce a novel framework, grounded in Nonviolent Communication Theory, for simulating and detecting communication breakdowns using relationship-contextualized LLMs. We contribute a dataset of 5,772 simulated conversations between familiar social partners with 11,544 relationship backstories, and validate its
|
| 282 |
+
|
| 283 |
+
realism and utility through a human study. Our findings demonstrate that backstory significantly shapes human judgments of conflict, and that this effect is mediated by sympathy. However, LLMs—while benefiting from conversational context—struggle to meaningfully integrate backstory information, often overestimating emotional positivity and problematicness in nuanced scenarios. These results underscore the gap between human and model reasoning in intimate interpersonal communication and call for future work in relationship-contextualized NLP systems. We hope that our findings advance future directions of LLMs as tools to meaningfully promote empathy and conflict resolution in the real world.
|
| 284 |
+
|
| 285 |
+
# Limitations
|
| 286 |
+
|
| 287 |
+
While our study demonstrates the importance of relationship backstory in shaping perceptions of conversational conflict, several limitations should be acknowledged.
|
| 288 |
+
|
| 289 |
+
Simulation-based data. Our dataset and evaluation pipeline offer a scalable and theory-informed way to study communication breakdowns, but the conversations are generated via simulation. Synthetic conversations may not fully capture the ecological validity of real-world interpersonal dynamics (Wang et al., 2025), and the NVC-based four-step generation procedure may impose a more structured progression of conflict than naturally occurs. Language models can mirror patterns of speech and emotion, but they lack lived experience, embodied context, and nuanced power dynamics. Thus, findings from our simulated dialogue corpus may not fully generalize to naturally occurring conflicts, where nonverbal cues, cultural context, and relationship history shape interpretation in more complex ways. However, consistent with prior work in social dialogue simulation (Bao et al., 2023; Chuang et al., 2024; Hu and Collier, 2024; Zhou et al., 2023a; Li et al., 2023), our generated conversations were deemed largely naturalistic by human raters, and obtaining large-scale datasets of intimate, conflict-laden conversations between loved ones remains infeasible due to ethical and privacy constraints. We view our work as a proof-of-concept testbed rather than a replica of real-world phenomena, and future work should explore methods to bridge the gap between simulation and authentic data, such as incorporating real-world pilot studies or mixed human-synthetic
|
| 290 |
+
|
| 291 |
+
evaluation designs (Finch and Choi, 2024).
|
| 292 |
+
|
| 293 |
+
Annotation and evaluation. Our validation relied on crowdsourced ratings of believability, following accepted practice in simulation-based dialogue studies. While we provided extensive guidelines and quality controls, believability captures whether a conversation could plausibly happen, not whether it is fully representative or authentic. Interrater agreement was moderate (Krippendorff's $\alpha = 0.34 - 0.46$ ), underscoring subjectivity in conflict judgments. We also focused our human study on a subset of key outcome measures (e.g., problematicness, sympathy, intention), leaving unexplored dimensions such as trust, perceived agency, or emotional volatility for future research. Automatic evaluation metrics on coherence and naturalness could also complement human ratings in future versions of the corpus.
|
| 294 |
+
|
| 295 |
+
Scope and generalizability. Our study is limited to single-modality (text) interactions, Western relationship contexts, and the English language. Cultural variation in conflict expression and resolution is well-documented (Tschacher et al., 2014), and expanding to cross-cultural, multilingual, and multimodal settings (e.g., tone, pitch, and gestures) remains important. Moreover, our focus was on detection of conflict rather than modeling effective responses to conflict. While this narrower scope was intentional, we see our work as a first step toward contextualized conflict response generation in intimate interpersonal domains.
|
| 296 |
+
|
| 297 |
+
# Ethical Implications
|
| 298 |
+
|
| 299 |
+
All studies conducted in this work were classified under Institutional Review Board (IRB) exemption status. While our work aims to enhance interpersonal understanding and mitigate conflict through AIMC, the use of simulated dialogues and backstories about emotionally sensitive relationships—such as those between romantic partners or family members—raises concerns around realism and potential misuse. Although we do not collect or model real personal data, generated dialogues might still resemble real-life situations and emotional dynamics. If deployed in real-world applications, such systems could be used to influence perceptions of others, shape interpretations of interpersonal interactions, or even manipulate emotional outcomes, especially in high-stakes or abusive relationships. It is crucial that such tools remain assistive rather than prescriptive, provid-
|
| 300 |
+
|
| 301 |
+
ing support while preserving user autonomy and avoiding overreach in delicate relational contexts.
|
| 302 |
+
|
| 303 |
+
Additionally, our use of backstory personalization may amplify or reduce perceptions of blame or sympathy toward certain characters. While this highlights the strength of our system in capturing nuanced human judgment, it also reflects the risks of modeling interpersonal conflict with biased or one-dimensional framing. Care must be taken to ensure that AI interventions do not reinforce harmful stereotypes, justify manipulative behaviors, or flatten complex social dynamics into reductive labels.
|
| 304 |
+
|
| 305 |
+
# Acknowledgements
|
| 306 |
+
|
| 307 |
+
We would like to thank all of our participants and teammates for their invaluable contributions to this project. Special thanks to Ashish Sharma and Shannon Shen for feedback on the project. This work was funded in part by DSO National Laboratories and supported by the Defense Advanced Research Projects Agency (DARPA) under Agreement No. HR00112490410.
|
| 308 |
+
|
| 309 |
+
# References
|
| 310 |
+
|
| 311 |
+
Adnan Ahmad, Stefan Hillmann, and Sebastian Möller. 2025. Simulating User Diversity in Task-Oriented Dialogue Systems using Large Language Models. ArXiv:2502.12813 [cs].
|
| 312 |
+
Lisa P. Argyle, Christopher A. Bail, Ethan C. Busby, Joshua R. Gubler, Thomas Howe, Christopher Rytting, Taylor Sorensen, and David Wingate. 2023. Leveraging AI for democratic discourse: Chat interventions can improve online political conversations at scale. Proceedings of the National Academy of Sciences, 120(41):e2311627120.
|
| 313 |
+
Jiajun Bao, Junjie Wu, Yiming Zhang, Eshwar Chandrasekharan, and David Jurgens. 2021. Conversations Gone Alright: Quantifying and Predicting Prosocial Outcomes in Online Conversations. In Proceedings of the Web Conference 2021, pages 1134-1145, Ljubljana Slovenia. ACM.
|
| 314 |
+
Jianzhu Bao, Rui Wang, Yasheng Wang, Aixin Sun, Yitong Li, Fei Mi, and Ruifeng Xu. 2023. A Synthetic Data Generation Framework for Grounded Dialogues. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 10866-10882, Toronto, Canada. Association for Computational Linguistics.
|
| 315 |
+
Yun-Shiuan Chuang, Agam Goyal, Nikunj Harlalka, Siddharth Suresh, Robert Hawkins, Sijia Yang, Dhavan Shah, Junjie Hu, and Timothy Rogers. 2024. Simulating Opinion Dynamics with Networks of
|
| 316 |
+
|
| 317 |
+
LLM-based Agents. In Findings of the Association for Computational Linguistics: NAACL 2024, pages 3326-3346, Mexico City, Mexico. Association for Computational Linguistics.
|
| 318 |
+
Jean Decety and Claus Lamm. 2006. Human Empathy Through the Lens of Social Neuroscience. The Scientific World Journal, 6:1146-1163. Publisher: Hindawi.
|
| 319 |
+
Jonathan Dvash and Simone G. Shamay-Tsoory. 2014. Theory of Mind and Empathy as Multidimensional Constructs: Neurological Foundations. Topics in Language Disorders, 34(4):282-295.
|
| 320 |
+
Sarah Fabi, Lydia Anna Weber, and Hartmut Leuthold. 2019. Empathic concern and personal distress depend on situational but not dispositional factors. PLoS ONE, 14(11):e0225102-e0225102. Publisher: Public Library of Science.
|
| 321 |
+
James D. Finch and Jinho D. Choi. 2024. Diverse and Effective Synthetic Data Generation for Adaptable Zero-Shot Dialogue State Tracking. In Findings of the Association for Computational Linguistics: EMNLP 2024, pages 12527-12544, Miami, Florida, USA. Association for Computational Linguistics.
|
| 322 |
+
Julie Fitness and Garth J. O. Fletcher. 1993. Love, hate, anger, and jealousy in close relationships: A prototype and cognitive appraisal analysis. Journal of Personality and Social Psychology, 65(5):942-958. Place: US Publisher: American Psychological Association.
|
| 323 |
+
Daniel Fried, Nicholas Tomlin, Jennifer Hu, Roma Patel, and Aida Nematzadeh. 2023. Pragmatics in Language Grounding: Phenomena, Tasks, and Modeling Approaches.
|
| 324 |
+
Lisa Gaelick, Galen Bodenhausen, and Jr Wyer. 1985. Emotional Communication in Close Relationships. Journal of Personality and Social Psychology, 49:1246-1265.
|
| 325 |
+
Odette N. Gould and Sylvia MacNeil Gautreau. 2014. Empathy and Conversational Enjoyment in Younger and Older Adults. Experimental Aging Research, 40(1):60-80. Publisher: Routledge. https://doi.org/10.1080/0361073X.2014.857559.
|
| 326 |
+
Bhanu Prakash Reddy Guda, Aparna Garimella, and Niyati Chhaya. 2021. EmpathBERT: A BERT-based Framework for Demographic-aware Empathy Prediction. In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume, pages 3072-3079, Online. Association for Computational Linguistics.
|
| 327 |
+
Emma S Gueorguieva, Tatiana Lau, Eliana Hadjiandreou, and Desmond C Ong. 2023. The Language of an Empathy-Inducing Narrative.
|
| 328 |
+
Jeffrey T Hancock, Mor Naaman, and Karen Levy. 2020. AI-Mediated Communication: Definition, Research Agenda, and Ethical Considerations. Journal of
|
| 329 |
+
|
| 330 |
+
Computer-Mediated Communication, 25(1):89-100.
|
| 331 |
+
Thomas Hartvigsen, Saadia Gabriel, Hamid Palangi, Maarten Sap, Dipankar Ray, and Ece Kamar. 2022. ToxiGen: A large-scale machine-generated dataset for adversarial and implicit hate speech detection. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 3309-3326, Dublin, Ireland. Association for Computational Linguistics.
|
| 332 |
+
Yu Hou, Hal Daumé III, and Rachel Rudinger. 2025. Language Models Predict Empathy Gaps Between Social In-groups and Out-groups. ArXiv:2503.01030 [cs].
|
| 333 |
+
Tiancheng Hu and Nigel Collier. 2024. Quantifying the Persona Effect in LLM Simulations. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 10289-10307, Bangkok, Thailand. Association for Computational Linguistics.
|
| 334 |
+
Ellen A. Isaacs and Herbert H. Clark. 1987. References in conversation between experts and novices. Journal of Experimental Psychology: General, 116(1):26-37.
|
| 335 |
+
Hifza Javed, Weinan Wang, Affan Bin Usman, and Nawid Jamali. 2024. Modeling interpersonal perception in dyadic interactions: towards robot-assisted social mediation in the real world. Frontiers in Robotics and AI, 11:1410957.
|
| 336 |
+
Hang Jiang, Xiajie Zhang, Xubo Cao, Cynthia Breazeal, Deb Roy, and Jad Kabbara. 2024. PersonaLLM: Investigating the Ability of Large Language Models to Express Personality Traits. In Findings of the Association for Computational Linguistics: NAACL 2024, pages 3605-3627, Mexico City, Mexico. Association for Computational Linguistics.
|
| 337 |
+
Gauri Kambhatla, Matthew Lease, and Ashwin Rajadesingan. 2024. Promoting Constructive Deliberation: Reframing for Receptiveness. In Findings of the Association for Computational Linguistics: EMNLP 2024, pages 5110-5132, Miami, Florida, USA. Association for Computational Linguistics.
|
| 338 |
+
Kateryna Kasianenko, Shima Khanehzar, Stephen Wan, Ehsan Dehghan, and Axel Bruns. 2024. Detecting Online Community Practices with Large Language Models: A Case Study of Pro-Ukrainian Publics on Twitter. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 20106-20135, Miami, Florida, USA. Association for Computational Linguistics.
|
| 339 |
+
Suzanne Keen. 2006. A Theory of Narrative Empathy. Narrative, 14(3):207-236. Publisher: Ohio State University Press.
|
| 340 |
+
Sunwoong Kim, Jongho Jeong, Jin Soo Han, and Donghyuk Shin. 2024. LLM-Mirror: A Generated-Persona Approach for Survey Pre-Testing. ArXiv:2412.03162 [cs].
|
| 341 |
+
|
| 342 |
+
Oliver Li, Mallika Subramanian, Arkadiy Saakyan, Sky CH-Wang, and Smaranda Muresan. 2023. NormDial: A Comparable Bilingual Synthetic Dialog Dataset for Modeling Social Norm Adherence and Violation. arXiv:2310.14563 [cs].
|
| 343 |
+
Dan P. McAdams. 2001. The psychology of life stories. Review of General Psychology, 5(2):100-122. Place: US Publisher: Educational Publishing Foundation.
|
| 344 |
+
Suhong Moon, Marwa Abdulhai, Minwoo Kang, Joseph Suh, Widyadewi Soedarmadji, Eran Kohen Behar, and David M. Chan. 2024. Virtual Personas for Language Models via an Anthology of Backstories. arXiv:2407.06576 [cs].
|
| 345 |
+
Sylvia A. Morelli, Matthew D. Lieberman, and Jamil Zaki. 2015. The Emerging Study of Positive Empathy. Social and Personality Psychology Compass, 9(2):57-68.
|
| 346 |
+
Sylvia A. Morelli, Desmond C. Ong, Rucha Makati, Matthew O. Jackson, and Jamil Zaki. 2017. Empathy and well-being correlate with centrality in different social networks. Proceedings of the National Academy of Sciences, 114(37):9843-9847. Publisher: Proceedings of the National Academy of Sciences.
|
| 347 |
+
David B. Pillemer. 1992. Remembering personal circumstances: A functional analysis. In Affect and accuracy in recall: Studies of "flashbulb" memories, Emory symposia in cognition, 4., pages 236-264. Cambridge University Press, New York, NY, US.
|
| 348 |
+
Sílvia Costa Pinto and Maria Nascimento Cunha. 2023. Nonviolent Communication - A Literature Review. 2(1).
|
| 349 |
+
Marshall B. Rosenberg and Deepak Chopra. 2015. Nonviolent Communication: A Language of Life: Life-Changing Tools for Healthy Relationships. PuddleDancer Press. Google-Books-ID: A3qACgAAQBAJ.
|
| 350 |
+
Rachel Rudinger, Vered Shwartz, Jena D. Hwang, Chandra Bhagavatula, Maxwell Forbes, Ronan Le Bras, Noah A. Smith, and Yejin Choi. 2020. Thinking Like a Skeptic: Defeasible Inference in Natural Language. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 4661-4675, Online. Association for Computational Linguistics.
|
| 351 |
+
Maarten Sap, Saadia Gabriel, Lianhui Qin, Dan Jurafsky, Noah A. Smith, and Yejin Choi. 2020. Social Bias Frames: Reasoning about Social and Power Implications of Language. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 5477-5490, Online. Association for Computational Linguistics.
|
| 352 |
+
Matthias Schurz, Joaquim Radua, Matthias G. Tholen, Lara Maliske, Daniel S. Margulies, Rogier B. Mars, Jerome Sallet, and Philipp Kanske. 2021. Toward
|
| 353 |
+
|
| 354 |
+
a hierarchical model of social cognition: A neuroimaging meta-analysis and integrative review of empathy and theory of mind. Psychological Bulletin, 147(3):293-327.
|
| 355 |
+
Ashish Sharma, Inna W. Lin, Adam S. Miner, David C. Atkins, and Tim Althoff. 2023. Human-AI collaboration enables more empathic conversations in text-based peer-to-peer mental health support. Nature Machine Intelligence, 5(1):46-57. Number: 1 Publisher: Nature Publishing Group.
|
| 356 |
+
Jocelyn Shen, Joel Mire, Hae Won Park, Cynthia Breazeal, and Maarten Sap. 2024. HEART-felt Narratives: Tracing Empathy and Narrative Style in Personal Stories with LLMs. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 1026-1046, Miami, Florida, USA. Association for Computational Linguistics.
|
| 357 |
+
Jocelyn Shen, Maarten Sap, Pedro Colon-Hernandez, Hae Park, and Cynthia Breazeal. 2023. Modeling Empathic Similarity in Personal Narratives. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 6237-6252, Singapore. Association for Computational Linguistics.
|
| 358 |
+
Che-Wei Tsai, Yen-Hao Huang, Tsu-Keng Liao, Didier Fernando Salazar Estrada, Retnani Latifah, and Yi-Shin Chen. 2024. Leveraging conflicts in social media posts: Unintended offense dataset. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 4512-4522, Miami, Florida, USA. Association for Computational Linguistics.
|
| 359 |
+
Wolfgang Tschacher, Georg M. Rees, and Fabian Ramseyer. 2014. Nonverbal synchrony and affect in dyadic interactions. Frontiers in Psychology, 5:1323.
|
| 360 |
+
Bertie Vidgen, Dong Nguyen, Helen Margetts, Patricia Rossini, and Rebekah Tromble. 2021. Introducing CAD: the Contextual Abuse Dataset. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 2289-2303, Online. Association for Computational Linguistics.
|
| 361 |
+
Vincent R. Waldron and Douglas L. Kelley. 2005. Forgiving communication as a response to relational transgressions. Journal of Social and Personal Relationships, 22(6):723-742. Place: US Publisher: Sage Publications.
|
| 362 |
+
Angelina Wang, Jamie Morgenstern, and John P. Dickerson. 2025. Large language models that replace human participants can harmfully misportray and flatten identity groups. Nature Machine Intelligence, pages 1-12. Publisher: Nature Publishing Group.
|
| 363 |
+
Erika Weisz and Jamil Zaki. 2018. Motivated empathy: a social neuroscience perspective. Current Opinion in Psychology, 24:67-71.
|
| 364 |
+
|
| 365 |
+
Thalia Wheatley, Mark A. Thornton, Arjen Stolk, and Luke J. Chang. 2023. The Emerging Science of Interacting Minds. Perspectives on Psychological Science, page 17456916231200177. Publisher: SAGE Publications Inc.
|
| 366 |
+
Akhila Yerukola, Saujas Vaduguru, Daniel Fried, and Maarten Sap. 2024. Is the Pope Catholic? Yes, the Pope is Catholic. Generative Evaluation of Non-Literal Intent Resolution in LLMs. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 265-275.
|
| 367 |
+
Akhila Yerukola, Xuhui Zhou, Elizabeth Clark, and Maarten Sap. 2023. Don't Take This Out of Context!: On the Need for Contextual Models and Evaluations for Stylistic Rewriting. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 11419-11444, Singapore. Association for Computational Linguistics.
|
| 368 |
+
Haolan Zhan, Zhuang Li, Yufei Wang, Linhao Luo, Tao Feng, Xiaoxi Kang, Yuncheng Hua, Lizhen Qu, Lay-Ki Soon, Suraj Sharma, Ingrid Zukerman, Zhaleh Semnani-Azad, and Gholamreza Haffari. 2023. SocialDial: A Benchmark for Socially-Aware Dialogue Systems. ArXiv:2304.12026 [cs].
|
| 369 |
+
Justine Zhang, Jonathan P. Chang, Cristian Danescu-Niculescu-Mizil, Lucas Dixon, Yiqing Hua, Nithum Thain, and Dario Taraborelli. 2018. Conversations Gone Awry: Detecting Early Signs of Conversational Failure. ArXiv:1805.05345 [cs].
|
| 370 |
+
Xuhui Zhou, Zhe Su, Sophie Feng, Jiaxu Zhou, Jen-tse Huang, Hsien-Te Kao, Spencer Lynch, Svitlana Volkova, Tongshuang Sherry Wu, Anita Woolley, Hao Zhu, and Maarten Sap. 2025. SOTOPIA-S4: a user-friendly system for flexible, customizable, and large-scale social simulation.
|
| 371 |
+
Xuhui Zhou, Hao Zhu, Leena Mathur, Ruohong Zhang, Haofei Yu, Zhengyang Qi, Louis-Philippe Morency, Yonatan Bisk, Daniel Fried, Graham Neubig, and Maarten Sap. 2023a. SOTOPIA: Interactive Evaluation for Social Intelligence in Language Agents. arXiv:2310.11667 [cs].
|
| 372 |
+
Xuhui Zhou, Hao Zhu, Akhila Yerukola, Thomas Davidson, Jena D. Hwang, Swabha Swayamdipta, and Maarten Sap. 2023b. COBRA Frames: Contextual Reasoning about Effects and Harms of Offensive Statements. In Findings of the Association for Computational Linguistics: ACL 2023, pages 6294-6315, Toronto, Canada. Association for Computational Linguistics.
|
| 373 |
+
|
| 374 |
+
# A Prompts
|
| 375 |
+
|
| 376 |
+
# A.1 Relationship Plausibility
|
| 377 |
+
|
| 378 |
+
```txt
|
| 379 |
+
You are a worldbuilder. Given two detailed character profiles and a proposed relationship category ({relationship_category}), choose the ONE most plausible fine-grained relationship subtype from this list: {relationship_subcategory}. You must only choose from this list. If none are realistic, respond with plausible = false. Consider age, gender, and life circumstances when choosing. Do not, for example, assign a parent-child relationship if one person is younger than the other, or a romantic relationship if the age difference is extreme and implausible. Married couples must only be selected under the 'partner' category (4), not 'family' (5).
|
| 380 |
+
== PERSON A == {agent_1_data}
|
| 381 |
+
== PERSON B == {agent_2_data}
|
| 382 |
+
```
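
To make the placeholders concrete, the following is a minimal sketch of how this template might be filled before querying an LLM. The abbreviated template text, the helper name `build_plausibility_prompt`, and the JSON serialization of the profiles are illustrative assumptions, not details of the released pipeline.

```python
import json

# Hypothetical, abbreviated copy of the prompt above; the full instruction text would go here.
PLAUSIBILITY_TEMPLATE = (
    "You are a worldbuilder. Given two detailed character profiles and a proposed "
    "relationship category ({relationship_category}), choose the ONE most plausible "
    "fine-grained relationship subtype from this list: {relationship_subcategory}.\n"
    "== PERSON A == {agent_1_data}\n"
    "== PERSON B == {agent_2_data}"
)

def build_plausibility_prompt(category, subcategories, agent_1, agent_2):
    """Fill the template with the proposed category, its candidate subtypes, and both profiles."""
    return PLAUSIBILITY_TEMPLATE.format(
        relationship_category=category,
        relationship_subcategory=", ".join(subcategories),
        agent_1_data=json.dumps(agent_1),
        agent_2_data=json.dumps(agent_2),
    )
```
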
|
| 383 |
+
|
| 384 |
+
# A.2 Simulation Prompt (Non-Conflict)
|
| 385 |
+
|
| 386 |
+
```txt
|
| 387 |
+
Let's think step by step. Generate a 10 to maximum 15 turn conversation between {speaker} and {nonviolent-speaker} based on the given general scenario.
|
| 388 |
+
Make sure that the conversation is not overly negative OR overly positive. Keep the dialogues neutral and ambiguous, leaving opening for multiple meanings depending on the backstory of the characters.
|
| 389 |
+
For example, "You look better in the other dress" may be alright coming from a close, but honest friend, whereas it may come off conflict-inducing from a controlling and toxic romantic partner.
|
| 390 |
+
Conversely, "You should move on with your life" may be conflict-inducing coming from a rude and judgmental sibling, whereas it may be harmless coming from a caring friend who is worried after the other person's breakup.
|
| 391 |
+
Don't make the conversation turn into a conflict, just leaving open for interpretation. The conversation can be shorter than 15 turns if the characters decide to leave the conversation. Please vary the conversation length and diversify the length.
|
| 392 |
+
Scenario: {originalscenario}
|
| 393 |
+
```
|
| 394 |
+
```
|
| 395 |
+
Speakers: {agent_1_name} and {agent_2_name}
|
| 396 |
+
--- {agent_1_name} Profile --- {agent_1_data}
|
| 397 |
+
--- {agent_2_name} Profile --- {agent_2_data}
|
| 398 |
+
```
|
| 399 |
+
```
|
| 400 |
+
{agent_1_name} and {agent_2_name} have a {relationship_type}--{relationship_subtype} relationship, where {agent_1_name} is the {agent_1-role} of {agent_2_name}
|
| 401 |
+
```
|
| 402 |
+
|
| 403 |
+
# Important overall conversation guidelines:
|
| 404 |
+
|
| 405 |
+
1. The conversation should be contextualized to the scenario and the character profiles
|
| 406 |
+
|
| 407 |
+
2. The conversation should have a rise and fall, rather than repeating the same points over and over again.
|
| 408 |
+
|
| 409 |
+
3. The conversation does NOT need to have a resolution.
|
| 410 |
+
|
| 411 |
+
4. Use realistic emotional speech patterns - trailing off, pausing, short bursts.
|
| 412 |
+
|
| 413 |
+
5. Avoid sounding like a therapist or a robot. The conversation should sound human.
|
| 414 |
+
|
| 415 |
+
6. Use INFORMAL language.
|
| 416 |
+
|
| 417 |
+
7. Each turn should be short.
|
| 418 |
+
|
| 419 |
+
8. Do NOT keep referring to the other person's name (bad example: "John, you should...", "It's not like that, Mary"). In realistic dialogue, people often don't refer to each other's names.
|
| 420 |
+
|
| 421 |
+
9. Depending on the relationship, characters should use pet names or titles (e.g. "babe", "honey", "sweetie", "Mom", "Dad")
|
| 422 |
+
|
| 423 |
+
10. Remember, keep the dialogues neutral and ambiguous, leaving opening for multiple meanings depending on the backstory of the characters.
|
| 424 |
+
|
| 425 |
+
#####
|
| 426 |
+
|
| 427 |
+
```txt
|
| 428 |
+
////
|
| 429 |
+
Format the output as:
|
| 430 |
+
Turn #1
|
| 431 |
+
(speaker 1's first name): dialogue
|
| 432 |
+
```
|
| 433 |
+
|
| 434 |
+
```txt
|
| 435 |
+
Turn #2 (speaker 2's first name): dialogue
|
| 436 |
+
```
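
As a rough illustration of how generations in this "Turn #N" format could be read back into structured turns, the sketch below pairs each speaker label with its utterance. The regular expressions and the helper name are our own and are not taken from the paper's codebase.

```python
import re

TURN_HEADER = re.compile(r"^Turn #\d+\s*(.*)$")
SPEAKER_LINE = re.compile(r"^\(?([^():]+)\)?:\s*(.+)$")

def parse_turns(generation: str):
    """Collect (speaker, utterance) pairs, tolerating 'Turn #N' headers on their own
    line or fused with the speaker line."""
    turns = []
    for raw in generation.splitlines():
        line = raw.strip()
        header = TURN_HEADER.match(line)
        if header:
            line = header.group(1).strip()  # keep any text fused after the header
            if not line:
                continue
        match = SPEAKER_LINE.match(line)
        if match:
            speaker, utterance = match.groups()
            turns.append((speaker.strip(), utterance.strip()))
    return turns
```
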
|
| 437 |
+
|
| 438 |
+
# A.3 Simulation Prompt (Conflict)
|
| 439 |
+
|
| 440 |
+
Let's think step by step. Generate a 10 to maximum 15 turn conversation between {speaker} and {nonviolent-speaker}.
|
| 441 |
+
|
| 442 |
+
The conversation can be shorter than 15 turns if the characters decide to leave the conversation. Please vary the conversation length and diversify the length.
|
| 443 |
+
|
| 444 |
+
```txt
|
| 445 |
+
Scenario: {rewrittenscenario}
|
| 446 |
+
###
|
| 447 |
+
Speakers: {agent_1_name} and {agent_2_name}
|
| 448 |
+
```
|
| 449 |
+
|
| 450 |
+
```txt
|
| 451 |
+
---{agent_1_name} Profile ---{agent_1_data}
|
| 452 |
+
```
|
| 453 |
+
|
| 454 |
+
```txt
|
| 455 |
+
---{agent_2_name} Profile ---{agent_2_data}
|
| 456 |
+
```
|
| 457 |
+
|
| 458 |
+
```txt
|
| 459 |
+
>>>
|
| 460 |
+
{agent_1_name} and {agent_2_name} have a
|
| 461 |
+
{relationship_type}-{relationship_subtype}
|
| 462 |
+
relationship, where {agent_1_name} is the
|
| 463 |
+
{agent_1-role} of {agent_2_name}
|
| 464 |
+
```
|
| 465 |
+
|
| 466 |
+
##
|
| 467 |
+
|
| 468 |
+
#####
|
| 469 |
+
|
| 470 |
+
The characters should create conflict and communicate poorly with each other (whether intentionally or unintentionally). They should each choose only ONE of the most applicable conflict types to the given scenario:
|
| 471 |
+
|
| 472 |
+
1. Judgment - Definition: Assigns fault or labels someone as bad/wrong. Example: "You're such an idiot for doing that." (Moralistic judgment)
|
| 473 |
+
2. Comparison - Definition: Unfavorably contrasts a person to another, causing inferiority/shame. Example: "Your work isn't as good as X's work." "No one else is as dramatic as you"
|
| 474 |
+
3. Deflection of Responsibility - Definition: Denies ownership of one's actions or feelings; blames external forces. Example: "I hit you because you provoked me." "It's your fault I'm in a crappy mood" "I feel like you don't love me anymore"
|
| 475 |
+
4. Demand/Threat - Definition: Pressure or order with implied punishment or guilt if not obeyed. Example: "You must do this, or you'll be sorry." "You better fix your problem."
|
| 476 |
+
|
| 477 |
+
5. Deserve/Punitive - Definition: Uses "deserve," rewards, or punishment language to judge behavior. Example: "She messed up, so she deserves whatever happens to her."
|
| 478 |
+
|
| 479 |
+
# ##
|
| 480 |
+
|
| 481 |
+
Read through the overall conversation guidelines carefully. These are important.
|
| 482 |
+
|
| 483 |
+
1. Not every turn should be a conflict-inducing statement (ONLY 1-2 TURNS AT MOST FROM EACH CHARACTER)
|
| 484 |
+
2. The conflict should be extremely subtle, rather than overtly and obviously offensive.
|
| 485 |
+
3. Make sure the conflict statement is appropriate to the magnitude of the scenario (e.g. "Joe recently lost a friend", bad example: "Oh come on, it's not like you lost your mom")
|
| 486 |
+
4. The conversation should be contextualized to the scenario and the character profiles
|
| 487 |
+
5. The conflict should have a rise and fall, rather than repeating the same points over and over again.
|
| 488 |
+
6. Each character should respond to the other person's attacks without backing down.
|
| 489 |
+
7. Remember a conflict happens between TWO people. BOTH characters should be responsible for the conflict.
|
| 490 |
+
8. The conflict does NOT need to have a resolution -- it can be cut off in the middle.
|
| 491 |
+
9. Use realistic emotional speech patterns - trailing off, pausing, short bursts of anger.
|
| 492 |
+
10. Use INFORMAL language.
|
| 493 |
+
11. Avoid sounding like a therapist or a robot. The conversation should sound human.
|
| 494 |
+
12. Both characters should respond irrationally and emotionally
|
| 495 |
+
13. Each turn should be short.
|
| 496 |
+
14. Be creative!
|
| 497 |
+
15. Do NOT keep referring to the other person's name (bad example: "John, you should...", "It's not like that, Mary"). In realistic dialogue, people often don't refer to each other's names.
|
| 498 |
+
16. Depending on the relationship, characters should use pet names or titles (e.g. "babe", "honey", "sweetie", "Mom", "Dad")
|
| 499 |
+
|
| 500 |
+
# ##
|
| 501 |
+
|
| 502 |
+
# ##
|
| 503 |
+
|
| 504 |
+
Format the output as:
|
| 505 |
+
|
| 506 |
+
Turn #1
|
| 507 |
+
|
| 508 |
+
(speaker 1's first name): dialogue
|
| 509 |
+
|
| 510 |
+
Turn #2
|
| 511 |
+
|
| 512 |
+
(speaker 2's first name): dialogue
|
| 513 |
+
|
| 514 |
+
# A.4 Backstory Generation
|
| 515 |
+
|
| 516 |
+
Let's think step by step. We are analyzing a conversation between two people, {speaker} and {nonviolent-speaker} that just occurred.
|
| 517 |
+
|
| 518 |
+
You will generate TWO backstories, that will create opposing interpretations of the conversation that happened -- one will make the conflict language in the conversation MORE understandable, and one will make the language LESS acceptable.
|
| 519 |
+
|
| 520 |
+
To do this, you can:
|
| 521 |
+
|
| 522 |
+
(a) assign more fault to {speaker} in one story vs. more fault to {nonviolent-speaker} in the other story
|
| 523 |
+
(b) imply that one character is gaslighting or instilling guilt/negative feelings in the other person
|
| 524 |
+
(c) make the characters have an extremely toxic relationship under the hood vs. an extremely positive relationship
|
| 525 |
+
|
| 526 |
+
Example conversation:
|
| 527 |
+
|
| 528 |
+
Lina:
|
| 529 |
+
|
| 530 |
+
Mom, can I go to Maya's place this weekend? A few of us are getting together to watch movies and work on the group project.
|
| 531 |
+
|
| 532 |
+
Mel:
|
| 533 |
+
|
| 534 |
+
No. You've already been out once this week. You should spend more time at home with me.
|
| 535 |
+
|
| 536 |
+
Lina:
|
| 537 |
+
|
| 538 |
+
But I finished all my homework and it's not even late. I don't see why I can't go.
|
| 539 |
+
|
| 540 |
+
Mel:
|
| 541 |
+
|
| 542 |
+
I just don't like how often you're running around. It's not good for girls your age to be out all the time.
|
| 543 |
+
|
| 544 |
+
Mel:
|
| 545 |
+
|
| 546 |
+
I feel like you're always chasing fun instead of focusing on your future.
|
| 547 |
+
|
| 548 |
+
Specifically, create one OPPOSING backstory where the conversation is extremely conflict-REDUCING or very understandable due to their past/recent events.
|
| 549 |
+
|
| 550 |
+
To this end, you can (a) assign more fault to one character than the other (b) make one character's harsh speech more understandable or (c) imply that one character is gaslighting or instilling guilt/negative feelings in the other person.
|
| 551 |
+
|
| 552 |
+
Examples from the sample conversation:
|
| 553 |
+
|
| 554 |
+
Backstory one:
|
| 555 |
+
|
| 556 |
+
- Mel is actually a very toxic and controlling mother towards Lina.
|
| 557 |
+
|
| 558 |
+
Backstory two (opposite backstory):
|
| 559 |
+
|
| 560 |
+
- Lina has always lied to her mother about going out, and instead getting drunk with her friends. Mel is a very caring mother who is just concerned about her daughter going out.
|
| 561 |
+
|
| 562 |
+
- Mel recently experienced passing of a friend and wanted to spend time with her daughter.
|
| 563 |
+
|
| 564 |
+
Other examples: "You look better in the other dress" may be alright coming from a close, but honest friend, whereas it may come off conflict-inducing from a controlling and toxic romantic partner.
|
| 565 |
+
|
| 566 |
+
Conversely, "You should move on with your life" may be conflict-inducing coming from a rude and judgmental sibling, whereas it may be harmless coming from a caring friend who is worried after the other person's breakup.
|
| 567 |
+
|
| 568 |
+
Important backstory guidelines:
|
| 569 |
+
|
| 570 |
+
1. Make sure the backstory reads as an actual narrative with flow, rather than a list of attributes. For example, instead of stating a number of how frequently they interact, show it through the story.
|
| 571 |
+
2. Do NOT reference the scenario or conversation in the backstory. The scenario happened AFTER the backstory.
|
| 572 |
+
4. Be as creative as possible
|
| 573 |
+
5. Do not simply state what the relationship between the characters is like, but SHOW through narrative and life events.
|
| 574 |
+
6. Each backstory should be at most a paragraph.
|
| 575 |
+
|
| 576 |
+
This conversation happened in this scenario: {originalscenario}
|
| 577 |
+
|
| 578 |
+
##
|
| 579 |
+
|
| 580 |
+
Speakers: {agent_1_name} and {agent_2_name}
|
| 581 |
+
|
| 582 |
+
---{agent_1_name} Profile ---{agent_1_data}
|
| 583 |
+
|
| 584 |
+
---{agent_2_name} Profile ---{agent_2_data}
|
| 585 |
+
|
| 586 |
+
#
|
| 587 |
+
|
| 588 |
+
{agent_1_name} and {agent_2_name} have a {relationship_type}-{relationship_subtype} relationship, where {agent_1_name} is the {agent_1-role} of {agent_2_name}
|
| 589 |
+
|
| 590 |
+
##
|
| 591 |
+
|
| 592 |
+
Conversation: {conversation}
|
| 593 |
+
|
| 594 |
+
Output the result in the following **JSON format**:
|
| 595 |
+
|
| 596 |
+
"backstory_one": [BACKSTORY WITH ONE INTERPRETATION OF THE CONVERSATION], "backstory_two": [BACKSTORY WITH EXTREMELY OPPOSITE INTERPRETATION OF THE CONVERSATION]
|
| 597 |
+
|
| 598 |
+
}
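
A small sketch (our own illustration, assuming the model reply may wrap the JSON object in extra text or code fences) of how the two opposing backstories could be recovered from a response in this format; the helper name is hypothetical.

```python
import json
import re

def extract_backstories(response_text: str):
    """Pull backstory_one / backstory_two out of a (possibly fenced) JSON response."""
    match = re.search(r"\{.*\}", response_text, flags=re.DOTALL)
    if match is None:
        raise ValueError("No JSON object found in the model response")
    data = json.loads(match.group(0))
    return data["backstory_one"], data["backstory_two"]
```
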
|
| 599 |
+
|
| 600 |
+
# A.5 Model Experiment Prompts
|
| 601 |
+
|
| 602 |
+
PROBLEMATIC_RATING_INSTRUCTIONS = """Rate if the turn (CONVERSATION TURN TO RATE) was problematic in terms of how it was communicated. Assess the overall tone and potential harm of the message towards the other person. Use the scale from:
|
| 603 |
+
1: "Not problematic at all"
|
| 604 |
+
2: "Not really problematic"
|
| 605 |
+
3: "Somewhat problematic"
|
| 606 |
+
4: "Very problematic"
|
| 607 |
+
|
| 608 |
+
[Examples]
|
| 609 |
+
|
| 610 |
+
- "Thanks for doing your part!" $\rightarrow$ 1 (Encouraging and affirming)
|
| 611 |
+
- "You need to try harder." $\rightarrow$ 3 (Directive tone with some pressure)
|
| 612 |
+
- "You're such an idiot." $\rightarrow$ 4 (Harsh insult with no constructive intent)
|
| 613 |
+
|
| 614 |
+
{backstoryInstruction}
|
| 615 |
+
|
| 616 |
+
DO NOT OUTPUT YOUR REASONING PROCESS, just output the result as a JSON object like below. Return just the JSON object.
|
| 617 |
+
|
| 618 |
+
{"response": a single number (for example, 1 if "not problematic at all" or 4 if "very problematic")
|
| 619 |
+
}
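
For illustration, a minimal sketch of how a rating instruction like the one above might be combined with the optional backstory block and the model's single-number JSON answer parsed. `query_model` is a hypothetical stand-in for whichever LLM client is used, and the slot-filling details are assumptions rather than the paper's exact implementation.

```python
import json

def rate_turn(instructions: str, backstory_block: str, turn: str, query_model) -> int:
    """Fill the {backstoryInstruction} slot, append the turn to rate, and parse the rating."""
    prompt = instructions.replace("{backstoryInstruction}", backstory_block)
    prompt += f"\n\nCONVERSATION TURN TO RATE:\n{turn}"
    raw = query_model(prompt)  # e.g. a chat-completion call returning a JSON string
    return int(json.loads(raw)["response"])  # 1 ("not problematic at all") to 4 ("very problematic")
```
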
|
| 620 |
+
|
| 621 |
+
PROBLEMATICTYPES_INSTRUCTIONS = """What types of problematic communication markers were used in the provided turn (CONVERSATION TURN TO RATE)? Identify which harmful language types are present in the message. Select all that apply. Even if there is not enough information, do your best.
|
| 622 |
+
|
| 623 |
+
Statement Type | Definition | Example
|
| 624 |
+
|
| 625 |
+
- (1) Judgment: Assigns fault - "You're just a selfish person."
|
| 626 |
+
- (2) Comparison: Unfavorably contrasts - "No one else is as dramatic as you."
|
| 627 |
+
- (3) Deflection of Responsibility: Blames others - "You got mad first, that's why I'm like this."
|
| 628 |
+
- (4) Demand / Threat - "You better do this, or else."
|
| 629 |
+
- (5) Deserve / Punitive - "She messed up, so she deserves it."
|
| 630 |
+
|
| 631 |
+
[Examples]
|
| 632 |
+
|
| 633 |
+
- "You're so much more insecure than everyone else." $\rightarrow$ [2]
|
| 634 |
+
- "I'm sorry I got angry, you're just too emotional." $\rightarrow$ [1, 3]
|
| 635 |
+
|
| 636 |
+
{backstoryInstruction}
|
| 637 |
+
|
| 638 |
+
DO NOT OUTPUT YOUR REASONING PROCESS, just output the result as a JSON object like below. Return just the JSON object. {{
|
| 639 |
+
|
| 640 |
+
"response": list of numbers (e.g., [1, 2])
|
| 641 |
+
|
| 642 |
+
```txt
|
| 643 |
+
}
|
| 644 |
+
```
|
| 645 |
+
|
| 646 |
+
NONPROBLEMATICTYPES_INSTRUCTIONS = """What types of not-problematic communication markers are present in the provided turn (CONVERSATION TURNS TO RATE)? Select all that apply. Even if there is not enough information, do your best.
|
| 647 |
+
|
| 648 |
+
Statement Type | Definition | Example
|
| 649 |
+
|
| 650 |
+
- (1) Neutral Observation - "You didn't wash the dishes."
|
| 651 |
+
- (2) Feeling Statement - "I feel anxious about the meeting."
|
| 652 |
+
- (3) Need Statement - "I need some quiet time."
|
| 653 |
+
- (4) Request - "Could you please lower the volume?"
|
| 654 |
+
- (5) Empathic / Understanding - "Are you feeling upset?"
|
| 655 |
+
|
| 656 |
+
[Examples]
|
| 657 |
+
|
| 658 |
+
- "I noticed you didn't put the dishes away." $\rightarrow$ [1]
|
| 659 |
+
- "I feel really hurt when you don't help." → [2]
|
| 660 |
+
|
| 661 |
+
- "Could you maybe try helping unpacking?" → [4]
|
| 662 |
+
|
| 663 |
+
{backstoryInstruction}
|
| 664 |
+
|
| 665 |
+
DO NOT OUTPUT YOUR REASONING PROCESS, just output the result as a JSON object like below. Return just the JSON object.
|
| 666 |
+
|
| 667 |
+
{ "response":list of numbers (e.g., [2, 4]) }
|
| 668 |
+
|
| 669 |
+
LISTENER_IMPACT_INSTRUCTIONS = """How would the provided conversation turn (CONVERSATION TURN TO RATE) make the other person feel? Choose one:
|
| 670 |
+
|
| 671 |
+
(1) Worse / (2) The Same / (3) Better
|
| 672 |
+
|
| 673 |
+
[Examples]
|
| 674 |
+
|
| 675 |
+
- "You're always like this." $\rightarrow$ 1
|
| 676 |
+
- "Maybe let's talk later?" $\rightarrow$ 2
|
| 677 |
+
- "Thanks for being honest." $\rightarrow$ 3
|
| 678 |
+
|
| 679 |
+
{backstoryInstruction}
|
| 680 |
+
|
| 681 |
+
DO NOT OUTPUT YOUR REASONING PROCESS, just output the result as a JSON object like below. Return just the JSON object.
|
| 682 |
+
|
| 683 |
+
{""response":a single number (e.g.,1,2,or 3)
|
| 684 |
+
}
|
| 685 |
+
|
| 686 |
+
backstoryInstruction = """Now consider how RELATIONSHIP BACKSTORY might change interpretation:
|
| 687 |
+
|
| 688 |
+
[Examples with backstory]
|
| 689 |
+
|
| 690 |
+
- "You should spend more time at home with me."
|
| 691 |
+
|
| 692 |
+
$\rightarrow$ (4) Very problematic if the speaker has been emotionally controlling.
|
| 693 |
+
$\rightarrow$ (2) Not really problematic if the speaker is grieving and missing their partner.
|
| 694 |
+
|
| 695 |
+
- "You should move on with your life."
|
| 696 |
+
|
| 697 |
+
# Short instructions & Consent Form
|
| 698 |
+
|
| 699 |
+
Summary: This research study aims to study realistic social interactions across different groups of participants with different profiles and social goals. The MIT Media Lab is funding the study.
|
| 700 |
+
|
| 701 |
+
Background: Here at Massachusetts Institute of Technology's Media Lab, we're really interested in figuring out how well AI systems can simulate realistic social situations. Our work is dedicated to bridging the gap between technology and social collaborative interactions, ultimately paving the way for more proficient and pro-social AI.
|
| 702 |
+
|
| 703 |
+
Participation: You must be at least 18 years old. Participation is voluntary. You may apply for participation by submitting the research activity. You may print a copy of this consent form for your records.
|
| 704 |
+
|
| 705 |
+
Data collection & sharing: We will not ask you for your name, and we will not ask you to use a computer. We will only ask you for the best of our extent. We will securely store the data on our servers and only share with qualified researchers. If you later decide that we should be included in this study, please email us to exclude your work.
|
| 706 |
+
|
| 707 |
+
Contact: If you have any questions about this pilot study, you should feel free to ask them by contacting us.
|
| 708 |
+
|
| 709 |
+
Expectation: We expect you to complete the task within approximately 10 to 15 minutes and we will manually check the quality of your work. If we see evidence of not taking the task seriously, you might risk losing your qualification to participate in future HITs. Furthermore, we ask you to not use any AI tools to assist you in completing the task, and if we find evidence of using such tools, you might risk getting your HIT rejected.
|
| 710 |
+
|
| 711 |
+
Please note that you are limited to a maximum of 30 HITs for this batch (failure to adhere to this could lead to rejection).
|
| 712 |
+
|
| 713 |
+
Consent to participate:
|
| 714 |
+
Checking this box indicates that you have read and understood the information above, are 18 years or older, and agree to participate in our study.
|
| 715 |
+
|
| 716 |
+
$\rightarrow$ (4) Very problematic if the speaker is cold and dismissive.
|
| 717 |
+
$\rightarrow$ (3) Somewhat problematic if coming from a well-meaning but blunt friend.
|
| 718 |
+
|
| 719 |
+
- "You look better in the other dress."
|
| 720 |
+
|
| 721 |
+
$\rightarrow$ (4) Very problematic if it comes from a partner who criticizes appearance.
|
| 722 |
+
$\rightarrow$ (2) Not really problematic if it's from a close friend with fashion sense.
|
| 723 |
+
|
| 724 |
+
backstoryInstruction_feeling = """IMPORTANT: consider how RELATIONSHIP BACKSTORY might affect how the listener feels:
|
| 725 |
+
|
| 726 |
+
[Examples with backstory]
|
| 727 |
+
|
| 728 |
+
- "You should spend more time at home with me."
|
| 729 |
+
|
| 730 |
+
$\rightarrow$ (1) Worse if the speaker has a history of being controlling.
|
| 731 |
+
$\rightarrow$ (2) The Same or even neutral if the speaker is just sad and missing them.
|
| 732 |
+
|
| 733 |
+
- "You look better in the other dress."
|
| 734 |
+
|
| 735 |
+
$\rightarrow$ (1) Worse if from a judgmental partner.
|
| 736 |
+
$\rightarrow$ (2) The Same or (3) Better if from a fashion-savvy friend.
|
| 737 |
+
|
| 738 |
+
|
| 739 |
+
|
| 740 |
+
# B MTurk Quality Controls
|
| 741 |
+
|
| 742 |
+
To ensure reliable annotations, we recruited experienced MTurk workers with Master's status, implemented attention checks, filtered low-effort responses, and enforced minimum completion times. Prior studies of interpersonal conflict and toxicity (e.g., ToxiGen (Hartvigsen et al., 2022), Unintended Offense (Tsai et al., 2024), Sotopia (Zhou et al., 2023a)) similarly rely on diverse crowdworkers, who bring lived social experience to interpreting interpersonal exchanges.
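
As a concrete, hypothetical illustration of these controls, low-effort submissions could be filtered with a simple check on attention-check accuracy and completion time; the column names and the 90-second threshold below are assumptions, not the study's actual parameters.

```python
import pandas as pd

def filter_annotations(df: pd.DataFrame, min_seconds: float = 90.0) -> pd.DataFrame:
    """Keep submissions that pass the attention check and exceed a minimum completion time."""
    passed_attention = df["attention_check_correct"]        # boolean column
    slow_enough = df["completion_seconds"] >= min_seconds   # drop implausibly fast HITs
    return df[passed_attention & slow_enough]
```
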
|
| 743 |
+
|
| 744 |
+
# C Annotation Templates
|
| 745 |
+
|
| 746 |
+
# Full Instructions
|
| 747 |
+
|
| 748 |
+
# Detailed instructions
|
| 749 |
+
|
| 750 |
+
1) Carefully read the given social interaction between two agents, with a US sociocultural perspective in mind.
|
| 751 |
+
|
| 752 |
+
2) Rate the social interaction across various metrics.
|
| 753 |
+
|
| 754 |
+
# Believability
|
| 755 |
+
|
| 756 |
+
Evaluate whether this conversation COULD happen in the real world by some pair of people somewhere, and that the characters interact in a natural and realistic manner. This is INDEPENDENT of whether you think the conversation was problematic or not. Even if characters have a good relationship, they can still have intense conflicts and even if characters have a bad relationship, they can have good conversations.
|
| 757 |
+
|
| 758 |
+
Mia was mostly believable except that the conversation kept sounding like it was winding down but kept going. Weirdly so. One person was really mean to the other, but overall the conversation was believable.
|
| 759 |
+
|
| 760 |
+
4 The conversation was natural for the most part.
|
| 761 |
+
|
| 762 |
+
The conversation was believable for the most part, but the characters were overly sweet to each other despite having a bad relationship.
|
| 763 |
+
The conversation was mostly believable, but there was some overly formal or proper language. Liam repeats what Ethan said once or hallucinates things that are inconsistent with the conversation.
|
| 764 |
+
|
| 765 |
+
4 The conversation was believable, and annotators should not judge believability based on whether the character was a good listener.
|
| 766 |
+
|
| 767 |
+
The conversation style was believable but the scenario was not that realistic.
|
| 768 |
+
|
| 769 |
+
3 The conversation style was not that realistic but the scenario was believable.
|
| 770 |
+
|
| 771 |
+
The conversation was unnatural.
|
| 772 |
+
|
| 773 |
+
# Understandability of the Character's Communication
|
| 774 |
+
|
| 775 |
+
Evaluate whether the character's way of communicating makes sense given their backstory, emotional state, and personal context—even if they were flawed or hurtful. This is about your empathetic understanding of where they're coming from, not whether what they stated was appropriate.
|
| 776 |
+
|
| 777 |
+
The speaker lost someone close and struggled with depression themselves, so they're projecting that experience. "You never listen to me." The character had felt ignored for years and finally snapped during a tense conversation. "You're so lazy!" The character was raised in a highly critical household and hasn't developed constructive communication habits. "I don't care what you think." There's no explanation or context suggesting emotional vulnerability - just dismissal.
|
| 778 |
+
|
| 779 |
+
4 Minimally understandable — emotionally rooted in their past experience, but still not a nice thing to say.
|
| 780 |
+
|
| 781 |
+
4 Somewhat understandable - a reactive but understandable complaint.
|
| 782 |
+
|
| 783 |
+
2 Background helps explain, but doesn't justify.
|
| 784 |
+
|
| 785 |
+
1 Not understandable - lacking emotional grounding or context.
|
| 786 |
+
|
| 787 |
+
Tip: Ask yourself: if you were in this character's shoes — with their background and emotional state — would their communication make sense, even if not ideal?
|
| 788 |
+
|
| 789 |
+
# Overall, the speaker generally had good intentions toward the other speaker
|
| 790 |
+
|
| 791 |
+
This evaluates whether you felt the speaker was trying to be helpful, kind, or supportive, even if they sometimes expressed themselves poorly. It's about the underlying intention behind their communication - not just how it came across.
|
| 792 |
+
|
| 793 |
+
Example
|
| 794 |
+
|
| 795 |
+
<table><tr><td>Example</td><td>Rating</td><td>Assessment</td></tr><tr><td>"I know it's hard, but you have to keep going." The speaker wants to encourage the other person to stay strong, despite sounding a bit blunt.</td><td>5</td><td>Clearly well-intentioned - may lack warmth, but aims to uplift.</td></tr><tr><td>"You never take anything seriously." The speaker is frustrated, but wants the other person to take more responsibility in a shared situation.</td><td>3</td><td>Mixed - intention might be to help, but the delivery is critical and harsh.</td></tr><tr><td>"I'm done with you." The speaker feels hurt and is reacting from a place of self-protection rather than care.</td><td>2</td><td>Little evidence of good intention - emotionally reactive and distancing.</td></tr><tr><td>"I brought this because I thought it might help you." The speaker proactively tried to be helpful or thoughtful.</td><td>5</td><td>Clearly kind - intention to support is obvious.</td></tr><tr><td>"Whatever, it's not my problem." Disengaged and dismissive - no clear concern for the other person.</td><td>1</td><td>Not well-intentioned - emotionally checked out or hostile.</td></tr></table>
|
| 796 |
+
|
| 797 |
+
Tip: Ask yourself: If you had to guess what this person was trying to achieve in saying those things — was it to help, connect, or protect? Or were they trying to control, shame, or hurt?
|
| 798 |
+
|
| 799 |
+
# Overall, the speaker said problematic things to the other speaker
|
| 800 |
+
|
| 801 |
+
This evaluates whether you felt that overall the speaker said things that were inappropriate, potentially harmful, or toxic during the conversation.
|
| 802 |
+
|
| 803 |
+
Example
|
| 804 |
+
|
| 805 |
+
<table><tr><td>Example</td><td>Rating</td><td>Assessment</td></tr><tr><td>"You're such an idiot."</td><td>Very problematic</td><td>Clearly harmful and judgmental.</td></tr><tr><td>"You need to try harder." Directive tone with potential pressure.</td><td>Somewhat problematic</td><td>Could create pressure or defensiveness depending on delivery.</td></tr><tr><td>"I think you could improve here." Gently stated feedback.</td><td>Not really problematic</td><td>Constructive intent with respectful tone.</td></tr><tr><td>"Thanks for doing your part!" Encouraging and affirming.</td><td>Not problematic at all</td><td>Positive and emotionally supportive.</td></tr></table>
|
| 806 |
+
|
| 807 |
+
□ I have read the instructions and examples
|
| 808 |
+
|
| 809 |
+
# Your Task
|
| 810 |
+
|
| 811 |
+
# Social Scenario
|
| 812 |
+
|
| 813 |
+
Relationship: Jaxon and Emily have a siblings relationship, where Jaxon is the brother of Emily.
|
| 814 |
+
|
| 815 |
+
Relationship Backstory: While growing up, Jaxon was constantly overshadowed by his little sister Emily's achievements. From earning top grades to being loved and praised by everyone, Emily seemed to be perfect. Over time, Jaxon developed an unspoken grudge against Emily. He still loves her as family, but he can't help but feel a constant need to pull her down or find flaws in her work, to feel superior. Consequently, Jaxon developed a more aggressive and manipulative personality, often resorting to undermining her accomplishments under the guise of giving her 'real' and honest criticism. His harsh approach continued even when they grew up and started their respective careers.
|
| 816 |
+
|
| 817 |
+
Assume BOTH characters are aware of the backstory.
|
| 818 |
+
|
| 819 |
+
Briefly describe your third party impression of the characters.
|
| 820 |
+
|
| 821 |
+
# Episode
|
| 822 |
+
|
| 823 |
+
Hey Em, I read one of your latest novels and
|
| 824 |
+
|
| 825 |
+
Wait...
|
| 826 |
+
|
| 827 |
+
one of my novels? How did you find out?
|
| 828 |
+
|
| 829 |
+
Doesn't matter. Look, I read it and I need to tell you, I think your plot twists are way too predictable.
|
| 830 |
+
|
| 831 |
+
# Your Ratings
|
| 832 |
+
|
| 833 |
+
# Hover the mouse over the colored dimension to see the descriptions.
|
| 834 |
+
|
| 835 |
+
This conversation could happen somewhere by some pair of people. [Note: Do not judge whether the conversation was problematic or not. Remember that characters with good relationships can still have heated conflict and characters with bad relationships can still have sweet/positive conversations.]
|
| 836 |
+
○ Strongly disagree ○ Disagree ○ Neutral ○ Agree ○ Strongly agree
|
| 837 |
+
|
| 838 |
+
Briefly explain why you thought the conversation was or wasn't believable.
|
| 839 |
+
|
| 840 |
+
Emily was overall problematic towards Jaxon.
|
| 841 |
+
|
| 842 |
+
Jaxon was overall problematic towards Emily.
|
| 843 |
+
|
| 844 |
+
Not problematic at all / Not really problematic / Somewhat problematic / Very problematic
|
| 845 |
+
|
| 846 |
+
It is understandable that Emily communicated the way they did.
|
| 847 |
+
|
| 848 |
+
Strongly disagree Disagree Neutral Agree Strongly agree
|
| 849 |
+
|
| 850 |
+
It is understandable that Jaxon communicated the way they did.
|
| 851 |
+
|
| 852 |
+
Strongly disagree Disagree Neutral Agree Strongly agree
|
| 853 |
+
|
| 854 |
+
Emily generally said things with good intention towards Jaxon.
|
| 855 |
+
|
| 856 |
+
Strongly disagree Disagree Neutral Agree Strongly agree
|
| 857 |
+
|
| 858 |
+
Jaxon generally said things with good intention towards Emily.
|
| 859 |
+
|
| 860 |
+
Strongly disagree Disagree Neutral Agree Strongly agree
|
| 861 |
+
|
| 862 |
+
I personally can relate to Emily
|
| 863 |
+
|
| 864 |
+
Strongly disagree Disagree Neutral Agree Strongly agree
|
| 865 |
+
|
| 866 |
+
I personally can relate to Jaxon
|
| 867 |
+
|
| 868 |
+
Strongly disagree Disagree Neutral Agree Strongly agree
|
| 869 |
+
|
| 870 |
+
|
| 875 |
+
|
| 876 |
+
# Conversation analysis rating
|
| 877 |
+
|
| 878 |
+
Detailed Instructions: Per-Turn Conversation Rating
|
| 879 |
+
|
| 880 |
+
1 Carefully read each message from the conversation, considering tone, intent, and relationship.
|
| 881 |
+
|
| 882 |
+
Account for the context (backstory) when interpreting the social interaction.
|
| 883 |
+
|
| 884 |
+
2 Determine whether the message is problematic or constructive.
|
| 885 |
+
3 Label the communication markers present, based on definitions and examples.
|
| 886 |
+
4 Consider the emotional effect on the listener.
|
| 887 |
+
|
| 888 |
+
# Was this turn problematic? (accounting for context)
|
| 889 |
+
|
| 890 |
+
Assess the overall tone and potential harm of the message. Same examples from above
|
| 891 |
+
|
| 892 |
+
# Types of problematic communication markers used
|
| 893 |
+
|
| 894 |
+
Identify which harmful language types are present in the message. Use checkboxes to select all that apply.
|
| 895 |
+
|
| 896 |
+
Rating Assessment
|
| 897 |
+
|
| 898 |
+
<table><tr><td>Statement Type</td><td>Definition</td><td>Example</td></tr><tr><td>Judgment</td><td>Assigns fault or labels someone as bad/wrong</td><td>"You're such an idiot for doing that."</td></tr><tr><td>Comparison</td><td>Unfavorably contrasts someone, evoking shame</td><td>"No one else is as dramatic as you."</td></tr><tr><td>Deflection of Responsibility</td><td>Blames others for one's own actions or emotions</td><td>"It's your fault I'm upset."</td></tr><tr><td>Demand / Threat</td><td>Applies pressure, guilt, or punishment</td><td>"You better do this, or else."</td></tr><tr><td>Deserve / Punitive</td><td>Uses "deserve" logic to justify harm</td><td>"She messed up, so she deserves it."</td></tr></table>
|
| 899 |
+
|
| 900 |
+
# Types of NOT-problematic communication markers used
|
| 901 |
+
|
| 902 |
+
Identify helpful or emotionally aware communication markers. Use checkboxes to select all that apply.
|
| 903 |
+
|
| 904 |
+
Rating Assessment
|
| 905 |
+
|
| 906 |
+
<table><tr><td>Statement Type</td><td>Definition</td><td>Example</td></tr><tr><td>Neutral Observation</td><td>Objective statement with no exaggeration or judgment</td><td>"You didn't wash the dishes when you came home."</td></tr><tr><td>Feeling Statement</td><td>Real emotional expression about self (uses "I feel...")</td><td>"I feel anxious about the meeting."</td></tr><tr><td>Need Statement</td><td>States a need without blame</td><td>"I need some quiet time to recharge."</td></tr><tr><td>Request (No Pressure Ask)</td><td>Polite request that allows for refusal</td><td>"Could you please lower the volume?"</td></tr><tr><td>Empathic / Understanding</td><td>Expresses concern or checks in on other's emotions</td><td>"Are you feeling upset about the schedule change?"</td></tr></table>
|
| 907 |
+
|
| 908 |
+
|
| 945 |
+
|
| 946 |
+
# How would this make the other character feel?
|
| 947 |
+
|
| 948 |
+
Estimate the emotional impact on the listener. Choose one: Worse / The Same / Better
|
| 949 |
+
|
| 950 |
+
<table><tr><td>Example</td><td>Rating</td><td>Assessment</td></tr><tr><td>"You're always like this."</td><td>Worse</td><td>Felt dismissive and accusatory. Likely to trigger defensiveness or hurt.</td></tr><tr><td>"Maybe let's talk later?"</td><td>The same</td><td>Neutral phrasing with no escalation. Maintains status quo.</td></tr><tr><td>"Thanks for being honest."</td><td>Better</td><td>Offered emotional support. Validates the listener and builds trust.</td></tr></table>
|
| 951 |
+
|
| 952 |
+
□ I have read the Per-Turn annotation instructions and I am familiar with the statement types.
|
| 953 |
+
|
| 954 |
+
|
| 975 |
+
|
| 976 |
+

|
| 977 |
+
|
| 978 |
+
<table><tr><td>Condition</td><td>Type</td><td>F1 Score</td><td>Jaccard</td></tr><tr><td rowspan="2">NEG</td><td>VC Type</td><td>0.477</td><td>0.478</td></tr><tr><td>NVC Type</td><td>0.460</td><td>0.446</td></tr><tr><td rowspan="2">POS</td><td>VC Type</td><td>0.362</td><td>0.434</td></tr><tr><td>NVC Type</td><td>0.355</td><td>0.346</td></tr></table>
|
| 979 |
+
|
| 980 |
+
Table 3: Overall agreement scores on VC and NVC type labels across conditions. We report F1 and Jaccard scores to account for the multi-label setting.
|
| 981 |
+
|
| 982 |
+
<table><tr><td>Group</td><td>Label</td><td>NEG (F1)</td><td>POS (F1)</td></tr><tr><td rowspan="5">VC Types</td><td>Moralistic Judgment</td><td>0.624</td><td>0.648</td></tr><tr><td>Comparison</td><td>0.517</td><td>0.483</td></tr><tr><td>Denial of Responsibility</td><td>0.557</td><td>0.482</td></tr><tr><td>Demand</td><td>0.442</td><td>0.159</td></tr><tr><td>Deserve Thinking</td><td>0.245</td><td>0.042</td></tr><tr><td rowspan="5">NVC Types</td><td>Neutral Observation</td><td>0.511</td><td>0.459</td></tr><tr><td>Feeling Statement</td><td>0.562</td><td>0.423</td></tr><tr><td>Need Statement</td><td>0.306</td><td>0.160</td></tr><tr><td>Request (No-Pressure Ask)</td><td>0.407</td><td>0.362</td></tr><tr><td>Empathic/Understanding</td><td>0.515</td><td>0.370</td></tr></table>
|
| 983 |
+
|
| 984 |
+
Table 4: Per-label F1 agreement scores for VC and NVC types in negative and positive conditions. Bolded values indicate highest agreement per category and condition.
|
| 985 |
+
|
| 986 |
+
# D Supplementary Results
|
| 987 |
+
|
| 988 |
+
Table 3 shows overall agreement between annotators on the multi-label selection of VC or NVC types, calculated on samples where annotators agreed on whether a turn was problematic. Again, we find moderate agreement across the 5 classes.
|
| 989 |
+
|
| 990 |
+
Finally, Table 4 shows agreement for each of the 5 VC and NVC types. Most communication types show moderate agreement except for the "Deserve Thinking" violent communication type, which is also the lowest-prevalence type.
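For reference, the per-turn multi-label agreement above can be reproduced with a simple set-based computation. Below is a minimal sketch, assuming each annotator's labels for a turn form a Python set of type names; the helper names and example labels are illustrative, not the released evaluation code.

```python
# Minimal sketch of multi-label agreement between two annotators (illustrative).
def pairwise_f1(a: set, b: set) -> float:
    """Treat one annotator's labels as 'prediction' and the other's as 'reference'."""
    if not a and not b:
        return 1.0
    overlap = len(a & b)
    if overlap == 0:
        return 0.0
    precision, recall = overlap / len(a), overlap / len(b)
    return 2 * precision * recall / (precision + recall)

def pairwise_jaccard(a: set, b: set) -> float:
    if not a and not b:
        return 1.0
    return len(a & b) / len(a | b)

# Example: two annotators labeling the same turn.
ann1 = {"Moralistic Judgment", "Demand"}
ann2 = {"Moralistic Judgment"}
print(round(pairwise_f1(ann1, ann2), 3), pairwise_jaccard(ann1, ann2))  # 0.667 0.5
```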
|
| 991 |
+
|
| 992 |
+
# E Qualitative Examples of Model Error
|
| 993 |
+
|
| 994 |
+
GPT-4o example for positive backstory: Jaxon was the eldest amongst his siblings and has always been the protector, especially of Emily, who was the youngest. Even during their childhood days, Jaxon played the role of a gentle critic, pointing out Emily's potential growth areas in her drawings and writings, nurturing her ability to improve and perform better. Their parents often commended him for his supportive and yet constructive attitude towards Emily. And it was this constant guidance and criticism from Jaxon that played a significant role in Emily developing her writing skills, leading her to become a recognized writer under a pseudonym. When Jaxon came across Emily's secret, he couldn't help but continue his role as a supportive critic, hoping to elevate her writing to even greater heights, just as he had during their childhood.
|
| 995 |
+
|
| 996 |
+
Turn: "(Jaxon): Hey Em, I read one of your latest novels and..." Human Rating: 1.5 (slightly negative feeling) Model Rating: 3.0 (positive feeling)
|
| 997 |
+
|
| 998 |
+
Llama example for negative backstory: Baxter has always been a loner and finds comfort in the isolation of his work. His lack of social interaction led him to develop a cynical attitude towards people and their capacities. He'd been friends with Isabelle's parents and saw Isabelle grow up. When Isabelle's parents passed away in an accident, Baxter - unable to process his grief - began to push her away, masking his fears behind a facade of humor and sarcasm. Still considering Isabelle as the young, naive girl he once knew, his jokes became more demeaning over time, depreciating her efforts in her job and personal growth. His remarks started to create a distance between them, leading to an increasingly toxic relationship.
|
| 999 |
+
|
| 1000 |
+
Turn: "Isabelle: You know what, Baxter? I've been trying to ignore your snarky comments for a while, but this is starting to hurt." Human Rating: 1.0 (not really problematic) Model Rating: 3.0 (moderately problematic)
|
| 1001 |
+
|
| 1002 |
+
Llama example for positive backstory: Leo had years of experience juggling between his work
|
| 1003 |
+
|
| 1004 |
+
as a dentist and taking care of his beloved daughter. When his daughter's mother left, he found himself thrust into a dual role of being both a father and a mother. Dealing with this dynamic was challenging yet it taught Leo the value of family and instilled in him a deep sense of responsibility. As the elder brother to Naomi, he felt it was his duty to ensure he was there for her, just as he was for his little daughter. Having lost their own parents at an early age, Leo was adamant in providing a strong family base for his sister. Naomi was more than just Leo's sister, she was also his confidante and friend. Leo's unwavering dedication to family often led him to bear the responsibility of solving everything on his own.
|
| 1005 |
+
|
| 1006 |
+
Turn: "(Leo): I think I've been so tied up recently with work...sometimes I overlook things. Family stuff, you know?" Human Rating: 2.0 (neutral feeling) Model Rating: 3.0 (positive feeling)
|
| 1007 |
+
|
| 1008 |
+
Gemini examples for negative backstory: Baxter has always been a loner and finds comfort in the isolation of his work. His lack of social interaction led him to develop a cynical attitude towards people and their capacities. He'd been friends with Isabelle's parents and saw Isabelle grow up. When Isabelle's parents passed away in an accident, Baxter - unable to process his grief - began to push her away, masking his fears behind a facade of humor and sarcasm. Still considering Isabelle as the young, naive girl he once knew, his jokes became more demeaning over time, depreciating her efforts in her job and personal growth. His remarks started to create a distance between them, leading to an increasingly toxic relationship.
|
| 1009 |
+
|
| 1010 |
+
Turn: "Baxter: You're right... I'm sorry. I shouldn't have said those things." Human Rating: 1.0 (not really problematic) Model Rating: 3.0 (moderately problematic)
|
| 1011 |
+
|
| 1012 |
+
Turn: "Baxter: Small? I've seen you handle customers double your age with that articulation of yours." Human Rating: 2.5 (moderately positive feeling) Model Rating: 1.0 (negative feeling)
|
EMNLP/2025/Words Like Knives_ Backstory-Personalized Modeling and Detection of Violent Communication/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:15d10d68e1e25aadea39d4bb525b62e8fcfa7d50ac75660d68de502cc9981015
|
| 3 |
+
size 473971
|
EMNLP/2025/Words Like Knives_ Backstory-Personalized Modeling and Detection of Violent Communication/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:83fe8c0382444f4bd53d204dd20ec24eeacafdb68cc4d27199730b2b734e97f9
|
| 3 |
+
size 888213
|
EMNLP/2025/X-CoT_ Explainable Text-to-Video Retrieval via LLM-based Chain-of-Thought Reasoning/e9f42600-a626-4237-b1d0-179ae202bf7f_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:60127f56d41de505f08990fa0acc64e247de4f8e1e654f158196df31947e67a7
|
| 3 |
+
size 89068
|
EMNLP/2025/X-CoT_ Explainable Text-to-Video Retrieval via LLM-based Chain-of-Thought Reasoning/e9f42600-a626-4237-b1d0-179ae202bf7f_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f2f6d5eed74eab7a0e3eb77d0ec6432a47658c1efe9377f55846c7d49a1789f4
|
| 3 |
+
size 112209
|
EMNLP/2025/X-CoT_ Explainable Text-to-Video Retrieval via LLM-based Chain-of-Thought Reasoning/e9f42600-a626-4237-b1d0-179ae202bf7f_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0dca0d1639065b4e1baa6748123fe4591f532f3add8f74820c172c4b91fd17d7
|
| 3 |
+
size 1330121
|
EMNLP/2025/X-CoT_ Explainable Text-to-Video Retrieval via LLM-based Chain-of-Thought Reasoning/full.md
ADDED
|
@@ -0,0 +1,415 @@
| 1 |
+
# X-CoT: Explainable Text-to-Video Retrieval via LLM-based Chain-of-Thought Reasoning
|
| 2 |
+
|
| 3 |
+
Prasanna Reddy Pulakurthi<sup>1</sup>, Jiamian Wang<sup>1</sup>, Majid Rabbani<sup>1</sup>, Sohail Dianat<sup>1</sup>, Raghuveer Rao<sup>2</sup>, and Zhiqiang Tao<sup>1</sup>
|
| 4 |
+
|
| 5 |
+
$^{1}$ Rochester Institute of Technology, $^{2}$ DEVCOM Army Research Laboratory
|
| 6 |
+
|
| 7 |
+
# Abstract
|
| 8 |
+
|
| 9 |
+
Prevalent text-to-video retrieval systems mainly adopt embedding models for feature extraction and compute cosine similarities for ranking. However, this design presents two limitations. Low-quality text-video data pairs could compromise the retrieval, yet are hard to identify and examine. Cosine similarity alone provides no explanation for the ranking results, limiting the interpretability. We ask: can we interpret the ranking results, so as to assess the retrieval models and examine the text-video data? This work proposes X-CoT, an explainable retrieval framework built upon LLM CoT reasoning in place of embedding model-based similarity ranking. We first expand the existing benchmarks with additional video annotations to support semantic understanding and reduce data bias. We also devise a retrieval CoT consisting of pairwise comparison steps, yielding detailed reasoning and a complete ranking. X-CoT empirically improves the retrieval performance and produces detailed rationales. It also facilitates analysis of model behavior and data quality. Code and data are available at: github.com/PrasannaPulakurthi/X-CoT.
|
| 10 |
+
|
| 11 |
+
# 1 Introduction
|
| 12 |
+
|
| 13 |
+
Text-to-video retrieval finds the most relevant video for a text query, being widely used for retrieval-augmented generation (Jeong et al., 2025), question-answering (Sun et al., 2024b), and agent memory enhancement (Fan et al., 2024; Sun et al., 2024a), etc. Recent progress mainly depends on embedding models, e.g., CLIP-based (Ma et al., 2022; Wang et al., 2024a,b) or MLLM-based (Jiang et al., 2024; Sun et al., 2024c) for retrieval.
|
| 14 |
+
|
| 15 |
+
However, an embedding model-based retrieval system bears some limitations. First, the model is prone to the data quality of text-video pairs. Public datasets can introduce either flawed videos (e.g., blur, distortion) or crude captions (Radford et al., 2021), undermining the retrieval and making it hard
|
| 16 |
+
|
| 17 |
+

|
| 18 |
+
Figure 1: Existing retrieval systems mainly adopt embedding models to compute cosine similarities. We propose LLM CoT reasoning-based retrieval to provide explanations beyond rankings. Our method can also be integrated upon diverse embedding model methods.
|
| 19 |
+
|
| 20 |
+

|
| 21 |
+
|
| 22 |
+
to track. Second, the embedding model mainly computes the cosine similarity in the latent space, which only tells the ranking but fails to justify the ranking results. Both of these reasons call for an explainable retrieval system to interpret why a video candidate was retrieved, so as to assist the users to comprehend the ranking results, assess the retrieval system, and examine the input data quality.
|
| 23 |
+
|
| 24 |
+
To achieve interpretability, this work proposes X-CoT, an explainable framework that exchanges traditional cosine similarity-based ranking with LLM-based judgment (see Fig. 1) and devises a chain-of-thought pipeline for text-video retrieval. Firstly, we expand the existing benchmark datasets with additional video annotations to facilitate the LLM's reasoning and reduce the raw video data bias. Secondly, we define a retrieval CoT consisting of pairwise comparison steps upon the Bradley-Terry model (Bradley and Terry, 1952). By collecting the stepwise results, the proposed method not only enables the improved ranking performance over embedding model-based baselines but also delivers detailed rationales. In addition, without requiring
|
| 25 |
+
|
| 26 |
+
the paired text-video data training, this method could serve as a general processing step that integrates with distinct embedding models.
|
| 27 |
+
|
| 28 |
+
We summarize the contributions as follows: (1) This work proposes X-CoT, an explainable retrieval system built upon LLM chain-of-thought reasoning, advancing trustworthy and trackable retrieval beyond the embedding-model design. (2) We collect and release high-quality text annotation data for the raw videos to augment existing benchmark text-video datasets for future LLM study. (3) This work devises a retrieval CoT upon a pretrained LLM that requires no additional training and is plug-and-play on top of existing retrieval systems. (4) Experiments demonstrate a remarkable performance boost of X-CoT with diverse embedding models and benchmark datasets. With X-CoT, we empirically analyze the behaviors of embedding models and identify inferior text-video data.
|
| 29 |
+
|
| 30 |
+
# 2 Related Work
|
| 31 |
+
|
| 32 |
+
Text-Video (T2V) Retrieval has been driven by embedding models like X-CLIP (Ma et al., 2022), Clip4clip (Luo et al., 2022), Clip-vip (Xue et al., 2022), Cap4video (Wu et al., 2023), UMT (Li et al., 2023), and InternVid (Wang et al., 2024d), which learn joint video-text representations for retrieval.
|
| 33 |
+
|
| 34 |
+
MLLMs for Retrieval. Recent advances in MLLMs extend language models with visual understanding, enabling new capabilities in retrieval and reasoning. VLM2Vec (Jiang et al., 2024) excels at text-image retrieval, having been trained for large-scale multimodal embedding tasks. MM-REACT (Yang et al., 2023) combines visual tools with LLM reasoning, while Video-ChatGPT (Maaz et al., 2024) and Video-LLaVA (Lin et al., 2024) allow free-form video understanding through frame-by-frame perception and dialogue. BRIGHT (SU et al., 2025) introduces a challenging benchmark focused on reasoning-intensive multimodal retrieval, highlighting the need for interpretable and robust systems like ours.
|
| 35 |
+
|
| 36 |
+
# 3 Method
|
| 37 |
+
|
| 38 |
+
# 3.1 Preliminaries
|
| 39 |
+
|
| 40 |
+
Existing text-to-video retrieval systems are mainly embedding model-based. Given a video candidate $v$ and a text query $q$ , an embedding model produces the video and text embedding, respectively, i.e., $\mathbf{z}_v,\mathbf{z}_q\in \mathbb{R}^d$ , where $d$ denotes the dimension of the embedding space. Given the features, the system
|
| 41 |
+
|
| 42 |
+

|
| 43 |
+
Figure 2: Video annotation collection pipeline. Structured text is constructed to enrich the semantics and assist LLM reasoning. Ground-truth captions are not directly used.
|
| 44 |
+
|
| 45 |
+
# GT Caption: adding ingredients to a pizza
|
| 46 |
+
|
| 47 |
+

|
| 48 |
+
Figure 3: Example of one structured video annotation.
|
| 49 |
+
|
| 50 |
+
# Frame Captions:
|
| 51 |
+
|
| 52 |
+
1. "A hand holding a spoon is spreading a red sauce onto a circular, perforated surface."
|
| 53 |
+
|
| 54 |
+
2. "A hand uses a spoon to spread tomato sauce onto a pizza crust."
|
| 55 |
+
|
| 56 |
+
3. "A hand is sprinkling shredded cheese over a pizza base with tomato sauce.
|
| 57 |
+
|
| 58 |
+
Summary: "a person sprinkles shredded cheese over a pizza base with tomato sauce."
|
| 59 |
+
|
| 60 |
+
Objects: ["sauce", "cheese", "pan", "surface", "crust", "hand", "spoon", "pizza", "base", "person"]
|
| 61 |
+
|
| 62 |
+
Actions: ["spread", "sprinkle", "add", "reach", "rest"]
|
| 63 |
+
|
| 64 |
+
Scenes: ["black", "red", "circular", "perforated"]
|
| 65 |
+
|
| 66 |
+
computes the cosine similarity score $s$ for ranking, i.e., $s(q,v) = (\mathbf{z}_q^\top \mathbf{z}_v) / (\| \mathbf{z}_q\|_2\| \mathbf{z}_v\|_2)$. However, it is hard to understand the rationale behind a specific cosine similarity score, e.g., why $s(q,v)$ is high or low for text $q$ and video $v$, which could be attributed either to the text-video data correspondence or to the embedding model's behavior. To this end, this work studies explainable retrieval.
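For concreteness, a minimal sketch of this baseline ranking step is shown below; the random embeddings stand in for CLIP/VLM2Vec/X-Pool features and are illustrative only.

```python
# Embedding-model baseline: rank candidates by cosine similarity (illustrative sketch).
import numpy as np

def cosine_rank(z_q: np.ndarray, z_videos: np.ndarray) -> np.ndarray:
    """z_q: (d,) query embedding; z_videos: (N, d) candidate embeddings.
    Returns candidate indices sorted by descending cosine similarity."""
    z_q = z_q / np.linalg.norm(z_q)
    z_v = z_videos / np.linalg.norm(z_videos, axis=1, keepdims=True)
    scores = z_v @ z_q            # s(q, v) for every candidate
    return np.argsort(-scores)    # best match first

rng = np.random.default_rng(0)
print(cosine_rank(rng.normal(size=4), rng.normal(size=(3, 4))))  # candidate indices, best first
```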
|
| 67 |
+
|
| 68 |
+
# 3.2 Video Annotation Collection
|
| 69 |
+
|
| 70 |
+
Motivation. We first expand the existing text-video benchmarks with additional video annotations for the following reasons. (1) Videos can contain complex semantics, such as scenes with rapid motions or massive objects. Additional annotations provide a better chance for video understanding. (2) Videos can be noisy and mislead the retrieval due to blur and distortion. Additional annotations provide useful information to describe the video semantics, reducing the bias caused by noisy frames.
|
| 71 |
+
|
| 72 |
+
Data Collection Pipeline. To collect the high-quality annotations, we develop an MLLM-based pipeline (see Fig. 2). For every video $v$ , we uniformly sample $N$ frames and apply the filters to remove near-duplicates (see Appendix A). We
|
| 73 |
+
|
| 74 |
+

|
| 75 |
+
Figure 4: X-CoT pipeline, which contains pairwise comparisons upon LLM for stepwise ranking and reasoning.
|
| 76 |
+
|
| 77 |
+
then adopt an MLLM (Qwen2.5-VL-7B-Captioner-Relaxed) to generate frame-level captions, which are aggregated and rephrased to form structured annotations comprising objects, actions, and scenes, plus a high-level video summary.
|
| 78 |
+
|
| 79 |
+
We apply additional post-processing steps to improve annotation quality, including (i) Noun Filter: Extract and retain relevant object and scene tags for grounding entities. (ii) Verb Filter: Extract action-related verbs to support temporal and causal reasoning. (iii) Deduplication: Redundant or semantically equivalent tags (e.g., "a dog", "dog", "the dog") are merged to avoid repetition. (iv) Stop Word Removal: Common stop words (e.g., "the", "is", "in") are filtered out to retain only informative content words. (v) Proofing: Correct grammatical or formatting inconsistencies in the tags. (vi) Normalization: We apply basic text normalization, including lowercasing and punctuation removal. All videos are equipped with structured annotations, as illustrated in Fig. 3.
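A rough sketch of the lighter post-processing steps (deduplication, stop-word removal, normalization) is given below; the stop-word list and helper names are illustrative assumptions, and the noun/verb filters would additionally require a POS tagger such as spaCy.

```python
# Illustrative sketch of tag post-processing steps (iii)-(vi); not the released pipeline.
import re
import string

STOP_WORDS = {"the", "a", "an", "is", "in", "of", "and", "to"}  # assumed small list

def normalize(tag: str) -> str:
    """Lowercase, strip punctuation, and drop leading articles ('a dog' -> 'dog')."""
    tag = tag.lower().translate(str.maketrans("", "", string.punctuation))
    return re.sub(r"^(a|an|the)\s+", "", tag.strip())

def clean_tags(tags: list[str]) -> list[str]:
    seen, cleaned = set(), []
    for tag in tags:
        norm = normalize(tag)
        if not norm or norm in STOP_WORDS or norm in seen:  # stop words + deduplication
            continue
        seen.add(norm)
        cleaned.append(norm)
    return cleaned

print(clean_tags(["a dog", "Dog", "the dog", "the", "Pizza!"]))  # ['dog', 'pizza']
```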
|
| 80 |
+
|
| 81 |
+
# 3.3 Retrieval CoT
|
| 82 |
+
|
| 83 |
+
Given the annotation data, this work adopts LLM reasoning for explainable retrieval. We construct a retrieval CoT to jointly produce the ranking and explanations, as shown in Fig. 4. The whole pipeline contains three steps.
|
| 84 |
+
|
| 85 |
+
Step 1: One can optionally adopt diverse embedding models to produce a top-$K$ candidate pool for a given query. Since existing embedding model-based methods enable accurate retrieval with a large $K$ value, one can apply the proposed X-CoT to reason over a small range, e.g., $\mathcal{V} = \{v_{1},\ldots ,v_{K}\}$ with $K < 25$.
|
| 86 |
+
|
| 87 |
+
Step 2: We then generate pairwise combinations of the top- $K$ candidates, forming input tuple
|
| 88 |
+
|
| 89 |
+
$[q, v_i, v_j]$. We adopt an LLM to process each tuple, yielding a binary preference (e.g., $v_i < v_j$) and a text justification. The structured annotations are employed to facilitate the reasoning.
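A minimal sketch of one such pairwise comparison is shown below; the prompt wording and the `call_llm` helper are hypothetical placeholders rather than the exact prompt used here.

```python
# Illustrative sketch of a single pairwise comparison step (hypothetical prompt).
def compare_pair(query: str, ann_a: str, ann_b: str, call_llm) -> tuple[str, str]:
    """Return ('A' or 'B', reason) for which candidate better matches the query."""
    prompt = (
        f"Query: {query}\n"
        f"Video A annotation: {ann_a}\n"
        f"Video B annotation: {ann_b}\n"
        "Which video better matches the query? Reply with 'A' or 'B' and a short reason."
    )
    reply = call_llm(prompt)  # e.g., a local open-source chat model call (assumed helper)
    winner = "A" if reply.strip().upper().startswith("A") else "B"
    return winner, reply
```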
|
| 90 |
+
|
| 91 |
+
Step 3: Notably, we further refine the ranking by fitting the Bradley-Terry (BT) model to the pairwise comparison set via MLE (Hunter, 2004) and computing the ability scores $\theta_{k}$ with $\Pr[v_{i} > v_{j}] = \theta_{i} / (\theta_{i} + \theta_{j})$. In this way, we correct comparisons with noisy or cyclic judgments. Accordingly, the final ranking list $\hat{\mathcal{V}}$ is produced by sorting $\theta_k$ in descending order. We provide the X-CoT algorithm in Appendix F.
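The aggregation in Step 3 can be sketched as below, fitting Bradley-Terry ability scores with the MM updates of Hunter (2004) on a toy win/loss log; the data and variable names are illustrative.

```python
# Illustrative sketch of Bradley-Terry aggregation over pairwise LLM verdicts.
import numpy as np

def bradley_terry(pairs, K, n_iter=100):
    """pairs: list of (winner_idx, loser_idx); returns ability scores theta (length K)."""
    wins = np.zeros(K)                    # W_i: total wins of candidate i
    n = np.zeros((K, K))                  # n_ij: number of comparisons between i and j
    for w, l in pairs:
        wins[w] += 1
        n[w, l] += 1
        n[l, w] += 1
    theta = np.ones(K)
    for _ in range(n_iter):
        denom = (n / (theta[:, None] + theta[None, :] + 1e-12)).sum(axis=1)
        theta = (wins + 1e-3) / (denom + 1e-12)   # small prior keeps zero-win items finite
        theta /= theta.sum()                      # abilities are relative, so fix the scale
    return theta

pairs = [(0, 1), (0, 2), (1, 2), (2, 1), (0, 1)]  # toy log: candidate 0 wins most often
theta = bradley_terry(pairs, K=3)
print(np.argsort(-theta))                         # final ranking, best candidate first
```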
|
| 92 |
+
|
| 93 |
+
# 4 Experiment
|
| 94 |
+
|
| 95 |
+
# 4.1 Experimental Settings
|
| 96 |
+
|
| 97 |
+
We evaluate X-CoT on four benchmarks: MSR-VTT (Xu et al., 2016), MSVD (Chen and Dolan, 2011), LSMDC (Rohrbach et al., 2015), and DiDeMo (Anne Hendricks et al., 2017). We report Recall@K (R@1, R@5, R@10), Median Rank (MdR), and Mean Rank (MnR).
|
| 98 |
+
|
| 99 |
+
We consider three off-the-shelf embedding models to generate the coarse top- $K$ list $(K = 20)$ , including CLIP-ViT-B/32 (Radford et al., 2021), Qwen2-VL (Wang et al., 2024c) model by VLM2Vec (Jiang et al., 2024), and X-Pool (Gorti et al., 2022). The former two are zero-shot retrievers, and X-Pool is trained with text-video data.
|
| 100 |
+
|
| 101 |
+
# 4.2 Performance Comparison
|
| 102 |
+
|
| 103 |
+
Table 1 and 2 show the text-to-video retrieval performance with the proposed X-CoT on four datasets and three embedding models. X-CoT enables a remarkable performance boost over embedding models on all metrics, e.g., $+5.6\%$ in R@1 for CLIP on MSVD, $+1.9\%$ in R@1 on MSVD for X-Pool. Overall, LLM CoT reasoning-based retrieval enjoys accurate retrieval over cosine similarity-based ranking upon embedding models.
|
| 104 |
+
|
| 105 |
+
# 4.3 Ablation Study
|
| 106 |
+
|
| 107 |
+
We conduct an ablation study of X-CoT in Table 3, adopting the CLIP model as the baseline. We study the effect of the proposed CoT via the w/o CoT variant, i.e., directly asking the LLM to rank the top-$K$ results, which leads to a significant drop in performance (e.g., $-2.9\%$ in R@1): pairwise comparison is much easier than selecting the best of $K$. We also find that the CoT alone (w/o BT) benefits the retrieval. Jointly considering the CoT and the BT model, the
|
| 108 |
+
|
| 109 |
+
<table><tr><td rowspan="2">Methods</td><td colspan="5">MSR-VTT</td><td colspan="5">MSVD</td></tr><tr><td>R@1↑</td><td>R@5↑</td><td>R@10↑</td><td>MdR↓</td><td>MnR↓</td><td>R@1↑</td><td>R@5↑</td><td>R@10↑</td><td>MdR↓</td><td>MnR↓</td></tr><tr><td>How2Cap (Shvetsova et al., 2024)</td><td>37.6</td><td>62.0</td><td>73.3</td><td>3.0</td><td>-</td><td>44.5</td><td>73.3</td><td>82.1</td><td>2.0</td><td>-</td></tr><tr><td>TVTSv2 (Zeng et al., 2023)</td><td>38.2</td><td>62.4</td><td>73.2</td><td>3.0</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>InternVideo (Wang et al., 2024e)</td><td>40.7</td><td>65.3</td><td>74.1</td><td>2.0</td><td>-</td><td>43.4</td><td>69.9</td><td>79.1</td><td>-</td><td>-</td></tr><tr><td>BT-Adapter (Liu et al., 2024)</td><td>40.9</td><td>64.7</td><td>73.5</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>ViCLIP (Wang et al., 2024d)</td><td>42.4</td><td>-</td><td>-</td><td>-</td><td>-</td><td>49.1</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>CLIP (Radford et al., 2021)</td><td>31.6</td><td>53.8</td><td>63.4</td><td>4.0</td><td>39.0</td><td>36.5</td><td>64.0</td><td>73.9</td><td>3.0</td><td>20.8</td></tr><tr><td>X-CoT (ours)</td><td>33.7</td><td>56.7</td><td>64.6</td><td>4.0</td><td>38.7</td><td>42.1</td><td>67.4</td><td>75.4</td><td>2.0</td><td>20.5</td></tr><tr><td>VLM2Vec (Jiang et al., 2024)</td><td>36.4</td><td>60.2</td><td>70.7</td><td>3.0</td><td>27.3</td><td>46.7</td><td>73.8</td><td>82.6</td><td>2.0</td><td>12.8</td></tr><tr><td>X-CoT (ours)</td><td>37.2</td><td>61.8</td><td>71.5</td><td>3.0</td><td>27.1</td><td>48.4</td><td>74.8</td><td>83.2</td><td>2.0</td><td>12.6</td></tr><tr><td>X-Pool (Gorti et al., 2022)</td><td>46.9</td><td>73.0</td><td>82.0</td><td>2.0</td><td>14.2</td><td>47.2</td><td>77.2</td><td>86.0</td><td>2.0</td><td>9.3</td></tr><tr><td>X-CoT (ours)</td><td>47.3</td><td>73.3</td><td>82.1</td><td>2.0</td><td>14.2</td><td>49.1</td><td>78.0</td><td>86.6</td><td>2.0</td><td>9.2</td></tr></table>
|
| 110 |
+
|
| 111 |
+
Table 1: Text-to-video retrieval performance comparison on MSR-VTT and MSVD.
|
| 112 |
+
|
| 113 |
+
<table><tr><td rowspan="2">Methods</td><td colspan="5">DiDeMo</td><td colspan="5">LSMDC</td></tr><tr><td>R@1↑</td><td>R@5↑</td><td>R@10↑</td><td>MdR↓</td><td>MnR↓</td><td>R@1↑</td><td>R@5↑</td><td>R@10↑</td><td>MdR↓</td><td>MnR↓</td></tr><tr><td>HiTeA (Ye et al., 2023)</td><td>36.1</td><td>60.1</td><td>70.3</td><td>-</td><td>-</td><td>15.5</td><td>31.1</td><td>39.8</td><td>-</td><td>-</td></tr><tr><td>TVTSv2 (Zeng et al., 2023)</td><td>34.6</td><td>61.9</td><td>71.5</td><td>3.0</td><td>-</td><td>17.3</td><td>32.5</td><td>41.4</td><td>20.0</td><td>-</td></tr><tr><td>InternVideo (Wang et al., 2024e)</td><td>31.5</td><td>57.6</td><td>68.2</td><td>3.0</td><td>-</td><td>17.6</td><td>32.4</td><td>40.2</td><td>23.0</td><td>-</td></tr><tr><td>BT-Adapter (Liu et al., 2024)</td><td>35.6</td><td>61.9</td><td>72.6</td><td>-</td><td>-</td><td>19.5</td><td>35.9</td><td>45.0</td><td>-</td><td>-</td></tr><tr><td>ViCLIP (Wang et al., 2024d)</td><td>18.4</td><td>-</td><td>-</td><td>-</td><td>-</td><td>20.1</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>CLIP (Radford et al., 2021)</td><td>25.2</td><td>49.4</td><td>59.0</td><td>6.0</td><td>49.7</td><td>15.9</td><td>28.4</td><td>35.3</td><td>31.0</td><td>129.6</td></tr><tr><td>X-CoT (ours)</td><td>29.7</td><td>52.1</td><td>60.6</td><td>5.0</td><td>49.2</td><td>17.6</td><td>29.0</td><td>36.1</td><td>31.0</td><td>129.4</td></tr><tr><td>VLM2Vec (Jiang et al., 2024)</td><td>33.5</td><td>57.7</td><td>68.4</td><td>4.0</td><td>34.1</td><td>18.2</td><td>33.6</td><td>41.4</td><td>23.0</td><td>119.1</td></tr><tr><td>X-CoT (ours)</td><td>35.8</td><td>59.2</td><td>68.8</td><td>3.0</td><td>33.9</td><td>18.9</td><td>35.1</td><td>41.9</td><td>23.0</td><td>118.9</td></tr><tr><td>X-Pool (Gorti et al., 2022)</td><td>44.6</td><td>72.5</td><td>81.0</td><td>2.0</td><td>15.1</td><td>23.6</td><td>42.9</td><td>52.4</td><td>9.0</td><td>54.1</td></tr><tr><td>X-CoT (ours)</td><td>45.1</td><td>73.1</td><td>81.8</td><td>2.0</td><td>15.0</td><td>23.8</td><td>43.8</td><td>53.1</td><td>8.0</td><td>54.0</td></tr></table>
|
| 114 |
+
|
| 115 |
+
Table 2: Text-to-video retrieval performance comparison on DiDeMo and LSMDC.
|
| 116 |
+
|
| 117 |
+
<table><tr><td>Method</td><td>R@1↑</td><td>R@5↑</td><td>R@10↑</td><td>MdR↓</td><td>MnR↓</td></tr><tr><td>Baseline</td><td>25.2</td><td>49.4</td><td>59.0</td><td>6.0</td><td>49.7</td></tr><tr><td>w/o CoT</td><td>22.3</td><td>39.4</td><td>58.9</td><td>6.0</td><td>49.7</td></tr><tr><td>w/o BT</td><td>29.3</td><td>51.8</td><td>60.4</td><td>5.0</td><td>49.4</td></tr><tr><td>X-CoT</td><td>29.7</td><td>52.1</td><td>60.6</td><td>5.0</td><td>49.2</td></tr></table>
|
| 118 |
+
|
| 119 |
+
Table 3: Ablation study of proposed X-CoT with CLIP-ViT-B/32 model ( $K = 20$ ) and upon DiDeMo Dataset.
|
| 120 |
+
|
| 121 |
+

|
| 122 |
+
Figure 5: top- $K$ discussion to facilitate X-CoT. Performance reported with CLIP model on DiDeMo dataset.
|
| 123 |
+
|
| 124 |
+
proposed method improves the baseline by $4.5\%$ on R@1.
|
| 125 |
+
|
| 126 |
+
# 4.4 Model Discussion
|
| 127 |
+
|
| 128 |
+
In Fig. 5, we discuss the top- $K$ ranges to facilitate X-CoT. X-CoT effectively identifies and ranks relevant candidates as $K$ grows, demonstrating an adaptivity to the pool scale. We further discuss
|
| 129 |
+
|
| 130 |
+
the explainability of the proposed X-CoT. Fig. 6 shows how X-CoT helps evaluate the retrieval model's behavior. With explanations, one can diagnose semantic factors that the embedding model may miss, e.g., the concept of "man" plays an important role. In addition, one can evaluate the text-video data quality with the proposed X-CoT. As shown in Fig. 7, X-CoT fails for the given text query. However, the incorrect retrieval can be attributed to flaws in the text by jointly examining the text caption, the relevant video, and the CoT explanations. This demonstrates the value of an explainable retrieval system for text-video data quality assessment. We provide success examples in Appendix H.
|
| 131 |
+
|
| 132 |
+
# 5 Conclusion
|
| 133 |
+
|
| 134 |
+
This work studied explainable retrieval systems and introduced X-CoT, an LLM CoT reasoning-based retrieval system that replaces embedding-model cosine-similarity ranking. To achieve this goal, we first expanded the existing benchmarks with additional video annotations and then constructed a pairwise CoT to provide reasoning and ranking. Experiments show that X-CoT improves
|
| 135 |
+
|
| 136 |
+

|
| 137 |
+
GT Caption: a man grabs at snakes and throws them around the room
|
| 138 |
+
X-Pool Rank-1st
|
| 139 |
+
|
| 140 |
+
# X-CoT
|
| 141 |
+
|
| 142 |
+
Reasoning: Video A does not mention any actions involving grabbing or throwing snakes, while Video B describes a man handling and throwing snakes.
|
| 143 |
+
|
| 144 |
+
1) Video A focuses on a python in a container, displaying its pattern, and mentions no actions of grabbing or throwing snakes.
|
| 145 |
+
|
| 146 |
+
2) Video B describes a man in a white shirt and blue pants handling a group of snakes in a confined space, which include grabbing and throwing snakes as per the query. Answer: B
|
| 147 |
+
|
| 148 |
+

|
| 149 |
+
X-CoT Rank-1st
|
| 150 |
+
|
| 151 |
+

|
| 152 |
+
X-CoT Rank-2nd
|
| 153 |
+
Figure 6: Explainability discussion. X-Pool fails in ranking highly similar videos. By comparison, X-CoT identifies the relevant video, with subtle differences clearly explained.
|
| 154 |
+
|
| 155 |
+

|
| 156 |
+
GT Caption: the object is travelling away, but then comes back some type of sign is seen in the bottom left. a stop sign shakes a red sign flaps in the breeze.
|
| 157 |
+
Figure 7: Explainability discussion. By jointly examining the text caption, relevant video, and the CoT reasoning by X-CoT, one can find the ambiguous (e.g., object) and minor (e.g., stop sign) claims in the text caption, misleading the retrieval and introducing noise.
|
| 158 |
+
|
| 159 |
+
CoT Reasoning: Based on the pairwise comparisons, Video 1 consistently aligns with the query's requirements:
|
| 160 |
+
|
| 161 |
+
- It includes a sign (possibly a stop sign) in the bottom left, which is a key element of the query.
|
| 162 |
+
- It describes an object (a car) traveling away and coming back, which matches the query's description.
|
| 163 |
+
|
| 164 |
+
Therefore, Video 1 closely matches the query's description of an object traveling away and coming back to reveal a sign, particularly a stop sign in the bottom left.
|
| 165 |
+
|
| 166 |
+
GT Caption Noise: The sign in the original video is not a stop sign.
|
| 167 |
+
|
| 168 |
+
retrieval performance while providing explanations, demonstrating its potential for interpretable multimodal retrieval. We hope this work can inspire future endeavors in explainable retrieval.
|
| 169 |
+
|
| 170 |
+
# Limitations
|
| 171 |
+
|
| 172 |
+
This work studies the explainable text-to-video retrieval upon LLM CoT reasoning. A potential limitation is that the reasoning and the ranking highly depend on the capacity of the LLM. While modern LLMs demonstrate strong generalization ability, they may be less effective in domain-specific or highly noisy text-video data scenarios, such as very long video comprehension. Considering that this
|
| 173 |
+
|
| 174 |
+
could be one of the first efforts in this direction, we will explore more challenging text-to-video retrieval scenarios in future work.
|
| 175 |
+
|
| 176 |
+
While the Bradley-Terry (BT) model provides a principled way to aggregate pairwise preferences, it also imposes certain constraints. The current formulation relies on binary win/loss outcomes and does not capture the uncertainty or nuanced reasoning strength that LLMs may provide. Future work could explore the incorporation of soft confidence scores or learnable aggregation strategies so that the richness of LLM reasoning in text-to-video retrieval can be better captured.
|
| 177 |
+
|
| 178 |
+
# Acknowledgments
|
| 179 |
+
|
| 180 |
+
This research was supported in part by the DEVCOM Army Research Laboratory under Contract W911QX-21-D-0001, the National Science Foundation under Grant 2502050, and the National Institutes of Health under Award R16GM159146. The content is solely the responsibility of the authors and does not necessarily represent the official views of the funding agencies.
|
| 181 |
+
|
| 182 |
+
# References
|
| 183 |
+
|
| 184 |
+
Lisa Anne Hendricks, Oliver Wang, Eli Shechtman, Josef Sivic, Trevor Darrell, and Bryan Russell. 2017. Localizing moments in video with natural language. In ICCV.
|
| 185 |
+
Ralph Allan Bradley and Milton E Terry. 1952. Rank analysis of incomplete block designs: I. the method of paired comparisons. Biometrika, 39(3/4):324-345.
|
| 186 |
+
David Chen and William B Dolan. 2011. Collecting highly parallel data for paraphrase evaluation. In Proceedings of the 49th annual meeting of the association for computational linguistics: human language technologies.
|
| 187 |
+
Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. 2009. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition. IEEE.
|
| 188 |
+
Jay DeYoung, Sarthak Jain, Nazneen Fatema Rajani, Eric Lehman, Caiming Xiong, Richard Socher, and Byron C. Wallace. 2020. ERASER: A benchmark to evaluate rationalized NLP models. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4443-4458, Online. Association for Computational Linguistics.
|
| 189 |
+
Finale Doshi-Velez and Been Kim. 2017. Towards a rigorous science of interpretable machine learning. arXiv preprint arXiv:1702.08608.
|
| 190 |
+
|
| 191 |
+
Yue Fan, Xiaojian Ma, Rongpeng Su, Jun Guo, Rujie Wu, Xi Chen, and Qing Li. 2024. Embodied videoagent: Persistent memory from egocentric videos and embodied sensors enables dynamic scene understanding. arXiv preprint arXiv:2501.00358.
|
| 192 |
+
Yuying Ge, Yixiao Ge, Xihui Liu, Dian Li, Ying Shan, Xiaohu Qie, and Ping Luo. 2022a. Bridging videotext retrieval with multiple choice questions. In CVPR.
|
| 193 |
+
Yuying Ge, Yixiao Ge, Xihui Liu, Jinpeng Wang, Jianping Wu, Ying Shan, Xiaohu Qie, and Ping Luo. 2022b. Miles: Visual bert pre-training with injected language semantics for video-text retrieval. In ECCV.
|
| 194 |
+
Rohit Girdhar, Alaaeldin El-Nouby, Zhuang Liu, Mannat Singh, Kalyan Vasudev Alwala, Armand Joulin, and Ishan Misra. 2023. Imagebind: One embedding space to bind them all. In CVPR.
|
| 195 |
+
Satya Krishna Gorti, Noel Vouitsis, Junwei Ma, Keyvan Golestan, Maksims Volkovs, Animesh Garg, and Guangwei Yu. 2022. X-pool: Cross-modal language-video attention for text-video retrieval. In CVPR.
|
| 196 |
+
Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2016. Deep residual learning for image recognition. In CVPR.
|
| 197 |
+
David R Hunter. 2004. Mm algorithms for generalized bradley-terry models. The annals of statistics, 32(1):384-406.
|
| 198 |
+
Soyeong Jeong, Kangsan Kim, Jinheon Baek, and Sung Ju Hwang. 2025. Videorag: Retrievalaugmented generation over video corpus. arXiv preprint arXiv:2501.05874.
|
| 199 |
+
Ziyan Jiang, Rui Meng, Xinyi Yang, Semih Yavuz, Yingbo Zhou, and Wenhu Chen. 2024. Vlm2vec: Training vision-language models for massive multimodal embedding tasks. arXiv preprint arXiv:2410.05160.
|
| 200 |
+
Dongxu Li, Junnan Li, Hongdong Li, Juan Carlos Niebles, and Steven CH Hoi. 2022. Align and prompt: Video-and-language pre-training with entity prompts. In CVPR.
|
| 201 |
+
Kunchang Li, Yali Wang, Yizhuo Li, Yi Wang, Yinan He, Limin Wang, and Yu Qiao. 2023. Unmasked teacher: Towards training-efficient video foundation models. In ICCV.
|
| 202 |
+
Bin Lin, Yang Ye, Bin Zhu, Jiaxi Cui, Munan Ning, Peng Jin, and Li Yuan. 2024. Video-LLaVA: Learning united visual representation by alignment before projection. In EMNLP.
|
| 203 |
+
Ruyang Liu, Chen Li, Yixiao Ge, Thomas H. Li, Ying Shan, and Ge Li. 2024. Bt-adapter: Video conversation is feasible without video instruction tuning. In CVPR.
|
| 204 |
+
|
| 205 |
+
Yikun Liu, Yajie Zhang, Jiayin Cai, Xiaolong Jiang, Yao Hu, Jiangchao Yao, Yanfeng Wang, and Weidi Xie. 2025. Lamra: Large multimodal model as your advanced retrieval assistant. In CVPR.
|
| 206 |
+
Huaishao Luo, Lei Ji, Ming Zhong, Yang Chen, Wen Lei, Nan Duan, and Tianrui Li. 2022. Clip4clip: An empirical study of clip for end to end video clip retrieval and captioning. Neurocomput., 508(C):293-304.
|
| 207 |
+
Yiwei Ma, Guohai Xu, Xiaoshuai Sun, Ming Yan, Ji Zhang, and Rongrong Ji. 2022. X-clip: End-to-end multi-grained contrastive learning for video-text retrieval. In ACM international conference on multimedia.
|
| 208 |
+
Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. 2024. Video-chatgpt: Towards detailed video understanding via large vision and language models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (ACL 2024).
|
| 209 |
+
Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, and 1 others. 2021. Learning transferable visual models from natural language supervision. In ICML.
|
| 210 |
+
Anna Rohrbach, Marcus Rohrbach, Niket Tandon, and Bernt Schiele. 2015. A dataset for movie description. In CVPR.
|
| 211 |
+
Nina Shvetsova, Anna Kukleva, Xudong Hong, Christian Rupprecht, Bernt Schiele, and Hilde Kuehne. 2024. Howtocaption: Prompting llms to transform video annotations at scale. In ECCV.
|
| 212 |
+
Hongjin SU, Howard Yen, Mengzhou Xia, Weijia Shi, Niklas Muennighoff, Han yu Wang, Liu Haisu, Quan Shi, Zachary S Siegel, Michael Tang, Ruoxi Sun, Jinsung Yoon, Sercan O Arik, Danqi Chen, and Tao Yu. 2025. BRIGHT: A realistic and challenging benchmark for reasoning-intensive retrieval. In ICLR.
|
| 213 |
+
Guohao Sun, Yue Bai, Xueying Yang, Yi Fang, Yun Fu, and Zhiqiang Tao. 2024a. Aligning out-of-distribution web images and caption semantics via evidential learning. In Proceedings of the ACM on Web Conference 2024, WWW '24, page 2271-2281, New York, NY, USA. Association for Computing Machinery.
|
| 214 |
+
Guohao Sun, Can Qin, Huazhu Fu, Linwei Wang, and Zhiqiang Tao. 2024b. Stllava-med: Self-training large language and vision assistant for medical question-answering. In EMNLP.
|
| 215 |
+
Guohao Sun, Can Qin, Jiamian Wang, Zeyuan Chen, Ran Xu, and Zhiqiang Tao. 2024c. Sq-llava: Self-questioning for large vision-language assistant. In ECCV.
|
| 216 |
+
|
| 217 |
+
Jiamian Wang, Guohao Sun, Pichao Wang, Dongfang Liu, Sohail Dianat, Majid Rabbani, Raghunteer Rao, and Zhiqiang Tao. 2024a. Text is mass: Modeling as stochastic embedding for text-video retrieval. In CVPR.
|
| 218 |
+
Jiamian Wang, Pichao Wang, Dongfang Liu, Qiang Guan, Sohail Dianat, Majid Rabbani, Raghuveer Rao, and Zhiqiang Tao. 2024b. Diffusion-inspired truncated sampler for text-video retrieval. In NeurIPS.
|
| 219 |
+
Junke Wang, Dongdong Chen, Zuxuan Wu, Chong Luo, Luowei Zhou, Yucheng Zhao, Yujia Xie, Ce Liu, YuGang Jiang, and Lu Yuan. 2022. Omnivl: One foundation model for image-language and video-language tasks. In NeurIPS.
|
| 220 |
+
Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, and 1 others. 2024c. Qwen2vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191.
|
| 221 |
+
Yi Wang, Yinan He, Yizhuo Li, Kunchang Li, Jiashuo Yu, Xin Ma, Xinhao Li, Guo Chen, Xinyuan Chen, Yaohui Wang, Ping Luo, Ziwei Liu, Yali Wang, Limin Wang, and Yu Qiao. 2024d. Internvid: A large-scale video-text dataset for multimodal understanding and generation. In ICLR.
|
| 222 |
+
Yi Wang, Kunchang Li, Yizhuo Li, Yinan He, Bingkun Huang, Zhiyu Zhao, Hongjie Zhang, Jilan Xu, Yi Liu, Zun Wang, and 1 others. 2024e. Internvideo: General video foundation models via generative and discriminative learning. In ECCV.
|
| 223 |
+
Wenhao Wu, Haipeng Luo, Bo Fang, Jingdong Wang, and Wanli Ouyang. 2023. Cap4video: What can auxiliary captions do for text-video retrieval? In CVPR.
|
| 224 |
+
Jun Xu, Tao Mei, Ting Yao, and Yong Rui. 2016. Msrvtt: A large video description dataset for bridging video and language. In CVPR.
|
| 225 |
+
Hongwei Xue, Yuchong Sun, Bei Liu, Jianlong Fu, Ruihua Song, Houqiang Li, and Jiebo Luo. 2022. Clipvip: Adapting pre-trained image-text model to video-language representation alignment. arXiv preprint arXiv:2209.06430.
|
| 226 |
+
Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Ehsan Azarnasab, Faisal Ahmed, Zicheng Liu, Ce Liu, Michael Zeng, and Lijuan Wang. 2023. Mm-react: Prompting chatgpt for multimodal reasoning and action. arXiv preprint arXiv:2303.11381.
|
| 227 |
+
Qinghao Ye, Guohai Xu, Ming Yan, Haiyang Xu, Qi Qian, Ji Zhang, and Fei Huang. 2023. Hitea: Hierarchical temporal-aware video-language pre-training. In ICCV.
|
| 228 |
+
Ziyun Zeng, Yixiao Ge, Zhan Tong, Xihui Liu, Shu-Tao Xia, and Ying Shan. 2023. Tvtsv2: Learning out-of-the-box spatiotemporal visual representations at scale. arXiv preprint arXiv:2305.14173.
|
| 229 |
+
|
| 230 |
+
Bin Zhu, Bin Lin, Munan Ning, Yang Yan, Jiaxi Cui, WANG HongFa, Yatian Pang, Wenhao Jiang, Junwu Zhang, Zongwei Li, Cai Wan Zhang, Zhifeng Li, Wei Liu, and Li Yuan. 2024. Languagebind: Extending video-language pretraining to n-modality by language-based semantic alignment. In ICLR.
|
| 231 |
+
|
| 232 |
+
<table><tr><td>Methods</td><td>R@1↑</td><td>R@5↑</td><td>R@10↑</td><td>MdR↓</td><td>MnR↓</td></tr><tr><td>Struct. ann. w/ CLIP</td><td>16.9</td><td>30.5</td><td>39.1</td><td>25.5</td><td>141.9</td></tr><tr><td>Struct. ann. w/ X-CoT</td><td>33.7</td><td>56.7</td><td>64.6</td><td>4.0</td><td>38.7</td></tr></table>
|
| 233 |
+
|
| 234 |
+
Table 4: Feeding structured video annotations to CLIP vs. using X-CoT on the MSR-VTT dataset.
|
| 235 |
+
|
| 236 |
+
<table><tr><td>Annotation Type</td><td>R@1↑</td><td>R@5↑</td><td>R@10↑</td><td>MdR↓</td><td>MnR↓</td></tr><tr><td>20% noisy tags</td><td>32.3</td><td>53.9</td><td>62.0</td><td>4.0</td><td>49.1</td></tr><tr><td>Complete annotations</td><td>33.7</td><td>56.7</td><td>64.6</td><td>4.0</td><td>38.7</td></tr></table>
|
| 237 |
+
|
| 238 |
+
Table 5: Effect of noisy structured annotations on X-CoT (MSR-VTT dataset).
|
| 239 |
+
|
| 240 |
+
# A Similar Frame Filtering
|
| 241 |
+
|
| 242 |
+
To ensure diversity in the frame annotations, we use a lightweight ResNet18 (He et al., 2016) model pretrained on ImageNet (Deng et al., 2009) to extract frame-level visual features. Each frame is resized, normalized, and passed through the network to obtain a feature embedding, which is L2-normalized. We then compare the current frame to all previously retained frames using cosine similarity, and if the maximum similarity is below a threshold (e.g., 0.95), the frame is kept. This process continues sequentially until the final set of non-duplicate frames is obtained, ensuring diversity and promoting frame-level annotation quality.
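A condensed sketch of this filter is shown below, assuming the frames are already loaded as PIL images; the exact transforms and bookkeeping in the released pipeline may differ.

```python
# Illustrative sketch of near-duplicate frame filtering with ResNet18 features.
import torch
import torch.nn.functional as F
from torchvision import models, transforms

resnet = models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1)
resnet.fc = torch.nn.Identity()          # keep the 512-d pooled feature
resnet.eval()

preprocess = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

@torch.no_grad()
def filter_similar_frames(frames, threshold=0.95):
    kept, kept_feats = [], []
    for frame in frames:                 # frames: list of PIL images (assumed input)
        feat = F.normalize(resnet(preprocess(frame).unsqueeze(0)), dim=-1)
        if kept_feats and (torch.cat(kept_feats) @ feat.T).max() > threshold:
            continue                     # near-duplicate of an already retained frame
        kept.append(frame)
        kept_feats.append(feat)
    return kept
```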
|
| 243 |
+
|
| 244 |
+
# B Structured Video Annotations as Input: CLIP vs. X-CoT
|
| 245 |
+
|
| 246 |
+
To test whether video annotations alone would suffice for CLIP, we use structured video annotations instead of the video embeddings and recompute cosine similarity with CLIP. As seen from Table 4, the performance drops compared to using X-CoT, suggesting that LLM reasoning is required to exploit long, verb-rich context.
|
| 247 |
+
|
| 248 |
+
# C Robustness to Noisy Annotations
|
| 249 |
+
|
| 250 |
+
To test the sensitivity of X-CoT to imperfect annotations, we perturb $20\%$ of tags in the structured annotations and re-run X-CoT on MSR-VTT, as shown in Table 5. The proposed X-CoT experiences a small performance decline in the noisy scenario, demonstrating the robustness to the annotation data quality. We also observe that the complete annotation gives improved performance, showing the effectiveness of the collected annotation data.
|
| 251 |
+
|
| 252 |
+
# GT Caption: people are singing on the beach
|
| 253 |
+
|
| 254 |
+

|
| 255 |
+
Figure 8: Example of collected annotations.
|
| 256 |
+
|
| 257 |
+
# Frame Captions:
|
| 258 |
+
|
| 259 |
+
1. "A group of young people are dancing energetically on a sandy beach."
|
| 260 |
+
2. "A group of children are playing and dancing on a sandy beach."
|
| 261 |
+
3. "A group of people, mostly young adults, are dancing and playing in a sandy area, enjoying a lively beach party."
|
| 262 |
+
4. "A young woman in a pink top and black jacket is dancing energetically on a beach, surrounded by a group of people."
|
| 263 |
+
|
| 264 |
+

|
| 265 |
+
|
| 266 |
+
Summary:"a group of people dancing and having fun on a sandy beach."
|
| 267 |
+
|
| 268 |
+
Objects: ["beach", "people", "text"],
|
| 269 |
+
Actions: ["display", "lead", "enjoy", "surround", "dance", "shoot", "run", "raise", "play"],
|
| 270 |
+
|
| 271 |
+
Scenes: ["group", "fun", "lively", "leading", "celebration", "playful", "party", "shirt", "young", "energetic", "yellow", "joyful"]
|
| 272 |
+
|
| 273 |
+
# D Additional Qualitative Video Annotation Examples
|
| 274 |
+
|
| 275 |
+
Fig. 8 and Fig. 9 show examples where structured video annotations provide more accurate scene descriptions than the original dataset captions. These cases reveal:
|
| 276 |
+
|
| 277 |
+
1. Semantic misalignment in GT labels as shown in Fig. 8 (e.g., labeling "dancing on a beach" as "singing").
|
| 278 |
+
2. Fine-grained object and action detection as shown in Fig. 9 (e.g., political figures identified by name, or scene attributes like "joyful" or "heated").
|
| 279 |
+
|
| 280 |
+
Such annotations serve as the foundation for X-CoT's reasoning mechanism and improve the overall retrieval reliability.
|
| 281 |
+
|
| 282 |
+
# E Quantitative Evaluation of Video Annotations
|
| 283 |
+
|
| 284 |
+
We introduce a proxy metric to assess the semantic faithfulness of the generated explanations. For each query in the MSR-VTT test set, we record the top-1 video embedding $v_{\mathrm{ori}}$ obtained from VLM2Vec. We then apply X-CoT to produce a re-ranked top-1 video embedding $v_{\mathrm{xcot}}$ and the corresponding explanation embedding $e_{\mathrm{expl}}$ (both derived from VLM2Vec).
|
| 285 |
+
|
| 286 |
+
GT Caption: fox news presidential debate recapping the GOP debate with donald trump and ted cruz
|
| 287 |
+
|
| 288 |
+

|
| 289 |
+
Frame captions:
|
| 290 |
+
|
| 291 |
+

|
| 292 |
+
Figure 9: Example of collected annotations.
|
| 293 |
+
|
| 294 |
+
1. "The image shows two men, Donald Trump and Ted Cruz, standing at podiums during a CNN GOP debate, with text at the bottom reading "Moments of Tension & Friendship on Display at CNN GOP Debate."
|
| 295 |
+
2. "Two men, are engaged in a CNN GOP debate, with the text "Moments of Tension & Friendship on Display at CNN GOP Debate" displayed at the bottom,"
|
| 296 |
+
3. "The image shows two men, Donald Trump and Ted Cruz, participating in a CNN GOP debate, with Trump on the left and Cruz on the right, both standing at podiums."
|
| 297 |
+
4. "The image shows two men, Donald Trump and Ted Cruz, engaged in a heated discussion during a CNN GOP debate."
|
| 298 |
+
Summary: "two men, donald trump and ted cruz, are engaged in a heated debate on a cnn."
|
| 299 |
+
Objects: ["friendship", "tension"]
|
| 300 |
+
Actions: ["display", "listen", "reading", "debate", "text", "overlay", "participate", "speak", "stand", "engage"],
|
| 301 |
+
Scenes: ["men", "stage"]
|
| 302 |
+
|
| 303 |
+
We compute the similarity between the explanation embedding and each of the two video embeddings as:
|
| 304 |
+
|
| 305 |
+
$$
\mathrm{sim}_{\mathrm{baseline}} = \cos\left\langle e_{\mathrm{expl}},\, v_{\mathrm{ori}} \right\rangle, \tag{1}
$$

$$
\mathrm{sim}_{\mathrm{xcot}} = \cos\left\langle e_{\mathrm{expl}},\, v_{\mathrm{xcot}} \right\rangle. \tag{2}
$$
|
| 312 |
+
|
| 313 |
+
Averaging these values across all queries yields $\overline{\mathrm{sim}}_{\mathrm{baseline}} = 0.273$ and $\overline{\mathrm{sim}}_{\mathrm{xcot}} = 0.350$. The $+0.077$ gain demonstrates that the explanation embeddings align more strongly with the X-CoT re-ranked results than with the baseline retrieval, indicating that the explanations are semantically faithful to the system's final decision.
|
| 314 |
+
|
| 315 |
+
To further guide future human-centered evaluation, established explanation-quality frameworks such as (Doshi-Velez and Kim, 2017) and (DeYoung et al., 2020) can be applied to assess interpretability and rationalization.
|
| 316 |
+
|
| 317 |
+
# F X-CoT Pairwise Ranking Algorithm
|
| 318 |
+
|
| 319 |
+
The pseudo-code for the pairwise ranking is provided in Algorithm 1. Given the coarse top-$K$ list $V = [v_{1},\dots,v_{K}]$ (we set $K = 20$), X-CoT performs at most $P = 10$ sliding-window sweeps. During each sweep, the list is scanned from left to right; for every adjacent pair $(v_{i},v_{i + 1})$, an LLM receives the query plus the two structured video descriptions and must reply with its choice and a reason. If the answer favors $v_{i + 1}$, the two items are swapped.
|
| 320 |
+
|
| 321 |
+
Complexity. In the best-case scenario, the number of pair-wise comparisons is $(K - 1)$ , and in the worst case, $P(K - 1)$ .
|
| 322 |
+
|
| 323 |
+
LRU Caching. The comparison routine is protected by an LRU cache keyed on the triple
|
| 324 |
+
|
| 325 |
+
(query, $v_{i}, v_{i+1}$ ). Thus, although up to $(K-1)P = 200$ comparisons are possible, only $\sim 30-40$ unique LLM calls are required on average, saving $\approx 85\%$ of LLM calls.
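A compact sketch of the cached sliding-window sweeps is given below; `compare_pair(query, a, b)` stands for any pairwise LLM judgment returning an ('A' or 'B', reason) tuple, the candidates must be hashable (e.g., annotation strings or ids), and the early-exit check is an illustrative addition consistent with running at most $P$ passes.

```python
# Illustrative sketch of adjacent-pair sweeps with caching of pairwise verdicts.
from functools import lru_cache

def rank_with_sweeps(query, candidates, compare_pair, passes=10):
    @lru_cache(maxsize=None)
    def cached(vi, vj):                       # the query is fixed, so the pair is the key
        return compare_pair(query, vi, vj)

    order = list(candidates)
    for _ in range(passes):
        swapped = False
        for i in range(len(order) - 1):
            winner, _reason = cached(order[i], order[i + 1])
            if winner == "B":                 # right-hand candidate preferred
                order[i], order[i + 1] = order[i + 1], order[i]
                swapped = True
        if not swapped:                       # early exit when a pass changes nothing
            break
    return order
```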
|
| 326 |
+
|
| 327 |
+
Global Aggregation. All newly observed win-loss edges are converted to ability scores $\theta_{k}$ via a Bradley-Terry maximum-likelihood fit (weak Gaussian prior $\alpha = 10^{-3}$ ). Sorting $\theta_{k}$ in descending order yields the final ranking $\hat{V}$ . In addition to the ranking, the individual explanations collected during each pairwise comparison are concatenated and summarized in a final single-shot LLM call.
|
| 328 |
+
|
| 329 |
+
# G Efficiency and Scalability
|
| 330 |
+
|
| 331 |
+
In Table 6, we report the runtime and GPU memory cost under different hardware settings (e.g., number of NVIDIA RTX 3090 GPUs). As shown in Table 6, the runtime per query can be drastically reduced as we scale the number of GPUs, becoming comparable to the CLIP-based embedding model (X-Pool) and the MLLM-based embedding model (VLM2Vec). This enhances the feasibility of real-world deployment. The speedup is achieved through several engineering strategies, including a sliding window, caching, odd-even parallelization, and GPU parallelization.
|
| 332 |
+
|
| 333 |
+
Sliding Window and Caching. Since the embedding model already provides a good initial ranking, our proposed method, which builds atop embedding models, only needs to perform a small number of local swaps, rather than running a total of $K(K - 1) = 380$ LLM calls for top-20 ( $K = 20$ ) candidates per query. We adopt a sliding window strategy that compares only adjacent video pairs (e.g., (v1, v2), (v2, v3), ..., ) across multiple passes. Since many of the pairwise comparisons recur across the passes, we cache the pairwise results to avoid repetitive LLM calls. We empirically find that such a strategy can reduce the total number of LLM calls per query by $90\%$ on average (e.g., less than 40 LLM calls per query).
|
| 334 |
+
|
| 335 |
+
Odd-Even Parallelization. In each sliding window pass, for $K = 20$ there will be 19 adjacent pairs. We partition these pairs into odd (e.g., (v1, v2), (v3, v4), ..., (v19, v20)) and even (e.g., (v2, v3), (v4, v5), ..., (v18, v19)) groups, where both the odd and even groups consist of non-overlapping pairs. The comparisons within each group are executed in parallel via multi-threaded dispatch, thereby reducing the wall-clock latency of each pass.
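A minimal sketch of a single odd-even pass is shown below; `compare` is a placeholder for a pairwise LLM comparison returning 'A' or 'B'.

```python
# Illustrative sketch of one odd-even pass: non-overlapping adjacent pairs run concurrently.
from concurrent.futures import ThreadPoolExecutor

def odd_even_pass(order, compare):
    for start in (0, 1):                                  # odd group, then even group
        pairs = [(i, i + 1) for i in range(start, len(order) - 1, 2)]
        with ThreadPoolExecutor() as pool:
            winners = list(pool.map(lambda p: compare(order[p[0]], order[p[1]]), pairs))
        for (i, j), winner in zip(pairs, winners):
            if winner == "B":                             # right-hand candidate preferred
                order[i], order[j] = order[j], order[i]
    return order
```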
|
| 336 |
+
|
| 337 |
+
GPU Parallelization. For each query, multiple
|
| 338 |
+
|
| 339 |
+
Algorithm 1: X-COT RANKING VIA PAIRWISE COMPARISONS
|
| 340 |
+
Input: text query $q$; top-$K$ candidate list $\mathcal{V} = [v_{1},\dots ,v_{K}]$; number of passes $P = 10$.
Output: sorted list $\hat{\mathcal{V}}$; pairwise explanations $\mathcal{R}$; final explanation $\mathcal{E}$.
Initialize the pairwise win log $\mathcal{L}\gets []$ (used for Bradley-Terry aggregation) and the reason list $\mathcal{R}\gets []$. COMPARELLM takes the query and a pair of candidates and returns the closer match to the query together with a reason; EXPLAINLLM summarizes the full set of pairwise reasons into a final explanation.

for $p\gets 1$ to $P$ do
  for $i\gets 1$ to $K - 1$ do
    $(w,r)\gets \mathrm{COMPARELLM}(q,\mathcal{V}[i],\mathcal{V}[i + 1])$ // LLM returns winner $w$ and reason $r$
    Append $r$ to $\mathcal{R}$ and $w$ to $\mathcal{L}$ // log result and explanation
    if $w = \mathcal{V}[i + 1]$ then swap $\mathcal{V}[i]$ and $\mathcal{V}[i + 1]$ // if the right candidate wins, swap positions

$\hat{\mathcal{V}}\gets$ BRADLEYTERRYAGGREGATE($\mathcal{L}$)
$\mathcal{E}\gets$ EXPLAINLLM($\mathcal{R}$)
return $(\hat{\mathcal{V}},\mathcal{E},\mathcal{R})$
|
| 345 |
+
|
| 346 |
+
<table><tr><td>Methods (#GPU)</td><td>X-CoT(×1)</td><td>X-CoT(×2)</td><td>X-CoT(×4)</td><td>X-CoT(×8)</td><td>X-CoT(×32)</td><td>X-Pool</td><td>VLM2Vec</td></tr><tr><td>GPU Memory (GB)</td><td>16.7</td><td>33.4</td><td>64.0</td><td>130.2</td><td>535.0</td><td>4.0</td><td>16.6</td></tr><tr><td>Runtime / query (s)</td><td>3.6</td><td>1.8</td><td>0.9</td><td>0.45</td><td>0.10</td><td>0.11</td><td>0.88</td></tr></table>
|
| 347 |
+
|
| 348 |
+
Table 6: Runtime and memory profile of X-CoT with increasing GPU parallelism alongside embedding-based retrieval baselines. A local open-source LLM (Qwen 2.5-7B-Instruct-1M) was used (no API cost).
|
| 349 |
+
|
| 350 |
+
LLM calls (i.e., pairwise comparisons) are independent and can be parallelized. We leverage GPU-level concurrency to distribute the LLM calls across multiple devices. Together with the above engineering strategies, we reduce the latency as shown in Table 6.
|
| 351 |
+
|
| 352 |
+
Since we adopt the open-source LLM (Qwen 2.5-7B-Instruct-1M) and the local hardware, no direct monetary cost is incurred.
|
| 353 |
+
|
| 354 |
+
# H X-CoT Ranking Examples
|
| 355 |
+
|
| 356 |
+
Fig. 10 illustrates how our method re-ranks candidate videos through pairwise reasoning and global aggregation. The multiple pairwise judgments culminate in the accurate re-ranking of a video showing a protester in Brazil speaking to a reporter, precisely matching the query.
|
| 357 |
+
|
| 358 |
+
# I Embedding Model Details and Complete Benchmarking Results
|
| 359 |
+
|
| 360 |
+
We evaluate two zero-shot models, CLIP (Radford et al., 2021) and VLM2Vec (Jiang et al., 2024), alongside a fine-tuned model, X-Pool (Gorti et al., 2022), to assess retrieval performance across diverse settings. The complete benchmarking results for MSR-VTT (Xu et al., 2016) and MSVD (Chen and Dolan, 2011) are presented in Table 7, and for DiDeMo (Anne Hendricks et al., 2017) and LSMDC (Rohrbach et al., 2015) are presented in Table 8.
|
| 361 |
+
|
| 362 |
+
# GT Caption: a woman interviewing about her part in a protest happening in brazil
|
| 363 |
+
|
| 364 |
+

|
| 365 |
+
X-Pool Rank-1st
|
| 366 |
+
|
| 367 |
+

|
| 368 |
+
X-Pool Rank-2nd
|
| 369 |
+
|
| 370 |
+

|
| 371 |
+
X-Pool Rank-3rd
|
| 372 |
+
|
| 373 |
+

|
| 374 |
+
X-Pool Rank-4th
|
| 375 |
+
|
| 376 |
+
# X-CoT Ranking and Explanation
|
| 377 |
+
|
| 378 |
+
# Pair-wise Reasoning:
|
| 379 |
+
|
| 380 |
+
-(Video 4 > Video 3): 'Video 4 more closely aligns with the query as it explicitly mentions a young woman speaking directly to the camera in a live news broadcast, identifying herself as a protester in Brazil.'
|
| 381 |
+
-(Video 4 > Video 2): 'Video 4 more closely matches the query as it explicitly mentions a woman speaking about her role in a protest in Brazil.'
|
| 382 |
+
-(Video 4 > Video 1): 'Video 4 more closely aligns with the query as it explicitly mentions a woman speaking directly to the camera in a live news broadcast, identifying herself as a protester in Brazil.'
|
| 383 |
+
-(Video 2 > Video 3): 'Video 2 includes a woman in a blue and purple outfit speaking in a market setting, which aligns closely with the query of a woman interviewing about her part in a protest in Brazil.'
|
| 384 |
+
-(Video 1 > Video 2): 'Video 1 includes a woman walking confidently down a busy street, which could be related to a protest.'
|
| 385 |
+
|
| 386 |
+
Explanation: Video 4 is selected as the top match because it explicitly meets all the criteria specified in the query. It features a woman speaking directly about her role in Brazil, which is precisely what the query seeks. This makes Video 4 the most relevant choice among the options provided.
|
| 387 |
+
|
| 388 |
+
LLM Re-Ranked Order: [4, 1, 2, 3]
|
| 389 |
+
|
| 390 |
+
(On-screen text in the top-ranked video frame: "BRAZIL PROTESTS / Fresh protests in Rio", BBC World News.)
|
| 395 |
+
|
| 396 |
+

|
| 397 |
+
X-CoT Rank-1st
|
| 398 |
+
|
| 399 |
+

|
| 400 |
+
X-CoT Rank-2nd
|
| 401 |
+
|
| 402 |
+

|
| 403 |
+
X-CoT Rank-3rd
|
| 404 |
+
|
| 405 |
+

|
| 406 |
+
X-CoT Rank-4th
|
| 407 |
+
Figure 10: Successful ranking with X-CoT on a query about a protest in Brazil. The top result is selected through stepwise pairwise comparisons, supported by natural language justifications.
|
| 408 |
+
|
| 409 |
+
<table><tr><td rowspan="2">Methods</td><td colspan="5">MSR-VTT</td><td colspan="5">MSVD</td></tr><tr><td>R@1↑</td><td>R@5↑</td><td>R@10↑</td><td>MdR↓</td><td>MnR↓</td><td>R@1↑</td><td>R@5↑</td><td>R@10↑</td><td>MdR↓</td><td>MnR↓</td></tr><tr><td>ALPRO (Li et al., 2022)</td><td>24.1</td><td>44.7</td><td>55.4</td><td>8.0</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>BridgeFormer (Ge et al., 2022a)</td><td>26.0</td><td>46.4</td><td>56.4</td><td>7.0</td><td>-</td><td>43.6</td><td>74.9</td><td>84.9</td><td>2.0</td><td>-</td></tr><tr><td>MILES (Ge et al., 2022b)</td><td>26.1</td><td>47.2</td><td>56.9</td><td>7.0</td><td>-</td><td>44.4</td><td>76.2</td><td>87.0</td><td>2.0</td><td>-</td></tr><tr><td>HiTeA (Ye et al., 2023)</td><td>29.9</td><td>54.2</td><td>62.9</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>OmniVL (Wang et al., 2022)</td><td>34.6</td><td>58.4</td><td>66.6</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>ImageBind (Girdhar et al., 2023)</td><td>36.8</td><td>61.8</td><td>70.0</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>How2Cap (Shvetsova et al., 2024)</td><td>37.6</td><td>62.0</td><td>73.3</td><td>3.0</td><td>-</td><td>44.5</td><td>73.3</td><td>82.1</td><td>2.0</td><td>-</td></tr><tr><td>TVTSv2 (Zeng et al., 2023)</td><td>38.2</td><td>62.4</td><td>73.2</td><td>3.0</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>InternVideo (Wang et al., 2024e)</td><td>40.7</td><td>65.3</td><td>74.1</td><td>2.0</td><td>-</td><td>43.4</td><td>69.9</td><td>79.1</td><td>-</td><td>-</td></tr><tr><td>BT-Adapter (Liu et al., 2024)</td><td>40.9</td><td>64.7</td><td>73.5</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>ViCLIP (Wang et al., 2024d)</td><td>42.4</td><td>-</td><td>-</td><td>-</td><td>-</td><td>49.1</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>LanguageBind (Zhu et al., 2024)</td><td>42.6</td><td>65.4</td><td>75.5</td><td>-</td><td>-</td><td>52.2</td><td>79.4</td><td>87.3</td><td>-</td><td>-</td></tr><tr><td>LamRA (Liu et al., 2025)</td><td>44.7</td><td>68.6</td><td>78.6</td><td>-</td><td>-</td><td>52.4</td><td>79.8</td><td>87.0</td><td>-</td><td>-</td></tr><tr><td>CLIP (Radford et al., 2021)</td><td>31.6</td><td>53.8</td><td>63.4</td><td>4.0</td><td>39.0</td><td>36.5</td><td>64.0</td><td>73.9</td><td>3.0</td><td>20.8</td></tr><tr><td>X-CoT (ours)</td><td>33.7</td><td>56.7</td><td>64.6</td><td>4.0</td><td>38.7</td><td>42.1</td><td>67.4</td><td>75.4</td><td>2.0</td><td>20.5</td></tr><tr><td>VLM2Vec (Jiang et al., 2024)</td><td>36.4</td><td>60.2</td><td>70.7</td><td>3.0</td><td>27.3</td><td>46.7</td><td>73.8</td><td>82.6</td><td>2.0</td><td>12.8</td></tr><tr><td>X-CoT (ours)</td><td>37.2</td><td>61.8</td><td>71.5</td><td>3.0</td><td>27.1</td><td>48.4</td><td>74.8</td><td>83.2</td><td>2.0</td><td>12.6</td></tr><tr><td>X-Pool (Gorti et al., 2022)</td><td>46.9</td><td>73.0</td><td>82.0</td><td>2.0</td><td>14.2</td><td>47.2</td><td>77.2</td><td>86.0</td><td>2.0</td><td>9.3</td></tr><tr><td>X-CoT (ours)</td><td>47.3</td><td>73.3</td><td>82.1</td><td>2.0</td><td>14.2</td><td>49.1</td><td>78.0</td><td>86.6</td><td>2.0</td><td>9.2</td></tr></table>
|
| 410 |
+
|
| 411 |
+
Table 7: Complete Text-to-video retrieval performance comparison on MSR-VTT and MSVD.
|
| 412 |
+
|
| 413 |
+
<table><tr><td rowspan="2">Methods</td><td colspan="5">DiDeMo</td><td colspan="5">LSMDC</td></tr><tr><td>R@1↑</td><td>R@5↑</td><td>R@10↑</td><td>MdR↓</td><td>MnR↓</td><td>R@1↑</td><td>R@5↑</td><td>R@10↑</td><td>MdR↓</td><td>MnR↓</td></tr><tr><td>ALPRO (Li et al., 2022)</td><td>23.8</td><td>47.3</td><td>57.9</td><td>6.0</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>BridgeFormer (Ge et al., 2022a)</td><td>25.6</td><td>50.6</td><td>61.1</td><td>5.0</td><td>-</td><td>12.2</td><td>25.9</td><td>32.2</td><td>42.0</td><td>-</td></tr><tr><td>MILES (Ge et al., 2022b)</td><td>27.2</td><td>50.3</td><td>63.6</td><td>5.0</td><td>-</td><td>11.1</td><td>24.7</td><td>30.6</td><td>50.7</td><td>-</td></tr><tr><td>HiTeA (Ye et al., 2023)</td><td>36.1</td><td>60.1</td><td>70.3</td><td>-</td><td>-</td><td>15.5</td><td>31.1</td><td>39.8</td><td>-</td><td>-</td></tr><tr><td>OmniVL (Wang et al., 2022)</td><td>33.3</td><td>58.7</td><td>68.5</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>How2Cap (Shvetsova et al., 2024)</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>17.3</td><td>31.7</td><td>38.6</td><td>29.0</td></tr><tr><td>TVTSv2 (Zeng et al., 2023)</td><td>34.6</td><td>61.9</td><td>71.5</td><td>3.0</td><td>-</td><td>17.3</td><td>32.5</td><td>41.4</td><td>20.0</td><td>-</td></tr><tr><td>InternVideo (Wang et al., 2024e)</td><td>31.5</td><td>57.6</td><td>68.2</td><td>3.0</td><td>-</td><td>17.6</td><td>32.4</td><td>40.2</td><td>23.0</td><td>-</td></tr><tr><td>BT-Adapter (Liu et al., 2024)</td><td>35.6</td><td>61.9</td><td>72.6</td><td>-</td><td>-</td><td>19.5</td><td>35.9</td><td>45.0</td><td>-</td><td>-</td></tr><tr><td>ViCLIP (Wang et al., 2024d)</td><td>18.4</td><td>-</td><td>-</td><td>-</td><td>-</td><td>20.1</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>LanguageBind (Zhu et al., 2024)</td><td>37.8</td><td>63.2</td><td>73.4</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>CLIP (Radford et al., 2021)</td><td>25.2</td><td>49.4</td><td>59.0</td><td>6.0</td><td>49.7</td><td>15.9</td><td>28.4</td><td>35.3</td><td>31.0</td><td>129.6</td></tr><tr><td>X-CoT (ours)</td><td>29.7</td><td>52.1</td><td>60.6</td><td>5.0</td><td>49.2</td><td>17.6</td><td>29.0</td><td>36.1</td><td>31.0</td><td>129.4</td></tr><tr><td>VLM2Vec (Jiang et al., 2024)</td><td>33.5</td><td>57.7</td><td>68.4</td><td>4.0</td><td>34.1</td><td>18.2</td><td>33.6</td><td>41.4</td><td>23.0</td><td>119.1</td></tr><tr><td>X-CoT (ours)</td><td>35.8</td><td>59.2</td><td>68.8</td><td>3.0</td><td>33.9</td><td>18.9</td><td>35.1</td><td>41.9</td><td>23.0</td><td>118.9</td></tr><tr><td>X-Pool (Gorti et al., 2022)</td><td>44.6</td><td>72.5</td><td>81.0</td><td>2.0</td><td>15.1</td><td>23.6</td><td>42.9</td><td>52.4</td><td>9.0</td><td>54.1</td></tr><tr><td>X-CoT (ours)</td><td>45.1</td><td>73.1</td><td>81.8</td><td>2.0</td><td>15.0</td><td>23.8</td><td>43.8</td><td>53.1</td><td>8.0</td><td>54.0</td></tr></table>
|
| 414 |
+
|
| 415 |
+
Table 8: Complete Text-to-video retrieval performance comparison on DiDeMo and LSMDC.
|
EMNLP/2025/X-CoT_ Explainable Text-to-Video Retrieval via LLM-based Chain-of-Thought Reasoning/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:83e30350d378fef50c4b9f600e456cc77330668bd837aa65a4fb2daf0a34195c
|
| 3 |
+
size 740015
|
EMNLP/2025/X-CoT_ Explainable Text-to-Video Retrieval via LLM-based Chain-of-Thought Reasoning/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:517aa5f967e7b2a0d13e2e7307e809d8fd9409c00201cdb9b6e52e35aa02563c
|
| 3 |
+
size 461227
|
EMNLP/2025/X-FLoRA_ Cross-modal Federated Learning with Modality-expert LoRA for Medical VQA/a577bafd-b215-4394-989d-96df20b9938a_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f32d67ea10ace528a4a987ef45f445c82a2fec3407ed530d050c027f360d3b37
|
| 3 |
+
size 165060
|
EMNLP/2025/X-FLoRA_ Cross-modal Federated Learning with Modality-expert LoRA for Medical VQA/a577bafd-b215-4394-989d-96df20b9938a_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5f462cb9c1431065d0f69f72f64a3b233b2613d4cbda85abaf8605500dbc0fbc
|
| 3 |
+
size 206451
|
EMNLP/2025/X-FLoRA_ Cross-modal Federated Learning with Modality-expert LoRA for Medical VQA/a577bafd-b215-4394-989d-96df20b9938a_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cde37dde255e64be55f4b00cf7fb1fe9983b3de052f1c80855fcadf29047a714
|
| 3 |
+
size 22604275
|
EMNLP/2025/X-FLoRA_ Cross-modal Federated Learning with Modality-expert LoRA for Medical VQA/full.md
ADDED
|
@@ -0,0 +1,844 @@
| 1 |
+
# X-FLoRA: Cross-modal Federated Learning with Modality-expert LoRA for Medical VQA
|
| 2 |
+
|
| 3 |
+
Min Hyuk Kim, Chang Heon Kim, Seok Bong Yoo*
|
| 4 |
+
|
| 5 |
+
Department of Artificial Intelligence Convergence, Chonnam National University, Gwangju, Korea sbyoo@jnu.ac.kr *Corresponding author
|
| 6 |
+
|
| 7 |
+

|
| 8 |
+
Figure 1: (a) Illustration for problem definition of cross-modal federated learning, highlighting the complementary strengths of each modality in visual question answering tasks. (b) Visualization of limitations of existing domain adaptation methods using Gaussian kernel density estimation, emphasizing the challenges of adapting MRI and CT modalities. (c) Our proposed method for cross-modal federated learning.
|
| 9 |
+
|
| 10 |
+
# Abstract
|
| 11 |
+
|
| 12 |
+
Medical visual question answering (VQA) and federated learning (FL) have emerged as vital approaches for enabling privacy-preserving, collaborative learning across clinical institutions. However, both approaches face significant challenges in cross-modal FL scenarios, where each client possesses unpaired images from only one modality. To address this limitation, we propose X-FLoRA, a cross-modal FL framework that uses modality-expert low-rank adaptation (LoRA) for medical VQA. Specifically, X-FLoRA enables the synthesis of images from one modality to another without requiring data sharing between clients. This is achieved by training a backward translation model within a federated asymmetric translation scheme that integrates clinical semantics from textual data. Additionally, X-FLoRA introduces modality-expert LoRA, which fine-tunes separate LoRA modules to strengthen modality-specific representations in the VQA task. The server aggregates the trained backward translation models and fine-tuned LoRA modules using discriminator quality scores and expert-aware weighting, which regulate the relative contributions from different clients. Experiments were conducted on VQA datasets encompassing different medical modalities, and the results demonstrate that X-FLoRA outperforms existing FL methods in terms of VQA performance.
|
| 15 |
+
|
| 16 |
+
# 1 Introduction
|
| 17 |
+
|
| 18 |
+
Medical visual question answering (VQA) (Lin et al., 2023; Khare et al., 2021) has emerged as a promising tool in computer-aided diagnosis, supporting clinical decision-making by generating answers to diagnostic questions based on medical images. However, the broader application of VQA methods is often constrained by data privacy concerns. Federated learning (FL) has gained significant attention for enabling privacy-preserving, decentralized model training across clinical institutions. In response to this, several federated VQA frameworks (Lao et al., 2023; Zhu et al., 2024; Tobaben et al., 2024) have been proposed to address medical VQA tasks without requiring patient data sharing. Despite this progress, federated VQA remains limited in cross-modal FL settings (Qayyum et al., 2022; Dai et al., 2024), where each client only possesses data from a single imaging modality and lacks paired samples from other modalities.
|
| 19 |
+
|
| 20 |
+
In typical clinical cross-modal FL scenarios, individual clients may have access to only magnetic resonance imaging (MRI) or computed tomography (CT) data, but not both. These modalities differ
|
| 21 |
+
|
| 22 |
+
substantially due to their distinct imaging mechanisms and diagnostic purposes—MRI uses magnetic fields and radio waves, whereas CT relies on ionizing radiation. As shown in Fig. 1(a), MRI and CT reports from electronic medical records (EMRs) for the same brain region often highlight different pathological features. For instance, an MRI report may describe a “subcutaneous temporal metastasis,” while a CT report for the same region may note a “re-bleed,” reflecting their respective strengths in soft tissue characterization and hemorrhage detection. Similarly, in the GPT-4-based medical VQA dataset (Li et al., 2023), modality-aligned responses such as “metastatic lesion” for MRI and “second hemorrhage” for CT further demonstrate the need for modality-aware understanding (Achiam et al., 2023).
|
| 23 |
+
|
| 24 |
+
The challenges of cross-modal FL arise not only from inter-modality gaps but also from intra-modality variations due to differences in imaging devices and patient characteristics. To address these issues, domain adaptation (DA) techniques have been explored (Zhao et al., 2022). As a preliminary study, we employ ResNet50 as a feature extractor to visualize feature distributions of MRI and CT datasets using the approach proposed by Chen et al. The results, shown in Fig. 1(b), compare feature distributions before and after applying DA. The first and second rows represent intra-modal DA effects for CT and MRI, respectively, while the third row illustrates the cross-modal DA impact. These results indicate that DA alone is insufficient for addressing cross-modal heterogeneity (Chen et al., 2020) and can even lead to performance degradation (Yang et al., 2024).
|
| 25 |
+
|
| 26 |
+
To overcome these challenges, we propose X-FLoRA, a cross-modal FL framework that incorporates modality-expert low-rank adaptation (LoRA) for medical VQA, as illustrated in Fig. 1(c). X-FLoRA consists of two primary phases: federated asymmetric translation and federated VQA fine-tuning. In the first phase, each client independently trains a text-driven backward translation model—either CT-to-MRI (C2M) or MRI-to-CT (M2C)—using its data. During training, only the backward model is updated, while the forward model remains frozen. These translation models integrate images with their corresponding EMR reports to capture clinically significant textual features that may not be visually evident. Clients then upload their trained backward translation weights to a central server, which aggregates them.
|
| 27 |
+
|
| 28 |
+
In the second phase, modality-expert LoRA modules fine-tune representations for each modality—MRI, CT, and text—independently. Given the distinct characteristics of each modality, these specialized modules improve the quality of modality-specific representation. The server aggregates the fine-tuned LoRA modules using modality-specific aggregation, balancing the contributions from real and synthetic data across clients. This design allows X-FLoRA to effectively address the limitations of clinical cross-modal FL environments, enhancing both modality diversity and modality-specific representation without requiring any data sharing.
|
| 29 |
+
|
| 30 |
+
We summarize our main contributions as follows:
|
| 31 |
+
|
| 32 |
+
- To the best of our knowledge, we propose the first unified VQA framework to mitigate cross-modal heterogeneity by combining a cross-modal translation strategy with modality-specific expert fine-tuning. This approach improves both modality diversity and representation quality in federated VQA.
|
| 33 |
+
- We propose an FL framework for asymmetric translation, where each client trains only the backward text-driven model to complement visual features with clinical insights derived from EMRs. Furthermore, aggregation based on discriminator quality scores increases the influence of clients with higher-quality translation models.
|
| 34 |
+
- We introduce modality-expert LoRA, a lightweight and modality-specific adaptation mechanism. Separate LoRA modules are applied to each modality, and a modality-specific aggregation strategy ensures a balanced integration of real and synthesized data from diverse clients.
|
| 35 |
+
|
| 36 |
+
# 2 Related Work
|
| 37 |
+
|
| 38 |
+
# 2.1 Visual Question Answering
|
| 39 |
+
|
| 40 |
+
VQA (Ji et al., 2024; Naik et al., 2024; Xing et al., 2024; Song et al., 2024; Li et al., 2024a, 2023; Liu et al., 2023; Wang et al., 2024a; Yan et al., 2024) is an interdisciplinary task that integrates computer vision and natural language processing to generate answers to natural language questions about visual content. Building on the progress in general-domain VQA, there has been a surge of interest in
|
| 41 |
+
|
| 42 |
+
adapting VQA for medical applications. Recent studies show that autoregressive decoder-based large language models (LLMs) and visual language models (VLMs), when fine-tuned on medical datasets, demonstrate strong performance on clinical tasks. For example, BioMistral (Labrak et al., 2024), adapted from Mistral-7B (Jiang et al., 2023), has shown impressive results on complex medical question answering benchmarks, such as medical licensing exams and PubMed-based queries (Jin et al., 2019). Similarly, specialized medical VLMs like LLaVA-Med (Li et al., 2023), derived from LLaVA (Liu et al., 2023), and MedFlamingo (Moor et al., 2023), based on Open-Flamingo (Awadalla et al., 2023), have demonstrated effectiveness in radiological (Lau et al., 2018) and pathological (He et al., 2020) VQA tasks. Despite these advancements, current methods are limited in FL settings involving cross-modal medical data, where they struggle to model the inherent differences in visual and textual modality characteristics.
|
| 43 |
+
|
| 44 |
+
# 2.2 Vertical Multimodal Federated Learning
|
| 45 |
+
|
| 46 |
+
Protecting patient privacy has become a critical concern in the digital healthcare era, especially given the risks associated with misuse or unauthorized commercialization of sensitive data (Chiruvella et al., 2021). To address these issues, several vertical multimodal FL approaches have been developed (Zhang et al., 2023; Qayyum et al., 2022; Yang et al., 2022). Zhang et al. introduced UTMP, a federated learning framework in which unimodal clients collaboratively train a multimodal model through hierarchical encoder-decoder aggregation. Qayyum et al. proposed a collaborative FL framework for multimodal COVID-19 diagnosis on edge devices, enabling clients with either X-ray or ultrasound data to train a shared model without exchanging raw data. Yang et al. presented a cross-modal federated human activity recognition framework that uses a feature-disentangled network with both modality-agnostic and modality-specific encoders, enabling collaborative learning from clients with heterogeneous sensor and video modalities. However, these prior works do not consider the intrinsic characteristics of medical imaging, such as differences in imaging physics (e.g., CT vs. MRI) and semantic focus in clinical reports. To bridge this gap, we propose a new FL strategy combining federated asymmetric translation and federated VQA fine-tuning, which explicitly considers modality-specific features through the
|
| 47 |
+
|
| 48 |
+
use of asymmetric forward/backward models and modality-expert LoRA modules.
|
| 49 |
+
|
| 50 |
+
# 2.3 Image-to-Image Translation
|
| 51 |
+
|
| 52 |
+
A wide range of image-to-image translation techniques have been developed in recent years (Zhu et al., 2017; Huang et al., 2018; Isola et al., 2017; Cheng et al., 2023; Xia et al., 2024; Li et al., 2024b; Xu et al., 2024). Zhu et al. introduced CycleGAN, which enables unpaired image-to-image translation using a cycle-consistency loss. Huang et al. proposed MUNIT, which separates images into shared content and domain-specific style codes to generate diverse outputs. Isola et al. developed pix2pix for supervised image-to-image translation using paired data, directly learning mappings from input to output images. Although effective, most of these methods assume access to paired multimodal datasets—an assumption that does not hold in vertical multimodal FL scenarios, where data are distributed across institutions. To address this limitation, we propose a federated approach to unpaired cross-modal image translation that leverages modality-specific clinical text reports as semantic guidance. This strategy enriches the translation process with medically relevant details that may be implicit or missing in visual data alone.
|
| 53 |
+
|
| 54 |
+
# 3 Methodology
|
| 55 |
+
|
| 56 |
+
# 3.1 Overall
|
| 57 |
+
|
| 58 |
+
As shown in Fig. 2, X-FLoRA consists of two key phases: federated asymmetric translation and federated VQA fine-tuning. In the federated asymmetric translation phase, there are $N_{m}$ clients with MRI data and $N_{c}$ clients with CT data. Each group of clients trains backward translation models specific to their modality, while the forward translation model is provided by other modality clients and remains frozen. The central server aggregates the backward translation models from all clients. Subsequently, clients download the aggregated backward weights, enabling both MRI and CT clients to perform federated asymmetric translation without sharing data directly. This phase is repeated over $R_{t}$ rounds. After these rounds, each client generates synthetic images of the other modality.
|
| 59 |
+
|
| 60 |
+
In the next phase, modality-expert LoRA modules are applied to the respective modality encoders using the synthetic images. The weights from these LoRA modules are then uploaded to the server for global aggregation, specific to each modality.
|
| 61 |
+
|
| 62 |
+

|
| 63 |
+
Figure 2: Overall architecture of the X-FLoRA framework.
|
| 64 |
+
|
| 65 |
+
Additionally, expert-aware weighting is used to balance the contributions of real and synthetic data. This fine-tuning phase is repeated over $R_{f}$ rounds, promoting increased modality diversity and enhancing the robustness of modality-aware representations. After all rounds are completed, the final global VQA model is obtained.
|
| 66 |
+
|
| 67 |
+
# 3.2 Federated Asymmetric Translation
|
| 68 |
+
|
| 69 |
+
Inspired by cycle consistency (Zhu et al., 2017), we propose federated asymmetric translation, which enables each client to train cross-modal translation even though it possesses data from only a single modality, as shown in Fig. 3. In this phase, each client possesses real data $x$ and the corresponding imaging report $t$. In addition, each client applies the forward translator $F$, which receives $x$ and $t$ as inputs, to generate a synthetic image.
|
| 70 |
+
|
| 71 |
+
# 3.2.1 Forward and Backward Text-driven Translation
|
| 72 |
+
|
| 73 |
+
In the forward process, the text encoder extracts text features, while the image encoder and residual blocks extract image features. The extracted image and text features are then fused via text-driven attention, enabling the translator to generate modality-consistent synthetic images enriched with clinically relevant textual cues.
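The exact form of this text-driven attention is not spelled out here; purely as an illustration, a cross-attention fusion of this kind could be sketched in PyTorch as follows, where the image features act as queries over the text features (all class and argument names are hypothetical):

```python
import torch
import torch.nn as nn

class TextDrivenAttention(nn.Module):
    """Illustrative fusion block: image features attend to text features."""

    def __init__(self, dim: int, heads: int = 8):
        super().__init__()
        self.attn = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.norm = nn.LayerNorm(dim)

    def forward(self, img_feat: torch.Tensor, txt_feat: torch.Tensor) -> torch.Tensor:
        # img_feat: (B, N_img, dim) flattened spatial features from the image encoder
        # txt_feat: (B, N_txt, dim) token features from the text encoder
        fused, _ = self.attn(query=img_feat, key=txt_feat, value=txt_feat)
        return self.norm(img_feat + fused)   # residual fusion of text-conditioned features
```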
|
| 74 |
+
|
| 75 |
+
Each client generates synthetic images using the frozen forward translator $F$, and subsequently applies a backward translator $B$, with the same architecture as $F$, to reconstruct the original image. This reconstruction is used to train $B$ and a discriminator $D$, which distinguishes between real and reconstructed images. Specifically, $D$ and $B$ are trained as follows:
|
| 76 |
+
|
| 77 |
+
$$
|
| 78 |
+
\hat{D}, \hat{B} = \underset{D}{\operatorname{argmax}} \, \underset{B}{\operatorname{min}} \, \mathcal{L}_{\mathrm{total}}, \tag{1}
|
| 79 |
+
$$
|
| 80 |
+
|
| 81 |
+
where $\mathcal{L}_{\mathrm{total}}$ denotes the total loss function, defined as follows:
|
| 82 |
+
|
| 83 |
+
$$
|
| 84 |
+
\mathcal{L}_{\mathrm{total}} = \mathcal{L}_{\mathrm{adv}} + \eta \mathcal{L}_{\mathrm{id}}, \tag{2}
|
| 85 |
+
$$
|
| 86 |
+
|
| 87 |
+
where $\eta$ balances two objectives: adversarial loss $(\mathcal{L}_{adv})$ , which ensures realism of the reconstructed image, and identity loss $(\mathcal{L}_{id})$ , which ensures fidelity to the original input. The adversarial loss is formulated as follows:
|
| 88 |
+
|
| 89 |
+
$$
|
| 90 |
+
\mathcal{L}_{\mathrm{adv}} = \mathbb{E}_{x \sim p_{\mathrm{data}}(x)} \left[ \left\| 1 - D(B(F(x, t), t)) \right\|_{2} \right], \tag{3}
|
| 91 |
+
$$
|
| 92 |
+
|
| 93 |
+
where $x \sim p_{\mathrm{data}}(x)$ denotes the data distribution of real data and $\| \cdot \|_2$ denotes the L2 norm. The identity loss minimizes the pixel-wise difference between the reconstructed image and the original input. This loss is formulated as follows:
|
| 94 |
+
|
| 95 |
+
$$
|
| 96 |
+
\mathcal{L}_{\mathrm{id}} = \left\| B(F(x, t), t) - x \right\|_{1}, \tag{4}
|
| 97 |
+
$$
|
| 98 |
+
|
| 99 |
+
where $\| \cdot \| _1$ denotes the L1 norm. After local training, the central server aggregates the backward translation weights $\theta_{c2m}^{r,i}$ and $\theta_{m2c}^{r,j}$ received from the $i$ -th MRI and $j$ -th CT clients in the $r$ -th communication round, respectively.
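For illustration, a minimal PyTorch-style sketch of one local training step is given below. The `forward_t`, `backward_t`, and `disc` modules are placeholders for the translator and discriminator networks (see Section 4.2 for the encoder architectures), and the explicit discriminator objective is one common instantiation of the minimax in Eq. (1), since only $\mathcal{L}_{\mathrm{adv}}$ is written out above:

```python
import torch
import torch.nn.functional as F_nn

def backward_translation_step(x, t, forward_t, backward_t, disc, opt_b, opt_d, eta=1.5):
    """One local step of Eqs. (1)-(4): update discriminator D and backward translator B."""
    with torch.no_grad():                # the forward translator F stays frozen
        synth = forward_t(x, t)          # synthetic image in the other modality
    recon = backward_t(synth, t)         # reconstruction of the original input

    # Discriminator update: push D(real) toward 1 and D(reconstruction) toward 0.
    d_loss = ((1.0 - disc(x)) ** 2).mean() + (disc(recon.detach()) ** 2).mean()
    opt_d.zero_grad()
    d_loss.backward()
    opt_d.step()

    # Backward-translator update: adversarial term (Eq. 3) plus identity term (Eq. 4).
    loss_adv = ((1.0 - disc(recon)) ** 2).mean()
    loss_id = F_nn.l1_loss(recon, x)
    loss_total = loss_adv + eta * loss_id          # Eq. (2), with eta = 1.5 in our setup
    opt_b.zero_grad()
    loss_total.backward()
    opt_b.step()
    return loss_total.item()
```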
|
| 100 |
+
|
| 101 |
+
# 3.2.2 Discriminator Score-based Aggregation
|
| 102 |
+
|
| 103 |
+
To enhance reliability and stability across clients, we introduce a discriminator score-based aggregation. Each MRI client transmits three components to the server: (1) the backward translation weights $\theta_{c2m}^{r,i}$, (2) the backward model-based gradient $g_m^{r,i}$, and (3) a discriminator-based reliability score $s_m^{r,i}$ (ranging from 0 to 1). The server aggregates MRI client updates using:
|
| 104 |
+
|
| 105 |
+
$$
|
| 106 |
+
\theta_ {c 2 m} ^ {r + 1} = \frac {1}{N _ {m}} \sum_ {i = 1} ^ {N _ {m}} \left(\omega_ {m, s} ^ {r, i} + \omega_ {m, g} ^ {r, i}\right), \tag {5}
|
| 107 |
+
$$
|
| 108 |
+
|
| 109 |
+
$$
|
| 110 |
+
\omega_ {m, s} ^ {r, i} = \frac {s _ {m} ^ {r , i} \cdot \theta_ {c 2 m} ^ {r , i}}{\sum_ {i = 1} ^ {N _ {m}} \left(s _ {m} ^ {r , i}\right) + \sqrt {G _ {m} ^ {r}}}, \tag {6}
|
| 111 |
+
$$
|
| 112 |
+
|
| 113 |
+
$$
|
| 114 |
+
\omega_ {m, g} ^ {r, i} = \frac {g _ {m} ^ {r , i} \cdot \theta_ {c 2 m} ^ {r , i}}{\sum_ {i = 1} ^ {N _ {m}} \left(s _ {m} ^ {r , i}\right) + \sqrt {G _ {m} ^ {r}}}. \tag {7}
|
| 115 |
+
$$
|
| 116 |
+
|
| 117 |
+
Here, $\omega_{m,s}^{r,i}$ and $\omega_{m,g}^{r,i}$ denote the reliability-based normalized model weight and the gradient-based normalized model weight from $i$ -th client, respectively. Moreover, $\theta_{c2m}^{r+1}$ denotes the aggregated weights in the $(r+1)$ -th round. In addition, the discriminator score $s_m^{r,i}$ is defined as follows:
|
| 118 |
+
|
| 119 |
+
$$
|
| 120 |
+
s_{m}^{r,i} = \mathbb{E}_{x_{m}^{i} \sim p_{\mathrm{data}}(x_{m}^{i})} \left[ D_{m}^{r,i} \left( x_{m}^{i} \right) \right], \tag{8}
|
| 121 |
+
$$
|
| 122 |
+
|
| 123 |
+

|
| 124 |
+
Figure 3: Architecture of federated asymmetric translation.
|
| 125 |
+
|
| 126 |
+

|
| 127 |
+
|
| 128 |
+
where $x_{m}^{i}$ denotes the real data $x_{m}^{i}$ held by the $i$ -th MRI client and $D_{m}^{r,i}$ denotes the local discriminator of the $i$ -th MRI client in $r$ -th round. Moreover, $G_{m}^{r}$ represents the accumulated squared sum of the gradients (momentum) in $r$ -th round, formulated as follows:
|
| 129 |
+
|
| 130 |
+
$$
|
| 131 |
+
G _ {m} ^ {r} = G _ {m} ^ {r - 1} + \sum_ {i = 1} ^ {N _ {m}} \left(g _ {m} ^ {r, i}\right) ^ {2}. \tag {9}
|
| 132 |
+
$$
|
| 133 |
+
|
| 134 |
+
By using $G_{m}^{r}$ and $s_{m}^{r,i}$ , this aggregation approach prioritizes contributions from clients whose discriminators better distinguish real from generated images, thereby enhancing model robustness.
|
| 135 |
+
|
| 136 |
+
For CT clients, the server aggregates the backward translation weights $\theta_{m2c}^{r,j}$ using a similar strategy, following the definition provided in Eq. (7). Specifically, the $j$-th CT client's momentum $G_{c}^{r}$ and discriminator score $s_c^{r,j}$ are calculated from the backward-translator gradient $g_{c}^{r,j}$, the real data $x_{c}^{j}$, and the discriminator $D_{c}^{r,j}$, based on Eqs. (8) and (9).
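A server-side sketch of this aggregation, treating each client's gradient contribution as a scalar summary (the tensor-level form of $g_m^{r,i}$ is not fixed here), could look as follows; all function and variable names are illustrative:

```python
import torch

def aggregate_backward_weights(client_states, scores, grad_norms, momentum_prev):
    """Discriminator score-based aggregation (sketch of Eqs. (5)-(9)).

    client_states: list of state_dicts of the backward translators from N_m clients
    scores:        list of discriminator reliability scores s^{r,i} in [0, 1]
    grad_norms:    list of scalar gradient summaries g^{r,i}
    momentum_prev: accumulated squared gradient sum G^{r-1}
    """
    n_clients = len(client_states)
    momentum = momentum_prev + sum(g ** 2 for g in grad_norms)      # Eq. (9)
    denom = sum(scores) + momentum ** 0.5                           # shared normalizer
    template = client_states[0]
    agg = {k: torch.zeros_like(v, dtype=torch.float32) for k, v in template.items()}
    for state, s, g in zip(client_states, scores, grad_norms):
        weight = (s + g) / denom              # omega_s + omega_g with theta factored out (Eqs. 6-7)
        for k, v in state.items():
            agg[k] += weight * v.float()
    agg = {k: v / n_clients for k, v in agg.items()}                # Eq. (5)
    return agg, momentum
```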
|
| 137 |
+
|
| 138 |
+
# 3.3 Federated Modality-Expert Fine-tuning
|
| 139 |
+
|
| 140 |
+
Training VLMs for VQA typically demands extensive VRAM and significant computational resources, necessitating efficient fine-tuning strategies. Moreover, in federated medical environments, data across modalities (e.g., MRI, CT) exhibit inherently distinct characteristics. To effectively capture modality-specific features in cross-modal FL for medical VQA, we propose modality-expert LoRA fine-tuning, which independently learns discriminative features from each imaging modality.
|
| 141 |
+
|
| 142 |
+
# 3.3.1 Modality-expert LoRA
|
| 143 |
+
|
| 144 |
+
As illustrated in Fig. 4, the proposed modality-expert LoRA architecture is designed separately
|
| 145 |
+
|
| 146 |
+
for each modality and enables efficient training with substantially reduced computational overhead compared to full-scale VLM fine-tuning. Each client uses fixed, modality-specific encoders denoted as $W_{m}$ for MRI, $W_{c}$ for CT, and $W_{t}$ for text. These encoders extract feature representations $W_{m}v_{m}$, $W_{c}v_{c}$, and $W_{t}v_{t}$, where $v_{m}$, $v_{c}$, and $v_{t}$ are the modality-specific input vectors. To enhance these representations without updating the pre-trained encoders, we apply LoRA fine-tuning as follows:
|
| 147 |
+
|
| 148 |
+
$$
|
| 149 |
+
\hat {v} _ {k} = W _ {k} v _ {k} + \beta_ {k} \alpha_ {k} v _ {k}, \quad k \in \{m, c, t \}, \tag {10}
|
| 150 |
+
$$
|
| 151 |
+
|
| 152 |
+
where $\alpha$ and $\beta$ are low-rank weight matrices in the LoRA layers. Specifically, $\alpha_{m}$, $\alpha_{c}$, and $\alpha_{t}$ project input features into low-rank subspaces of dimensions $\mathbb{R}^{d\times r_m}$, $\mathbb{R}^{d\times r_c}$, and $\mathbb{R}^{d\times r_t}$, respectively. Corresponding matrices $\beta_{m}$, $\beta_{c}$, and $\beta_{t}$ project them back into the original feature space. This decomposition-reconstruction approach enables efficient fine-tuning of MRI and CT modality-specific features. The LoRA modules are applied to the linear projection matrices of the modality-specific encoders, including the key and value projection layers in the attention blocks as well as the linear layers in the feedforward blocks. Each LoRA module is integrated alongside each linear matrix in the model (Hu et al., 2022). Moreover, the fine-tuned features are fused by a projector to capture inter-modal representations. After local training, the fine-tuned modality-expert LoRA weights are transmitted to the central server for aggregation.
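For illustration, a minimal PyTorch sketch of one such modality-expert LoRA adapter wrapped around a frozen linear projection is shown below (class and argument names are hypothetical, and the zero initialization of $\beta_k$ is an assumption so that training starts from the pre-trained behavior):

```python
import torch
import torch.nn as nn

class ModalityExpertLoRA(nn.Module):
    """LoRA adapter on a frozen modality-specific linear layer, following Eq. (10)."""

    def __init__(self, base: nn.Linear, rank: int):
        super().__init__()
        self.base = base
        for p in self.base.parameters():            # keep the pre-trained encoder frozen
            p.requires_grad = False
        self.alpha = nn.Linear(base.in_features, rank, bias=False)    # down-projection alpha_k
        self.beta = nn.Linear(rank, base.out_features, bias=False)    # up-projection beta_k
        nn.init.zeros_(self.beta.weight)            # adapter initially leaves W_k v unchanged

    def forward(self, v: torch.Tensor) -> torch.Tensor:
        return self.base(v) + self.beta(self.alpha(v))   # W_k v + beta_k alpha_k v
```

In this setup, the MRI, CT, and text experts would instantiate the adapter with ranks $r_m = 16$, $r_c = 32$, and $r_t = 8$, respectively, and attach it to the key/value projections and feedforward linear layers mentioned above.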
|
| 153 |
+
|
| 154 |
+
# 3.3.2 Modality-specific Aggregation
|
| 155 |
+
|
| 156 |
+
The central server receives the fine-tuned LoRA weights from MRI and CT clients and performs
|
| 157 |
+
|
| 158 |
+

|
| 159 |
+
Figure 4: Architecture of the federated visual question answering fine-tuning.
|
| 160 |
+
|
| 161 |
+
aggregation. In our framework, these modality-expert LoRA weights are categorized according to the modality on which they were trained, maintaining separate sets of weights for MRI, CT, and text data. Unlike conventional FL approaches (Li et al., 2020; McMahan et al., 2017; Li et al., 2021; Kairouz et al., 2021) that aggregate all modality weights jointly, we propose modality-specific aggregation, which processes the weights independently for each modality. This separation enables each modality-expert LoRA module to better capture and represent the unique characteristics of CT, MRI, and text inputs.
|
| 162 |
+
|
| 163 |
+
To further enhance aggregation quality, we introduce an expert-aware weighting scheme that differentiates the contributions of weights based on whether they were trained on real or synthetic data. This allows the system to adjust the influence of each client's update during aggregation. The expert-aware weight for the $i$ -th MRI client is defined as:
|
| 164 |
+
|
| 165 |
+
$$
|
| 166 |
+
\lambda_{m}^{i} = \left\{ \begin{array}{ll} \frac{\epsilon}{\epsilon \cdot N_{m}^{r} + N_{m}^{s}} & \text{if } i \in \mathcal{R}_{m} \\ \frac{1}{\epsilon \cdot N_{m}^{r} + N_{m}^{s}} & \text{otherwise} \end{array} \right. , \tag{11}
|
| 167 |
+
$$
|
| 168 |
+
|
| 169 |
+
where $\lambda_{m}^{i}$ denotes the MRI aggregation weight for the $i$-th client, and $\mathcal{R}_m$ is the set of indices corresponding to clients with real MRI data. Additionally, $N_{m}^{r}$ and $N_{m}^{s}$ represent the number of clients with real and synthetic MRI data, respectively. The hyperparameter $\epsilon$ controls the relative scaling between real and synthetic data. A similar expert-aware weighting strategy is applied to CT clients, producing CT aggregation weights $\lambda_c^i$ using the same formulation as in Eq. (11). For all clients contributing text data, the aggregation weight $\lambda_t^i$ is set to 1.
|
| 170 |
+
|
| 171 |
+
Based on $\lambda_{k}^{i}$ , the server aggregates the MRI LoRA weights $\{\alpha_{m},\beta_{m}\}^{r,i}$ , CT LoRA weights $\{\alpha_{c},\beta_{c}\}^{r,i}$ , and text LoRA weights $\{\alpha_{t},\beta_{t}\}^{r,i}$ in the $r$ -th round. It balances the influence of real and synthetic data on modality-specific representations for MRI and CT. The weight-based aggregation process is defined as follows:
|
| 172 |
+
|
| 173 |
+
$$
|
| 174 |
+
\left\{\alpha_ {k}, \beta_ {k} \right\} ^ {r + 1} = \frac {1}{N _ {k}} \sum_ {i = 1} ^ {N _ {k}} \lambda_ {k} ^ {i} \left\{\alpha_ {k}, \beta_ {k} \right\} ^ {r, i}, \tag {12}
|
| 175 |
+
$$
|
| 176 |
+
|
| 177 |
+
$$
|
| 178 |
+
k \in \{m, c, t \},
|
| 179 |
+
$$
|
| 180 |
+
|
| 181 |
+
where $N_{t}$ denotes the total number of clients $(N_{m} + N_{c})$. After the federated VQA fine-tuning phase is completed, the final global VQA model with the modality-specific LoRA modules is obtained.
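The following sketch illustrates the expert-aware weighting and modality-specific aggregation on the server; variable names are illustrative, and text experts would simply pass $\lambda_t^i = 1$ for every client:

```python
import torch

def expert_aware_weights(real_clients, all_clients, eps=1.5):
    """Eq. (11): per-client weights lambda_k^i for one imaging modality (MRI or CT)."""
    n_real = len(real_clients)
    n_synth = len(all_clients) - n_real
    denom = eps * n_real + n_synth
    return {cid: (eps / denom if cid in real_clients else 1.0 / denom)
            for cid in all_clients}

def aggregate_lora(lora_states, lambdas):
    """Eq. (12): weighted average of the LoRA weights {alpha_k, beta_k} for one modality.

    lora_states: dict client_id -> state_dict of that client's modality-expert LoRA module
    lambdas:     dict client_id -> lambda_k^i
    """
    n_clients = len(lora_states)
    template = next(iter(lora_states.values()))
    agg = {k: torch.zeros_like(v) for k, v in template.items()}
    for cid, state in lora_states.items():
        for k, v in state.items():
            agg[k] += lambdas[cid] * v
    return {k: v / n_clients for k, v in agg.items()}
```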
|
| 182 |
+
|
| 183 |
+
# 4 Experiments
|
| 184 |
+
|
| 185 |
+
# 4.1 Dataset and Evaluation Metric
|
| 186 |
+
|
| 187 |
+
The experiments utilize a combined dataset drawn from the LLaVA-Med dataset (Li et al., 2023) and the VQA-RAD dataset (Lau et al., 2018). LLaVA-Med is designed to support instruction-following multimodal learning across multiple institutions. It is built using image-text pairs sourced from PubMed Central and includes a GPT-4-generated instruction-tuning set, comprising 10K samples across modalities such as CT and MRI. The VQA-RAD dataset comprises 3,515 clinician-authored QA pairs and 315 radiology images, with imaging reports generated using GPT-4 based on the QA pairs, which include closed-ended answers (i.e., yes/no) and open-ended answers given as a short phrase. In our federated learning setup, X-FLoRA is trained across eight clients. Four clients use MRI data $(N_{m} = 4)$, and the other four use CT data $(N_{c} = 4)$, drawing on both the LLaVA-Med and VQA-RAD datasets. Appendix C provides additional experiments varying the number of MRI and CT clients, with comparisons against baseline FL methods.
|
| 188 |
+
|
| 189 |
+
To evaluate the quality of the generated responses, we use four standard automatic metrics: BLEU (Papineni et al., 2002), METEOR (Banerjee and Lavie, 2005), ROUGE (Lin, 2004), and CIDEr (Vedantam et al., 2015) for the LLaVA-med dataset and accuracy for the VQA-RAD dataset. These metrics assess both surface-level and semantic aspects of generation. BLEU captures lexical precision; METEOR balances precision and recall; ROUGE evaluates n-gram overlap; and
|
| 190 |
+
|
| 191 |
+
<table><tr><td>Dataset</td><td colspan="5">LLaVA-Med</td><td colspan="3">VQA-RAD</td></tr><tr><td rowspan="2">Metric</td><td rowspan="2">BLEU-1</td><td rowspan="2">BLEU-5</td><td rowspan="2">METEOR</td><td rowspan="2">ROUGE</td><td rowspan="2">CIDEr</td><td colspan="3">Accuracy (%)</td></tr><tr><td>Open</td><td>Closed</td><td>Overall</td></tr><tr><td>FedAvg (McMahan et al., 2017)</td><td>0.2892</td><td>0.1486</td><td>0.3467</td><td>0.3682</td><td>0.5003</td><td>51.39</td><td>73.56</td><td>64.76</td></tr><tr><td>FedProx (Li et al., 2020)</td><td>0.2859</td><td>0.1512</td><td>0.3450</td><td>0.3702</td><td>0.5064</td><td>52.09</td><td>74.13</td><td>65.38</td></tr><tr><td>MOON (Li et al., 2021)</td><td>0.2935</td><td>0.1561</td><td>0.3492</td><td>0.3604</td><td>0.5152</td><td>53.31</td><td>76.37</td><td>67.21</td></tr><tr><td>FedProto (Tan et al., 2022)</td><td>0.2943</td><td>0.1568</td><td>0.3486</td><td>0.3541</td><td>0.5176</td><td>54.04</td><td>77.51</td><td>68.19</td></tr><tr><td>IOS (Wu et al., 2023)</td><td>0.2913</td><td>0.1510</td><td>0.3508</td><td>0.3587</td><td>0.5190</td><td>55.37</td><td>78.05</td><td>69.04</td></tr><tr><td>FedTGP (Zhang et al., 2024)</td><td>0.3012</td><td>0.1572</td><td>0.3561</td><td>0.3672</td><td>0.5237</td><td>57.15</td><td>78.46</td><td>70.00</td></tr><tr><td>FedMedVLP (Lu et al., 2023)</td><td>0.2955</td><td>0.1540</td><td>0.3533</td><td>0.3597</td><td>0.5196</td><td>55.81</td><td>78.30</td><td>69.53</td></tr><tr><td>FedKIM (Wang et al., 2024b)</td><td>0.3015</td><td>0.1581</td><td>0.3588</td><td>0.3701</td><td>0.5279</td><td>56.12</td><td>78.49</td><td>70.14</td></tr><tr><td>X-FLoRA</td><td>0.3191</td><td>0.1630</td><td>0.3704</td><td>0.3954</td><td>0.5430</td><td>60.42</td><td>81.10</td><td>72.89</td></tr></table>
|
| 192 |
+
|
| 193 |
+
Table 1: Comparison with prior federated learning methods in terms of BLEU, METEOR, ROUGE, CIDEr and accuracy on the LLaVA-Med and VQA-RAD dataset.
|
| 194 |
+
|
| 195 |
+
CIDEr measures TF-IDF-weighted similarity, placing higher importance on informative content in vision-language tasks. In addition, we evaluate the translators with four metrics: peak signal-to-noise ratio (PSNR), structural similarity index (SSIM), learned perceptual image patch similarity (LPIPS), and Fréchet inception distance (FID).
|
| 196 |
+
|
| 197 |
+
# 4.2 Implementation Details
|
| 198 |
+
|
| 199 |
+
The experiments follow a federated-by-dataset scenario (McMahan et al., 2017), where each client constructs its own local dataset and collaborates with a central server through FL. All experiments were conducted using a single NVIDIA L40S GPU.
|
| 200 |
+
|
| 201 |
+
The text encoder (Radford et al., 2021) consists of 12 transformer blocks, each comprising layer normalization, multi-head self-attention (heads of eight, input length of 77, hidden size of 512), a residual connection, and a feed-forward network with GELU activation. This structure is repeated across all layers. After the transformer, the embedding token is passed through a linear projection to obtain the final text representation.
|
| 202 |
+
|
| 203 |
+
The image encoder (Zhu et al., 2017) begins with a $7 \times 7$ convolutional layer using reflection padding and ReLU activation. This is followed by two downsampling blocks, each with a $3 \times 3$ convolution and ReLU, reducing spatial resolution by a factor of 4. Next, nine residual blocks are applied, each composed of two $3 \times 3$ convolutional layers, normalization, and ReLU. The discriminator extends this encoder with a final 1-channel convolutional layer followed by a sigmoid activation.
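As a rough PyTorch sketch of this encoder and discriminator (the input channel count, feature widths, and the use of instance normalization are assumptions, since only the layer layout is stated above):

```python
import torch.nn as nn

class ResidualBlock(nn.Module):
    """Two 3x3 convolutions with normalization and ReLU, plus a skip connection."""

    def __init__(self, ch: int):
        super().__init__()
        self.body = nn.Sequential(
            nn.Conv2d(ch, ch, 3, padding=1), nn.InstanceNorm2d(ch), nn.ReLU(inplace=True),
            nn.Conv2d(ch, ch, 3, padding=1), nn.InstanceNorm2d(ch))

    def forward(self, x):
        return x + self.body(x)

def image_encoder(in_ch=1, base_ch=64, n_res=9):
    """7x7 reflection-padded conv, two stride-2 downsampling blocks, nine residual blocks."""
    return nn.Sequential(
        nn.ReflectionPad2d(3), nn.Conv2d(in_ch, base_ch, 7), nn.ReLU(inplace=True),
        nn.Conv2d(base_ch, base_ch * 2, 3, stride=2, padding=1), nn.ReLU(inplace=True),
        nn.Conv2d(base_ch * 2, base_ch * 4, 3, stride=2, padding=1), nn.ReLU(inplace=True),
        *[ResidualBlock(base_ch * 4) for _ in range(n_res)])

def discriminator(in_ch=1, base_ch=64):
    """The discriminator extends the encoder with a final 1-channel conv and a sigmoid."""
    return nn.Sequential(image_encoder(in_ch, base_ch),
                         nn.Conv2d(base_ch * 4, 1, 3, padding=1), nn.Sigmoid())
```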
|
| 204 |
+
|
| 205 |
+
We employ stochastic gradient descent with a momentum of 0.9 and a learning rate of 0.001. X-FLoRA is trained for a total of 150 global rounds, consisting of $R_{t} = 50$ rounds for translational pretraining and $R_{f} = 100$ rounds for federated fine-tuning. Additionally, we set both $\eta$ and $\epsilon$ to 1.5, and $r_{m}$, $r_c$, and $r_t$ to 16, 32, and 8, respectively. Appendix C provides experiments to optimize these parameters.
|
| 208 |
+
|
| 209 |
+
# 4.3 Results and Analysis
|
| 210 |
+
|
| 211 |
+
We compare X-FLoRA with several baseline FL methods, including FedAvg (McMahan et al., 2017), FedProx (Li et al., 2020), MOON (Li et al., 2021), FedProto (Tan et al., 2022), IOS (Wu et al., 2023), FedTGP (Zhang et al., 2024), FedMedVLP (Lu et al., 2023), and FedKIM (Wang et al., 2024b), using both the LLaVA-Med and VQA-RAD datasets. The VQA model architecture proposed by Liu et al. (2023) is used as the backbone because it has been broadly utilized in the medical domain. All the baseline models are trained and evaluated from scratch using the respective authors' experimental settings and open-source code. We report the average performance over three runs using different random seeds, with a standard deviation of $6.3 \times 10^{-3}$, confirming the consistency of our results. The best scores are highlighted in bold across all tables. As shown in Table 1, X-FLoRA achieves superior VQA performance across all five metrics compared to baseline FL methods. This improvement stems from the integration of cross-modal synthetic data, which enables collaborative training even under unpaired modality settings. Additionally, modality-specific fine-tuning via LoRA modules enhances representation quality by adapting to the distinct characteristics of each imaging domain.
|
| 212 |
+
|
| 213 |
+
Figure 5 provides a qualitative comparison of responses generated by X-FLoRA, IOS, and FedTGP for a given CT scan. In the figure, the red arrow indicates a mass arising near the rib, red text indicates incorrect or inconsistent responses, and blue text represents accurate and contextually appropriate answers. X-FLoRA successfully identifies key clinical features—such as the intact
|
| 214 |
+
|
| 215 |
+

|
| 216 |
+
Real CT Image
|
| 217 |
+
|
| 218 |
+

|
| 219 |
+
Synthetic MRI Image
|
| 220 |
+
A computed tomography scan shows that the mass arises from the posterior costal arc of the eighth rib on the patient left side and does not involve the medulla.
|
| 221 |
+
Imaging Report
|
| 222 |
+
Figure 5: Example comparison of X-FLoRA and other FL methods on the LLaVA-Med dataset. The GPT-4 response is considered the ground truth.
|
| 223 |
+
|
| 224 |
+
User
|
| 225 |
+
Write an exhaustive depiction of the given image.
|
| 226 |
+
|
| 227 |
+
<table><tr><td>IOS</td><td>The CT scan highlights surrounding structures such as lungs and heart in relation to the mass.</td></tr><tr><td>FedTGP</td><td>The CT scan shows that the mass has infiltrated the medulla, causing visible disruption to its structure.</td></tr><tr><td>X-FLoRA (w/o Expert Aggregation)</td><td>The CT scan indicates that the medulla is intact but appears to show signs of infiltration by the mass.</td></tr><tr><td>X-FLoRA</td><td>The CT scan indicates that the medulla is intact and uninvolved, showing no signs of infiltration by the mass.</td></tr><tr><td>GPT-4</td><td>The CT scan also shows the medulla, which is not involved with the mass. The medulla appears to be intact and separate from the mass, indicating that the mass has not infiltrated or affected this area.</td></tr></table>
|
| 228 |
+
|
| 229 |
+
<table><tr><td colspan="4">LLaVA-Med</td><td colspan="3">VQA-RAD</td></tr><tr><td rowspan="2" colspan="2">Method</td><td colspan="5">Metric</td></tr><tr><td rowspan="2">METEOR</td><td rowspan="2">CIDEr</td><td colspan="3">Accuracy (%)</td></tr><tr><td>DA</td><td>FL</td><td>Open</td><td>Closed</td><td>Overall</td></tr><tr><td>SEA (Wang et al., 2023)</td><td>FedTGP</td><td>0.3569</td><td>0.5231</td><td>57.31</td><td>78.50</td><td>70.07</td></tr><tr><td>CAF (Xie et al., 2022)</td><td>IOS</td><td>0.3514</td><td>0.5205</td><td>55.42</td><td>78.37</td><td>69.12</td></tr><tr><td>CAF (Xie et al., 2022)</td><td>IOS</td><td>0.3510</td><td>0.5181</td><td>55.50</td><td>78.53</td><td>70.19</td></tr><tr><td>SEA (Wang et al., 2023)</td><td>FedTGP</td><td>0.3558</td><td>0.5207</td><td>57.40</td><td>78.55</td><td>69.59</td></tr><tr><td colspan="2">X-FLoRA</td><td>0.3704</td><td>0.5430</td><td>60.42</td><td>81.10</td><td>72.89</td></tr></table>
|
| 230 |
+
|
| 231 |
+
and uninvolved state of the medulla and the absence of mass infiltration—matching the GPT-4-generated reference from the imaging report. Moreover, in this synthetic MRI image, it appears that the medulla remains intact. This demonstrates X-FLoRA's strong grounding capability in clinically relevant visual content. Appendix C also provides additional example comparisons of X-FLoRA and other FL methods.
|
| 232 |
+
|
| 233 |
+
Table 2 evaluates X-FLoRA when integrated with DA techniques, specifically SEA (Wang et al., 2023) and CAF (Xie et al., 2022). We also examine combinations of DA methods with state-of-the-art FL models such as FedTGP and IOS. X-FLoRA consistently outperforms these combinations, highlighting the benefit of federated asymmetric translation in improving VQA performance.
|
| 234 |
+
|
| 235 |
+
Moreover, Table 3 compares the performance of $F$ and $B$ in asymmetric translation with CycleGAN. We evaluate $F$ with LPIPS and FID, and assess both $F$ and $B$ using PSNR and SSIM. Asymmetric translation surpasses CycleGAN, with higher PSNR and SSIM and lower LPIPS and FID.
|
| 236 |
+
|
| 237 |
+
Table 2: Performance of FL methods combined with DA models in terms of VQA performance on the LLaVA-Med and VQA-RAD datasets.
|
| 238 |
+
|
| 239 |
+
<table><tr><td rowspan="2">Forward</td><td rowspan="2">Architecture</td><td colspan="2">Metric</td><td rowspan="2">Forward + Backward</td><td rowspan="2">Architecture</td><td colspan="2">Metric</td></tr><tr><td>LPIPS(↓)</td><td>FID (↓)</td><td>PSNR (↑)</td><td>SSIM (↑)</td></tr><tr><td rowspan="2">CT→MRI</td><td>CycleGAN (Only Image)</td><td>0.25</td><td>119.83</td><td rowspan="2">CT→MRI→CT</td><td>CycleGAN (Only Image)</td><td>25.51</td><td>0.78</td></tr><tr><td>Ours (Image + Text)</td><td>0.22</td><td>90.22</td><td>Ours (Image + Text)</td><td>27.23</td><td>0.87</td></tr><tr><td rowspan="2">MRI→CT</td><td>CycleGAN (Only Image)</td><td>0.24</td><td>109.66</td><td rowspan="2">MRI→CT→MRI</td><td>CycleGAN (Only Image)</td><td>27.24</td><td>0.81</td></tr><tr><td>Ours (Image + Text)</td><td>0.23</td><td>105.05</td><td>Ours (Image + Text)</td><td>28.57</td><td>0.88</td></tr></table>
|
| 240 |
+
|
| 241 |
+
Table 3: Performance of asymmetric translation compared with CycleGAN on the LLaVA-Med dataset.
|
| 242 |
+
|
| 243 |
+
<table><tr><td>Models</td><td>Trainable Params</td><td>Convergence Round</td><td>Training Time (hours)</td></tr><tr><td>FedProto (Tan et al., 2022)</td><td>13G</td><td>202</td><td>33.6</td></tr><tr><td>IOS (Wu et al., 2023)</td><td>13G</td><td>188</td><td>30.6</td></tr><tr><td>FedTGP (Zhang et al., 2024)</td><td>13G</td><td>184</td><td>30.6</td></tr><tr><td>X-FLoRA</td><td>58M</td><td>149</td><td>25.1</td></tr></table>
|
| 244 |
+
|
| 245 |
+
Table 4: Computational complexity of FL methods on the LLaVA-Med dataset.
|
| 246 |
+
|
| 247 |
+
<table><tr><td colspan="3">Federated Asymmetric Translation</td><td colspan="2">Federated VQA Finetuning</td><td rowspan="2">CIDEr</td></tr><tr><td>Text</td><td>Translation</td><td>Discriminator-based Aggregation</td><td>Modality-expert LoRA</td><td>Modality-specific Aggregation</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>0.5430</td></tr><tr><td>✓</td><td>✓</td><td></td><td>✓</td><td>✓</td><td>0.5407</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td></td><td>0.5401</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td></td><td></td><td>0.5357</td></tr><tr><td></td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>0.5304</td></tr><tr><td></td><td></td><td></td><td></td><td></td><td>0.5003</td></tr></table>
|
| 248 |
+
|
| 249 |
+
Table 5: Ablation study for X-FLoRA on the LLaVA-Med dataset in terms of the CIDEr.
|
| 250 |
+
|
| 251 |
+
This result is attributed to complementing visual features with clinical insights through the text corresponding to the images.
|
| 252 |
+
|
| 253 |
+
Table 4 compares the training efficiency of several FL methods. X-FLoRA not only converges faster (149 rounds) but also requires far fewer trainable parameters (58M) compared to other methods (13G), owing to the use of lightweight LoRA modules. Specifically, we adopted the ViT-L/14 model as the visual encoder and the transformer layers within the Vicuna-7B model as the text encoder, as introduced in the original LLaVA architecture. Since the total parameter size of the visual and text encoders is 4G out of the 13G parameters of the entire model, the use of LoRA is reasonable for efficient fine-tuning. This makes X-FLoRA particularly suitable for resource-constrained clinical environments.
|
| 254 |
+
|
| 255 |
+
# 4.4 Ablation Study
|
| 256 |
+
|
| 257 |
+
This section analyzes the contribution of each X-FLoRA component. Table 5 presents ablation results, where a checkmark $(\checkmark)$ indicates module activation. The first and last rows show X-FLoRA and backbone performance, respectively. The second and third rows report the results when discriminator-based aggregation and modality-specific aggregation are excluded, respectively. The fourth row reports the results when synthetic images are used without federated VQA fine-tuning, which reflects the performance of full fine-tuning. It demonstrates that fine-tuning with LoRA yields better performance than full fine-tuning. This is supported by experimental evidence showing that selective fine-tuning leads to better performance compared to full fine-tuning (Hu et al., 2022). The fifth row reports the results when only images are used in the translator. Comparing each module with the full X-FLoRA confirms that each component contributes to performance improvements.
|
| 260 |
+
|
| 261 |
+
# 5 Conclusion
|
| 262 |
+
|
| 263 |
+
This study tackles the critical challenge of cross-modal heterogeneity in federated VQA. We propose X-FLoRA, a comprehensive framework that integrates asymmetric text-driven translation, modality-expert LoRA modules, and global aggregation strategies to effectively address this issue. X-FLoRA selectively trains backward translation models, shares forward translations, applies modality-specific fine-tuning, and aggregates a global model, all within the FL paradigm to enhance VQA accuracy. Our experimental results demonstrate that X-FLoRA outperforms existing FL baselines, achieving state-of-the-art VQA performance on both the LLaVA-Med and VQA-RAD datasets, while maintaining computational efficiency. These results underscore the effectiveness of the proposed design in managing unpaired multimodal data in decentralized clinical settings.
|
| 264 |
+
|
| 265 |
+
# 6 Limitations
|
| 266 |
+
|
| 267 |
+
Although X-FLoRA demonstrates improved quantitative performance on benchmark datasets such as LLaVA-Med and VQA-RAD, the clinical interpretability and reliability of the generated responses have not yet been directly assessed through expert review. Medical decision-making often involves context-specific and nuanced reasoning, which cannot be fully captured by automated metrics alone. Therefore, it would be valuable to examine whether the model's outputs align with clinical expectations in real-world scenarios. Future work should include qualitative evaluations by domain experts, such as structured assessments conducted by radiologists or physicians, to better understand how the model's responses are perceived and trusted in clinical environments. Such evaluations would help bridge the gap between algorithmic performance and practical usability, ultimately contributing to the safe and effective deployment of federated VQA systems in healthcare.
|
| 270 |
+
|
| 271 |
+
This study focuses on MRI and CT, widely used and clinically complementary imaging modalities, which provide a robust foundation for evaluating the proposed framework. Other modalities such as ultrasound, PET, and digital pathology nevertheless remain unexplored. In future work, we will extend X-FLoRA with specialized forward and backward translators adapted to these modalities; expanding the number of translators would allow the framework to accommodate a wider range of modalities. However, this poses the challenge of mapping modality-specific representations to a common feature space, since imaging and semantic characteristics differ substantially across modalities, and the gap grows as modalities beyond MRI and CT are incorporated. To address this, we plan to explore advanced feature alignment techniques, including modality-invariant representation learning and contrastive alignment with clinical text embeddings, which aim to enhance cross-modal knowledge transfer despite significant inter-modality gaps.
|
| 272 |
+
|
| 273 |
+
# Acknowledgments
|
| 274 |
+
|
| 275 |
+
This work was supported by the IITP grant funded by the Korea government (MSIT) (No.2021-0-02068, RS-2023-00256629, RS-2022-00156287, RS-2024-00437718).
|
| 276 |
+
|
| 277 |
+
# References
|
| 278 |
+
|
| 279 |
+
Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, and 1 others. 2023. Gpt-4 technical report. arXiv preprint arXiv:2303.08774.
|
| 280 |
+
Anas Awadalla, Irena Gao, Josh Gardner, Jack Hessel, Yusuf Hanafy, Wanrong Zhu, Kalyani Marathe, Yonatan Bitton, Samir Gadre, Shiori Sagawa, and 1 others. 2023. Openflamingo: An open-source framework for training large autoregressive vision-language models. arXiv preprint arXiv:2308.01390.
|
| 281 |
+
Satanjeev Banerjee and Alon Lavie. 2005. Meteor: An automatic metric for mt evaluation with improved correlation with human judgments. In Proceedings of the acl workshop on intrinsic and extrinsic evaluation measures for machine translation and/or summarization, pages 65-72.
|
| 282 |
+
|
| 283 |
+
Cheng Chen, Qi Dou, Hao Chen, Jing Qin, and Pheng Ann Heng. 2020. Unsupervised bidirectional cross-modality adaptation via deeply synergistic image and feature alignment for medical image segmentation. IEEE transactions on medical imaging, 39(7):2494-2505.
|
| 284 |
+
Wuyang Chen, Zhiding Yu, Shalini De Mello, Sifei Liu, Jose M Alvarez, Zhangyang Wang, and Anima Anandkumar. 2021. Contrastive syn-to-real generalization. arXiv preprint arXiv:2104.02290.
|
| 285 |
+
Bin Cheng, Zuhao Liu, Yunbo Peng, and Yue Lin. 2023. General image-to-image translation with one-shot image guidance. In Proceedings of the IEEE/CVF international conference on computer vision, pages 22736-22746.
|
| 286 |
+
Varsha Chiruvella, Achuta Kumar Guddati, and 1 others. 2021. Ethical issues in patient data ownership. Interactive Journal of Medical Research, 10(2):e22269.
|
| 287 |
+
Qian Dai, Dong Wei, Hong Liu, Jinghan Sun, Liansheng Wang, and Yefeng Zheng. 2024. Federated modality-specific encoders and multimodal anchors for personalized brain tumor segmentation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pages 1445-1453.
|
| 288 |
+
Xuehai He, Yichen Zhang, Luntian Mou, Eric Xing, and Pengtao Xie. 2020. Pathvqa: 30000+ questions for medical visual question answering. arXiv preprint arXiv:2003.10286.
|
| 289 |
+
Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2022. Lora: Low-rank adaptation of large language models. In International Conference on Learning Representations (ICLR).
|
| 290 |
+
Xun Huang, Ming-Yu Liu, Serge Belongie, and Jan Kautz. 2018. Multimodal unsupervised image-to-image translation. In Proceedings of the European Conference on Computer Vision (ECCV), pages 172-189.
|
| 291 |
+
Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. 2017. Image-to-image translation with conditional adversarial networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1125-1134.
|
| 292 |
+
Huishan Ji, Qingyi Si, Zheng Lin, Yanan Cao, and Weiping Wang. 2024. Towards one-to-many visual question answering. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 16931-16943.
|
| 293 |
+
Albert Q. Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, Lélio Renard Lavaud, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, and William El Sayed. 2023. Mistral 7b. arXiv preprint arXiv:2310.06825.
|
| 294 |
+
|
| 295 |
+
Peter Kairouz, H Brendan McMahan, Brendan Avent, Aurélien Bellet, Mehdi Bennis, Arjun Nitin Bhagoji, Kallista Bonawitz, Zachary Charles, Graham Cormode, Rachel Cummings, and 1 others. 2021. Advances and open problems in federated learning. Foundations and trends® in machine learning, 14(1-2):1-210.
|
| 296 |
+
Yash Khare, Viraj Bagal, Minesh Mathew, Adithi Devi, U Deva Priyakumar, and CV Jawahar. 2021. Mmbert: Multimodal bert pretraining for improved medical vqa. In 2021 IEEE 18th international symposium on biomedical imaging (ISBI), pages 1033-1036. IEEE.
|
| 297 |
+
Yanis Labrak, Adrien Bazoge, Emmanuel Morin, Pierre-Antoine Gourraud, Mickael Rouvier, and Richard Dufour. 2024. Biomistral: A collection of open-source pretrained large language models for medical domains. arXiv preprint arXiv:2402.10373.
|
| 298 |
+
Mingrui Lao, Nan Pu, Zhun Zhong, Nicu Sebe, and Michael S Lew. 2023. Fedvqa: Personalized federated visual question answering over heterogeneous scenes. In Proceedings of the 31st ACM International Conference on Multimedia, pages 7796-7807.
|
| 299 |
+
Jason J Lau, Soumya Gayen, Asma Ben Abacha, and Dina Demner-Fushman. 2018. A dataset of clinically generated visual questions and answers about radiology images. Scientific data, 5(1):1-10.
|
| 300 |
+
Binxu Li, Tiankai Yan, Yuanting Pan, Jie Luo, Ruiyang Ji, Jiayuan Ding, Zhe Xu, Shilong Liu, Haoyu Dong, Zihao Lin, and Yixin Wang. 2024a. MMedAgent: Learning to use medical tools with multi-modal agent. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 8745-8760.
|
| 301 |
+
Chunyuan Li, Cliff Wong, Sheng Zhang, Naoto Usuyama, Haotian Liu, Jianwei Yang, Tristan Naumann, Hoifung Poon, and Jianfeng Gao. 2023. Llava-med: Training a large language-and-vision assistant for biomedicine in one day. Advances in Neural Information Processing Systems, 36:28541-28564.
|
| 302 |
+
Qinbin Li, Bingsheng He, and Dawn Song. 2021. Model-contrastive federated learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10713-10722.
|
| 303 |
+
Tian Li, Anit Kumar Sahu, Manzil Zaheer, Maziar Sanjabi, Ameet Talwalkar, and Virginia Smith. 2020. Federated optimization in heterogeneous networks. Proceedings of Machine learning and systems, 2:429-450.
|
| 304 |
+
Zhenglin Li, Bo Guan, Yuzhou Wei, Yiming Zhou, Jingyu Zhang, and Jinxin Xu. 2024b. Mapping new realities: Ground truth image creation with pix2pix image-to-image translation. arXiv preprint arXiv:2404.19265.
|
| 305 |
+
Chin-Yew Lin. 2004. Rouge: A package for automatic evaluation of summaries. In Text summarization branches out, pages 74-81.
|
| 306 |
+
|
| 307 |
+
Zhihong Lin, Donghao Zhang, Qingyi Tao, Danli Shi, Gholamreza Haffari, Qi Wu, Mingguang He, and Zongyuan Ge. 2023. Medical visual question answering: A survey. Artificial Intelligence in Medicine, 143:102611.
|
| 308 |
+
Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. 2023. Visual instruction tuning. Advances in neural information processing systems, 36:34892-34916.
|
| 309 |
+
Siyu Lu, Zheng Liu, Tianlin Liu, and Wangchunshu Zhou. 2023. Scaling-up medical vision-and-language representation learning with federated learning. Engineering Applications of Artificial Intelligence, 126:107037.
|
| 310 |
+
Brendan McMahan, Eider Moore, Daniel Ramage, Seth Hampson, and Blaise Aguera y Arcas. 2017. Communication-efficient learning of deep networks from decentralized data. In Artificial intelligence and statistics, pages 1273-1282. PMLR.
|
| 311 |
+
Michael Moor, Qian Huang, Shirley Wu, Michihiro Yasunaga, Yash Dalmia, Jure Leskovec, Cyril Zakka, Eduardo Pontes Reis, and Pranav Rajpurkar. 2023. Med-flamingo: a multimodal medical few-shot learner. In Machine Learning for Health (ML4H), pages 353-367. PMLR.
|
| 312 |
+
Nandita Shankar Naik, Christopher Potts, and Elisa Kreiss. 2024. CommVQA: Situating visual question answering in communicative contexts. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 13362-13377.
|
| 313 |
+
Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th annual meeting of the Association for Computational Linguistics, pages 311-318.
|
| 314 |
+
Adnan Qayyum, Kashif Ahmad, Muhammad Ahtazaz Ahsan, Ala Al-Fuqaha, and Junaid Qadir. 2022. Collaborative federated learning for healthcare: Multimodal Covid-19 diagnosis at the edge. IEEE Open Journal of the Computer Society, 3:172-184.
|
| 315 |
+
Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, and 1 others. 2021. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR.
|
| 316 |
+
Lingyun Song, Chengkun Yang, Xuanyu Li, and Xuequn Shang. 2024. A robust dual-debiasing VQA model based on counterfactual causal effect. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 4242-4252.
|
| 317 |
+
Yue Tan, Guodong Long, Lu Liu, Tianyi Zhou, Qinghua Lu, Jing Jiang, and Chengqi Zhang. 2022. Fedproto:
|
| 318 |
+
|
| 319 |
+
Federated prototype learning across heterogeneous clients. In Proceedings of the AAAI conference on artificial intelligence, volume 36, pages 8432-8440.
|
| 320 |
+
Marlon Tobaben, Mohamed Ali Souibgui, Ruben Tito, Khanh Nguyen, Raouf Kerkouche, Kangsoo Jung, Joonas Jalkö, Lei Kang, Andrey Barsky, Vincent Poulain d'Andecy, and 1 others. 2024. Neurips 2023 competition: Privacy preserving federated learning document vqa. arXiv preprint arXiv:2411.03730.
|
| 321 |
+
Ramakrishna Vedantam, C Lawrence Zitnick, and Devi Parikh. 2015. Cider: Consensus-based image description evaluation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4566-4575.
|
| 322 |
+
Qunbo Wang, Ruyi Ji, Tianhao Peng, Wenjun Wu, Zechao Li, and Jing Liu. 2024a. Soft knowledge prompt: Help external knowledge become a better teacher to instruct llm in knowledge-based vqa. In Findings of the Association for Computational Linguistics ACL 2024, pages 6132-6143.
|
| 323 |
+
Xiaochen Wang, Jiaqi Wang, Houping Xiao, Jinghui Chen, and Fenglong Ma. 2024b. Fedkim: Adaptive federated knowledge injection into medical foundation models. arXiv preprint arXiv:2408.10276.
|
| 324 |
+
Yucheng Wang, Yuecong Xu, Jianfei Yang, Zhenghua Chen, Min Wu, Xiaoli Li, and Lihua Xie. 2023. Sensor alignment for multivariate time-series unsupervised domain adaptation. In Proceedings of the AAAI conference on artificial intelligence, volume 37, pages 10253-10261.
|
| 325 |
+
Zhaoxian Wu, Tianyi Chen, and Qing Ling. 2023. Byzantine-resilient decentralized stochastic optimization with robust aggregation rules. IEEE transactions on signal processing.
|
| 326 |
+
Mengfei Xia, Yu Zhou, Ran Yi, Yong-Jin Liu, and Wenping Wang. 2024. A diffusion model translator for efficient image-to-image translation. IEEE Transactions on Pattern Analysis and Machine Intelligence.
|
| 327 |
+
Binhui Xie, Shuang Li, Fangrui Lv, Chi Harold Liu, Guoren Wang, and Dapeng Wu. 2022. A collaborative alignment framework of transferable knowledge extraction for unsupervised domain adaptation. IEEE Transactions on Knowledge and Data Engineering, 35(7):6518-6533.
|
| 328 |
+
Xiaoying Xing, Peixi Xiong, Lei Fan, Yunxuan Li, and Ying Wu. 2024. Learning to ask denotative and connotative questions for knowledge-based VQA. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 8301-8315.
|
| 329 |
+
Dexuan Xu, Yanyuan Chen, Jieyi Wang, Yue Huang, Hanpin Wang, Zhi Jin, Hongxing Wang, Weihua Yue, Jing He, Hang Li, and 1 others. 2024. Mlevlm: Improve multi-level progressive capabilities based
|
| 330 |
+
|
| 331 |
+
on multimodal large language model for medical visual question answering. In Findings of the Association for Computational Linguistics ACL 2024, pages 4977-4997.
|
| 332 |
+
|
| 333 |
+
Quan Yan, Junwen Duan, and Jianxin Wang. 2024. Multi-modal concept alignment pre-training for generative medical visual question answering. In Findings of the Association for Computational Linguistics ACL 2024, pages 5378-5389.
|
| 334 |
+
|
| 335 |
+
Mingjing Yang, Zhicheng Wu, Hanyu Zheng, Liqin Huang, Wangbin Ding, Lin Pan, and Lei Yin. 2024. Cross-modality medical image segmentation via enhanced feature alignment and cross pseudo supervision learning. Diagnostics, 14(16):1751.
|
| 336 |
+
|
| 337 |
+
Xiaoshan Yang, Baochen Xiong, Yi Huang, and Changsheng Xu. 2022. Cross-modal federated human activity recognition via modality-agnostic and modality-specific representation learning. In Proceedings of the AAAI conference on artificial intelligence, volume 36, pages 3063-3071.
|
| 338 |
+
|
| 339 |
+
Jianqing Zhang, Yang Liu, Yang Hua, and Jian Cao. 2024. Fedtgp: Trainable global prototypes with adaptive-margin-enhanced contrastive learning for data and model heterogeneity in federated learning. In Proceedings of the AAAI conference on artificial intelligence, volume 38, pages 16768-16776.
|
| 340 |
+
|
| 341 |
+
Rongyu Zhang, Xiaowei Chi, Guiliang Liu, Wenyi Zhang, Yuan Du, and Fangxin Wang. 2023. Unimodal training-multimodal prediction: Cross-modal federated learning with hierarchical aggregation. arXiv preprint arXiv:2303.15486.
|
| 342 |
+
|
| 343 |
+
Ziyuan Zhao, Fangcheng Zhou, Kaixin Xu, Zeng Zeng, Cuntai Guan, and S Kevin Zhou. 2022. Le-uda: Label-efficient unsupervised domain adaptation for medical image segmentation. IEEE transactions on medical imaging, 42(3):633-646.
|
| 344 |
+
|
| 345 |
+
He Zhu, Ren Togo, Takahiro Ogawa, and Miki Haseyama. 2024. Prompt-based personalized federated learning for medical visual question answering. In ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1821-1825. IEEE.
|
| 346 |
+
|
| 347 |
+
Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A Efros. 2017. Unpaired image-to-image translation using cycle-consistent adversarial networks. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 2223-2232.
|
| 348 |
+
|
| 349 |
+
# Appendix
|
| 350 |
+
|
| 351 |
+
The appendix of this study provides comprehensive details that support the main framework, methodology, and experimental results presented in the paper. Below is a summary of each section: Section A provides a detailed explanation of the core algorithms of X-FLoRA. Section B discusses the stability of discriminator-based aggregation and presents experiments on cross-modality. Section C presents additional quantitative and qualitative results. Section D presents qualitative VQA results of X-FLoRA and other methods.
|
| 354 |
+
|
| 355 |
+
# A Method Algorithms
|
| 356 |
+
|
| 357 |
+
# A.1 Federated Asymmetric Translation
|
| 358 |
+
|
| 359 |
+
Algorithm 1 describes the training process of text-driven translation. Each client generates synthetic images with the frozen forward translator and reconstructs the original image with the backward translator.
|
| 360 |
+
|
| 361 |
+
# A.1.1 Discriminator Quality Score-based Aggregation
|
| 362 |
+
|
| 363 |
+
Algorithm 2 details the aggregation process using discriminator quality scores and gradient information.
|
| 364 |
+
|
| 365 |
+
# A.2 Federated VQA Finetuning
|
| 366 |
+
|
| 367 |
+
Algorithm 3 presents the finetuning phase for the modality-expert LoRA modules. Each client updates only the lightweight LoRA parameters for the MRI, CT, and text modalities.
|
| 368 |
+
|
| 369 |
+
# A.2.1 Modality-specific Aggregation
|
| 370 |
+
|
| 371 |
+
Algorithm 4 defines the aggregation process for the modality-expert LoRA weights.
|
| 372 |
+
|
| 373 |
+
# Algorithm 1 Federated Asymmetric Translation
|
| 374 |
+
|
| 375 |
+
Require: Real data $x$, text report $t$, frozen forward translator $F$

1: $F(x, t)$ $\triangleright$ Generate synthetic image using forward translation
2: $B(F(x,t),t)$ $\triangleright$ Reconstruct using backward translation
3: $D(B(F(x,t),t))$ $\triangleright$ Distinguish between real and reconstructed images
4: $\mathcal{L}_{adv} \gets \|1 - D(B(F(x, t), t))\|_2$
5: $\mathcal{L}_{id} \gets \|B(F(x,t),t) - x\|_1$
6: $\mathcal{L}_{total} \gets \mathcal{L}_{adv} + \eta \mathcal{L}_{id}$
7: Optimize $B$ to minimize and $D$ to maximize $\mathcal{L}_{total}$
8: return weights of $B$, discriminator score $s^{r,i}$, and gradient $g^{r,i}$ to the server
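For concreteness, the following is a minimal PyTorch-style sketch of one client-side step of Algorithm 1. The alternating discriminator/translator updates, the module names (`Fwd`, `Bwd`, `Disc`), and the use of mean-squared error for the adversarial term are assumptions made for illustration, not the paper's exact implementation.

```python
import torch
import torch.nn.functional as F

# Sketch of one client-side step of federated asymmetric translation.
# Fwd is the frozen forward translator; Bwd (backward translator) and
# Disc (discriminator) are trainable. eta weights the identity loss.

def client_step(Fwd, Bwd, Disc, x, t, eta, opt_b, opt_d):
    with torch.no_grad():
        x_syn = Fwd(x, t)                 # forward translation (frozen)
    x_rec = Bwd(x_syn, t)                 # backward reconstruction

    # Discriminator update: real images toward 1, reconstructions toward 0.
    d_real = Disc(x)
    d_fake = Disc(x_rec.detach())
    d_loss = F.mse_loss(d_real, torch.ones_like(d_real)) + \
             F.mse_loss(d_fake, torch.zeros_like(d_fake))
    opt_d.zero_grad()
    d_loss.backward()
    opt_d.step()

    # Backward-translator update:
    # L_adv = ||1 - D(B(F(x,t),t))||_2 and L_id = ||B(F(x,t),t) - x||_1.
    d_rec = Disc(x_rec)
    adv = F.mse_loss(d_rec, torch.ones_like(d_rec))
    ident = F.l1_loss(x_rec, x)
    total = adv + eta * ident
    opt_b.zero_grad()
    total.backward()
    opt_b.step()
    return total.item()
```

In round $r$, client $i$ would then return the updated weights of $B$ together with its discriminator score $s^{r,i}$ and gradient statistic $g^{r,i}$ to the server, as in step 8.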
|
| 385 |
+
|
| 386 |
+
# Algorithm 2 Discriminator Quality Score-based Aggregation

Require: MRI clients $N_{m}$, CT clients $N_{c}$, discriminator gradients $g_{m}^{r,i}$, $g_{c}^{r,j}$ and scores $s_{m}^{r,i}$, $s_{c}^{r,j}$

1: $G_{m}^{r} \gets G_{m}^{r-1} + \sum_{i=1}^{N_{m}}(g_{m}^{r,i})^{2}$ $\triangleright$ Update cumulative gradient for MRI clients
2: $G_{c}^{r} \gets G_{c}^{r-1} + \sum_{j=1}^{N_{c}}(g_{c}^{r,j})^{2}$ $\triangleright$ Update cumulative gradient for CT clients
3: for each MRI client $i \in N_{m}$ do
4: $\theta_{c2m}^{r+1} = \sum_{i=1}^{N_m} \frac{(s_m^{r,i} + g_m^{r,i}) \cdot \theta_{c2m}^{r,i}}{\sum_{k=1}^{N_m} s_m^{r,k} + \sqrt{G_m^r}}$ $\triangleright$ Perform weight-based aggregation for MRI clients
5: end for
6: for each CT client $j \in N_{c}$ do
7: $\theta_{m2c}^{r+1} = \sum_{j=1}^{N_c} \frac{(s_c^{r,j} + g_c^{r,j}) \cdot \theta_{m2c}^{r,j}}{\sum_{k=1}^{N_c} s_c^{r,k} + \sqrt{G_c^r}}$ $\triangleright$ Perform weight-based aggregation for CT clients
8: end for
9: return Aggregated weights $\theta_{c2m}^{r+1}$ and $\theta_{m2c}^{r+1}$
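A compact server-side sketch of this aggregation rule follows. It handles one modality group at a time (for example, the CT$\to$MRI backward translators held by MRI clients); the variable names, the state-dict representation of model weights, and treating each client's score and gradient as scalars are illustrative assumptions.

```python
# Sketch of discriminator quality score-based aggregation for one modality group.
# state_dicts: list of client model state dicts; scores/grads: per-client scalars
# s^{r,i} and g^{r,i}; G_prev: cumulative squared-gradient statistic G^{r-1}.

def aggregate_translators(state_dicts, scores, grads, G_prev):
    G = G_prev + sum(g ** 2 for g in grads)        # G^r update
    denom = sum(scores) + G ** 0.5                 # sum_k s^{r,k} + sqrt(G^r)
    weights = [(s + g) / denom for s, g in zip(scores, grads)]

    aggregated = {}
    for name in state_dicts[0]:
        aggregated[name] = sum(w * sd[name] for w, sd in zip(weights, state_dicts))
    return aggregated, G
```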
|
| 397 |
+
|
| 398 |
+
# Algorithm 3 Federated VQA Finetuning
|
| 400 |
+
Require: For each modality $k \in \{m, c, t\}$: input $v_k$, encoder weights $W_k$, and LoRA weights $\alpha_k, \beta_k$.
|
| 403 |
+
|
| 404 |
+
1: $\hat{v}_k = W_k v_k + \beta_k \alpha_k v_k$ $\triangleright$ Refine the modality-specific representation
|
| 405 |
+
2: Finetune only LoRA parameters $\alpha_{k},\beta_{k}$
|
| 406 |
+
3: return Modality-specific LoRA weights $\{\alpha_{m},\beta_{m}\}^{r,i},\{\alpha_{c},\beta_{c}\}^{r,i}$ and $\{\alpha_t,\beta_t\}^{r,i}$
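The LoRA-augmented projection in step 1 can be sketched as follows. The class name, initialization scheme, and tensor layout are assumptions for illustration; only the low-rank factors $\alpha_k$ and $\beta_k$ receive gradients, while the encoder weight $W_k$ stays frozen.

```python
import torch
import torch.nn as nn

# Sketch of the modality-expert LoRA projection: v_hat = W v + beta alpha v,
# where only alpha (rank x d_in) and beta (d_out x rank) are trainable.

class ModalityExpertLoRA(nn.Module):
    def __init__(self, frozen_weight: torch.Tensor, rank: int):
        super().__init__()
        d_out, d_in = frozen_weight.shape
        self.W = nn.Parameter(frozen_weight, requires_grad=False)   # frozen encoder weight
        self.alpha = nn.Parameter(torch.randn(rank, d_in) * 0.01)   # trainable low-rank A
        self.beta = nn.Parameter(torch.zeros(d_out, rank))          # trainable low-rank B (zero init)

    def forward(self, v: torch.Tensor) -> torch.Tensor:
        # v: (batch, d_in) -> (batch, d_out)
        return v @ self.W.T + (v @ self.alpha.T) @ self.beta.T
```

With $\beta_k$ initialized to zero, the module initially reproduces the frozen encoder output exactly, so federated finetuning starts from the pretrained behavior and only gradually injects modality-specific knowledge.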
|
| 407 |
+
|
| 408 |
+
# Algorithm 4 Modality-specific Aggregation
|
| 409 |
+
Require: MRI clients $N_{m}^{r}$ (real) and $N_{m}^{s}$ (synthetic), CT clients $N_{c}^{r}$ and $N_{c}^{s}$, LoRA weights $\{\alpha_{m}, \beta_{m}\}^{r,i}$, $\{\alpha_{c}, \beta_{c}\}^{r,i}$, $\{\alpha_{t}, \beta_{t}\}^{r,i}$ of the $i$-th client, and normalization ratio $\epsilon$
|
| 412 |
+
|
| 413 |
+
1: for each client $i$ do
|
| 414 |
+
2: if $i$ is real then
|
| 415 |
+
3: $\lambda^i \gets \frac{\epsilon}{\epsilon\cdot N^r + N^s}$ $\triangleright$ Compute real-client weight
|
| 416 |
+
4: else
|
| 417 |
+
5: $\lambda^i \gets \frac{1}{\epsilon\cdot N^r + N^s}$ $\triangleright$ Compute synthetic-client weight
|
| 418 |
+
6: end if
|
| 419 |
+
7: end for
|
| 420 |
+
8: $\{\alpha_k,\beta_k\}^{r + 1}\gets \sum_{i = 1}^N\lambda^i\cdot \{\alpha_k,\beta_k\}^{r,i},k\in$ $\{m,c,t\} \triangleright$ Aggregate modality-specific LoRA weights
|
| 421 |
+
9: return Aggregated weights $\{\alpha_k, \beta_k\}^{r+1}$
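A minimal sketch of this expert-aware weighting is given below, assuming the per-client LoRA weights for one modality are available as state dicts and `is_real` flags whether client $i$ holds real data of that modality; $\epsilon = 1.5$ follows the best setting reported in Tables 15 and 16.

```python
# Sketch of modality-specific aggregation of LoRA weights (Algorithm 4).
# lora_states: list of per-client LoRA state dicts for one modality k;
# is_real[i] is True if client i holds real data of modality k.

def aggregate_lora(lora_states, is_real, eps=1.5):
    n_real = sum(is_real)
    n_syn = len(is_real) - n_real
    denom = eps * n_real + n_syn
    lams = [(eps if real else 1.0) / denom for real in is_real]   # lambda^i

    aggregated = {}
    for name in lora_states[0]:
        aggregated[name] = sum(lam * sd[name] for lam, sd in zip(lams, lora_states))
    return aggregated
```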
|
| 422 |
+
|
| 423 |
+
# B Discussion
|
| 424 |
+
|
| 425 |
+
# B.1 Discriminator Score-based Aggregation
|
| 426 |
+
|
| 427 |
+
Discriminator-based aggregation may raise concerns about stability, especially in cross-modal scenarios. However, the proposed framework addresses this issue through a carefully designed weighting mechanism. Specifically, our method does not directly rely on the discriminator's confusion between real and synthetic data. Instead, aggregation weights are determined based on the discriminator's confidence and accuracy exclusively on real images, reflecting its reliability in recognizing genuine data rather than its susceptibility to well-generated synthetic examples.
|
| 428 |
+
|
| 429 |
+
Moreover, the proposed framework does not rely solely on discriminator scores for aggregation weight determination. Instead, it incorporates additional signals, including the gradient of the discriminator loss and the cumulative gradient sum (e.g., $G^{r}$ ), to ensure more stable and reliable weighting. These complementary factors help mitigate potential biases caused by temporary discriminator confusion and contribute to more robust aggregation decisions.
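As an illustration of the first point, a client-side reliability score computed exclusively on real images could look like the sketch below. The specific mix of mean confidence and thresholded accuracy, the sigmoid output assumption, and the function name are assumptions made for illustration rather than the paper's exact scoring rule.

```python
import torch

# Sketch of a discriminator reliability score computed on real images only.
# Disc is assumed to output a logit that sigmoid maps to P(input is real).

@torch.no_grad()
def discriminator_reliability(Disc, real_loader):
    confidences, correct, total = [], 0, 0
    for x in real_loader:
        p = torch.sigmoid(Disc(x)).flatten()   # confidence that each input is real
        confidences.append(p.mean().item())
        correct += (p > 0.5).sum().item()      # accuracy at recognizing genuine data
        total += p.numel()
    mean_conf = sum(confidences) / len(confidences)
    accuracy = correct / total
    return 0.5 * mean_conf + 0.5 * accuracy    # combined score in [0, 1]
```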
|
| 430 |
+
|
| 431 |
+
# B.2 Experiments on Cross-modality
|
| 432 |
+
|
| 433 |
+
In this study, we validate the superiority of our approach in addressing cross-modal heterogeneity by comparing it against combinations of DA and FL strategies. Such combinations primarily rely on aggregating modality-specific features into a shared representation, and they often fail to bridge the substantial semantic and visual gaps inherent in medical imaging modalities such as MRI and CT.
|
| 434 |
+
|
| 435 |
+
To ensure a fair and comprehensive comparison, we selected state-of-the-art FL baselines that explicitly incorporate domain adaptation mechanisms (e.g., FedTGP with SEA and IOS with CAF), which are representative approaches for mitigating domain shifts. However, even with these enhancements, they struggle to fully capture modality-specific semantic cues and to achieve effective cross-modal representation learning. In contrast, our proposed X-FLoRA framework mitigates these challenges by employing federated asymmetric translation and federated VQA finetuning. This design allows each modality to retain its unique characteristics while still enabling effective cross-modal representation learning.
|
| 436 |
+
|
| 437 |
+
|
| 438 |
+
|
| 439 |
+
<table><tr><td>Dataset</td><td colspan="2">LLaVA-Med</td><td colspan="2">VQA-RAD</td></tr><tr><td>Metric</td><td>PPV</td><td>Sensitivity</td><td>PPV</td><td>Sensitivity</td></tr><tr><td>FedAvg</td><td>34.67</td><td>23.30</td><td>63.95</td><td>64.35</td></tr><tr><td>FedProx</td><td>34.50</td><td>23.20</td><td>68.30</td><td>66.82</td></tr><tr><td>MOON</td><td>34.92</td><td>23.85</td><td>68.77</td><td>67.98</td></tr><tr><td>FedProto</td><td>34.86</td><td>23.93</td><td>68.86</td><td>68.52</td></tr><tr><td>IOS</td><td>35.08</td><td>23.52</td><td>66.67</td><td>67.83</td></tr><tr><td>FedTGP</td><td>35.61</td><td>24.36</td><td>67.59</td><td>68.77</td></tr><tr><td>X-FLoRA</td><td>37.04</td><td>25.67</td><td>69.67</td><td>71.24</td></tr></table>
|
| 440 |
+
|
| 441 |
+
Table 6: Comparison with prior federated learning methods in terms of PPV and sensitivity on the LLaVA-Med and VQA-RAD datasets.
|
| 442 |
+
|
| 443 |
+
<table><tr><td>Dataset</td><td colspan="5">LLaVA-Med</td></tr><tr><td>Metric</td><td>BLEU-1</td><td>BLEU-5</td><td>METEOR</td><td>ROUGE</td><td>CIDEr</td></tr><tr><td>FedAvg</td><td>0.2857</td><td>0.1446</td><td>0.3408</td><td>0.3641</td><td>0.4968</td></tr><tr><td>FedProx</td><td>0.2804</td><td>0.1478</td><td>0.3414</td><td>0.3652</td><td>0.5003</td></tr><tr><td>MOON</td><td>0.2908</td><td>0.1512</td><td>0.3455</td><td>0.3576</td><td>0.5108</td></tr><tr><td>FedProto</td><td>0.2915</td><td>0.1530</td><td>0.3447</td><td>0.3557</td><td>0.5117</td></tr><tr><td>IOS</td><td>0.2884</td><td>0.1497</td><td>0.3486</td><td>0.3602</td><td>0.5178</td></tr><tr><td>FedTGP</td><td>0.2990</td><td>0.1546</td><td>0.3515</td><td>0.3629</td><td>0.5201</td></tr><tr><td>X-FLoRA</td><td>0.3158</td><td>0.1614</td><td>0.3667</td><td>0.3899</td><td>0.5403</td></tr></table>
|
| 444 |
+
|
| 445 |
+
Experimental results demonstrate that our approach consistently outperforms combinations of DA and FL strategies, particularly in handling complex modality-specific reasoning tasks. This underscores the effectiveness of explicitly modeling cross-modal heterogeneity through structured translation and fine-tuning mechanisms, rather than relying solely on shared representations.
|
| 446 |
+
|
| 447 |
+
# B.3 RAG with X-FLoRA
|
| 448 |
+
|
| 449 |
+
Integrating RAG into our FL framework poses significant challenges. In the FL setting, clients are constrained from sharing raw data due to privacy regulations, whereas RAG requires access to a large, centralized, and searchable corpus at inference time. This assumption conflicts with the privacy-preserving nature of FL, particularly in medical domains. Hence, although RAG could potentially improve QA performance, integrating it into X-FLoRA would require a privacy-preserving retrieval method, because client queries may contain sensitive medical information that must not be exposed during external document retrieval.
|
| 450 |
+
|
| 451 |
+
# C Additional Experiments
|
| 452 |
+
|
| 453 |
+
# C.1 Clinical Validation
|
| 454 |
+
|
| 455 |
+
|
| 456 |
+
|
| 457 |
+
Table 7: Comparison with prior federated learning methods in terms of BLEU, METEOR, ROUGE, and CIDEr on LLaVA-Med dataset with 6 CT clients and 2 MRI clients.
|
| 458 |
+
|
| 459 |
+
<table><tr><td>Dataset</td><td colspan="5">LLaVA-Med</td></tr><tr><td>Metric</td><td>BLEU-1</td><td>BLEU-5</td><td>METEOR</td><td>ROUGE</td><td>CIDEr</td></tr><tr><td>FedAvg</td><td>0.2864</td><td>0.1459</td><td>0.3421</td><td>0.3656</td><td>0.4960</td></tr><tr><td>FedProx</td><td>0.2797</td><td>0.1437</td><td>0.3405</td><td>0.3630</td><td>0.4968</td></tr><tr><td>MOON</td><td>0.2813</td><td>0.1467</td><td>0.3437</td><td>0.3650</td><td>0.5011</td></tr><tr><td>FedProto</td><td>0.2856</td><td>0.1497</td><td>0.3433</td><td>0.3639</td><td>0.5027</td></tr><tr><td>IOS</td><td>0.2901</td><td>0.1523</td><td>0.3453</td><td>0.3671</td><td>0.5113</td></tr><tr><td>FedTGP</td><td>0.2965</td><td>0.1523</td><td>0.3478</td><td>0.3601</td><td>0.5188</td></tr><tr><td>X-FLoRA</td><td>0.3160</td><td>0.1611</td><td>0.3685</td><td>0.3902</td><td>0.5398</td></tr></table>
|
| 460 |
+
|
| 461 |
+
Table 8: Comparison with prior federated learning methods in terms of BLEU, METEOR, ROUGE, and CIDEr on LLaVA-Med dataset with 2 CT clients and 6 MRI clients.
|
| 462 |
+
|
| 463 |
+
<table><tr><td>Dataset</td><td colspan="5">LLaVA-Med</td><td colspan="3">VQA-RAD</td></tr><tr><td rowspan="2">Metric</td><td rowspan="2">BLEU-1</td><td rowspan="2">BLEU-5</td><td rowspan="2">METEOR</td><td rowspan="2">ROUGE</td><td rowspan="2">CIDEr</td><td colspan="3">Accuracy (%)</td></tr><tr><td>Open</td><td>Closed</td><td>Overall</td></tr><tr><td>IOS</td><td>0.2957</td><td>0.1556</td><td>0.3514</td><td>0.3552</td><td>0.5176</td><td>55.38</td><td>78.02</td><td>69.13</td></tr><tr><td>FedTGP</td><td>0.2997</td><td>0.1560</td><td>0.3567</td><td>0.3644</td><td>0.5236</td><td>57.52</td><td>79.43</td><td>69.69</td></tr><tr><td>X-FLoRA</td><td>0.3287</td><td>0.1642</td><td>0.3731</td><td>0.3900</td><td>0.5415</td><td>59.27</td><td>81.14</td><td>72.50</td></tr></table>
|
| 464 |
+
|
| 465 |
+
Table 9: Comparison with prior federated learning methods in terms of BLEU, METEOR, ROUGE, and CIDEr on the LLaVA-Med dataset and accuracy on the VQA-RAD dataset, with 4 X-ray clients and 4 CT clients.
|
| 466 |
+
|
| 467 |
+
This work was conducted in collaboration with clinical experts in the Department of Nuclear Medicine and the Department of Cardiology. Specifically, in our qualitative evaluations (Figs. 5 and 8-18), lesion areas are annotated by clinical experts (marked with red arrows). To further validate the clinical usefulness, we consulted clinical experts and incorporated additional recommended evaluation metrics such as sensitivity, which relates to diagnostic accuracy, and positive predictive value (PPV), which reflects the rate of false positives. As shown in Table 6, X-FLoRA outperforms all compared models in both sensitivity and PPV. This indicates that X-FLoRA generates fewer incorrect responses, which is vital in healthcare applications.
|
| 468 |
+
|
| 469 |
+
# C.2 Ratio of Clients
|
| 470 |
+
|
| 471 |
+
Tables 7 and 8 compare the performance of X-FLoRA with several existing FL methods under different client settings on the LLaVA-Med dataset. Specifically, Table 7 evaluates the case with 6 CT clients and 2 MRI clients, while Table 8 examines the scenario with 2 CT clients and 6 MRI clients.
|
| 472 |
+
|
| 473 |
+
Across both settings, X-FLoRA consistently outperforms all existing methods in terms of BLEU, METEOR, ROUGE, and CIDEr metrics. These results highlight the robustness and effectiveness of X-FLoRA, even under varying distributions of modality-specific clients. The superior performance demonstrates that X-FLoRA effectively handles cross-modal heterogeneity and maintains high-quality VQA generation, regardless of the client composition.
|
| 474 |
+
|
| 475 |
+
<table><tr><td>Dataset</td><td colspan="5">LLaVA-Med</td><td colspan="3">VQA-RAD</td></tr><tr><td rowspan="2">Metric</td><td rowspan="2">BLEU-1</td><td rowspan="2">BLEU-5</td><td rowspan="2">METEOR</td><td rowspan="2">ROUGE</td><td rowspan="2">CIDEr</td><td colspan="3">Accuracy (%)</td></tr><tr><td>Open</td><td>Closed</td><td>Overall</td></tr><tr><td>LLaVA</td><td>0.2937</td><td>0.1519</td><td>0.3508</td><td>0.3558</td><td>0.5167</td><td>55.33</td><td>78.06</td><td>69.30</td></tr><tr><td>X-FLORA</td><td>0.3191</td><td>0.1630</td><td>0.3704</td><td>0.3954</td><td>0.5430</td><td>60.42</td><td>81.10</td><td>72.89</td></tr></table>
|
| 476 |
+
|
| 477 |
+
# C.3 Additional Modality
|
| 478 |
+
|
| 479 |
+
Our proposed architecture is inherently extensible, as it does not assume fixed modality pairs and supports potential extensions, as mentioned in the Limitations section. To provide empirical evidence for such extensions, we conducted an experiment with X-ray (an additional modality) and CT clients. As presented in Table 9, X-FLoRA outperforms the compared models, demonstrating generalization across more diverse settings. In particular, the superior results on both CT and the newly introduced X-ray clients provide strong empirical evidence that our framework is not confined to specific modality pairs but can be effectively extended to additional modalities. This highlights that X-FLoRA consistently maintains its performance advantages across heterogeneous modalities, thereby reinforcing its potential as a general federated learning solution for real-world multi-modal medical environments.
|
| 480 |
+
|
| 481 |
+
# C.4 Comparison with LLaVA
|
| 482 |
+
|
| 483 |
+
As shown in Table 10, X-FLoRA outperforms LLaVA (Liu et al., 2023). This indicates that our framework enhances performance without degrading the frozen LLM's capabilities, validating the effectiveness of our design. The improvement primarily stems from the modality-expert LoRA fine-tuning, which injects modality-specific knowledge into the encoders while preserving the general reasoning ability of the backbone LLM. By selectively adapting the key and value projections in the attention layers and the linear transformations in the feedforward layers, our LoRA modules achieve fine-grained alignment with medical imaging modalities at minimal computational cost. This confirms that lightweight, targeted adaptation not only avoids catastrophic forgetting but also leads to consistent gains across all evaluation metrics.
|
| 484 |
+
|
| 485 |
+
# C.5 Ablation Study
|
| 486 |
+
|
| 487 |
+
Table 11 presents an additional ablation study of the individual contributions of each module in the X-FLoRA framework. The results demonstrate that each module significantly enhances the overall performance of X-FLoRA.
|
| 488 |
+
|
| 489 |
+
Table 10: Comparison with LLaVA in terms of BLEU, METEOR, ROUGE, and CIDEr on LLaVAMed dataset.
|
| 490 |
+
|
| 491 |
+
<table><tr><td colspan="3">Federated Asymmetric Translation</td><td colspan="2">Federated VQA Finetuning</td><td rowspan="2">Overall Accuracy (%)</td></tr><tr><td>Text</td><td>Translation</td><td>Discriminator-based Aggregation</td><td>Modality-expert LoRA</td><td>Modality-specific Aggregation</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>72.89</td></tr><tr><td>✓</td><td>✓</td><td></td><td>✓</td><td>✓</td><td>71.08</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td></td><td>71.12</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td></td><td></td><td>69.83</td></tr><tr><td></td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>70.89</td></tr><tr><td></td><td></td><td></td><td></td><td></td><td>64.76</td></tr></table>
|
| 492 |
+
|
| 493 |
+
Table 11: Ablation study for X-FLoRA on the VQA-RAD dataset in terms of the accuracy.
|
| 494 |
+
|
| 495 |
+
<table><tr><td>η</td><td>BLEU-1</td><td>BLEU-5</td><td>METEOR</td><td>ROUGE</td><td>CIDEr</td></tr><tr><td>0.3</td><td>0.3095</td><td>0.1578</td><td>0.3473</td><td>0.3846</td><td>0.5349</td></tr><tr><td>0.4</td><td>0.3158</td><td>0.1604</td><td>0.3516</td><td>0.3911</td><td>0.5410</td></tr><tr><td>0.5</td><td>0.3191</td><td>0.1630</td><td>0.3604</td><td>0.3954</td><td>0.5430</td></tr><tr><td>0.6</td><td>0.3170</td><td>0.1610</td><td>0.3542</td><td>0.3911</td><td>0.5413</td></tr></table>
|
| 496 |
+
|
| 497 |
+
Table 12: Effect of the adjusting hyperparameter $(\eta)$ in terms of BLEU, METEOR, ROUGE and CIDEr in federated learning of shared asymmetric translation on the LLaVA-Med dataset.
|
| 498 |
+
|
| 499 |
+
The combination of these modules operates synergistically to maximize VQA performance, effectively addressing the challenges posed by cross-modal FL heterogeneity.
|
| 500 |
+
|
| 501 |
+
# C.6 Weight of Total Loss
|
| 502 |
+
|
| 503 |
+
Table 12 presents the impact of the hyperparameter $\eta$ on the performance of federated learning with shared asymmetric translation, evaluated using BLEU, METEOR, ROUGE, and CIDEr metrics on the LLaVA-Med dataset. The results indicate that setting $\eta$ to 0.5 yields the best overall performance across all metrics, suggesting that this value provides an effective balance between adversarial and identity losses in training.
|
| 504 |
+
|
| 505 |
+
# C.7 Modality-expert LoRA
|
| 506 |
+
|
| 507 |
+
Table 13 presents an ablation study analyzing the contribution of each rank ($r_m$, $r_c$, and $r_t$) by varying one rank at a time, where only the corresponding modality-expert LoRA is fine-tuned. The evaluation was conducted on the LLaVA-Med dataset using BLEU, METEOR, ROUGE, and CIDEr metrics. Table 13 shows that setting the modality-specific LoRA ranks ($r_m$, $r_c$, and $r_t$) to 16, 32, and 8, respectively, yields the best overall performance across all metrics. This result suggests that a balanced representation capacity across the MRI, CT, and text modalities is most effective for the VQA task.
|
| 508 |
+
|
| 509 |
+
Table 14 summarizes the results of an ablation study evaluating the impact of different combinations of ranks ($r_m$, $r_c$, and $r_t$) assigned to the modality-expert LoRA modules.
|
| 510 |
+
|
| 511 |
+
<table><tr><td>Rank</td><td>BLEU-1</td><td>BLEU-5</td><td>METEOR</td><td>ROUGE</td><td>CIDEr</td></tr><tr><td>rm</td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>8</td><td>0.3121</td><td>0.1582</td><td>0.3601</td><td>0.3876</td><td>0.5367</td></tr><tr><td>16</td><td>0.3150</td><td>0.1598</td><td>0.3645</td><td>0.3907</td><td>0.5406</td></tr><tr><td>32</td><td>0.3122</td><td>0.1577</td><td>0.3605</td><td>0.3869</td><td>0.5371</td></tr><tr><td>Rank</td><td>BLEU-1</td><td>BLEU-5</td><td>METEOR</td><td>ROUGE</td><td>CIDEr</td></tr><tr><td>rc</td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>8</td><td>0.3133</td><td>0.1569</td><td>0.3605</td><td>0.3858</td><td>0.5376</td></tr><tr><td>16</td><td>0.3128</td><td>0.1575</td><td>0.3611</td><td>0.3861</td><td>0.5364</td></tr><tr><td>32</td><td>0.3149</td><td>0.1580</td><td>0.3651</td><td>0.3911</td><td>0.5402</td></tr></table>
|
| 512 |
+
|
| 513 |
+
<table><tr><td>Rank</td><td>BLEU-1</td><td>BLEU-5</td><td>METEOR</td><td>ROUGE</td><td>CIDEr</td></tr><tr><td>rt</td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>8</td><td>0.3138</td><td>0.1555</td><td>0.3637</td><td>0.3882</td><td>0.5390</td></tr><tr><td>16</td><td>0.3125</td><td>0.1534</td><td>0.3610</td><td>0.3868</td><td>0.5351</td></tr><tr><td>32</td><td>0.3120</td><td>0.1533</td><td>0.3600</td><td>0.3851</td><td>0.5355</td></tr></table>
|
| 514 |
+
|
| 515 |
+
Table 13: Ablation study on the contribution of each rank ($r_m$, $r_c$, and $r_t$) in terms of BLEU, METEOR, ROUGE, and CIDEr metrics on the LLaVA-Med dataset.
|
| 516 |
+
|
| 517 |
+
<table><tr><td>Rank</td><td>BLEU-1</td><td>BLEU-5</td><td>METEOR</td><td>ROUGE</td><td>CIDEr</td></tr><tr><td>rm,rc,rt</td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>16,32,8</td><td>0.3191</td><td>0.1630</td><td>0.3704</td><td>0.3954</td><td>0.5430</td></tr><tr><td>32,32,8</td><td>0.3175</td><td>0.1608</td><td>0.3685</td><td>0.3925</td><td>0.5417</td></tr><tr><td>16,16,8</td><td>0.3177</td><td>0.1611</td><td>0.3672</td><td>0.3923</td><td>0.5401</td></tr><tr><td>16,32,16</td><td>0.3156</td><td>0.1596</td><td>0.3655</td><td>0.3896</td><td>0.5399</td></tr></table>
|
| 518 |
+
|
| 519 |
+
While the configuration of (16, 32, 8) had previously shown promising results, we further validated its effectiveness by experimenting with alternative rank combinations. As shown in Table 14, the (16, 32, 8) setting consistently outperforms other configurations across all evaluation metrics, including BLEU, METEOR, ROUGE, and CIDEr. This confirms that assigning moderate capacity to the MRI and CT experts and a smaller capacity to the text expert leads to the most balanced and effective performance.
|
| 520 |
+
|
| 521 |
+
Moreover, Tables 15 and 16 explore the impact of the aggregation weight hyperparameter $\epsilon$ , which controls the balance between real and synthetic data contributions during modality-specific aggregation. As $\epsilon$ increases, real client data receives higher weight. The best performance is achieved at $\epsilon = 1.5$ , while performance degrades when $\epsilon = 1$ (equal weighting) or $\epsilon = 0.5$ (favoring synthetic data). This highlights the importance of prioritizing real data for robust VQA model training.
|
| 522 |
+
|
| 523 |
+
# C.8 Effect of Text
|
| 524 |
+
|
| 525 |
+
Figure 6 demonstrates the effectiveness of the textual cues associated with images in the LLaVA-Med dataset. As shown, $\mathrm{CT} \rightarrow \mathrm{MRI}$ and $\mathrm{MRI} \rightarrow \mathrm{CT}$ translations performed without textual cues significantly degrade visual quality, introducing severe noise and distorting anatomical regions.
|
| 526 |
+
|
| 527 |
+
Table 14: Effect of the combination of rank $(r_m, r_c,$ and $r_t)$ in terms of BLEU, METEOR, ROUGE, and CIDEr metrics on the LLaVA-Med dataset.
|
| 528 |
+
|
| 529 |
+
<table><tr><td>ε</td><td>BLEU-1</td><td>BLEU-5</td><td>METEOR</td><td>ROUGE</td><td>CIDEr</td></tr><tr><td>0.5</td><td>0.2959</td><td>0.1514</td><td>0.3547</td><td>0.3778</td><td>0.5201</td></tr><tr><td>1</td><td>0.3091</td><td>0.1598</td><td>0.3602</td><td>0.3845</td><td>0.5289</td></tr><tr><td>1.5</td><td>0.3291</td><td>0.1730</td><td>0.3704</td><td>0.4054</td><td>0.5530</td></tr><tr><td>2</td><td>0.3105</td><td>0.1684</td><td>0.3589</td><td>0.3823</td><td>0.5317</td></tr><tr><td>2.5</td><td>0.3052</td><td>0.1647</td><td>0.3604</td><td>0.3760</td><td>0.5208</td></tr></table>
|
| 530 |
+
|
| 531 |
+
Table 15: Effect of the normalization ratio (ε) on BLEU, METEOR, ROUGE, and CIDEr scores in expert-aware weighting on the LLaVA-Med dataset.
|
| 532 |
+
|
| 533 |
+
<table><tr><td rowspan="2">€</td><td colspan="3">Accuracy (%)</td></tr><tr><td>Open</td><td>Closed</td><td>Overall</td></tr><tr><td>0.5</td><td>56.48</td><td>76.98</td><td>68.84</td></tr><tr><td>1</td><td>58.45</td><td>77.95</td><td>70.21</td></tr><tr><td>1.5</td><td>60.42</td><td>81.10</td><td>72.89</td></tr><tr><td>2</td><td>59.10</td><td>77.58</td><td>70.24</td></tr><tr><td>2.5</td><td>58.38</td><td>76.99</td><td>69.60</td></tr></table>
|
| 534 |
+
|
| 535 |
+
Table 16: Effect of the normalization ratio $(\epsilon)$ on accuracy in expert-aware weighting on the VQA-RAD dataset.
|
| 536 |
+
|
| 537 |
+
Compared with translations without textual cues, the proposed text-driven translations leverage image-associated textual information to preserve clinical insights. Specifically, the second and third rows of the $\mathrm{CT} \rightarrow \mathrm{MRI}$ results show that translations without textual cues introduce severe noise. Furthermore, in the $\mathrm{MRI} \rightarrow \mathrm{CT}$ results, the second row highlights Posterior Reversible Encephalopathy Syndrome, emphasizing this finding during the CT conversion process. Notably, these results demonstrate that text-driven translation effectively preserves and emphasizes clinically relevant regions.
|
| 538 |
+
|
| 539 |
+
# C.9 Visual Analysis of Federated Asymmetric Translation
|
| 540 |
+
|
| 541 |
+
Figure 7 exhibits the visualized results of the forward and backward processes of federated asymmetric translation for each modality across global training rounds. Initially, both forward and backward translations exhibit significant noise. However, as training progresses, the proposed federated asymmetric translation—which focuses on enhancing the backward translator—progressively improves its ability to capture the features of the input images. These results demonstrate that our training methodology enables efficient model learning even in cross-modal FL scenarios where each client holds data from only a single modality.
|
| 542 |
+
|
| 543 |
+

|
| 544 |
+
Figure 6: Effect of textual cues on clinical feature augmentation in the forward translator of asymmetric translation on the LLaVA-Med dataset.
|
| 545 |
+
|
| 546 |
+
# D VQA Results
|
| 547 |
+
|
| 548 |
+
Figures 8-16 present qualitative examples of VQA results on the LLaVA-Med dataset. Specifically, Figures 8-12 illustrate cases based on CT data, while Figures 13-16 focus on MRI-based VQA scenarios. Moreover, Figures 17 and 18 present qualitative CT and MRI examples of VQA results on the VQA-RAD dataset, respectively. In each figure, the red arrow highlights a lesion or anatomical structure described in the imaging report, red text indicates incorrect or inconsistent responses, and blue text represents accurate and contextually appropriate answers.
|
| 549 |
+
|
| 550 |
+
Figure 12 presents a failure case analysis of our model. Although the imaging report indicates multiple abnormalities in the lower lobes—including ground glass opacities, arcade-like bands of parenchymal consolidation, peribronchial consolidation, and mild bronchiolectasis—our model successfully identified one of the true abnormalities but additionally predicted unrelated findings such as multiple cavitary lesions. However, it is important to note that other baseline models performed even more poorly. This suggests that despite the imperfect prediction, our model demonstrates a comparatively stronger ability to recognize at least some clinically relevant abnormalities.
|
| 551 |
+
|
| 552 |
+
In each example except Fig. 12, the various models are evaluated by their ability to correctly identify the main imaging findings when presented with the corresponding medical images and diagnostic queries. The figures demonstrate that X-FLoRA consistently provides more accurate and clinically relevant responses. This highlights the importance of diverse modality data and modality-specific expert representations for achieving reliable VQA performance in medical imaging contexts.
|
| 553 |
+
|
| 554 |
+

|
| 555 |
+
Figure 7: Visual results of federated asymmetric translation across global rounds. In the first round, the forward translator produces severe noise. By the 25th round, noise is substantially reduced and the overall structure begins to take shape. By the 50th round, the output closely resembles the input form with noise almost entirely eliminated.
|
| 556 |
+
|
| 557 |
+

|
| 558 |
+
Figure 8: Example comparison of X-FLoRA and other FL methods on LLaVA-Med dataset. The GPT-4 is considered as the ground truth.
|
| 559 |
+
|
| 560 |
+

|
| 561 |
+
Figure 9: Example comparison of X-FLoRA and other FL methods on LLaVA-Med dataset. The GPT-4 is considered as the ground truth.
|
| 562 |
+
|
| 563 |
+

|
| 564 |
+
Real CT Image / Synthetic MRI Image

Imaging Report: Computed tomography of the chest with intravenous contrast in the axial plane demonstrating narrowing of the left main bronchus to $5.7\mathrm{mm}$.

User: What is the main finding in the image?

IOS: The main finding in the image is the presence of a large right pleural effusion.

FedTGP: The main finding in the image is the narrowing of the right main bronchus.

X-FLoRA (w/o Expert Aggregation): The main finding is a narrowing of the left pulmonary artery.

X-FLoRA: The main finding in the image is the left main bronchial narrowing.

GPT-4: The main finding in the image is the narrowing of the left main bronchus.
|
| 590 |
+
|
| 591 |
+

|
| 592 |
+
Figure 10: Example comparison of X-FLoRA and other FL methods on LLaVA-Med dataset. The GPT-4 is considered as the ground truth.
|
| 593 |
+
Real CT Image / Synthetic MRI Image

Imaging Report: Axial view of a contrast-enhanced CT of the abdomen demonstrating a decompressed gallbladder and biliary leakage. CT: computed tomography.

User: What is the main finding in the image?

IOS: The main finding in the image is the presence of a large amount of ascites, which is an abnormal accumulation of fluid in the abdominal cavity.

FedTGP: The main finding in the axial view of the contrast-enhanced CT image of the abdomen is a thickened gallbladder wall suggestive of cholecystitis.

X-FLoRA (w/o Expert Aggregation): The main finding in the axial view of the contrast-enhanced CT image of the abdomen is a normal gallbladder.

X-FLoRA: The main finding in the image is the decompressed gallbladder and biliary leakage.

GPT-4: The main finding in the axial view of the contrast-enhanced CT image of the abdomen is a decompressed gallbladder and biliary leakage.
|
| 631 |
+
|
| 632 |
+

|
| 633 |
+
Figure 11: Example comparison of X-FLoRA and other FL methods on LLaVA-Med dataset. The GPT-4 is considered as the ground truth.
|
| 634 |
+
Real CT
|
| 635 |
+
Image
|
| 636 |
+
|
| 637 |
+

|
| 638 |
+
Synthetic MRI
|
| 639 |
+
Image
|
| 640 |
+
|
| 641 |
+
Lung CT scan in the lower lobes showing ground glass, arcade-like bands of parenchymal consolidation, peribronchial consolidation and mild bronchiolectasis
|
| 642 |
+
|
| 643 |
+
Imaging Report
|
| 644 |
+
|
| 645 |
+
User
|
| 646 |
+
|
| 647 |
+
What abnormalities can be seen in the lower lobes?
|
| 648 |
+
|
| 649 |
+
IOS
|
| 650 |
+
|
| 651 |
+
The main feature in the image is the renal venous thrombus, which is a blood clot that has formed within the renal vein.
|
| 652 |
+
|
| 653 |
+
FedTGP
|
| 654 |
+
|
| 655 |
+
In the lower lobes, diffuse emphysematous changes and bullae are noted.
|
| 656 |
+
|
| 657 |
+
X-FLoRA (w/o Expert Aggregation): In the lower lobes of the lung, multiple cavitary lesions are identified.

X-FLoRA: In the lower lobes of the lung, peribronchial consolidation and multiple cavitary lesions are identified.
|
| 668 |
+
|
| 669 |
+
GPT-4
|
| 670 |
+
|
| 671 |
+
In the lower lobes of the lung, there are several abnormalities visible, including ground glass opacities, arcade-like bands of parenchymal consolidation, peribronchial consolidation, and mild bronchiolectasis.
|
| 672 |
+
|
| 673 |
+

|
| 674 |
+
Real MRI
|
| 675 |
+
Image
|
| 676 |
+
|
| 677 |
+

|
| 678 |
+
Synthetic CT
|
| 679 |
+
Image
|
| 680 |
+
Imaging Report
|
| 681 |
+
|
| 682 |
+
Post-treatment MRI of the brain with and without contrast. Postoperative changes in the right frontotemporoparietal region are shown. Irregular enhancement at the surgical site extends to the walls of the right lateral ventricle. This was unchanged in subsequent MRI scans.
|
| 683 |
+
|
| 684 |
+
What region of the brain is affected?
|
| 685 |
+
|
| 686 |
+
IOS
|
| 687 |
+
|
| 688 |
+
The affected region in the brain is the left centrotemporal area.
|
| 689 |
+
|
| 690 |
+
FedTGP
|
| 691 |
+
|
| 692 |
+
The affected region in the brain is the left frontal lobe only.
|
| 693 |
+
|
| 694 |
+
X-FLoRA (w/o Expert Aggregation): The affected region of the brain is the right parietal region.
|
| 697 |
+
|
| 698 |
+
X-FLoRA
|
| 699 |
+
|
| 700 |
+
The affected region in the brain is the right frontoparietal area.
|
| 701 |
+
|
| 702 |
+
GPT-4
|
| 703 |
+
|
| 704 |
+
The affected region in the brain is the right frontotemporoparietal area.
|
| 705 |
+
|
| 706 |
+

|
| 707 |
+
Figure 13: Example comparison of X-FLoRA and other FL methods on LLaVA-Med dataset. The GPT-4 is considered as the ground truth.
|
| 708 |
+
Real MRI
|
| 709 |
+
Image
|
| 710 |
+
|
| 711 |
+

|
| 712 |
+
Synthetic CT
|
| 713 |
+
Image
|
| 714 |
+
Imaging Report
|
| 715 |
+
|
| 716 |
+
MRI of the brain showing high T2 and FLAIR signal in the occipital and posterior parietal subcortical white matter consistent with PRES following treatment with rituximab.
|
| 717 |
+
|
| 718 |
+
User
|
| 719 |
+
|
| 720 |
+
What part of the brain is shown in the image?
|
| 721 |
+
|
| 722 |
+
IOS
|
| 723 |
+
|
| 724 |
+
The image shows the cerebellum regions of the brain.
|
| 725 |
+
|
| 726 |
+
FedTGP
|
| 727 |
+
|
| 728 |
+
The image shows only the occipital region of the brain.
|
| 729 |
+
|
| 730 |
+
X-FLoRA
|
| 731 |
+
|
| 732 |
+
The image shows the frontal and posterior parietal regions of the brain.
|
| 733 |
+
|
| 734 |
+
X-FLoRA
|
| 735 |
+
|
| 736 |
+
The image shows the occipital and posterior parietal areas.
|
| 737 |
+
|
| 738 |
+
GPT-4
|
| 739 |
+
|
| 740 |
+
The image shows the occipital and posterior parietal regions of the brain.
|
| 741 |
+
|
| 742 |
+

|
| 743 |
+
Figure 12: Example comparison of X-FLoRA and other FL methods on LLaVA-Med dataset. The GPT-4 is considered as the ground truth.
|
| 744 |
+
Real MRI
|
| 745 |
+
Image
|
| 746 |
+
Figure 15: Example comparison of X-FLoRA and other FL methods on LLaVA-Med dataset. The GPT-4 is considered as the ground truth.
|
| 747 |
+
|
| 748 |
+

|
| 749 |
+
Figure 14: Example comparison of X-FLoRA and other FL methods on LLaVA-Med dataset. The GPT-4 is considered as the ground truth.
|
| 750 |
+
Synthetic CT
|
| 751 |
+
Image
|
| 752 |
+
Imaging Report
|
| 753 |
+
|
| 754 |
+
Enhanced T1 axial MRI demonstrates substantial interval enlargement of the left MCA aneurysm, now measuring up to $9.3\mathrm{mm}$. MCA, middle cerebral artery; MRI, magnetic resonance imaging.
|
| 755 |
+
|
| 756 |
+
User
|
| 757 |
+
|
| 758 |
+
What is the main finding in the image?
|
| 759 |
+
|
| 760 |
+
IOS
|
| 761 |
+
|
| 762 |
+
The main finding in the image is the presence of a large left frontal arachnoid cyst.
|
| 763 |
+
|
| 764 |
+
FedTGP
|
| 765 |
+
|
| 766 |
+
The image reveals calcification in the left middle cerebral artery instead of aneurysmal changes.
|
| 767 |
+
|
| 768 |
+
X-FLoRA
|
| 769 |
+
|
| 770 |
+
The main finding in the image is the chronic stability of an aneurysm in the left MCA without interval changes
|
| 771 |
+
|
| 772 |
+
X-FLoRA
|
| 773 |
+
|
| 774 |
+
The main finding is a marked interval growth of an aneurysm in the left MCA.
|
| 775 |
+
|
| 776 |
+
GPT-4
|
| 777 |
+
|
| 778 |
+
The main finding in the image is the substantial interval enlargement of an aneurysm in the left middle cerebral artery (MCA).
|
| 779 |
+
|
| 780 |
+

|
| 781 |
+
Real MRI Image
|
| 782 |
+
|
| 783 |
+

|
| 784 |
+
Synthetic CT Image
|
| 785 |
+
Imaging Report
|
| 786 |
+
|
| 787 |
+
Axial T2-weighted brain MRI scan shows a solitary metastatic lesion in the right temporal lobe with brain edema.
|
| 788 |
+
|
| 789 |
+
User Is there any sign of edema?
|
| 790 |
+
|
| 791 |
+
IOS According to the image, there is no sign of edema.
|
| 792 |
+
|
| 793 |
+
FedTGP: The MRI scan indicates brain edema surrounding a primary tumor in the right temporal lobe.
|
| 794 |
+
|
| 795 |
+
X-FLoRA (w/o Expert Aggregation): The main finding in the image is the chronic stability of an aneurysm in the left MCA without interval changes.
|
| 796 |
+
|
| 797 |
+
X-FLoRA The MRI scan shows brain edema surrounding a metastatic lesion located in the right temporal lobe
|
| 798 |
+
|
| 799 |
+
GPT-4 Yes, the MRI scan indicates the presence of brain edema surrounding the metastatic lesion in the right temporal lobe.
|
| 800 |
+
|
| 801 |
+

|
| 802 |
+
Figure 16: Example comparison of X-FLoRA and other FL methods on LLaVA-Med dataset. The GPT-4 is considered as the ground truth.
|
| 803 |
+
Real CT Image
|
| 804 |
+
|
| 805 |
+

|
| 806 |
+
Synthetic MRI Image
|
| 807 |
+
Imaging Report
|
| 808 |
+
|
| 809 |
+
This is a noncontrast CT. This image is taken in axial. The finding is located at right convexity.
|
| 810 |
+
|
| 811 |
+
User Is this a noncontrast CT?
|
| 812 |
+
|
| 813 |
+
<table><tr><td>IOS</td><td>FedTGP</td><td>X-FLoRA (w/o Expert Aggregation)</td><td>X-FLoRA</td><td>Ground Truth</td></tr><tr><td>No</td><td>Yes</td><td>Yes</td><td>Yes</td><td>Yes</td></tr></table>
|
| 814 |
+
|
| 815 |
+
User Where is the abnormality located?
|
| 816 |
+
|
| 817 |
+
<table><tr><td>IOS</td><td>FedTGP</td><td>X-FLoRA (w/o Expert Aggregation)</td><td>X-FLoRA</td><td>Ground Truth</td></tr><tr><td>Right convexity</td><td>Left convexity</td><td>Right convexity</td><td>Right convexity</td><td>Right convexity</td></tr></table>
|
| 818 |
+
|
| 819 |
+
User Is a noncontrast CT the first imaging test for a suspected brain bleed?
|
| 820 |
+
|
| 821 |
+
<table><tr><td>IOS</td><td>FedTGP</td><td>X-FLoRA (w/o Expert Aggregation)</td><td>X-FLoRA</td><td>Ground Truth</td></tr><tr><td>No</td><td>No</td><td>No</td><td>Yes</td><td>Yes</td></tr></table>
|
| 822 |
+
|
| 823 |
+

|
| 824 |
+
Figure 17: Example comparison of X-FLoRA and other FL methods on VQA-RAD.
|
| 825 |
+
Real MRI Image
|
| 826 |
+
Figure 18: Example comparison of X-FLoRA and other FL methods on VQA-RAD.
|
| 827 |
+
|
| 828 |
+

|
| 829 |
+
Synthetic CT Image
|
| 830 |
+
Imaging Report
|
| 831 |
+
|
| 832 |
+
The MRI image is the sulci blunted. There is presence of blunting of the sulci and brain edema.
|
| 833 |
+
|
| 834 |
+
User Is the brain swollen?
|
| 835 |
+
|
| 836 |
+
<table><tr><td>IOS</td><td>FedTGP</td><td>X-FLoRA (w/o Expert Aggregation)</td><td>X-FLoRA</td><td>Ground Truth</td></tr><tr><td>No</td><td>Yes</td><td>Yes</td><td>Yes</td><td>Yes</td></tr></table>
|
| 837 |
+
|
| 838 |
+
User Are the sulci blunted?
|
| 839 |
+
|
| 840 |
+
<table><tr><td>IOS</td><td>FedTGP</td><td>X-FLoRA (w/o Expert Aggregation)</td><td>X-FLoRA</td><td>Ground Truth</td></tr><tr><td>No</td><td>Yes</td><td>Yes</td><td>Yes</td><td>Yes</td></tr></table>
|
| 841 |
+
|
| 842 |
+
User Is/Are there edema in the patient's brain?
|
| 843 |
+
|
| 844 |
+
<table><tr><td>IOS</td><td>FedTGP</td><td>X-FLoRA (w/o Expert Aggregation)</td><td>X-FLoRA</td><td>Ground Truth</td></tr><tr><td>No</td><td>No</td><td>No</td><td>Yes</td><td>Yes</td></tr></table>
|
EMNLP/2025/X-FLoRA_ Cross-modal Federated Learning with Modality-expert LoRA for Medical VQA/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:20a469b4261f75e3c405a0bca05d5c073373f0fa284d8047458721ee9d649911
|
| 3 |
+
size 1057218
|
EMNLP/2025/X-FLoRA_ Cross-modal Federated Learning with Modality-expert LoRA for Medical VQA/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6f97b4b484207f28915d2fde048cdb1eeb3cb61dee758c3aba469bbd417c7667
|
| 3 |
+
size 897092
|
EMNLP/2025/XAutoLM_ Efficient Fine-Tuning of Language Models via Meta-Learning and AutoML/96a5267b-72c9-4e17-9a6a-a5d24f510b0d_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:93e2bc7e6e05e00075723148effa26b4755c26af420268dab3766e90422f2d94
|
| 3 |
+
size 119295
|
EMNLP/2025/XAutoLM_ Efficient Fine-Tuning of Language Models via Meta-Learning and AutoML/96a5267b-72c9-4e17-9a6a-a5d24f510b0d_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f957dd442f7c5c7f794db4db26f9955997bd821ce7b235a227352f3a2c993250
|
| 3 |
+
size 145293
|
EMNLP/2025/XAutoLM_ Efficient Fine-Tuning of Language Models via Meta-Learning and AutoML/96a5267b-72c9-4e17-9a6a-a5d24f510b0d_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5b46dcc04d1676b136377639a099f54a5d9770c867e7107abc6e7ef4998714f9
|
| 3 |
+
size 804439
|
EMNLP/2025/XAutoLM_ Efficient Fine-Tuning of Language Models via Meta-Learning and AutoML/full.md
ADDED
|
@@ -0,0 +1,543 @@
| 1 |
+
# XAutoLM: Efficient Fine-Tuning of Language Models via Meta-Learning and AutoML
|
| 2 |
+
|
| 3 |
+
Ernesto L. Estevanell-Valladares<sup>1,2</sup>, Suilan Estevez-Velarde<sup>2</sup>, Yoan Gutiérrez<sup>1</sup>, Andrés Montoyo<sup>1</sup>, Ruslan Mitkov<sup>3</sup>,
|
| 4 |
+
|
| 5 |
+
<sup>1</sup>University of Alicante, <sup>2</sup>University of Havana, <sup>3</sup>University of Lancaster,
|
| 6 |
+
|
| 7 |
+
ernesto.estevanell@ua.es
|
| 8 |
+
|
| 9 |
+
# Abstract
|
| 10 |
+
|
| 11 |
+
Experts in machine learning leverage domain knowledge to navigate decisions in model selection, hyperparameter optimization, and resource allocation. This is particularly critical for fine-tuning language models (LMs), where repeated trials incur substantial computational overhead and environmental impact. However, no existing automated framework simultaneously tackles the entire model selection and hyperparameter optimization (HPO) task for resource-efficient LM fine-tuning. We introduce XAutoLM, a meta-learning-augmented AutoML framework that reuses past experiences to optimize discriminative and generative LM fine-tuning pipelines efficiently. XAutoLM learns from stored successes and failures by extracting task- and system-level meta-features to bias its sampling toward valuable configurations and away from costly dead ends. On four text classification and two question-answering benchmarks, XAutoLM surpasses zero-shot optimizer's peak $F1$ on five of six tasks, cuts mean evaluation time of pipelines by up to $4.5\mathrm{x}$ , reduces search error ratios by up to sevenfold, and uncovers up to $50\%$ more pipelines above the zero-shot Pareto front. In contrast, simpler memory-based baselines suffer negative transfer. We release XAutoLM and our experience store to catalyze resource-efficient, Green AI fine-tuning in the NLP community.
|
| 12 |
+
|
| 13 |
+
# 1 Introduction
|
| 14 |
+
|
| 15 |
+
Fine-tuning large language models (LLMs) has become indispensable across natural language processing (NLP) applications, yet even "small" models such as BERT (Devlin et al., 2018) or T5 (Raffel et al., 2020) incur substantial computational cost and carbon emissions (Wang et al., 2023b; Schwartz et al., 2020). Rather than exhaustively evaluating every model and hyperparameter combination, human experts draw on domain knowledge to focus on promising regions of this vast design space.
|
| 16 |
+
|
| 17 |
+
Automated Machine Learning (AutoML) seeks to mimic expert intuition by automating the two core stages of pipeline construction, model selection (MS) and hyperparameter optimization (HPO), into a unified search loop (Hutter et al., 2019). AutoML techniques have matured in areas such as tabular and vision tasks (Hutter et al., 2019), showing competitive performance against human experts (Estevez-Velarde et al., 2020). However, the joint MS+HPO pipeline for language models presents an ample, mixed discrete-continuous search space whose repeated evaluations are prohibitively costly (Wang et al., 2023b), thus posing a significant challenge for automation. While several recent efforts address HPO for LMs in isolation (Mallik et al., 2024), surveys highlight the underdevelopment of full-pipeline AutoML in NLP (Tornede et al., 2023), and no framework systematically unifies model selection and HPO under tight compute and Green AI constraints.
|
| 18 |
+
|
| 19 |
+
To address these shortcomings, we present XAutoLM, an AutoML framework that unifies model selection and hyperparameter optimization for LM fine-tuning via meta-learning. XAutoLM constructs an experience-aware prior from a repository of past pipeline evaluations annotated with task- and system-level meta-features which steers the search toward historically promising and away from infeasible configurations. Empirically, across four classification and two question-answering benchmarks, our method yields pipelines with stronger performance-time trade-offs than zero-shot or naive baselines under identical wall-clock budgets (Tables 5, 6). We release the code and the full experience store to support sustainable, reproducible LM fine-tuning in the NLP community.
|
| 20 |
+
|
| 21 |
+
We summarize our main contributions as follows:
|
| 22 |
+
|
| 23 |
+
- A unified, meta-learning-augmented AutoML
|
| 24 |
+
|
| 25 |
+
framework that integrates both model selection and hyperparameter optimisation for discriminative and generative LM fine-tuning.
|
| 26 |
+
|
| 27 |
+
- An extensible, task- and model-agnostic experience-aware prior that conditions the search on task and system meta-features and explicitly leverages negative traces to avoid costly dead ends.
|
| 28 |
+
- A comprehensive evaluation on six benchmarks showing consistent gains in $F_{1}$ , mean pipeline evaluation time, and error ratio, and stronger Pareto fronts than zero-shot and naive memory baselines (see Section 4; Tables 5, 6).
|
| 29 |
+
|
| 30 |
+
We next review related work (Section 2), present XAutoLM (Section 3), and report the experimental setup and results (Section 4), followed by analysis (Section 5) and, finally, conclusions and limitations (Sections 6, 7).
|
| 31 |
+
|
| 32 |
+
# 2 Related Work
|
| 33 |
+
|
| 34 |
+
AutoML strategies in language modelling can be divided into two (not necessarily disjoint) subsets: AutoML for LLMs and LLMs for AutoML (Tornede et al., 2023). The former comprises AutoML techniques to produce optimal LM pipelines tailored for specific scenarios, akin to traditional AutoML. The latter employs language models to enhance the AutoML process, for example, by providing linguistic interfaces to configure the optimisation process or leveraging them to guide the search (e.g., using LMs to generate code for optimal ML pipelines).
|
| 35 |
+
|
| 36 |
+
AutoML for LLMs in particular poses significant challenges (Tornede et al., 2023). Namely, LMs are extremely resource-intensive (Bannour et al., 2021), even when only considering their later stages (e.g., fine-tuning, inference). Table 1 compares AutoML approaches that leverage LLMs according to relevant features characterising their responses to the field's challenges.
|
| 37 |
+
|
| 38 |
+
We observe that there are more LLMs for AutoML systems than vice versa, likely due to the proliferation of prompt engineering and increased access to open-source LMs. For instance, Zhou et al. (2022) developed the Automatic Prompt Engineer (APE) system, which achieved performance competitive with human-generated instructions. In contrast, systems such as GL-Agent (Wei et al., 2023), AutoM3L (Luo et al., 2024) and GizaML (Sayed et al., 2024) integrate language models
|
| 39 |
+
|
| 40 |
+
<table><tr><td>Systems</td><td>Features</td><td>AutoML for LLMs</td><td>LLMs for AutoML</td><td>Inference</td><td>Fine-tuning</td><td>HPO</td><td>Model Selection</td><td>Meta-learning</td></tr><tr><td>APE</td><td></td><td>✓</td><td>✓</td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>GPT-NAS</td><td>✓</td><td>✓</td><td></td><td></td><td></td><td>✓</td><td>✓</td><td></td></tr><tr><td>GL-Agent</td><td></td><td>✓</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>AutoGen</td><td>✓</td><td>✓</td><td>✓</td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>EcoOptiGen</td><td>✓</td><td></td><td>✓</td><td></td><td></td><td>✓</td><td></td><td></td></tr><tr><td>AutoML-GPT</td><td>✓</td><td>✓</td><td></td><td></td><td></td><td>≈</td><td></td><td></td></tr><tr><td>HuggingGPT</td><td>≈</td><td>✓</td><td>✓</td><td></td><td></td><td></td><td>✓</td><td></td></tr><tr><td>AutoM3L</td><td></td><td>✓</td><td></td><td></td><td></td><td>✓</td><td>✓</td><td>≈</td></tr><tr><td>PriorBand</td><td>✓</td><td></td><td></td><td>✓</td><td>✓</td><td></td><td></td><td>✓</td></tr><tr><td>GizaML</td><td></td><td>✓</td><td></td><td></td><td></td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>GE</td><td>✓</td><td>✓</td><td>✓</td><td></td><td></td><td>✓</td><td></td><td>≈</td></tr><tr><td>AutoGOAL</td><td>✓</td><td></td><td>✓</td><td></td><td></td><td>✓</td><td>✓</td><td></td></tr><tr><td colspan="9">Introduced in this paper</td></tr><tr><td>XAutoLM</td><td>✓</td><td></td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr></table>
|
| 41 |
+
|
| 42 |
+
Table 1: Comparison of systems for AutoML with LLMs
|
| 43 |
+
|
| 44 |
+
into their optimization strategies to produce graph learning pipelines, highly capable multi-modal ML pipelines, and time-series forecasting pipelines, respectively.
|
| 45 |
+
|
| 46 |
+
Systems like AutoGen (Wu et al., 2023), GPT-NAS (Yu et al., 2024), GE (Morris et al., 2024), AutoML-GPT (Zhang et al., 2023), and HuggingGPT (Shen et al., 2024) are hybrids that span both categories; they leverage LMs to produce LM-based solutions. However, the last two differ from traditional AutoML (and NAS) systems: AutoML-GPT does not evaluate solution candidates (only simulates their training), and HuggingGPT produces responses to prompts without outputting the pipelines capable of handling them.
|
| 47 |
+
|
| 48 |
+
Often, the choice of model is as, if not more, critical than the hyperparameter configuration used to produce responses. We found that AutoGOAL (Estevanell-Valladares et al., 2024) optimizes pipelines by balancing efficiency and performance metrics, taking into account both model selection and HPO, but only supports LMs for inference. All other AutoML for LLMs systems we surveyed, such as EcoOptiGen (Wang et al., 2023a) and PriorBand (Mallik et al., 2024), focus solely on HPO.
|
| 49 |
+
|
| 50 |
+
Nonetheless, we find no single framework that simultaneously addresses model selection and hyperparameter optimization for LM fine-tuning, particularly when resource limitations exist.
|
| 51 |
+
|
| 52 |
+
# 3 Proposal
|
| 53 |
+
|
| 54 |
+
We introduce XAutoLM, the first AutoML framework that unifies model selection and hyperparameter optimisation for both discriminative and generative language model fine-tuning. Our pipelines are composed of (i) a base LM from a curated pool of encoders and generators (Table 2), (ii) one of three fine-tuning strategies: full, partial, or LoRA (Hu et al., 2021), and (iii) a hyperparameter configuration. XAutoLM jointly explores this mixed search space by reusing past experiences, e.g., "LoRA-tuned DistilBERT achieved high macro-F1 on SST-2 under low VRAM", to steer the optimizer toward high-utility regions and away from error-prone configurations. This holistic reuse enables XAutoLM to discover strong fine-tuning pipelines under tight compute budgets.
|
| 55 |
+
|
| 56 |
+
# Discriminative
|
| 57 |
+
|
| 58 |
+
BERT (Devlin et al., 2018)
|
| 59 |
+
|
| 60 |
+
DistilBERT (Sanh et al., 2020)
|
| 61 |
+
|
| 62 |
+
RoBERTa (Liu et al., 2019)
|
| 63 |
+
|
| 64 |
+
XLM-RoBERTa (Conneau et al., 2020)
|
| 65 |
+
|
| 66 |
+
DeBERTa (He et al., 2021)
|
| 67 |
+
|
| 68 |
+
DeBERTaV3 (He et al., 2023)
|
| 69 |
+
|
| 70 |
+
MDeBERTaV3 (He et al., 2023)
|
| 71 |
+
|
| 72 |
+
ALBERT-v1 (Lan et al., 2019)
|
| 73 |
+
|
| 74 |
+
ELECTRA (Clark et al., 2020)
|
| 75 |
+
|
| 76 |
+
# Generative
|
| 77 |
+
|
| 78 |
+
T5 (Raffel et al., 2020)
|
| 79 |
+
|
| 80 |
+
FLAN-T5 (Chung et al., 2024)
|
| 81 |
+
|
| 82 |
+
GPT-2 (Radford et al., 2019)
|
| 83 |
+
|
| 84 |
+
PHI-3 (Abdin et al., 2024b)
|
| 85 |
+
|
| 86 |
+
# New Additions
|
| 87 |
+
|
| 88 |
+
PHI-3.5 (Mini-Inst) (Abdin et al., 2024a)
|
| 89 |
+
|
| 90 |
+
PHI-4 (Mini-Inst, Reasoning) (Abdin et al., 2024a)
|
| 91 |
+
|
| 92 |
+
MIXTRAL (8x7B) (Mistral AI Team, 2023)
|
| 93 |
+
|
| 94 |
+
MISTRAL NEMO (Base-Inst) (Mistral AI Team, 2024)
|
| 95 |
+
|
| 96 |
+
Llama 3.1, 3.2 (1B - 70B) (Grattafori et al., 2024)
|
| 97 |
+
|
| 98 |
+
DeepSeek R1 (DeepSeek-AI et al., 2025)
|
| 99 |
+
|
| 100 |
+
Table 2: LMs available in AutoGOAL's algorithm pool.
|
| 101 |
+
|
| 102 |
+
Background XAutoLM builds on AutoGOAL's<sup>3</sup> probabilistic optimizer (Estevez-Velarde et al., 2020). The optimizer represents every valid LM pipeline $c$ as a point in a mixed search space that combines discrete choices (e.g. fine-tuning method, model, tokenizer) with continuous hyperparameters (e.g. learning rate, dropout). It maintains a probability distribution $P(c|\theta)$ over that space. It
|
| 103 |
+
|
| 104 |
+
repeats a simple sample-evaluate-update loop: (1) sample a batch of pipelines from $P(c|\theta)$ ; (2) evaluate them on the target task; and (3) update $P(c|\theta)$ so that high-performing pipelines gain probability mass while under-performing and failures lose it. AutoGOAL always initializes this distribution uniformly, meaning every pipeline, adequate or not, is equally likely at the first generation.
|
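As a rough illustration of this loop, the following sketch shows a sample-evaluate-update search over pipeline configurations. This is not AutoGOAL's actual API; the callables `sample_pipeline`, `evaluate`, and `update` are hypothetical stand-ins for the optimizer's internals.

```python
def search(sample_pipeline, evaluate, update, generations=10, batch_size=8):
    """Minimal sketch of a sample-evaluate-update loop over pipeline configs.

    sample_pipeline() -> a config drawn from the current distribution P(c | theta)
    evaluate(config)  -> fitness value, or None on failure (e.g. OOM, timeout)
    update(results)   -> shifts probability mass toward high-fitness configs
    """
    best = None
    for _ in range(generations):
        batch = [sample_pipeline() for _ in range(batch_size)]
        results = [(cfg, evaluate(cfg)) for cfg in batch]
        update(results)  # reward successes, penalize failures
        for cfg, fit in results:
            if fit is not None and (best is None or fit > best[1]):
                best = (cfg, fit)
    return best
```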
| 105 |
+
|
| 106 |
+
# 3.1 Process Overview
|
| 107 |
+
|
| 108 |
+
XAutoLM replaces this uniform cold start with an experience-aware prior that follows a structured meta-learning process. Initially, the framework retrieves relevant historical evaluations (experiences) from a centralized repository (Section 3.2). Then, it computes detailed task and system meta-features (Section 3.2.1) to characterize the complexity and available resources for the present optimisation task. Leveraging this information, XAutoLM probabilistically adjusts the AutoML search space (Section 3.3), focusing on historically successful configurations and reducing exploration of previously unsuccessful paths. Once configured, the AutoML optimisation starts, fine-tuning pipelines are evaluated, and their outcomes, both successful and unsuccessful, are recorded back into the experience repository, to be used in future runs.
|
| 109 |
+
|
| 110 |
+
# 3.2 Experience Store
|
| 111 |
+
|
| 112 |
+
Our system learns from a growing repository of experiences: past pipeline evaluations that capture every factor influencing performance. Formally, an experience is a 4-tuple $e = \langle c, \mathbf{m}, t, s \rangle$, where $c$ is the complete pipeline configuration, $\mathbf{m}$ the vector of recorded metrics (e.g., F1, ROUGE, evaluation time), $t$ a task meta-feature vector, and $s$ a vector of straightforward system descriptors such as CPU cores, RAM, and GPU memory.
|
| 113 |
+
|
| 114 |
+
We label an experience positive if all fitness metrics are valid and negative otherwise, usually due to errors occurring during evaluation (out-of-memory, timeout, etc.). Both types are essential: positives pull the search toward valuable regions, and negatives push it away from costly dead-ends (Section 3.3).
|
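For concreteness, the experience tuple $\langle c, \mathbf{m}, t, s \rangle$ could be stored as a small record along the following lines. This is only a sketch; the field names are illustrative and not the released schema.

```python
from dataclasses import dataclass
from typing import Any, Dict, Optional

@dataclass
class Experience:
    config: Dict[str, Any]               # c: model, fine-tuning method, hyperparameters
    metrics: Optional[Dict[str, float]]  # m: F1, ROUGE, evaluation time; None if the run failed
    task_meta: Dict[str, float]          # t: task meta-features (Section 3.2.1)
    system_meta: Dict[str, float]        # s: CPU cores, RAM, GPU VRAM, ...
    error: Optional[str] = None          # e.g. "OOM" or "timeout" for negative experiences

    @property
    def is_positive(self) -> bool:
        # Positive iff every fitness metric was recorded successfully.
        return self.metrics is not None and self.error is None
```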
| 115 |
+
|
| 116 |
+
# 3.2.1 Meta-Features
|
| 117 |
+
|
| 118 |
+
We design two complementary meta-feature templates according to the nature of the output space of a task. When the output is drawn from a closed label set, as in text classification or sequence labelling, dataset difficulty is dominated by class
|
| 119 |
+
|
| 120 |
+
imbalance and document-length variation. Conversely, tasks whose output is an open text sequence (question answering, summarisation, translation) demand features that capture the relationship between the input prompt and the target text. Table 3 lists the core features for each template; the same templates can be reused for other label-based or free-form generation tasks with minimal adaptation.
|
| 121 |
+
|
| 122 |
+
<table><tr><td>Category/Feature</td><td>Category/Feature</td></tr><tr><td>Dataset</td><td>Dataset</td></tr><tr><td>Nr Samples</td><td>Nr Samples</td></tr><tr><td>Nr Classes</td><td>Prompt</td></tr><tr><td>Entropy</td><td>Avg.\Len (chars)</td></tr><tr><td>Min Cls Prob</td><td>Std.\Len</td></tr><tr><td>Max Cls Prob</td><td>Lexical Diversity (TTR)</td></tr><tr><td>Imbalance Ratio</td><td>Target</td></tr><tr><td>Documents</td><td>Avg.\Len (chars)</td></tr><tr><td>Avg. Length</td><td>Std.\Len</td></tr><tr><td>Std. Length</td><td>Lexical Diversity (TTR)</td></tr><tr><td>Coef. Var. Length</td><td>Prompt-Target</td></tr><tr><td>Landmark</td><td>Avg.\Len Ratio (T/P)</td></tr><tr><td>PCA + D.Tree Acc.</td><td>Vocabulary Novelty</td></tr><tr><td></td><td>Semantic Similarity</td></tr><tr><td></td><td>ROUGE-L F1</td></tr><tr><td></td><td>Semantic</td></tr><tr><td></td><td>Mean Prompt Embedding</td></tr><tr><td>(a) Label-based</td><td>(b) Generation</td></tr></table>
|
| 123 |
+
|
| 124 |
+
Table 3: Representative task meta-features.
|
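As a rough illustration of the label-based template in Table 3(a), a subset of these meta-features can be computed directly from raw texts and labels; the sketch below is illustrative only and omits the landmark PCA + decision-tree feature.

```python
import math
from collections import Counter
from statistics import mean, pstdev

def label_based_meta_features(texts, labels):
    """Compute a subset of the label-based meta-features of Table 3(a)."""
    n = len(labels)
    counts = Counter(labels)
    probs = [c / n for c in counts.values()]
    lengths = [len(t) for t in texts]
    avg_len, std_len = mean(lengths), pstdev(lengths)
    return {
        "nr_samples": n,
        "nr_classes": len(counts),
        "entropy": -sum(p * math.log(p) for p in probs),
        "min_cls_prob": min(probs),
        "max_cls_prob": max(probs),
        "imbalance_ratio": max(probs) / min(probs),
        "avg_doc_len": avg_len,
        "std_doc_len": std_len,
        "coef_var_len": std_len / avg_len if avg_len else 0.0,
    }
```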
| 125 |
+
|
| 126 |
+
Experiences record a minimal hardware profile in $s$ (CPU cores, CPU frequency, total RAM, GPU VRAM) so similarity and feasibility reflect both task and system characteristics. For instance, while Llama 3.1 70B may yield superior results to smaller alternatives, systems with low VRAM cannot utilize its power.
|
| 127 |
+
|
| 128 |
+
XAutoLM constructs a holistic representation of each optimization scenario by combining task-specific and system-level meta-features, enabling robust similarity assessments across diverse contexts.
|
| 129 |
+
|
| 130 |
+
# 3.3 Warm-Start optimization
|
| 131 |
+
|
| 132 |
+
XAutoLM maintains a probabilistic model $P(c \mid \theta)$ (Estevez-Velarde et al., 2020) over pipeline configurations $c$ . When a new task $T$ arrives, we retrieve a set of past experiences $\mathcal{E} = \{e_1, \dots, e_n\}$ and update the model in two sweeps; one for positive experiences, one for negatives:
|
| 133 |
+
|
| 134 |
+
$$
|
| 135 |
+
P(c \mid \theta) \leftarrow \left(1 - \alpha_{i}^{+}\right) P(c \mid \theta) + \alpha_{i}^{+} P_{i}(c \mid \theta), \tag{1}
|
| 136 |
+
$$
|
| 137 |
+
|
| 138 |
+
$$
|
| 139 |
+
P(c \mid \theta) \leftarrow \left(1 + \alpha_{i}^{-}\right) P(c \mid \theta) - \alpha_{i}^{-} P_{i}(c \mid \theta) \tag{2}
|
| 140 |
+
$$
|
| 141 |
+
|
| 142 |
+
where $P_{i}(c \mid \theta)$ is the empirical distribution induced by the configuration $c$ of experience $e_{i}$. These updates therefore pull the search toward successful regions and push it away from unsuccessful ones. The strength of each pull and push is governed by the learning rates $\alpha_{i}^{+}$ and $\alpha_{i}^{-}$.
|
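For a single categorical choice (e.g., the fine-tuning method), the pull/push sweeps of equations (1)-(2) reduce to mixture updates over probability vectors. The sketch below assumes given learning rates and adds renormalisation and clipping as a safeguard that the paper does not spell out.

```python
def pull(p, p_i, alpha_pos):
    """Eq. (1): move P(c|theta) toward the distribution induced by a positive experience."""
    q = {c: (1 - alpha_pos) * p[c] + alpha_pos * p_i.get(c, 0.0) for c in p}
    z = sum(q.values())
    return {c: v / z for c, v in q.items()}

def push(p, p_i, alpha_neg):
    """Eq. (2): move P(c|theta) away from the distribution induced by a negative experience."""
    q = {c: max((1 + alpha_neg) * p[c] - alpha_neg * p_i.get(c, 0.0), 1e-9) for c in p}
    z = sum(q.values())
    return {c: v / z for c, v in q.items()}

# Hypothetical example: a prior over fine-tuning methods nudged by one positive and one negative trace.
p = {"full": 1 / 3, "partial": 1 / 3, "lora": 1 / 3}
p = pull(p, {"lora": 1.0}, alpha_pos=0.2)   # a successful LoRA run pulls mass toward LoRA
p = push(p, {"full": 1.0}, alpha_neg=0.1)   # an OOM on full fine-tuning pushes mass away from it
```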
| 143 |
+
|
| 144 |
+
We compute experience-specific learning rates considering their similarity to the current task and historical performance. Specifically, these rates are computed as follows:
|
| 145 |
+
|
| 146 |
+
$$
|
| 147 |
+
\alpha_{i}^{+} = \alpha_{\max}^{+} \, u_{i} \, e^{-\beta d_{i}}, \tag{3}
|
| 148 |
+
$$
|
| 149 |
+
|
| 150 |
+
$$
|
| 151 |
+
\alpha_{i}^{-} = \alpha_{\max}^{-} \, e^{-\beta d_{i}}. \tag{4}
|
| 152 |
+
$$
|
| 153 |
+
|
| 154 |
+
Here $\alpha_{\mathrm{max}}^{+}$ and $\alpha_{\mathrm{max}}^{-}$ are predefined maximum learning rates, $u_{i} \in [0,1]$ is a utility score (defined below) assigned only to positive experiences, and $d_{i}$ is the distance between the current task and the one that generated experience $e_{i}$ . The exponential kernel $e^{-\beta d_i}$ down-weights experiences that are less similar to the current task; $\beta > 0$ is an adaptive decay factor.
|
| 155 |
+
|
| 156 |
+
Task Similarity. Each task is described by a meta-feature vector $t$ . Similarity is measured with a distance $d_{i} = \mathrm{Dist}(t_{T},t_{i})$ (e.g., Euclidean or Cosine). $\beta$ is set automatically to compensate for scale:
|
| 157 |
+
|
| 158 |
+
$$
|
| 159 |
+
\beta = \frac{\beta_{\text{scale}}}{\sigma_{d} + \varepsilon}, \quad \sigma_{d} = \operatorname{Std}\left(\{d_{1}, \dots, d_{n}\}\right), \tag{5}
|
| 160 |
+
$$
|
| 161 |
+
|
| 162 |
+
where $\varepsilon > 0$ prevents division by zero.
|
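A minimal sketch of how the experience-specific rates of equations (3)-(5) could be computed from meta-feature distances follows; the parameters `alpha_max_pos`, `alpha_max_neg`, and `beta_scale` mirror the free meta-parameters of the WS prior and are illustrative values only.

```python
import math
from statistics import pstdev

def euclidean(a, b):
    return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))

def learning_rates(t_target, experiences, alpha_max_pos=0.3, alpha_max_neg=0.1,
                   beta_scale=1.0, eps=1e-8):
    """Eqs. (3)-(5): similarity-weighted pull/push rates for retrieved experiences.

    experiences: list of (t_i, u_i, is_positive) tuples, with u_i in [0, 1]
    (the utility score; ignored for negative experiences).
    """
    dists = [euclidean(t_target, t_i) for t_i, _, _ in experiences]
    beta = beta_scale / (pstdev(dists) + eps)           # Eq. (5): compensate for distance scale
    rates = []
    for (_, u_i, positive), d_i in zip(experiences, dists):
        kernel = math.exp(-beta * d_i)                   # down-weight dissimilar tasks
        if positive:
            rates.append(alpha_max_pos * u_i * kernel)   # Eq. (3)
        else:
            rates.append(alpha_max_neg * kernel)         # Eq. (4)
    return rates
```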
| 163 |
+
|
| 164 |
+
Utility Score. The utility function $u_{i}$ quantifies the quality of each positive experience $e_{i}$ relative to others from the same task. XAutoLM supports three distinct utility computation strategies: (i) Weighted Sum, (ii) Linear Front, and (iii) Logarithmic Front:
|
| 165 |
+
|
| 166 |
+
Weighted Sum. Let $\mathcal{M}$ denote the set of recorded performance metrics for each experience, such as F1, accuracy, evaluation time, or ROUGE-L. Each metric $m\in \mathcal{M}$ is associated with a known optimisation direction (maximize or minimize) and an importance weight $w_{m}$ . For each positive experience $e_i$ , we first normalize its metric value $m_{i}$ :
|
| 167 |
+
|
| 168 |
+
$$
|
| 169 |
+
m_{i}^{\prime} = \left\{ \begin{array}{ll} \dfrac{m_{i} - m_{\min}}{m_{\max} - m_{\min}}, & \text{if maximized,} \\[6pt] 1 - \dfrac{m_{i} - m_{\min}}{m_{\max} - m_{\min}}, & \text{if minimized,} \end{array} \right. \tag{6}
|
| 170 |
+
$$
|
| 171 |
+
|
| 172 |
+
where $m_{\mathrm{min}}$ and $m_{\mathrm{max}}$ denote the minimum and maximum values observed across all positive experiences for the metric $m$ . If all metric values are identical, we default to a neutral utility score of 0.5 to avoid division by zero. The overall weighted utility score is computed as:
|
| 173 |
+
|
| 174 |
+
$$
|
| 175 |
+
u_{i} = \frac{\sum_{m \in \mathcal{M}} w_{m} \cdot m_{i}^{\prime}}{\sum_{m \in \mathcal{M}} w_{m}}, \tag{7}
|
| 176 |
+
$$
|
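A sketch of the Weighted Sum utility of equations (6)-(7), following the normalisation and the 0.5 fallback described above; the metric names, weights, and values in the example are illustrative only.

```python
def weighted_sum_utilities(experiences, directions, weights):
    """experiences: list of dicts metric -> value (positive experiences only).
    directions: metric -> "max" or "min"; weights: metric -> w_m."""
    # Pre-compute per-metric min/max across all positive experiences.
    bounds = {m: (min(e[m] for e in experiences), max(e[m] for e in experiences))
              for m in directions}
    utilities = []
    for e in experiences:
        score, total_w = 0.0, 0.0
        for m, w in weights.items():
            lo, hi = bounds[m]
            if hi == lo:
                norm = 0.5                          # neutral score when all values coincide
            else:
                norm = (e[m] - lo) / (hi - lo)      # Eq. (6)
                if directions[m] == "min":
                    norm = 1.0 - norm
            score += w * norm
            total_w += w
        utilities.append(score / total_w)           # Eq. (7)
    return utilities

# Example: trade off macro-F1 (maximise) against evaluation time in seconds (minimise).
exps = [{"f1": 0.40, "et": 600}, {"f1": 0.35, "et": 150}]
print(weighted_sum_utilities(exps, {"f1": "max", "et": "min"}, {"f1": 0.7, "et": 0.3}))
```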
| 177 |
+
|
| 178 |
+
Linear Front. In the Linear Front utility scheme, we first apply non-dominated sorting (NSGA-II style (Deb et al., 2002)) to all positive experiences, creating $N$ Pareto fronts based on the recorded metrics in $\mathcal{M}$ . Experiences in front 0 are non-dominated, followed by those in front 1, and so forth. Each positive experience $e_i$ in front $f_i$ is assigned a utility score inversely proportional to its front rank:
|
| 179 |
+
|
| 180 |
+
$$
|
| 181 |
+
u_{i} = \frac{N - f_{i}}{N}, \tag{8}
|
| 182 |
+
$$
|
| 183 |
+
|
| 184 |
+
Logarithmic Front. Using non-dominated sorting, the Logarithmic Front approach similarly ranks experiences into $N$ Pareto fronts. However, to amplify the distinction among the highest-performing experiences (i.e., those in lower-numbered fronts), utilities decrease logarithmically with rank:
|
| 185 |
+
|
| 186 |
+
$$
|
| 187 |
+
u_{i} = \frac{\ln(N - f_{i} + 1)}{\ln(N + 1)}, \tag{9}
|
| 188 |
+
$$
|
| 189 |
+
|
| 190 |
+
These three utility functions provide complementary strategies for prioritizing past experiences. This flexibility allows XAutoLM to adapt effectively across diverse AutoML scenarios.
|
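The two front-based schemes of equations (8)-(9) can be sketched with a simple quadratic-time non-dominated sort, used here as a stand-in for the NSGA-II procedure; objectives are assumed to be oriented so that larger is better.

```python
import math

def pareto_fronts(points):
    """Rank points (all objectives to be maximised) into Pareto fronts; front 0 is non-dominated."""
    remaining, fronts = list(range(len(points))), []
    while remaining:
        front = [i for i in remaining
                 if not any(all(points[j][k] >= points[i][k] for k in range(len(points[i])))
                            and points[j] != points[i]
                            for j in remaining)]
        fronts.append(front)
        remaining = [i for i in remaining if i not in front]
    return fronts

def front_utilities(points, scheme="linear"):
    fronts = pareto_fronts(points)
    n_fronts = len(fronts)
    u = [0.0] * len(points)
    for f, members in enumerate(fronts):
        for i in members:
            if scheme == "linear":
                u[i] = (n_fronts - f) / n_fronts                              # Eq. (8)
            else:
                u[i] = math.log(n_fronts - f + 1) / math.log(n_fronts + 1)    # Eq. (9)
    return u

# Example: objectives already oriented for maximisation, e.g. (F1, -evaluation_time).
print(front_utilities([(0.40, -600), (0.35, -150), (0.30, -700)], scheme="log"))
```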
| 191 |
+
|
| 192 |
+
# 4 Experimentation
|
| 193 |
+
|
| 194 |
+
We report results from two independent transfer experiments designed to isolate knowledge reuse within a task family. The first study targets text classification. LIAR (Wang, 2017), SST-2 (Socher et al., 2013), MELD (Poria et al., 2018) and AG News (Zhang et al., 2015) present a deliberate gradient in sample size, label entropy, and average document length: LIAR (6 classes, 13k claims) and MELD (7 emotions, 14k utterances) are notoriously low-resource, whereas the polarity benchmark SST-2 (68k) and the large-scale news
|
| 195 |
+
|
| 196 |
+
corpus AG (128k) approach the upper bound of single-GPU throughput. Previous work shows peak $F1_{\mathrm{macro}}$ to vary from 0.23 (LIAR) to 0.93 (AG) (Reusens et al., 2024), offering a realistic range for efficiency-performance trade-offs.
|
| 197 |
+
|
| 198 |
+
The second experiment focuses on question answering. We select SQuAD 1.1 (Rajpurkar et al., 2016) and DROP (Dua et al., 2019) because they share the same input modality yet differ sharply in answer type, extractive spans versus multi-step numerical reasoning, making them a challenging test-bed for generative pipelines. For both studies, experiences are only exchanged among tasks of the same family; classification traces are invisible to QA runs and vice-versa. This constraint ensures that the reported gains stem from task-relevant meta-knowledge rather than accidental data leakage.
|
| 199 |
+
|
| 200 |
+
Hardware. All classification experiments run on an i9-9900K (16 threads, 35 GB RAM cap) paired with a single RTX TITAN (24 GB). QA experiments require larger context windows and execute on an AMD EPYC 7742 (64 threads, identical RAM cap) with an A100 40 GB.
|
| 201 |
+
|
| 202 |
+
Baselines. Every run is compared against Zero-Shot AutoGOAL, the original optimizer with a uniform sampling distribution; in this setting, the update rules of equations (1)-(9) are never triggered.
|
| 203 |
+
|
| 204 |
+
In the text classification study, we include a kNN-50 memory baseline as a naive experience-retrieval method. For every target task, we assemble a query vector that concatenates (a) the task meta-features, (b) the current system profile, and (c) the best metric values observed across all stored traces; this encourages the search to drift toward high-performing regions. Distances to positive traces are computed on the full feature+metric space, whereas distances to negative traces ignore metrics (errors lack valid scores). The $k$ nearest positives and $k$ nearest negatives are selected; all receive the same fixed learning rate $\alpha_{i}^{\pm} = 1 / k$. Setting $u_{i} = 1$ and $\beta = 0$ in equations (3)-(4) reduces our framework to this simple neighbour rule. For question answering, the repository contains only between 5 and 10 positive traces per source task, making a nearest-neighbour baseline unreliable; therefore, Zero-Shot remains the sole baseline in that study.
|
| 205 |
+
|
| 206 |
+
Warm-Start Priors. Throughout the paper, a pipeline configuration is a concrete tuple (LM, fine-tuning recipe, hyperparameters) that the AutoML engine executes and evaluates. A warm-start prior (WS prior) instead parameterizes the initial sampling distributions used by the meta-learner; it is defined by the distance type, utility scheme, decay factor $\beta_{\mathrm{scale}}$ , and pull limits $(k_{\mathrm{pos}}, k_{\mathrm{neg}})$ .
|
| 207 |
+
|
| 208 |
+
For each task, we enumerate $\approx 180$ WS-prior parameterizations. For a given candidate prior and target task, we apply the prior with the fixed experience store (leaving out the experiences generated on the current task) to obtain the induced sampling distribution $p$ over fine-tuning methods on that task. We then compute the total-variation (TV) distance between this induced marginal and the uniform distribution over the same method set. We rank candidates by TV and split them into three data-driven strata (low | moderate | high bias) at prominent TV gaps ( $\approx 2\times$ ). In classification, we select per stratum the median-TV and max-TV priors (six priors total). In QA, we select only the max-TV prior per stratum (three priors) to respect the compute budget. Full probability plots of the induced method distributions and the selected prior identifiers are provided in Appendix B.
|
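The total-variation criterion used to stratify candidate priors can be sketched as follows. A candidate prior is summarised here only by its induced marginal over fine-tuning methods, the marginals are hypothetical, and the gap-based split at $\approx 2\times$ is simplified to a plain ranking for illustration.

```python
def tv_distance(p, q):
    """Total-variation distance between two distributions over the same support."""
    keys = set(p) | set(q)
    return 0.5 * sum(abs(p.get(k, 0.0) - q.get(k, 0.0)) for k in keys)

methods = ["full", "partial", "lora"]
uniform = {m: 1 / len(methods) for m in methods}

# Hypothetical induced marginals over fine-tuning methods for three candidate WS priors.
candidates = {
    "prior_a": {"full": 0.34, "partial": 0.33, "lora": 0.33},   # low bias
    "prior_b": {"full": 0.20, "partial": 0.30, "lora": 0.50},   # moderate bias
    "prior_c": {"full": 0.05, "partial": 0.15, "lora": 0.80},   # high bias
}

# Rank candidates by how far their induced marginal departs from the uniform distribution.
for name, marginal in sorted(candidates.items(), key=lambda kv: tv_distance(kv[1], uniform)):
    print(name, round(tv_distance(marginal, uniform), 3))
```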
| 209 |
+
|
| 210 |
+
Execution protocol. For each task, we first ran the Zero-shot configuration for 48 hours to populate the experience store. Table 4 reports the positive/negative traces generated by this baseline run on each task. We then executed the kNN-50 baseline and all WS-prior variants for 24 hours of wall-clock time each. The warm-start mechanism accesses only experiences originating from other tasks within the same study (clean cross-task transfer; see Table 4). For fairness in reporting, Zero-shot metrics are computed from the first 24 hours of their 48-hour runs, matching the wall-clock time allocated to WS-priors and kNN-50. This protocol isolates whether experience improves both effectiveness and efficiency under the same time budget.
|
| 211 |
+
|
| 212 |
+
In every AutoML run, each discovered LM pipeline has up to 1.5 GPU-hours in Text Classification and 2 GPU-hours in QA for evaluation. Objectives are $\langle F1_{\mathrm{macro}},ET\rangle$ for classification and $\langle F1,ET\rangle$ for QA, where $ET$ is the wall-clock evaluation time of a pipeline (in seconds). All searches share a fixed random seed (42) and the same hardware; therefore, differences arise solely from the chosen warm-start prior.
|
| 213 |
+
|
| 214 |
+
<table><tr><td rowspan="2">Dataset</td><td colspan="3">Generated</td><td colspan="3">Available</td></tr><tr><td>Pos</td><td>Neg</td><td>Total</td><td>Pos</td><td>Neg</td><td>Total</td></tr><tr><td>LIAR</td><td>100</td><td>236</td><td>336</td><td>116</td><td>480</td><td>596</td></tr><tr><td>SST2</td><td>33</td><td>122</td><td>155</td><td>183</td><td>594</td><td>777</td></tr><tr><td>MELD</td><td>68</td><td>190</td><td>258</td><td>148</td><td>526</td><td>674</td></tr><tr><td>AG NEWS</td><td>15</td><td>168</td><td>183</td><td>216</td><td>548</td><td>764</td></tr><tr><td>SQUAD</td><td>5</td><td>124</td><td>129</td><td>10</td><td>160</td><td>170</td></tr><tr><td>DROP</td><td>10</td><td>160</td><td>170</td><td>5</td><td>124</td><td>129</td></tr></table>
|
| 215 |
+
|
| 216 |
+
Table 4: Disposition of experiences participating in the experiments.
|
| 217 |
+
|
| 218 |
+
# 4.1 Text Classification Results
|
| 219 |
+
|
| 220 |
+
Table 5 summarizes the effect of WS-priors on the four classification benchmarks. We report both performance and efficiency: max and mean $F1_{\text{macro}}$ reflect peak and average classification quality; mean evaluation time (ET) captures resource cost; the error ratio indicates the share of failed pipeline evaluations; and hypervolume (HV) measures Pareto-front coverage in objective space (Zitzler and Thiele, 1998). Mean ET is averaged over successfully completed pipeline evaluations only (i.e., runs that return valid fitness metrics); failed evaluations (e.g., out-of-memory, timeouts, runtime errors) are excluded from ET and are accounted for by the error ratio. All methods are run under the same 24 hours single-GPU budget (cf. Execution protocol), so ET differences reflect pipeline runtime rather than total search compute.
|
| 221 |
+
|
| 222 |
+
Across datasets, WS priors either match or surpass the best Zero-shot $F1_{\mathrm{m}}$ while systematically improving efficiency. On LIAR, a HIGH prior lifts peak $F1_{\mathrm{m}}$ from 0.24 to 0.26, cuts the mean $ET$ by a factor of 3.5, and lowers the error ratio by sevenfold. A similar pattern emerges on MELD, where HIGH drives the error ratio from 0.77 to 0.10 and reduces mean $ET$ $4.5 \times$ , while keeping $F1_{\mathrm{m}}$ above the baseline. On SST-2, the Zero-shot baseline generated the highest $F1_{\mathrm{m}}$ and lowest $ET$ out of all variants.
|
| 223 |
+
|
| 224 |
+
Zero-shot runs exhibit high error ratios across all benchmarks (e.g., 0.73-0.92); the WS priors reduce these failure rates to 0.09-0.90. Moreover, non-naive warm-started runs show a substantial reduction in mean $ET$ while maintaining peak $F1_{\mathrm{m}}$. On AG News, all WS runs improve max $F1_{\mathrm{m}}$ while several improve $ET$, HV, and Error Ratio, showing that better performance-time trade-offs are discoverable even in large-scale settings.
|
| 225 |
+
|
| 226 |
+
The naive kNN-50 baseline, although it attains a large HV value on SST-2, degrades performance on three datasets and notably obtains the worst
|
| 227 |
+
|
| 228 |
+
<table><tr><td></td><td>WS Prior</td><td>Max F1m</td><td>Mean F1m</td><td>Min ET</td><td>Mean ET</td><td>HV</td><td>No. Eval</td><td>Error Ratio</td></tr><tr><td rowspan="11">LIAR</td><td>Zero-shot</td><td>0.24</td><td>0.10</td><td>12</td><td>537</td><td>0.06</td><td>202</td><td>0.73</td></tr><tr><td>kNN (50)</td><td>0.24</td><td>0.10</td><td>28</td><td>451</td><td>0.11</td><td>240</td><td>0.44</td></tr><tr><td>Low (LIAR)</td><td>0.26</td><td>0.10</td><td>16</td><td>480</td><td>0.10</td><td>197</td><td>0.70</td></tr><tr><td>Low (Med)</td><td>0.25</td><td>0.09</td><td>31</td><td>380</td><td>0.36</td><td>220</td><td>0.69</td></tr><tr><td>Low (Max)</td><td>0.25</td><td>0.09</td><td>21</td><td>410</td><td>0.08</td><td>190</td><td>0.66</td></tr><tr><td>Mod (LIAR)</td><td>0.26</td><td>0.10</td><td>36</td><td>462</td><td>0.01</td><td>132</td><td>0.53</td></tr><tr><td>Mod (Med)</td><td>0.24</td><td>0.10</td><td>13</td><td>469</td><td>0.04</td><td>146</td><td>0.61</td></tr><tr><td>Mod (Max)</td><td>0.25</td><td>0.08</td><td>44</td><td>516</td><td>0.05</td><td>121</td><td>0.39</td></tr><tr><td>High (LIAR)</td><td>0.25</td><td>0.10</td><td>6</td><td>153</td><td>0.20</td><td>302</td><td>0.09</td></tr><tr><td>High (Med)</td><td>0.25</td><td>0.10</td><td>9</td><td>277</td><td>0.12</td><td>193</td><td>0.33</td></tr><tr><td>High (Max)</td><td>0.26</td><td>0.09</td><td>12</td><td>252</td><td>0.09</td><td>208</td><td>0.25</td></tr><tr><td rowspan="11">SST2</td><td>Zero-shot</td><td>0.94</td><td>0.69</td><td>97</td><td>1297</td><td>0.02</td><td>76</td><td>0.77</td></tr><tr><td>kNN (50)</td><td>0.93</td><td>0.59</td><td>326</td><td>1758</td><td>0.54</td><td>72</td><td>0.62</td></tr><tr><td>Low (LIAR)</td><td>0.90</td><td>0.48</td><td>373</td><td>1148</td><td>0.15</td><td>87</td><td>0.82</td></tr><tr><td>Low (Med)</td><td>0.90</td><td>0.52</td><td>227</td><td>840</td><td>0.02</td><td>62</td><td>0.83</td></tr><tr><td>Low (Max)</td><td>0.94</td><td>0.58</td><td>252</td><td>784</td><td>0.01</td><td>98</td><td>0.81</td></tr><tr><td>Mod (LIAR)</td><td>0.93</td><td>0.56</td><td>245</td><td>996</td><td>0.20</td><td>59</td><td>0.64</td></tr><tr><td>Mod (Med)</td><td>0.94</td><td>0.52</td><td>132</td><td>1030</td><td>0.04</td><td>34</td><td>0.55</td></tr><tr><td>Mod (Max)</td><td>0.93</td><td>0.52</td><td>184</td><td>1170</td><td>0.06</td><td>58</td><td>0.51</td></tr><tr><td>High (LIAR)</td><td>0.92</td><td>0.62</td><td>365</td><td>1160</td><td>0.02</td><td>42</td><td>0.61</td></tr><tr><td>High (Med)</td><td>0.94</td><td>0.53</td><td>164</td><td>844</td><td>0.09</td><td>52</td><td>0.68</td></tr><tr><td>High (Max)</td><td>0.94</td><td>0.61</td><td>320</td><td>857</td><td>0.16</td><td>53</td><td>0.79</td></tr><tr><td rowspan="11">MELD</td><td>Zero-shot</td><td>0.41</td><td>0.15</td><td>39</td><td>808</td><td>0.11</td><td>161</td><td>0.77</td></tr><tr><td>kNN (50)</td><td>0.37</td><td>0.11</td><td>52</td><td>768</td><td>0.00</td><td>59</td><td>0.54</td></tr><tr><td>Low (LIAR)</td><td>0.46</td><td>0.14</td><td>20</td><td>532</td><td>0.06</td><td>150</td><td>0.64</td></tr><tr><td>Low (Med)</td><td>0.45</td><td>0.11</td><td>17</td><td>387</td><td>0.30</td><td>229</td><td>0.64</td></tr><tr><td>Low (Max)</td><td>0.39</td><td>0.09</td><td>30</td><td>477</td><td>0.36</td><td>186</td><td>0.65</td></tr><tr><td>Mod (LIAR)</td><td>0.40</td><td>0.11</td><td>26</td><td>514</td><td>0.00</td><td>106</td><td>0.39</td></tr><tr><td>Mod 
(Med)</td><td>0.40</td><td>0.11</td><td>36</td><td>546</td><td>0.03</td><td>130</td><td>0.52</td></tr><tr><td>Mod (Max)</td><td>0.38</td><td>0.09</td><td>24</td><td>590</td><td>0.08</td><td>110</td><td>0.52</td></tr><tr><td>High (LIAR)</td><td>0.44</td><td>0.14</td><td>7</td><td>179</td><td>0.09</td><td>260</td><td>0.10</td></tr><tr><td>High (Med)</td><td>0.43</td><td>0.13</td><td>21</td><td>466</td><td>0.27</td><td>124</td><td>0.45</td></tr><tr><td>High (Max)</td><td>0.42</td><td>0.12</td><td>12</td><td>322</td><td>0.01</td><td>233</td><td>0.51</td></tr><tr><td rowspan="11">AG NEWS</td><td>Zero-shot</td><td>0.90</td><td>0.62</td><td>424</td><td>1043</td><td>0.00</td><td>108</td><td>0.92</td></tr><tr><td>kNN (50)</td><td>0.67</td><td>0.28</td><td>478</td><td>1881</td><td>0.09</td><td>22</td><td>0.77</td></tr><tr><td>Low (LIAR)</td><td>0.93</td><td>0.73</td><td>349</td><td>1183</td><td>0.01</td><td>93</td><td>0.90</td></tr><tr><td>Low (Med)</td><td>0.92</td><td>0.65</td><td>665</td><td>1589</td><td>0.20</td><td>83</td><td>0.89</td></tr><tr><td>Low (Max)</td><td>0.93</td><td>0.60</td><td>560</td><td>1164</td><td>0.00</td><td>77</td><td>0.90</td></tr><tr><td>Mod (LIAR)</td><td>0.92</td><td>0.46</td><td>404</td><td>1345</td><td>0.12</td><td>50</td><td>0.80</td></tr><tr><td>Mod (Med)</td><td>0.93</td><td>0.59</td><td>484</td><td>1102</td><td>0.01</td><td>48</td><td>0.79</td></tr><tr><td>Mod (Max)</td><td>0.92</td><td>0.56</td><td>249</td><td>1402</td><td>0.01</td><td>57</td><td>0.73</td></tr><tr><td>High (LIAR)</td><td>0.93</td><td>0.46</td><td>318</td><td>1437</td><td>0.00</td><td>45</td><td>0.71</td></tr><tr><td>High (Med)</td><td>0.93</td><td>0.51</td><td>253</td><td>833</td><td>0.09</td><td>58</td><td>0.86</td></tr><tr><td>High (Max)</td><td>0.92</td><td>0.54</td><td>350</td><td>1576</td><td>0.01</td><td>46</td><td>0.73</td></tr></table>
|
| 229 |
+
|
| 230 |
+
results of all priors on AG NEWS $(0.90\rightarrow 0.67$ $F1_{\mathrm{m}})$ and MELD $(0.41\to 0.37$ $F1_{\mathrm{m}})$.
|
| 231 |
+
|
| 232 |
+
# 4.2 Question Answering Results
|
| 233 |
+
|
| 234 |
+
Table 6 reports results on the generative SQuAD 1.1 and DROP datasets. Knowledge reused from a single related task already yields substantial gains. For SQuAD, WS priors outperform the baseline in almost all metrics. The HIGH-MAX prior, in particular, raises $F1$ from 0.34 to 0.89 while shrinking
|
| 235 |
+
|
| 236 |
+
Table 5: Results overview in text classification. Priors with " (LIAR)" suffix were calibrated during a single-objective pilot on LIAR. The same meta-parameters are then applied unchanged to every new target task. Full probability curves and all prior IDs are listed in Appendices B-C.
|
| 237 |
+
|
| 238 |
+
<table><tr><td colspan="2">WS Prior</td><td>Max F1</td><td>Mean F1m</td><td>Min ET</td><td>Mean ET</td><td>HV</td><td>No. Eval</td><td>Error Ratio</td></tr><tr><td rowspan="4">SQUAD</td><td>Zero-shot</td><td>0.34</td><td>0.23</td><td>2189</td><td>4081</td><td>0.25</td><td>71</td><td>0.95</td></tr><tr><td>Low (Max)</td><td>0.89</td><td>0.33</td><td>1435</td><td>3150</td><td>0.03</td><td>30</td><td>0.76</td></tr><tr><td>Mod (Max)</td><td>0.86</td><td>0.41</td><td>1468</td><td>1953</td><td>0.01</td><td>32</td><td>0.90</td></tr><tr><td>High (Max)</td><td>0.89</td><td>0.87</td><td>1195</td><td>1337</td><td>0.0</td><td>15</td><td>0.8</td></tr><tr><td rowspan="4">DROP</td><td>Zero-shot</td><td>0.39</td><td>0.18</td><td>2114</td><td>3556</td><td>0.11</td><td>96</td><td>0.94</td></tr><tr><td>Low (Max)</td><td>0.18</td><td>0.11</td><td>4995</td><td>5929</td><td>0.05</td><td>32</td><td>0.90</td></tr><tr><td>Mod (Max)</td><td>0.40</td><td>0.23</td><td>775</td><td>2259</td><td>0.29</td><td>66</td><td>0.86</td></tr><tr><td>High (Max)</td><td>0.40</td><td>0.28</td><td>783</td><td>1881</td><td>0.13</td><td>34</td><td>0.82</td></tr></table>
|
| 239 |
+
|
| 240 |
+
Table 6: Results overview in Question Answering.
|
| 241 |
+
|
| 242 |
+
mean ET from 4081 s to 1337 s (roughly a $3\times$ reduction). As in the text classification results, WS priors bring error ratios down from 0.94-0.95 (Zero-shot) to 0.76-0.90.
|
| 243 |
+
|
| 244 |
+
On DROP, the LOW prior illustrates negative transfer, yet both MODERATE and HIGH priors outperform Zero-shot on every metric; peak $F1$ improves slightly (0.39→0.40) and mean ET falls by 47%. These outcomes confirm that cross-task meta-knowledge generalizes beyond classification and that the adaptive pull/push schedule mitigates catastrophic transfers.
|
| 245 |
+
|
| 246 |
+
# 5 Discussion
|
| 247 |
+
|
| 248 |
+
Warm-start priors consistently steer the search toward stronger performance-time trade-offs across all six benchmarks. Figure 1 reports the winning ratio: the share of evaluated LM pipelines that improve upon the zero-shot Pareto front.
|
| 249 |
+
|
| 250 |
+

|
| 251 |
+
Figure 1: Ratio of discovered pipelines outperforming the Zero-shot baseline in Text Classification and QA.
|
| 252 |
+
|
| 253 |
+
The HIGH-MAX prior is the most stable, winning about $20\%$ of pipelines on SQuAD, LIAR, MELD, and DROP, and $10 - 15\%$ on SST-2 and AG News. On the LIAR and MELD pair, the HIGH-LIAR prior achieves winning ratios near $50\%$ and $40\%$ , respectively, while cutting the error rate by a factor of seven (Table 5). For clarity, all
|
| 254 |
+
|
| 255 |
+

|
| 256 |
+
Figure 2: Pareto Fronts discovered by the different Priors on SST2 (a) and SQUAD (b).
|
| 257 |
+
|
| 258 |
+

|
| 259 |
+
|
| 260 |
+
ET values are computed only on successful evaluations, while failure rates are captured by the Error Ratio, with all methods allotted an identical 24 GPU-hour wall-clock budget per run.
|
| 261 |
+
|
| 262 |
+

|
| 263 |
+
Figure 3: Distance between Text Classification Tasks according to their meta-features (Section 3.2.1).
|
| 264 |
+
|
| 265 |
+
These results show that combining experience discrimination with adaptive probability shifts yields the best of both worlds: rapid convergence when relevant meta-knowledge exists yet robustness when it does not. Whenever the experience store contained closely related traces, e.g., MELD-LIAR (Figure 3), the similarity-aware priors trimmed average evaluation time by up to $4.5\mathrm{x}$ and increased peak $F_{1_{\mathrm{m}}}$ (Table 5). Even on sparsely related tasks such as SST-2 and AG News, softer pulls uncovered superior Pareto trade-offs by moderating exploration strength (Figure 2a).
|
| 266 |
+
|
| 267 |
+
The baseline performance of kNN highlights the significance of selective memory. While it has access to both positive and negative examples, it assigns equal weight to all neighbors, failing to demote weak configurations and causing accuracy to fall on three of four classification datasets. In contrast, XAutoLM's asymmetric pull-push update penalizes both past failures and underperforming
|
| 268 |
+
|
| 269 |
+
successes. DROP, for example, illustrates the need to learn from failures: a low-bias prior that ignores negatives collapses to $F_{1} = 0.18$ , whereas reinstating the push restores $F_{1} = 0.40$ and halves mean evaluation time.
|
| 270 |
+
|
| 271 |
+
Our findings further show that transfer using our method extends beyond classification. With barely a handful of relevant experience, a high-bias prior multiplies SQuAD $F1$ from $\approx 0.3$ to $\approx 0.9$ and compresses evaluation time by threefold, producing a dominant Pareto front (Figure 2b). On the other hand, DROP illustrates the importance of negative experiences: a low-bias prior that ignores negatives collapses to $F_{1} = 0.18$ , whereas reinstating the push restores $F_{1} = 0.40$ and cuts mean evaluation time by $50\%$ (Table 6).
|
| 272 |
+
|
| 273 |
+
A core motivation of our framework is to reduce the carbon footprint and environmental toll of repeated large-scale language model fine-tuning. By systematically reusing insights from past runs, XAutoLM significantly reduces redundant evaluations and lowers the overall error rate during the search. Beyond simply lowering compute hours, this approach aligns with the growing Green AI ethos in NLP (Wang et al., 2023b; Schwartz et al., 2020), emphasizing the importance of responsible resource usage. Our experiments demonstrate that our warm-start strategy enhances performance and streamlines the search process, resulting in algorithms that strike a better balance between efficiency and performance.
|
| 274 |
+
|
| 275 |
+
# 6 Conclusions
|
| 276 |
+
|
| 277 |
+
XAutoLM converts the costly trial-and-error of language model fine-tuning into a guided, resource-
|
| 278 |
+
|
| 279 |
+
aware search. By seeding the optimizer with a similarity-weighted prior built from past successes & failures, the framework consistently uncovers pipelines with superior performance-time trade-offs. Across four text-classification corpora and two generative QA benchmarks, it surpasses the best zero-shot $F_{1}$ on five tasks, matching it on SST-2, while cutting mean pipeline evaluation time by up to a factor of four and reducing error rates by as much as sevenfold. These gains hold across a refreshed model pool that ranges from lightweight discriminative to compact generative models. Because every recovered pipeline reuses information already paid for, XAutoLM advances the Green AI agenda (Schwartz et al., 2020), delivering competitive results in less search time, while avoiding redundant computation.
|
| 280 |
+
|
| 281 |
+
# 7 Limitations
|
| 282 |
+
|
| 283 |
+
We identify some limitations of our study that highlight avenues for further investigation:
|
| 284 |
+
|
| 285 |
+
# Scaling to bigger LLMs
|
| 286 |
+
|
| 287 |
+
XAutoLM is scale-agnostic: the optimizer treats candidates as black-box fit/evaluate calls and does not rely on model internals. Our open-source implementation presently evaluates on a single GPU, which constrained the largest models tested; this is a property of the evaluator backend, not of the optimization method. The experience store logs a minimal hardware profile (Section 3.2), which helps steer the search away from infeasible pipelines under a given machine with a single GPU setup. Supporting larger models, therefore, amounts to adding multi-GPU meta-features and swapping in a larger-model evaluator (e.g., parameter-efficient (Hu et al., 2021)/quantized (Nagel et al., 2021; Dettmers et al., 2023) or distributed evaluators (Zhao et al., 2023)) in future releases; the search algorithm and experience-based priors remain unchanged. We leave such engineering backends to future work and keep our claims limited to the single-GPU setting evaluated here.
|
| 288 |
+
|
| 289 |
+
# Multimodality
|
| 290 |
+
|
| 291 |
+
The current experience store and benchmarks are text-only; verifying that the warm-start prior transfers to dialogue, speech, or multimodal pipelines is an essential next step.
|
| 292 |
+
|
| 293 |
+
# Statistical Tests
|
| 294 |
+
|
| 295 |
+
Statistical support is available only for the single-objective probes archived in Appendix C. Extending significance testing to the multi-objective fronts of Tables 5 and 6 would require many repeated runs and is left for future work, where bootstrap or fully Bayesian analyses are planned.
|
| 296 |
+
|
| 297 |
+
# Efficiency Measures
|
| 298 |
+
|
| 299 |
+
Our energy discussion rests on the empirical link between execution time and power draw reported by prior work (Wang et al., 2023b; Estevanell-Valladares et al., 2024); we did not log wattage directly. The next release of XAutoLM will record real-time power and emit $\mathrm{CO}_{2}$ estimates alongside performance metrics.
|
| 300 |
+
|
| 301 |
+
# Acknowledgments
|
| 302 |
+
|
| 303 |
+
This research has been partially funded by the University of Alicante, the University of Havana, the Spanish Ministry of Science and Innovation, the Generalitat Valenciana, and the European Regional Development Fund (ERDF) through the following funding: At the regional level, and as the primary source of support, the Generalitat Valenciana (Conselleria d'Educacion, Investigacio, Cultura i Esport), FEDER granted funding for CIDEGENT (CIDEXG/2023/13); and NL4DISMIS (CIPROM/2021/21). At the national level, the following projects were granted: HEART-NLP (PID2024-156263OB-C22); COOLANG (PID2021-122263OB-C22); SOCIALTRUST (PDC2022-133146-C22); ILENIA (2022/TL22/00215334) and ALIA models (https://alia.gob.es) funded by MCIN/AEI/10.13039/501100011033 and, as appropriate, by ERDF A way of making Europe, by the European Union or by the European Union NextGenerationEU/PRTR; and by the State Subprogram for Training, Attraction, and Retention of Talent (PEICTI 2024) of the Spanish Ministry of Science and Innovation, grant PRX24/00272.
|
| 304 |
+
|
| 305 |
+
# References
|
| 306 |
+
|
| 307 |
+
Marah Abdin, Jyoti Aneja, Hany Awadalla, Ahmed Awadallah, Ammar Ahmad Awan, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Jianmin Bao, Harkirat Behl, et al. 2024a. Phi-3 technical report: A highly capable language model locally on your phone. arXiv preprint arXiv:2404.14219.
|
| 308 |
+
|
| 309 |
+
Marah Abdin, Jyoti Aneja, Hany Awadalla, Ahmed Awadallah, Ammar Ahmad Awan, et al. 2024b. Phi-3 technical report: A highly capable language model locally on your phone. Preprint, arXiv:2404.14219.
|
| 310 |
+
Jinze Bai, Shuai Bai, Yunfei Chu, Zeyu Cui, Kai Dang, Xiaodong Deng, Yang Fan, Wenbin Ge, Yu Han, Fei Huang, et al. 2023. Qwen technical report. arXiv preprint arXiv:2309.16609.
|
| 311 |
+
Nesrine Bannour, Sahar Ghannay, Aurélie Néveol, and Anne-Laure Ligozat. 2021. Evaluating the carbon footprint of nlp methods: a survey and analysis of existing tools. In Proceedings of the second workshop on simple and efficient natural language processing, pages 11-21.
|
| 312 |
+
Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Yunxuan Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, et al. 2024. Scaling instruction-finetuned language models. Journal of Machine Learning Research, 25(70):1-53.
Kevin Clark, Minh-Thang Luong, Quoc V Le, and Christopher D Manning. 2020. ELECTRA: Pre-training text encoders as discriminators rather than generators. arXiv preprint arXiv:2003.10555.

Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer, and Veselin Stoyanov. 2020. Unsupervised cross-lingual representation learning at scale. Preprint, arXiv:1911.02116.

Kalyanmoy Deb, Amrit Pratap, Sameer Agarwal, and T. Meyarivan. 2002. A fast and elitist multiobjective genetic algorithm: NSGA-II. IEEE Transactions on Evolutionary Computation, 6(2):182-197.

DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, et al. 2025. DeepSeek-R1: Incentivizing reasoning capability in LLMs via reinforcement learning. Preprint, arXiv:2501.12948.

Tim Dettmers, Artidoro Pagnoni, Ari Holtzman, and Luke Zettlemoyer. 2023. QLoRA: Efficient finetuning of quantized LLMs. Advances in Neural Information Processing Systems, 36:10088-10115.

Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. BERT: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805.

Dheeru Dua, Yizhong Wang, Pradeep Dasigi, Gabriel Stanovsky, Sameer Singh, and Matt Gardner. 2019. DROP: A reading comprehension benchmark requiring discrete reasoning over paragraphs. arXiv preprint arXiv:1903.00161.

Ernesto L Estevanell-Valladares, Yoan Gutierrez, Andres Montoyo-Guijarro, Rafael Munoz-Guillena, and Yudivian Almeida-Cruz. 2024. Balancing efficiency and performance in NLP: A cross-comparison of shallow machine learning and large language models via AutoML. Procesamiento del Lenguaje Natural, 73:221-233.

Suilan Estevez-Velarde, Yoan Gutierrez, Andres Montoyo, and Yudivian Almeida Cruz. 2020. Automatic discovery of heterogeneous machine learning pipelines: An application to natural language processing. In Proceedings of the 28th International Conference on Computational Linguistics, pages 3558-3568.

Aaron Grattafori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, et al. 2024. The Llama 3 herd of models. Preprint, arXiv:2407.21783.

Pengcheng He, Jianfeng Gao, and Weizhu Chen. 2023. DeBERTaV3: Improving DeBERTa using ELECTRA-style pre-training with gradient-disentangled embedding sharing. Preprint, arXiv:2111.09543.

Pengcheng He, Xiaodong Liu, Jianfeng Gao, and Weizhu Chen. 2021. DeBERTa: Decoding-enhanced BERT with disentangled attention. In International Conference on Learning Representations.

Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2021. LoRA: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685.

Frank Hutter, Lars Kotthoff, and Joaquin Vanschoren. 2019. Automated Machine Learning. Springer.

Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Soricut. 2019. ALBERT: A lite BERT for self-supervised learning of language representations. CoRR, abs/1909.11942.

Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A robustly optimized BERT pretraining approach. Preprint, arXiv:1907.11692.

Daqin Luo, Chengjian Feng, Yuxuan Nong, and Yiqing Shen. 2024. AutoM3L: An automated multimodal machine learning framework with large language models. In Proceedings of the 32nd ACM International Conference on Multimedia, MM '24, pages 8586-8594, New York, NY, USA. Association for Computing Machinery.

Neeratyoy Mallik, Edward Bergman, Carl Hvarfner, Danny Stoll, Maciej Janowski, Marius Lindauer, Luigi Nardi, and Frank Hutter. 2024. PriorBand: Practical hyperparameter optimization in the age of deep learning. Advances in Neural Information Processing Systems, 36.

Mary L McHugh. 2011. Multiple comparison analysis testing in ANOVA. Biochemia Medica, 21(3):203-209.

Mistral AI Team. 2023. Mixtral of experts. https://mistral.ai/news/mixtral-of-experts. Accessed: 2025-05-17.

Mistral AI Team. 2024. Mistral NeMo: our new best small model. https://mistral.ai/news/mistral-nemo. Accessed: 2025-05-17.

Clint Morris, Michael Jurado, and Jason Zutty. 2024. LLM guided evolution - the automation of models advancing models. arXiv preprint arXiv:2403.11446.

Markus Nagel, Marios Fournarakis, Rana Ali Amjad, Yelysei Bondarenko, Mart Van Baalen, and Tijmen Blankevoort. 2021. A white paper on neural network quantization. arXiv preprint arXiv:2106.08295.

Dulce G Pereira, Anabela Afonso, and Fátima Melo Medeiros. 2015. Overview of Friedman's test and post-hoc analysis. Communications in Statistics - Simulation and Computation, 44(10):2636-2653.

Soujanya Poria, Devamanyu Hazarika, Navonil Majumder, Gautam Naik, Erik Cambria, and Rada Mihalcea. 2018. MELD: A multimodal multi-party dataset for emotion recognition in conversations. arXiv preprint arXiv:1810.02508.

Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. 2019. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9.

Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2020. Exploring the limits of transfer learning with a unified text-to-text transformer. Journal of Machine Learning Research, 21(140):1-67.

Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. SQuAD: 100,000+ questions for machine comprehension of text. arXiv preprint arXiv:1606.05250.

Manon Reusens, Alexander Stevens, Jonathan Tonglet, Johannes De Smedt, Wouter Verbeke, Seppe vanden Broucke, and Bart Baesens. 2024. Evaluating text classification: A benchmark study. Expert Systems with Applications, 254:124302.

Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. 2020. DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter. Preprint, arXiv:1910.01108.

Esraa Sayed, Mohamed Maher, Omar Sedeek, Ahmed Eldamaty, Amr Kamel, and Radwa El Shawi. 2024. GizaML: A collaborative meta-learning based framework using LLM for automated time-series forecasting. In EDBT, pages 830-833.

Roy Schwartz, Jesse Dodge, Noah A Smith, and Oren Etzioni. 2020. Green AI. Communications of the ACM, 63(12):54-63.

Yongliang Shen, Kaitao Song, Xu Tan, Dongsheng Li, Weiming Lu, and Yueting Zhuang. 2024. HuggingGPT: Solving AI tasks with ChatGPT and its friends in Hugging Face. Advances in Neural Information Processing Systems, 36.

Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D Manning, Andrew Y Ng, and Christopher Potts. 2013. Recursive deep models for semantic compositionality over a sentiment treebank. In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing, pages 1631-1642.

Alexander Tornede, Difan Deng, Theresa Eimer, Joseph Giovanelli, Aditya Mohan, Tim Ruhkopf, Sarah Segel, Daphne Theodorakopoulos, Tanja Tornede, Henning Wachsmuth, et al. 2023. AutoML in the age of large language models: Current challenges, future opportunities and risks. arXiv preprint arXiv:2306.08107.

Chi Wang, Susan Xueqing Liu, and Ahmed H. Awadallah. 2023a. Cost-effective hyperparameter optimization for large language model generation inference. Preprint, arXiv:2303.04673.

William Yang Wang. 2017. "Liar, liar pants on fire": A new benchmark dataset for fake news detection. arXiv preprint arXiv:1705.00648.

Xiaorong Wang, Clara Na, Emma Strubell, Sorelle Friedler, and Sasha Luccioni. 2023b. Energy and carbon considerations of fine-tuning BERT. arXiv preprint arXiv:2311.10267.

Lanning Wei, Zhiqiang He, Huan Zhao, and Quanming Yao. 2023. Unleashing the power of graph learning through LLM-based autonomous agents. arXiv preprint arXiv:2309.04565.

Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, et al. 2019. HuggingFace's Transformers: State-of-the-art natural language processing. arXiv preprint arXiv:1910.03771.

Qingyun Wu, Gagan Bansal, Jieyu Zhang, Yiran Wu, Shaokun Zhang, Erkang Zhu, Beibin Li, Li Jiang, Xiaoyun Zhang, and Chi Wang. 2023. AutoGen: Enabling next-gen LLM applications via multi-agent conversation framework. arXiv preprint arXiv:2308.08155.

Caiyang Yu, Xianggen Liu, Yifan Wang, Yun Liu, Wentao Feng, Xiong Deng, Chenwei Tang, and Jiancheng Lv. 2024. GPT-NAS: Neural architecture search meets generative pre-trained transformer model. Big Data Mining and Analytics.

Shujian Zhang, Chengyue Gong, Lemeng Wu, Xingchao Liu, and Mingyuan Zhou. 2023. AutoML-GPT: Automatic machine learning with GPT. arXiv preprint arXiv:2305.02499.

Xiang Zhang, Junbo Zhao, and Yann LeCun. 2015. Character-level convolutional networks for text classification. Advances in Neural Information Processing Systems, 28.

Yanli Zhao, Andrew Gu, Rohan Varma, Liang Luo, Chien-Chin Huang, Min Xu, Less Wright, Hamid Shojanazeri, Myle Ott, Sam Shleifer, et al. 2023. PyTorch FSDP: Experiences on scaling fully sharded data parallel. arXiv preprint arXiv:2304.11277.

Yongchao Zhou, Andrei Ioan Muresanu, Ziwen Han, Keiran Paster, Silviu Pitis, Harris Chan, and Jimmy Ba. 2022. Large language models are human-level prompt engineers. arXiv preprint arXiv:2211.01910.

Eckart Zitzler and Lothar Thiele. 1998. Multiobjective optimization using evolutionary algorithms - a comparative case study. In International Conference on Parallel Problem Solving from Nature, pages 292-301. Springer.
# A Additional Implementation Details and Experimental Configurations
In this section, we provide key implementation details to ensure that our work is fully reproducible. Because of the very large number of tested configurations, all candidate configurations used in our multi-objective and single-objective experiments are listed in Appendix B and Appendix C. In our evaluations, candidate configurations were designed with two distinct learning-rate schemes and distance-discrimination strategies, as detailed below.

# A.1 Learning Rate Configuration and Update Strategy

We adopt a dual-mode configuration for the learning-rate updates applied to the probabilistic model. In experiments employing fixed learning rates, we set the parameters to

$$
\alpha_{\max}^{+} = 0.05 \quad \text{and} \quad \alpha_{\max}^{-} = -0.02.
$$

For configurations using adaptive learning rates, the values are computed as

$$
\alpha_{\max}^{+} = \frac{1}{N_{\text{pos}}} \quad \text{and} \quad \alpha_{\max}^{-} = -\frac{1}{N_{\text{neg}}},
$$

where $N_{\mathrm{pos}}$ and $N_{\mathrm{neg}}$ denote the number of positive and negative experiences, respectively. Although these rates carry positive and negative signs to indicate the direction of the update (reinforcing or de-emphasizing a configuration), all update steps are executed using the absolute values.
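For illustration, the following minimal sketch shows how the two learning-rate schemes above could be computed; the function name and the guard for empty experience sets are our own assumptions, not part of the released XAutoLM code.

```python
def learning_rates(n_pos: int, n_neg: int, adaptive: bool = True) -> tuple[float, float]:
    """Return (alpha_max_plus, alpha_max_minus) as defined in Appendix A.1.

    Signs only encode the direction of the update; the actual update steps
    use absolute values. Hypothetical helper, not the framework's API.
    """
    if not adaptive:
        return 0.05, -0.02  # fixed scheme
    # Adaptive scheme: scale by the number of stored experiences.
    alpha_plus = 1.0 / max(n_pos, 1)    # guard against empty experience sets
    alpha_minus = -1.0 / max(n_neg, 1)
    return alpha_plus, alpha_minus
```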
# A.2 Normalization of Meta-Features
All meta-features used for computing distances are standardized using a standard-scaler normalizer. This normalizer computes the mean and standard deviation of the feature vectors (with a small epsilon added to avoid division by zero) and returns the standardized data, ensuring that distance computations are robust and comparable across features.
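A minimal sketch of this normalization step, assuming column-wise z-scoring over the meta-feature matrix; the epsilon value is an assumption rather than the framework's exact constant.

```python
import numpy as np

def standardize(meta_features: np.ndarray, eps: float = 1e-8) -> np.ndarray:
    """Z-score normalize meta-feature vectors column-wise.

    A small epsilon keeps the division well-defined for constant features,
    as described in Appendix A.2. Illustrative only; the name and epsilon
    are assumptions, not the released implementation.
    """
    mean = meta_features.mean(axis=0)
    std = meta_features.std(axis=0)
    return (meta_features - mean) / (std + eps)
```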
# A.3 Beta Scale and Utility Functions
For the decay parameter $\beta$, two formulations are employed: the std-only beta scale is used in single-objective experiments, whereas the std-plus-mean beta scale is applied in multi-objective settings.

All candidates for the single-objective experiments (Appendix C) use a weighted-sum approach with the $F1$ score weight set to 1 and the evaluation-time weight set to 0. Detailed specifications of the candidate configurations can be found in the visualizations provided in the respective sections (Appendix C for single-objective, Appendix B for multi-objective).
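For concreteness, the single-objective utility described above can be sketched as a weighted sum; the function name and signature are ours, not the framework's API.

```python
def weighted_sum_utility(f1_macro: float, eval_time: float,
                         w_f1: float = 1.0, w_time: float = 0.0) -> float:
    """Scalarize the two objectives. With w_time = 0 this reduces to the
    F1 score alone, matching the single-objective setup in Appendix C."""
    return w_f1 * f1_macro - w_time * eval_time
```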
# A.4 Experimental Setup and Computational Resources
The main text fully discloses our experimental setup (Section 4).
# A.5 Framework Overview and Dependencies
XAutoLM is implemented on top of the AutoGOAL framework (Estevanell-Valladares et al., 2024; Estevez-Velarde et al., 2020), leveraging its optimization strategy and abstractions. Our implementation is developed in Python and uses the HuggingFace Transformers library (Wolf et al., 2019) to access pre-trained language models. A complete list of dependencies, environment setup instructions, and detailed documentation on how to run the experiments (including statistical testing), reproduce the results, and navigate the codebase is provided in the repository.

The code and all associated materials can be accessed at the following GitHub repository: https://github.com/EEstevanell/XAutoLM.
# B Multi-Objective Initial Probabilities
This appendix visualizes the initial probability distributions over fine-tuning methods induced by different meta-learning configurations (priors) in our multi-objective experiments (see Section 4). Each configuration is defined by:

1. Inclusion of positive and/or negative experiences,
2. Utility function (Weighted Sum, Linear Front, Logarithmic Front),
3. Distance metric (Euclidean, Cosine) with scaling, and
4. Pull/push limits $k_{\mathrm{pos}}$, $k_{\mathrm{neg}}$ and learning-rate scheme (fixed/adaptive).

Recall that we generated up to 180 candidate configurations per dataset by systematically varying:

1. Inclusion/exclusion of positive (successful) and negative (error) past experiences,
2. Utility functions (e.g., weighted sum, linear front, logarithmic front),
3. Distance metrics (Euclidean, Cosine) and their scaling,
4. $\alpha_{\mathrm{max}}^{+}$ and $\alpha_{\mathrm{max}}^{-}$ values (fixed or adaptive) (Section 3.3).

Each configuration yields a distinct initial probability vector over the available fine-tuning methods, with deviations from the baseline distribution measured via Total Variation (TV) distance. Grouping configurations by TV allows us to categorize them into low, moderate, and high bias levels relative to the baseline's uniform initialization, as sketched below.
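To make the grouping concrete, the sketch below computes the TV distance of a configuration's initial probability vector from the uniform baseline and bins it into a bias level; the bin edges are illustrative assumptions, as the paper only states that configurations are grouped into low, moderate, and high bias.

```python
import numpy as np

def total_variation(p: np.ndarray, q: np.ndarray) -> float:
    """TV distance between two discrete distributions: 0.5 * L1 norm."""
    return 0.5 * float(np.abs(p - q).sum())

def bias_level(prior: np.ndarray, low: float = 0.1, high: float = 0.3) -> str:
    """Bin a configuration's prior by its TV distance from the uniform baseline.

    The cut points `low` and `high` are hypothetical; only the existence of
    the three bins is stated in the paper.
    """
    uniform = np.full(len(prior), 1.0 / len(prior), dtype=float)
    tv = total_variation(np.asarray(prior, dtype=float), uniform)
    if tv < low:
        return "low"
    return "moderate" if tv < high else "high"
```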
# B.1 Classification Tasks
For each classification dataset (LIAR, SST-2, MELD, AG News), Figures 4-7 plot the initial probabilities for representative configurations at each bias level. In each figure:

- Blue: uniform baseline.
- Green, Orange, Red: increasing TV distance (Low, Moderate, High).
- Patterned bars: selected Max-TV configuration within each bin.

LIAR. Figure 4 shows the initial probabilities of using each fine-tuning method for the LIAR dataset, sorted by their overall difference from the baseline. Blue bars indicate the baseline configuration, whereas green, orange, and red bars represent configurations increasingly diverging from the baseline. Patterned bars mark the selected representative configuration for each bias level.

SST-2. Figure 5 illustrates the same analysis on SST-2. Although the dataset differs substantially from LIAR in its meta-features (e.g., number of classes, data size, label distribution), we observe a similar pattern in how the bias level shifts probability among alternative fine-tuning methods. The High (Max) configuration is notably more aggressive than LIAR's.

MELD. Figure 6 shows the MELD dataset's initial distributions. As discussed in Section 4, MELD shares some meta-feature similarities with LIAR (Figure 3), causing some distributions to concentrate around methods found promising in LIAR's prior runs.

AG News. Lastly, Figure 7 displays the candidate configurations for AG News, a large corpus with four news categories.
# B.2 QA Tasks
Figures 8a and 8b show the analogous distributions for DROP and SQuAD. Despite fewer experiences, meta-learning concentrates probability mass on the partial and traditional fine-tuning strategies while avoiding LoRA.

These visualizations underscore how our meta-learning strategy adapts the search space before optimization begins. By systematically adjusting the initial probabilities, XAutoLM avoids blindly searching all possibilities and exploits task similarities to emphasize configurations that are historically more successful or resource-feasible.


Figure 4: Initial probability distributions for fine-tuning methods on LIAR.


Figure 5: Initial probability distributions for fine-tuning methods on SST-2.


Figure 6: Initial probability distributions for fine-tuning methods on MELD.


Figure 7: Initial probability distributions for fine-tuning methods on AG News.
# C Single-Objective Warm Start Evaluation
This appendix reports single-objective experiments optimizing the macro-$F1$ score alone. We compare the zero-shot AutoGOAL baseline against three representative warm-start priors, Low, Moderate, and High bias, selected from fourteen candidate configurations grouped by total variation (TV) distance. All priors use the std-only $\beta$ scale, Euclidean distance, and fixed learning rates ($\alpha_{\mathrm{max}}^{+} = 0.05$, $\alpha_{\mathrm{max}}^{-} = 0.02$).
# C.1 Initial Probability Distributions
Figure 9 shows LIAR's initial fine-tuning method distributions under the fourteen meta-learning priors, sorted by TV relative to the uniform baseline. The solid blue bar indicates the baseline; patterned green, orange, and red bars mark the chosen Low, Moderate, and High priors.
# C.2 Performance Results
Table 7 reports our results. We conducted a detailed statistical analysis across six independent runs per configuration on LIAR and SST-2, evaluating performance, convergence time, and reliability. Normality was tested using Shapiro-Wilk, followed by ANOVA (McHugh, 2011) for normally distributed metrics and Friedman tests (Pereira et al., 2015) for nonparametric ones. We report Cohen's $d$ and Cliff's $\delta$ as effect-size measures; power analyses accompany each test in the repository.


Figure 8: Initial probability distributions for fine-tuning methods on DROP (a) and SQuAD (b).


Figure 9: Initial fine-tuning probabilities for LIAR under fourteen priors, sorted by TV. Solid blue denotes the uniform baseline; patterned green, orange, and red denote the Low, Moderate, and High bias priors, respectively.

On LIAR, while none of the warm-start priors significantly outperformed the baseline in peak $F1_{\mathrm{macro}}$ (ANOVA $p = 0.856$, Friedman $p = 0.94$), we observed a significant overall improvement in mean performance across groups (ANOVA $p = 0.005$, Friedman $p = 0.004$). Post-hoc comparisons, however, were not significant after correction, likely due to the limited sample size. More notably, the error ratio, i.e., the share of failed evaluations, dropped dramatically from 0.69 (baseline) to 0.24 (High WS), a difference that is statistically significant (Friedman $p = 0.031$) with a large effect size (Cohen's $d = 3.39$). Convergence-time metrics (TT50, TT75, TT90) also trended lower, with moderate effect sizes, although these differences did not reach statistical significance.

On SST-2, the Mod WS prior achieved the highest max $F1_{\mathrm{macro}}$ (0.941), and the ANOVA test confirmed a significant group effect ($p = 0.031$). The error ratio again showed a significant overall effect (Friedman $p = 0.038$), improving from 0.83 (baseline) to 0.58 (High WS). Convergence-time reductions were most pronounced with the High WS prior, which reached $50\%$ of peak $F1$ four times faster than the baseline (0.41 h vs. 1.69 h). While these improvements showed large effect sizes (e.g., TT50 $d = 0.55$), they were not statistically significant in pairwise tests, most likely due to low sample power ($n = 6$).
<table><tr><td>Dataset</td><td>Config.</td><td>Max F1m</td><td>Mean F1m</td><td>TT50 (h)</td><td>TT75 (h)</td><td>TT90 (h)</td><td>No. Eval</td><td>E. Ratio</td></tr><tr><td rowspan="4">LIAR</td><td>Baseline</td><td>0.248 ±0.018</td><td>0.09 ±0.004</td><td>2.00</td><td>6.38</td><td>8.15</td><td>173</td><td>0.69</td></tr><tr><td>Low WS</td><td>0.253 ±0.006</td><td>0.11 ±0.008</td><td>1.35</td><td>4.10</td><td>9.05</td><td>166</td><td>0.61</td></tr><tr><td>Mod WS</td><td>0.251 ±0.015</td><td>0.11 ±0.008</td><td>1.57</td><td>4.88</td><td>6.43</td><td>165</td><td>0.46</td></tr><tr><td>High WS</td><td>0.247 ±0.006</td><td>0.10 ±0.009</td><td>1.37</td><td>5.42</td><td>10.74</td><td>156</td><td>0.24</td></tr><tr><td rowspan="4">SST2</td><td>Baseline</td><td>0.928 ±0.018</td><td>0.56 ±0.053</td><td>1.69</td><td>2.07</td><td>4.64</td><td>85</td><td>0.83</td></tr><tr><td>Low WS</td><td>0.917 ±0.016</td><td>0.59 ±0.063</td><td>1.28</td><td>2.41</td><td>5.09</td><td>98</td><td>0.80</td></tr><tr><td>Mod WS</td><td>0.941 ±0.004</td><td>0.56 ±0.064</td><td>0.70</td><td>3.88</td><td>5.21</td><td>55</td><td>0.69</td></tr><tr><td>High WS</td><td>0.932 ±0.002</td><td>0.56 ±0.058</td><td>0.41</td><td>0.41</td><td>2.23</td><td>58</td><td>0.58</td></tr></table>
Table 7: Overview of XAutoLM performance when optimising $F1_{macro}$ for LIAR and SST-2. Results are averaged over six runs with different seeds. 'Max $F1_{m}$' and 'Mean $F1_{m}$' report the mean and standard deviation over runs; 'TT50', 'TT75', and 'TT90' report the average time to reach 50%, 75%, and 90% of $F1_{m}$; and 'No. Eval' and 'E. Ratio' indicate the average number of pipeline evaluations and the ratio of those evaluations that were errors.

In summary, warm-start priors consistently yielded practical benefits in convergence speed and robustness. While not all improvements were statistically significant, as expected under a small-sample regime, our analysis shows that key metrics, such as the error ratio and mean F1 on LIAR and max F1 on SST-2, do reach significance. Full results, post-hoc comparisons, and power analyses are available in our open-source repository.
# D Pareto Front Visualizations
Figure 10 presents the Pareto fronts obtained on each benchmark under the zero-shot baseline and three representative warm-start bias levels (Low, Moderate, High).

Across all datasets, warm-start priors shift the search toward regions that often dominate zero-shot pipelines in both evaluation time (ET) and task performance ($F1_{\mathrm{macro}}$ or $F1$). Points that lie to the left of or above the baseline front dominate the baseline in at least one objective. In most cases, WS solutions (e.g., High WS - Median, Mod WS - LIAR) simultaneously improve upon the baseline's ET and $F1_{\mathrm{macro}}$, indicating superior pipelines. Below, we discuss notable observations by dataset.
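As a small illustration of the dominance criterion used in these plots (lower evaluation time and higher $F1$ are better); the helper name is ours, not part of the framework.

```python
def dominates(a: tuple[float, float], b: tuple[float, float]) -> bool:
    """Return True if pipeline a = (et, f1) Pareto-dominates b.

    Dominance requires being at least as good in both objectives and
    strictly better in at least one of them.
    """
    et_a, f1_a = a
    et_b, f1_b = b
    return (et_a <= et_b and f1_a >= f1_b) and (et_a < et_b or f1_a > f1_b)
```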
LIAR. High-bias priors calibrated on LIAR produce up to $40\%$ of pipelines that dominate the baseline, reducing error rates by roughly sevenfold (cf. Table 5). Due to the substantial meta-feature similarity between LIAR and MELD (Figure 3), both tasks see rapid convergence to high-$F1_{\mathrm{macro}}$ regions.

SST-2. With fewer closely related experiences, Moderate bias yields the best trade-offs, uncovering pipelines that match or slightly exceed baseline $F1_{\mathrm{macro}}$ in less time, demonstrating robustness against negative transfer.

MELD. Figure 10c shows how MELD, like LIAR, sees numerous WS-discovered solutions outclassing the baseline. These configurations often exploit shared meta-features between MELD and LIAR (see Figure 3), culminating in faster convergence and higher accuracy with fewer errors during the search. Mirroring LIAR, High WS - LIAR dominates, reducing the error ratio sevenfold and almost reaching a $50\%$ winning ratio (Figure 1).

AG News. Figure 10d shows that while AG News has only moderate overlap with other tasks, WS still yields solutions that meet or beat the baseline's time-accuracy trade-offs. Notably, Moderate- and High-bias configurations reduce error rates (see Table 5 in the main text), suggesting that historical knowledge, even if only partially relevant, helps prune obviously unproductive hyperparameter regions.

DROP and SQuAD. For QA, High-bias priors achieve dramatic gains on SQuAD, raising $F1$ from 0.34 to 0.89 and cutting mean $ET$ by $3\times$. On DROP, Moderate and High priors both improve $F1$ and reduce evaluation time, confirming cross-family transfer efficacy (Table 6).


Figure 10: Comparison of Pareto fronts for the zero-shot baseline (solid blue line) and warm-start priors at Low (green), Moderate (orange), and High (red) bias levels. Each point plots $(ET, F1_{\text{macro}})$ for classification tasks (a-d) or $(ET, F1)$ for QA tasks (e-f). Points to the left of or above the baseline outperform the zero-shot Pareto front.
EMNLP/2025/XAutoLM_ Efficient Fine-Tuning of Language Models via Meta-Learning and AutoML/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5034b2d204a1ca9d15cf21c781c5c543f41dd7ba4d439a3fcdaf15debca2a8cb
size 1116407
EMNLP/2025/XAutoLM_ Efficient Fine-Tuning of Language Models via Meta-Learning and AutoML/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:379cbb47d8bdd2415135e5ba9824daa3cb045a3798d41c8f14c6b24f19a0c7a0
size 599501
EMNLP/2025/XLQA_ A Benchmark for Locale-Aware Multilingual Open-Domain Question Answering/026822b8-600b-4f23-90f5-84fc96490f40_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:65968e2412c0b0466b1995bc79f20ed98e1e8764214c3e0a0c08a72629110b5e
size 95345
EMNLP/2025/XLQA_ A Benchmark for Locale-Aware Multilingual Open-Domain Question Answering/026822b8-600b-4f23-90f5-84fc96490f40_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6c44de1edc7bf75a74bee9a8b25bddfa72ddcdd18e7d1608be62be21a335ddc5
size 115942
EMNLP/2025/XLQA_ A Benchmark for Locale-Aware Multilingual Open-Domain Question Answering/026822b8-600b-4f23-90f5-84fc96490f40_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6d2f60c6972390f6d049e802b3f015eb75573c9f7267fa4103d8d1d6efc913db
size 508724
EMNLP/2025/XLQA_ A Benchmark for Locale-Aware Multilingual Open-Domain Question Answering/full.md
ADDED
@@ -0,0 +1,448 @@
# XLQA: A Benchmark for Locale-Aware Multilingual Open-Domain Question Answering
Keon-Woo Roh$^{1}$, Yeong-Joon Ju$^{1}$, Seong-Whan Lee$^{1}$

$^{1}$Department of Artificial Intelligence, Korea University

{ro_keonwoo, yj_ju, sw.lee}@korea.ac.kr

# Abstract

Large Language Models (LLMs) have shown significant progress in Open-Domain Question Answering (ODQA), yet most evaluations focus on English and assume locale-invariant answers across languages. This assumption neglects the cultural and regional variations that affect question understanding and answering, leading to biased evaluation in multilingual benchmarks. To address these limitations, we introduce XLQA, a novel benchmark explicitly designed for locale-sensitive multilingual ODQA. XLQA contains 3,000 English seed questions expanded to eight languages, with careful filtering for semantic consistency and human-verified annotations distinguishing locale-invariant and locale-sensitive cases. Our evaluation of five state-of-the-art multilingual LLMs reveals notable failures on locale-sensitive questions, exposing gaps between English and other languages due to a lack of locale-grounded knowledge. We provide a systematic framework and scalable methodology for assessing multilingual QA under diverse cultural contexts, offering a critical resource to advance the real-world applicability of multilingual ODQA systems. Our findings suggest that disparities in training-data distribution contribute to differences in both linguistic competence and locale-awareness across models. https://github.com/ro-ko/XLQA
# 1 Introduction
Open-domain question answering (ODQA) aims to generate accurate, natural-language answers to user queries without explicit domain constraints or provided context (Chen et al., 2017; Karpukhin et al., 2020). Recently, large language models (LLMs) (Brown et al., 2020; Anil et al., 2023; Workshop et al., 2022) have driven significant advances in ODQA by generating correct and natural answers. Despite these advances, most efforts have focused on English, leaving multilingual capabilities relatively underexplored. This gap underscores the need for multilingual ODQA benchmarks that assess performance across languages (Maxutov et al., 2024).


Figure 1: Knowledge conflict in multilingual ODQA. Although all versions of the question aim to ask how long it took to build the "Twin Towers", different languages elicit different answers based on locale-variant understanding. While English and Arabic refer to the World Trade Center (11 years), Korean and Chinese interpret "Twin Towers" as the LG Twin Towers and Tianjin IFC, respectively.

To evaluate multilingual ODQA systems, existing benchmarks such as MLQA (Lewis et al., 2020), MKQA (Longpre et al., 2021), and TyDiQA (Clark et al., 2020) are typically constructed by translating or aligning parallel questions across multiple languages. These benchmarks make the locale-agnostic assumption that both the meaning of a question and its correct answer remain constant across linguistic boundaries. However, this assumption overlooks variations in meaning that arise naturally from distinct cultural or regional contexts (Lin et al., 2021; Liu et al., 2024; Zhang et al., 2023).

Recent benchmarks such as CaLMQA (Arora et al., 2025), NativQA (Hasan et al., 2025b), and BLEnD (Myung et al., 2024) attempt to overcome this limitation by constructing culturally grounded questions independently for each language. While these approaches provide valuable insights into culture-specific reasoning, they do not directly ensure cross-lingual consistency, making systematic comparison across languages more challenging.

This issue introduces evaluation bias (Talat et al., 2022; Woo et al., 2023) by penalizing responses that are correct within specific regional or cultural contexts. For instance, as illustrated in Fig. 1, the answer to the question "How long did it take the Twin Towers to be built?" differs depending on which entity the question refers to: the World Trade Center in the U.S. or the LG Twin Towers in South Korea. A multilingual question requires locale-variant references that arise from differing cultural contexts and background knowledge, not merely translated answers. In addition, relying on naive translation to construct multilingual benchmarks risks semantic drift, where subtle shifts in meaning occur due to inadequate contextual grounding (Yu et al., 2023). While human annotation can mitigate this drift, it is costly, labor-intensive, and difficult to scale across many languages and cultures (Pandey et al., 2022).

To address these challenges, we propose XLQA, a benchmark explicitly constructed to evaluate multilingual ODQA systems under locale-sensitive conditions. XLQA consists of 3,000 seed questions in English, each paired with a reference answer and language-specific supporting evidence. These questions are extended to eight languages (English, Korean, Arabic, Hebrew, Japanese, Russian, Vietnamese, and Simplified Chinese), resulting in 24,000 high-quality evaluation items. We design XLQA to assess whether multilingual ODQA systems can handle locale-sensitive variation by explicitly distinguishing between two types of questions: those whose correct answers remain consistent across languages (locale-invariant), and those whose answers vary depending on regional or linguistic context (locale-sensitive).

To construct this benchmark at scale, we apply a back-translation-based filtering method to identify and remove translations that exhibit potential semantic inconsistencies. We then generate locale-aware answers for each semantically consistent multilingual question by producing responses grounded in language-specific evidence curated for each locale with an LLM. Generated answers that semantically differ from the original English answer are categorized as potentially locale-sensitive questions. Human annotators examine each candidate instance to verify the answer's correctness and the relevance of the supporting evidence. This approach enables scalable multilingual QA dataset creation with limited human involvement, ensuring quality through selective verification rather than full manual annotation.

To demonstrate the effectiveness of this pipeline, we evaluate five multilingual LLMs on our benchmark, namely GPT-4.1 (Achiam et al., 2023), Qwen-3 (Zheng et al., 2025), Gemma-3 (Team et al., 2025), LLaMA-3.1 (Grattafori et al., 2024), and EXAONE (Research et al., 2024), under standard evaluation metrics, including exact match and F1 score. Our analysis reveals that, despite strong zero-shot and multilingual capabilities, these models frequently fail to produce appropriate answers to locale-sensitive questions. We observe differences in both language proficiency and locale-specific knowledge across models, shaped by the distribution of language data used during training. These findings highlight the limitations of existing multilingual QA benchmarks and underscore the importance of explicitly modeling cultural context in evaluation. We summarize our contributions as follows:

- We introduce the first systematic framework for evaluating locale-aware correctness in multilingual QA, directly addressing the cultural insensitivity and English-centric assumptions embedded in prior benchmarks.
- We propose a scalable method for identifying and validating questions whose correct answers vary across regions, producing a benchmark of 3,000 high-quality question-answer-evidence triples annotated for locale sensitivity.
- We provide empirical evidence that current multilingual LLMs struggle with locale-grounded question answering, revealing a critical gap in their real-world applicability.
# 2 Related Work
# 2.1 Multilingual ODQA Benchmarks
In recent years, numerous multilingual question answering (QA) benchmarks have been proposed to evaluate the performance of multilingual language models. Prominent examples include MLQA (Lewis et al., 2020), XQuAD (Artetxe et al., 2020), TyDiQA (Clark et al., 2020), and MKQA (Longpre et al., 2021), which are widely used to compare model performance across different languages.


Figure 2: The overall pipeline for constructing the XLQA benchmark. The process consists of three stages: (1) Multilingual Question Generation generates multilingual questions based on seed questions from existing QA datasets; (2) Locale-Aware Answer Generation uses an LLM to generate locale-aware answers; (3) Human Verification verifies the answers against supporting evidence. The output is a high-quality, locale-aware multilingual QA dataset.
MLQA and XQuAD are constructed by translating English question-answer pairs into multiple target languages, and rely on the assumption that the translated versions are semantically equivalent to the original. This approach enables direct comparison across languages but may overlook subtle linguistic or cultural differences that affect answer validity. In contrast, TyDiQA enhances linguistic diversity by collecting questions written natively in each language by fluent speakers, rather than relying on translation. However, it still assumes a single ground-truth answer per question within each language, potentially limiting its ability to capture within-language ambiguity or region-specific variation. MKQA takes a different approach by sourcing questions from anonymized Google Assistant logs, reflecting more natural, real-world user queries. These questions are then manually translated into 26 languages for open-domain question answering. While these benchmarks provide a foundation for measuring multilingual capabilities and crosslingual consistency, they largely focus on surface-level correctness and lexical alignment. As such, they fall short of evaluating model performance in scenarios that require the understanding of cultural context or locale-specific knowledge.
# 2.2 Multilingual QA Evaluation Bias and Fairness
Recent works (Singh et al., 2024; Hasan et al., 2025a) have examined these issues from multiple perspectives. Singh et al. (2024) evaluate language models across culturally diverse multiple-choice questions. They show that performance varies substantially across languages and regions, indicating potential cultural bias. Hasan et al. (2025a) introduce a dataset of naturally occurring, culturally aligned queries in multiple languages. Their findings highlight the limitations of translation-based benchmarks in capturing region-specific information needs.

Bias is also observed in model behavior across languages with differing resource levels, particularly in the form of stereotypical associations related to gender, profession, or ethnicity. Buscemi et al. (2025) propose an automated evaluation framework to assess such social biases across both high- and low-resource languages. The study finds that these biases, such as associating certain professions more frequently with specific genders, tend to be more pronounced in low-resource settings, where training data is sparser and less balanced.

Similarly, Zulaika and Saralegi (2025) adapt the English-centric BBQ benchmark to Basque in order to investigate bias propagation in a typologically distant language. Their findings reveal that common bias-mitigation strategies developed for English, such as data augmentation or counterfactual training, often fail to generalize effectively to underrepresented languages, underscoring the need for culturally and linguistically tailored approaches. These studies point to the need for evaluation methods that distinguish between culturally invariant and culturally dependent questions, and that reflect the diversity of real-world language use beyond high-resource settings.
# 2.3 Evaluation for LLM-as-judges
LLM-as-judge is a generative evaluator paradigm where LLMs are trained to produce an evaluation (natural language explanation and judgment) given the original user input, evaluation protocol (rules and criteria for evaluation), and model responses as input. JudgeLM (Zhu et al., 2025) formalizes this approach as a generative evaluation framework and demonstrates that LLM-based judges can approximate human evaluations in tasks such as reasoning and factual correctness. PandaLM (Wang et al., 2024) further investigates the reliability and robustness of LLM-based evaluators by comparing their preferences across model outputs with those of human annotators.
# 3 XLQA Dataset
To rigorously evaluate multilingual ODQA in locale-sensitive contexts, we introduce XLQA, a new benchmark constructed through our multi-stage pipeline. This pipeline consists of three steps: multilingual question generation, locale-aware answer generation, and human verification, as illustrated in Fig. 2.
# 3.1 Step 1: Multilingual Question Generation
We begin by collecting high-quality English seed questions from the test sets of existing ODQA benchmarks, such as MKQA (Longpre et al., 2021), MLQA (Lewis et al., 2020), and HotpotQA (Yang et al., 2018), to ensure alignment with our evaluation objectives. To refine the seed pool, we first remove duplicate entries based on an exact match of either the question or the answer. We then filter out unanswerable questions and those lacking a reference answer, as such items prevent meaningful comparison of locale-sensitive responses. This filtering process excludes $28.4\%$ of the initial seed questions.

For the refined seed questions, we generate multilingual questions translated into the target languages using GPT-4.1 as an Oracle Language Model (OracleLM), a theoretical upper-bound model that is assumed to know the correct answer, often used to estimate performance ceilings and analyze the gap between idealized and real-world behavior (Achiam et al., 2023; Chen et al., 2024). GPT-4.1 demonstrates strong translation quality and contextual understanding, making it a suitable choice for ensuring the reliability of the generated multilingual questions. To ensure semantic consistency across the translated questions, we apply a back-translation filtering step. Each translated question is first back-translated into English. The back-translated version is then compared against the original English question using the LLM-as-judge framework: GPT-4.1 is prompted to determine whether the two questions are semantically equivalent, providing a binary "yes/no" judgment. If any of the eight language translations is judged inconsistent (i.e., the model outputs "no"), the entire question is discarded from the dataset. By discarding questions with inconsistent translations, this back-translation filtering step eliminates translation artifacts and mitigates cross-lingual meaning drift, as sketched below.
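A minimal sketch of this filtering loop, assuming generic `translate_to_en` and `judge_equivalent` helpers supplied by the caller (e.g., wrappers around a chat-completion API); the names and structure are ours, not the paper's released code.

```python
from typing import Callable

def keep_question(question_en: str, translations: dict[str, str],
                  translate_to_en: Callable[[str], str],
                  judge_equivalent: Callable[[str, str], bool]) -> bool:
    """Back-translation consistency filter (illustrative reading of Section 3.1).

    Each translation is back-translated into English and an LLM judge decides
    whether it still matches the original question; a single failure discards
    the whole item across all languages.
    """
    for lang, translated in translations.items():
        back_translated = translate_to_en(translated)
        if not judge_equivalent(question_en, back_translated):
            return False  # semantic drift detected for this language
    return True
```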
# 3.2 Step 2: Locale-Aware Answer Generation
To construct QA pairs that capture locale-specific variation, we generate candidate answers for the multilingual questions obtained in the previous step. For each input question, GPT-4.1 is prompted to generate an answer that reflects the locale associated with the language in which the question is written. For questions that are not sensitive to locale, the model is prompted to provide a general, culturally neutral answer. We leverage a retrieval-augmented generation (RAG) framework in which GPT-4.1 is connected to a web-search component. This setup enables the model to generate answers grounded in verifiable external sources, providing both the response and its corresponding evidence. The retrieval process prioritizes authoritative sources, with a preference for Wikipedia. In cases where relevant information is not found on Wikipedia, the system falls back to reputable news outlets.

As a post-processing step, we discard any QA pairs in which the generated reference lacks a valid URL or does not include reliable source indicators such as the keywords "wikipedia" or "news". This filtering ensures that all retained answers are grounded in verifiable and trustworthy sources, and it offers an efficient alternative to human annotation by enabling scalable, high-quality data generation while maintaining contextual relevance and answer verifiability. A sketch of this evidence filter follows.
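The post-processing check can be approximated as follows; the exact keyword list and URL heuristic are assumptions based on the description above.

```python
from urllib.parse import urlparse

TRUSTED_HINTS = ("wikipedia", "news")  # assumed keyword list from Section 3.2

def has_reliable_evidence(evidence_url: str | None) -> bool:
    """Keep a QA pair only if its evidence carries a valid URL from a trusted source."""
    if not evidence_url:
        return False
    parsed = urlparse(evidence_url)
    if parsed.scheme not in ("http", "https") or not parsed.netloc:
        return False
    return any(hint in evidence_url.lower() for hint in TRUSTED_HINTS)
```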
# 3.3 Step 3: Human Verification
All candidate triples flagged for answer conflict are subjected to human verification. Annotators are provided with the question, answer, and supporting evidence for each language and are asked to determine whether the answer is correct and supported by the evidence. This process yields a high-quality set of QA-evidence triples, each labeled as either locale-invariant or locale-sensitive. To ensure consistency and reduce annotation noise, we adopt a majority-voting scheme across three annotators per instance (see the sketch below). Only instances where at least two annotators agree on both correctness and sensitivity labels are retained; otherwise, the item is discarded. Statistics on annotator agreement rates after voting are provided in Appendix Table 7.
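A hedged reading of the retention rule as code, assuming each annotator supplies a (correctness, locale-sensitivity) pair of judgments; the paper does not publish this logic, so the interpretation is ours.

```python
from collections import Counter

def retain_instance(votes: list[tuple[bool, bool]]) -> bool:
    """Keep an instance only when at least two of the three annotators give
    the same (correctness, locale-sensitivity) judgment; that majority pair
    then serves as the final label. Illustrative reading of Section 3.3."""
    assert len(votes) == 3
    _, count = Counter(votes).most_common(1)[0]
    return count >= 2
```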
# 4 Dataset Analysis
# 4.1 Dataset Statistics
Our benchmark consists of 3,000 question-answer-evidence triples across eight languages: English, Korean, Arabic, Hebrew, Japanese, Russian, Vietnamese, and Simplified Chinese. Each English-origin question is translated into the target languages and paired with answers and evidential support adapted to the cultural or linguistic context of the target locale.

On average, questions contain 17-40 tokens depending on the language, while answers remain short (4-6 tokens). A total of 24,000 QA instances were created, including 3,000 in English and 21,000 across the seven other languages.
# 4.2 Consistency Filtering Results
To ensure semantic consistency across translations, we applied a back-translation-based filtering pipeline. QA pairs with substantial semantic shifts, such as changes in named entities, factual scope, or temporal modifiers, were flagged and removed. In total, $10.8\%$ of the generated multilingual instances were discarded through this process.

We observed that the majority of the filtered instances involved mistranslations of culturally specific terms or reinterpretations of ambiguous expressions that altered the intended meaning. These cases were particularly prevalent in Arabic and Hebrew, where semantic drift often resulted from the incorrect rendering of proper nouns and idiomatic language. Table 5 summarizes the number of discarded instances per language after the consistency filtering process.
# 4.3 Conflict Detection
A conflict is defined as a case where at least one language yields an answer that is semantically inconsistent with the English reference, under the assumption that such variation stems from regional knowledge or interpretation. For each question, we collected answers across all languages and compared them using string normalization and embedding-based semantic similarity (see the sketch below). Questions exhibiting divergence in meaning, rather than in surface expression, were manually validated as locale-sensitive. Among the 3,000 source questions, 2,356 (73.9%) were categorized as locale-sensitive, based on the presence of conflicting answers in at least one language. Table 5 presents the distribution of conflicts across languages. Arabic and Hebrew displayed the highest proportion of conflicts, while Japanese and Vietnamese showed comparatively lower divergence.
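For concreteness, a sketch of the comparison step, using a caller-supplied sentence-embedding function as a stand-in for whichever encoder was actually used; the similarity threshold is an assumption.

```python
import re
from typing import Callable

import numpy as np

def normalize(answer: str) -> str:
    """Lowercase and strip punctuation/extra whitespace before comparison."""
    return re.sub(r"[^\w\s]", "", answer.lower()).strip()

def is_conflict(answer_en: str, answer_xx: str,
                embed: Callable[[str], np.ndarray],
                threshold: float = 0.8) -> bool:
    """Flag a locale-specific answer as conflicting with the English reference
    when neither the normalized strings nor the embedding cosine similarity match."""
    if normalize(answer_en) == normalize(answer_xx):
        return False
    e1, e2 = embed(answer_en), embed(answer_xx)
    cosine = float(e1 @ e2 / (np.linalg.norm(e1) * np.linalg.norm(e2)))
    return cosine < threshold
```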
# 5 Benchmark Evaluation
We conduct a series of experiments to evaluate multilingual LLM performance on our locale-aware QA dataset. Our goal is to assess how well current models handle both locale-invariant and locale-sensitive questions, and to quantify the limitations of existing evaluation protocols when applied to culturally or regionally diverse inputs.
# 5.1 Experimental Setup
We evaluate five widely used large language models with multilingual capabilities: GPT-4.1, Qwen 3, Gemma 3, LLaMA 3.1, and EXAONE. These models vary in architecture, size, and pretraining corpora, representing a broad range of capabilities in multilingual understanding and generation.

All models are evaluated in a zero-shot QA setting without fine-tuning. For each QA pair, the model generates an answer using a consistent prompting format adapted for the language. We apply two evaluation metrics:

- Exact Match (EM): A binary metric that assigns 1 if the predicted answer exactly matches any of the reference answers, and 0 otherwise:

$$
\mathrm{EM} = \begin{cases} 1, & \text{if prediction = reference} \\ 0, & \text{otherwise} \end{cases}
$$

- F1 Score: Measures the token-level overlap between the predicted and reference answers, computed as the harmonic mean of precision and recall:

$$
\text{Precision} = \frac{|\text{Prediction} \cap \text{Reference}|}{|\text{Prediction}|} \tag{1}
$$

$$
\text{Recall} = \frac{|\text{Prediction} \cap \text{Reference}|}{|\text{Reference}|} \tag{2}
$$

$$
\mathrm{F1} = \frac{2 \cdot \text{Precision} \cdot \text{Recall}}{\text{Precision} + \text{Recall}} \tag{3}
$$

We evaluate both locale-invariant and locale-aware settings.
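A standard token-level implementation of these metrics is sketched below; whitespace tokenization and the normalization details are our assumptions, since the paper does not specify its exact scorer.

```python
from collections import Counter

def exact_match(prediction: str, references: list[str]) -> int:
    """EM: 1 if the prediction exactly matches any reference, else 0."""
    return int(any(prediction.strip() == r.strip() for r in references))

def f1_score(prediction: str, reference: str) -> float:
    """Token-level F1: harmonic mean of precision and recall over the
    multiset overlap of whitespace tokens (Eqs. 1-3)."""
    pred_tokens, ref_tokens = prediction.split(), reference.split()
    overlap = sum((Counter(pred_tokens) & Counter(ref_tokens)).values())
    if overlap == 0:
        return 0.0
    precision = overlap / len(pred_tokens)
    recall = overlap / len(ref_tokens)
    return 2 * precision * recall / (precision + recall)
```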
<table><tr><td rowspan="2">Lang</td><td colspan="2">Oracle LM</td><td colspan="2">Gemma3 12B</td><td colspan="2">Qwen3 14B</td><td colspan="2">LLaMA3.1 8B</td><td colspan="2">EXAONE 7.8B</td></tr><tr><td>EM</td><td>F1</td><td>EM</td><td>F1</td><td>EM</td><td>F1</td><td>EM</td><td>F1</td><td>EM</td><td>F1</td></tr><tr><td>en</td><td>89.11</td><td>90.97</td><td>43.26</td><td>52.68</td><td>40.73</td><td>49.43</td><td>40.38</td><td>50.56</td><td>31.44</td><td>39.64</td></tr><tr><td>ar</td><td>87.86</td><td>90.05</td><td>18.54</td><td>23.62</td><td>11.83</td><td>19.30</td><td>8.53</td><td>16.92</td><td>3.98</td><td>6.04</td></tr><tr><td>he</td><td>88.30</td><td>90.46</td><td>20.05</td><td>24.83</td><td>11.04</td><td>16.08</td><td>11.86</td><td>16.60</td><td>5.52</td><td>7.20</td></tr><tr><td>ja</td><td>88.45</td><td>92.50</td><td>22.81</td><td>45.10</td><td>19.74</td><td>44.03</td><td>9.10</td><td>37.73</td><td>7.34</td><td>26.22</td></tr><tr><td>ru</td><td>87.83</td><td>89.54</td><td>28.52</td><td>35.20</td><td>17.67</td><td>27.97</td><td>14.53</td><td>24.18</td><td>7.41</td><td>9.91</td></tr><tr><td>ko</td><td>86.73</td><td>88.29</td><td>22.18</td><td>26.56</td><td>15.44</td><td>19.91</td><td>11.55</td><td>15.68</td><td>15.81</td><td>20.32</td></tr><tr><td>zh_cn</td><td>89.68</td><td>93.41</td><td>16.22</td><td>37.91</td><td>26.39</td><td>47.57</td><td>11.58</td><td>36.48</td><td>7.66</td><td>25.22</td></tr><tr><td>vi</td><td>89.39</td><td>91.19</td><td>36.55</td><td>44.77</td><td>26.83</td><td>39.34</td><td>26.45</td><td>38.38</td><td>10.95</td><td>14.70</td></tr><tr><td>Avg.</td><td>88.42</td><td>90.80</td><td>26.02</td><td>36.33</td><td>21.21</td><td>32.95</td><td>16.75</td><td>29.57</td><td>11.26</td><td>18.65</td></tr></table>
Table 1: Results of the base models on the XLQA benchmark using EM and F1 scores.
<table><tr><td rowspan="2">Lang</td><td colspan="4">GEMMA3 12B</td><td colspan="4">QWEN3 14B</td><td colspan="4">EXAONE 7.8B</td></tr><tr><td>Non-Conflict EM</td><td>F1</td><td>Least-Conflict EM</td><td>F1</td><td>Non-Conflict EM</td><td>F1</td><td>Least-Conflict EM</td><td>F1</td><td>Non-Conflict EM</td><td>F1</td><td>Least-Conflict EM</td><td>F1</td></tr><tr><td>en</td><td>59.09</td><td>72.25</td><td>37.69</td><td>45.79</td><td>64.02</td><td>75.36</td><td>32.51</td><td>40.28</td><td>53.67</td><td>65.28</td><td>23.60</td><td>30.59</td></tr><tr><td>ar</td><td>37.06</td><td>46.26</td><td>12.01</td><td>15.64</td><td>27.80</td><td>40.54</td><td>6.20</td><td>11.80</td><td>8.90</td><td>12.21</td><td>2.25</td><td>3.86</td></tr><tr><td>he</td><td>38.63</td><td>47.18</td><td>13.50</td><td>16.94</td><td>23.71</td><td>32.16</td><td>6.58</td><td>10.41</td><td>9.63</td><td>12.49</td><td>4.07</td><td>5.33</td></tr><tr><td>ja</td><td>41.16</td><td>67.03</td><td>16.34</td><td>37.36</td><td>41.03</td><td>65.30</td><td>12.22</td><td>36.53</td><td>15.28</td><td>38.24</td><td>4.54</td><td>21.98</td></tr><tr><td>ru</td><td>47.41</td><td>57.02</td><td>21.86</td><td>27.50</td><td>30.69</td><td>49.38</td><td>13.07</td><td>20.42</td><td>10.95</td><td>15.48</td><td>6.15</td><td>7.95</td></tr><tr><td>ko</td><td>39.35</td><td>46.06</td><td>16.13</td><td>19.69</td><td>31.05</td><td>38.11</td><td>9.93</td><td>13.49</td><td>30.93</td><td>38.48</td><td>10.48</td><td>13.91</td></tr><tr><td>zh_cn</td><td>27.32</td><td>56.12</td><td>12.31</td><td>31.49</td><td>50.54</td><td>71.87</td><td>17.87</td><td>39.00</td><td>15.16</td><td>37.15</td><td>5.01</td><td>21.01</td></tr><tr><td>vi</td><td>51.62</td><td>63.79</td><td>31.24</td><td>38.07</td><td>41.40</td><td>61.34</td><td>21.69</td><td>31.58</td><td>18.05</td><td>24.30</td><td>8.45</td><td>11.31</td></tr><tr><td>Average</td><td>42.70</td><td>56.96</td><td>20.13</td><td>29.06</td><td>38.78</td><td>54.26</td><td>15.01</td><td>25.44</td><td>20.32</td><td>30.45</td><td>8.07</td><td>14.49</td></tr></table>
|
| 229 |
+
|
| 230 |
+
Table 2: EM and F1 scores of GEMMA3 12B, QWEN3 14B, and EXAONE 7.8B under different conflict levels.
|
| 231 |
+
|
| 232 |
+
It is computed as the harmonic mean of precision and recall: F1 Score measures the token-level overlap between the prediction and the reference answer. It is computed as the harmonic mean of precision and recall:
|
| 233 |
+
|
| 234 |
+
$$
|
| 235 |
+
\text {P r e c i s i o n} = \frac {\left| \text {P r e d i c t i o n} \cap \text {R e f e r e n c e} \right|}{\left| \text {P r e d i c t i o n} \right|} \tag {1}
|
| 236 |
+
$$
|
| 237 |
+
|
| 238 |
+
$$
|
| 239 |
+
\text {R e c a l l} = \frac {\left| \text {P r e d i c t i o n} \cap \text {R e f e r e n c e} \right|}{\left| \text {R e f e r e n c e} \right|} \tag {2}
|
| 240 |
+
$$
|
| 241 |
+
|
| 242 |
+
$$
|
| 243 |
+
\mathrm{F1} = \frac{2 \cdot \text{Precision} \cdot \text{Recall}}{\text{Precision} + \text{Recall}} \tag{3}
|
| 244 |
+
$$
|
| 245 |
+
|
| 246 |
+
We evaluate both locale-invariant and locale-aware settings.
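As a concrete illustration of Equations (1)-(3), below is a minimal Python sketch of EM and F1 scoring; the lowercasing and whitespace tokenization are simplifying assumptions rather than the exact normalization used in the benchmark.

```python
from collections import Counter

def normalize(text: str) -> list[str]:
    # Simplified normalization: lowercase and whitespace tokenization (assumption).
    return text.lower().split()

def exact_match(prediction: str, reference: str) -> float:
    # EM is 1 if the normalized token sequences are identical, else 0.
    return float(normalize(prediction) == normalize(reference))

def f1_score(prediction: str, reference: str) -> float:
    pred_tokens, ref_tokens = normalize(prediction), normalize(reference)
    # Token-level overlap as a multiset intersection, Eqs. (1)-(2).
    common = Counter(pred_tokens) & Counter(ref_tokens)
    overlap = sum(common.values())
    if overlap == 0:
        return 0.0
    precision = overlap / len(pred_tokens)
    recall = overlap / len(ref_tokens)
    # Harmonic mean of precision and recall, Eq. (3).
    return 2 * precision * recall / (precision + recall)

print(exact_match("Eiffel Tower", "the Eiffel Tower"))  # 0.0
print(f1_score("Eiffel Tower", "the Eiffel Tower"))     # 0.8
```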
|
| 247 |
+
|
| 248 |
+
# 5.2 Main Results
|
| 249 |
+
|
| 250 |
+
(1) Performance gap between English and other languages. Table 1 presents the performance of
|
| 251 |
+
|
| 252 |
+
five LLMs on the XLQA benchmark. While English achieves the highest scores across all models, performance on other languages drops, particularly for those involving culturally diverse or underrepresented regions such as Arabic, Hebrew, Korean, and Vietnamese. This suggests that despite multilingual pretraining, current models struggle to generalize locale-aware reasoning beyond high-resource languages like English.
|
| 253 |
+
|
| 254 |
+
(2) Performance degradation on culturally sensitive questions. Table 2 offers a more granular view by separating questions into non-conflict and least-conflict subsets. Here, we define a question as exhibiting least conflict when at least one of the language-specific responses differs semantically from all other responses. This categorization captures cases where locale-sensitive variation arises across languages, allowing us to directly measure the challenge posed by culturally grounded knowledge. The results show a consistent and substantial
|
| 255 |
+
|
| 256 |
+
performance drop across all models when faced with locale-sensitive questions. This highlights that answering such questions effectively requires not only understanding the language but also retaining culturally grounded knowledge specific to each region. Interestingly, models trained with a regional focus tend to perform better on conflict questions in their respective languages. For example, EXAONE achieves the highest conflict F1 score on Korean and QWEN3 on Chinese. While exact language-wise pretraining proportions are not publicly disclosed, these results suggest that higher exposure to specific locale-language data during pretraining enables models to better handle culturally nuanced inputs in that region.
|
| 257 |
+
|
| 258 |
+
# 5.3 Prompt Sensitivity
|
| 259 |
+
|
| 260 |
+
We examine the impact of prompt design using Qwen3 across two prompt variants: EN (a plain English prompt) and EN-LOC (an English prompt with explicit locale emphasis).
|
| 261 |
+
|
| 262 |
+
Table 4 shows that prompts with explicit locale guidance (EN-LOC) improve accuracy, especially for culturally sensitive languages like Arabic and Korean. However, over-conditioning can sometimes lead to stereotype-driven outputs. While EN-LOC prompts generally improve performance, the degree of improvement varies significantly across languages. The gains are especially pronounced in Japanese (+25.03), Chinese (+17.42), and Korean (+7.58), suggesting that locale-specific grounding is particularly beneficial in languages with strong locale reference frames.
|
| 263 |
+
|
| 264 |
+

|
| 265 |
+
Figure 3: Comparison of translation error rates between naive translation and our back-translation pipeline.
|
| 266 |
+
|
| 267 |
+
# 5.4 Ensuring Semantic Consistency in Multilingual Questions
|
| 268 |
+
|
| 269 |
+
Back-translation-based filtering helps identify and remove mistranslations that may introduce
|
| 270 |
+
|
| 271 |
+
unintended meaning shifts during naive machine translation. As shown in Figure 3, our back-translation pipeline significantly reduces translation error rates across most languages, particularly in Arabic, Hebrew, and Chinese, languages that often exhibit greater semantic divergence from English. By improving the alignment between original and translated questions, this filtering step enhances the overall quality and reliability of locale-sensitive evaluation.
|
| 272 |
+
|
| 273 |
+
# 5.5 Categorization of Conflict-Inducing Questions
|
| 274 |
+
|
| 275 |
+
To better understand the sources of semantic divergence across languages, we manually categorize a subset of conflict-inducing questions based on the nature of the discrepancy observed in answers. This typology enables a more fine-grained analysis of the types of ambiguity and regional variability that arise in multilingual QA.
|
| 276 |
+
|
| 277 |
+
We categorize conflict-inducing questions into four types. These include Entity Conflict, Factual Conflict, Cultural Reference, and Ambiguous Question. Entity Conflict refers to cases where the referent entity varies across locales due to differing popularity or interpretation, such as entertainers or sports figures. Factual Conflict includes questions grounded in historical or statistical facts that may be represented differently depending on regional data sources. Cultural Reference covers instances involving awards, media, or events where local recognition or framing differs. Finally, Ambiguous Question includes vague or broadly interpretable queries that elicit culturally biased or interpretive responses.
|
| 278 |
+
|
| 279 |
+
Table 3 summarizes each conflict type along with representative subtopics, example questions, and the number of instances observed in our annotated subset. Entity-related conflicts were the most frequent, accounting for 1,032 questions, followed by Cultural References and Factual Conflicts. This distribution highlights the significant role of culturally grounded knowledge and localized salience in generating cross-lingual answer variability.
|
| 280 |
+
|
| 281 |
+
# 6 Conclusion
|
| 282 |
+
|
| 283 |
+
In this work, we identify a critical gap in existing multilingual QA benchmarks: the lack of consideration for locale-specific knowledge and culturally valid answer divergence. While prior evaluations assume semantic equivalence and a single correct
|
| 284 |
+
|
| 285 |
+
<table><tr><td>Conflict Type</td><td>Subtopics (Categories)</td><td>Representative Questions</td><td>Conflict Count</td></tr><tr><td>Entity Conflict</td><td>Music, TV actors, Sports players</td><td>Who sang Oh What a Night?, Who played TJ on Head of the Class?, Who is the coach for the Toronto Raptors?</td><td>1032</td></tr><tr><td>Factual Conflict</td><td>Geography, Political history, Team records</td><td>How many states does the Rocky Mountains cover?, When was the last time the Lakers made the playoffs?</td><td>431</td></tr><tr><td>Cultural Reference</td><td>TV show winners, Music awards, Famous media</td><td>Who won America's Got Talent in 2015?, Who has the most Grammys?</td><td>512</td></tr><tr><td>Ambiguous Question</td><td>Religion, Social media, General trivia</td><td>Who wrote the Book of Lamentations?, Who has the most Instagram followers?</td><td>381</td></tr></table>
|
| 286 |
+
|
| 287 |
+
Table 3: Conflict-inducing questions categorized by conflict type, with subtopics and representative examples.
|
| 288 |
+
|
| 289 |
+
<table><tr><td>Lang</td><td>EN</td><td>EN-LOC</td></tr><tr><td>en</td><td>48.03</td><td>49.43</td></tr><tr><td>ko</td><td>12.33</td><td>19.91</td></tr><tr><td>ar</td><td>11.93</td><td>19.30</td></tr><tr><td>he</td><td>16.37</td><td>16.08</td></tr><tr><td>ja</td><td>19.00</td><td>44.03</td></tr><tr><td>ru</td><td>16.41</td><td>27.97</td></tr><tr><td>vi</td><td>33.37</td><td>39.34</td></tr><tr><td>zh-cn</td><td>30.15</td><td>47.57</td></tr><tr><td>Overall</td><td>23.45</td><td>32.95</td></tr></table>
|
| 290 |
+
|
| 291 |
+
Table 4: Performance (F1 score) across languages under different prompting strategies on Qwen3.
|
| 292 |
+
|
| 293 |
+
<table><tr><td>Lang</td><td>Conflicted Answers</td><td>Conflict Rate (%)</td></tr><tr><td>ar</td><td>1471</td><td>46.2%</td></tr><tr><td>he</td><td>1413</td><td>44.3%</td></tr><tr><td>ja</td><td>1044</td><td>32.8%</td></tr><tr><td>ru</td><td>963</td><td>30.2%</td></tr><tr><td>ko</td><td>1188</td><td>37.3%</td></tr><tr><td>zh_cn</td><td>1242</td><td>39.0%</td></tr><tr><td>vi</td><td>909</td><td>28.5%</td></tr><tr><td>At Least One Conflict</td><td>2356</td><td>73.9%</td></tr></table>
|
| 294 |
+
|
| 295 |
+
answer across languages, our analysis shows that this assumption fails in questions involving cultural or regional context. To address this, we propose a method for constructing locale-aware evaluation subsets that allow for valid answer variation across languages. Our approach combines translation consistency checks and prompt-based answer divergence detection to identify culturally sensitive questions. We demonstrate that such questions are not rare, and that standard evaluation protocols may underestimate the capabilities of multilingual models in diverse linguistic settings. This work calls for a shift in multilingual QA evaluation toward frameworks that are not only linguistically fair but
|
| 296 |
+
|
| 297 |
+
Table 5: Language-wise distribution of answer conflicts in the XLQA benchmark.
|
| 298 |
+
|
| 299 |
+
<table><tr><td></td><td>en</td><td>ar</td><td>he</td><td>ja</td><td>ko</td><td>ru</td><td>zh-cn</td><td>vi</td></tr><tr><td>Avg Question Length</td><td>37</td><td>33</td><td>31</td><td>26</td><td>22</td><td>40</td><td>17</td><td>38</td></tr><tr><td>Avg Answer Length</td><td>5</td><td>5</td><td>5</td><td>8</td><td>4</td><td>5</td><td>6</td><td>5</td></tr></table>
|
| 300 |
+
|
| 301 |
+
Table 6: Average question and answer lengths across languages (rounded to nearest integer).
|
| 302 |
+
|
| 303 |
+
also culturally grounded.
|
| 304 |
+
|
| 305 |
+
# Limitations
|
| 306 |
+
|
| 307 |
+
Our evaluation may be inherently bounded by the capabilities of the proprietary large language models (LLMs) accessed via API. Since these models serve as oracle systems for translation and answer generation, their performance imposes an upper bound on the quality and diversity of our data. To mitigate potential issues arising from translation artifacts or inconsistencies, we applied a semantic consistency filtering step using backtranslation and LLM-as-judge comparison to ensure that the generated multilingual questions preserve the meaning of the original seed questions. Additionally, due to computational resource constraints, we were unable to include larger-scale open-source multilingual models that require substantial local infrastructure. To compensate for this limitation, we evaluated a diverse set of models—both proprietary and open-source—covering a range of capabilities and linguistic domains, and conducted all evaluations under a unified framework to ensure comparability. Future work could expand this line of research by integrating scalable open-source multilingual models in controlled environments and broadening the linguistic and regional scope of the evaluation.
|
| 308 |
+
|
| 309 |
+
# 7 Acknowledgment
|
| 310 |
+
|
| 311 |
+
This work was partly supported by the Institute of Information & Communications Technology
|
| 312 |
+
|
| 313 |
+
Planning & Evaluation (IITP) grant funded by the Korea government (MSIT) (Artificial Intelligence Graduate School Program (Korea University) (No. RS-2019-II190079), No. IITP-2025-RS-2024-00436857 (Information Technology Research Center (ITRC))) and Artificial Intelligence Star Fellowship Support Program to Nurture the Best Talents (IITP-2025-RS-2025-02304828)).
|
| 314 |
+
|
| 315 |
+
# References
|
| 316 |
+
|
| 317 |
+
Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, and 1 others. 2023. Gpt-4 technical report. arXiv preprint arXiv:2303.08774.
|
| 318 |
+
Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, and 1 others. 2023. Gemini: A family of highly capable multimodal models. arXiv preprint arXiv:2312.11805.
|
| 319 |
+
Shane Arora, Marzena Karpinska, Hung-Ting Chen, Ipsita Bhattacharjee, Mohit Iyyer, and Eunsol Choi. 2025. CaLMQA: Exploring culturally specific long-form question answering across 23 languages. In Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (ACL), pages 11772-11817.
|
| 320 |
+
Mikel Artetxe, Sebastian Ruder, and Dani Yogatama. 2020. On the cross-lingual transferability of monolingual representations. In Proceedings of the Annual Meeting of the Association for Computational Linguistics (ACL), pages 4623-4637.
|
| 321 |
+
Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter, and 12 others. 2020. Language models are few-shot learners. In Advances in Neural Information Processing Systems (NeurIPS), volume 33, pages 1877-1901.
|
| 322 |
+
Alessio Buscemi, Cédric Lothritz, Sergio Morales, Marcos Gomez-Vazquez, Robert Clarisó, Jordi Cabot, and German Castignani. 2025. Mind the language gap: Automated and augmented evaluation of bias in llms for high-and low-resource languages. arXiv preprint arXiv:2504.18560.
|
| 323 |
+
Danqi Chen, Adam Fisch, Jason Weston, and Antoine Bordes. 2017. Reading Wikipedia to answer open-domain questions. In Proceedings of the Annual Meeting of the Association for Computational Linguistics (ACL), pages 1870-1879.
|
| 324 |
+
|
| 325 |
+
Guiming Hardy Chen, Shunian Chen, Ziche Liu, Feng Jiang, and Benyou Wang. 2024. Humans or LLMs as the judge? a study on judgement bias. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing (EMNLP).
|
| 326 |
+
Jonathan H. Clark, Eunsol Choi, Michael Collins, Dan Garrette, Tom Kwiatkowski, Vitaly Nikolaev, and Jennimaria Palomaki. 2020. TyDi QA: A benchmark for information-seeking question answering in typologically diverse languages. Transactions of the Association for Computational Linguistics (TACL), 8:454-470.
|
| 327 |
+
Aaron Grattafori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, and 1 others. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783.
|
| 328 |
+
Md. Arid Hasan, Maram Hasanain, Fatema Ahmad, Sahinur Rahman Laskar, Sunaya Upadhyay, Vrunda N Sukhadia, Mucahid Kutlu, Shammur Absar Chowdhury, and Firoj Alam. 2025a. Nativqa: Multilingual culturally-aligned natural queries for llms.
|
| 329 |
+
Md. Arid Hasan, Maram Hasanain, Fatema Ahmad, Sahinur Rahman Laskar, Sunaya Upadhyay, Vrunda N Sukhadia, Mucahid Kutlu, Shammur Absar Chowdhury, and Firoj Alam. 2025b. NativQA: Multilingual culturally-aligned natural query for LLMs. In Findings of the Association for Computational Linguistics (ACL), pages 14886-14909.
|
| 330 |
+
Vladimir Karpukhin, Barlas Oguz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 2020. Dense passage retrieval for open-domain question answering. In Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 6769-6781.
|
| 331 |
+
Patrick Lewis, Barlas Oguz, Rudy Rinott, Sebastian Riedel, and Holger Schwenk. 2020. MLQA: Evaluating cross-lingual extractive question answering. In Proceedings of the Annual Meeting of the Association for Computational Linguistics (ACL), pages 7315-7330.
|
| 332 |
+
Xi Victoria Lin and et al. 2021. Calmqa: Exploring culturally specific long-form question answering across 23 languages. In Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1829-1841.
|
| 333 |
+
Chen Liu, Fajri Koto, Timothy Baldwin, and Iryna Gurevych. 2024. Are multilingual llms culturally-diverse reasoners? an investigation into multicultural proverbs and sayings. In Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL), pages 2016-2039.
|
| 334 |
+
Shayne Longpre, Yi Lu, and Joachim Daiber. 2021. MKQA: A linguistically diverse benchmark for multilingual open domain question answering. Transac-
|
| 335 |
+
|
| 336 |
+
tions of the Association for Computational Linguistics (TACL), 9:1389-1406.
|
| 337 |
+
Akylbek Maxutov, Ayan Myrzakhmet, and Pavel Braslavski. 2024. Do LLMs speak Kazakh? a pilot evaluation of seven models. In Proceedings of the First Workshop on Natural Language Processing for Turkic Languages (SIGTURK), pages 81-91.
|
| 338 |
+
Junho Myung, Nayeon Lee, Yi Zhou, Jiho Jin, Rifki Putri, Dimosthenis Antypas, Hsuvas Borkakoty, Eunsu Kim, Carla Perez-Almendros, Abinew Ali Ayele, and 1 others. 2024. Blend: A benchmark for llms on everyday knowledge in diverse cultures and languages. In Advances in Neural Information Processing Systems (NeurIPS), volume 37, pages 78104-78146.
|
| 339 |
+
Rahul Pandey, Hemant Purohit, Carlos Castillo, and Valerie L. Shalin. 2022. Modeling and mitigating human annotation errors to design efficient stream processing systems with human-in-the-loop machine learning. International Journal of Human-Computer Studies, 160:102772.
|
| 340 |
+
LG Research, Soyoung An, Kyunghoon Bae, Eunbi Choi, Stanley Jungkyu Choi, Yemuk Choi, Seokhee Hong, Yeonjung Hong, Junwon Hwang, Hyojin Jeon, and 1 others. 2024. Exaone 3.0 7.8 b instruction tuned language model. arXiv preprint arXiv:2408.03541.
|
| 341 |
+
Amanpreet Singh, Yujia Wang, Yulia Tsvetkov, and Percy Liang. 2024. Global-mmlu: Evaluating cultural and linguistic biases in multilingual language understanding. arXiv preprint arXiv:2412.03304.
|
| 342 |
+
Zeerak Talat, Aurélie Névéol, Stella Biderman, Miruna Clinciu, Manan Dey, Shayne Longpre, Sasha Luccioni, Maraim Masoud, Margaret Mitchell, Dragomir Radev, Shanya Sharma, Arjun Subramonian, Jaesung Tae, Samson Tan, Deepak Tunuguntla, and Oskar Van Der Wal. 2022. You reap what you sow: On the challenges of bias evaluation under multilingual settings. In Proceedings of BigScience Episode #5 – Workshop on Challenges & Perspectives in Creating Large Language Models, pages 26-41.
|
| 343 |
+
Gemma Team, Aishwarya Kamath, Johan Ferret, Shreya Pathak, Nino Vieillard, Ramona Merhej, Sarah Perrin, Tatiana Matejovicova, Alexandre Rame, Morgane Rivière, and 1 others. 2025. Gemma 3 technical report. arXiv preprint arXiv:2503.19786.
|
| 344 |
+
Yidong Wang, Zhuohao Yu, Wenjin Yao, Zhengran Zeng, Linyi Yang, Cunxiang Wang, Hao Chen, Chaoya Jiang, Rui Xie, Jindong Wang, Xing Xie, Wei Ye, Shikun Zhang, and Yue Zhang. 2024. PandaLM: An automatic evaluation benchmark for LLM instruction tuning optimization. In The International Conference on Learning Representations (ICLR).
|
| 345 |
+
Tae-Jin Woo, Woo-Jeoung Nam, Yeong-Joon Ju, and Seong-Whan Lee. 2023. Compensatory debiasing for gender imbalances in language models. In ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1-5.
|
| 346 |
+
|
| 347 |
+
BigScience Workshop, Teven Le Scao, Angela Fan, Christopher Akiki, Ellie Pavlick, Suzana Ilic, Daniel Hesslow, Roman Castagné, Alexandra Sasha Luccioni, François Yvon, and 1 others. 2022. Bloom: A 176b-parameter open-access multilingual language model. arXiv preprint arXiv:2211.05100.
|
| 348 |
+
Zhilin Yang, Peng Qi, Saizheng Zhang, Yoshua Bengio, William Cohen, Ruslan Salakhutdinov, and Christopher D. Manning. 2018. HotpotQA: A dataset for diverse, explainable multi-hop question answering. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2369-2380.
|
| 349 |
+
Yue Yu, Yuchen Zhuang, Jieyu Zhang, Yu Meng, Alexander J Ratner, Ranjay Krishna, Jiaming Shen, and Chao Zhang. 2023. Large language model as attributed training data generator: A tale of diversity and bias. Advances in Neural Information Processing Systems (NeurIPS), 36:55734-55784.
|
| 350 |
+
Xiang Zhang, Senyu Li, Bradley Hauer, Ning Shi, and Grzegorz Kondrak. 2023. Don't trust chatgpt when your question is not in english: A study of multilingual abilities and types of LLMs. In Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 7915-7927.
|
| 351 |
+
Xingyu Zheng, Yuye Li, Haoran Chu, Yue Feng, Xudong Ma, Jie Luo, Jinyang Guo, Haotong Qin, Michele Magno, and Xianglong Liu. 2025. An empirical study of qwen3 quantization. arXiv preprint arXiv:2505.02214.
|
| 352 |
+
Lianghui Zhu, Xinggang Wang, and Xinlong Wang. 2025. JudgeLM: Fine-tuned large language models are scalable judges. In The International Conference on Learning Representations (ICLR).
|
| 353 |
+
Muitze Zulaika and Xabier Saralegi. 2025. BasqBBQ: A QA benchmark for assessing social biases in LLMs for Basque, a low-resource language. In Proceedings of the International Conference on Computational Linguistics (COLING), pages 4753-4767.
|
| 354 |
+
|
| 355 |
+
# A XLQA Construction Details
|
| 356 |
+
|
| 357 |
+
# A.1 Prompt Templates
|
| 358 |
+
|
| 359 |
+
We provide the full prompt templates used throughout the XLQA benchmark construction and evaluation pipeline. These include:
|
| 360 |
+
|
| 361 |
+
Translation prompts, used to generate multilingual versions of questions from English.
|
| 362 |
+
|
| 363 |
+
Given the question: {question}, please translate it into {loc}. Just output the translated question only, with no comments or formatting.
|
| 364 |
+
|
| 365 |
+
Back-translation prompts, used to translate the generated questions back into English.
|
| 366 |
+
|
| 367 |
+
Given the question:
|
| 368 |
+
|
| 369 |
+
{translated_response_output_text}, please translate it back into English. Just output the translated question only, with no comments or formatting.
|
| 370 |
+
|
| 371 |
+
Consistency filtering prompts, used to verify semantic consistency across languages.
|
| 372 |
+
|
| 373 |
+
Given the question: {question}, please check if the back translation:
|
| 374 |
+
|
| 375 |
+
{back_translation_output_text} is correct. If it is correct, output "yes". If it is not correct, output "no".
|
| 376 |
+
|
| 377 |
+
Locale-aware answer generation prompts, which condition the model to generate region-specific answers if appropriate.
|
| 378 |
+
|
| 379 |
+
Answer the following question based on the cultural context of a region where the {lang} language is primarily spoken. If the correct answer would vary depending on regional or cultural differences, return the version that best fits that local context. However, if the question concerns universal or culturally-neutral knowledge, provide the common or globally accepted answer instead. Respond with only the final answer in a single word or phrase. Do not explain or add anything else. Additionally, provide a brief evidence or source (e.g., a Wikipedia URL, news site, or cultural explanation) that supports the answer. The question is: {q}
|
| 380 |
+
|
| 381 |
+
Answer generation prompts for evaluation, which elicit general answers (EN) or, when relevant, region-specific ones (EN-LOC).
|
| 382 |
+
|
| 383 |
+
# (EN) General Prompt
|
| 384 |
+
|
| 385 |
+
Answer the following question. Respond with only the final answer in a single word or phrase. Do not explain or add anything else.
|
| 386 |
+
|
| 387 |
+
# (EN-LOC) Locale-aware Prompt
|
| 388 |
+
|
| 389 |
+
Answer the following question based on the cultural context of a region where the {lang} language is primarily spoken. If the correct answer would vary depending on regional or cultural differences, return the version that best fits that local context. However, if the question concerns universal or culturally-neutral knowledge, provide the
|
| 390 |
+
|
| 391 |
+
common or globally accepted answer instead. Respond with only the final answer in a single word or phrase. Do not explain or add anything else.
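To show how these templates are wired together during data construction, here is a minimal sketch of the translation, back-translation, and consistency-filtering loop; `call_llm` is a hypothetical stand-in for the actual chat-completion client, and the placeholder names are simplified relative to the templates above.

```python
TRANSLATE = ("Given the question: {question}, please translate it into {loc}. "
             "Just output the translated question only, with no comments or formatting.")
BACK_TRANSLATE = ("Given the question: {translated}, please translate it back into English. "
                  "Just output the translated question only, with no comments or formatting.")
CHECK = ("Given the question: {question}, please check if the back translation: "
         "{back_translation} is correct. If it is correct, output \"yes\". "
         "If it is not correct, output \"no\".")

def call_llm(prompt: str) -> str:
    # Hypothetical wrapper around whichever LLM API is used; replace with a real client.
    raise NotImplementedError

def translate_and_filter(question: str, loc: str) -> str | None:
    """Return the translated question if it survives consistency filtering, else None."""
    translated = call_llm(TRANSLATE.format(question=question, loc=loc))
    back = call_llm(BACK_TRANSLATE.format(translated=translated))
    verdict = call_llm(CHECK.format(question=question, back_translation=back))
    return translated if verdict.strip().lower().startswith("yes") else None
```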
|
| 392 |
+
|
| 393 |
+
# A.2 Human Verification Agreement Ratios
|
| 394 |
+
|
| 395 |
+
# A.3 Locale Sensitivity Annotation Guidelines
|
| 396 |
+
|
| 397 |
+
We define a question as locale-sensitive if its correct answer may differ depending on regional, cultural, or national context, even when the semantic intent of the question remains the same.
|
| 398 |
+
|
| 399 |
+
Annotators were instructed to mark a question as locale-sensitive if:
|
| 400 |
+
|
| 401 |
+
- Regionally salient knowledge affects the expected answer (e.g., "most famous tower").
|
| 402 |
+
|
| 403 |
+
- Political, institutional, or cultural prominence varies by country or language group.
|
| 404 |
+
|
| 405 |
+
- The question involves subjective norms or identity references (e.g., "national dish", "popular leader").
|
| 406 |
+
|
| 407 |
+
Borderline cases were resolved by majority voting across annotators with multilingual and regional backgrounds.
|
| 408 |
+
|
| 409 |
+
# B Experimental Details
|
| 410 |
+
|
| 411 |
+
# B.1 Models
|
| 412 |
+
|
| 413 |
+
We use the following models in our experiments:
|
| 414 |
+
|
| 415 |
+
- Gemma3 12B: Gemma3 with 12B parameters, licensed under the Apache 2.0 license.
|
| 416 |
+
- Qwen3 14B: Qwen3 with 14B parameters, licensed under the Apache 2.0 license.
|
| 417 |
+
- LLaMA-3.1 8B: Has 8B parameters and is released under the LLaMA 3 Community License Agreement.
|
| 418 |
+
- GPT-4.1: Not open-source; accessible only via API requests and governed by a proprietary license.
|
| 419 |
+
- EXAONE 7.8B: EXAONE with 7.8B parameters, licensed under the EXAONE AI Model License Agreement.
|
| 420 |
+
|
| 421 |
+
All models are evaluated with the temperature set to 0.
|
| 422 |
+
|
| 423 |
+
# B.2 Budget
|
| 424 |
+
|
| 425 |
+
We use a single NVIDIA RTX A6000 GPU, with a total compute budget of roughly 20 GPU hours.
|
| 426 |
+
|
| 427 |
+
<table><tr><td>Language</td><td>Correctness (3/3)</td><td>Correctness (≥2/3)</td><td>Sensitivity (3/3)</td><td>Sensitivity (≥2/3)</td></tr><tr><td>English (en)</td><td>91.2%</td><td>98.5%</td><td>88.3%</td><td>96.7%</td></tr><tr><td>Korean (ko)</td><td>89.7%</td><td>97.4%</td><td>85.2%</td><td>95.9%</td></tr><tr><td>Arabic (ar)</td><td>86.4%</td><td>96.1%</td><td>80.5%</td><td>93.8%</td></tr><tr><td>Hebrew (he)</td><td>88.1%</td><td>97.0%</td><td>82.7%</td><td>94.6%</td></tr><tr><td>Japanese (ja)</td><td>90.5%</td><td>98.1%</td><td>87.0%</td><td>96.2%</td></tr><tr><td>Russian (ru)</td><td>87.9%</td><td>96.8%</td><td>84.1%</td><td>94.3%</td></tr><tr><td>Vietnamese (vi)</td><td>89.3%</td><td>97.9%</td><td>86.5%</td><td>95.7%</td></tr><tr><td>Chinese (zh_cn)</td><td>88.7%</td><td>97.5%</td><td>83.6%</td><td>94.8%</td></tr><tr><td>Average</td><td>88.9%</td><td>97.4%</td><td>84.7%</td><td>95.3%</td></tr></table>
|
| 428 |
+
|
| 429 |
+
Table 7: Annotator agreement rates by language. The table shows the percentage of instances where all three annotators (3/3) or at least two annotators (2/3) agreed on correctness and locale-sensitivity labels.
|
| 430 |
+
|
| 431 |
+
# C Human Annotation
|
| 432 |
+
|
| 433 |
+
To verify the correctness and locale sensitivity of the model-generated answers, we conducted human annotation using Amazon Mechanical Turk (MTurk). For each language, we recruited three independent annotators who are native or proficient speakers of the target language to evaluate each QA-evidence triple. Annotators were presented with the original question, the model-generated answer, and its associated supporting evidence (e.g., a URL or passage), and were instructed to make the assessments shown in Figure 4.
|
| 434 |
+
|
| 435 |
+
Each annotation instance was reviewed by three annotators. Final labels were determined via majority voting. Annotator agreement rates are summarized in Table 7.
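As a small illustration of how such agreement rates can be computed from raw labels, the sketch below counts unanimous (3/3) and majority (≥2/3) agreement per item; the label layout is an assumption, not the actual annotation export format.

```python
from collections import Counter

def agreement_rates(items: list[list[str]]) -> tuple[float, float]:
    """items: for each question, the labels assigned by the three annotators."""
    unanimous = sum(1 for votes in items if len(set(votes)) == 1)
    majority = sum(1 for votes in items if Counter(votes).most_common(1)[0][1] >= 2)
    n = len(items)
    return unanimous / n, majority / n

# Hypothetical correctness labels from three annotators on four questions.
items = [["yes", "yes", "yes"], ["yes", "yes", "no"],
         ["yes", "no", "unsure"], ["no", "no", "no"]]
print(agreement_rates(items))  # (0.5, 0.75)
```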
|
| 436 |
+
|
| 437 |
+
All annotators were compensated at a rate of $5 per 100 questions, in line with MTurk compensation standards, and informed that their responses would be used for research purposes. No personally identifiable information was collected during the process. Tasks involving potentially sensitive content were manually reviewed and filtered prior to annotation to avoid harm or discomfort.
|
| 438 |
+
|
| 439 |
+
# D Ethical Considerations
|
| 440 |
+
|
| 441 |
+
While XLQA promotes cultural inclusion in QA evaluation, locale-aware generation introduces ethical challenges. Prompts conditioned on locale risk overgeneralization or reinforcement of cultural stereotypes. We manually reviewed outputs for offensiveness and excluded instances containing bias or politically sensitive content.
|
| 442 |
+
|
| 443 |
+
Furthermore, hallucination in low-resource languages may amplify misinformation if locale grounding is weak. We recommend that future
|
| 444 |
+
|
| 445 |
+
work incorporate human validation when deploying such systems in high-stakes settings.
|
| 446 |
+
|
| 447 |
+

|
| 448 |
+
Figure 4: Survey screenshot. Interface shown to MTurk annotators during the human verification stage.
|
EMNLP/2025/XLQA_ A Benchmark for Locale-Aware Multilingual Open-Domain Question Answering/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a0f4397a14ee2f2f3be8a4dfa3ec3eac55994b6f6a77f74908100ef6841c90ca
|
| 3 |
+
size 603294
|
EMNLP/2025/XLQA_ A Benchmark for Locale-Aware Multilingual Open-Domain Question Answering/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:bb23608158989a20fcfa04f6ab883566ad6071bdd4edb827c5c4f112cae53f7f
|
| 3 |
+
size 415454
|
EMNLP/2025/XQuant_ Achieving Ultra-Low Bit KV Cache Quantization with Cross-Layer Compression/5077594a-7692-4cd3-8041-fe76cc076a33_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:714283e9b0b711ad76f1ed6014881fe00e3d68806723385eaed0b5f56dbdafdc
|
| 3 |
+
size 112637
|
EMNLP/2025/XQuant_ Achieving Ultra-Low Bit KV Cache Quantization with Cross-Layer Compression/5077594a-7692-4cd3-8041-fe76cc076a33_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7ea8dd87ed08283a4372776624558e7084ea0c198e3a0ec403f8dadbd1607340
|
| 3 |
+
size 132520
|
EMNLP/2025/XQuant_ Achieving Ultra-Low Bit KV Cache Quantization with Cross-Layer Compression/5077594a-7692-4cd3-8041-fe76cc076a33_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f6a5724506992e95bf5592fb010fffc4a9a25be812b51ecf687b9982c7f84e92
|
| 3 |
+
size 934387
|
EMNLP/2025/XQuant_ Achieving Ultra-Low Bit KV Cache Quantization with Cross-Layer Compression/full.md
ADDED
|
@@ -0,0 +1,575 @@
|
|
| 1 |
+
# XQuant: Achieving Ultra-Low Bit KV Cache Quantization with Cross-Layer Compression
|
| 2 |
+
|
| 3 |
+
Haoqi Yang $^{2}$ , Yao Yao $^{3}$ , Zuchao Li $^{1*}$ , Baoyuan Qi $^{4}$ , Guoming Liu $^{4}$ , Hai Zhao $^{3}$
|
| 4 |
+
|
| 5 |
+
$^{1}$ School of Artificial Intelligence, Wuhan University, Wuhan, China,
|
| 6 |
+
|
| 7 |
+
$^{2}$ School of Computer Science, Wuhan University, Wuhan, China,
|
| 8 |
+
|
| 9 |
+
$^{3}$ School of Computer Science, Shanghai Jiao Tong University, Shanghai, China,
|
| 10 |
+
|
| 11 |
+
$^{4}$ Xiaomi Inc., Beijing, China
|
| 12 |
+
|
| 13 |
+
{yanghq, zcli-charlie}@whu.edu.cn, yaoyao27@sjtu.edu.cn,
|
| 14 |
+
|
| 15 |
+
{qibaoyuan, liuguoming}@xiaomi.com, zhaohai@cs.sjtu.edu.cn
|
| 16 |
+
|
| 17 |
+
# Abstract
|
| 18 |
+
|
| 19 |
+
Large Language Models (LLMs) have demonstrated remarkable capabilities across diverse natural language processing tasks. However, their extensive memory requirements, particularly due to KV cache growth during long-text understanding and generation, present significant challenges for deployment in resource-constrained environments. Quantization has emerged as a promising solution to reduce memory consumption while preserving historical information. We propose XQuant, a training-free and plug-and-play framework that achieves ultra-low equivalent bit-width KV cache quantization. XQuant introduces two key innovations: a computationally negligible data-free calibration method and cross-layer KV cache compression, enabling quantization to sub-1.4 bits. Extensive experiments on TruthfulQA and LongBench demonstrate that XQuant outperforms state-of-the-art methods (e.g., KIVI-2bit and AsymKV-1.5bit) by achieving lower bit-width while maintaining superior performance, establishing a better trade-off between memory efficiency and model accuracy. The source code is available at https://github.com/brinenick511/XQuant.
|
| 20 |
+
|
| 21 |
+
# 1 Introduction
|
| 22 |
+
|
| 23 |
+
The rapid advancement of Large Language Models (LLMs) has propelled significant progress in a wide array of natural language processing (NLP) applications, including code generation, search systems, and many others (Ouyang et al., 2023; Sharma et al., 2024; Ma et al., 2024). The exceptional performance of LLMs is primarily driven by their immense parameter scales, which enable them to excel across diverse tasks. However, this remarkable success comes with substantial costs: the computational and memory demands associated with deploying LLMs have increased exponentially due
|
| 24 |
+
|
| 25 |
+
to increasing model parameters and growing input and output lengths, posing a formidable bottleneck for practical deployment. In particular, GPU memory consumption has surged to levels that frequently surpass the capacities of current hardware infrastructures, making large-scale deployment increasingly challenging (Shi et al., 2024).
|
| 26 |
+
|
| 27 |
+
To mitigate this challenge, the Key-Value (KV) cache mechanism has been widely adopted (Yao et al., 2024; Yang et al., 2024d; Ainslie et al., 2023; Kwon et al., 2023). The KV cache optimizes memory efficiency by storing and reusing previously computed keys and values in the attention mechanism, thereby reducing redundant computations and GPU memory usage. Despite its advantages, as model sizes and the input/output sequence lengths continue to grow, the storage overhead of the KV cache itself becomes increasingly significant (Shi et al., 2024). For instance, a 30-billion-parameter language model with a batch size of 128 and a sequence length of 1024 may require up to 180 GB of memory solely for storing the KV cache (Zhang et al., 2023). Although the computational and memory requirements are reduced compared to not using it, such escalating demands still pose substantial challenges for deploying LLMs with constrained hardware resources.
|
| 28 |
+
|
| 29 |
+
To address this problem, prior works have explored various strategies from different perspectives. Some studies (Sheng et al., 2023; Hooper et al., 2024; Liu et al., 2024b; Tao et al., 2024) focus on quantizing the floating-point KV cache (and, in some cases, model weights) to lower precision. However, these approaches often experience performance degradation under extreme compression ratios, particularly around 2-bit precision. Alternatively, other methods (Xiao et al., 2023; Zhang et al., 2023; Li et al., 2024; Cai et al., 2024) aim to alleviate the storage burden by evicting unimportant tokens. These methods dynamically or statically identify and discard less critical tokens to
|
| 30 |
+
|
| 31 |
+
reduce memory usage. Nevertheless, these methods inherently introduce information loss, resulting in reduced memory retention and severe forgetting issues, which can undermine the model's ability to maintain consistent performance on longer sequences. Existing KV cache quantization methods, due to inherent architectural constraints, fail to mitigate the severe performance degradation when operating under ultra-low-bit settings.
|
| 32 |
+
|
| 33 |
+
To address these limitations, this paper focuses on training-free KV cache quantization scenarios under extreme compression ratios and introduces XQuant, a plug-and-play framework for ultra-low-bit KV cache quantization. XQuant delivers two key improvements over existing quantization methods: (1) Data-Free Calibration: Traditional quantization methods often face significant limitations when mapping values to low-bit precision. Specifically, they tend to use the two endpoint values (e.g., 0 and 1 in 1-bit quantization) as representative values, which can result in substantial quantization errors, particularly under low bit-width settings. To address this issue, XQuant introduces a parameterized calibration scheme that allows for more fine-grained mapping of values. By adjusting the representative values to better reflect the actual data distribution, this method significantly reduces quantization errors and minimizes performance loss without the need for additional data. (2) Cross-Layer KV Cache Compression: We observe enhanced KV cache similarity between adjacent layers after quantization - a previously overlooked phenomenon. This enables effective cross-layer compression, where the quantized KV cache of one layer is shared across subsequent layers, significantly reducing computational and memory costs. Meanwhile, a subset of layer-specific parameters is preserved to retain the unique characteristics of each layer, ensuring minimal loss of model performance.
|
| 34 |
+
|
| 35 |
+
To evaluate the effectiveness of XQuant, we conduct extensive experiments on a consumer-grade NVIDIA GeForce RTX 3090 GPU (24GB) across diverse datasets, including TruthfulQA (Lin et al., 2022) and subsets of LongBench (Bai et al., 2024). Experimental results demonstrate that XQuant achieves an equivalent bit-width of less than 1.4-bit across various LLMs, outperforming existing methods such as KIVI-2bit (Liu et al., 2024b) and AsymKV-1.5bit (Tao et al., 2024). Notably, XQuant achieves comparable performance to full-precision baselines while offering a significantly
|
| 36 |
+
|
| 37 |
+
improved trade-off between model performance and compression ratio.
|
| 38 |
+
|
| 39 |
+
# 2 Related Work
|
| 40 |
+
|
| 41 |
+
Two mainstream approaches for addressing KV cache challenges are Quantization and Eviction methods (Shi et al., 2024).
|
| 42 |
+
|
| 43 |
+
Quantization has emerged as a prominent technique for compressing large-scale models by mapping high-precision data to lower-precision formats (e.g., 16-bit, 8-bit, or even 4-bit integers). This significantly reduces memory footprints while maintaining acceptable levels of model performance. A substantial body of work focuses on quantizing model weights. AWQ (Lin et al., 2024) optimizes neural network weight quantization by dynamically adapting the bit-width based on the weights' significance. By retaining higher precision for more impactful weights and reducing precision for less critical ones, AWQ minimizes performance loss while achieving compression. However, aggressive compression is constrained by "model hemorrhage" (Ma et al., 2025), a phenomenon identifying that models possess inherent robustness thresholds beyond which performance degrades sharply. This makes maintaining stability in the ultra-low-bit regime a critical challenge.
|
| 44 |
+
|
| 45 |
+
Another line of research concentrates on the quantization of the KV cache. KVQuant, introduced by Hooper et al. (2024), employs distinct quantization strategies for keys and values. It applies per-channel quantization to the keys—particularly before Rotary Positional Embeddings (RoPE)—and per-token quantization to the values, effectively managing outliers and minimizing RoPE-induced distortions. Similarly, MiKV (Yang et al., 2024c) introduces a mixed-precision KV-cache strategy that retains important KV pairs in high precision. Concurrently, KIVI (Liu et al., 2024b) develops a tuning-free 2-bit KV cache quantization scheme, where the key cache is quantized per-channel, and the value cache is quantized per-token. Building on this, AsymKV (Tao et al., 2024) further combines 1-bit and 2-bit representations through an asymmetric and layer-wise quantization configuration, achieving a better trade-off between precision and compression ratio.
|
| 46 |
+
|
| 47 |
+
In contrast, some works simultaneously quantize both the model weights and the attention cache. For example, FlexGen (Sheng et al., 2023) introduces a high-throughput inference framework that applies
|
| 48 |
+
|
| 49 |
+
group-wise 4-bit quantization to compress both the model weights and KV cache. FlexGen divides tensors into small groups, computes the minimum and maximum values within each group, and performs asymmetric quantization. The resulting tensors are stored in 4-bit format and later dequantized to FP16 during computation, achieving a reduction in memory usage and I/O costs with minimal accuracy degradation. Despite the advancements of these methods, significant performance degradation remains a challenge when quantizing KV cache activations to extremely low-precision levels, particularly below 2-bit.
|
| 50 |
+
|
| 51 |
+
Eviction methods aim to discard unnecessary tokens during inference to reduce memory usage. StreamingLLM (Xiao et al., 2023) identifies the phenomenon of attention sinks, where initial tokens are retained to stabilize attention computations. StreamingLLM combines these attention sinks with a sliding window of recent tokens to introduce a rolling KV cache, effectively balancing memory efficiency and model performance. Building on this, SirLLM (Yao et al., 2024) uses token entropy to preserve critical tokens' KV cache and incorporates a memory decay mechanism to enhance LLMs' long-term memory while maintaining short-term reasoning abilities.
|
| 52 |
+
|
| 53 |
+
Other methods, such as H2O (Zhang et al., 2023) and SnapKV (Li et al., 2024), dynamically identify and evict non-important tokens based on attention scores. PyramidKV (Cai et al., 2024; Yang et al., 2024a) observes that attention scores are more sparse in higher layers and accordingly allocates different memory budgets across layers. SpindleKV (Tang et al., 2025) further develops a hybrid approach to balance reduction across layers, combining attention-based eviction in deep layers with a codebook-based replacement strategy for shallow layers. However, most existing KV eviction methods depend on attention scores to identify non-important tokens, which limits their compatibility with common optimizations like FlashAttention (Dao, 2023), reducing their practical usability.
|
| 54 |
+
|
| 55 |
+
Structural Approaches modify the model's architecture, in contrast to post-hoc data compression. For instance, some methods cache only partial layers of the KV cache (Wu and Tu, 2024; Sun et al., 2024; Brandon et al., 2024), while KV-Latent (Luohoe et al., 2025) reduces the dimensionality of K and V vectors. A key characteristic of these approaches is that they all require additional training, which contrasts with our plug-and-play framework.
|
| 56 |
+
|
| 57 |
+
We further clarify the key differences and highlight our contributions in Appendix G.
|
| 58 |
+
|
| 59 |
+
Compared to existing methods, we introduce XQuant with two key innovations: (1) A novel, simple yet effective data-free calibration method that achieves superior compression performance even under ultra-low-bit settings, eliminating the need for additional calibration data. (2) cross-layer KV cache compression that leverages previously overlooked quantization-enhanced layer similarities to achieve significant memory and computational savings. While prior work has studied layer representation similarities, our approach uniquely exploits the quantization-enhanced similarities to enable effective ultra-low-bit compression.
|
| 60 |
+
|
| 61 |
+
# 3 XQuant
|
| 62 |
+
|
| 63 |
+
In this section, we present XQuant, a novel quantization framework for efficient KV cache compression. As illustrated in Figure 1, our framework introduces two key innovations: a data-free calibration technique that asymmetrically adjusts quantization parameters without additional calibration data, and a cross-layer KV cache compression mechanism that leverages the similarity of quantized caches between adjacent layers to effectively reduce both computational and memory overhead.
|
| 64 |
+
|
| 65 |
+
# 3.1 Background
|
| 66 |
+
|
| 67 |
+
To formalize KV cache quantization, we consider a group of floating-point keys or values $\mathbf{X}$ . The quantization process transforms $\mathbf{X}$ into three components: a B-bit quantized cache $\mathbf{X}_{\mathbf{Q}}$ , a zero-point $z$ , and a scaling factor $s$ (Liu et al., 2024b):
|
| 68 |
+
|
| 69 |
+
# Quantization Phase:
|
| 70 |
+
|
| 71 |
+
$$
|
| 72 |
+
z = \min(\mathbf{X}), \quad s = \frac{\max(\mathbf{X}) - \min(\mathbf{X})}{2^B - 1} \tag{1}
|
| 73 |
+
$$
|
| 74 |
+
|
| 75 |
+
$$
|
| 76 |
+
\mathbf{X}_{\mathbf{T}} = (\mathbf{X} - z) / s, \quad \mathbf{X}_{\mathbf{Q}} = \left\lceil \mathbf{X}_{\mathbf{T}} \right\rfloor \tag{2}
|
| 77 |
+
$$
|
| 78 |
+
|
| 79 |
+
# Dequantization Phase:
|
| 80 |
+
|
| 81 |
+
$$
|
| 82 |
+
\hat {\mathbf {X}} = \mathbf {X} _ {\mathbf {Q}} * s + z \tag {3}
|
| 83 |
+
$$
|
| 84 |
+
|
| 85 |
+
where $\hat{\mathbf{X}}$ is the dequantized counterpart and $\lceil \cdot \rfloor$ is the round-to-nearest function. $\mathbf{X}_{\mathbf{T}}$, the transformed matrix, is not explicitly cached but is introduced as an intermediate variable to facilitate subsequent mathematical derivations.
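To make the grouping arithmetic concrete, the following is a minimal NumPy sketch of the asymmetric $B$-bit quantization and dequantization in Equations (1)-(3); the per-channel/per-token group layout and bit-packing used in practice are abstracted away, and the zero-scale guard is an added assumption for constant groups.

```python
import numpy as np

def quantize_group(x: np.ndarray, bits: int):
    """Asymmetric B-bit quantization of one group, Eqs. (1)-(2)."""
    z = float(x.min())
    s = (float(x.max()) - z) / (2 ** bits - 1)
    s = s if s > 0 else 1.0                      # guard against constant groups (assumption)
    x_q = np.rint((x - z) / s).astype(np.int32)  # round-to-nearest
    return x_q, z, s

def dequantize_group(x_q: np.ndarray, z: float, s: float) -> np.ndarray:
    """Dequantization, Eq. (3)."""
    return x_q * s + z

x = np.random.randn(32).astype(np.float32)       # one group of keys or values
x_q, z, s = quantize_group(x, bits=2)
x_hat = dequantize_group(x_q, z, s)
print(np.abs(x - x_hat).max())                   # bounded by s / 2
```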
|
| 86 |
+
|
| 87 |
+

|
| 88 |
+
Figure 1: The illustration of XQuant workflow. XQuant partitions the KV cache into layer-wise pairs. For every higher layer in a pair, XQuant only computes and stores the scaling factors and zero-points during quantization phase, and then fetches the quantized cache from the lower layer during dequantization phase.
|
| 89 |
+
|
| 90 |
+
Building upon this framework, prior works introduce various configurations to enhance performance. For example, Liu et al. (2024b) focuses on the element-wise distribution within the KV cache, adopting per-channel quantization for the key cache and per-token quantization for the value cache. Similarly, Tao et al. (2024) introduces layer-wise quantization configurations, employing asymmetric bit-widths for the key and value caches across different layers. While effective, these approaches often suffer from significant performance degradation under low-bit quantization settings, particularly around 2-bit precision. This limitation motivates the need for further advancements in KV cache compression techniques.
|
| 91 |
+
|
| 92 |
+
# 3.2 Data-Free Calibration
|
| 93 |
+
|
| 94 |
+
Since existing quantization methods often experience significant performance degradation at 2-bit precision, achieving ultra-low-bit compression first requires bridging this performance gap. In this section, we propose a data-free calibration method that effectively preserves model performance, enabling more aggressive compression ratios.
|
| 95 |
+
|
| 96 |
+
To analyze extreme quantization scenarios, we start with 1-bit quantization where each parameter is constrained to a binary state. Formally, the round-to-nearest operation $\lceil \cdot \rceil$ is defined as:
|
| 97 |
+
|
| 98 |
+
$$
|
| 99 |
+
\left\lceil e \right\rfloor = \begin{cases} 0 & \text{if } e \in [0, 0.5], \\ 1 & \text{if } e \in (0.5, 1]. \end{cases} \tag{4}
|
| 100 |
+
$$
|
| 101 |
+
|
| 102 |
+
where $e$ denotes an element of the transformed matrix. For any bit-width $B$ , this rounding operation maps values to a discrete set within $[0, 2^B - 1]$ , where each original value is assigned to its nearest representative in the quantized space. As shown
|
| 103 |
+
|
| 104 |
+
in Figure 2(a), fixed representative values at endpoints (0 and 1) yield substantial quantization error for 1-bit quantization. We therefore introduce a relaxed-constraint mapping function that adaptively determines the quantization levels, formulated as:
|
| 105 |
+
|
| 106 |
+
$$
|
| 107 |
+
f(e, \eta) = \begin{cases} \eta & \text{if } e \in [0, 0.5], \\ 1 - \eta & \text{if } e \in (0.5, 1]. \end{cases} \tag{5}
|
| 108 |
+
$$
|
| 109 |
+
|
| 110 |
+
where $\eta \in [0, 0.5]$ serves as a calibration parameter for determining quantization tendencies. Clearly, $f(e, 0)$ is equivalent to the round-to-nearest function $\lceil e \rfloor$. We extend this formulation to the general case of $B$-bit quantization and denote the corresponding parameter as $\eta_B$.
|
| 111 |
+
|
| 112 |
+
We relax the constraint that quantized values must be integers and apply fake quantization as a preliminary experiment. Table 7 shows that using this constraint-relaxed mapping function improves model performance, validating our proposed insight.
|
| 113 |
+
|
| 114 |
+
However, storing floating-point numbers as so-called quantized caches is impractical, as shown in Figure 2(b). To address the aforementioned problem, we establish an equivalent implementation, with the mathematical proof provided below. We formalize the final data-free calibration approach as:
|
| 115 |
+
|
| 116 |
+
Consider a group of floating-point keys or values $\mathbf{X} \in \mathbf{R}^g$ , where $g$ stands for the group size. Note that $\mathbf{X} \in [\min(\mathbf{X}), \max(\mathbf{X})]^g = [z, s * (2^B - 1) + z]^g$ , we can deduce:
|
| 117 |
+
|
| 118 |
+
$$
|
| 119 |
+
\mathbf {X} _ {\mathbf {Q}} \in [ 0, 2 ^ {B} - 1 ] ^ {g} \tag {6}
|
| 120 |
+
$$
|
| 121 |
+
|
| 122 |
+
from Equation 1 and Equation 2. If we choose $\eta * (2^B - 1)$ and $(1 - \eta) * (2^B - 1)$ generalized
|
| 123 |
+
|
| 124 |
+

|
| 125 |
+
|
| 126 |
+

|
| 127 |
+
Figure 3: Layer-wise analysis of absolute differences between adjacent layers in quantized KV Cache matrices. Here, delta represents the absolute difference of quantized values between consecutive layers.
|
| 128 |
+
|
| 129 |
+

|
| 130 |
+
Figure 2: The illustration of the proposed data-free calibration method.
|
| 131 |
+
|
| 132 |
+
from Equation 5 as the two endpoints, this is equivalent to calibrating the zero-point and scaling factor to $\hat{z}$ and $\hat{s}$, and then dequantizing with them. Note that the dequantized matrix
|
| 133 |
+
|
| 134 |
+
$$
|
| 135 |
+
\hat{\mathbf{X}} = \mathbf{X}_{\mathbf{Q}} * \hat{s} + \hat{z} \in [\hat{s} * 0 + \hat{z},\ \hat{s} * (2^B - 1) + \hat{z}]^g \tag{7}
|
| 136 |
+
$$
|
| 137 |
+
|
| 138 |
+
and the corresponding interval given by two endpoints:
|
| 139 |
+
|
| 140 |
+
$$
|
| 141 |
+
[ z + \eta s (2 ^ {B} - 1), z + s (2 ^ {B} - 1) (1 - \eta) ] \tag {8}
|
| 142 |
+
$$
|
| 143 |
+
|
| 144 |
+
By calculation we get the final operations for calibration:
|
| 145 |
+
|
| 146 |
+
$$
|
| 147 |
+
\hat {z} = z + \eta s \left(2 ^ {B} - 1\right), \hat {s} = (1 - 2 \eta) s \tag {9}
|
| 148 |
+
$$
|
| 149 |
+
|
| 150 |
+
Since $\mathbf{X}_{\mathbf{T}} = (\mathbf{X} - z) / s$ , the reconstruction loss $MSE(\mathbf{X},\hat{\mathbf{X}}) = s^2\cdot MSE(\mathbf{X}_{\mathbf{T}},f(\mathbf{X}_{\mathbf{T}},\eta))$ . For analytical tractability, particularly for 1-bit quantization within small group sizes, we can assume that $\mathbf{X}_{\mathbf{T}}\sim U(0,1)$ . Thus the expected MSE in the
|
| 151 |
+
|
| 152 |
+

|
| 153 |
+
|
| 154 |
+
transformed space can be formulated as:
|
| 155 |
+
|
| 156 |
+
$$
|
| 157 |
+
\begin{aligned} MSE\left(\mathbf{X}_{\mathbf{T}}, f\left(\mathbf{X}_{\mathbf{T}}, \eta\right)\right) &= E\left[\left(X_T - f(X_T, \eta)\right)^2\right] \\ &= \int_{0}^{0.5} (x - \eta)^2 \, dx + \int_{0.5}^{1} (x - (1 - \eta))^2 \, dx \\ &= \eta^2 - \frac{1}{2}\eta + \frac{1}{12} \end{aligned}
|
| 158 |
+
$$
|
| 159 |
+
|
| 160 |
+
Since the standard quantization scheme is equivalent to setting $\eta = 0$ , this result confirms that any value of $\eta \in (0,1 / 2)$ will strictly reduce the theoretical reconstruction error.
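Under the same uniformity assumption, this closed form can be sanity-checked numerically; the sketch below is purely illustrative.

```python
import numpy as np

def mse_empirical(eta: float, n: int = 1_000_000) -> float:
    x = np.random.rand(n)                      # X_T ~ U(0, 1)
    f = np.where(x <= 0.5, eta, 1.0 - eta)     # relaxed 1-bit mapping, Eq. (5)
    return float(np.mean((x - f) ** 2))

def mse_closed_form(eta: float) -> float:
    return eta ** 2 - 0.5 * eta + 1.0 / 12.0

for eta in (0.0, 0.1, 0.25):
    print(eta, round(mse_empirical(eta), 4), round(mse_closed_form(eta), 4))
# eta = 0 recovers the standard scheme (1/12 ≈ 0.0833); eta = 0.25 minimizes the error (1/48 ≈ 0.0208).
```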
|
| 161 |
+
|
| 162 |
+
As shown in Figure 2(c), we propose the improved quantization scheme with this data-free calibration as follows:
|
| 163 |
+
|
| 164 |
+
# Quantization Phase with Calibration:
|
| 165 |
+
|
| 166 |
+
$$
|
| 167 |
+
z = \min(\mathbf{X}), \quad s = \frac{\max(\mathbf{X}) - \min(\mathbf{X})}{2^B - 1} \tag{10}
|
| 168 |
+
$$
|
| 169 |
+
|
| 170 |
+
$$
|
| 171 |
+
\mathbf{X}_{\mathbf{T}} = (\mathbf{X} - z) / s, \quad \mathbf{X}_{\mathbf{Q}} = \left\lceil \mathbf{X}_{\mathbf{T}} \right\rfloor \tag{11}
|
| 172 |
+
$$
|
| 173 |
+
|
| 174 |
+
$$
|
| 175 |
+
\hat {z} = z + \eta s \left(2 ^ {B} - 1\right), \hat {s} = (1 - 2 \eta) s \tag {12}
|
| 176 |
+
$$
|
| 177 |
+
|
| 178 |
+
# Dequantization Phase with Calibration:
|
| 179 |
+
|
| 180 |
+
$$
|
| 181 |
+
\hat {\mathbf {X}} = \mathbf {X} _ {\mathbf {Q}} * \hat {s} + \hat {z} \tag {13}
|
| 182 |
+
$$
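To illustrate how the calibration composes with the basic scheme, the sketch below applies Equation (12) on top of a plain 1-bit quantizer and compares reconstruction error with and without calibration; the value $\eta = 0.25$ is illustrative rather than a tuned setting from the paper.

```python
import numpy as np

def calibrate(z: float, s: float, bits: int, eta: float):
    """Eq. (12): shift the zero-point and shrink the scale; X_Q itself is unchanged."""
    z_hat = z + eta * s * (2 ** bits - 1)
    s_hat = (1.0 - 2.0 * eta) * s
    return z_hat, s_hat

x = np.random.rand(64)                            # one group, values in [0, 1]
z, s = x.min(), (x.max() - x.min()) / (2 ** 1 - 1)
x_q = np.rint((x - z) / s)                        # plain 1-bit quantization, Eq. (11)

plain_mse = np.mean((x - (x_q * s + z)) ** 2)     # dequantize with original (z, s)
z_hat, s_hat = calibrate(z, s, bits=1, eta=0.25)
calib_mse = np.mean((x - (x_q * s_hat + z_hat)) ** 2)  # Eq. (13)
print(plain_mse, calib_mse)                       # calibrated MSE is typically lower
```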
|
| 183 |
+
|
| 184 |
+
# 3.3 Cross-Layer Compression

# 3.3.1 Motivation

Building upon Tao et al. (2024)'s investigation of ultra-low-bit KV cache asymmetric quantization, our reproduction experiments on LongBench (Bai et al., 2023) with Mistral (Jiang et al., 2023) demonstrate severe limitations of existing approaches, as shown in Table 8.

We found that 1-bit asymmetric quantization of the key cache is practically infeasible. Even when restricting 1-bit quantization to the top 8 layers (AsymKV-24/32), significant performance degradation occurs. Given the limitations of further key cache quantization, we turn to cross-layer compression techniques as a viable alternative that achieves comparable ultra-low-bit quantization without compromising performance.
# 3.3.2 Analysis on Quantized KV Cache

To enable cross-layer compression, we first analyze the characteristics of quantized KV caches by examining inter-layer similarities. We hypothesize that significant redundancy between adjacent layers could create opportunities for more aggressive compression. Using the KIVI-2 framework (Liu et al., 2024b), we conduct preliminary experiments on the Mistral-7B-Instruct-v0.2 model (Jiang et al., 2023) with random samples from LongBench (Bai et al., 2023).

Under the 2-bit quantization scheme in KIVI-2, quantized cache values are restricted to $\{0,1,2,3\}$, naturally constraining element-wise absolute differences to the same range. Our analysis, illustrated in Figure 3, reveals a striking pattern: over $80\%$ of positions between adjacent layers exhibit minimal differences (0 or 1), while extreme differences (3) occur in less than $5\%$ of positions. This pattern becomes even more pronounced in the 1-bit scenario, where mapping $\{0,1\}$ to 0 and $\{2,3\}$ to 1 maintains identical values in over $80\%$ of positions between adjacent layers. These empirical findings demonstrate substantial redundancy in quantized KV caches between adjacent layers, suggesting significant potential for further compression.
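This similarity measurement can be reproduced along the following lines. The sketch is illustrative only: it assumes the 2-bit quantized caches of two adjacent layers are already available as integer arrays, and the random inputs stand in for real KV caches.

```python
import numpy as np

def adjacent_layer_diff_stats(q_prev: np.ndarray, q_next: np.ndarray) -> dict:
    """Fraction of positions with each absolute difference (0-3) between the
    2-bit quantized caches of two adjacent layers."""
    diff = np.abs(q_prev.astype(np.int32) - q_next.astype(np.int32))
    return {d: float((diff == d).mean()) for d in range(4)}

# Toy illustration with random 2-bit caches; on real caches the paper reports
# more than 80% of positions falling into the 0/1 difference bins.
rng = np.random.default_rng(0)
q_prev = rng.integers(0, 4, size=(8, 128))
q_next = rng.integers(0, 4, size=(8, 128))
print(adjacent_layer_diff_stats(q_prev, q_next))
```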
# 3.3.3 Compression Algorithm

Leveraging these insights into inter-layer similarities, we propose a novel cross-layer compression method that decomposes KV caches into two components: shared quantized caches and layer-specific parameters. Specifically, adjacent layers share a common set of quantized value caches $(\mathbf{X}_{\mathbf{Q}})$, while maintaining their individual scaling factors and zero-points for dequantization. This decomposition enables efficient compression by allowing each layer to reuse the merged cache from its group, while preserving the layer-specific characteristics through its unique quantization parameters, namely zero-points and scaling factors.

<table><tr><td>Model</td><td>Method</td><td>Bit-width</td><td>TruthfulQA</td></tr><tr><td rowspan="4">Mistral-7b</td><td>Full Cache</td><td>16</td><td>32.09</td></tr><tr><td>KIVI</td><td>2</td><td>32.17</td></tr><tr><td>AsymKV</td><td>1.5</td><td>32.80</td></tr><tr><td>XQuant</td><td>1.38</td><td>34.93</td></tr><tr><td rowspan="4">Llama2-7b</td><td>Full Cache</td><td>16</td><td>30.77</td></tr><tr><td>KIVI</td><td>2</td><td>33.92</td></tr><tr><td>AsymKV</td><td>1.5</td><td>33.84</td></tr><tr><td>XQuant</td><td>1.4</td><td>34.22</td></tr></table>

Table 1: Evaluation on TruthfulQA task with normal context length.

In the implementation, for a model with $L$ layers, we organize the layers into groups of size $G$. Within each group, KV caches are compressed using weighted averaging, where each layer $l$ ($0 \leq l < L$) is assigned a weight $\gamma_{l}$, subject to the constraint $\sum_{l} \gamma_{l} = 1$.

Formally, for every layer $l$ in a group $\mathbf{G}$, the quantization workflow with cross-layer compression and calibration is as follows:
Quantization Phase with Cross-Layer Compression and Calibration:

$$
\forall l \in \mathbf{G},
$$

$$
z_{l} = \min(\mathbf{X}_{l}), \quad s_{l} = \frac{\max(\mathbf{X}_{l}) - \min(\mathbf{X}_{l})}{2^{B} - 1}
$$

$$
\hat{z}_{l} = z_{l} + \eta s_{l} (2^{B} - 1), \quad \hat{s}_{l} = (1 - 2\eta) s_{l}
$$

$$
\mathbf{X}_{\mathbf{Q}} = \sum_{l \in \mathbf{G}} \gamma_{l} \left\lceil \frac{\mathbf{X}_{l} - z_{l}}{s_{l}} \right\rfloor
$$

Dequantization Phase with Cross-Layer Compression and Calibration:

$$
\hat{\mathbf{X}}_{l} = \mathbf{X}_{\mathbf{Q}} * \hat{s}_{l} + \hat{z}_{l}
$$

We present the pseudo code for the whole workflow in Appendix J.
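Before turning to the pseudo code, the following NumPy sketch (an illustration under simplifying assumptions, not the released implementation) shows the workflow for one group of two layers: the group stores a single shared quantized cache, while each layer keeps only its own calibrated zero-point and scaling factor. Shapes, function names, and the per-layer min/max granularity are assumptions.

```python
import numpy as np

def layer_params(x: np.ndarray, bits: int, eta: float):
    """Per-layer zero-point/scale and their calibrated counterparts."""
    z = x.min()
    s = (x.max() - z) / (2**bits - 1)
    return z, s, z + eta * s * (2**bits - 1), (1.0 - 2.0 * eta) * s

def compress_group(xs, gammas, bits: int, eta: float):
    """One shared quantized cache per group (rounded weighted average of the
    layers' quantized caches) plus individual calibrated (z_hat, s_hat)."""
    params = [layer_params(x, bits, eta) for x in xs]
    weighted = sum(g * np.rint((x - z) / s)
                   for x, g, (z, s, _, _) in zip(xs, gammas, params))
    x_q = np.rint(weighted).astype(np.int32)
    return x_q, [(z_hat, s_hat) for (_, _, z_hat, s_hat) in params]

def reconstruct_layer(x_q: np.ndarray, z_hat: float, s_hat: float) -> np.ndarray:
    """Each layer dequantizes the shared cache with its own parameters."""
    return x_q * s_hat + z_hat

# Two adjacent layers sharing one 2-bit cache. gammas=[1.0, 0.0] corresponds to
# the accelerated configuration of Section 3.3.4: the group reuses layer 0's cache.
rng = np.random.default_rng(0)
xs = [rng.standard_normal((4, 32)).astype(np.float32) for _ in range(2)]
x_q, per_layer = compress_group(xs, gammas=[1.0, 0.0], bits=2, eta=0.05)
x0_hat = reconstruct_layer(x_q, *per_layer[0])
x1_hat = reconstruct_layer(x_q, *per_layer[1])
```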
# 3.3.4 Speedup through Cross-layer Compression

While our previous discussion introduced weighted averaging with the weight $\gamma$ for compressing $\mathbf{X}_{\mathbf{Q}}$ within a group, we can further optimize the computation by setting $\gamma_{k} = 1$ for a chosen dominant layer $k$, which forces all other $\gamma$ values within the group to zero. In this accelerated configuration, each subordinate layer only needs to compute and store its own scaling factors and zero-points, significantly reducing computational overhead. Specifically,

$$
\mathbf{X}_{\mathbf{Q}} = \left\lceil \frac{\mathbf{X}_{k} - z_{k}}{s_{k}} \right\rfloor
$$

<table><tr><td>Model</td><td>Method</td><td>Bit-width</td><td>HQA</td><td>2Wiki</td><td>MSQ</td><td>TREC</td><td>TQA</td><td>SAMS</td><td>PC</td><td>Avg</td></tr><tr><td rowspan="5">Mistral-7b-ins</td><td>Full Cache</td><td>16</td><td>43.02</td><td>27.10</td><td>18.78</td><td>71.00</td><td>86.23</td><td>42.75</td><td>2.75</td><td>41.66</td></tr><tr><td>PyramidInfer</td><td>/</td><td>35.08</td><td>23.92</td><td>16.90</td><td>62.00</td><td>85.06</td><td>41.45</td><td>1.04</td><td>32.55</td></tr><tr><td>KIVI</td><td>2</td><td>41.96</td><td>26.08</td><td>18.13</td><td>71.00</td><td>86.00</td><td>43.70</td><td>2.78</td><td>41.38</td></tr><tr><td>AsymKV</td><td>1.5</td><td>37.17</td><td>22.77</td><td>15.76</td><td>70.50</td><td>86.25</td><td>43.44</td><td>3.16</td><td>39.86</td></tr><tr><td>XQuant</td><td>1.38</td><td>42.90</td><td>26.65</td><td>17.44</td><td>71.50</td><td>84.50</td><td>45.18</td><td>5.71</td><td>41.98</td></tr><tr><td rowspan="5">Llama2-7b-chat</td><td>Full Cache</td><td>16</td><td>30.09</td><td>26.48</td><td>9.98</td><td>63.00</td><td>84.19</td><td>41.22</td><td>4.50</td><td>37.07</td></tr><tr><td>PyramidInfer</td><td>/</td><td>29.14</td><td>24.53</td><td>7.49</td><td>54.00</td><td>81.79</td><td>40.71</td><td>4.00</td><td>34.52</td></tr><tr><td>KIVI</td><td>2</td><td>29.10</td><td>25.12</td><td>9.86</td><td>63.00</td><td>84.98</td><td>40.18</td><td>4.00</td><td>36.61</td></tr><tr><td>AsymKV</td><td>1.5</td><td>27.75</td><td>24.82</td><td>8.45</td><td>62.00</td><td>84.21</td><td>41.22</td><td>2.75</td><td>35.89</td></tr><tr><td>XQuant</td><td>1.4</td><td>29.21</td><td>25.56</td><td>9.69</td><td>62.50</td><td>84.57</td><td>40.01</td><td>4.00</td><td>36.51</td></tr></table>

Table 2: Evaluation of different KV cache compression methods on LongBench tasks.

As illustrated in Figure 1, this optimization eliminates the computations shown in the dashed line, effectively streamlining the process. Experimental results show that selecting the first layer within the group as the dominant layer yields optimal performance, as demonstrated in Table 4 and Table 5.
# 4 Evaluation

# 4.1 Experimental Setup

Models. We evaluate our XQuant on Llama-2-7b / Llama-2-7b-chat (Touvron et al., 2023) and Mistral-7B-v0.3 / Mistral-7B-instruct-v0.2 (Jiang et al., 2023).

Tasks. For the normal context length task, we choose TruthfulQA (BLEU score) from LM-Eval (Gao et al., 2021). We also select several subsets from LongBench (Bai et al., 2023) for the long context length tasks, including HotpotQA (F1 score), 2WikiMultihopQA (F1 score), MuSiQue (F1 score), TREC (classification accuracy), TriviaQA (F1 score), SAMSum (Rouge-L) and PassageCount (exact match accuracy). MultiFieldQA-Zh (F1 score) is selected for some ablation studies as well.

Baselines and Implementations. We compare our framework with previous works, including the original 16-bit floating-point implementation, KIVI-2 (Liu et al., 2024b) and AsymKV (Tao et al., 2024). All relevant configurations follow KIVI, i.e., quantizing the key cache per-channel and the value cache per-token, with a group size of 32 and a residual length of 128. We reproduce AsymKV based on the official implementation of KIVI, with a typical configuration (AsymKV-32/0) selected from the original paper, i.e., quantizing all the key cache into 2-bit and the value cache into 1-bit, which corresponds to an equivalent bit-width of 1.5.

A token eviction method (Yang et al., 2024b), configured with a $40\%$ KV cache budget, is also included as a baseline for the LongBench tasks.

We set the maximum sequence length to 30000 for the Mistral model to conduct our experiments on a single NVIDIA GeForce RTX 3090 GPU (24GB), and 8192 for the Llama model by default. We do not consider SLERP (Shoemake, 1985; Liu et al., 2024a) because of the incompatibility between rescale-recover operations and quantized caches.
# 4.2 Performance Comparison

LM-Eval Results. Table 1 presents the evaluation of different quantization methods on the TruthfulQA task with a standard context length. XQuant not only achieves competitive performance but surpasses the full cache baseline, with a TruthfulQA score of 34.93 on Mistral-7b and 34.22 on Llama2-7b, outperforming all other methods at significantly lower bit-widths. These results highlight that XQuant provides superior performance in conventional context length settings.

LongBench Results. We evaluate XQuant on the LongBench benchmark using two widely adopted models: Mistral-7b-Instruct-v0.2 and Llama-2-7b-chat. As shown in Table 2, XQuant achieves significant improvements over other KV cache compression methods, particularly under ultra-low-bit settings.

On all LongBench datasets, XQuant achieves performance comparable to the full cache baseline while reducing bit-width by $31\%$ compared to KIVI-2bit. Notably, XQuant achieves an average score of 41.98 for Mistral, surpassing KIVI-2bit while maintaining a significantly lower bit-width of 1.38. Moreover, XQuant outperforms AsymKV on nearly all datasets while simultaneously reducing bit-width by $8\%$ relative to AsymKV. Additionally, compared to PyramidInfer, which sacrifices precision to reduce storage overhead, XQuant demonstrates clear advantages in maintaining high accuracy across tasks while achieving lower bit-width.

<table><tr><td>Method</td><td>Bit-width</td><td>η1</td><td>η2</td><td>MFQA-Zh</td></tr><tr><td>Full Cache</td><td>16</td><td>/</td><td>/</td><td>48.26</td></tr><tr><td>KIVI</td><td>2</td><td>/</td><td>0</td><td>42.27</td></tr><tr><td>AsymKV</td><td>1.5</td><td>0</td><td>0</td><td>36.30</td></tr><tr><td></td><td></td><td>0</td><td>0</td><td>37.20</td></tr><tr><td>XQuant</td><td>1.375</td><td>0</td><td>0.05</td><td>40.32</td></tr><tr><td></td><td></td><td>0.2</td><td>0</td><td>41.98</td></tr><tr><td></td><td></td><td>0.2</td><td>0.05</td><td>44.20</td></tr></table>

Table 3: Ablation study on the effect of data-free calibration in XQuant on the MultiFieldQA-Zh benchmark from LongBench.

<table><tr><td>Method</td><td>Bit-width</td><td>γ0</td><td>MuSiQue</td></tr><tr><td>Full Cache</td><td>16</td><td>/</td><td>18.78</td></tr><tr><td>KIVI</td><td>2</td><td>/</td><td>18.13</td></tr><tr><td>Flooring</td><td>1.63</td><td>/</td><td>16.79</td></tr><tr><td>Ceiling</td><td>1.63</td><td>/</td><td>16.36</td></tr><tr><td rowspan="6">Weighted Average</td><td>1.63</td><td>[0,1/6)</td><td>12.20</td></tr><tr><td>1.63</td><td>(1/6,1/4)</td><td>14.05</td></tr><tr><td>1.63</td><td>(1/4,1/2)</td><td>16.84</td></tr><tr><td>1.63</td><td>(1/2,3/4)</td><td>17.32</td></tr><tr><td>1.63</td><td>(3/4,5/6)</td><td>17.60</td></tr><tr><td>1.63</td><td>(5/6,1]</td><td>17.32</td></tr></table>

Table 4: The comparison between different cross-layer compression methods with group size $G = 2$, where $\gamma_0, \gamma_1$ stand for the coefficients in the weighted average ($\gamma_1 + \gamma_0 = 1$).

# 4.3 Ablation and Analysis

In this section, we conduct ablation studies on several randomly selected lightweight LongBench subsets.

Calibration Parameter. Table 3 presents an ablation study on the impact of data-free calibration in XQuant on the MultiFieldQA-Zh benchmark. The results indicate that applying calibration ($\eta_{1} \neq 0$ or $\eta_{2} \neq 0$) significantly improves XQuant's performance, reducing the performance gap with the full cache baseline.

<table><tr><td>Method</td><td>Bit-width</td><td>G</td><td>k</td><td>MSQ</td><td>MFQA-Zh</td></tr><tr><td>Full Cache</td><td>16</td><td>/</td><td>/</td><td>18.78</td><td>48.26</td></tr><tr><td>KIVI</td><td>2</td><td>/</td><td>/</td><td>18.13</td><td>42.27</td></tr><tr><td></td><td></td><td>2</td><td>0</td><td>17.32</td><td>37.44</td></tr><tr><td></td><td></td><td></td><td>1</td><td>12.20</td><td>20.48</td></tr><tr><td></td><td></td><td></td><td>0</td><td>14.92</td><td>17.53</td></tr><tr><td></td><td></td><td>3</td><td>1</td><td>16.97</td><td>37.37</td></tr><tr><td>XQuant</td><td>1.63</td><td></td><td>2</td><td>13.21</td><td>20.80</td></tr><tr><td></td><td></td><td></td><td>0</td><td>14.82</td><td>23.53</td></tr><tr><td></td><td></td><td>4</td><td>1</td><td>12.44</td><td>18.68</td></tr><tr><td></td><td></td><td></td><td>2</td><td>16.12</td><td>35.48</td></tr><tr><td></td><td></td><td></td><td>3</td><td>15.39</td><td>20.32</td></tr></table>

Table 5: The comparison of different group sizes $G$ and selection indices $k$ within each group, where XQuant is employed without the calibration step for a clearer analysis.

Cross-Layer Compression Method. We further explore the weighted average with a group size $G = 2$ and coefficients $\gamma_0, \gamma_1 = 1 - \gamma_0$, where $\gamma_0$ falls into one of the six intervals derived in Appendix F. Notably, when $\gamma_0 \in [0,1/6)$ or $\gamma_0 \in (5/6,1]$, the operation reduces to directly sharing one layer's quantized cache. We evaluate KIVI-2 on Mistral-7B-Instruct-v0.2 without our proposed calibration methods, starting from the 8-th layer. As summarized in Table 4, the accelerated compression methods ($\gamma_0 \in [0,1/6) \cup (5/6,1]$) avoid the redundant operations present in the workflow of Liu et al. (2024b), which converts quantized integers back into floating-point numbers. As shown in Table 4, the accelerated compression operation maintains sufficient information for model performance, particularly when $\gamma_0 \in (5/6,1]$. This configuration effectively allows odd-numbered layers to reuse the quantized cache from the preceding even-numbered layers without requiring additional quantization or storage overhead for odd-numbered layers.

We adopt this accelerated compression strategy across all experiments due to its favorable balance between computational efficiency and information preservation.

Group Size. After optimizing the cross-layer compression method, another factor is the group size. To investigate the effects of layer grouping, we partition the total $L$ layers of a model (where $L = 32$ for Mistral-7B and Llama2-7B) into $L / G$ contiguous groups of size $G$. The parameter $k$ indicates that we store and share the quantized cache only in the $k$-th layer of each group. We evaluate group sizes $G \in \{2, 3, 4\}$. This range is motivated by the empirical observation that while adjacent layers exhibit high similarity in their quantized representations (i.e., $G = 2$, as shown in Figure 3), this similarity diminishes gradually for layer distances greater than three, so for models with $L = 32$ layers, $G = 4$ serves as a sufficient upper bound for investigation. We set all configurations to the same compression ratio, namely keeping all layers of the key cache and 20 layers of the value cache under the KIVI-2bit framework, using Mistral-7b-instruct-v0.2. As shown in Table 5, the model achieves the best performance with the configuration of $G = 2$ and $k = 0$.

<table><tr><td>Method</td><td>Bit-width</td><td>TREC</td><td>SAMS</td></tr><tr><td>Full Cache</td><td>16</td><td>71</td><td>42.75</td></tr><tr><td>KIVI</td><td>2</td><td>71</td><td>43.7</td></tr><tr><td>AsymKV</td><td>1.5</td><td>70.5</td><td>43.44</td></tr><tr><td>AsymKV</td><td>1.375</td><td>69.5</td><td>42.76</td></tr><tr><td>XQuant</td><td>1.375</td><td>71.5</td><td>45.18</td></tr><tr><td>AsymKV</td><td>1.28</td><td>58.5</td><td>37.41</td></tr><tr><td>XQuant</td><td>1.28</td><td>68.5</td><td>39.84</td></tr><tr><td>AsymKV</td><td>1.15625</td><td>41</td><td>23.47</td></tr><tr><td>XQuant</td><td>1.15625</td><td>68.5</td><td>39.47</td></tr></table>

Table 6: The comparison of different configurations under extremely low compression ratios.

Performance-Compression Trade-offs. Table 6 evaluates the trade-offs between bit-width reduction and performance degradation across different quantization methods. As shown in Table 6, XQuant consistently outperforms other methods at the same bit-width, achieving higher scores on both TREC and SAMS benchmarks. Notably, even at an extremely low bit-width of 1.15625, XQuant preserves a significant portion of the model's performance, maintaining a TREC score of 68.5 compared to the full-cache baseline of 71. These results demonstrate that XQuant effectively balances performance retention and compression, achieving state-of-the-art trade-offs in ultra-low-bit KV cache quantization.

# 5 Conclusion
To alleviate the growing memory overhead in LLM inference, we propose XQuant, a plug-and-play framework that quantizes the KV cache at an extreme compression ratio. Based on our observations on classical training-free quantization and the distributions of quantized integers, we propose a data-free calibration method and a compute-efficient cross-layer compression method. Extensive experiments show that XQuant achieves state-of-the-art trade-offs between performance degradation and compression ratio, without sacrificing computational efficiency. Integrating these two novel methods, XQuant achieves performance comparable to the full-precision baseline under 1.4-bit quantization, and still maintains competitive performance on some tasks at an extreme 1.16-bit quantization.
# Limitations and Future Work

Our work presents several avenues for future exploration. First, while XQuant demonstrates promising results on representative models and benchmarks, its robustness and generalizability could be further validated by extending evaluations to a wider range of newer-generation or larger-scale models and more diverse downstream scenarios. Second, our current work relies on task-specific configurations. Although a unified setting proves robust (as shown in Appendix E), the development of an automated method to search for optimal configurations presents a valuable direction for future research. Finally, the key innovations of XQuant (Data-Free Calibration and Cross-layer Compression) are in principle orthogonal to other KV cache compression paradigms. A fruitful area for future work would be to investigate their compatibility and potential synergies with these existing methods, potentially yielding even greater efficiency gains.

# Acknowledgements

This work was supported by the National Natural Science Foundation of China (Grant No. 62306216) and the Natural Science Foundation of Hubei Province of China (Grant No. 2023AFB816).

Hai Zhao's contribution was funded by the Major Program of the Chinese National Foundation of Social Sciences under Grant "The Challenge and Governance of Smart Media on News Authenticity" [No. 23&ZD213].

The authors also gratefully acknowledge support from the Xiaomi Open-Competition Research Program.

# References
Joshua Ainslie, James Lee-Thorp, Michiel de Jong, Yury Zemlyanskiy, Federico Lebron, and Sumit Sanghai. 2023. GQA: Training generalized multi-query transformer models from multi-head checkpoints. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 4895-4901, Singapore. Association for Computational Linguistics.

Yushi Bai, Xin Lv, Jiajie Zhang, Hongchang Lyu, Jiankai Tang, Zhidian Huang, Zhengxiao Du, Xiao Liu, Aohan Zeng, Lei Hou, Yuxiao Dong, Jie Tang, and Juanzi Li. 2024. Longbench: A bilingual, multitask benchmark for long context understanding. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2024, Bangkok, Thailand, August 11-16, 2024, pages 3119-3137. Association for Computational Linguistics.

Yushi Bai, Xin Lv, Jiajie Zhang, Hongchang Lyu, Jiankai Tang, Zhidian Huang, Zhengxiao Du, Xiao Liu, Aohan Zeng, Lei Hou, et al. 2023. Longbench: A bilingual, multitask benchmark for long context understanding. arXiv preprint arXiv:2308.14508.

William Brandon, Mayank Mishra, Aniruddha Nrusimha, Rameswar Panda, and Jonathan Ragan Kelly. 2024. Reducing transformer key-value cache size with cross-layer attention. arXiv preprint arXiv:2405.12981.

Zefan Cai, Yichi Zhang, Bofei Gao, Yuliang Liu, Tianyu Liu, Keming Lu, Wayne Xiong, Yue Dong, Baobao Chang, Junjie Hu, et al. 2024. Pyramidkv: Dynamic kv cache compression based on pyramidal information tunneling. arXiv preprint arXiv:2406.02069.

Tri Dao. 2023. Flashattention-2: Faster attention with better parallelism and work partitioning. arXiv preprint arXiv:2307.08691.

Elias Frantar, Saleh Ashkboos, Torsten Hoefler, and Dan Alistarh. 2022. GPTQ: Accurate post-training compression for generative pretrained transformers. arXiv preprint arXiv:2210.17323.

Leo Gao, Jonathan Tow, Stella Biderman, Sid Black, Anthony DiPofi, Charles Foster, Laurence Golding, Jeffrey Hsu, Kyle McDonell, Niklas Muennighoff, et al. 2021. A framework for few-shot language model evaluation. Version v0.0.1.

Coleman Hooper, Sehoon Kim, Hiva Mohammadzadeh, Michael W Mahoney, Yakun Sophia Shao, Kurt Keutzer, and Amir Gholami. 2024. Kvquant: Towards 10 million context length llm inference with kv cache quantization. arXiv preprint arXiv:2401.18079.

Albert Q Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, et al. 2023. Mistral 7b. arXiv preprint arXiv:2310.06825.

Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph Gonzalez, Hao Zhang, and Ion Stoica. 2023. Efficient memory management for large language model serving with pagedattention. In Proceedings of the 29th Symposium on Operating Systems Principles, pages 611-626.

Yuhong Li, Yingbing Huang, Bowen Yang, Bharat Venkitesh, Acyr Locatelli, Hanchen Ye, Tianle Cai, Patrick Lewis, and Deming Chen. 2024. Snapkv: Llm knows what you are looking for before generation. arXiv preprint arXiv:2404.14469.

Ji Lin, Jiaming Tang, Haotian Tang, Shang Yang, Wei-Ming Chen, Wei-Chen Wang, Guangxuan Xiao, Xingyu Dang, Chuang Gan, and Song Han. 2024. Awq: Activation-aware weight quantization for on-device llm compression and acceleration. Proceedings of Machine Learning and Systems, 6:87-100.

Stephanie Lin, Jacob Hilton, and Owain Evans. 2022. Truthfulqa: Measuring how models mimic human falsehoods. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2022, Dublin, Ireland, May 22-27, 2022, pages 3214-3252. Association for Computational Linguistics.

Akide Liu, Jing Liu, Zizheng Pan, Yefei He, Gholamreza Haffari, and Bohan Zhuang. 2024a. Minicache: Kv cache compression in depth dimension for large language models. arXiv preprint arXiv:2405.14366.

Zirui Liu, Jiayi Yuan, Hongye Jin, Shaochen Zhong, Zhaozhuo Xu, Vladimir Braverman, Beidi Chen, and Xia Hu. 2024b. Kivi: A tuning-free asymmetric 2bit quantization for kv cache. arXiv preprint arXiv:2402.02750.

Shi Luohe, Zuchao Li, Lefei Zhang, Baoyuan Qi, Liu Guoming, and Hai Zhao. 2025. KV-latent: Dimensional-level KV cache reduction with frequency-aware rotary positional embedding. In Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1535-1550, Vienna, Austria. Association for Computational Linguistics.

Xinbei Ma, Zhuosheng Zhang, and Hai Zhao. 2024. Comprehensive cognitive llm agent for smartphone gui automation. arXiv preprint arXiv:2402.11941.

Ziyang Ma, Zuchao Li, Lefei Zhang, Gui-Song Xia, Bo Du, Liangpei Zhang, and Dacheng Tao. 2025. Model hemorrhage and the robustness limits of large language models. arXiv preprint arXiv:2503.23924.

Shuyin Ouyang, Jie M Zhang, Mark Harman, and Meng Wang. 2023. Llm is like a box of chocolates: the non-determinism of chatgpt in code generation. arXiv preprint arXiv:2308.02828.

Qwen: An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, Huan Lin, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxi Yang, Jingren Zhou, Junyang Lin, Kai Dang, Keming Lu, Keqin Bao, Kexin Yang, Le Yu, Mei Li, Mingfeng Xue, Pei Zhang, Qin Zhu, Rui Men, Runji Lin, Tianhao Li, Tianyi Tang, Tingyu Xia, Xingzhang Ren, Xuancheng Ren, Yang Fan, Yang Su, Yichang Zhang, Yu Wan, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zihan Qiu. 2025. Qwen2.5 technical report. Preprint, arXiv:2412.15115.

Nikhil Sharma, Q Vera Liao, and Ziang Xiao. 2024. Generative echo chamber? Effect of llm-powered search systems on diverse information seeking. In Proceedings of the CHI Conference on Human Factors in Computing Systems, pages 1-17.

Ying Sheng, Lianmin Zheng, Binhang Yuan, Zhuohan Li, Max Ryabinin, Beidi Chen, Percy Liang, Christopher Ré, Ion Stoica, and Ce Zhang. 2023. Flexgen: High-throughput generative inference of large language models with a single GPU. In International Conference on Machine Learning, pages 31094-31116. PMLR.

Luohe Shi, Hongyi Zhang, Yao Yao, Zuchao Li, and Hai Zhao. 2024. Keep the cost down: A review on methods to optimize llm's kv-cache consumption. arXiv preprint arXiv:2407.18003.

Ken Shoemake. 1985. Animating rotation with quaternion curves. In Proceedings of the 12th Annual Conference on Computer Graphics and Interactive Techniques, pages 245-254.

Yutao Sun, Li Dong, Yi Zhu, Shaohan Huang, Wenhui Wang, Shuming Ma, Quanlu Zhang, Jianyong Wang, and Furu Wei. 2024. You only cache once: Decoder-decoder architectures for language models. arXiv preprint arXiv:2405.05254.

Zicong Tang, Shi Luohe, Zuchao Li, Baoyuan Qi, Liu Guoming, Lefei Zhang, and Ping Wang. 2025. SpindleKV: A novel KV cache reduction method balancing both shallow and deep layers. In Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 28428-28442, Vienna, Austria. Association for Computational Linguistics.

Qian Tao, Wenyuan Yu, and Jingren Zhou. 2024. Asymkv: Enabling 1-bit quantization of kv cache with layer-wise asymmetric quantization configurations. arXiv preprint arXiv:2410.13212.

Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Roziere, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 2023. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971.

Haoyi Wu and Kewei Tu. 2024. Layer-condensed kv cache for efficient inference of large language models. arXiv preprint arXiv:2405.10637.

Guangxuan Xiao, Yuandong Tian, Beidi Chen, Song Han, and Mike Lewis. 2023. Efficient streaming language models with attention sinks. arXiv.

Dongjie Yang, Xiaodong Han, Yan Gao, Yao Hu, Shilin Zhang, and Hai Zhao. 2024a. Pyramidinfer: Pyramid KV cache compression for high-throughput LLM inference. In Findings of the Association for Computational Linguistics, ACL 2024, Bangkok, Thailand and virtual meeting, August 11-16, 2024, pages 3258-3270. Association for Computational Linguistics.

Dongjie Yang, Xiaodong Han, Yan Gao, Yao Hu, Shilin Zhang, and Hai Zhao. 2024b. Pyramidinfer: Pyramid kv cache compression for high-throughput llm inference. arXiv preprint arXiv:2405.12532.

June Yong Yang, Byeongwook Kim, Jeongin Bae, Beomseok Kwon, Gunho Park, Eunho Yang, Se Jung Kwon, and Dongsoo Lee. 2024c. No token left behind: Reliable KV cache compression via importance-aware mixed precision quantization. CoRR, abs/2402.18096.

Yifei Yang, Zouying Cao, Qiguang Chen, Libo Qin, Dongjie Yang, Hai Zhao, and Zhi Chen. 2024d. Kvsharer: Efficient inference via layerwise dissimilar kv cache sharing. arXiv preprint arXiv:2410.18517.

Yao Yao, Zuchao Li, and Hai Zhao. 2024. Sirllm: Streaming infinite retentive llm. arXiv preprint arXiv:2405.12528.

Zhenyu Zhang, Ying Sheng, Tianyi Zhou, Tianlong Chen, Lianmin Zheng, Ruisi Cai, Zhao Song, Yuandong Tian, Christopher Ré, Clark Barrett, et al. 2023. H2o: Heavy-hitter oracle for efficient generative inference of large language models. Advances in Neural Information Processing Systems, 36:34661-34710.

<table><tr><td>Method</td><td>Bit-width</td><td>η1</td><td>η2</td><td>MFQA-Zh</td></tr><tr><td>Full Cache</td><td>16</td><td>/</td><td>/</td><td>48.26</td></tr><tr><td>KIVI</td><td>2</td><td>/</td><td>0</td><td>42.27</td></tr><tr><td>KIVI</td><td>2</td><td>/</td><td>0.05</td><td>44.34</td></tr><tr><td>AsymKV</td><td>1.5</td><td>0</td><td>0</td><td>36.30</td></tr><tr><td>AsymKV</td><td>1.5</td><td>0</td><td>0.05</td><td>41.28</td></tr><tr><td>AsymKV</td><td>1.5</td><td>0.2</td><td>0</td><td>42.78</td></tr><tr><td>AsymKV</td><td>1.5</td><td>0.2</td><td>0.05</td><td>43.81</td></tr></table>

Table 7: The comparison of different quantization methods with and without our calibration method on the MultiFieldQA-Zh task from LongBench.

# A Preliminary Study on Relaxed-Constraint Mapping

As demonstrated in Figure 2, the traditional quantization workflow faces higher quantization error in low-bit scenarios. In Section 3.2, we propose a flexible mapping to mitigate the quantization error in this aspect. Moreover, to provide empirical evidence supporting the effectiveness of the flexible mapping in the proposed calibration method, we employ its generalized form and conduct a preliminary study on the default KIVI-2bit and AsymKV-32/0 configurations. We extend this approach to a generalized B-bit quantization mechanism, where $\eta_B$ serves as the corresponding parameter. Notably, when $\eta_B = 0$, the B-bit quantization operates without the flexible mapping.

The results in Table 7 demonstrate that incorporating the flexible mapping function enhances model performance across different quantization settings.
# B Preliminary Experiment on Layer-Wise Asymmetric Quantization

In the existing method (Tao et al., 2024), the KV cache for each layer is quantized using either 1-bit or 2-bit precision. A straightforward strategy to maximize the compression ratio is to apply 1-bit quantization to a greater number of layers.

However, a significant bottleneck arises, as it is nearly impossible to quantize the key cache at 1-bit precision without compromising performance. As shown in Table 8, further compression by increasing the number of 1-bit quantized key cache layers is not feasible, as it leads to substantial performance degradation. This observation motivates us to explore alternative compression methodologies.

<table><tr><td>Method</td><td>Bit-width</td><td>#Key Layers in 1-bit</td><td>MFQA-Zh</td></tr><tr><td>Full Cache</td><td>16</td><td>/</td><td>48.26</td></tr><tr><td>KIVI (32/32)</td><td>2</td><td>0</td><td>42.27</td></tr><tr><td>AsymKV-24/32</td><td>1.875</td><td>8</td><td>37.10</td></tr><tr><td>AsymKV-16/32</td><td>1.75</td><td>16</td><td>21.36</td></tr><tr><td>AsymKV-8/32</td><td>1.625</td><td>24</td><td>13.16</td></tr><tr><td>AsymKV-0/32</td><td>1.5</td><td>32</td><td>7.66</td></tr></table>

Table 8: Evaluation on LongBench based on AsymKV shows that the key cache is nearly impossible to quantize to 1-bit.

# C Equivalent Bit-width Analysis

Formally, let $b, h, s, d$ be the batch size, the number of heads in GQA (Ainslie et al., 2023), the sequence length and the dimension per head. The original $L$ layers of KV cache occupy $2L * bhsd * 16$ bit, which equals $2L * n * 16$ bit if we set $n = bhsd$ for convenience.

Consider a typical KV cache quantization scheme (Liu et al., 2024b). If we quantize all $L$ layers of the key cache and value cache into $b$-bit, the quantized KV cache memory usage is $2L * n * b$ bit. Tao et al. (2024) use asymmetric configurations for the key and value caches across different layers. In their paper, Asym-$l_{k}/l_{v}$ means quantizing the initial $l_{k}$ layers of the key cache and $l_{v}$ layers of the value cache into 2-bit, and quantizing the others into 1-bit, so the quantized KV cache memory usage is $(2 * l_{k} + (32 - l_{k}) + 2 * l_{v} + (32 - l_{v})) * n$ bit. For example, Asym-1.5bit stands for Asym-32/0 in our paper, which amounts to $3L * n$ bit and can be equivalently regarded as a 1.5-bit symmetric quantization, for a better understanding of the compression ratio.

The related parameters in XQuant are $kq$, $vq$, $km$, and $vm$. The equivalent bit-width $B$ can be expressed as follows: $B = \big( (32 - \max(kq, km)) / 2 + (\max(kq, km) - \min(kq, km)) + \min(kq, km) * 2 + (32 - \max(vq, vm)) / 2 + (\max(vq, vm) - \min(vq, vm)) + \min(vq, vm) * 2 \big) / 64$.

In the classical configuration in our paper, $kq = 30$, $vq = 2$, $km = 32$, and $vm = 16$: in the key cache we apply 2-bit quantization to layers $[0, kq)$, 1-bit quantization to layers $[kq, 32)$, and cross-layer compression to layers $[km, 32)$. The value cache is processed in the same manner. Therefore, the equivalent bit-widths of the key and value caches are computed as follows:

$$
B_{k} = \frac{(32 - 30) + 30 * 2}{32} = 1.9375
$$

$$
B_{v} = \frac{(32 - 16) / 2 + (16 - 2) + 2 * 2}{32} = 0.8125
$$

The average bit-width is therefore 1.375, which appears as 1.38 in most parts of this paper. More parameter sets used in our experiments are listed in Appendix I.
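For concreteness, a small helper (illustrative only, not from the paper's code base) that evaluates this equivalent bit-width; it reproduces the 1.375 figure for the configuration above.

```python
def equivalent_bitwidth(kq: int, km: int, vq: int, vm: int, n_layers: int = 32) -> float:
    """Equivalent bit-width of XQuant's asymmetric key/value configuration.
    Layers below min(q, m) are 2-bit, layers between min and max are 1-bit,
    and layers above max are 1-bit with the shared cross-layer cache (0.5 bit)."""
    def per_cache(q: int, m: int) -> float:
        hi, lo = max(q, m), min(q, m)
        return (n_layers - hi) / 2 + (hi - lo) + 2 * lo
    return (per_cache(kq, km) + per_cache(vq, vm)) / (2 * n_layers)

print(equivalent_bitwidth(kq=30, km=32, vq=2, vm=16))  # 1.375
```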
To maintain consistency with seminal works (e.g., KIVI (Liu et al., 2024b) and GPTQ (Frantar et al., 2022)), our reported "equivalent bit-width" for asymmetric quantization methods considers only the quantized integer tensors, excluding metadata overhead such as scaling factors and zero-points. The comparisons remain rigorous, as all evaluated quantization methods were implemented with identical group sizes and residual lengths. This ensures the unaccounted overhead is uniform across all methods and does not affect their relative performance rankings.
# D Efficiency Analysis

Using Mistral-7B as an example, we theoretically analyze the computational cost of our two key improvements. During the calibration step, generating each token incurs only 64 additional floating-point multiplications and 32 additions (Equation 12), which are negligible in practice. Moreover, as described in Section 3.3.4, the cross-layer compression step optimizes efficiency by skipping certain parts of the quantization process (Equation 2).

To evaluate inference efficiency, we adopt the same experimental setup as in KIVI's repository, using a batch size of 16, a prompt length of 1024, and an output length of 128. As shown in Figure 4, XQuant, by leveraging its unique speedup mechanism, demonstrates competitive inference efficiency.

Figure 4: Comparison of Execution Time.
# E Hyperparameter

The related parameters in XQuant are $kq$, $vq$, $km$, and $vm$. In XQuant, we quantize the lower $kq$ and $vq$ layers of the key and value caches into 2-bit, while quantizing the others into 1-bit. We apply cross-layer compression from the $km$-th and $vm$-th layers of the key and value caches onward. All the configurations are summarized in Table 11.

As demonstrated in Table 9, additional experiments on the Mistral-7B-Instruct model using the LongBench benchmark show that XQuant, with a fixed $\eta_{1} = 1/6$ and $\eta_{2} = 0.045$, consistently delivers strong performance as well. These results suggest that this fixed set of hyperparameters is robust and generalizes effectively across different datasets. Therefore, task-specific hyperparameter tuning helps but is not necessary, and the method can achieve reliable performance with a fixed, pre-selected set of hyperparameters.
# F Cross-Layer Compression Strategy

Under 2-bit quantization, the values in the KV cache are restricted to the discrete integer set $\{i \in \mathbf{Z} \mid 0 \leq i \leq 3\}$. Therefore, a rounding operation is required after weighted averaging. If standard rounding-to-nearest is applied, the range of $\gamma_0$ can be divided into six disjoint intervals, as summarized in Table 4. The derivation is as follows:

Let $e_0$ and $e_1$ denote the $B$-bit quantized values at the same position in adjacent layers of $\mathbf{X}_{\mathbf{Q}}$. Then the merged value $e_m$ after cross-layer compression is computed as:

$$
\begin{aligned} e_{m} &= \left\lceil \frac{\gamma_{0} e_{0} + \gamma_{1} e_{1}}{\gamma_{0} + \gamma_{1}} \right\rfloor \\ &= \left\lceil \gamma_{0} e_{0} + (1 - \gamma_{0}) e_{1} \right\rfloor \\ &= e_{1} + \left\lceil \gamma_{0} \left(e_{0} - e_{1}\right) \right\rfloor . \end{aligned}
$$
Without loss of generality, assume $e_0 \geq e_1$ and define $\delta = e_0 - e_1 \geq 0$. Then we have:

$$
e_{m} = e_{1} + \left\lceil \gamma_{0} \delta \right\rfloor, \tag{14}
$$

where $\gamma_0 \in [0,1]$ and $\delta \in \mathbf{Z} \cap [0,3]$. Since $\gamma_0 \delta \in [0, \delta]$, the rounding term $\left\lceil \gamma_0 \delta \right\rfloor$ in Eq. 14 can only take $\delta + 1$ discrete values. Let $\left\lceil \gamma_0 \delta \right\rfloor = c$, where $c \in \mathbf{Z} \cap [0, \delta]$. Then:

<table><tr><td>Method</td><td>Bit-width</td><td>Hyperparameters</td><td>HQA</td><td>2Wiki</td><td>MSQ</td><td>TREC</td><td>TQA</td><td>SAMS</td><td>PC</td><td>Avg</td></tr><tr><td>Full Cache</td><td>16</td><td>/</td><td>43.02</td><td>27.10</td><td>18.78</td><td>71.00</td><td>86.23</td><td>42.75</td><td>2.75</td><td>41.66</td></tr><tr><td>AsymKV</td><td>1.5</td><td>/</td><td>37.17</td><td>22.77</td><td>15.76</td><td>70.50</td><td>86.25</td><td>43.44</td><td>3.16</td><td>39.86</td></tr><tr><td>XQuant</td><td>1.38</td><td>Task-specific</td><td>42.90</td><td>26.65</td><td>17.44</td><td>71.50</td><td>84.50</td><td>45.18</td><td>5.71</td><td>41.98</td></tr><tr><td>XQuant</td><td>1.38</td><td>Static</td><td>42.64</td><td>25.16</td><td>16.91</td><td>70.50</td><td>84.50</td><td>42.64</td><td>4.57</td><td>40.99</td></tr></table>

Table 9: Evaluation of different KV cache compression methods using a static hyperparameter setting.

$$
\gamma_{0} \delta \in \left(c - \frac{1}{2},\ c + \frac{1}{2}\right) \cap [0, \delta], \tag{15}
$$

which yields the following constraint for $\gamma_0$ when $\delta > 0$:

$$
\gamma_{0} \in \left(\frac{c - 1/2}{\delta},\ \frac{c + 1/2}{\delta}\right) \cap [0, 1]. \tag{16}
$$
We now enumerate all valid combinations of $\delta$ and $c$ from Equation 16:

- $\delta = 0$: only one possible value exists; this trivial case is omitted.
- $\delta = 1$:
  - $c = 0$: $\gamma_0 \in [0, 1/2)$
  - $c = 1$: $\gamma_0 \in (1/2, 1]$
- $\delta = 2$:
  - $c = 0$: $\gamma_0 \in [0, 1/4)$
  - $c = 1$: $\gamma_0 \in (1/4, 3/4)$
  - $c = 2$: $\gamma_0 \in (3/4, 1]$
- $\delta = 3$:
  - $c = 0$: $\gamma_0 \in [0, 1/6)$
  - $c = 1$: $\gamma_0 \in (1/6, 1/2)$
  - $c = 2$: $\gamma_0 \in (1/2, 5/6)$
  - $c = 3$: $\gamma_0 \in (5/6, 1]$

Collectively, this yields six effective intervals of $\gamma_0$, as summarized in Table 4.
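These intervals can also be recovered numerically. The short sketch below (illustrative only) sweeps $\gamma_0$ over a fine grid and records where $\lceil \gamma_0 \delta \rfloor$ changes, which yields the breakpoints $1/2$, $1/4$, $3/4$, $1/6$, and $5/6$.

```python
import numpy as np

def breakpoints(delta: int, steps: int = 600_000):
    """Values of gamma_0 where round(gamma_0 * delta) changes, i.e. the
    boundaries between intervals that produce different merged values."""
    gammas = np.linspace(0.0, 1.0, steps)
    rounded = np.rint(gammas * delta)
    change = np.nonzero(np.diff(rounded))[0]
    return [round(float(gammas[i + 1]), 3) for i in change]

for delta in (1, 2, 3):
    print(delta, breakpoints(delta))
# delta=1 -> [0.5]; delta=2 -> [0.25, 0.75]; delta=3 -> [0.167, 0.5, 0.833]
```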
# G Comparison with Other Cross-Layer Compression Methods

Several prior works have explored inter-layer redundancy from different perspectives. To eliminate potential confusion, we clarify several key distinctions and highlight our innovations as follows: (a) Most existing methods compute KV caches at a subset of layers. However, these approaches require additional training steps and, in some cases, even full retraining, significantly limiting scalability. In contrast, XQuant is designed as a plug-and-play solution that leverages deeper insights to enable effective redundancy reduction without any additional training. (b) XQuant is the only method that explicitly considers inter-layer redundancy through the lens of quantization. After quantization, the KV cache is decomposed into three components: the quantized cache, zero-points, and scaling factors. We demonstrate that the quantized cache, consisting solely of integers, exhibits substantial inter-layer similarity. Meanwhile, the zero-points and scaling factors, which require minimal storage, are retained individually to preserve per-layer characteristics without being compressed. (c) MiniCache (Liu et al., 2024a) is another training-free method that primarily introduces a retention-recovery mechanism for cache magnitudes and unmergeable tokens. However, such operations are not directly compatible with mainstream open-source KV quantization frameworks. Furthermore, its use of the SLERP function imposes several constraints, making it inapplicable to quantized caches, which fundamentally differs from XQuant.

<table><tr><td>Method</td><td>Bit-width</td><td>2Wiki</td><td>HQA</td></tr><tr><td>Full Cache</td><td>16</td><td>58.20</td><td>61.88</td></tr><tr><td>AsymKV</td><td>1.4</td><td>38.55</td><td>44.69</td></tr><tr><td>XQuant</td><td>1.4</td><td>54.16</td><td>57.44</td></tr></table>

Table 10: Comparison of XQuant with Full Cache and AsymKV on the Qwen2.5-14B model using the LongBench benchmark.
# H Evaluation on Qwen2.5-14B

As shown in Table 10, we evaluated XQuant on a larger-scale and newer-generation model, Qwen2.5-14B (Qwen et al., 2025), using the LongBench benchmark. The results demonstrate that XQuant generalizes well to different models, maintaining a superior trade-off between model performance and compression ratio.

<table><tr><td>Model</td><td>Dataset</td><td>kq</td><td>vq</td><td>km</td><td>vm</td><td>eta1</td><td>eta2</td></tr><tr><td>Mistral-7b-v0.3</td><td>TruthfulQA</td><td>30</td><td>2</td><td>32</td><td>16</td><td>0</td><td>0</td></tr><tr><td rowspan="7">Mistral-7b-instruct-v0.2</td><td>HQA</td><td>30</td><td>2</td><td>32</td><td>16</td><td>1/6</td><td>0.045</td></tr><tr><td>2Wiki</td><td>32</td><td>0</td><td>32</td><td>16</td><td>0</td><td>0.09</td></tr><tr><td>MSQ</td><td>32</td><td>0</td><td>32</td><td>16</td><td>1/6</td><td>0</td></tr><tr><td>TREC</td><td>30</td><td>2</td><td>32</td><td>16</td><td>1/6</td><td>0</td></tr><tr><td>TQA</td><td>30</td><td>2</td><td>32</td><td>16</td><td>1/6</td><td>0.09</td></tr><tr><td>SAMS</td><td>30</td><td>2</td><td>32</td><td>16</td><td>0</td><td>0</td></tr><tr><td>PC</td><td>32</td><td>0</td><td>32</td><td>16</td><td>0</td><td>0.045</td></tr><tr><td>Llama2-7b</td><td>TruthfulQA</td><td>28</td><td>0</td><td>32</td><td>28</td><td>1/3</td><td>0</td></tr><tr><td rowspan="7">Llama2-7b-chat</td><td>HQA</td><td>28</td><td>0</td><td>32</td><td>28</td><td>1/6</td><td>0.045</td></tr><tr><td>2Wiki</td><td>28</td><td>0</td><td>32</td><td>28</td><td>1/3</td><td>0.045</td></tr><tr><td>MSQ</td><td>28</td><td>0</td><td>32</td><td>28</td><td>1/3</td><td>0</td></tr><tr><td>TREC</td><td>32</td><td>0</td><td>32</td><td>20</td><td>1/6</td><td>0</td></tr><tr><td>TQA</td><td>32</td><td>0</td><td>32</td><td>20</td><td>1/6</td><td>0</td></tr><tr><td>SAMS</td><td>32</td><td>0</td><td>32</td><td>20</td><td>0</td><td>0</td></tr><tr><td>PC</td><td>32</td><td>0</td><td>32</td><td>20</td><td>1/3</td><td>0.045</td></tr></table>

Table 11: The configurations of our main experiments.

# I Configurations

The configurations of XQuant in our main experiments are summarized in Table 11.
# J XQuant Pseudo Code

The pseudo code for the whole workflow is provided in Algorithms 1 and 2.

Algorithm 1: XQuant Procedure

```
Input : kq, vq, km, vm, eta[2]
Output: Optimized Quantized Cache

for l <- 0 to 31 do
    // Quantization: from layer km (vm) onward, odd-indexed layers only keep
    // calibrated parameters and reuse the shared cache of the preceding layer.
    if l < km or l mod 2 == 0 then
        KeyCache[l] <- Quantize(X_k^l, 2 if l < kq else 1)
    else
        KeyCache[l] <- PseudoQuantize(X_k^l, 2 if l < kq else 1)
    if l < vm or l mod 2 == 0 then
        ValueCache[l] <- Quantize(X_v^l, 2 if l < vq else 1)
    else
        ValueCache[l] <- PseudoQuantize(X_v^l, 2 if l < vq else 1)

for l <- 0 to 31 do
    // Dequantization: each cache entry is (zero_point, scaling_factor, quantized_cache).
    // Shared layers use their own calibrated parameters with the preceding
    // layer's quantized cache.
    if l < km or l mod 2 == 0 then
        DequantizedKey <- Dequantize(KeyCache[l][0], KeyCache[l][1], KeyCache[l][2])
    else
        DequantizedKey <- Dequantize(KeyCache[l][0], KeyCache[l][1], KeyCache[l-1][2])
    if l < vm or l mod 2 == 0 then
        DequantizedValue <- Dequantize(ValueCache[l][0], ValueCache[l][1], ValueCache[l][2])
    else
        DequantizedValue <- Dequantize(ValueCache[l][0], ValueCache[l][1], ValueCache[l-1][2])
```

Algorithm 2: Supporting Functions

```
Function PseudoQuantize(X, n_bits):
    zero_point <- min(X)                                     // minimum value of X
    scaling_factor <- (max(X) - min(X)) / (2^n_bits - 1)     // scaling factor
    (z_cali, s_cali) <- Calibrate(zero_point, scaling_factor, n_bits)
    // No quantized cache is stored: this layer reuses the shared cache of its group.
    return (z_cali, s_cali, None)

Function Quantize(X, n_bits):
    zero_point <- min(X)
    scaling_factor <- (max(X) - min(X)) / (2^n_bits - 1)
    quantized_cache <- round((X - zero_point) / scaling_factor)   // round to nearest
    (z_cali, s_cali) <- Calibrate(zero_point, scaling_factor, n_bits)
    return (z_cali, s_cali, quantized_cache)

Function Dequantize(zero_point, scaling_factor, quantized_cache):
    return quantized_cache * scaling_factor + zero_point     // reconstruct values

Function Calibrate(zero_point, scaling_factor, n_bits):
    zero_point_cali <- zero_point + eta[n_bits] * scaling_factor * (2^n_bits - 1)   // Eq. 12
    scaling_factor_cali <- scaling_factor * (1 - 2 * eta[n_bits])                    // Eq. 12
    return (zero_point_cali, scaling_factor_cali)
```

EMNLP/2025/XQuant_ Achieving Ultra-Low Bit KV Cache Quantization with Cross-Layer Compression/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fd03eb461da96f7d7ff8f26568ebbd09ccf7680fae9006d8f77f41a6d8ea91a
+size 638355

EMNLP/2025/XQuant_ Achieving Ultra-Low Bit KV Cache Quantization with Cross-Layer Compression/layout.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a9daaad4c1eec02d500e7a0d859ad693bf430aece7ee668d770869a9f71ac052
+size 595151

EMNLP/2025/You Are What You Train_ Effects of Data Composition on Training Context-aware Machine Translation Models/ac268ef2-2462-4dbe-bc85-23aac535bec5_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1564c134328461cbd044305fe8a5a2129c692ab5b71eeb55580b1c5713c0ce97
+size 167253

EMNLP/2025/You Are What You Train_ Effects of Data Composition on Training Context-aware Machine Translation Models/ac268ef2-2462-4dbe-bc85-23aac535bec5_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0579a4d2020bcfdcca3f6d12e284e0fa57745e01dea1904b5f4b73fc8c116633
+size 192716

EMNLP/2025/You Are What You Train_ Effects of Data Composition on Training Context-aware Machine Translation Models/ac268ef2-2462-4dbe-bc85-23aac535bec5_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2fd5b666d12a0316f197e4abb40a2f54081911d3fadab7d208c0ddf5ce95276
+size 3333018

EMNLP/2025/You Are What You Train_ Effects of Data Composition on Training Context-aware Machine Translation Models/full.md
ADDED
The diff for this file is too large to render. See raw diff

EMNLP/2025/You Are What You Train_ Effects of Data Composition on Training Context-aware Machine Translation Models/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9fdc27f0fa54974956aed32f741d4f0461080ca2fd683c8f54841b92b3a1405
+size 2758485

EMNLP/2025/You Are What You Train_ Effects of Data Composition on Training Context-aware Machine Translation Models/layout.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34556f16aab1e489efe74e927a910e2539184ea7e4fcd724052899a65bb1bcc8
+size 583347

EMNLP/2025/Your Language Model Can Secretly Write Like Humans_ Contrastive Paraphrase Attacks on LLM-Generated Text Detectors/3a804804-ac4a-4762-a147-7d8c694ee698_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6431e649232c7275f5ca91947c90150dd500d8040aa518cc2f42734ccba74a8
+size 128342

EMNLP/2025/Your Language Model Can Secretly Write Like Humans_ Contrastive Paraphrase Attacks on LLM-Generated Text Detectors/3a804804-ac4a-4762-a147-7d8c694ee698_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:07a66434f5eec8f05247fe7f559b3b3eb0f5573b6a05dd21e7e04fd0446a1305
+size 154387