SlowGuess committed
Commit 6be8c08 · verified · 1 Parent(s): 88c278a

Add Batch 05cb3606-ea9c-4f6d-95ba-64bfa72a0028

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. 3automaticalignmentframeworkforattributedtextgeneration/e6af0897-3b51-44d3-b28d-417088cd8005_content_list.json +3 -0
  2. 3automaticalignmentframeworkforattributedtextgeneration/e6af0897-3b51-44d3-b28d-417088cd8005_model.json +3 -0
  3. 3automaticalignmentframeworkforattributedtextgeneration/e6af0897-3b51-44d3-b28d-417088cd8005_origin.pdf +3 -0
  4. 3automaticalignmentframeworkforattributedtextgeneration/full.md +371 -0
  5. 3automaticalignmentframeworkforattributedtextgeneration/images.zip +3 -0
  6. 3automaticalignmentframeworkforattributedtextgeneration/layout.json +3 -0
  7. acceleratingdensellmsvial0regularizedmixtureofexperts/cd656b68-876d-4f33-86db-1ecf995c8724_content_list.json +3 -0
  8. acceleratingdensellmsvial0regularizedmixtureofexperts/cd656b68-876d-4f33-86db-1ecf995c8724_model.json +3 -0
  9. acceleratingdensellmsvial0regularizedmixtureofexperts/cd656b68-876d-4f33-86db-1ecf995c8724_origin.pdf +3 -0
  10. acceleratingdensellmsvial0regularizedmixtureofexperts/full.md +311 -0
  11. acceleratingdensellmsvial0regularizedmixtureofexperts/images.zip +3 -0
  12. acceleratingdensellmsvial0regularizedmixtureofexperts/layout.json +3 -0
  13. acousticindividualidentificationofwhitefacedcapuchinmonkeysusingjointmultispeciesembeddings/b9706472-0d6b-45a4-874e-38347f9a5e25_content_list.json +3 -0
  14. acousticindividualidentificationofwhitefacedcapuchinmonkeysusingjointmultispeciesembeddings/b9706472-0d6b-45a4-874e-38347f9a5e25_model.json +3 -0
  15. acousticindividualidentificationofwhitefacedcapuchinmonkeysusingjointmultispeciesembeddings/b9706472-0d6b-45a4-874e-38347f9a5e25_origin.pdf +3 -0
  16. acousticindividualidentificationofwhitefacedcapuchinmonkeysusingjointmultispeciesembeddings/full.md +221 -0
  17. acousticindividualidentificationofwhitefacedcapuchinmonkeysusingjointmultispeciesembeddings/images.zip +3 -0
  18. acousticindividualidentificationofwhitefacedcapuchinmonkeysusingjointmultispeciesembeddings/layout.json +3 -0
  19. advancingsequentialnumericalpredictioninautoregressivemodels/8cc22587-4499-48a8-a0a6-6fa2c14cada3_content_list.json +3 -0
  20. advancingsequentialnumericalpredictioninautoregressivemodels/8cc22587-4499-48a8-a0a6-6fa2c14cada3_model.json +3 -0
  21. advancingsequentialnumericalpredictioninautoregressivemodels/8cc22587-4499-48a8-a0a6-6fa2c14cada3_origin.pdf +3 -0
  22. advancingsequentialnumericalpredictioninautoregressivemodels/full.md +399 -0
  23. advancingsequentialnumericalpredictioninautoregressivemodels/images.zip +3 -0
  24. advancingsequentialnumericalpredictioninautoregressivemodels/layout.json +3 -0
  25. alittlehumandatagoesalongway/71ea7d36-d72e-452c-8999-3eee57151b03_content_list.json +3 -0
  26. alittlehumandatagoesalongway/71ea7d36-d72e-452c-8999-3eee57151b03_model.json +3 -0
  27. alittlehumandatagoesalongway/71ea7d36-d72e-452c-8999-3eee57151b03_origin.pdf +3 -0
  28. alittlehumandatagoesalongway/full.md +509 -0
  29. alittlehumandatagoesalongway/images.zip +3 -0
  30. alittlehumandatagoesalongway/layout.json +3 -0
  31. ameasureofthesystemdependenceofautomatedmetrics/51648182-b43a-4356-8332-6653ee9be2f5_content_list.json +3 -0
  32. ameasureofthesystemdependenceofautomatedmetrics/51648182-b43a-4356-8332-6653ee9be2f5_model.json +3 -0
  33. ameasureofthesystemdependenceofautomatedmetrics/51648182-b43a-4356-8332-6653ee9be2f5_origin.pdf +3 -0
  34. ameasureofthesystemdependenceofautomatedmetrics/full.md +227 -0
  35. ameasureofthesystemdependenceofautomatedmetrics/images.zip +3 -0
  36. ameasureofthesystemdependenceofautomatedmetrics/layout.json +3 -0
  37. aneffectiveincorporatingheterogeneousknowledgecurriculumlearningforsequencelabeling/2b3f8d15-5a21-43f0-bb98-02930d2df891_content_list.json +3 -0
  38. aneffectiveincorporatingheterogeneousknowledgecurriculumlearningforsequencelabeling/2b3f8d15-5a21-43f0-bb98-02930d2df891_model.json +3 -0
  39. aneffectiveincorporatingheterogeneousknowledgecurriculumlearningforsequencelabeling/2b3f8d15-5a21-43f0-bb98-02930d2df891_origin.pdf +3 -0
  40. aneffectiveincorporatingheterogeneousknowledgecurriculumlearningforsequencelabeling/full.md +321 -0
  41. aneffectiveincorporatingheterogeneousknowledgecurriculumlearningforsequencelabeling/images.zip +3 -0
  42. aneffectiveincorporatingheterogeneousknowledgecurriculumlearningforsequencelabeling/layout.json +3 -0
  43. areoptimalalgorithmsstilloptimalrethinkingsortinginllmbasedpairwiserankingwithbatchingandcaching/d067ed0d-b932-4f95-a85a-ef137c47e283_content_list.json +3 -0
  44. areoptimalalgorithmsstilloptimalrethinkingsortinginllmbasedpairwiserankingwithbatchingandcaching/d067ed0d-b932-4f95-a85a-ef137c47e283_model.json +3 -0
  45. areoptimalalgorithmsstilloptimalrethinkingsortinginllmbasedpairwiserankingwithbatchingandcaching/d067ed0d-b932-4f95-a85a-ef137c47e283_origin.pdf +3 -0
  46. areoptimalalgorithmsstilloptimalrethinkingsortinginllmbasedpairwiserankingwithbatchingandcaching/full.md +174 -0
  47. areoptimalalgorithmsstilloptimalrethinkingsortinginllmbasedpairwiserankingwithbatchingandcaching/images.zip +3 -0
  48. areoptimalalgorithmsstilloptimalrethinkingsortinginllmbasedpairwiserankingwithbatchingandcaching/layout.json +3 -0
  49. automaticdetectionofdyslexiabasedoneyemovementsduringreadinginrussian/fb2eb2b3-6585-4534-8f2c-29000957e5af_content_list.json +3 -0
  50. automaticdetectionofdyslexiabasedoneyemovementsduringreadinginrussian/fb2eb2b3-6585-4534-8f2c-29000957e5af_model.json +3 -0
3automaticalignmentframeworkforattributedtextgeneration/e6af0897-3b51-44d3-b28d-417088cd8005_content_list.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1a7d863b5e0f90de65fdb2a4a39687863b0f3b6d8e2808820b52c8dbe55a04e4
3
+ size 102758
3automaticalignmentframeworkforattributedtextgeneration/e6af0897-3b51-44d3-b28d-417088cd8005_model.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:87db8a329a678cc83ebee04af97f8bae67dd72c5d08bd17f24f5182fad6d1039
3
+ size 120791
3automaticalignmentframeworkforattributedtextgeneration/e6af0897-3b51-44d3-b28d-417088cd8005_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e8a36342a1f625507f32085131f31f4976aa049b3f21ad878dae01e3cbc39661
3
+ size 1084983
3automaticalignmentframeworkforattributedtextgeneration/full.md ADDED
@@ -0,0 +1,371 @@
1
+ # $\mathcal{A}^3$: Automatic Alignment Framework for Attributed Text Generation
2
+
3
+ Yue Wang$^{1,2*}$, Haoke Zhang$^{1,2*}$, Juntao Li$^{1,2\dagger}$, Jinxiong Chang$^{3}$, Min Zhang$^{1,2}$
4
+
5
+ $^{1}$ School of Computer Science and Technology, Soochow University
6
+
7
+ $^{2}$ Key Laboratory of Data Intelligence and Advanced Computing, Soochow University
8
+
9
+ $^{3}$Ant Group
10
+
11
+ {ywangnlp,hkzhangnlp}@stu.suda.edu.cn
12
+
13
+ {ljt,minzhang}@suda.edu.cn
14
+
15
+ # Abstract
16
+
17
+ Attributed text generation aims to enhance the reliability of content generated from large language models by providing citations for each claim, which thereby enables users to easily verify the correctness of the responses. However, the scarcity of high-quality training samples presents a significant challenge in aligning large language models to generate texts with citations, revealing considerable room for improvement in existing attribution systems. Besides, existing approaches to aligning large language models to follow user instructions can lead to an undue emphasis on irrelevant documents, which in turn reduces the quality of responses. To address the above problems, we propose the Automatic Alignment Framework for Attributed Text Generation $(\mathcal{A}^3)$, a novel framework designed to automatically generate high-quality attributed query-response pairs for both the supervised fine-tuning and preference optimization stages without human annotation. With the help of $\mathcal{A}^3$, Mistral-7B can achieve a citation recall of 84.4 and a citation precision of 87.0 on ASQA, which notably surpasses GPT-4's citation recall of 73.0 and precision of 76.5.
18
+
19
+ # 1 Introduction
20
+
21
+ Recently, due to the convenience of natural language interaction, an increasing number of users prefer to employ Large Language Models (LLMs) for their information-seeking needs. However, despite the abundant knowledge obtained during pretraining, the outputs of LLMs can sometimes deviate from user instructions and contain hallucinations, significantly constraining their ability to satisfy information-seeking needs (Zhang et al., 2023b). Furthermore, due to the lack of clear attributions, it is also difficult to check the correctness of content generated from LLMs (Asai et al., 2024b). Therefore, to better satisfy information-seeking needs, attributed text generation has recently gained significant attention from both academia and industry; it aims to enhance the reliability of generated content by providing citations for each claim (Li et al., 2023a).
24
+
25
+ Despite the significant importance of attributed text generation, existing open-source attribution systems exhibit considerable room for improvement. In the era of LLMs, constructing an attribution system typically involves a two-step process (Gao et al., 2023b; Malaviya et al., 2023). Firstly, an external retriever is used to get relevant passages. Subsequently, these passages, along with the user query, are incorporated into carefully designed templates. Finally, these templates serve as the input of LLMs and guide them to generate responses with proper citations. However, since existing LLMs are designed to follow user instructions, this approach can lead to models generating content based on irrelevant retrieved passages, thus undermining the quality of the attribution. Moreover, even if provided with relevant passages, it is still challenging to rely only on the task generalization capabilities of LLMs to follow user instructions and generate responses with correct citations that align with the question (Gao et al., 2023b). Overall, the lack of high-quality open-source data hinders the development of attributed text generation.
26
+
27
+ The challenge of obtaining high-quality open-source attributed text generation training data stems from several reasons. Firstly, the high cost of human annotation makes large-scale, human-annotated datasets unaffordable, which results in the necessity of automatic data generation. Besides, while existing commercial attribution systems, such as Bing Chat and perplexity.ai, have achieved success, the open-source community can
28
+
29
+ ![](images/d03c18167a96a0af804e6b4ca48da68c9d02532a6f9dca6d05a343e551da85f0.jpg)
30
+
31
+ ![](images/b88a3327a708f3231c767fcf177ce96e186fa14376154c3e63ffdb1c98c466f2.jpg)
32
+ Figure 1: An illustration of our $\mathcal{A}^3$ framework, including two main processes: Data Construction and Model Training. During the data construction process, we first introduce an (a) entity matching based document clustering strategy to select multiple interrelated documents. Next, we proceed with (b) Q&A Pair Extraction & Noisy Document Augmentation using the selected documents to generate Q&A pairs that serve as the SFT training data. Following this step, (c) Automatic Dispreferred Response Construction involves using the responses from (b) as preferred responses and altering these responses by removing, changing, or adding citations, or by emphasizing irrelevant documents to construct dispreferred responses. During the model training process, we utilize the constructed data to perform (I) Supervised Fine-tuning and (II) Preference Optimization.
33
+
34
+ not benefit from these commercial systems. Specifically, since Bing Chat does not provide open API access and the responses from the Perplexity.ai API do not contain citations, we cannot use these commercial systems to generate attributed query-response pairs as training data. Besides, due to the inability to obtain results of commercial systems on academic benchmarks, we cannot directly compare our models with commercial systems like Bing Chat and Perplexity.ai, which also hinders the development of this task. Finally, despite their strong general ability, the performance of existing LLMs (e.g., GPT-4) is far from satisfactory, which leads to poor results when directly using existing LLMs to generate attributed text generation data (Kamalloo et al., 2023).
35
+
36
+ To tackle the aforementioned challenges, we introduce a novel framework, the Automatic Alignment Framework for Attributed Text Generation $(\mathcal{A}^3)$ , which leverages underperforming attributed LLMs to automatically generate high-quality attributed text generation data without the need for human-annotated datasets. To achieve this goal, the $\mathcal{A}^3$ framework breaks down the
37
+
38
+ difficult attributed text generation task into simple, solvable ones, e.g., document clustering, text summarization, and text entailment. Specifically, the $\mathcal{A}^3$ framework comprises two main processes: data construction and model training. During the data construction process, we start by employing both an entity-matching method and an embedding-based method to cluster multiple documents on relevant topics. Then, we utilize LLMs to construct Q&A pairs based on these interrelated documents. To reduce the effect of irrelevant documents, we introduce a noisy document augmentation strategy for constructing supervised fine-tuning data, alongside four strategies to generate dispreferred responses for the preference optimization stage. Finally, these constructed data are utilized in the model training process.
39
+
40
+ In conclusion, our work has the following contributions:
41
+
42
+ - We introduce the $\mathcal{A}^3$ framework, which can automatically generate high-quality training samples for attributed text generation without requiring human annotations;
43
+
44
+ - Leveraging $\mathcal{A}^3$ , the Mistral-7B model achieves a citation recall of 84.4 and precision of 87.0 on ASQA, markedly outperforming GPT-4, which achieves a citation recall of 73.0 and precision of 76.5;
45
+ - Our experimental results also demonstrate the effectiveness of our proposed framework in reducing the effects of noisy documents and avoiding irrelevant citations.
46
+
47
+ # 2 Automatic Alignment Framework for Attributed Text Generation
48
+
49
+ # 2.1 Attributed Text Generation Task Setup
50
+
51
+ In our work, the passages $P_{i}$ are sourced from Wikipedia and retrieved by an external retriever according to the query $Q_{i}$ . Each passage $p_{i}$ consists of one or more sentences and is 100 words in length. Given a query $Q_{i}$ and the corresponding retrieved passages $P_{i} = \{p_{1}, p_{2}, \ldots, p_{n}\}$ , attributed text generation aims to generate a response $R_{i}$ along with citations $C_{i} = \{c_{1}, c_{2}, \ldots, c_{m}\}$ . Each citation within $C_{i}$ is an index of one of the retrieved passages $p_{i}$ . We show an example pair in Table 1.
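For concreteness, a training record $(Q_i, P_i, R_i, C_i)$ can be held in a small structure like the following sketch; the field names and the citation-extraction helper are our own illustration, not an interface defined by the paper.

```python
import re
from dataclasses import dataclass
from typing import List

@dataclass
class AttributedSample:
    query: str            # Q_i
    passages: List[str]   # P_i = {p_1, ..., p_n}, ~100-word retrieved passages
    response: str         # R_i, containing inline citation markers such as [1][2]
    citations: List[int]  # C_i, 1-based indices into `passages`

def extract_citations(response: str) -> List[int]:
    """Collect the passage indices cited as [k] in a response."""
    return sorted({int(k) for k in re.findall(r"\[(\d+)\]", response)})

sample = AttributedSample(
    query="When did the Battle of Rennell Island occur?",
    passages=["The Battle of Rennell Island took place on 29-30 January 1943 ..."],
    response="The battle occurred on 29-30 January 1943 [1].",
    citations=[1],
)
assert extract_citations(sample.response) == sample.citations
```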
52
+
53
+ # 2.2 Overview of Our Framework
54
+
55
+ To construct an effective attribution system at a low cost, we introduce the framework $\mathcal{A}^3$ , which consists of a data construction process and a model training process. To achieve this goal, the $\mathcal{A}^3$ framework breaks down the complex task of attributed text generation into simpler, more manageable tasks, such as document clustering, text summarization, and text entailment. An illustration of our framework is shown in Figure 1. In the data construction process, we focus on generating a training set $D_{train} = \{d_i\}$ for attributed text generation, where $d_i$ represents $(Q_i, P_i, R_i, C_i)$ . Due to the high cost of human annotation, our framework aims to automatically generate $D = \{d_i\}$ based on existing corpus $D_{corpus} = \{p_1, p_2, \ldots, p_N\}$ (such as Wikipedia), where $N$ denotes the number of passages in $D_{corpus}$ . Specifically, during data construction, we generate $(Q_i, R_i, C_i)$ based on the $P_i$ . We introduce quite a few strategies to ensure the generated data quality for the Supervised Fine-Tuning (SFT) and Preference Optimization (PO) stages. Next, we introduce the data construction process of both the SFT and PO stages and the model training process. In this work, both 'citation' and 'evidence' refer to source references supporting claims in generated responses, while 'doc
56
+
57
+ ument' and 'passage' both denote text segments within the corpus.
58
+
59
+ # 2.3 Data Construction for Supervised Fine-tuning
60
+
61
+ In the SFT stage, to generate an SFT training sample $d_{i}$ , we first select interrelated documents $P_{i} = \{p_{1}, p_{2}, \ldots, p_{n}\}$ from $D_{\text{corpus}}$ with the use of an entity matching based document clustering strategy, where $n$ denotes the number of selected documents. Then, we use LLMs to generate $(Q_{i}, R_{i}, C_{i})$ based on the selected $P_{i}$ . Finally, a data filtering strategy and a noisy document augmentation strategy are introduced to enhance the SFT data quality.
62
+
63
+ # 2.3.1 Interrelated Document Selection
64
+
65
+ Interrelated document selection aims to generate coherent questions with multi-source citations, rather than artificially combining unrelated passages. There are two alternatives for interrelated document selection. The first and simpler one is that, if the number of selected documents is $n = 1$, we pick one passage $p_i$ from $D_{corpus}$ at random. For the other, we need to select two or more documents from $D_{corpus}$ for $(Q_i, P_i, R_i, C_i)$ pair construction; randomly selected documents, however, are usually not interrelated. Therefore, we introduce an entity matching based document clustering strategy to select interrelated documents. Specifically, we use WikiGraphs (Wang et al., 2021) as $D_{corpus}$. WikiGraphs contains a large number of entity edges $Edge_i = (entity_a, entity_b, r)$, each representing that $entity_a$ has relation $r$ to $entity_b$. Besides, $entity_a$ and $entity_b$ link to $P_a$ and $P_b$ respectively, where $P_a$ and $P_b$ represent Wikipedia passages $\{p_1, p_2, \ldots, p_n\}$. Focusing on a specific entity, we gather a set of Wikipedia passages linked to entities that have a relationship $r$ with our target entity. To further uncover interconnected paragraphs within these passages, we seek out sections that feature more than two entities as interrelated documents $P_i = \{p_1, p_2, \ldots, p_n\}$. Finally, based on our data generation budget, we randomly select 31,823 entity triples from WikiGraphs.
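A rough sketch of this clustering step, under the assumption that WikiGraphs edges are available as (entity_a, entity_b, relation) triples and that each entity maps to its Wikipedia passages, might look as follows; the function name and the entity-count threshold are illustrative only.

```python
from typing import Dict, List, Tuple

Edge = Tuple[str, str, str]  # (entity_a, entity_b, relation)

def select_interrelated(edges: List[Edge],
                        entity_passages: Dict[str, List[str]],
                        target: str,
                        min_entities: int = 2) -> List[str]:
    """Gather passages linked to entities related to `target`, keeping only
    paragraphs that mention more than `min_entities` of those entities as a
    rough proxy for interrelatedness."""
    related = {b for a, b, _ in edges if a == target} | {a for a, b, _ in edges if b == target}
    pool = related | {target}
    selected = []
    for entity in pool:
        for passage in entity_passages.get(entity, []):
            mentioned = sum(1 for e in pool if e in passage)
            if mentioned > min_entities:
                selected.append(passage)
    return selected
```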
66
+
67
+ # 2.3.2 Q&A Pair Extraction
68
+
69
+ After selecting interrelated documents $P_{i} = \{p_{1}, p_{2}, \ldots, p_{n}\}$ , we use gpt-4-1106-preview to generate attributed Q&A pair $(Q_{i}, R_{i}, C_{i})$ . If the number of selected documents $n = 1$ , recognizing
70
+
71
+ <table><tr><td>Query:</td><td>When did the Battle of Rennell Island occur and why is it significant in the context of World War II&#x27;s Guadalcanal campaign?</td></tr><tr><td>Retrieved Passages:</td><td>Document [1] The Battle of Rennell Island took place on 29 – 30 January 1943 . . .Document [2] ...it was the last major naval engagement between the United States Navy and the Imperial Japanese Navy during the Guadalcanal campaign of World War II . .Document [3] . .Document [4] == Goalball == Rzepecki is a goalball player , .Document [5] Aviva Premiership rugby union teams are based in London , . .</td></tr><tr><td>Preferred Response:</td><td>The Battle of Rennell Island occurred on 29-30 January 1943 and is significant because it was the last major naval engagement between the United States Navy and the Imperial Japanese Navy during the Guadalcanal campaign of World War II [1][2][3].</td></tr><tr><td>Dispreferred Response:</td><td>The Battle of Rennell Island occurred on 29-30 January 1943 and is significant because it was the last major naval engagement between the United States Navy and the Imperial Japanese Navy during the Guadalcanal campaign of World War II [1][2][3][5].</td></tr></table>
72
+
73
+ Table 1: An example of our generated dataset. In real-world scenarios, retrieval systems inevitably retrieve irrelevant documents. Therefore, to simulate real-world scenarios and reduce the effect of irrelevant documents, we select random passages as noisy retrieved passages, which are shown in gray. Red marks incorrect modifications introduced by our preference data construction strategies.
74
+
75
+ that generating a question for a given response is more straightforward than answering a question, we use LLMs to generate a summary for this document $p_1$ and treat this summary as response $R_i$ . Subsequently, we task the LLMs with generating question $Q_i$ based on $R_i$ . Afterward, we add the citation $c_1$ linked with $p_1$ as $C_i$ . If the number of selected documents $n \geq 2$ , to improve the speed of data construction, we generate multiple attributed Q&A pairs based on $P_i$ . The model will output multiple pairs of Q&A, and we extract them using regular expressions. We show the prompt templates in the appendix. Each triple corresponds to one document cluster, with half of the clusters containing a single document and the other half containing multiple documents. We generate one QA-pair for one document cluster. Therefore, we initially extract a total of 31,823 samples.
76
+
77
+ # 2.3.3 Data Filtering
78
+
79
+ To conduct data filtering, we remove generated samples with low citation quality. We choose the citation quality criterion introduced from ALCE (Gao et al., 2023b) to evaluate the citation quality for each generated sample $(Q_{i},P_{i},R_{i},C_{i})$ . Specifically, each response is divided into multiple statements and the NLI model<sup>4</sup> is used to determine whether each statement is fully supportive or not fully supportive. Citation recall is a metric to evaluate whether the cited passages fully support the content of the response, which is calculated by the average support ratio of all the claims in the response. Citation precision is employed to identify irrelevant citations. A citation becomes irrelevant
80
+
81
+ to a statement when it fails to substantiate the statement, yet the remaining citations continue to support the statement without it. Citation precision is calculated by the average relevant ratio of all the citations in the response. Finally, we remove samples whose Citation F1 is below a threshold, which is computed as follows:
82
+
83
+ $$
84
+ \text{Citation F1} = 2 \times \frac{\text{Citation Precision} \times \text{Citation Recall}}{\text{Citation Precision} + \text{Citation Recall}}
85
+ $$
86
+
87
+ In our implementation, we set the filtering threshold to 0.9 for data filtering. Finally, after filtering, we keep 13,225 samples.
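The filtering criterion can be sketched as below; `nli_entails` stands in for the NLI model (e.g., t5-xxl-true-nli-mixture), and the precision rule is a simplified paraphrase of the ALCE definition rather than its exact implementation.

```python
from typing import Callable, List

def citation_f1(statements: List[str],
                citations: List[List[str]],
                nli_entails: Callable[[str, str], bool]) -> float:
    """Citation recall: share of statements fully supported by their cited passages.
    Citation precision: share of citations that are not irrelevant (a citation is
    irrelevant if it alone does not support the statement while the rest still do)."""
    supported = [nli_entails(" ".join(cits), s) for s, cits in zip(statements, citations)]
    recall = sum(supported) / max(len(statements), 1)
    relevant = total = 0
    for s, cits in zip(statements, citations):
        for i, c in enumerate(cits):
            total += 1
            rest = " ".join(x for j, x in enumerate(cits) if j != i)
            irrelevant = (not nli_entails(c, s)) and bool(rest) and nli_entails(rest, s)
            relevant += 0 if irrelevant else 1
    precision = relevant / max(total, 1)
    return 0.0 if precision + recall == 0 else 2 * precision * recall / (precision + recall)

def keep_sample(statements, citations, nli_entails, threshold: float = 0.9) -> bool:
    """Drop a generated sample whose Citation F1 falls below the threshold."""
    return citation_f1(statements, citations, nli_entails) >= threshold
```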
88
+
89
+ # 2.3.4 Noisy Document Augmentation
90
+
91
+ Due to the limitations of retrieval systems, it is difficult for the retrieved passages to avoid containing some irrelevant information. To address this challenge, we introduce a noisy document augmentation strategy. Specifically, we first select some random documents from $D_{\text{corpus}}$ as irrelevant documents and add them to the document set $\{p_1, p_2, \ldots, p_n\}$. Then we shuffle the order of the final document set $P_i$ and update $\{c_1, c_2, \ldots, c_m\}$ to ensure they still link to the correct passages.
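A minimal sketch of this augmentation, assuming 1-based citation indices and a flat list of corpus passages (the helper name and defaults are ours):

```python
import random
from typing import List, Tuple

def augment_with_noise(passages: List[str],
                       citations: List[int],
                       corpus: List[str],
                       num_noise: int = 2,
                       seed: int = 0) -> Tuple[List[str], List[int]]:
    """Add randomly drawn noisy passages, shuffle the passage list, and remap
    the 1-based citation indices so they still point at the original passages."""
    rng = random.Random(seed)
    mixed = passages + rng.sample(corpus, num_noise)
    order = list(range(len(mixed)))
    rng.shuffle(order)
    shuffled = [mixed[i] for i in order]
    new_index = {old: new + 1 for new, old in enumerate(order)}  # old 0-based -> new 1-based
    remapped = [new_index[c - 1] for c in citations]
    return shuffled, remapped
```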
92
+
93
+ # 2.3.5 Extending to Different Data Sources
94
+
95
+ Given that the entity-based document clustering method is restricted to data with structured information, we also leverage data sources lacking structured information for data generation. This is to showcase the flexible extensibility of our framework. Specifically, we use the ArXiver dataset as data source. This dataset encompasses 63,357
96
+
97
+ arXiv papers published from January 2023 to October 2023. To obtain interrelated documents, we utilize the gte-modernbert-base model$^{6}$ to compute the relevance between each pair of documents within the same category. If two documents exhibit the highest mutual relevance, we retain this pair. In the implementation process, to ensure that an adequate number of relevant documents can be identified for each category, we select categories containing no fewer than 500 documents. In total, 34 such categories are identified. To guarantee data diversity, we limit the retention to a maximum of 200 category pairs per category. Ultimately, a total of 5,171 pairs of interrelated documents are obtained. Subsequently, due to the excessive length of the documents, we use regular expressions to extract crucial information from each document, such as claims, conclusions, or theorems from the papers. Then, we calculate the overlap and retain the most overlapping strings from each pair of documents. During the Q&A Pair Extraction stage, taking into account the balance between cost and performance, we use Doubao-1.5-Pro-32k for data generation, resulting in a total of 19,924 QA pairs. In the data filtering stage, we utilize the t5-xxl-true-nli-mixture model$^{7}$ and remove samples with a citation F1 score below 0.9. Finally, 1,275 samples are retained.
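The mutual-relevance pairing can be sketched as follows, assuming the documents of one category have already been embedded (e.g., with gte-modernbert-base) and L2-normalized; the function is illustrative, not the authors' code.

```python
import numpy as np

def mutual_top1_pairs(embeddings: np.ndarray) -> list:
    """Keep a pair (i, j) only when each document is the other's most similar neighbor."""
    sims = embeddings @ embeddings.T
    np.fill_diagonal(sims, -np.inf)   # ignore self-similarity
    top1 = sims.argmax(axis=1)
    pairs = []
    for i, j in enumerate(top1):
        if top1[j] == i and i < j:    # mutual best match, deduplicated
            pairs.append((i, int(j)))
    return pairs
```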
98
+
99
+ # 2.4 Data Construction for Preference Optimization
100
+
101
+ In the PO stage, we use $R_{i}$ as the preferred response and design four strategies to automatically generate a corresponding dispreferred response $DR_{i}$ with incorrect citations $DC_{i}$ (a sketch of the citation-perturbation strategies is given after the list):
102
+
103
+ - Random Citation Adding: To prevent LLMs from including excessive incorrect citations, we add some citations randomly to construct a dispreferred response;
104
+ - Random Citation Removing: We remove some golden citations to construct a dispreferred response, which aims to avoid missing key citations;
105
+ - Random Citation Changing: To avoid referencing irrelevant documents, we substitute some key citations with random ones as a dispreferred response;
106
+
107
+ - Irrelevant Document Focusing: To discourage LLMs from focusing on irrelevant documents, we first remove documents related to responses and retain only irrelevant documents. Then, we use LLMs to answer the given questions based on these irrelevant documents, employing these answers as dispreferred responses.
108
+
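The first three strategies amount to simple perturbations of the citation markers in a preferred response; a hedged sketch is below (the fourth strategy needs an LLM call and is only noted in a comment; the function name and modes are our own).

```python
import random
import re

def perturb_citations(response: str, num_passages: int, mode: str, seed: int = 0) -> str:
    """Build a dispreferred response by corrupting the [k] citation markers of a
    preferred one. `mode` is one of {"add", "remove", "change"}; the fourth strategy
    (answering from irrelevant documents only) requires an LLM and is omitted here."""
    rng = random.Random(seed)
    cited = re.findall(r"\[(\d+)\]", response)
    uncited = [i for i in range(1, num_passages + 1) if str(i) not in cited]
    if mode == "add" and uncited:
        return response.rstrip(". ") + f" [{rng.choice(uncited)}]."
    if mode == "remove" and cited:
        return response.replace(f"[{rng.choice(cited)}]", "", 1)
    if mode == "change" and cited and uncited:
        return response.replace(f"[{rng.choice(cited)}]", f"[{rng.choice(uncited)}]", 1)
    return response
```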
109
+ # 3 Experiments
110
+
111
+ # 3.1 Backbone and Baselines
112
+
113
+ To confirm the generalization of our framework, we select three families of LLMs as backbone models: LLaMA2 (Touvron et al., 2023), LLaMA3 (Grattafori et al., 2024), and Mistral (Jiang et al., 2023). For the LLaMA2 family, we select two models with different sizes: LLaMA2-7B and LLaMA2-13B. For the Mistral family, we select Mistral-7B. We use ChatGPT (gpt-3.5-turbo-0301) with a 4K context window for most main experiments and ablations. We also report results with ChatGPT-16K (gpt-3.5-turbo-16k-0613) and GPT-4 (gpt-4-0613; 8K context window). For open-source models, we evaluate LLaMA and its chat versions. Besides, we also compare our framework with Hagrid (Kamalloo et al., 2023), Self-RAG (Asai et al., 2024a), and CaLM (Hsu et al., 2024). Hagrid (Kamalloo et al., 2023) is an SFT dataset for attributed text generation, which consists of 3,214 samples; Self-RAG (Asai et al., 2024a) aims to make LLMs decide whether they need to retrieve documents through self-reflection; CaLM (Hsu et al., 2024) empowers smaller LMs to validate the outputs of larger LMs.
114
+
115
+ # 3.2 Benchmark
116
+
117
+ To fully confirm the effectiveness, we evaluate our proposed framework and all the baselines on one short-form QA dataset PopQA (Mallen et al., 2023), two long-form QA datasets ALCE-ASQA and ALCE-ELI5 (Gao et al., 2023b), and FanOutQA (Zhu et al., 2024). POPQA is constructed from Wikidata knowledge triples spanning 16 relationship types, converted into natural-language questions using manually annotated templates to ensure entity-centric factual coverage. ASQA and ELI5 are long-form, open-ended QA datasets. ASQA is built upon the AMBIGQA dataset (Min et al., 2020), and augments questions with crowdsourced long-form answers that synthesize multiple valid short answers into coherent
118
+
119
+ <table><tr><td rowspan="2">Method</td><td rowspan="2">Num.</td><td colspan="3">ASQA</td><td colspan="3">ELI5</td><td rowspan="2">PopQA Correct Acc.</td><td rowspan="2">FanOutQA Correct Loose.</td></tr><tr><td>Correct EM Rec.</td><td>Citation Rec.</td><td>Prec.</td><td>Correct Claim</td><td>Citation Rec.</td><td>Prec.</td></tr><tr><td>LLaMA3.1-8B (Dubey et al., 2024)</td><td>-</td><td>27.2</td><td>2.4</td><td>4.1</td><td>7.7</td><td>2.5</td><td>5.3</td><td>17.3</td><td>19.8</td></tr><tr><td>LLaMA3.1-8B-Hagrid (Kamalloo et al., 2023)</td><td>3,214</td><td>28.6</td><td>49.7</td><td>50.3</td><td>6.9</td><td>17.6</td><td>19.3</td><td>30.9</td><td>21.7</td></tr><tr><td>LLaMA3.1-8B-\(A^3\)-SFT-Wiki</td><td>13,225</td><td>31.3</td><td>80.0</td><td>79.8</td><td>7.6</td><td>40.4</td><td>40.2</td><td>35.8</td><td>34.2</td></tr><tr><td>LLaMA3.1-8B-\(A^3\)-PO-Wiki</td><td>13,225</td><td>32.5</td><td>82.7</td><td>83.2</td><td>7.4</td><td>48.8</td><td>51.8</td><td>50.0</td><td>39.0</td></tr><tr><td>LLaMA3.1-8B-\(A^3\)-SFT-Wiki&amp;arXiv</td><td>14,500</td><td>32.0</td><td>81.5</td><td>80.6</td><td>7.9</td><td>42.8</td><td>41.0</td><td>37.1</td><td>33.1</td></tr><tr><td>LLaMA3.1-8B-\(A^3\)-PO-Wiki&amp;arXiv</td><td>14,500</td><td>32.9</td><td>84.1</td><td>83.9</td><td>7.6</td><td>50.3</td><td>52.9</td><td>49.8</td><td>40.3</td></tr><tr><td>LLaMA2-70B-Chat (Touvron et al., 2023)</td><td>-</td><td>41.5</td><td>62.9</td><td>61.3</td><td>12.8</td><td>38.3</td><td>37.9</td><td>-</td><td>51.4</td></tr><tr><td>GPT-3.5-Turbo (OpenAI, 2023b)</td><td>-</td><td>40.4</td><td>73.6</td><td>72.5</td><td>12.0</td><td>51.1</td><td>50.0</td><td>-</td><td>42.5</td></tr><tr><td>GPT-4 (OpenAI, 2023a)</td><td>-</td><td>41.3</td><td>73.0</td><td>76.5</td><td>14.2</td><td>48.5</td><td>53.4</td><td>-</td><td>38.2</td></tr><tr><td>GPT-4o (OpenAI, 2024)</td><td>-</td><td>42.3</td><td>68.5</td><td>75.6</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>GPT-3.5-Turbo-CalM (Hsu et al., 2024)</td><td>-</td><td>45.0</td><td>78.0</td><td>72.6</td><td>12.9</td><td>51.9</td><td>46.6</td><td>-</td><td>-</td></tr></table>
120
+
121
+ summaries; ELI5 is derived from the Reddit forum 'Explain Like I'm Five'. FanOutQA is constructed using 1,034 high-quality, human-authored questions and 7,305 sub-question decompositions created by undergraduate and graduate students in AI/NLP courses at the University of Pennsylvania, with answers anchored to 4,121 distinct English Wikipedia articles. Given that our data is sourced from Wikipedia passages, and ELI5 data is collected from Reddit, the model's performance on ELI5 can alleviate concerns regarding performance improvements potentially stemming from data contamination. Additionally, we incorporate arXiv as a data source, which also contributes to performance enhancements and addresses this concern. We further carry out a comprehensive analysis by computing the BLEU scores between all questions in our training set and those in the ASQA and PopQA datasets. The findings reveal that no question pair had a BLEU score surpassing 0.9. This suggests that there is no substantial textual overlap between the datasets, thus effectively reducing the risk of data contamination.
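As a rough reproduction of this check (the paper does not specify the BLEU implementation; NLTK's sentence-level BLEU with smoothing is an assumption on our part):

```python
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction

def max_pairwise_bleu(train_questions, eval_questions) -> float:
    """Highest sentence-level BLEU between any train/eval question pair,
    used as a simple proxy for textual overlap / contamination."""
    smooth = SmoothingFunction().method1
    best = 0.0
    for q_eval in eval_questions:
        reference = [q_eval.lower().split()]
        for q_train in train_questions:
            score = sentence_bleu(reference, q_train.lower().split(), smoothing_function=smooth)
            best = max(best, score)
    return best

# Flag potential contamination if any pair exceeds the 0.9 threshold used in the paper:
# assert max_pairwise_bleu(train_questions, asqa_questions) <= 0.9
```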
122
+
123
+ # 3.3 Main Results
124
+
125
+ In Table 2, we report the results of our framework and all the baselines. With the help of our framework, the open-source backbone models can achieve strong citation performance, which significantly outperforms powerful closed-source models. Besides, our framework can bring significant improvements to all the backbone models on all three datasets, which fully shows the generalization of $\mathcal{A}^3$. The ablation study of the SFT and PO stages also shows the effectiveness of our strate
126
+
127
+ Table 2: The performance of all the baseline and our proposed framework on ASQA, ELI5, and PopQA. Bold indicates the best performance. EM, Rec., Prec. and Acc. denote Exact Match, Recall, Precision, and Accuracy. For FanOutQA, we conducted experiments under the setting of the evidence provided and context limited. Loose denotes loose accuracy. Wiki denotes we only use Wikipedia as data source, while Wiki&arXiv means we use both Wikipedia and arXiv as data source. Num. denotes the number of training samples.
128
+
129
+ <table><tr><td rowspan="2">Filtering</td><td rowspan="2">Noisy</td><td rowspan="2">Num.</td><td colspan="3">ASQA</td></tr><tr><td>Correct EM Rec.</td><td>Citation Rec.</td><td>Prec.</td></tr><tr><td>T5</td><td>Random</td><td>14,500</td><td>30.9</td><td>81.7</td><td>80.0</td></tr><tr><td>T5</td><td>Cat.</td><td>14,500</td><td>32.0</td><td>81.5</td><td>80.6</td></tr><tr><td>T5</td><td>Sub-Cat.</td><td>14,500</td><td>30.1</td><td>77.2</td><td>76.5</td></tr><tr><td>BERT</td><td>Random</td><td>14,790</td><td>30.7</td><td>80.5</td><td>80.2</td></tr><tr><td>BERT</td><td>Cat.</td><td>14,790</td><td>31.3</td><td>81.0</td><td>80.9</td></tr><tr><td>BERT</td><td>Sub-Cat.</td><td>14,790</td><td>30.5</td><td>78.4</td><td>77.6</td></tr></table>
130
+
131
+ Table 3: The performance of different data filtering and noisy document augmentation strategies on ASQA. The backbone model is LLaMA3.1-8B and the data source is both Wikipedia and arXiv. T5 denotes that we use the t5-xxl-true-nli-mixture model to filter data, while BERT denotes ModernBERT-base-nli. Random denotes that we use documents from different arXiv categories as noisy documents. Cat. denotes that we use documents with the same arXiv category as noisy documents, while Sub-Cat. denotes that we use documents with the same arXiv sub-category as noisy documents.
132
+
133
+ gies for each stage. The PO stage can bring significant improvements in citation quality. Despite the strong citation performance, the correctness of our framework is below that of powerful closed-source models. We think this phenomenon may result from the limitation of the model size. Furthermore, it can be observed that integrating data from diverse sources leads to enhanced performance. This effectively validates the generalizability of our framework. We report the performance of different backbone models in the appendix.
134
+
135
+ # 3.4 The Effect of Data Filtering and Noisy Document Augmentation Strategies
136
+
137
+ To analyze our framework further, we also study the effect of data filtering and noisy document aug
138
+
139
+ ![](images/f08e6522172457b8eaafa5bd1517fa0e6dcdd76c8c8296be6c451f5c9f56bd6c.jpg)
140
+ Figure 2: The effect of the filtering threshold in the SFT stage. We use the Citation F1 score as the filtering threshold and report the performance on ASQA when using LLaMA2-7B as the backbone model.
141
+
142
+ ![](images/ffc9230f0c2d174674c57b9dc0d9ef720b8fe26f947cd39f38254e63c9d7c88b.jpg)
143
+ Figure 3: The effect of the number of training samples in the SFT stage. We report the performance on ASQA when using LLaMA2-7B as the backbone model.
144
+
145
+ mentation strategies. From the results in Figure 2, we can find that only a high filtering threshold brings benefits, which shows that data quality is more important than data quantity.
146
+
147
+ # 3.5 Data Filtering & Number Analysis
148
+
149
+ We report the results when generating different numbers of training samples in Figures 3 and 4. With the same filtering threshold, performance improves as the amount of SFT data increases, as shown in Figure 3. When the SFT data number exceeds 5,000, the improvements are not significant. In Figure 4, we can find that when the DPO data number is less than 9,000, there is a clear linear relationship, namely, as the DPO data increases,
150
+
151
+ ![](images/92b0937a974572101f0c8b91f28e1a53222887af180a398bc4ceff305dc5df8f.jpg)
152
+ Figure 4: The effect of the number of training samples in the PO stage. We report the performance on ASQA when using LLaMA2-7B as the backbone model.
153
+
154
+ performance improves, which fully confirms the effectiveness of our proposed framework. When the DPO data number is more than 9,000, the improvements are not significant. In Table 3, we evaluate the performance of different data filtering models and noisy document strategies. Specifically, we compare the performance of t5-xxl-true-nli-mixture and ModernBERT-base-nli. We can observe that ModernBERT-base-nli may keep more samples while decreasing the performance. Therefore, we finally use t5-xxl-true-nli-mixture to filter data. We also find that using documents with the same category but different sub-categories as noisy documents achieves the best performance. If noisy documents are too relevant, the performance may decrease.
155
+
156
+ # 3.6 Human Evaluation
157
+
158
+ We conduct a human evaluation on 100 randomly selected samples from our dataset. Following Self-RAG (Asai et al., 2024a), we evaluate from two dimensions: relevance (the output's appropriateness and topic alignment with the question) and supportiveness (the sufficiency of provided evidence for answer validation). We recruited three undergraduate students as annotators. We calculated the inter-annotator agreement using Spearman's correlation coefficient, which resulted in a value of 0.72. The results show that $84\%$ of the samples were classified as relevant and $78\%$ as supportive. These results confirm the high quality of our dataset.
159
+
160
+ <table><tr><td>Input:</td><td>Write a high-quality answer for the given question using only the provided search results and cite them properly using [1][2][3].
161
+ Question: Who wrote the song i&#x27;m not going to miss you?
162
+ Document [1](Title: I&#x27;m Not Gonna Miss You): part of the soundtrack for &quot;&quot; which premiered on October 24, 2014. I&#x27;m Not Gonna Miss You &quot;I&#x27;m Not Gonna Miss You&quot; is a song recorded by ...
163
+ Document [2](Title: I&#x27;m Not Gonna Miss You): I&#x27;m Not Gonna Miss You &quot;I&#x27;m Not Gonna Miss You&quot; is a song recorded by American country music artist Glen Campbell and The Wrecking Crew...
164
+ Document [3](Title: Don&#x27;t Miss You (Ricki-Lee Coulter song)): Don&#x27;t Miss You (Ricki-Lee Coulter song)
165
+ &quot;Don&#x27;t Miss You&quot; is a song performed by Australian singer-songwriter, Ricki-Lee Coulter...
166
+ Document [4](Title: I&#x27;m Not Gonna Miss You): Grammy Award for Best Country Song. &quot;I&#x27;m Not Gonna Miss You&quot; was initiated by Julian Raymond who pitched an idea of recording the song to James Keach...
167
+ Document [5](Title: Don&#x27;t Miss You (Ricki-Lee Coulter song)): physical release of &quot;Don&#x27;t Miss You&quot;, Coulter held instore appearances at Westfield Shopping Centres in Victoria, New South Wales...
168
+ Answer:</td></tr><tr><td>Output</td><td>The song &quot;I&#x27;m Not Gonna Miss You&quot; was co-written by Glen Campbell and producer Julian Raymond [1][2].</td></tr></table>
169
+
170
+ Table 4: An example of a response generated by LLaMA2-7B-$\mathcal{A}^3$-PO on the ASQA dataset.
171
+
174
+ # 3.7 Case Study
175
+
176
+ In Table 4, we show results predicted by LLaMA2-7B- $\mathcal{A}^3$ -PO on the ASQA dataset. We can observe that the response is fluent, relevant to the question, and contains proper citations. Besides, through our error analysis, we identify two primary types of issues: failure to utilize information from multiple documents and retrieval of unrelated documents. Specifically, for the former, the model sometimes struggles to integrate information from multiple documents to generate accurate answers. Instead, it occasionally copies irrelevant sentences from a single document, which affects the overall quality of the response. Regarding the latter, some errors are due to the retrieval of documents that are not relevant to the question.
177
+
178
+ # 4 Related Work
179
+
180
+ **LLM Alignment** Existing LLM alignment works mainly consist of two methods: supervised instruction tuning and reinforcement learning. Supervised instruction tuning leverages collected supervised instruction datasets to train foundation models to deal with diverse instructions. Early instruction datasets are collected from large-scale existing NLP task datasets and transformed into instruction formats with manually written templates (Wei et al., 2021). In order to align with human requirements in realistic scenarios, recent works focus on collecting instruction data from realistic scenarios (Ouyang et al., 2022; Databricks, 2023; Kopf et al., 2023; Zhang et al., 2023a; Chiang et al., 2023). The reinforcement learning method improves response quality with preference signals. InstructGPT (Ouyang et al., 2022) introduces a Proximal Policy Optimization (PPO) framework, which helps LLMs learn from human preference signals. There is a line of subsequent work focused on improving the effectiveness and efficiency of this framework, e.g., RAFT (Dong et al., 2023), DPO (Rafailov et al., 2023), PRO (Song et al., 2023), COH (Liu et al., 2023), and RRHF (Yuan et al., 2023).
183
+
184
+ **Attributed LLM** Recently, retrieval-augmented models have shown great potential. By utilizing retrieved passages to prompt models, they can enhance the correctness of the outputs, and they have been applied to various downstream tasks. To further improve correctness and help users more easily verify the outputs, recent works focus on building attributed LLMs, which generate text with citations. Due to the abundant knowledge obtained during the pre-training stage, there is a line of works that focuses on exploring the potential of LLMs to generate citations directly (Weller et al., 2023; Xu et al., 2023; Asai et al., 2024a). To test the performance of attributed text generation, recent works build specific datasets for attribution. Specifically, CiteBench (Funkquist et al., 2022) focuses on text summarization; ALCE (Gao et al., 2023b) is a comprehensive benchmark to evaluate the attribution ability of existing LLMs, which does not contain training data; HAGRID (Kamalloo et al., 2023) utilizes powerful LLMs (GPT-3.5-turbo) to generate texts with citations. BioKaLMA (Li et al., 2023b) and ExpertQA (Malaviya et al., 2023) are domain-specific datasets for attribution. Besides, rather than generating citations directly, there is also a line of works (Gao et al., 2023a; Huo et al., 2023; Chen et al., 2023; Hsu et al., 2024) that retrieves and finds supportive passages to add citations after generating outputs. Recent works utilize reward modeling (Huang et al., 2024), test-time adaptation (Ye et al., 2024), or preference learning (Li et al., 2024) to improve the performance.
187
+
188
+ # 5 Conclusion
189
+
190
+ In this paper, we introduce the $\mathcal{A}^3$ framework, which aims to leverage underperforming attributed LLMs to generate high-quality attributed query-response pairs for both SFT and PO stages, eliminating the requirement for human-annotated samples. Comprehensive experiments demonstrate our method's effectiveness in improving citation quality and reducing the effect of irrelevant documents. Despite the effectiveness of $\mathcal{A}^3$ , there remains a gap in fully meeting the needs of information-seeking tasks in realistic scenarios. With our framework, we hope more breakthroughs can be made to promote the development of this task. For instance, integrating it with the promising Reinforcement Learning with Verifiable Rewards (RLVR) methods holds great potential.
191
+
192
+ # Limitations
193
+
194
+ Although our instruction framework can generate high-quality attributed text generation data, it still has the following limitations:
195
+
196
+ - Even though we generate data with the use of the existing corpus to ensure the faithfulness of outputs, due to the characteristics of the backbone LLMs used and the potential social bias in the corpus, the data may still have hallucination and bias problems. Therefore, the generated data and the outputs of trained models may contain misleading and toxic information, which needs to be addressed before being applied to realistic scenarios;
197
+ - Although we conduct experiments on widely used attributed text generation benchmarks, the language of all these benchmarks is English, which has limited morphology. The effectiveness of our proposed method on languages with varied morphology needs to be further confirmed.
198
+ - We have confirmed the effectiveness of our framework on multiple backbone LLMs, including LLaMA2-7B, LLaMA2-13B, Mistral-7B, and LLaMA3.1-8B. However, the effectiveness on other backbone LLMs still needs to be tested.
201
+
202
+ - In our framework, we mainly use GPT-3.5-Turbo and GPT-4 to generate data, which can be expanded to include other closed-source or open-source LLMs, such as Claude and LLaMA3.1-Instruct.
203
+
204
+ # Acknowledgments
205
+
206
+ We want to thank all the anonymous reviewers for their valuable comments. This work was supported by the National Science Foundation of China (NSFC No. 62206194), the Natural Science Foundation of Jiangsu Province, China (Grant No. BK20220488), the Young Elite Scientists Sponsorship Program by CAST (2023QNRC001), and the Priority Academic Program Development of Jiangsu Higher Education Institutions.
207
+
208
+ # References
209
+
210
+ Akari Asai, Zeqiu Wu, Yizhong Wang, Avirup Sil, and Hannaneh Hajishirzi. 2024a. Self-RAG: Learning to retrieve, generate, and critique through self-reflection. In The Twelfth International Conference on Learning Representations.
211
+ Akari Asai, Zexuan Zhong, Danqi Chen, Pang Wei Koh, Luke Zettlemoyer, Hannaneh Hajishirzi, and Wen-tau Yih. 2024b. Reliable, adaptable, and attributable language models with retrieval. arXiv preprint arXiv:2403.03187.
212
+ Jifan Chen, Grace Kim, Aniruddh Sriram, Greg Durrett, and Eunsol Choi. 2023. Complex claim verification with evidence retrieved in the wild. arXiv preprint arXiv:2305.11859.
213
+ Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E. Gonzalez, Ion Stoica, and Eric P. Xing. 2023. Vicuna: An open-source chatbot impressing GPT-4 with 90%* ChatGPT quality.
214
+ Databricks. 2023. Databricks' dolly, a large language model trained on the databricks machine learning platform. https://github.com/databrickslabs/dolly.
215
+ Hanze Dong, Wei Xiong, Deepanshu Goyal, Rui Pan, Shizhe Diao, Jipeng Zhang, Kashun Shum, and Tong Zhang. 2023. Raft: Reward ranked finetuning for generative foundation model alignment. arXiv preprint arXiv:2304.06767.
216
+ Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783.
219
+
220
+ Martin Funkquist, Ilia Kuznetsov, Yufang Hou, and Iryna Gurevych. 2022. Citebench: A benchmark for scientific citation text generation. arXiv preprint arXiv:2212.09577.
221
+
222
+ Luyu Gao, Zhuyun Dai, Panupong Pasupat, Anthony Chen, Arun Tejasvi Chaganty, Yicheng Fan, Vincent Zhao, Ni Lao, Hongrae Lee, Da-Cheng Juan, et al. 2023a. Rarr: Researching and revising what language models say, using language models. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 16477-16508.
223
+
224
+ Tianyu Gao, Howard Yen, Jiatong Yu, and Danqi Chen. 2023b. Enabling large language models to generate text with citations. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 6465-6488, Singapore. Association for Computational Linguistics.
225
+
226
+ Aaron Grattafori, Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Alex Vaughan, Amy Yang, Angela Fan, et al. 2024. The llama 3 herd of models. Preprint, arXiv:2407.21783.
233
+
234
+ I-Hung Hsu, Zifeng Wang, Long T. Le, Lesly Miculicich, Nanyun Peng, Chen-Yu Lee, Tomas Pfister, et al. 2024. CaLM: Contrasting large and small language models to verify grounded generation. arXiv preprint arXiv:2406.05365.
235
+
236
+ Chengyu Huang, Zeqiu Wu, Yushi Hu, and Wenya Wang. 2024. Training language models to generate text with citations via fine-grained rewards. arXiv preprint arXiv:2402.04315.
237
+
238
+ Siqing Huo, Negar Arabzadeh, and Charles Clarke. 2023. Retrieving supporting evidence for generative question answering. In Annual International ACM SIGIR Conference on Research and Development in Information Retrieval in the Asia Pacific Region, pages 11-20.
239
+
240
+ Gautier Izacard, Mathilde Caron, Lucas Hosseini, Sebastian Riedel, Piotr Bojanowski, Armand Joulin, and Edouard Grave. 2022. Unsupervised dense information retrieval with contrastive learning. Transactions on Machine Learning Research.
241
+
242
+ Albert Q. Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, Lélio Renard Lavaud, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, and William El Sayed. 2023. Mistral 7b. Preprint, arXiv:2310.06825.
243
+
244
+ Ehsan Kamalloo, Aref Jafari, Xinyu Zhang, Nandan Thakur, and Jimmy Lin. 2023. Hagrid: A human-llm collaborative dataset for generative information-seeking with attribution. arXiv preprint arXiv:2307.16883.
245
+
246
+ Andreas Köpf, Yannic Kilcher, Dimitri von Rütte, Sotiris Anagnostidis, Zhi-Rui Tam, Keith Stevens, Abdullah Barhoum, Nguyen Minh Duc, Oliver Stanley, Richard Nagyfi, et al. 2023. Openassistant conversations-democratizing large language model alignment. arXiv preprint arXiv:2304.07327.
247
+
248
+ Dongfang Li, Zetian Sun, Baotian Hu, Zhenyu Liu, Xinshuo Hu, Xuebo Liu, and Min Zhang. 2024. Improving attributed text generation of large language models via preference learning. arXiv preprint arXiv:2403.18381.
249
+
250
+ Dongfang Li, Zetian Sun, Xinshuo Hu, Zhenyu Liu, Ziyang Chen, Baotian Hu, Aiguo Wu, and Min Zhang. 2023a. A survey of large language models attribution. arXiv preprint arXiv:2311.03731.
251
+
252
+ Xinze Li, Yixin Cao, Liangming Pan, Yubo Ma, and Aixin Sun. 2023b. Towards verifiable generation: A benchmark for knowledge-aware language model attribution. arXiv preprint arXiv:2310.05634.
255
+ Hao Liu, Carmelo Sferrazza, and Pieter Abbeel. 2023. Languages are rewards: Hindsight finetuning using human feedback. arXiv preprint arXiv:2302.02676.
256
+ Chaitanya Malaviya, Subin Lee, Sihao Chen, Elizabeth Sieber, Mark Yatskar, and Dan Roth. 2023. Expert-curated questions and attributed answers. arXiv preprint arXiv:2309.07852.
257
+ Alex Mallen, Akari Asai, Victor Zhong, Rajarshi Das, Daniel Khashabi, and Hannaneh Hajishirzi. 2023. When not to trust language models: Investigating effectiveness of parametric and non-parametric memories. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 9802-9822.
258
+ Sewon Min, Julian Michael, Hannaneh Hajishirzi, and Luke Zettlemoyer. 2020. Ambiguous open-domain questions. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 5783-5797, Online. Association for Computational Linguistics.
259
+ Jianmo Ni, Chen Qu, Jing Lu, Zhuyun Dai, Gustavo Hernandez Abrego, Ji Ma, Vincent Zhao, Yi Luan, Keith Hall, Ming-Wei Chang, and Yinfei Yang. 2022. Large dual encoders are generalizable retrievers. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 9844–9855, Abu Dhabi, United Arab Emirates. Association for Computational Linguistics.
260
+ OpenAI. 2023a. Gpt-4 technical report. Preprint, arXiv:2303.08774.
261
+ OpenAI. 2023b. Introducing chatgpt.
262
+ OpenAI. 2024. Gpt-4o system card.
263
+ Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. 2022. Training language models to follow instructions with human feedback. Advances in Neural Information Processing Systems, 35:27730-27744.
264
+ Rafael Rafailov, Archit Sharma, Eric Mitchell, Stefano Ermon, Christopher D Manning, and Chelsea Finn. 2023. Direct preference optimization: Your language model is secretly a reward model. arXiv preprint arXiv:2305.18290.
265
+ Feifan Song, Bowen Yu, Minghao Li, Haiyang Yu, Fei Huang, Yongbin Li, and Houfeng Wang. 2023. Preference ranking optimization for human alignment. arXiv preprint arXiv:2306.17492.
266
+ Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. 2023. Stanford alpaca: An instruction-following llama model. https://github.com/tatsu-lab/stanford_alpaca.
267
+
268
+ Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. 2023. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288.
269
+ Luyu Wang, Yujia Li, Ozlem Aslan, and Oriol Vinyals. 2021. Wikigraphs: A wikipedia text-knowledge graph paired dataset. NAACL-HLT 2021, page 67.
270
+ Jason Wei, Maarten Bosma, Vincent Y Zhao, Kelvin Guu, Adams Wei Yu, Brian Lester, Nan Du, Andrew M Dai, and Quoc V Le. 2021. Finetuned language models are zero-shot learners. arXiv preprint arXiv:2109.01652.
271
+ Orion Weller, Marc Marone, Nathaniel Weir, Dawn Lawrie, Daniel Khashabi, and Benjamin Van Durme. 2023. "according to..." prompting language models improves quoting from pre-training data. arXiv preprint arXiv:2305.13252.
272
+ Shicheng Xu, Liang Pang, Huawei Shen, Xueqi Cheng, and Tat-seng Chua. 2023. Search-in-the-chain: Towards the accurate, credible and traceable content generation for complex knowledge-intensive tasks. arXiv preprint arXiv:2304.14732.
273
+ Xi Ye, Ruoxi Sun, Sercan Arik, and Tomas Pfister. 2024. Effective large language model adaptation for improved grounding and citation generation. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 6237-6251.
274
+ Zheng Yuan, Hongyi Yuan, Chuanqi Tan, Wei Wang, Songfang Huang, and Fei Huang. 2023. Rrhf: Rank responses to align language models with human feedback without tears. arXiv preprint arXiv:2304.05302.
275
+ Ge Zhang, Yemin Shi, Ruibo Liu, Ruibin Yuan, Yizhi Li, Siwei Dong, Yu Shu, Zhaoqun Li, Zekun Wang, Chenghua Lin, et al. 2023a. Chinese open instruction generalist: A preliminary release. arXiv preprint arXiv:2304.07987.
276
+ Xinyu Zhang, Nandan Thakur, Odunayo Ogundepo, Ehsan Kamalloo, David Alfonso-Hermelo, Xiaoguang Li, Qun Liu, Mehdi Rezagholizadeh, and Jimmy Lin. 2022. Making a miracl: Multilingual information retrieval across a continuum of languages. Preprint, arXiv:2210.09984.
277
+ Yue Zhang, Yafu Li, Leyang Cui, Deng Cai, Lemao Liu, Tingchen Fu, Xinting Huang, Enbo Zhao, Yu Zhang, Yulong Chen, et al. 2023b. Siren's song in the ai ocean: A survey on hallucination in large language models. arXiv preprint arXiv:2309.01219.
278
+ Andrew Zhu, Alyssa Hwang, Liam Dugan, and Chris Callison-Burch. 2024. Fanoutqa: A multi-hop, multi-document question answering benchmark for large language models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 18-37.
281
+
282
+ # A Model Training
283
+
284
+ # A.1 Supervised Fine-tuning Stage
285
+
286
+ We format each sample as follows:
287
+
288
+ Instruction: Write a high-quality answer for the given question using only the provided search results and cite them properly using [1][2][3].
289
+
290
+ Question: $\{Q_i\}$
291
+
292
+ Document[j]: $\{p_j\}$
293
+
294
+ Output: $\{R_iC_i\}$
295
+
296
+ We compute the cross-entropy loss only on the output to train the SFT model $M_{SFT}$.
297
+
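+ As a minimal sketch (illustrative only; the tokenizer name is a placeholder backbone), the sample above can be packed for a causal LM by setting the labels of the prompt tokens to -100, so that cross-entropy is computed only on the output span:
+
+ ```python
+ import torch
+ from transformers import AutoTokenizer
+
+ tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")  # placeholder backbone
+
+ def build_sft_example(question, documents, output, max_len=2048):
+     prompt = ("Instruction: Write a high-quality answer for the given question using only "
+               "the provided search results and cite them properly using [1][2][3].\n"
+               f"Question: {question}\n")
+     for j, doc in enumerate(documents, start=1):
+         prompt += f"Document[{j}]: {doc}\n"
+     prompt += "Output: "
+     prompt_ids = tokenizer(prompt, add_special_tokens=False).input_ids
+     output_ids = tokenizer(output, add_special_tokens=False).input_ids
+     input_ids = (prompt_ids + output_ids)[:max_len]
+     # -100 is the ignore index of the cross-entropy loss in Transformers,
+     # so only the output tokens contribute to the SFT objective.
+     labels = ([-100] * len(prompt_ids) + output_ids)[:max_len]
+     return {"input_ids": torch.tensor(input_ids), "labels": torch.tensor(labels)}
+ ```
+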
298
+ # A.2 Preference Optimization Stage
299
+
300
+ In the PO stage, we use the Direct Preference Optimization (DPO) (Rafailov et al., 2023) loss function with the same prompt template as in the SFT stage. Formally, we use $\mathcal{L}_{DPO}$ to train the DPO model $M_{DPO}$ :
301
+
302
+ $$
303
+ \mathcal {L} _ {D P O} = - \mathbb {E} \left[ \log \sigma \left(\alpha \log \frac {M _ {D P O} \left(R _ {i} C _ {i} \mid Q _ {i} P _ {i}\right)}{M _ {S F T} \left(R _ {i} C _ {i} \mid Q _ {i} P _ {i}\right)} - \alpha \log \frac {M _ {D P O} \left(D R _ {i} D C _ {i} \mid Q _ {i} P _ {i}\right)}{M _ {S F T} \left(D R _ {i} D C _ {i} \mid Q _ {i} P _ {i}\right)}\right) \right]
304
+ $$
305
+
306
+ where $\sigma$ represents the logistic function and $\alpha$ is a hyper-parameter.
307
+
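+ A minimal sketch of this objective on per-sequence log-probabilities (illustrative; variable names are ours, with the SFT model serving as the reference model):
+
+ ```python
+ import torch.nn.functional as F
+
+ def dpo_loss(policy_chosen_logp, policy_rejected_logp,
+              ref_chosen_logp, ref_rejected_logp, alpha=0.1):
+     """DPO loss on summed per-sequence log-probs; alpha plays the role of the usual beta."""
+     chosen_ratio = policy_chosen_logp - ref_chosen_logp        # log M_DPO / M_SFT for (R_i, C_i)
+     rejected_ratio = policy_rejected_logp - ref_rejected_logp  # log M_DPO / M_SFT for (DR_i, DC_i)
+     return -F.logsigmoid(alpha * (chosen_ratio - rejected_ratio)).mean()
+ ```
+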
308
+ # B Implementation Details
309
+
310
+ # B.1 Training Details
311
+
312
+ We implement all models with the open-source toolkit Transformers $^{10}$ . In the SFT stage, we follow Alpaca (Taori et al., 2023), use the AdamW optimizer, set the learning rate to 1e-5, batch size to 32, learning rate warmup ratio to 0.03, and input length to 2,048, and perform training for 3 epochs. We update all parameters during the fine-tuning stage. Our DPO implementation is based on the Alignment-Handbook $^{11}$ . In the DPO stage, we set the learning rate to 5e-7, batch size to 16, learning rate warmup ratio to 0.1, and input length to 2,048, and perform training for 1 epoch. All fine-tuning experiments are performed on 80GB NVIDIA A100 GPUs.
313
+
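+ A sketch of the corresponding SFT configuration with the Transformers Trainer API (hyper-parameter values from above; the per-device batch split assumes an 8-GPU setup):
+
+ ```python
+ from transformers import TrainingArguments
+
+ sft_args = TrainingArguments(
+     output_dir="sft-model",
+     learning_rate=1e-5,
+     per_device_train_batch_size=4,   # 8 GPUs x 4 = global batch size 32 (assumed split)
+     num_train_epochs=3,
+     warmup_ratio=0.03,
+     bf16=True,
+     optim="adamw_torch",
+ )
+
+ # DPO-stage values from above; the Alignment-Handbook recipe exposes analogous fields.
+ dpo_config = dict(learning_rate=5e-7, per_device_train_batch_size=2,  # 8 GPUs -> global 16
+                   num_train_epochs=1, warmup_ratio=0.1, max_length=2048)
+ ```
+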
314
+ # B.2 Inference Details
315
+
316
+ During model inference, we use the same prompt template as the one used in the
317
+
318
+ SFT stage. We use the same retriever and documents with Top-5 scores for our method and all the baselines. Specifically, following ALCE (Gao et al., 2023b) and Self-RAG (Asai et al., 2024a), we use GTR (Ni et al., 2022) for ASQA, BM25 for ELI5 and Contriever-MS MARCO (Izacard et al., 2022) for PopQA. We set the temperature to 1 and the Top-P value to 0.95, then sample one response for each query.
319
+
320
+ # C Prompt Template
321
+
322
+ In Q&A Pair Extraction, we generate $R_{i}$ based on $P_{i}$ using the following prompt template:
323
+
324
+ I will give a reference paragraph. Please summarize this paragraph briefly.
325
+
326
+ Reference: $\{P_{i}\}$
327
+
328
+ Summary:
329
+
330
+ We generate $Q_{i}$ based on $R_{i}$ using the following prompt template:
331
+
332
+ I will give an answer. Please design a question for this answer.
333
+
334
+ Answer: $\{R_i\}$
335
+
336
+ Question:
337
+
338
+ We generate multiple attributed Q&A pairs based on $P_{i}$ using the following prompt template:
339
+
340
+ I will give some reference paragraphs. Please design some question-answer pairs based on these paragraphs. Each question starts with Q: and each answer starts with A:. You should consider the interconnectedness of content across multiple paragraphs and formulate questions that draw connections between the information presented in those paragraphs. Also, mention the reference of parts of your answer based on the given paragraphs within brackets [] as in the IEEE format.
341
+
342
+ Reference: $\{P_i\}$
343
+
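+ A sketch of how the first two templates can be chained to produce $(Q_i, R_i)$ from a paragraph $P_i$; the OpenAI client call is illustrative only (Table 6 uses gpt-4-1106-preview as the data generation model):
+
+ ```python
+ from openai import OpenAI
+
+ SUMMARY_TMPL = ("I will give a reference paragraph. Please summarize this paragraph briefly.\n"
+                 "Reference: {paragraph}\nSummary:")
+ QUESTION_TMPL = ("I will give an answer. Please design a question for this answer.\n"
+                  "Answer: {answer}\nQuestion:")
+
+ client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
+
+ def generate(prompt, model="gpt-4-1106-preview"):
+     resp = client.chat.completions.create(model=model,
+                                           messages=[{"role": "user", "content": prompt}])
+     return resp.choices[0].message.content
+
+ def qa_from_paragraph(paragraph):
+     answer = generate(SUMMARY_TMPL.format(paragraph=paragraph))   # R_i from P_i
+     question = generate(QUESTION_TMPL.format(answer=answer))      # Q_i from R_i
+     return question, answer
+ ```
+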
344
+ # D The Performance on Different Backbone Models
345
+
346
+ In Table 5, we show the performance of our framework on different backbone models. The significant
347
+
348
+ <table><tr><td rowspan="2">Method</td><td colspan="3">ASQA</td><td colspan="3">ELI5</td><td rowspan="2">PopQA Correct Acc.</td></tr><tr><td>Correct EM Rec.</td><td>Citation Rec.</td><td>Prec.</td><td>Correct Claim</td><td>Citation Rec.</td><td>Prec.</td></tr><tr><td>LLaMA2-7B (Touvron et al., 2023)</td><td>12.4</td><td>1.3</td><td>2.7</td><td>2.4</td><td>0.8</td><td>0.9</td><td>45.9</td></tr><tr><td>LLaMA2-7B-Hagrid (Kamalloo et al., 2023)</td><td>28.5</td><td>48.8</td><td>54.2</td><td>9.7</td><td>18.8</td><td>30.2</td><td>40.7</td></tr><tr><td>LLaMA2-7B-\(A^3\)-SFT (Ours)</td><td>31.3</td><td>73.6</td><td>71.4</td><td>9.4</td><td>33.6</td><td>33.9</td><td>49.5</td></tr><tr><td>LLaMA2-7B-\(A^3\)-PO (Ours)</td><td>33.2</td><td>80.7</td><td>76.1</td><td>8.6</td><td>44.7</td><td>46.3</td><td>52.1</td></tr><tr><td>LLaMA2-13B (Touvron et al., 2023)</td><td>26.9</td><td>10.6</td><td>15.4</td><td>3.9</td><td>3.1</td><td>5.3</td><td>21.9</td></tr><tr><td>LLaMA2-13B-Vicuna (Chiang et al., 2023)</td><td>31.9</td><td>51.1</td><td>50.1</td><td>10.0</td><td>15.6</td><td>19.6</td><td>-</td></tr><tr><td>LLaMA2-13B-Chat (Touvron et al., 2023)</td><td>35.2</td><td>38.4</td><td>39.4</td><td>13.4</td><td>17.3</td><td>15.8</td><td>-</td></tr><tr><td>LLaMA2-13B-Hagrid (Kamalloo et al., 2023)</td><td>28.7</td><td>46.5</td><td>47.0</td><td>7.9</td><td>14.8</td><td>17.4</td><td>27.6</td></tr><tr><td>LLaMA2-13B-Self-RAG (Asai et al., 2024a)</td><td>31.7</td><td>70.3</td><td>71.3</td><td>10.7</td><td>20.8</td><td>22.5</td><td>-</td></tr><tr><td>LLaMA2-13B-\(A^3\)-SFT (Ours)</td><td>31.5</td><td>79.9</td><td>79.6</td><td>9.1</td><td>41.6</td><td>41.3</td><td>45.7</td></tr><tr><td>LLaMA2-13B-\(A^3\)-PO (Ours)</td><td>31.7</td><td>82.3</td><td>82.5</td><td>8.9</td><td>42.0</td><td>42.3</td><td>47.1</td></tr><tr><td>Mistral-7B (Jiang et al., 2023)</td><td>13.3</td><td>0.7</td><td>1.7</td><td>3.8</td><td>1.4</td><td>2.7</td><td>42.3</td></tr><tr><td>Mistral-7B-Hagrid (Kamalloo et al., 2023)</td><td>15.2</td><td>36.1</td><td>40.1</td><td>5.4</td><td>20.2</td><td>28.2</td><td>43.7</td></tr><tr><td>Mistral-7B-\(A^3\)-SFT (Ours)</td><td>31.4</td><td>76.8</td><td>75.6</td><td>8.6</td><td>37.6</td><td>36.9</td><td>47.2</td></tr><tr><td>Mistral-7B-\(A^3\)-PO (Ours)</td><td>31.7</td><td>84.4</td><td>87.0</td><td>6.0</td><td>60.7</td><td>68.9</td><td>52.8</td></tr><tr><td>LLaMA2-70B-Chat (Touvron et al., 2023)</td><td>41.5</td><td>62.9</td><td>61.3</td><td>12.8</td><td>38.3</td><td>37.9</td><td>-</td></tr><tr><td>GPT-3.5-Turbo (OpenAI, 2023b)</td><td>40.4</td><td>73.6</td><td>72.5</td><td>12.0</td><td>51.1</td><td>50.0</td><td>-</td></tr><tr><td>GPT-4 (OpenAI, 2023a)</td><td>41.3</td><td>73.0</td><td>76.5</td><td>14.2</td><td>48.5</td><td>53.4</td><td>-</td></tr><tr><td>GPT-3.5-Turbo-CaLM (Hsu et al., 2024)</td><td>45.0</td><td>78.0</td><td>72.6</td><td>12.9</td><td>51.9</td><td>46.6</td><td>-</td></tr></table>
349
+
350
+ Table 5: The performance of all the baseline and our proposed framework on ASQA, ELI5, and PopQA. Bold indicates the best performance. EM, Rec., Prec. and Acc. denote Exact Match, Recall, Precision, and Accuracy.
351
+
352
+ <table><tr><td>Document Selection</td><td>Whether QG</td><td>DG Model</td><td>Correct EM Rec.</td><td>Citation Rec.</td><td>Prec.</td></tr><tr><td>Query Based</td><td>×</td><td>gpt-3.5-turbo-0301</td><td>28.7</td><td>46.5</td><td>47.0</td></tr><tr><td>Query Based</td><td>×</td><td>gpt-4-1106-preview</td><td>29.9</td><td>62.6</td><td>55.2</td></tr><tr><td>Query Based</td><td>✓</td><td>gpt-4-1106-preview</td><td>30.8</td><td>73.0</td><td>51.9</td></tr><tr><td>Entity Based</td><td>✓</td><td>gpt-4-1106-preview</td><td>31.3</td><td>73.2</td><td>72.7</td></tr></table>
353
+
354
+ Table 6: The effect of different document selection and data generation strategies. Bold indicates the best performance. Whether QG represents whether we generate questions based on the selected documents. DG Model denotes the LLM we used to generate data.
355
+
356
+ improvements across different backbone models show the generalizability of our framework.
357
+
358
+ # E The Effect of Document Selection Strategy
359
+
360
+ In our preliminary experiments, we find that selecting multiple documents randomly as $P_{i}$ to generate $(Q_{i}, R_{i}, C_{i})$ causes the generated response to usually have only one citation. We speculate this phenomenon results from the selected documents that are not interrelated. Therefore, in this experiment, we study the effect of different document selection strategies. We compare the entity matching based document clustering strategy with the query based document clustering strategy. Specifically, we utilize a retriever to get multiple passages related to one query and treat these retrieved passages
361
+
362
+ as selected passages. We collect these queries and retrieved passages from MIRACL (Zhang et al., 2022). For the query based document clustering strategy, since queries are already available, we also compare using this query directly as $Q_{i}$ against generating $Q_{i}$ as in the entity matching based document clustering strategy. The results in Table 6 confirm the effectiveness of our proposed entity matching based document clustering strategy. Besides, we find that the data generation model has a significant effect on performance.
363
+
364
+ # F Irrelevant Citation Analysis
365
+
366
+ We also find that these two strategies contribute significantly to avoiding unnecessary, irrelevant citations. As shown in Figure 5, the noisy document augmentation and data filtering strategies can reduce
367
+
368
+ ![](images/4ee53f7f977b07de37af8503d63c04ef4bcdacb2dc80f84169cc60b658368d08.jpg)
369
+ Figure 5: The irrelevant citation percentage when using Data Filtering and Noisy Document Augmentation strategies. We report the performance on ASQA when using LLaMA2-7B as the backbone model.
370
+
371
+ the percentage of irrelevant citations.
3automaticalignmentframeworkforattributedtextgeneration/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc5d7da92fe6520a376809b799782e478cf0ba096a84014813bda89e6636a09f
3
+ size 816494
3automaticalignmentframeworkforattributedtextgeneration/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5b2b96b997420e4c6bb5abb195576813ff950cfab7a05c8e5ab6581adae5f1b3
3
+ size 449199
acceleratingdensellmsvial0regularizedmixtureofexperts/cd656b68-876d-4f33-86db-1ecf995c8724_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da5aa7522d254b80d5f84bcc8afb9eb4ead3cf63c5f8da183009b2d8b8d41e17
3
+ size 69112
acceleratingdensellmsvial0regularizedmixtureofexperts/cd656b68-876d-4f33-86db-1ecf995c8724_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b4053e4305fadd81b6199619e2d0b953a70952a9bc9ed07185cb21698ec9d67d
3
+ size 85742
acceleratingdensellmsvial0regularizedmixtureofexperts/cd656b68-876d-4f33-86db-1ecf995c8724_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ea2a1145e524b3d7e42e73d93570bd0c61c574af91974fd3b5399c07c3e1d622
3
+ size 537949
acceleratingdensellmsvial0regularizedmixtureofexperts/full.md ADDED
@@ -0,0 +1,311 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Accelerating Dense LLMs via L0-regularized Mixture-of-Experts
2
+
3
+ Zhenyu Zhang<sup>1</sup>, Jiudong Yang<sup>2</sup>, Zhaowen Tao<sup>1</sup>, Meng Chen<sup>3*</sup>
4
+
5
+ 1 YZW, Chengdu, China
6
+
7
+ $^{2}$ FuTu AI, Shenzhen, China
8
+
9
+ $^{3}$ Wise AI, Melbourne, Australia
10
+
11
+ zhangzhenyul3@outlook.com, simonyang@futunn.com
12
+
13
+ taozhaowen@gmail.com, chenmengdx@gmail.com
14
+
15
+ # Abstract
16
+
17
+ Large language models (LLMs) achieve strong performance but suffer from slow and costly inference. Existing acceleration methods often lead to noticeable performance degradation, while Mixture-of-Experts (MoE) models require extensive computational resources. In this paper, we propose L0-MoE, a lightweight MoE approach using L0-regularization to accelerate dense LLMs nearly without performance loss. Our method introduces a cluster confusion matrix for domain-aware dataset curation and applies dynamic batching for efficient training. Experiments show that L0-MoE achieves up to 2.5x speedup over dense models while maintaining competitive performance, outperforming existing LLM acceleration baselines.
18
+
19
+ # 1 Introduction
20
+
21
+ Large language models (LLMs) have demonstrated remarkable intelligence across various tasks (OpenAI et al., 2024; Gemini-Team et al., 2024; Dubey et al., 2024; Jiang et al., 2023; DeepSeek-AI et al., 2025; Yang et al., 2024), including question answering, mathematics, coding, and content generation. A key insight into their success is the parameter scaling law (Kaplan et al., 2020), which suggests that increasing model size enhances performance across diverse tasks, potentially advancing artificial general intelligence (AGI) (Bubeck et al., 2023). However, larger LLMs incur high inference costs, leading to slower generation speeds and increased computational expenses. Thus, optimizing LLM inference efficiency has become a critical challenge for both academia and industry.
22
+
23
+ Various approaches have been proposed to accelerate LLM inference, which can be categorized into three main techniques: (1) Quantization, including GPTQ (Frantar et al., 2023), SmoothQuant (Xiao et al., 2023), AWQ (Lin et al., 2024b) and DuQuant (Lin et al., 2024a), reduces precision by
24
+
25
+ converting weights and activations from floating-point to lower-bit integer formats, significantly improving efficiency. (2) Model pruning, such as LLM-Pruner (Ma et al., 2023) and LLM-Shearing (Xia et al., 2024), removes redundant parameters based on predefined criteria to compress models and accelerate inference. (3) Knowledge distillation (Gu et al., 2024; Feng et al., 2024), like reverse-KD (Gu et al., 2024) and Chain-of-Thought (CoT) Distillation (Feng et al., 2024), transfers knowledge from large LLMs to smaller ones using distillation techniques (Hinton et al., 2015), reducing computational demands. While these methods achieve substantial speedup, they often come at the cost of performance degradation, posing challenges for real-world deployment.
26
+
27
+ Recently, sparsely gated Mixture-of-Experts (MoE) models (Cai et al., 2024), particularly in transformer-based large language models, have significantly improved inference speed optimization. MoE operates on a simple yet effective principle: different model components, known as experts, specialize in distinct tasks or data aspects. For a given input, only relevant experts are activated, reducing computational costs while leveraging a vast pool of specialized knowledge. This scalable and flexible approach aligns with the scaling law, enabling larger model capacities without proportional computational overhead. However, current MoE training focuses on training from scratch or upcycling dense LLMs, both requiring vast computational resources and high-quality corpora. For instance, DeepSeek-V3 (DeepSeek-AI et al., 2024) and Qwen2.5-Max (Yang et al., 2025) were pretrained on 14.8T and 20T tokens, respectively, with additional fine-tuning, making them costly and less accessible. In contrast, little research has explored leveraging MoE to accelerate inference using a small-scale training corpus (e.g., tens of billions of tokens) while maintaining performance comparable to dense LLMs. This direction is particularly ap
28
+
29
+ Step 1: Cluster Confusion Matrix based Sampling
30
+
31
+ ![](images/4a0b48fe9788195ff8d6f6ebbbb4783c8aff6ee83b1509a32065f6ddd596f360.jpg)
32
+ Pre-training Corpus
33
+
34
+ ![](images/5cfb86f42b5b5077b0de6ad59bfe0fec110ab451093714120abe56607320f99a.jpg)
35
+
36
+ ![](images/d3e04b041a499a61cd7d1c77bafdd2bea06ca1cee9282925e8b851444ac27810.jpg)
37
+ K-Means Clustering
38
+
39
+ ![](images/72b7d8d012a754b054fc5023921727d60dbd8ae2433e806bee2e7cc7ac0a2830.jpg)
40
+
41
+ ![](images/30939c7a79226d31af277b47cbf392431b3ee61b78865f61344a29259357e621.jpg)
42
+ Sampling Small Datasets
43
+
44
+ Step 2: Expert Construction via L0-regularization
45
+
46
+ Feed Forward Network
47
+
48
+ Self Attention Layer
49
+
50
+ Transformer block of Dense LLMs
51
+
52
+ Expert 1
53
+
54
+ Step 3: Dynamic Batching for MoE Training
55
+
56
+ Expert 2
57
+
58
+ Expert 3
59
+
60
+ Expert 4
61
+
62
+ Token level routing
63
+
64
+ ![](images/63655a13d4fd09dbaf8b160f451d2f2fb59b174b38a69b3c420575d746890fe9.jpg)
65
+ Sequence level routing
66
+ Figure 1: Overview of the L0-MoE Architecture, which includes three main stages: (1) cluster confusion matrix based sampling, (2) expert formation using L0 regularization, and (3) dynamic batching for MoE training. The figure above illustrates the process of building an L0-MoE with four experts over $n$ iterations of dataset sampling.
67
+
68
+ pealing for large-scale industrial applications with cost-sensitive deployment constraints.
69
+
70
+ To address this issue, we propose L0-MoE, a mixture-of-experts (MoE) model built via L0-regularization (Louizos et al., 2018) using a small, curated 30B-token corpus. Our approach has two key components: (1) L0-regularization selects critical hidden dimensions in transformer MLPs to form experts. (2) A cluster confusion matrix (CCM)-based sampling method curates the training corpus and schedules dynamic batching. Using the BGE-M3 encoder (Chen et al., 2024) and K-means clustering (Jin and Han, 2010), we extract diverse semantic domains from RedPajama (Weber et al., 2024) to construct expert-relevant sub-datasets. A gating mechanism and dynamic batching optimize training. L0-MoE achieves $2.5 \times$ inference speedup with no obvious performance loss across four benchmarks. Our contributions are as follows: 1) We introduce a novel MoE building method leveraging L0-regularization, enabling efficient LLM inference acceleration with minimal training cost. 2) We propose a CCM-based corpus curation and dynamic batching strategy for effective MoE training. 3) Extensive experiments validate the efficiency of our method in achieving inference speedup while maintaining performance.
71
+
72
+ # 2 Preliminary
73
+
74
+ # 2.1 L0-regularization
75
+
76
+ L0-regularization (Louizos et al., 2018) is a powerful technique for feature selection and parameter pruning in neural networks. It imposes a penalty
77
+
78
+ on parameters that deviate from zero, without additional constraints. This approach enhances model efficiency by eliminating unnecessary computations and resources, as irrelevant parameters are pruned and thus not computed. For a given weight matrix $W \in R^{m \times n}$ , a mask matrix $Z \in \{0,1\}^{n}$ is employed to derive a reduced weight $g(W,Z) \in R^{m \times n_0}$ , where $g$ selects $n_0 < n$ columns from $W$ using $Z$ . Due to the non-differentiable nature of $Z$ , optimizing it is challenging. To address this, the binary hard concrete function is introduced for L0-regularization, as shown in Equation 1.
79
+
80
+ $$
81
+ \begin{array}{l} u \sim \mathcal {U} \\ s = \operatorname {S i g m o i d} ((\log (u) - \log (1 - u) + \log a) / b) \\ \bar {s} = s (\zeta - \gamma) + \gamma \\ z = \min (1, \max (0, \bar {s})) \tag {1} \\ \end{array}
82
+ $$
83
+
84
+ The uniform distribution $\mathcal{U}$ is defined over the interval [0,1]. We set the hyper-parameters as $b = 0.83$ , $\zeta = 1.1$ , and $\gamma = -0.1$ by following Louizos et al. (2018). Using the learned $z$ , we estimate the proportion of retained weights as $\hat{r} = \frac{\text{sum}(z)}{m*n}$ . To effectively control the desired retention ratio $r$ for a given weight matrix $W$ , we employ a Lagrangian multiplier (Wang et al., 2019), as described in Equation 2.
85
+
86
+ $$
87
+ \mathcal {L} _ {l _ {0}} = \lambda_ {1} (\hat {r} - r) + \lambda_ {2} (\hat {r} - r) ^ {2} \tag {2}
88
+ $$
89
+
90
+ We initialize the learnable parameters $\lambda_{1}$ and $\lambda_{2}$ to 0 in our experiments. In our approach, $r$ represents the retention ratio of the feed-forward network (FFN) up-projection dimension.
91
+
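+ A compact sketch of the hard concrete gate and the Lagrangian sparsity penalty above (illustrative; `log_a` is the learnable parameter per FFN dimension and `lambdas` holds the two multipliers):
+
+ ```python
+ import torch
+
+ def hard_concrete_gate(log_a, b=0.83, zeta=1.1, gamma=-0.1, eps=1e-6):
+     """Sample z in [0, 1] following Equation 1."""
+     u = torch.rand_like(log_a).clamp(eps, 1 - eps)
+     s = torch.sigmoid((torch.log(u) - torch.log(1 - u) + log_a) / b)
+     s_bar = s * (zeta - gamma) + gamma
+     return s_bar.clamp(0.0, 1.0)
+
+ def l0_lagrangian(z, target_ratio, lambdas):
+     """Equation 2: penalize deviation of the retained ratio r_hat from the target r."""
+     r_hat = z.sum() / z.numel()
+     gap = r_hat - target_ratio
+     return lambdas[0] * gap + lambdas[1] * gap ** 2
+ ```
+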
92
+ # 2.2 Mixture of Expert
93
+
94
+ Mixture of Experts (MoE) (Cai et al., 2024) employs a modular architecture comprising a gating network and multiple expert networks to enhance efficiency and performance through parameter scaling. This architecture partitions the model into several experts, each specializing in specific subsets of input data. MoE utilizes a gating mechanism with a router to dynamically select the appropriate experts for processing incoming inputs, allowing the model to concentrate on relevant features while minimizing unnecessary computations. In our approach, the router is implemented as a linear projection layer $W_{\text{router}} \in R^{d \times N}$ . MoE incorporates two auxiliary losses (Equation 3), such as the load balancing loss $\mathcal{L}_{\text{balance}}$ (Fedus et al., 2022) and the router $Z$ -Loss $\mathcal{L}z$ (Zoph et al., 2022), to promote a balanced distribution of inputs among experts. These losses penalize high values in the logits produced by the gating network, encouraging a more even allocation of tokens to experts.
95
+
96
+ $$
97
+ \mathcal {L} _ {a u x} = \mathcal {L} _ {b a l a n c e} + \lambda \mathcal {L} _ {z}
98
+ $$
99
+
100
+ $$
101
+ \mathcal {L} _ {\text {b a l a n c e}} = \sum_ {i = 1} ^ {i = N} \left(\frac {c _ {i}}{B} - \frac {1}{N}\right) ^ {2} \tag {3}
102
+ $$
103
+
104
+ $$
105
+ \mathcal {L} _ {z} = \frac {1}{B} \sum_ {j = 1} ^ {B} \left(\log \left(\sum_ {i = 1} ^ {N} e ^ {x _ {i} ^ {(j)}}\right)\right) ^ {2}
106
+ $$
107
+
108
+ Here $c_{i}$ represents the tokens of the $i^{th}$ expert, and $N$ denotes the number of experts. The batch contains $B$ tokens. The logit for the $j^{th}$ token from the $i^{th}$ expert, as determined by the router module, is denoted as $x_{i}^{(j)}$ .
109
+
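+ A reference sketch of these auxiliary terms computed from the router logits (illustrative; top-1 assignment is used for the expert counts $c_i$):
+
+ ```python
+ import torch
+
+ def moe_aux_loss(router_logits, lam=1e-3):
+     """router_logits: [B, N] logits over N experts for a batch of B tokens (Equation 3)."""
+     B, N = router_logits.shape
+     assignment = router_logits.argmax(dim=-1)                      # expert chosen per token
+     counts = torch.bincount(assignment, minlength=N).float()       # c_i: tokens per expert
+     balance = ((counts / B - 1.0 / N) ** 2).sum()                  # load-balancing loss
+     z_loss = torch.logsumexp(router_logits, dim=-1).pow(2).mean()  # router z-loss
+     return balance + lam * z_loss
+ ```
+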
110
+ # 3 Approach
111
+
112
+ # 3.1 Cluster Confusion Matrix based Sampling
113
+
114
+ Given a pretraining corpus, we construct training datasets via the following steps: 1) Randomly sample a small subset without replacement and use the BGE-M3 encoder (Chen et al., 2024) to extract $d_{sv}$ -dimensional semantic vectors for each sample. 2) Apply the K-means clustering algorithm (Jin and Han, 2010) to the semantic vectors to identify $K$ centers $C \in \mathbb{R}^{K \times d_{sv}}$ . Divide the small subset into $K$ folds and sample $m$ instances from each fold to form a dataset $D_{sl} = \{D_{s_1}, \ldots, D_{s_K}\}$ for domain semantic learning, where $|D_{s_k}| = m$ for $1 \leq k \leq K$ . 3) Repeat steps 1 and 2 for $Q$ iterations to obtain $Q \times K$ centers and $Q$ datasets. For the $l^{th}$ iteration ( $l = \{1, 2, \ldots, Q\}$ ), the cluster centers are $C^{(l)} \in \mathbb{R}^{K \times d_{sv}}$ and the constructed dataset is $D_{sl}^{(l)}$ .
115
+
116
+ We define the clustering confusion matrix (CCM) as per Equation 4, where $\delta = 0.1$ is a hyperparameter, $C_i$ represents the $i^{th}$ center vector,
117
+
118
+ and $CCM[i,l]$ denotes the clustering confusion value for the $i^{th}$ center at iteration $l$ . The hypothesis posits that the semantic domain distance for the $i^{th}$ center between $C_i$ and $C_i^{(l)}$ can be assessed using bidirectional inter-clustering $(f_1$ and $f_2)$ and intra-clustering $(f_3)$ cosine similarity. A larger semantic domain distance indicates that $D_{sl}^{(l)}$ from the $l^{th}$ iteration divides domains more distinctly. We compute the domain semantic distance using Equation 5 and reorder the $Q$ datasets based on $d_{ds}^{(l)}$ . In addition to the initial $D_{sl}^{(0)}$ , our datasets now include $Q - 1$ ordered datasets $D_{sl}^{ord(l)}$ , where $ord(l)$ is the order index.
119
+
120
+ $$
121
+ C C M [ i, l ] = \left(f _ {1} + f _ {2}\right) * f _ {3}
122
+ $$
123
+
124
+ $$
125
+ f _ {1} (i, l) = \frac {1}{K} \sum_ {k = 1} ^ {k = K} e ^ {1 - s i m \left(C _ {i}, C _ {k} ^ {(l)}\right)}
126
+ $$
127
+
128
+ $$
129
+ f _ {2} (i, l) = \frac {1}{K} \sum_ {k = 1} ^ {k = K} e ^ {1 - s i m \left(C _ {k}, C _ {i} ^ {(l)}\right)} \tag {4}
130
+ $$
131
+
132
+ $$
133
+ f _ {3} (i, l) = \delta \frac {\sum_ {k = 1} ^ {k = K} e ^ {s i m \left(C _ {k} ^ {(l)} , C _ {i} ^ {(l)}\right)}}{\sum_ {k = 1} ^ {k = K} e ^ {s i m \left(C _ {k} , C _ {i}\right)}}
134
+ $$
135
+
136
+ $$
137
+ d _ {d s} ^ {(l)} = \max (C C M [ :, l ]) + \frac {\beta}{K} \sum_ {i = 1} ^ {i = K} C C M [ i, l ] \tag {5}
138
+ $$
139
+
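+ A simplified sketch of one CCM column and the resulting domain semantic distance, assuming L2-normalized cluster centers so that dot products are cosine similarities:
+
+ ```python
+ import numpy as np
+
+ def ccm_column(C, C_l, delta=0.1, beta=1.0):
+     """C, C_l: [K, d] reference and iteration-l centers. Returns CCM[:, l] and d_ds (Eqs. 4-5)."""
+     sim_cross = C @ C_l.T                              # sim(C_i, C_k^(l))
+     f1 = np.exp(1 - sim_cross).mean(axis=1)            # average over k for each i
+     f2 = np.exp(1 - sim_cross).mean(axis=0)            # sim(C_k, C_i^(l)) averaged over k
+     f3 = delta * (np.exp(C_l @ C_l.T).sum(axis=0) / np.exp(C @ C.T).sum(axis=0))
+     ccm = (f1 + f2) * f3
+     d_ds = ccm.max() + beta * ccm.mean()               # (beta / K) * sum = beta * mean
+     return ccm, d_ds
+ ```
+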
140
+ # 3.2 Expert Construction via L0-regularization
141
+
142
+ We construct the experts using pretrained checkpoints of dense LLMs. The intermediate size of the feed forward network (FFN) layer is $d_{int}$ , and we apply a mask $Z \in R^{d_{int}}$ . For each domain subset $D_{s_k}$ ( $k \in \{1, 2, \dots, K\}$ ) derived from the initial $D_{sl}^{(0)}$ , we employ the LLM pretraining loss $\mathcal{L}_{llm}$ along with the L0-regularization loss, as specified in Equation 1, to select $r \times 100\%$ of the dimensions from $d_{int}$ (with $r \in (0, 1)$ ), following Equation 6.
143
+
144
+ $$
145
+ \mathcal {L} _ {e x p} = \mathcal {L} _ {l l m} + \mathcal {L} _ {l 0} \tag {6}
146
+ $$
147
+
148
+ To ensure stable training, we gradually adjust $r$ from $100\%$ to the target ratio $r^{target}$ . We freeze all non-MLP parameters of dense LLMs, and the L0-regularization-based training yields $K$ experts, each specialized for distinct semantic domains.
149
+
150
+ # 3.3 Dynamic Batching for MoE Training
151
+
152
+ To train the MoE to effectively select appropriate experts based on inputs, we follow Equation 7, where $\mathcal{L}_{aux}$ is defined in Equation 3. The MoE is initialized with $K$ pre-trained experts and a router for each MoE layer.
153
+
154
+ $$
155
+ \mathcal {L} = \mathcal {L} _ {l l m} + \alpha \mathcal {L} _ {a u x} \tag {7}
156
+ $$
157
+
158
+ We employ a two-loop batch construction strategy during training: 1) domain semantic distance scheduling, where we begin with $D_{sl}^{ord(l)}$ having a
159
+
160
+ lower $d_{ds}$ ; 2) multi-domain gathering scheduling, where samples in $D_{sl}^{ord(l)}$ are arranged in a cyclic sequence order $x_i^1, x_i^2, \ldots, x_i^K$ , and we select $p*K$ ( $p = \{1,2,3,\ldots\}$ ) samples to form a batch. This scheduling offers two advantages: 1) In the initial iterations, the MoE rapidly learns to select appropriate experts since the domain samples in $D_{sl}$ have been previously encountered by the experts. Consequently, the sequence-level selection capabilities of routers are effectively initialized. 2) As training progresses, the domains in $D_{sl}^{ord(l)}$ gradually transition to different semantic spaces, encouraging routers to select multiple experts for each input sample. This enhances the token-level selection capabilities of the routers.
161
+
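+ A sketch of this two-loop scheduling (illustrative): datasets are visited in increasing $d_{ds}$ order, and each batch cyclically gathers $p \cdot K$ samples across the $K$ domain folds:
+
+ ```python
+ def dynamic_batches(ordered_datasets, p=1):
+     """ordered_datasets: list of per-iteration datasets, each a list of K equally sized
+     domain folds, already sorted by increasing domain semantic distance d_ds."""
+     for folds in ordered_datasets:            # loop 1: domain semantic distance scheduling
+         iters = [iter(fold) for fold in folds]
+         exhausted = False
+         while not exhausted:                  # loop 2: multi-domain gathering
+             batch = []
+             for _ in range(p):
+                 for it in iters:              # one sample per domain, in cyclic order
+                     try:
+                         batch.append(next(it))
+                     except StopIteration:
+                         exhausted = True
+             if batch:
+                 yield batch
+ ```
+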
162
+ # 4 Experiments
163
+
164
+ # 4.1 Experimental Setup
165
+
166
+ Dataset. We train on the RedPajama dataset (Weber et al., 2024), a replicated pre-training corpus for LLaMA models, following prior work (Xia et al., 2024). Evaluation is conducted on four public benchmarks: MMLU (Hendrycks et al., 2021), GSM8K (Cobbe et al., 2021), HumanEval (Chen et al., 2021), and BigBench Hard (BBH) (Suzgun et al., 2023). Each benchmark evaluates distinct aspects of model performance, offering insights into the strengths and limitations of LLMs.
167
+
168
+ Baselines. To assess effectiveness and versatility, we evaluate our method on three open-source LLMs: Llama-3-8B (Dubey et al., 2024), Mistral7B (Jiang et al., 2023), and Qwen2-7B (Yang et al., 2024). Comparisons include L0-regularized MoEs, original LLMs, and inference optimization techniques such as GPTQ quantization (Frantar et al., 2023), LLM Shearing pruning (Xia et al., 2024), and RKD + CoT knowledge distillation (Gu et al., 2024; Feng et al., 2024). For CCM, we run 21 iterations, collecting 30B tokens. Experiments use a cluster/expert size of $K = 64$ with linear warmup, annealing, and a peak learning rate of 1e-4. Further details are in Appendix A.2.
169
+
170
+ Implementation Details. We train our model using the FSDP framework<sup>1</sup>, employing a layer-wise wrapping policy with the Zero-3 parameter sharding strategy, without CPU offloading. For inference during evaluation, we utilize the SGlang framework<sup>2</sup>, which is highly optimized for the efficient execution of both dense LLMs and MoEs.
171
+
172
+ All baseline models in our experiments utilize the same SGlang inference framework, ensuring a fair and consistent comparison of inference speeds. Our method is framework-agnostic and can similarly be implemented using other inference frameworks (e.g., vLLM $^3$ ). The primary source of inference acceleration in our work is the proposed L0-regularization-based MoE architecture, not the inference framework itself. To ensure a fair comparison, we strictly adhere to the original evaluation settings for each benchmark. To support future research, we will release our curated dataset and code to enhance the reproducibility of our work.
173
+
174
+ # 4.2 Main Results
175
+
176
+ Table 1 presents the model with the highest performance under our settings. The L0-MoE consistently achieves a 2-2.5x inference speedup across all base LLMs. Additionally, L0-MoE maintains performance comparable to the base LLMs across four benchmarks, with the L0-MoE variant of Mistral even demonstrating a $1\%$ average performance improvement. Table 2 compares these results with other inference acceleration baselines, which, despite achieving some speedup, exhibit noticeable performance degradation.
177
+
178
+ # 4.3 Ablation Study
179
+
180
+ Table 3 presents the ablation study on the CCM module, dynamic batching, and L0-regularization. Removing the K-means clustering from the CCM module results in a performance decline, underscoring the importance of effective sub-dataset curation. For dynamic batching, substituting it with random order or random batch scheduling also leads to degraded performance.
181
+
182
+ In the context of MoE expert construction, we replace L0-regularization with four alternative methods: 1) Random MoE (Zhu et al., 2024): Selects MLP dimensions randomly, serving as a baseline to assess the necessity and effectiveness of dimension selection in expert construction, 2) Magnitude (Sun et al., 2023)): Selects the most influential elements in the weight matrix, improving upon traditional magnitude pruning by considering both the weights and their corresponding input activations using the L2 norm, 3) OBS (Frantar et al., 2021; Frantar and Alistarh, 2022)): Identifies the most critical dimensions using the OBS Hessian matrix, which encapsulates second-order derivative information of the loss function with respect to model
183
+
184
+ <table><tr><td>Model</td><td>MMLU</td><td>GSM8K</td><td>HumanEval</td><td>BBH</td><td>Average</td><td>Speedup</td></tr><tr><td>Llama-3-8B</td><td>66.6</td><td>56.0</td><td>33.5</td><td>57.7</td><td>53.5</td><td></td></tr><tr><td>Llama-3-8B w/ L0-MoE</td><td>66.3</td><td>55.9</td><td>33.7</td><td>57.2</td><td>53.3</td><td>2.0x</td></tr><tr><td>Mistral-7B</td><td>64.1</td><td>52.2</td><td>29.3</td><td>56.1</td><td>50.4</td><td></td></tr><tr><td>Mistral-7B w/ L0-MoE</td><td>64.8</td><td>53.6</td><td>31.1</td><td>55.9</td><td>51.4</td><td>2.1x</td></tr><tr><td>Qwen2-7B</td><td>70.3</td><td>79.9</td><td>51.2</td><td>62.6</td><td>66.0</td><td></td></tr><tr><td>Qwen2-7B w/ L0-MoE</td><td>70.4</td><td>80.5</td><td>52.0</td><td>61.5</td><td>66.1</td><td>2.5x</td></tr></table>
185
+
186
+ Table 1: Evaluation of different LLMs on MMLU, GSM8K, HumanEval and BBH benchmarks.
187
+
188
+ <table><tr><td>Model</td><td>MMLU</td><td>GSM8K</td><td>Speedup</td></tr><tr><td>Qwen2-7B</td><td>70.3</td><td>79.9</td><td>-</td></tr><tr><td>L0-MoE</td><td>70.4</td><td>80.5</td><td>2.5x</td></tr><tr><td>GPTQ</td><td>67.8</td><td>73.8</td><td>1.8x</td></tr><tr><td>LLM Shearing</td><td>68.2</td><td>75.5</td><td>2.6x</td></tr><tr><td>RKD + CoT</td><td>61.2</td><td>60.2</td><td>5.1x</td></tr></table>
189
+
190
+ Table 2: Comparison with other inference acceleration baselines. We employ Qwen2-7B as the base LLM.
191
+
192
+ <table><tr><td>Model</td><td>MMLU</td><td>GSM8K</td></tr><tr><td>L0-MoE</td><td>70.4</td><td>80.5</td></tr><tr><td>CCM w/o K-means</td><td>68.2</td><td>78.1</td></tr><tr><td>w/ random order batching</td><td>68.2</td><td>75.5</td></tr><tr><td>w/ random batch batching</td><td>66.6</td><td>77.1</td></tr><tr><td>Random MoE</td><td>48.1</td><td>69.6</td></tr><tr><td>Magnitude</td><td>52.6</td><td>69.1</td></tr><tr><td>OBS</td><td>68.4</td><td>74.1</td></tr><tr><td>SVD</td><td>55.2</td><td>73.8</td></tr></table>
193
+
194
+ parameters. This approach is crucial for both pruning and quantization, as it helps retain the weights that most significantly impact model performance, 4) SVD (Wang et al., 2024)): Decomposes the weight matrix using singular value decomposition and selects the most significant columns. By retaining the largest singular values, it reduces parameter count while preserving essential information. This truncation minimizes compression loss, and layer-wise updates further fine-tune the model to maintain accuracy. The results demonstrate the superiority of L0-MoE over them.
195
+
196
+ Table 3: Ablation study of CCM and L0-regularization. We conduct experiments on MMLU and GSM8K datasets with Qwen2-7B.
197
+
198
+ <table><tr><td>Model</td><td>MMLU</td><td>#Para.(B)</td><td>Speedup</td></tr><tr><td>L0-MoE</td><td>70.4</td><td>23.3</td><td>2.5x</td></tr><tr><td>CCM Iter. (Q=2)</td><td>68.2</td><td>23.3</td><td>2.5x</td></tr><tr><td>CCM Iter. (Q=5)</td><td>68.8</td><td>23.3</td><td>2.5x</td></tr><tr><td>CCM Iter. (Q=10)</td><td>69.7</td><td>23.3</td><td>2.5x</td></tr><tr><td>Expert size (K=8)</td><td>50.9</td><td>4.8</td><td>4.6x</td></tr><tr><td>Expert size (K=32)</td><td>69.2</td><td>12.7</td><td>3.2x</td></tr></table>
199
+
200
+ Table 4: Hyper-parameter tuning of sampling iterations $(Q)$ and cluster size $(K)$ , keeping 2.8B activated parameters for L0-MoE. Qwen2-7B is the base LLM. #Para.(B) is the number of model parameters.
201
+
202
+ # 4.4 Discussion
203
+
204
+ Our approach involves two critical hyperparameters: the sampling iterations $Q$ in CCM curated datasets and the expert size $K$ in MoE. Table 4
205
+
206
+ provides a detailed overview of hyperparameter tuning. Increasing the number of iterations for CCM enhances performance but also demands greater computational resources. We find that an initial iteration plus 20 additional iterations suffice to optimize model performance. While increasing the number of experts improves performance, it also reduces inference speed. Therefore, we select an appropriate expert size to balance performance enhancement and LLM acceleration.
207
+
208
+ Besides, the optimal number of clusters $(K)$ primarily depends on the characteristics of the pretraining corpus; thus, it may not directly transfer to other experiments if the corpus differs significantly. We recommend applying our method to pre-training corpora with abundant topical diversity, such as RedPajama, which contains millions of domains. In such corpora, a larger $K$ can effectively cluster more specialized subsets, enabling CCM to be more effectively applied when constructing L0-MoE models.
209
+
210
+ To assess model size impact, we conducted further experiments using a 1.5B-parameter Qwen2 model with 64 experts, achieving a $2\mathrm{x}$ speedup without performance degradation. However, due to computational resource constraints, we have not yet experimented with larger models (e.g., 70B parameters). We hypothesize that larger LLMs could potentially achieve even greater speedups. We leave the verification of this hypothesis for larger-scale models as future work.
211
+
212
+ # 5 Conclusion and Future Work
213
+
214
+ In this paper, we propose a novel Mixture-of-Experts based approach to accelerate LLM inference, leveraging clustering confusion matrix for dataset curation, L0-regularization for expert selection, and dynamic batching for efficient training with only 30B tokens. Our method achieves a $2.5\times$ speedup over dense LLMs, outperforming strong baselines nearly without performance loss. Future work will explore scaling our approach to larger LLMs and expanding the corpus size to further enhance L0-MoE performance beyond dense LLMs.
215
+
216
+ # Limitations
217
+
218
+ We did not compare our method with MoEs such as DeepSeek-MoE (Dai et al., 2024), Qwen-MoE (Qwen-Team, 2024), and Mixtral (Jiang et al., 2024), which scale up model parameters in dense models with immense computational costs, processing trillions of tokens. In contrast, our approach utilizes only 30B tokens, making it more comparable to baseline post-training inference speedup methods.
219
+
220
+ Despite the promising results, several limitations remain: (1) The dataset for training each expert is selected via sequence-level semantic clustering, introducing exposure bias since MoE expert selection is performed at the token level. (2) The method does not explicitly measure inter-expert differences, potentially leading to redundant parameters that hinder L0-MoE's inference acceleration. Future work should explore token-level dataset partitioning to mitigate exposure bias. Additionally, novel learning paradigms are needed to reduce parameter redundancy and enhance expert routing efficiency.
221
+
222
+ # References
223
+
224
+ Joshua Ainslie, James Lee-Thorp, Michiel de Jong, Yury Zemlyanskiy, Federico Lebron, and Sumit Sanghai. 2023. GQA: Training generalized multi-query transformer models from multi-head checkpoints. In The 2023 Conference on Empirical Methods in Natural Language Processing.
225
+ Sebastien Bubeck, Varun Chandrasekaran, Ronen Eldan, Johannes Gehrke, Eric Horvitz, Ece Kamar, Peter Lee, Yin Tat Lee, Yuanzhi Li, Scott Lundberg, Harsha Nori, Hamid Palangi, Marco Tulio Ribeiro, and Yi Zhang. 2023. Sparks of artificial general intelligence: Early experiments with gpt-4.
226
+ Weilin Cai, Juyong Jiang, Fan Wang, Jing Tang, Sunghun Kim, and Jiayi Huang. 2024. A survey on mixture of experts. arXiv preprint arXiv:2407.06204.
227
+ Jianlv Chen, Shitao Xiao, Peitian Zhang, Kun Luo, Defu Lian, and Zheng Liu. 2024. Bge m3-embedding: Multi-lingual, multi-functionality, multi-granularity text embeddings through self-knowledge distillation. arXiv preprint arXiv:2402.03216.
228
+ Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, Alex Ray, Raul Puri, Gretchen Krueger, Michael Petrov, Heidi Khlaaf, Girish Sastry, Pamela Mishkin, Brooke Chan, Scott Gray, Nick Ryder, Mikhail Pavlov, Alethea Power, Lukasz Kaiser, Mohammad Bavarian, Clemens Winter,
229
+
230
+ Philippe Tillet, Felipe Petroski Such, Dave Cummings, Matthias Plappert, Fotios Chantzis, Elizabeth Barnes, Ariel Herbert-Voss, William Hebgen Guss, Alex Nichol, Alex Paino, Nikolas Tezak, Jie Tang, Igor Babuschkin, Suchir Balaji, Shantanu Jain, William Saunders, Christopher Hesse, Andrew N. Carr, Jan Leike, Josh Achiam, Vedant Misra, Evan Morikawa, Alec Radford, Matthew Knight, Miles Brundage, Mira Murati, Katie Mayer, Peter Welinder, Bob McGrew, Dario Amodei, Sam McCandlish, Ilya Sutskever, and Wojciech Zaremba. 2021. Evaluating large language models trained on code.
231
+ Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. 2021. Training verifiers to solve math word problems.
232
+ Damai Dai, Chengqi Deng, Chenggang Zhao, R. X. Xu, Huazuo Gao, Deli Chen, Jiashi Li, Wangding Zeng, Xingkai Yu, Y. Wu, Zhenda Xie, Y. K. Li, Panpan Huang, Fuli Luo, Chong Ruan, Zhifang Sui, and Wenfeng Liang. 2024. Deepseekmoe: Towards ultimate expert specialization in mixture-of-experts language models.
233
+ DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning.
234
+ DeepSeek-AI, Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, et al. 2024. Deepseek-v3 technical report.
235
+ Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783.
236
+ William Fedus, Barret Zoph, and Noam Shazeer. 2022. Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity. Journal of Machine Learning Research, 23(120):1-39.
237
+ Tao Feng, Yicheng Li, Li Chenglin, Hao Chen, Fei Yu, and Yin Zhang. 2024. Teaching small language models reasoning through counterfactual distillation. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 5831-5842.
238
+ Elias Frantar and Dan Alistarh. 2022. Optimal brain compression: A framework for accurate post-training quantization and pruning. Advances in Neural Information Processing Systems, 35:4475-4488.
239
+ Elias Frantar, Saleh Ashkboos, Torsten Hoefler, and Dan Alistarh. 2023. OPTQ: Accurate quantization for generative pre-trained transformers. In *The Eleventh International Conference on Learning Representations*.
240
+
241
+ Elias Frantar, Eldar Kurtic, and Dan Alistarh. 2021. M-fac: Efficient matrix-free approximations of second-order information. Advances in Neural Information Processing Systems, 34:14873-14886.
242
+ Gemini-Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M. Dai, et al. 2024. Gemini: A family of highly capable multimodal models.
243
+ Yuxian Gu, Li Dong, Furu Wei, and Minlie Huang. 2024. Minillm: Knowledge distillation of large language models. In The Twelfth International Conference on Learning Representations.
244
+ Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. 2021. Measuring massive multitask language understanding. In International Conference on Learning Representations.
245
+ Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. 2015. Distilling the knowledge in a neural network.
246
+ Albert Q Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, et al. 2023. Mistral 7b. arXiv preprint arXiv:2310.06825.
247
+ Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, Gianna Lengyel, Guillaume Bour, Guillaume Lample, Lélio Renard Lavaud, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Sandeep Subramanian, Sophia Yang, Szymon Antoniak, Teven Le Scao, Théophile Gervet, Thibaut Lavril, Thomas Wang, Timothée Lacroix, and William El Sayed. 2024. Mixtral of experts.
248
+ Xin Jin and Jiawei Han. 2010. K-Means Clustering, pages 563-564. Springer US, Boston, MA.
249
+ Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B. Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. 2020. Scaling laws for neural language models.
250
+ Haokun Lin, Haobo Xu, Yichen Wu, Jingzhi Cui, Yingtao Zhang, Linzhan Mou, Linqi Song, Zhenan Sun, and Ying Wei. 2024a. Duquant: Distributing outliers via dual transformation makes stronger quantized llms. In The Thirty-eighth Annual Conference on Neural Information Processing Systems.
251
+ Ji Lin, Jiaming Tang, Haotian Tang, Shang Yang, Wei Ming Chen, Wei-Chen Wang, Guangxuan Xiao, Xingyu Dang, Chuang Gan, and Song Han. 2024b. Awq: Activation-aware weight quantization for ondevice llm compression and acceleration. Proceedings of Machine Learning and Systems, 6:87-100.
252
+
253
+ Christos Louizos, Max Welling, and Diederik P. Kingma. 2018. Learning sparse neural networks through $l_{0}$ regularization. In International Conference on Learning Representations.
254
+ Xinyin Ma, Gongfan Fang, and Xinchao Wang. 2023. LLM-pruner: On the structural pruning of large language models. In Thirty-seventh Conference on Neural Information Processing Systems.
255
+ OpenAI, Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, et al. 2024. Gpt-4 technical report.
256
+ Qwen-Team. 2024. Qwen1.5-moe: Matching 7b model performance with 1/3 activated parameters.
257
+ Mingjie Sun, Zhuang Liu, Anna Bair, and J Zico Kolter. 2023. A simple and effective pruning approach for large language models. arXiv preprint arXiv:2306.11695.
258
+ Mirac Suzgun, Nathan Scales, Nathanael Scharli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc Le, Ed Chi, Denny Zhou, and Jason Wei. 2023. Challenging BIG-bench tasks and whether chain-of-thought can solve them. In Findings of the Association for Computational Linguistics: ACL 2023, pages 13003-13051, Toronto, Canada. Association for Computational Linguistics.
259
+ Xin Wang, Yu Zheng, Zhongwei Wan, and Mi Zhang. 2024. Svd-llm: Truncation-aware singular value decomposition for large language model compression. arXiv preprint arXiv:2403.07378.
260
+ Ziheng Wang, Jeremy Wohlwend, and Tao Lei. 2019. Structured pruning of large language models. arXiv preprint arXiv:1910.04732.
261
+ Maurice Weber, Daniel Fu, Quentin Anthony, Yonatan Oren, Shane Adams, Anton Alexandrov, Xiaozhong Lyu, Huu Nguyen, Xiaozhe Yao, Virginia Adams, et al. 2024. Redpajama: an open dataset for training large language models. arXiv preprint arXiv:2411.12372.
262
+ Mengzhou Xia, Tianyu Gao, Zhiyuan Zeng, and Danqi Chen. 2024. Sheared LLaMA: Accelerating language model pre-training via structured pruning. In The Twelfth International Conference on Learning Representations.
263
+ Guangxuan Xiao, Ji Lin, Mickael Seznec, Hao Wu, Julien Demouth, and Song Han. 2023. Smoothquant: Accurate and efficient post-training quantization for large language models. In International Conference on Machine Learning, pages 38087-38099. PMLR.
264
+ An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, et al. 2024. Qwen2 technical report. arXiv preprint arXiv:2407.10671.
265
+
266
+ An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, Huan Lin, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxi Yang, Jingren Zhou, Junyang Lin, Kai Dang, Keming Lu, Keqin Bao, Kexin Yang, Le Yu, Mei Li, Mingfeng Xue, Pei Zhang, Qin Zhu, Rui Men, Runji Lin, Tianhao Li, Tianyi Tang, Tingyu Xia, Xingzhang Ren, Xuancheng Ren, Yang Fan, Yang Su, Yichang Zhang, Yu Wan, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zihan Qiu. 2025. Qwen2.5 technical report.
267
+ Tong Zhu, Xiaoye Qu, Daize Dong, Jiacheng Ruan, Jingqi Tong, Conghui He, and Yu Cheng. 2024. LLaMA-MoE: Building mixture-of-experts from LLaMA with continual pre-training. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 15913-15923, Miami, Florida, USA. Association for Computational Linguistics.
268
+ Barret Zoph, Irwan Bello, Sameer Kumar, Nan Du, Yanping Huang, Jeff Dean, Noam Shazeer, and William Fedus. 2022. St-moe: Designing stable and transferable sparse expert models. arXiv preprint arXiv:2202.08906.
269
+
270
+ # A Appendix
271
+
272
+ This section provides further details on the model architecture, experimental setup (including evaluation tasks, baselines, and hyperparameter settings), and implementation details.
273
+
274
+ # A.1 Model Architecture
275
+
276
+ Table 5 presents the detailed architecture of the baseline models and L0-MoE. All models incorporate group query attention (GQA) (Ainslie et al., 2023) within the self-attention layer. For the L0-MoE models, the bottom 4 layers (for Qwen2) and 8 layers (for Mistral and Llama3) are configured as dense layers, while the remaining layers are transformed into MoE layers. We select the top-2 experts for each input token.
277
+
278
+ # A.2 Experimental Setups
279
+
280
+ Evaluation Tasks. We assess performance on four public benchmarks: MMLU (Hendrycks et al., 2021), GSM8K (Cobbe et al., 2021), HumanEval (Chen et al., 2021), and BigBench Hard (BBH) (Suzgun et al., 2023).
281
+
282
+ - MMLU (Massive Multitask Language Understanding) (Hendrycks et al., 2021) comprises 57 tasks spanning diverse subjects, including STEM (Science, Technology, Engineering, and Mathematics), humanities, social sciences, and specialized domains such as law and ethics.
283
+ - GSM8K (Grade School Math 8K) (Cobbe et al., 2021) is a benchmark designed to assess the mathematical reasoning capabilities of LLMs, containing 8,500 high-quality elementary math word problems.
284
+ - HumanEval (Chen et al., 2021) evaluates the code generation capabilities of LLMs through 164 programming tasks, each requiring the model to generate a function that satisfies a given set of test cases.
285
+ - BBH (BIG-Bench Hard) (Suzgun et al., 2023) is a subset of the larger BIG-Bench dataset, consisting of 23 highly challenging tasks that test the limits of current LLMs. These tasks demand creative problem-solving and deep domain expertise.
286
+
287
+ Baselines. We compare the L0-regularized MoEs with the original LLMs and other LLM inference optimization methods, including the quantization baseline GPTQ (Frantar et al., 2023), the model pruning baseline LLM Shearing (Xia et al., 2024), and the knowledge distillation baseline RKD + CoT (Gu et al., 2024; Feng et al., 2024).
290
+
291
+ - GPTQ (Frantar et al., 2023) is a block-wise quantization method that extends traditional power-of-two quantization by allowing non-uniform bin widths, enabling a better approximation of the original floating-point value distribution.
292
+ - LLM-Shearing (Xia et al., 2024) employs structured pruning to construct lightweight, structured LLMs from pretrained checkpoints. It jointly removes attention heads, layers, feedforward networks (FFNs), and hidden dimensions in an end-to-end manner to optimize efficiency.
293
+ - RKD + CoT: We apply RKD (Gu et al., 2024) to distill the CoT (Feng et al., 2024) capabilities of Qwen2-7B into Qwen2-1.5B. RKD (Gu et al., 2024) aligns the student model with the teacher's distribution using reverse Kullback-Leibler divergence (KLD), encouraging the student to focus on the most probable outcomes. This helps preserve the quality of the student model's predictions by distilling Chain-of-Thought (CoT) reasoning from the teacher model.
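To make the reverse-KLD objective above concrete, here is a minimal sketch of KL(student || teacher) computed from per-position logits; this is our own illustration of the general idea, not the RKD authors' implementation.

```python
import torch
import torch.nn.functional as F

def reverse_kld(student_logits, teacher_logits, temperature=1.0):
    """KL(student || teacher) per token position, averaged.

    The mode-seeking direction of this divergence is what encourages the
    student to concentrate on the teacher's most probable outputs."""
    s_logp = F.log_softmax(student_logits / temperature, dim=-1)
    t_logp = F.log_softmax(teacher_logits / temperature, dim=-1)
    s_p = s_logp.exp()
    # sum_v p_s(v) * (log p_s(v) - log p_t(v)), then average over positions
    return (s_p * (s_logp - t_logp)).sum(dim=-1).mean()
```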
294
+
295
+ Hyper-parameter Setting. The detailed hyperparameter settings are presented in Table 7. This includes the hyper-parameters for the clustering confusion matrix (CCM) as well as those for MoE training.
296
+
297
+ # A.3 Comparison with DuQuant
298
+
299
+ To further validate our method, we compare it with DuQuant (Lin et al., 2024a), a recent quantization technique targeting outlier activations in large language models (LLMs). DuQuant uses rotation and permutation to redistribute outliers, aiming to simplify quantization and improve robustness. We evaluate it on the Llama-3-8B model using the MMLU (Hendrycks et al., 2021) and GSM8K (Cobbe et al., 2021) benchmarks. As shown in Table 6, DuQuant suffers noticeable performance degradation, highlighting its limitations. In contrast, our L0-MoE method performs better under the same setting, demonstrating superior accuracy preservation.
300
+
301
+ <table><tr><td>Model</td><td>Parameters(B)</td><td>Layer</td><td>Hidden</td><td>Q/KV</td><td>FFN</td><td>MoE FFN</td><td>Experts</td></tr><tr><td>Llama-3-8B</td><td>7.5</td><td>32</td><td>4096</td><td>32/8</td><td>14336</td><td></td><td></td></tr><tr><td>Llama-3-8B w/ L0-MoE</td><td>25.1/3.2</td><td>32/24</td><td>4096</td><td>32/8</td><td>14336</td><td>1024</td><td>64:2</td></tr><tr><td>Mistral-7B</td><td>7.1</td><td>32</td><td>4096</td><td>32/8</td><td>14336</td><td></td><td></td></tr><tr><td>Mistral-7B w/ L0-MoE</td><td>24.7/2.9</td><td>32/24</td><td>4096</td><td>32/8</td><td>14336</td><td>1024</td><td>64:2</td></tr><tr><td>Qwen2-7B</td><td>7</td><td>28</td><td>3584</td><td>28/4</td><td>18944</td><td></td><td></td></tr><tr><td>Qwen2-7B w/ L0-MoE</td><td>23.3/2.8</td><td>28/24</td><td>3584</td><td>28/4</td><td>18944</td><td>1280</td><td>64:2</td></tr></table>
302
+
303
+ Table 5: Detailed model architecture parameters. We denote the total and activated parameters of the MoEs, as well as the total layers and MoE layers, using the format "32/24", etc. All models utilize GQA, and we present the query/key-value heads. "FFN" refers to the dense decoder MLP size, while "MoE FFN" indicates the intermediate size of the expert for the MoE layer. The total and activated experts are represented as "64:2", etc.
304
+
305
+ <table><tr><td>Model</td><td>MMLU</td><td>GSM8K</td><td>SpeedUp</td></tr><tr><td>Llama-3-8B</td><td>66.6</td><td>56.0</td><td>-</td></tr><tr><td>Llama-3-8B w/ L0-MoE</td><td>66.3</td><td>55.9</td><td>2.0×</td></tr><tr><td>DuQuant/W4A4</td><td>57.9</td><td>51.6</td><td>0.46×</td></tr><tr><td>DuQuant + LWC/W4A4</td><td>62.2</td><td>51.1</td><td>0.46×</td></tr></table>
306
+
307
+ Table 6: Performance comparison of L0-MoE and DuQuant on the MMLU and GSM8K benchmarks. Llama-3-8B is the base LLM. Because SGLang does not support DuQuant, we measured inference speed with a naive PyTorch Transformers setup (batch size = 64, sequence length = 1200). Without optimized kernels, DuQuant is slow (0.46× speedup), but future SGLang support could make it comparable to GPTQ (1.8× speedup).
308
+
309
+ <table><tr><td colspan="2">CCM Hyper-parameters</td></tr><tr><td>Q</td><td>21</td></tr><tr><td>K</td><td>64</td></tr><tr><td>dsv</td><td>1024</td></tr><tr><td>D(0)sl</td><td>12B tokens; |D(0)sl| ≈ 0.15B</td></tr><tr><td>D(l)sl, l ≥ 1</td><td>0.9B tokens; |D(l)sl| ≈ 0.007B</td></tr><tr><td colspan="2">MoE Training Hyper-parameters</td></tr><tr><td>Sequence length</td><td>4096</td></tr><tr><td>Learning rate</td><td>1e-4</td></tr><tr><td>Warmup ratio (expert)</td><td>0.2</td></tr><tr><td>Warmup ratio (MoE)</td><td>0.06</td></tr><tr><td>Warmup type</td><td>Linear</td></tr><tr><td>Annealing ratio</td><td>0.1</td></tr><tr><td>Annealing type</td><td>Cosine</td></tr><tr><td>Batch tokens</td><td>512K</td></tr><tr><td>α in Eq. 7</td><td>0.01</td></tr><tr><td>β in Eq. 5</td><td>0.5</td></tr><tr><td>λ in Eq. 3</td><td>0.3</td></tr><tr><td>Training epoch</td><td>1</td></tr></table>
310
+
311
+ Table 7: Hyper-parameters for CCM and MoE training.
acceleratingdensellmsvial0regularizedmixtureofexperts/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:84bf6fbfad521f3f67df98a07bf96c4aac506dc9c53ffc4925530f3029b3e33c
3
+ size 337666
acceleratingdensellmsvial0regularizedmixtureofexperts/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e43df67ed1bd2ef2fd4c1abc74743c0eab6c8971e4a9d9c6ea959e59b4896e1d
3
+ size 368242
acousticindividualidentificationofwhitefacedcapuchinmonkeysusingjointmultispeciesembeddings/b9706472-0d6b-45a4-874e-38347f9a5e25_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f73f78a2f59181204a7c29ba807bc6c3e81e4b6d5e38c729bd96eebafed5b77b
3
+ size 61411
acousticindividualidentificationofwhitefacedcapuchinmonkeysusingjointmultispeciesembeddings/b9706472-0d6b-45a4-874e-38347f9a5e25_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3dd1724cf082740c1c04ea6a30094e8fb081de205ec180c97c4f95621781d234
3
+ size 71078
acousticindividualidentificationofwhitefacedcapuchinmonkeysusingjointmultispeciesembeddings/b9706472-0d6b-45a4-874e-38347f9a5e25_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ea8d88359b1e0a0e41519558736266cf550700c814950f1a61e17331f232d033
3
+ size 12938221
acousticindividualidentificationofwhitefacedcapuchinmonkeysusingjointmultispeciesembeddings/full.md ADDED
@@ -0,0 +1,221 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Acoustic Individual Identification of White-Faced Capuchin Monkeys Using Joint Multi-Species Embeddings
2
+
3
+ Álvaro Vega-Hidalgo<sup>1</sup>, Artem Abzaliev<sup>1</sup>, Thore Bergman<sup>2,3</sup>, Rada Mihalcea<sup>1</sup>
4
+
5
+ <sup>1</sup>Computer Science and Engineering, University of Michigan
6
+
7
+ $^{2}$ Department of Psychology, University of Michigan
8
+
9
+ $^{3}$ Department of Ecology and Evolutionary Biology, University of Michigan
10
+
11
+ {alvarovh, abzaliev, thore, mihalcea}@umich.edu
12
+
13
+ # Abstract
14
+
15
+ Acoustic individual identification of wild animals is an essential task for understanding animal vocalizations within their social contexts, and for facilitating conservation and wildlife monitoring efforts. However, most of the work in this space relies on human efforts, as the development of methods for automatic individual identification is hindered by the lack of data. In this paper, we explore cross-species pre-training to address the task of individual classification in white-faced capuchin monkeys. Using acoustic embeddings from birds and humans, we find that they can be effectively used to identify the calls from individual monkeys. Moreover, we find that joint multi-species representations can lead to further improvements over the use of one representation at a time. Our work demonstrates the potential of cross-species data transfer and multi-species representations, as strategies to address tasks on species with very limited data.
16
+
17
+ # 1 Introduction
18
+
19
+ For a long time, researchers viewed the vocalizations of non-human species as mere reactions to internal emotional states (Lorenz, 1952). Consequently, early scientific methods in animal communication research largely overlooked individual differences and did not test for the presence of linguistic features (e.g., pragmatics, semantics, syntax) in animal communication systems. This simplified view of animal communication has been overturned by the growing evidence uncovering the presence of linguistic features in nonhuman animals (Bergman et al., 2019), leading to the emergence of Animal Linguistics as a formal interdisciplinary research field (Bowling and Fitch, 2015; Engesser et al., 2015; Suzuki, 2024; Berthet et al., 2023; Suzuki, 2021; Scott-Phillips and Heintz, 2023). This shift in perspective highlights the need for individual-level analysis, as it
20
+
21
+ ![](images/a0782eb89648193d779b5eaf9f24261428459a4840f239a3c8fb5befcdac07d1.jpg)
22
+
23
+ ![](images/f03c6389b15eee4ef5e7020f5187e4d6f591bb095e86e5a874f25b49c435ba88.jpg)
24
+ Figure 1: (A-D) Capuchin Twitter vocalizations show diverse structural variations. (E-F) t-SNE of Google Perch-Whisper embeddings. (E) Call type clusters. (F) Colored by individual, highlighting four diverse examples of Twitters. (G) Adult female capuchin with infant in Taboga. (H) Territories of four capuchin groups in the Taboga Reserve in northwestern Costa Rica.
25
+
26
+ allows researchers to account for the social and environmental contexts in which vocalizations occur, ultimately improving our ability to test their linguistic capacities more rigorously.
27
+
28
+ Additionally, long-term, individual-level analyses are critical for understanding and protecting wildlife. Such analyses support key approaches like social network quantification, assessing animal cognition, and performing capture-recapture techniques for tracking population dynamics (Slater, 1981; Carlson et al., 2020). Over the past decade,
29
+
30
+ acoustic monitoring has emerged as a widely adopted, cost-efficient strategy in conservation, leading to growing interest in acoustic individual identification. By enabling researchers to recognize individuals from their vocalizations, this approach paves the way for more nuanced insights into ecology, behavior, evolution, and conservation (Knight et al., 2024).
31
+
32
+ In this paper, we address the task of acoustic individual identification in white-faced capuchins. We collect a two-year dataset of individualized focal recordings, a labor-intensive yet optimized method that mitigates signal-to-noise and cocktail party problems in wild bioacoustic settings (Bermant, 2021; Miron et al., 2024). Using this dataset, we evaluate human speech- and bird bioacoustic-based pre-trained networks, comparing single-embedding models to ensembles that merge embeddings from distinct networks. We hypothesize that human speech embeddings, such as Whisper or HuBERT, complement bioacoustic embeddings like Google Perch or BirdNET—originally trained on bird sounds—and predict that heterogeneous embedding combinations will outperform single-embedding models.
33
+
34
+ Transfer learning has significantly advanced acoustic classification tasks in non-human animals (Miyaguchi et al., 2024; Kahl et al., 2023; Abzaliev et al., 2024). Recent studies on gibbons have explored the use of self-supervised speech models (e.g., HuBERT, Wav2vec 2.0), pre-trained bird classifiers (e.g., BirdNET, Perch), and non-transfer-learning deep models for primate acoustic identification, finding that speech models most effectively capture individual vocal signatures, bird classifiers perform well in automated detection but are more susceptible to background noise, and non-transfer-learning models struggle when trained on small datasets (Cauzinille et al., 2024; Clink et al., 2024). Nevertheless, it remains unclear whether using multiple joint embeddings leads to better performance by exploiting complementary features from different training data domains.
35
+
36
+ This work makes three main contributions. First, we propose white-faced capuchin monkeys as a model organism for advancing computational research on animal communication. Second, we show that combining embeddings from human speech and bird bioacoustics models significantly improves acoustic identification performance in white-faced capuchins, outperforming single-embedding baselines. Finally, our findings
37
+
38
+ show that acoustic diversity and soundscape similarity play a greater role than phylogenetic proximity. Smaller models trained on diverse bird vocalizations recorded in natural environments outperform much larger speech-trained models designed for humans, despite humans being more closely related to our study species. These results highlight the value of cross-species model development in achieving better generalization for the acoustic identification task.
39
+
40
+ # 2 Study system: white-faced capuchin monkeys in the Taboga Reserve, Costa Rica
41
+
42
+ White-faced capuchin monkeys (Cebus capucinus) are ideal for studying animal communication, with 27 call types (Gros-Louis et al., 2008), complex social behavior and cognition including tool use (Goldsborough et al., 2024), complex social networks (Crofoot et al., 2011) and cultural transmission (Perry et al., 2017). Taboga hosts their highest known density (Tinsley Johnson et al., 2020).
43
+
44
+ Data collection. Our field team collected audio recordings of focal individuals by following them in the Taboga forest. We used directional microphones aimed at the subjects from January 2021 to December 2022 through the wet and dry seasons, with hours ranging from 5 am to $5\mathrm{pm}$ . Recordings were captured at $48\mathrm{kHz}$ and 16 bit resolution. These raw recordings were subsequently trimmed to isolate the precise moments when vocalizations were detected, and only the calls classified as either a "Peep" or "Twitter" were included in this dataset, according to established criteria in the literature (Gros-Louis et al., 2008).
45
+
46
+ Audio recordings. The full dataset consists of 1,257 Twitter recordings and 2,089 Peep recordings from 45 individuals, although $15\%$ of the recordings were assigned to unknown individuals. We include data from individuals that had at least 30 recorded calls, while recordings from unidentified subjects encountered in the field are grouped into an "Unknown" class. For Peeps, this dataset includes 16 individuals, and for Twitters this dataset included 10 individuals (total sample $= 1609$ ). Peep calls are typically short (mean $0.27\mathrm{s}$ , SD $0.27\mathrm{s}$ ), whereas Twitter calls are more complex (Figure 1) and longer (mean $0.40\mathrm{s}$ , SD $0.18\mathrm{s}$ ).
47
+
48
+ # 3 Cross-Species Embeddings for Individual Classification
49
+
50
+ Collecting focal audio recordings of wild animals in their natural habitat is a challenging and resource-intensive task. Even with dedicated field teams, building large enough datasets to fully exploit deep neural networks is difficult. As a result, transfer learning—which leverages the inductive bias of models pre-trained on larger, related datasets—has emerged as the most effective strategy for achieving high performance in bioacoustic classification under low-data conditions (Ghani et al., 2023a).
51
+
52
+ Audio Representation Models. We extract pretrained embeddings from Google Perch V8 (Ghani et al., 2023a), a model primarily trained on bird vocalizations, and Whisper (Radford et al., 2022), which was predominantly pre-trained on human speech. While additional embeddings were evaluated, we focus on these two in the main text for clarity, with results from five other models detailed in Appendix A. We apply mean-pooling to obtain lower-dimensional representations from large speech models like Whisper.
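For reference, a fixed-length Whisper embedding of this kind can be obtained roughly as follows; the checkpoint name matches the Whisper-Large-v2 entry in Table 5, but the exact preprocessing and pooling used in our pipeline may differ from this sketch.

```python
import torch
from transformers import WhisperFeatureExtractor, WhisperModel

extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-large-v2")
encoder = WhisperModel.from_pretrained("openai/whisper-large-v2").encoder

def whisper_call_embedding(waveform, sampling_rate=16000):
    """Mean-pool Whisper encoder states into one vector per vocalization."""
    features = extractor(waveform, sampling_rate=sampling_rate, return_tensors="pt")
    with torch.no_grad():
        hidden = encoder(features.input_features).last_hidden_state  # (1, frames, dim)
    return hidden.mean(dim=1).squeeze(0)                             # (dim,)
```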
53
+
54
+ Minimum Redundancy Maximum Relevance. To combine representations from multiple species, we explore a feature-selection model using Minimum Redundancy and Maximum Relevance (MRMR) (Ding and Peng, 2005), alongside simple concatenation and summation. Originally developed in cancer research for gene selection, MRMR improves feature selection in high-dimensional datasets by balancing two key criteria: maximizing relevance to the target variable (measured via mutual information) while minimizing redundancy (filtered using a correlation coefficient threshold). Our implementation starts with the feature that has the highest mutual information among both embeddings, removes any features with a correlation coefficient of 0.8 or higher, and then iteratively selects the next most informative feature. This process continues until 1024 embedding features are selected from both embeddings, ensuring an optimal balance of diversity and informativeness.
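A simplified reading of this selection loop is sketched below; `mutual_info_classif` from scikit-learn stands in for the mutual-information estimator, which is an assumption on our part.

```python
import numpy as np
from sklearn.feature_selection import mutual_info_classif

def mrmr_select(X, y, n_select=1024, corr_threshold=0.8):
    """Greedy MRMR over concatenated embedding features (illustrative sketch).

    X: (n_samples, n_features) joint embedding matrix, y: individual labels.
    Features are visited from most to least informative; a candidate is kept
    only if it is not strongly correlated with anything already selected."""
    mi = mutual_info_classif(X, y)
    order = np.argsort(mi)[::-1]
    selected = []
    for j in order:
        if len(selected) == n_select:
            break
        if all(abs(np.corrcoef(X[:, j], X[:, s])[0, 1]) < corr_threshold
               for s in selected):
            selected.append(j)
    return np.array(selected)
```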
57
+
58
+ Experimental Setup. To ensure a fair comparison, we carefully control parameter counts and apply hyperparameter tuning. Single-embedding models and the MRMR model compress each input into 512 units, then reduce it to 64 units for final classification. Concatenation and summation ensembles apply a 256-dimensional compression to each embedding separately, then sum or concatenate the outputs before another 64-unit layer. For a robust comparison, we generate 50 random train-test splits (10 recordings per individual in the test set) and train models with all seven single embeddings as well as all pairwise combinations (concatenation, summation, and MRMR). To identify the best hyperparameters for each model, we conduct a search over learning rates $\{1\mathrm{e} - 5,5\mathrm{e} - 5,1\mathrm{e} - 4,5\mathrm{e} - 4,1\mathrm{e} - 3,5\mathrm{e} - 3\}$ and dropout rates $\{0.2,0.3,0.4,0.5,0.6\}$, evaluating 30 randomly sampled configurations for 100 epochs each with early stopping (patience $= 10$, min $\Delta \mathrm{F1} = 0.001$), and select the highest F1-scoring setup. All models are trained using the Adam optimizer. After confirming normality and homoscedasticity, we compare each architecture's top-performing model via ANOVA and a post-hoc Tukey test.
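A summation-style fusion head matching the dimensions above could look roughly like this; the choice of activation and dropout placement is assumed for illustration.

```python
import torch.nn as nn

class SumFusionClassifier(nn.Module):
    """Project two embeddings to 256-d, sum them, compress to 64-d, classify."""

    def __init__(self, dim_a, dim_b, num_individuals, dropout=0.4):
        super().__init__()
        self.proj_a = nn.Sequential(nn.Linear(dim_a, 256), nn.ReLU(), nn.Dropout(dropout))
        self.proj_b = nn.Sequential(nn.Linear(dim_b, 256), nn.ReLU(), nn.Dropout(dropout))
        self.head = nn.Sequential(nn.Linear(256, 64), nn.ReLU(),
                                  nn.Linear(64, num_individuals))

    def forward(self, emb_a, emb_b):
        return self.head(self.proj_a(emb_a) + self.proj_b(emb_b))
```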
61
+
62
+ Whisper Layer Probing. To pinpoint which Whisper transformer layer encodes the richest individual-specific information, we trained Perch-Whisper MRMR models in which the Whisper input is systematically replaced with the hidden representation from each of the 33 encoder layers. For each layer, we retrain the model across the 50 random train-test splits using the same training schedule described above.
63
+
64
+ Spectrogram annotations and measurements. To compare explainable acoustic features with non-interpretable deep embeddings, we manually measure Peak Frequency and other acoustic parameters from spectrograms, following standard bioacoustic methods. Using Raven Pro 1.6 (K. Lisa Yang Center for Conservation Bioacoustics at the Cornell Lab of Ornithology, 2024), we select regions of interest and extract 30 interpretable features (see Appendix A), including Peak Frequency, Center Frequency, and Center Time. These measurements were taken from six individuals—one adult male, one adult female, and one infant from each of the two monkey troops—chosen for their distinct characteristics.
67
+
68
+ # 4 Results
69
+
70
+ Table 1 shows the results of the acoustic identification task for selected models. We present F1 scores for the models trained on bird vocalizations and
71
+
72
+ ![](images/1357b2a8c01318ef3b3f0ad164ad19da2cf2cf2f424dfde821e8a31832615e6f.jpg)
73
+ Figure 2: Peak frequency distributions for six capuchin monkey individuals, shown for Twitters (top) and Peeps (bottom) call types.
74
+
75
+ human speech data, together with their ensembles. While single-species vocalization models perform reasonably well, the models with the highest F1 scores are those that combine multiple embeddings (using summation, concatenation, or MRMR). Furthermore, the best-performing ensembles combine models developed for bioacoustic vocalizations with models developed for human speech. This highlights the potential of cross-species pretraining in a limited data regime. Pre-training on human speech alone does not capture enough information for the bioacoustic domain, as shown by the performance of Whisper for both vocalization types. Combined, however, the two models achieve an F1 score of 0.70 for Peeps and 0.66 for Twitters. This improved performance suggests that combining speech-trained and bioacoustic-trained embeddings effectively leverages complementary information. We also present the results for other models in Appendix A.
76
+
77
+ Despite its smaller size and more limited training dataset, the bioacoustic model Perch outperforms the much larger Whisper model, which was developed for human speech. Domain relevance is more important than model size, training dataset size, or phylogenetic proximity for the acoustic identification task in capuchins. Trained on recordings made under noisy field conditions, Perch learns this acoustic variability, which contributes to its strong performance. Although our focal species is neither a bird nor a human, the top-performing models across architectures are trained using both bird- and human-derived embeddings, suggesting that joint multi-species embeddings provide better generalization for capuchin acoustic classification tasks.
78
+
79
+ To better understand Whisper's contribution to these multi-species embeddings, we conducted a layer-wise probing analysis across 50 training runs. We found that intermediate layers yielded slightly better classification performance for both Peeps and Twitters (Figure 3), though differences across layers were relatively modest.
80
+
81
+ ![](images/0f5184b05a8435f7f616eebda552a2e7e6a9ec14eeb1cb33abcc1a0094e25cc9.jpg)
82
+
83
+ ![](images/ca7bffca12cf63e44faea3818fa9aa5344d755810bf009b01718d2d7a044b650.jpg)
84
+ Figure 3: Whisper layer-wise probing (mean F1 across 50 random train-test splits) for Twitters (top) and Peeps (bottom). Intermediate layers yield the highest performance for individual classification (maximum value highlighted).
85
+
86
+ We visualize the embeddings of the best-performing model from Table 1 using t-SNE (van der Maaten, 2009) in Figure 1. Different call types form well-defined clusters (Figure 1E), whereas individual classifications appear more diffuse (Figure 1F), illustrating the difficulty of the acoustic identification task (see Appendix A for more t-SNE visualizations). We also analyze the distribution of peak frequencies across individuals in Figure 2. Lower-pitched sounds characterize Peeps, while Twitters span a broader spectral range of peak frequencies. Notably, both call types exhibit bimodal distributions, with this pattern being more pronounced in certain individuals.
87
+
88
+ Table 1: Top-performing models for Twitters and Peeps (Mean F1 Score $\pm$ SD), with significance assessed by comparison to the best simple model (Perch). Significance levels: * for $p < 0.05$ and ** for $p < 0.0001$ (Tukey's test).
89
+
90
+ <table><tr><td>Model</td><td>F1 Score</td></tr><tr><td colspan="2">Twitters</td></tr><tr><td>Chance (uniform 1/11)</td><td>0.09 ± 0.00</td></tr><tr><td>Perch (Simple)</td><td>0.61 ± 0.03</td></tr><tr><td>Whisper (Simple)</td><td>0.55 ± 0.03</td></tr><tr><td>Perch + Whisper (Concat)</td><td>0.63 ± 0.03</td></tr><tr><td>Perch + Whisper (Sum)</td><td>0.63 ± 0.03*</td></tr><tr><td>Perch + Whisper (MRMR)</td><td>0.66 ± 0.03**</td></tr><tr><td colspan="2">Peeps</td></tr><tr><td>Chance (uniform 1/17)</td><td>0.06 ± 0.00</td></tr><tr><td>Perch (Simple)</td><td>0.66 ± 0.02</td></tr><tr><td>Whisper (Simple)</td><td>0.62 ± 0.03</td></tr><tr><td>Perch + Whisper (Concat)</td><td>0.67 ± 0.02*</td></tr><tr><td>Perch + Whisper (Sum)</td><td>0.68 ± 0.02**</td></tr><tr><td>Perch + Whisper (MRMR)</td><td>0.70 ± 0.02**</td></tr></table>
91
+
92
+ This bimodal distribution could reflect two or more call subtypes with distinct pitches and should be investigated further to test for the existence of pragmatics or semantics in their communication system through pitch modulation. Variability within the Twitter call type extends beyond overall pitch modulation. Some Twitters exhibit an n-shaped pitch contour, a continuous descending note, a final lower-pitched note, or a rising pitch throughout the call (Figure 1-A, B, C, D, respectively). Empirical studies incorporating rich social and environmental contexts will be crucial for uncovering the functional significance of this variation in Capuchin calls.
93
+
94
+ # 5 Conclusion
95
+
96
+ This study examined acoustic individual identification in two call types of white-faced capuchins. We established performance baselines for pre-trained embeddings and found that combining multiple embeddings (summation, concatenation, and minimum redundancy maximum relevance) improves classification performance. Our findings also indicate that domain relevance outweighs model size in noisy environments. Future work should extend these multi-species embeddings to other taxa, confirming broader applicability in bioacoustics and
97
+
98
+ animal linguistics.
99
+
100
+ # 6 Limitations
101
+
102
+ While this study focused on acoustic identification, a deeper investigation into the behavioral and social functions of these call types remains relevant for future work. While there are other ways of improving acoustic identification, such as data augmentation (MacIsaac et al., 2024), we considered those techniques out of scope for the present study and focused on investigating the complementarity of joint multi-species embeddings. Our primary goal with this dataset is to make it accessible to the broader scientific community. We anticipate making it publicly available in a forthcoming study with further analyses.
103
+
104
+ # 7 Ethical Considerations
105
+
106
+ No animals were harmed during this study. All research adhered to ethical guidelines for animal welfare, recognizing the importance of studying animal communication while prioritizing their wellbeing, particularly in the context of climate change and habitat loss affecting this species. Additionally, all individuals involved in data collection and processing were engaged in formal employment or academic research under ethical labor practices.
107
+
108
+ # Acknowledgments
109
+
110
+ We thank the entire Capuchinos de Taboga project team for their support and dedication throughout the data collection process. Their field expertise and commitment were essential to building the dataset that underpins this study. We also thank Robin Laurie for providing the capuchin monkey photograph used in Figure 1.
111
+
112
+ # References
113
+
114
+ Artem Abzaliev, Humberto Pérez Espinosa, and Rada Mihalcea. 2024. Towards dog bark decoding: Leveraging human speech processing for automated bark classification. arXiv preprint arXiv:2404.18739.
115
+ Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, and Michael Auli. 2020. wav2vec 2.0: A framework for self-supervised learning of speech representations. Preprint, arXiv:2006.11477.
116
+ Thore J Bergman, Jacinta C Beehner, Melissa C Painter, and Morgan L Gustison. 2019. The speech-like properties of nonhuman primate vocalizations. Animal Behaviour, 151:229-237.
117
+
118
+ Peter C Bermant. 2021. Biocppnet: automatic bioacoustic source separation with deep neural networks. Scientific Reports, 11(1):23502.
119
+ Mélissa Berthet, Camille Coye, Guillaume Dezecache, and Jeremy Kuhn. 2023. Animal linguistics: a primer. Biological reviews, 98(1):81-98.
120
+ Daniel L Bowling and W Tecumseh Fitch. 2015. Do animal communication systems have phonemes? Trends in Cognitive Sciences, 19(10):555-557.
121
+ Nora V Carlson, E McKenna Kelly, and Iain Couzin. 2020. Individual vocal recognition across taxa: a review of the literature and a look into the future. Philosophical Transactions of the Royal Society B, 375(1802):20190479.
122
+ Jules Cauzinille, Benoit Favre, Ricard Marxer, Dena Clink, Abdul Hamid Ahmad, and Arnaud Rey. 2024. Investigating self-supervised speech models' ability to classify animal vocalizations: The case of gibbon's vocal signatures. In *Interspeech* 2024, pages 132-136. ISCA.
123
+ Dena J Clink, Hope Cross-Jaya, Jinsung Kim, Abdul Hamid Ahmad, Moeurk Hong, Roeun Sala, Hélène Birot, Cain Agger, Thinh Tien Vu, Hoa Nguyen Thi, et al. 2024. Benchmarking automated detection and classification approaches for monitoring of endangered species: a case study on gibbons from cambodia. bioRxiv, pages 2024-08.
124
+ Margaret C Crofoot, Daniel I Rubenstein, Arun S Maiya, and Tanya Y Berger-Wolf. 2011. Aggression, grooming and group-level cooperation in white-faced capuchins (cebus capucinus): Insights from social networks. American Journal of Primatology, 73(8):821-833.
125
+ Chris Ding and Hanchuan Peng. 2005. Minimum redundancy feature selection from microarray gene expression data. Journal of bioinformatics and computational biology, 3(02):185-205.
126
+ Sabrina Engesser, Jodie MS Crane, James L Savage, Andrew F Russell, and Simon W Townsend. 2015. Experimental evidence for phonemic contrasts in a nonhuman vocal system. PLoS biology, 13(6):e1002171.
127
+ Burooj Ghani, Tom Denton, Stefan Kahl, and Holger Klinck. 2023a. Global birdsong embeddings enable superior transfer learning for bioacoustic classification. Scientific Reports, 13(1):22876.
128
+ Burooj Ghani, Tom Denton, Stefan Kahl, and Holger Klinck. 2023b. Global birdsong embeddings enable superior transfer learning for bioacoustic classification. Scientific Reports, 13(1):22876.
129
+ Zoë Goldsborough, Margaret C Crofoot, and Brendan J Barrett. 2024. Male-biased stone tool use by wild white-faced capuchins (cebus capucinus imitator). American Journal of Primatology, 86(4):e23594.
130
+
131
+ Julie J Gros-Louis, Susan E Perry, Claudia Fichtel, Eva Wikberg, Hannah Gilkenson, Susan Wofsy, and Alex Fuentes. 2008. Vocal repertoire of cebus capucinus: acoustic structure, context, and usage. International Journal of Primatology, 29:641-670.
132
+ Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, and Abdelrahman Mohamed. 2021. Hubert: Self-supervised speech representation learning by masked prediction of hidden units. Preprint, arXiv:2106.07447.
133
+ Po-Yao Huang, Hu Xu, Juncheng Li, Alexei Baevski, Michael Auli, Wojciech Galuba, Florian Metze, and Christoph Feichtenhofer. 2023. Masked autoencoders that listen. Preprint, arXiv:2207.06405.
134
+ K. Lisa Yang Center for Conservation Bioacoustics at the Cornell Lab of Ornithology. 2024. Raven pro: Interactive sound analysis software (version 1.6.5). [Computer software].
135
+ Stefan Kahl, Tom Denton, Holger Klinck, Hendrik Reers, Francis Cherutich, Hervé Glotin, Hervé Goëau, Willem-Pier Vellinga, Robert Planqué, and Alexis Joly. 2023. Overview of birdclef 2023: Automated bird species identification in eastern africa. In CLEF (Working Notes), pages 1934-1942.
136
+ Stefan Kahl, Connor M. Wood, Maximilian Eibl, and Holger Klinck. 2021. Birdnet: A deep learning solution for avian diversity monitoring. Ecological Informatics, 61:101236.
137
+ Elly Knight, Tessa Rhinehart, Devin R de Zwaan, Matthew J Weldy, Mark Cartwright, Scott H Hawley, Jeffery L Larkin, Damon Lesmeister, Erin Bayne, and Justin Kitzes. 2024. Individual identification in acoustic recordings. Trends in Ecology & Evolution.
138
+ Konrad Lorenz. 1952. King Solomon's Ring. T. Y. Crowell, New York.
139
+ Jennifer MacIsaac, Stuart Newson, Adham Ashton-Butt, Huma Pearce, and Ben Milner. 2024. Improving acoustic species identification using data augmentation within a deep learning framework. Ecological Informatics, 83:102851.
140
+ Marius Miron, Sara Keen, Jen-Yu Liu, Benjamin Hoffman, Masato Hagiwara, Olivier Pietquin, Felix Effenberger, and Maddie Cusimano. 2024. Biodenoising: animal vocalization denoising without access to clean data. arXiv preprint arXiv:2410.03427.
141
+ Anthony Miyaguchi, Adrian Cheung, Murilo Gustineli, and Ashley Kim. 2024. Transfer learning with pseudo multi-label birdcall classification for ds@gt birdclef 2024. arXiv preprint arXiv:2407.06291.
142
+ Susan E Perry, Brendan J Barrett, and Irene Godoy. 2017. Older, sociable capuchins (cebus capucinus) invent more social behaviors, but younger monkeys innovate more in other contexts. Proceedings of the National Academy of Sciences, 114(30):7806-7813.
143
+
144
+ Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever. 2022. Robust speech recognition via large-scale weak supervision. *Preprint*, arXiv:2212.04356.
145
+ Thom Scott-Phillips and Christophe Heintz. 2023. Animal communication in linguistic and cognitive perspective. Annual Review of Linguistics, 9(1):93-111.
146
+ PJB Slater. 1981. Individual differences in animal behavior. In *Perspectives in Ethology: Volume 4 Advantages of Diversity*, pages 35-49. Springer.
147
+ Toshitaka N Suzuki. 2021. Animal linguistics: exploring referentiality and compositionality in bird calls. Ecological Research, 36(2):221-231.
148
+ Toshitaka N Suzuki. 2024. Animal linguistics. Annual Review of Ecology, Evolution, and Systematics, 55.
149
+ Elizabeth Tinsley Johnson, Marcela E Benitez, Alexander Fuentes, Celia R McLean, Ariek B Norford, Juan Carlos Ordonez, Jacinta C Beehner, and Thore J Bergman. 2020. High density of white-faced capuchins (cebus capucinus) and habitat quality in the taboga forest of costa rica. American Journal of Primatology, 82(2):e23096.
150
+ Laurens van der Maaten. 2009. Learning a parametric embedding by preserving local structure. In International Conference on Artificial Intelligence and Statistics.
151
+
152
+ # A Appendix
153
+
154
+ ![](images/75a1afa122f27a0abad0d33a9750f43083f14fd71b84e85b5dc9e54939c8c08d.jpg)
155
+ Figure 4: Mutual information of the top features for both call type datasets, spanning seven acoustic pre-trained embeddings. We display the five highest-performing features per pre-trained embedding, along with the top five interpretable features per model.
156
+
157
+ ![](images/5189b127de3bd942da8441bcbb0886fb8bcd86c4970546478d28f8dbc779b9f1.jpg)
158
+ Figure 5: Mutual information of the top features in the Peeps call type dataset, spanning seven acoustic pre-trained embeddings. We display the five highest-performing features per pre-trained embedding, along with the top five interpretable features. Asterisks show correlation coefficients above 0.8.
159
+
160
+ ![](images/ecf4d134c4c90c1b3b33a445a293f8461c27742373df420efa479d40b5595fb3.jpg)
161
+ Figure 6: Mutual information of the top features in the Twitters call type dataset, spanning seven acoustic pre-trained embeddings. We display the five highest-performing features per pre-trained embedding, along with the top five interpretable features. Asterisks show correlation coefficients above 0.8.
162
+
163
+ ![](images/5026dd92d4ccbf307ac002478ed26095eb26c1b92e63d66d6fe71c0fff22fb8e.jpg)
164
+
165
+ ![](images/aaef8a109244750517e2b066079c482385a37e03fc0e04e47c4e3dbc60b0b34b.jpg)
166
+
167
+ ![](images/6f8be313bdc603c0f9e4b9d1df54a3553d67fd95e6a859ab4788ed653dafa0f6.jpg)
168
+
169
+ ![](images/5d6be18b8bef8af6b39fcac0851b5714b24204353894e881f9703d933a52a1fa.jpg)
170
+
171
+ ![](images/70694d5344e92897ccbbb62aa94b433585c5fb906676e93a8127afb69eef0099.jpg)
172
+
173
+ ![](images/f3254abc2fdd7d4c61d307c6ec79888f13ec29090df8461baf34b55a98ebd537.jpg)
174
+
175
+ ![](images/63988630a87792aaa6f281c525fbc42dac301d4c4b1d9de28167d63b9899a198.jpg)
176
+
177
+ ![](images/e816f5ec823fe9bd2747d7b69f3cc9dd66268347425062b9cebd635965b25bbe.jpg)
178
+
179
+ ![](images/c8e478d7dced590756ff959683a0a5b538b3c60c19addeddd4ab8aacb25bf6ba.jpg)
180
+
181
+ ![](images/229a3f222a76d2276fd09815a47d7c99f6d4a166a11f576a44c8504795800330.jpg)
182
+
183
+ ![](images/951cdf94527684702e23b526b21de9497cd62b70aec5ade852e1341dac69a51a.jpg)
184
+
185
+ ![](images/a7e2829103f1d346f45ede9ed2462145171989f7efaa6a03688d0a62add3d23a.jpg)
186
+
187
+ ![](images/5e08d6230ec0343f4d17c4da200ca0f51766467e1a8b689d49e7ea05ebb79fb2.jpg)
188
+ Figure 7: t-SNE visualizations of five pre-trained embeddings, primarily trained on human speech data (with AudioMAE also incorporating internet-sourced audio). The first column presents the t-SNE plot of call types (Peeps in yellow and Twitters in blue), while the second and third columns show the t-SNE projections of Peeps and Twitters, respectively, with points colored by individual identity. From top to bottom, the rows correspond to HuBERT, Wav2Vec, Wav2Vec BERT, Whisper, and AudioMAE.
189
+
190
+ ![](images/869dc6768a0ec20000bb0b717e18d1d998c12773fb537c01f69c6521d841e037.jpg)
191
+
192
+ ![](images/1a7779b99481e2a194a37329492f5d1f15ed0a18e4c8c127f89e4a76dcf76886.jpg)
193
+
194
+ <table><tr><td>Model</td><td>F1 Score (Mean ± Std)</td></tr><tr><td colspan="2">Simple Network</td></tr><tr><td>Perch</td><td>0.66 ± 0.02</td></tr><tr><td>Whisper</td><td>0.62 ± 0.03</td></tr><tr><td>BirdNET</td><td>0.60 ± 0.02</td></tr><tr><td>HuBERT</td><td>0.55 ± 0.02</td></tr><tr><td>Wav2Vec2</td><td>0.47 ± 0.02</td></tr><tr><td colspan="2">Concatenation (2 Embeddings)</td></tr><tr><td>BirdNET + Perch</td><td>0.67 ± 0.02</td></tr><tr><td>Perch + Whisper</td><td>0.67 ± 0.02</td></tr><tr><td>Perch + HuBERT</td><td>0.66 ± 0.02</td></tr><tr><td>Perch + AudioMAE</td><td>0.64 ± 0.02</td></tr><tr><td>Perch + Wav2Vec2</td><td>0.64 ± 0.02</td></tr><tr><td colspan="2">Summation (2 Embeddings)</td></tr><tr><td>Perch + Whisper</td><td>0.68 ± 0.02</td></tr><tr><td>Perch + BirdNET</td><td>0.67 ± 0.02</td></tr><tr><td>Perch + HuBERT</td><td>0.66 ± 0.02</td></tr><tr><td>BirdNET + Whisper</td><td>0.64 ± 0.02</td></tr><tr><td>Perch + Wav2Vec2</td><td>0.64 ± 0.02</td></tr><tr><td colspan="2">MRMR (2 Embeddings)</td></tr><tr><td>Perch + Whisper</td><td>0.70 ± 0.02</td></tr><tr><td>Perch + BirdNET</td><td>0.69 ± 0.02</td></tr><tr><td>Perch + HuBERT</td><td>0.68 ± 0.02</td></tr><tr><td>Perch + Wav2Vec2</td><td>0.67 ± 0.02</td></tr><tr><td>Perch + Wav2Vec-bert</td><td>0.67 ± 0.02</td></tr></table>
195
+
196
+ Table 2: Performance of the top 5 models per method on the acoustic identification task using the Peeps dataset (Mean F1 Score ± Standard Deviation).
197
+
198
+ <table><tr><td>Model</td><td>F1 Score (Mean ± Std)</td></tr><tr><td colspan="2">Simple Network</td></tr><tr><td>Perch</td><td>0.61 ± 0.03</td></tr><tr><td>BirdNET</td><td>0.60 ± 0.04</td></tr><tr><td>HuBERT</td><td>0.56 ± 0.04</td></tr><tr><td>Whisper</td><td>0.55 ± 0.03</td></tr><tr><td>Wav2Vec-bert</td><td>0.43 ± 0.03</td></tr><tr><td colspan="2">Concatenation (2 Embeddings)</td></tr><tr><td>BirdNET + Whisper</td><td>0.63 ± 0.03</td></tr><tr><td>BirdNET + Perch</td><td>0.62 ± 0.03</td></tr><tr><td>Perch + Whisper</td><td>0.62 ± 0.03</td></tr><tr><td>BirdNET + HuBERT</td><td>0.62 ± 0.03</td></tr><tr><td>Perch + HuBERT</td><td>0.61 ± 0.03</td></tr><tr><td colspan="2">Summation (2 Embeddings)</td></tr><tr><td>BirdNET + Whisper</td><td>0.63 ± 0.04</td></tr><tr><td>Perch + Whisper</td><td>0.63 ± 0.03</td></tr><tr><td>BirdNET + Perch</td><td>0.63 ± 0.03</td></tr><tr><td>BirdNET + HuBERT</td><td>0.62 ± 0.03</td></tr><tr><td>Perch + HuBERT</td><td>0.62 ± 0.03</td></tr><tr><td colspan="2">MRMR (2 Embeddings)</td></tr><tr><td>Perch + Whisper</td><td>0.66 ± 0.03</td></tr><tr><td>BirdNET + Whisper</td><td>0.65 ± 0.03</td></tr><tr><td>Perch + HuBERT</td><td>0.64 ± 0.03</td></tr><tr><td>BirdNET + Perch</td><td>0.64 ± 0.03</td></tr><tr><td>Perch + Wav2Vec2</td><td>0.64 ± 0.03</td></tr></table>
199
+
200
+ Table 3: Performance of the top 5 models per method on the acoustic identification task using the Twitters dataset (Mean F1 Score ± Standard Deviation).
201
+
202
+ <table><tr><td>Measurement</td><td>Units</td><td>Definition</td></tr><tr><td>Center Freq</td><td>Hz</td><td>The frequency that divides the selection into two intervals of equal energy (i.e., the 50th percentile frequency) measured on each spectrogram slice.</td></tr><tr><td>Freq 25%</td><td>Hz</td><td>The 25th percentile frequency (first quartile) measured on each spectrogram slice.</td></tr><tr><td>Freq 75%</td><td>Hz</td><td>The 75th percentile frequency (third quartile) measured on each spectrogram slice.</td></tr><tr><td>Freq 5%</td><td>Hz</td><td>The 5th percentile frequency measured on each spectrogram slice, indicating the lower bound of the energy distribution.</td></tr><tr><td>Freq 95%</td><td>Hz</td><td>The 95th percentile frequency measured on each spectrogram slice, indicating the upper bound of the energy distribution.</td></tr><tr><td>BW 50%</td><td>Hz</td><td>The inter-quartile range bandwidth, computed as the difference between the 75th and 25th percentile frequencies (i.e., the bandwidth containing 50% of the energy).</td></tr><tr><td>BW 90%</td><td>Hz</td><td>The bandwidth encompassing 90% of the signal&#x27;s energy, calculated as the difference between the 95th and 5th percentile frequencies.</td></tr><tr><td>Peak Freq</td><td>Hz</td><td>The frequency at which the maximum power (or peak power) occurs within the selection, as observed in each spectrogram slice.</td></tr><tr><td>Center Time</td><td>s</td><td>The time that divides the selection into two intervals of equal energy (i.e., the median or 50th percentile time) for the signal&#x27;s energy distribution.</td></tr><tr><td>Time 25%</td><td>s</td><td>The time by which 25% of the total energy has been accumulated within the selection.</td></tr><tr><td>Time 75%</td><td>s</td><td>The time by which 75% of the total energy has been accumulated within the selection.</td></tr><tr><td>Dur 50%</td><td>s</td><td>The duration over which the central 50% of the signal&#x27;s energy is distributed, computed as the difference between the 75th and 25th percentile times.</td></tr><tr><td>Time 5%</td><td>s</td><td>The time by which 5% of the total energy has been accumulated within the selection.</td></tr><tr><td>Time 95%</td><td>s</td><td>The time by which 95% of the total energy has been accumulated within the selection.</td></tr><tr><td>Dur 90%</td><td>s</td><td>The duration over which 90% of the signal&#x27;s energy is distributed, computed as the difference between the 95th and 5th percentile times.</td></tr><tr><td>Delta Freq</td><td>Hz</td><td>The difference between the upper and lower frequency limits of the selection.</td></tr><tr><td>Delta Time</td><td>s</td><td>The difference between the beginning and ending times of the selection.</td></tr><tr><td>Time 5% Rel.</td><td>-</td><td>The relative time (as a proportion of total duration) at which 5% of the signal&#x27;s energy is accumulated.</td></tr><tr><td>Time 25% Rel.</td><td>-</td><td>The relative time at which 25% of the signal&#x27;s energy is accumulated.</td></tr><tr><td>Center Time Rel.</td><td>-</td><td>The relative time corresponding to the median (50%) of the signal&#x27;s energy distribution.</td></tr><tr><td>Time 75% Rel.</td><td>-</td><td>The relative time at which 75% of the signal&#x27;s energy is accumulated.</td></tr><tr><td>Time 95% Rel.</td><td>-</td><td>The relative time at which 95% of the signal&#x27;s energy is accumulated.</td></tr><tr><td>Peak Time Relative</td><td>-</td><td>The time at which the peak amplitude occurs, expressed as a proportion of 
the total selection duration.</td></tr><tr><td>PFC Avg Slope</td><td>Hz/ms</td><td>The average slope of the peak frequency contour over time, computed as the mean of the differences between successive peak frequencies.</td></tr><tr><td>PFC Max Freq</td><td>Hz</td><td>The maximum frequency reached in the peak frequency contour.</td></tr><tr><td>PFC Max Slope</td><td>Hz/ms</td><td>The maximum rate of change (slope) observed in the peak frequency contour.</td></tr><tr><td>PFC Min Freq</td><td>Hz</td><td>The minimum frequency reached in the peak frequency contour.</td></tr><tr><td>PFC Min Slope</td><td>Hz/ms</td><td>The minimum rate of change (slope) observed in the peak frequency contour.</td></tr><tr><td>PFC Num Inf Pts</td><td>-</td><td>The number of inflection points in the peak frequency contour, indicating how frequently the slope changes sign.</td></tr></table>
203
+
204
+ Table 4: Summary of acoustic measurements derived from Raven Pro 1.6. Definitions are adapted from the Raven Pro manual.
205
+
206
+ ![](images/c5bde6965208cf2f0cc57e0e614479b3f1fa4c83854a4ebd1562434ae3335bb4.jpg)
207
+
208
+ ![](images/58fb544ceea7a94b1651355c1e42aa9f8abc509cc9e7f360f15c85d16b16e175.jpg)
209
+
210
+ ![](images/1686dde45c1a09957e0ee36b8f3b2767e79a77744b6ac26d6f3ea7582f7cbb34.jpg)
211
+
212
+ ![](images/0afeff27b8fc9a52999502262e0c30a74646dd367301f5171e149748f5427b0f.jpg)
213
+ Figure 8: tSNE visualizations of five pre-trained embeddings, primarily trained on bioacoustics bird data (with BirdNET also incorporating other animals). The first column represents the t-SNE plot of call types (Peeps in yellow and Twitters in blue), while the second and third columns depict the t-SNE projections of Peeps and Twitters colored by individual, respectively. From top to bottom, the rows correspond to BirdNET and Perch, respectively.
214
+
215
+ ![](images/7aab6dff4119391536c05909b072a45efc14d23d516303e205b0735cd095eccb.jpg)
216
+
217
+ ![](images/1e24ca9b4e2ca196366332e098301a198641dc1db7f6dcfe13561db526a902ea.jpg)
218
+
219
+ <table><tr><td>Model name</td><td>Number of parameters</td><td>Training data (hours)</td><td>Reference</td></tr><tr><td>BirdNET</td><td>27M</td><td>8300</td><td>Kahl et al. (2021)</td></tr><tr><td>HuBERT-Large</td><td>1B</td><td>60960</td><td>Hsu et al. (2021)</td></tr><tr><td>Perch</td><td>7.8M</td><td>&lt;10k</td><td>Ghani et al. (2023b)</td></tr><tr><td>Wav2vec2</td><td>317M</td><td>54000</td><td>Baevski et al. (2020)</td></tr><tr><td>W2v-BERT 2.0</td><td>600M</td><td>60960</td><td>Hsu et al. (2021)</td></tr><tr><td>Whisper-Large-v2</td><td>1.55B</td><td>680000</td><td>Radford et al. (2022)</td></tr><tr><td>AudioMAE</td><td>304M</td><td>5500</td><td>Huang et al. (2023)</td></tr></table>
220
+
221
+ Table 5: List of considered models for acoustic embeddings, including their size, training data, and references.
acousticindividualidentificationofwhitefacedcapuchinmonkeysusingjointmultispeciesembeddings/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d813d66ee291f30c545dc127deb3fb5e48183e0f1b459da0efa47a5fab5ea78c
3
+ size 1430374
acousticindividualidentificationofwhitefacedcapuchinmonkeysusingjointmultispeciesembeddings/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cddcbb8d25fcb9e7e91450734c26450f0cce4abcd322d15db314871eb01a32f8
3
+ size 255286
advancingsequentialnumericalpredictioninautoregressivemodels/8cc22587-4499-48a8-a0a6-6fa2c14cada3_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0e9879b80e675a86f1c6596e99f10c7560b11878fdcdbe04319344b20465e7c
3
+ size 86889
advancingsequentialnumericalpredictioninautoregressivemodels/8cc22587-4499-48a8-a0a6-6fa2c14cada3_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:be37c6728f178a6107e558d41fdb6c546208c28bf79573af6b06ca85e8062557
3
+ size 108432
advancingsequentialnumericalpredictioninautoregressivemodels/8cc22587-4499-48a8-a0a6-6fa2c14cada3_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf90a395bca9a20b92c3065f7fc8923384def6f81dd10e893a7acd980e0e0d13
3
+ size 1841505
advancingsequentialnumericalpredictioninautoregressivemodels/full.md ADDED
@@ -0,0 +1,399 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Advancing Sequential Numerical Prediction in Autoregressive Models
2
+
3
+ Xiang Fei\*1 Jinghui Lu\*1 Qi Sun\*2 Hao Feng\*1 Yanjie Wang\*1 Wei Shi\* An-lan Wang\*1 Jingqun Tang\*1 Can Huang\*1
4
+
5
+ $^{1}$ ByteDance Inc. $^{2}$ City University of Hong Kong
6
+
7
+ {feixiang.77, lujinghui, fenghao.2019}@bytedance.com {wangyanjie.prince, shiwei.11, wanganlan}@bytedance.com {tangjingqun, can.huang}@bytedance.com qisun.new@gmail.com
8
+
9
+ # Abstract
10
+
11
+ Autoregressive models have become the de facto choice for sequence generation tasks, but standard approaches treat digits as independent tokens and apply cross-entropy loss, overlooking the coherent structure of numerical sequences. This paper introduces Numerical Token Integrity Loss (NTIL) to address this gap. NTIL operates at two levels: (1) token-level, where it extends the Earth Mover's Distance (EMD) to preserve ordinal relationships between numerical values, and (2) sequence-level, where it penalizes the overall discrepancy between the predicted and actual sequences. This dual approach improves numerical prediction and integrates effectively with LLMs/MLLMs. Extensive experiments show significant performance improvements with NTIL. All resources are available at https://github.com/xfey/NTIL.
12
+
13
+ # 1 Introduction
14
+
15
+ In recent years, sequence generation has become a crucial approach for implementing a broad range of AI applications, including visual question answering (Wang et al., 2024d; Reich and Schultz, 2024; Fan et al., 2024; Liu et al., 2024b; He et al., 2025; Wang et al., 2025; Lu et al., 2024b), key information extraction (Kim et al., 2024; Yu et al., 2024; Kang et al., 2024; Wang et al., 2024a; Yi et al., 2025; Lu et al., 2023a, 2024a), object detection (Wen et al., 2024), math reasoning (Zhao et al., 2024), text spotting (Li et al., 2024; Yu et al., 2025), and automatic audio recognition (Zhou et al., 2024).
16
+
17
+ Autoregressive models, especially large language models (LLMs) such as GPT (Achiam et al., 2023), LLaMA (Touvron et al., 2023; Dubey et al., 2024), Qwen (Yang et al., 2024; Wang et al., 2024c) series, with multi-modal large language models
18
+
19
+ ![](images/55bed2d2b29413363bc4b285db772c07019739524c258fd03a05bc604f8518bf.jpg)
20
+ Figure 1: Sequence-level digit token loss illustration.
21
+
22
+ (MLLMs) based on them, now dominate sequence generation tasks. During training, these models generate sequences token-by-token, typically using cross-entropy (CE) loss, to minimize the negative log-likelihood of the ground truth token at each time step. However, CE loss has several inherent limitations when predicting numerical values. Specifically, CE suffers from Limitation 1: it ignores the inherent closeness between numerical tokens, where each digit in a numerical prediction is not independent but related to its neighboring digits. For example, in Figure 2(a) and 2(b), for the ground truth token "3", the CE loss yields the same value of $-\log(0.5)$ for the two different prediction distributions. However, the distribution in Figure 2(b) is more accurate, as it assigns higher probability to the neighboring token "2".
23
+
24
+ CE also suffers from Limitation 2: it fails to capture the holistic numerical error when sequential tokens are involved, as it focuses on the precision of each token rather than the overall value. In an autoregressive generation manner, producing a numerical value typically requires consecutive time steps. For example, the target value "0.98" requires the prediction of four sequential tokens: "0", ".", "9", "8". Thus, a prediction such as 1.01 ("1", ".", "0", "1") incurs a high CE loss, as the first, third and fourth tokens differ significantly from the target tokens. Conversely, a prediction like 1.98 ("1", ".", "9", "8") could yield a lower CE loss due to a closer match at the token level, despite the overall numerical difference being larger (1.00 vs. 0.03). This discrepancy shows the limitation of CE in evaluating predictions holistically.
27
+
28
+ ![](images/b9377ab01ff51b33124c9b9a8ef260d2fe71f5da0aec3ea191fe23787f0b9a59.jpg)
29
+ Figure 2: Cross-entropy fails to distinguish the two predictions, whereas EMD assigns a smaller loss to the better predicted distribution.
30
+
31
+ To overcome the above issues, we introduce a novel sequence-level numerical prediction loss: Numerical Token Integrity Loss (NTIL). At the token-level, NTIL replaces the traditional CE loss with Earth Mover's Distance (EMD) (Rubner et al., 1998). Additionally, we enhance the EMD with an Exponential Position-based Weighting scheme (Section 3.1), which leverages place-value number systems to better capture the nuanced differences between numerical distributions at each timestep. At the sequence-level, NTIL evaluates the overall numerical difference between predicted and actual sequences through Multi-token Numerical Optimization (Section 3.2), considering all time steps holistically, as illustrated in Figure 1. It enables NTIL to effectively model the actual value of digit sequences, and capture discrepancies across the consecutive numerical range, moving beyond simple token-by-token comparison.
32
+
33
+ To the best of the authors' knowledge, it is the first time that EMD is used as an optimization method for autoregressive models. Moreover, our holistic approach is the first of its kind to improve sequential numerical prediction by considering numerical tokens across multiple time steps. Our method can be seamlessly integrated into both LLMs and MLLMs. Experimental results show that NTIL boosts performance in tasks requiring precise numerical outputs, such as object detection, text spotting, and math reasoning (Section 4).
34
+
35
+ # 2 Related Work
36
+
37
+ The Earth Mover's Distance (EMD) measures the minimal cost of transforming one distribution into another, and has become a valuable metric in deep learning applications. Notably, Wasserstein GAN (Arjovsky et al., 2017) uses EMD as its loss function to stabilize training in GANs. Cuturi (2013) and Courty et al. (2016) also adopted EMD for smoothing the training procedure. Despite the success of EMD, it has not been applied to autoregressive models. Most recently, autoregressive models, especially LLMs, have advanced NLP (Radford et al., 2019; Touvron et al., 2023; Lu et al., 2023b; Cui et al., 2025), and multi-modal tasks (Alayrac et al., 2022; Wang et al., 2024c; Feng et al., 2025; Lu et al., 2025). While the tasks mentioned above require high precision in numerical value prediction, none of the previous works have specifically optimized for this criterion. Our work addresses this gap by focusing on advancing sequential numerical prediction for autoregressive models.
38
+
39
+ # 3 Method
40
+
41
+ This section details the components of the proposed method. Section 3.1 applies exponentially weighted EMD to single digit tokens; Section 3.2 describes how we aggregate multiple digit tokens to derive a simple yet effective numerical measure.
42
+
43
+ # 3.1 Exponential Position-Based Weighting
44
+
45
+ For token-level prediction, to address Limitation 1 in Section 1, we replace the conventional CE loss with EMD to account for the ordinal relationship during optimization. The preliminaries for both CE and EMD objectives, and the simplification via numerical prediction, are outlined in Appendix D.
46
+
47
+ Furthermore, we extend EMD to account for place-value number systems, where leading digits carry greater numerical significance. We implement an exponential weighting scheme that progressively assigns weights based on digit positions, scaling their contributions to the loss accordingly:
48
+
49
+ $$
50
+ \mathbf{W}_{\mathrm{exp}} = \left[ (1 + \sigma)^{n - i - 1} \right]_{i = 0}^{n - 1}, \tag{1}
51
+ $$
52
+
53
+ where $\sigma$ is the exponential increment rate, and $n$ is the number of consecutive digits. This weighting helps the model capture the ordinal relationship between consecutive numbers.
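Below is a minimal sketch (our own illustration, not the authors' released code) of the exponential position-based weighting in Eq. (1), using $\sigma = 0.2$ as specified in Appendix A:

```python
import torch

def exponential_position_weights(n: int, sigma: float = 0.2) -> torch.Tensor:
    """W_exp = [(1 + sigma)^(n - i - 1)] for i = 0, ..., n - 1 (Eq. (1))."""
    i = torch.arange(n, dtype=torch.float32)
    return (1.0 + sigma) ** (n - i - 1)

# Leading positions receive the largest weights, mirroring place value.
print(exponential_position_weights(4))  # tensor([1.7280, 1.4400, 1.2000, 1.0000])
```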
54
+
55
+ # 3.2 Multi-Token Numerical Optimization
56
+
57
+ To overcome Limitation 2 outlined in Section 1, we propose the following procedure and losses. Differentiable Numerical Value Construction. In this step, we construct the complete numerical value from consecutive discrete digit tokens. Figure 3 illustrates how we obtain the digit index from the predicted distribution using argmax to derive the integer representation. To maintain differentiability, we employ the Gumbel-softmax approximation with reduced temperature and noise parameters to ensure consistent results. The resulting tensor is element-wise multiplied with positional indices scaled by the appropriate powers of 10, and aggregated to obtain the final value. For further implementation details, see Appendix C.
58
+
59
+ $$
60
+ \left[ \begin{array}{llll} 0.1 & 0.3 & 0.4 & 0.2 \\ 0.5 & 0.0 & 0.2 & 0.3 \\ 0.1 & 0.6 & 0.1 & 0.2 \end{array} \right] \xrightarrow{\arg\max} \left[ \begin{array}{l} 2 \\ 0 \\ 1 \end{array} \right] . \operatorname{mul}\left(\left[ \begin{array}{l} 100 \\ 10 \\ 1 \end{array} \right]\right) = 201
61
+ $$
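The following minimal sketch illustrates this construction under our own assumptions: the Gumbel noise is scaled by a small factor (`noise_scale`, our choice) to mimic the reduced-noise behaviour described above, and a straight-through estimator keeps the forward pass discrete. It is an illustration, not the authors' implementation.

```python
import torch

def gumbel_softmax_st(logits: torch.Tensor, tau: float = 0.1,
                      noise_scale: float = 0.01) -> torch.Tensor:
    """Straight-through Gumbel-softmax with reduced temperature and noise."""
    g = -torch.log(-torch.log(torch.rand_like(logits)))   # Gumbel(0, 1) noise
    y_soft = torch.softmax((logits + noise_scale * g) / tau, dim=-1)
    index = y_soft.argmax(dim=-1, keepdim=True)
    y_hard = torch.zeros_like(y_soft).scatter_(-1, index, 1.0)
    # Hard one-hot in the forward pass, soft gradients in the backward pass.
    return y_hard + y_soft - y_soft.detach()

def construct_value(digit_scores: torch.Tensor) -> torch.Tensor:
    """digit_scores: (n_positions, n_digit_classes) scores per digit slot."""
    n, vocab = digit_scores.shape
    one_hot = gumbel_softmax_st(digit_scores)                          # (n, vocab)
    digits = one_hot @ torch.arange(vocab, dtype=digit_scores.dtype)   # (n,)
    place_values = 10.0 ** torch.arange(n - 1, -1, -1, dtype=digit_scores.dtype)
    return (digits * place_values).sum()

# The matrix from the illustration above (rows = digit slots, columns = digit classes).
scores = torch.tensor([[0.1, 0.3, 0.4, 0.2],
                       [0.5, 0.0, 0.2, 0.3],
                       [0.1, 0.6, 0.1, 0.2]])
print(construct_value(scores))  # tensor(201.) with high probability (argmax digits [2, 0, 1])
```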
62
+
63
+ Relative Deviation Metric. For numerical comparison, while absolute difference provides a straightforward measure equivalent to L1 loss, we propose a normalized metric defined as:
64
+
65
+ $$
66
+ \mathcal{L}_{\text{relative}} = \frac{|X - Y|}{\max(X, Y) + \epsilon}, \tag{2}
67
+ $$
68
+
69
+ where $X$ is the sequence-level numerical prediction (e.g., "234"), $Y$ is the ground truth, and $\epsilon$ is a small constant to avoid division by zero. This normalization ensures consistency across different magnitude ranges.
70
+
71
+ Magnitude Deviation Metric. We also apply a normalized metric on the order of magnitude:
72
+
73
+ $$
74
+ \mathcal{L}_{\text{magnitude}} = \log\left(\frac{\max(X, Y)}{\min(X, Y)}\right). \tag{3}
75
+ $$
76
+
77
+ The objective penalizes differences in the order of magnitude between two values. For example, the pairs (1, 10) and (1, 100) have similar $\mathcal{L}_{\text{relative}}$ values (0.90 and 0.99) but differ substantially in $\mathcal{L}_{\text{magnitude}}$: $\log \left(\frac{10}{1}\right) \approx 2.30$ for the first pair and $\log \left(\frac{100}{1}\right) \approx 4.61$ for the second. This results in a larger penalty for greater differences in magnitude. The final formulation of NTIL combines the above loss functions, with tunable hyperparameters weighting their individual contributions.
78
+
79
+ $$
80
+ \mathcal{L} = \mathbf{W}_{\mathrm{exp}} \operatorname{EMD} + \alpha \cdot \mathcal{L}_{\text{relative}} + \beta \cdot \mathcal{L}_{\text{magnitude}} \tag{4}
81
+ $$
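A minimal sketch of the sequence-level terms in Eqs. (2)-(4) is shown below, assuming positive predicted and target values and $\alpha = \beta = 0.2$ as in Appendix A; `weighted_emd` stands in for the exponentially weighted token-level EMD term:

```python
import torch

def relative_deviation(x: torch.Tensor, y: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    return (x - y).abs() / (torch.maximum(x, y) + eps)            # Eq. (2)

def magnitude_deviation(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    return torch.log(torch.maximum(x, y) / torch.minimum(x, y))   # Eq. (3)

def ntil(weighted_emd: torch.Tensor, x: torch.Tensor, y: torch.Tensor,
         alpha: float = 0.2, beta: float = 0.2) -> torch.Tensor:
    # Eq. (4): token-level weighted EMD plus the two sequence-level penalties.
    return weighted_emd + alpha * relative_deviation(x, y) + beta * magnitude_deviation(x, y)

x, y = torch.tensor(1.0), torch.tensor(10.0)
print(relative_deviation(x, y))   # ~0.90
print(magnitude_deviation(x, y))  # ~2.30 (natural log of 10, as in the example above)
```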
82
+
83
+ ![](images/50b3b884d396a4f483f3e4036da2a818e6675fc79b22dc7f5af8a36052960356.jpg)
84
+ (a) Absolute error between prediction and label on image grounding task.
85
+
86
+ ![](images/af621ff7ae680ad04507eb877d481d77dc389208df6f66f0b7e66ecec5aca235.jpg)
87
+
88
+ ![](images/a5a8dd2152204fe143c98542b7befaf7f1818fab5c4251beb29d9796245effa5.jpg)
89
+ (b) NTIL helps the prediction distribution be more accurate.
90
+
91
+ ![](images/21f40d4f6d4c9b13d916fbfbcbc35d94632974f6faae2ddc33201ab2e4d24886.jpg)
92
+ Figure 3: Constructing a numerical value from tokens.
93
+ (c) NTIL helps the prediction distribution concentrate around ground truth.
94
+ Figure 4: Results for quantitative analysis.
95
+
96
+ ![](images/10e4526cd41fbe65e121ce53d706dc5bb37c11cbd186985ad3292676d786b5a2.jpg)
97
+
98
+ # 4 Experiments and Results
99
+
100
+ This section presents a comprehensive empirical evaluation of the proposed NTIL across various LLMs/MLLMs (Section 4.1). CE (Shannon, 1948) and EMD (Rubner et al., 1998) are chosen as baselines due to their widespread adoption. The evaluation encompasses multiple task domains that focus on numerical prediction including Image Grounding, Scene Text Detection, Clock Time Recognition, Mathematical Reasoning and Arithmetic Calculations. Appendix B provides details on tasks, datasets, and evaluation metrics. We also conduct systematic ablation studies to evaluate the critical components of our approach. Implementation details are available in Appendix A.
101
+
102
+ # 4.1 Main Results
103
+
104
+ # 4.2 Results of MLLMs
105
+
106
+ Image Grounding As shown in Table 1, our method outperforms both CE and EMD across nearly all datasets and VLM backbones, as evidenced by the overall performance improvements.
107
+
108
+ Scene Text Detection Table 2 shows that our method improves accuracy across multiple datasets, demonstrating its effectiveness in predicting multiple object coordinates.
109
+
110
+ Clock Time Recognition Table 4 demonstrates that NTIL surpasses CE and EMD significantly in performance across all model architectures.
111
+
112
+ Mathematical Reasoning As shown in Table 5, our method outperforms CE and EMD across all
113
+
114
+ <table><tr><td rowspan="2">Model</td><td rowspan="2">Method</td><td colspan="3">RefCOCO</td><td colspan="3">RefCOCO+</td><td colspan="2">RefCOCOg</td><td rowspan="2">Avg</td></tr><tr><td>Val</td><td>TestA</td><td>TestB</td><td>Val</td><td>TestA</td><td>TestB</td><td>Val</td><td>Test</td></tr><tr><td rowspan="3">PaliGemma (3b) (Beyer et al., 2024)</td><td>CE</td><td>0.839</td><td>0.865</td><td>0.784</td><td>0.740</td><td>0.797</td><td>0.664</td><td>0.792</td><td>0.797</td><td>0.785</td></tr><tr><td>EMD</td><td>0.841</td><td>0.864</td><td>0.796</td><td>0.749</td><td>0.805</td><td>0.669</td><td>0.789</td><td>0.799</td><td>0.789</td></tr><tr><td>Ours</td><td>0.844</td><td>0.873</td><td>0.791</td><td>0.750</td><td>0.812</td><td>0.678</td><td>0.804</td><td>0.802</td><td>0.795</td></tr><tr><td rowspan="3">LLaVA-1.5 (7b) (Liu et al., 2024a)</td><td>CE</td><td>0.855</td><td>0.880</td><td>0.813</td><td>0.801</td><td>0.843</td><td>0.741</td><td>0.799</td><td>0.816</td><td>0.818</td></tr><tr><td>EMD</td><td>0.856</td><td>0.879</td><td>0.822</td><td>0.798</td><td>0.845</td><td>0.743</td><td>0.798</td><td>0.816</td><td>0.820</td></tr><tr><td>Ours</td><td>0.858</td><td>0.885</td><td>0.815</td><td>0.800</td><td>0.853</td><td>0.747</td><td>0.802</td><td>0.817</td><td>0.822</td></tr><tr><td rowspan="3">Yi-VL (6b) (Young et al., 2024)</td><td>CE</td><td>0.767</td><td>0.796</td><td>0.734</td><td>0.706</td><td>0.757</td><td>0.651</td><td>0.722</td><td>0.731</td><td>0.733</td></tr><tr><td>EMD</td><td>0.779</td><td>0.805</td><td>0.738</td><td>0.719</td><td>0.762</td><td>0.657</td><td>0.721</td><td>0.737</td><td>0.740</td></tr><tr><td>Ours</td><td>0.777</td><td>0.808</td><td>0.741</td><td>0.717</td><td>0.770</td><td>0.665</td><td>0.727</td><td>0.743</td><td>0.744</td></tr><tr><td rowspan="3">Qwen2-VL (2b) (Wang et al., 2024c)</td><td>CE</td><td>0.897</td><td>0.928</td><td>0.850</td><td>0.841</td><td>0.896</td><td>0.776</td><td>0.851</td><td>0.867</td><td>0.863</td></tr><tr><td>EMD</td><td>0.889</td><td>0.931</td><td>0.843</td><td>0.838</td><td>0.889</td><td>0.772</td><td>0.853</td><td>0.858</td><td>0.859</td></tr><tr><td>Ours</td><td>0.898</td><td>0.932</td><td>0.849</td><td>0.844</td><td>0.891</td><td>0.788</td><td>0.858</td><td>0.863</td><td>0.866</td></tr><tr><td rowspan="3">Qwen2-VL (7b) (Wang et al., 2024c)</td><td>CE</td><td>0.892</td><td>0.929</td><td>0.841</td><td>0.842</td><td>0.902</td><td>0.784</td><td>0.843</td><td>0.848</td><td>0.860</td></tr><tr><td>EMD</td><td>0.886</td><td>0.926</td><td>0.834</td><td>0.843</td><td>0.901</td><td>0.768</td><td>0.836</td><td>0.843</td><td>0.855</td></tr><tr><td>Ours</td><td>0.889</td><td>0.931</td><td>0.840</td><td>0.844</td><td>0.904</td><td>0.786</td><td>0.848</td><td>0.853</td><td>0.862</td></tr></table>
115
+
116
+ Table 1: Performance comparison (Acc@0.5) of models on image grounding tasks.
117
+
118
+ <table><tr><td rowspan="2">Model</td><td rowspan="2">Method</td><td colspan="4">Dataset</td><td rowspan="2">Avg</td></tr><tr><td>CTW1500</td><td>ICDAR1500</td><td>TD500</td><td>Total-Text</td></tr><tr><td rowspan="3">PaliGemma (3b)</td><td>CE</td><td>0.220</td><td>0.129</td><td>0.183</td><td>0.259</td><td>0.193</td></tr><tr><td>EMD</td><td>0.314</td><td>0.124</td><td>0.252</td><td>0.307</td><td>0.241</td></tr><tr><td>Ours</td><td>0.369</td><td>0.155</td><td>0.257</td><td>0.318</td><td>0.263</td></tr><tr><td rowspan="3">Yi-VL (6b)</td><td>CE</td><td>0.682</td><td>0.370</td><td>0.753</td><td>0.673</td><td>0.586</td></tr><tr><td>EMD</td><td>0.668</td><td>0.398</td><td>0.778</td><td>0.678</td><td>0.594</td></tr><tr><td>Ours</td><td>0.680</td><td>0.403</td><td>0.752</td><td>0.678</td><td>0.597</td></tr><tr><td rowspan="3">Qwen2-VL (2b)</td><td>CE</td><td>0.786</td><td>0.538</td><td>0.851</td><td>0.827</td><td>0.720</td></tr><tr><td>EMD</td><td>0.786</td><td>0.535</td><td>0.867</td><td>0.808</td><td>0.718</td></tr><tr><td>Ours</td><td>0.776</td><td>0.577</td><td>0.854</td><td>0.835</td><td>0.732</td></tr><tr><td rowspan="3">Qwen2-VL (7b)</td><td>CE</td><td>0.771</td><td>0.648</td><td>0.889</td><td>0.864</td><td>0.764</td></tr><tr><td>EMD</td><td>0.762</td><td>0.625</td><td>0.874</td><td>0.860</td><td>0.751</td></tr><tr><td>Ours</td><td>0.770</td><td>0.669</td><td>0.869</td><td>0.872</td><td>0.770</td></tr><tr><td rowspan="3">LLaVA-1.5 (7b)</td><td>CE</td><td>0.735</td><td>0.490</td><td>0.821</td><td>0.786</td><td>0.675</td></tr><tr><td>EMD</td><td>0.724</td><td>0.545</td><td>0.840</td><td>0.776</td><td>0.690</td></tr><tr><td>Ours</td><td>0.739</td><td>0.547</td><td>0.839</td><td>0.791</td><td>0.698</td></tr></table>
119
+
120
+ Table 2: Performance (Acc@0.5) on scene text detection tasks.
121
+
122
+ <table><tr><td rowspan="2">Model</td><td colspan="3">Accuracy (%)</td></tr><tr><td>CE</td><td>EMD</td><td>Ours</td></tr><tr><td>Baichuan2 (7b) (Yang et al., 2023)</td><td>44.3</td><td>46.6</td><td>46.9</td></tr><tr><td>Qwen2.5 (1.5b) (Team, 2024)</td><td>40.3</td><td>40.7</td><td>42.4</td></tr><tr><td>LLaMA3 (8b) (Dubey et al., 2024)</td><td>61.9</td><td>61.8</td><td>61.9</td></tr><tr><td>Yi (6b) (Young et al., 2024)</td><td>53.0</td><td>54.6</td><td>54.4</td></tr><tr><td>MiniCPM3 (4b) (Hu et al., 2024)</td><td>66.8</td><td>68.2</td><td>68.6</td></tr></table>
123
+
124
+ Table 3: Performance comparison of accuracies on the arithmetic calculation task.
125
+
126
+ <table><tr><td>Metric</td><td>Method</td><td>LLaVA-1.5 (7b)</td><td>Qwen2-VL (7b)</td><td>Qwen2-VL (2b)</td><td>Yi-VL (6b)</td></tr><tr><td rowspan="3">Accuracy (%) ↑</td><td>CE</td><td>95.1</td><td>75.0</td><td>81.3</td><td>76.2</td></tr><tr><td>EMD</td><td>95.3</td><td>78.7</td><td>81.7</td><td>75.1</td></tr><tr><td>Ours</td><td>98.3</td><td>80.5</td><td>85.3</td><td>87.4</td></tr><tr><td rowspan="3">Time gap (minute) ↓</td><td>CE</td><td>8.52</td><td>30.84</td><td>32.34</td><td>56.58</td></tr><tr><td>EMD</td><td>7.98</td><td>30.78</td><td>31.98</td><td>54.78</td></tr><tr><td>Ours</td><td>4.14</td><td>27.72</td><td>24.66</td><td>26.58</td></tr></table>
127
+
128
+ Table 4: Performance of the clock time recognition task.
129
+
130
+ <table><tr><td>Dataset</td><td>Method</td><td>Qwen2-vl (2b)</td><td>Qwen2-vl (7b)</td><td>LLaVA-1.5 (7b)</td><td>Yi-VL (6b)</td><td>PaliGemma (3b)</td></tr><tr><td rowspan="3">Mathvision</td><td>CE</td><td>0.139</td><td>0.184</td><td>0.146</td><td>0.143</td><td>0.097</td></tr><tr><td>EMD</td><td>0.130</td><td>0.188</td><td>0.148</td><td>0.142</td><td>0.088</td></tr><tr><td>Ours</td><td>0.145</td><td>0.191</td><td>0.146</td><td>0.153</td><td>0.098</td></tr><tr><td rowspan="3">Mathvista</td><td>CE</td><td>0.248</td><td>0.315</td><td>0.140</td><td>0.187</td><td>0.143</td></tr><tr><td>EMD</td><td>0.262</td><td>0.303</td><td>0.157</td><td>0.192</td><td>0.149</td></tr><tr><td>Ours</td><td>0.251</td><td>0.300</td><td>0.170</td><td>0.222</td><td>0.157</td></tr></table>
131
+
132
+ datasets, with the most significant improvements seen in the Mathvision dataset using the Qwen2-VL (2b) and in Mathvista with the Yi-VL (6b).
133
+
134
+ # 4.3 Results of LLMs
135
+
136
+ Arithmetic Calculation As shown in Table 3, our method improves accuracy across multiple LLMs, though LLaMA3 shows minimal gains, possibly due to its extensive pre-training. Overall, in the majority of cases, EMD performs comparably to or marginally better than CE loss on numerical predictions, while NTIL consistently delivers su
137
+
138
+ Table 5: Performance of the math reasoning task.
139
+
140
+ <table><tr><td colspan="3"></td><td colspan="2">PaliGemma</td><td colspan="2">LLaVA-1.5</td><td colspan="2">Qwen2-VL</td><td>Yi-VL</td></tr><tr><td>Exp</td><td>Rel</td><td>Mag</td><td>Mathvision</td><td>Mathvista</td><td>Mathvision</td><td>Mathvista</td><td>Clock_Time</td><td>Clock_Time</td><td></td></tr><tr><td>×</td><td>✓</td><td>✓</td><td>0.096</td><td>0.137</td><td>0.151</td><td>0.166</td><td>0.798</td><td>0.834</td><td></td></tr><tr><td>✓</td><td>×</td><td>✓</td><td>0.095</td><td>0.137</td><td>0.145</td><td>0.154</td><td>0.790</td><td>0.856</td><td></td></tr><tr><td>✓</td><td>✓</td><td>×</td><td>0.094</td><td>0.142</td><td>0.160</td><td>0.143</td><td>0.816</td><td>0.876</td><td></td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>0.098</td><td>0.157</td><td>0.146</td><td>0.170</td><td>0.853</td><td>0.874</td><td></td></tr></table>
141
+
142
+ Table 6: Ablations on NTIL. Exp: Exponential Position-Based Weighting. REL: Relative Deviation Metric. Mag: Magnitude Deviation Metric.
143
+
144
+ perior results in most scenarios. This underscores the effectiveness and generalizability of NTIL.
145
+
146
+ # 4.4 Ablation Analysis
147
+
148
+ Table 6 indicates that incorporating all components of NTIL generally leads to better performance, as evidenced by the highest scores on most metrics when all components are enabled. As an exception, including the Magnitude term leads to worse results on Mathvision for LLaVA-1.5, indicating that its effect can fluctuate in some cases.
149
+
150
+ # 4.5 Quantitative Analysis
151
+
152
+ As shown in Figure 4(a), NTIL achieves the lowest absolute errors across all models, indicating more consistent performance compared to CE and EMD. Figures 4(b) and 4(c) illustrate that NTIL pro
153
+
154
+ duces more accurate predictions with distributions more concentrated around the ground truth. Overall, NTIL offers more stability and lower variability. Qualitative examples can be seen in Appendix F.
155
+
156
+ # 5 Conclusion
157
+
158
+ We propose NTIL, which improves numerical prediction accuracy in LLMs at both the token and sequence levels. Experiments show improvements across multiple datasets and models, highlighting the effectiveness of NTIL.
159
+
160
+ # Limitations
161
+
162
+ One limitation of NTIL is that the exponential position-based weighting scheme, while effective in many cases, has limited or even negative impact in certain configurations, such as the Mathvision dataset with the LLaVA-1.5 model. Future exploration could focus on refining this weighting scheme with adaptive strategies to address its inconsistent impact.
163
+
164
+ Furthermore, NTIL introduces additional computational overhead compared to CE loss, resulting in reduced training efficiency. This trade-off between performance improvement and computational cost needs to be considered in practical applications.
165
+
166
+ Another limitation arises from the tokenization strategies of certain models like LLaMA-3, which encode common multi-digit numbers (e.g., "123") as a single token. Such cases require special handling in NTIL's implementation, adding complexity to the framework.
167
+
168
+ # References
169
+
170
+ Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. 2023. Gpt-4 technical report. arXiv preprint arXiv:2303.08774.
171
+ Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. 2022. Flamingo: a visual language model for few-shot learning. Advances in neural information processing systems, 35:23716-23736.
172
+ Martin Arjovsky, Soumith Chintala, and Léon Bottou. 2017. Wasserstein gan. arXiv preprint arXiv:1701.07875.
173
+ Lucas Beyer, Andreas Steiner, André Susano Pinto, Alexander Kolesnikov, Xiao Wang, Daniel Salz, Maxim Neumann, Ibrahim Alabdulmohsin, Michael
174
+
175
+ Tschannen, Emanuele Bugliarello, et al. 2024. Paligemma: A versatile 3b vlm for transfer. arXiv preprint arXiv:2407.07726.
176
+ Chee Kheng Ch'ng and Chee Seng Chan. 2017. Totaltext: A comprehensive dataset for scene text detection and recognition. In 2017 14th IAPR international conference on document analysis and recognition (ICDAR), volume 1, pages 935-942. IEEE.
177
+ Nicolas Courty, Rémi Flamary, Devis Tuia, and Alain Rakotomamonjy. 2016. Optimal transport for domain adaptation. IEEE transactions on pattern analysis and machine intelligence, 39(9):1853-1865.
178
+ Xiao Cui, Mo Zhu, Yulei Qin, Liang Xie, Wengang Zhou, and Houqiang Li. 2025. Multi-level optimal transport for universal cross-tokenizer knowledge distillation on language models. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 39, pages 23724-23732.
179
+ Marco Cuturi. 2013. Sinkhorn distances: Lightspeed computation of optimal transport. Advances in neural information processing systems, 26.
180
+ Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783.
181
+ Yue Fan, Jing Gu, Kaiwen Zhou, Qianqi Yan, Shan Jiang, Ching-Chen Kuo, Yang Zhao, Xinze Guan, and Xin Wang. 2024. Muffin or Chihuahua? challenging multimodal large language models with multipanel VQA. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6845-6863, Bangkok, Thailand. Association for Computational Linguistics.
182
+ Hao Feng, Shu Wei, Xiang Fei, Wei Shi, Yingdong Han, Lei Liao, Jinghui Lu, Binghong Wu, Qi Liu, Chunhui Lin, et al. 2025. Dolphin: Document image parsing via heterogeneous anchor prompting. arXiv preprint arXiv:2505.14059.
183
+ gpiosenko. 2022. Time-image dataset-classification.
184
+ Yangfan He, Jianhui Wang, Kun Li, Yijin Wang, Li Sun, Jun Yin, Miao Zhang, and Xueqian Wang. 2025. Enhancing intent understanding for ambiguous prompts through human-machine co-adaptation. arXiv preprint arXiv:2501.15167.
185
+ Le Hou, Chen-Ping Yu, and Dimitris Samaras. 2016. Squared earth mover's distance-based loss for training deep neural networks. arXiv preprint arXiv:1611.05916.
186
+ Shengding Hu, Yuge Tu, Xu Han, Chaoqun He, Ganqu Cui, Xiang Long, Zhi Zheng, Yewei Fang, Yuxiang Huang, Weilin Zhao, et al. 2024. Minicpm: Unveiling the potential of small language models with scalable training strategies. arXiv preprint arXiv:2404.06395.
187
+
188
+ Iris AM Huijben, Wouter Kool, Max B Paulus, and Ruud JG Van Sloun. 2022. A review of the gumbelmax trick and its extensions for discrete stochasticity in machine learning. IEEE transactions on pattern analysis and machine intelligence, 45(2):1353-1371.
189
+ Eric Jang, Shixiang Gu, and Ben Poole. 2016. Categorical reparameterization with gumbel-softmax. arXiv preprint arXiv:1611.01144.
190
+ Hyeonseok Kang, Hyein Seo, Jeesu Jung, Sangkeun Jung, Du-Seong Chang, and Riwoo Chung. 2024. Guidance-based prompt data augmentation in specialized domains for named entity recognition. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 665-672, Bangkok, Thailand. Association for Computational Linguistics.
191
+ Dimosthenis Karatzas, Lluis Gomez-Bigorda, Anguelos Nicolaou, Suman Ghosh, Andrew Bagdanov, Masakazu Iwamura, Jiri Matas, Lukas Neumann, Vijay Ramaseshan Chandrasekhar, Shijian Lu, et al. 2015. Icdar 2015 competition on robust reading. In 2015 13th international conference on document analysis and recognition (ICDAR), pages 1156-1160. IEEE.
192
+ Seoyeon Kim, Kwangwook Seo, Hyungjoo Chae, Jinyoung Yeo, and Dongha Lee. 2024. VerifiNER: Verification-augmented NER via knowledge-grounded reasoning with large language models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2441-2461, Bangkok, Thailand. Association for Computational Linguistics.
193
+ Yafu Li, Qintong Li, Leyang Cui, Wei Bi, Zhilin Wang, Longyue Wang, Linyi Yang, Shuming Shi, and Yue Zhang. 2024. MAGE: Machine-generated text detection in the wild. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 36-53, Bangkok, Thailand. Association for Computational Linguistics.
194
+ Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. 2014. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer.
195
+ Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. 2024a. Improved baselines with visual instruction tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26296-26306.
196
+ Wenhao Liu, Xiaohua Wang, Muling Wu, Tianlong Li, Changze Lv, Zixuan Ling, Zhu JianHao, Cenyuan Zhang, Xiaoqing Zheng, and Xuanjing Huang. 2024b.
197
+
198
+ Aligning large language models with human preferences through representation engineering. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 10619-10638, Bangkok, Thailand. Association for Computational Linguistics.
199
+ Jinghui Lu, Yanjie Wang, Ziwei Yang, Xuejing Liu, Brian Mac Namee, and Can Huang. 2024a. Padellmer: parallel decoding in large language models for named entity recognition. Advances in Neural Information Processing Systems, 37:117853-117880.
200
+ Jinghui Lu, Haiyang Yu, Yanjie Wang, Yongjie Ye, Jingqun Tang, Ziwei Yang, Binghong Wu, Qi Liu, Hao Feng, Han Wang, et al. 2024b. A bounding box is worth one token: Interleaving layout and text in a large language model for document understanding. arXiv preprint arXiv:2407.01976.
201
+ Jinghui Lu, Haiyang Yu, Siliang Xu, Shiwei Ran, Guozhi Tang, Siqi Wang, Bin Shan, Teng Fu, Hao Feng, Jingqun Tang, et al. 2025. Prolonged reasoning is not all you need: Certainty-based adaptive routing for efficient llm/mllm reasoning. arXiv preprint arXiv:2505.15154.
202
+ Jinghui Lu, Rui Zhao, Brian Mac Namee, and Fei Tan. 2023a. Purifiedner: A prompting-based unified ner system for diverse datasets. In Proceedings of the AAAI conference on artificial intelligence, volume 37, pages 13327-13335.
203
+ Jinghui Lu, Dongsheng Zhu, Weidong Han, Rui Zhao, Brian Mac Namee, and Fei Tan. 2023b. What makes pre-trained language models better zero-shot learners? In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2288-2303, Toronto, Canada. Association for Computational Linguistics.
204
+ Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. 2023c. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv preprint arXiv:2310.02255.
205
+ Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan L Yuille, and Kevin Murphy. 2016. Generation and comprehension of unambiguous object descriptions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 11-20.
206
+ Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. 2019. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9.
207
+ Daniel Reich and Tanja Schultz. 2024. Uncovering the full potential of visual grounding methods in VQA. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 4406-4419, Bangkok, Thailand. Association for Computational Linguistics.
208
+
209
+ Y. Rubner, C. Tomasi, and L.J. Guibas. 1998. A metric for distributions with applications to image databases. In Sixth International Conference on Computer Vision (IEEE Cat. No.98CH36271), pages 59-66.
210
+ David Saxton, Edward Grefenstette, Felix Hill, and Pushmeet Kohli. 2019. Analysing mathematical reasoning abilities of neural models. arXiv preprint arXiv:1904.01557.
211
+ Claude Elwood Shannon. 1948. A mathematical theory of communication. The Bell system technical journal, 27(3):379-423.
212
+ Qwen Team. 2024. Qwen2.5: A party of foundation models.
213
+ Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 2023. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971.
214
+ Alvin Wan, Xiaoliang Dai, Peizhao Zhang, Zijian He, Yuandong Tian, Saining Xie, Bichen Wu, Matthew Yu, Tao Xu, Kan Chen, et al. 2020. Fbnetv2: Differentiable neural architecture search for spatial and channel dimensions. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 12965-12974.
215
+ Han Wang, Yongjie Ye, Bingru Li, Yuxiang Nie, Jinghui Lu, Jingqun Tang, Yanjie Wang, and Can Huang. 2025. Vision as lora. arXiv preprint arXiv:2503.20680.
216
+ Huiming Wang, Liying Cheng, Wenxuan Zhang, De Wen Soh, and Lidong Bing. 2024a. Order-agnostic data augmentation for few-shot named entity recognition. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 7792-7807, Bangkok, Thailand. Association for Computational Linguistics.
217
+ Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Mingjie Zhan, and Hongsheng Li. 2024b. Measuring multimodal mathematical reasoning with math-vision dataset. arXiv preprint arXiv:2402.14804.
218
+ Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. 2024c. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191.
219
+ Qunbo Wang, Ruyi Ji, Tianhao Peng, Wenjun Wu, Zechao Li, and Jing Liu. 2024d. Soft knowledge prompt: Help external knowledge become a better teacher to instruct LLM in knowledge-based VQA. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 6132-6143, Bangkok, Thailand. Association for Computational Linguistics.
220
+
221
+ Haoyang Wen, Eduard Hovy, and Alexander Hauptmann. 2024. Transitive consistency constrained learning for entity-to-entity stance detection. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1467-1480, Bangkok, Thailand. Association for Computational Linguistics.
222
+ Aiyuan Yang, Bin Xiao, Bingning Wang, Borong Zhang, Ce Bian, Chao Yin, Chenxu Lv, Da Pan, Dian Wang, Dong Yan, et al. 2023. Baichuan 2: Open large-scale language models. arXiv preprint arXiv:2309.10305.
223
+ An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, et al. 2024. Qwen2 technical report. arXiv preprint arXiv:2407.10671.
224
+ Cong Yao, Xiang Bai, Wenyu Liu, Yi Ma, and Zhuowen Tu. 2012. Detecting texts of arbitrary orientations in natural images. In 2012 IEEE conference on computer vision and pattern recognition, pages 1083-1090. IEEE.
225
+ Qiang Yi, Yangfan He, Jianhui Wang, Xinyuan Song, Shiyao Qian, Xinhang Yuan, Miao Zhang, Li Sun, Keqin Li, Kuan Lu, et al. 2025. Score: Story coherence and retrieval enhancement for ai narratives. arXiv preprint arXiv:2503.23512.
226
+ Alex Young, Bei Chen, Chao Li, Chengen Huang, Ge Zhang, Guanwei Zhang, Heng Li, Jiangcheng Zhu, Jianqun Chen, Jing Chang, et al. 2024. Yi: Open foundation models by 01. ai. arXiv preprint arXiv:2403.04652.
227
+ Haiyang Yu, Jinghui Lu, Yanjie Wang, Yang Li, Han Wang, Can Huang, and Bin Li. 2025. Eve: Towards end-to-end video subtitle extraction with vision-language models. arXiv preprint arXiv:2503.04058.
228
+ Licheng Yu, Patrick Poirson, Shan Yang, Alexander C Berg, and Tamara L Berg. 2016. Modeling context in referring expressions. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pages 69-85. Springer.
229
+ Zhuohao Yu, Chang Gao, Wenjin Yao, Yidong Wang, Wei Ye, Jindong Wang, Xing Xie, Yue Zhang, and Shikun Zhang. 2024. KIEval: A knowledge-grounded interactive evaluation framework for large language models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 5967-5985, Bangkok, Thailand. Association for Computational Linguistics.
230
+ Liu Yuliang, Jin Lianwen, Zhang Shuaitao, and Zhang Sheng. 2017. Detecting curve text in the wild: New dataset and new solution. arXiv preprint arXiv:1712.02170.
231
+ Yilun Zhao, Yitao Long, Hongjun Liu, Ryo Kamoi, Linyong Nan, Lyuhao Chen, Yixin Liu, Xiangru Tang, Rui Zhang, and Arman Cohan. 2024.
232
+
233
+ DocMath-eval: Evaluating math reasoning capabilities of LLMs in understanding long and specialized documents. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 16103-16120, Bangkok, Thailand. Association for Computational Linguistics.
234
+
235
+ Shilin Zhou, Zhenghua Li, Yu Hong, Min Zhang, Zhefeng Wang, and Baoxing Huai. 2024. CopyNE: Better contextual ASR by copying named entities. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2675-2686, Bangkok, Thailand. Association for Computational Linguistics.
236
+
237
+ # A Implementation
238
+
239
+ The proposed loss function is incorporated into the model's training objective through linear combination with a weighting coefficient $\lambda = 0.3$ . The hyperparameters governing the loss computation are maintained at $\alpha = \beta = \sigma = 0.2$ throughout all experiments, unless otherwise specified. All tasks are trained with a learning rate of $10^{-5}$ for fine-tuning. Our experiments are conducted based on a widely used open source training repository<sup>1</sup>.
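As a small illustrative sketch (variable names and placeholder values are ours), this combination amounts to adding the NTIL term (Eq. (4)) to the base objective, assumed here to be the standard cross-entropy loss, with $\lambda = 0.3$:

```python
import torch

ce_loss = torch.tensor(1.25)     # placeholder value for the base (CE) objective
ntil_loss = torch.tensor(0.40)   # placeholder value for the NTIL term (Eq. (4))
lam = 0.3                        # weighting coefficient from Appendix A
total_loss = ce_loss + lam * ntil_loss
print(total_loss)                # tensor(1.3700)
```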
240
+
241
+ # B Dataset
242
+
243
+ This section provides a detailed description of each task along with the corresponding evaluation metrics. The illustrations for each task are presented in Figure 5.
244
+
245
+ Image Grounding. The grounding task aims to output the bounding box of the object corresponding to a given description. We compare on the referring expression comprehension (REC) task using the RefCOCO (Lin et al., 2014), RefCOCO+ (Yu et al., 2016) and RefCOCOg (Mao et al., 2016) datasets. The average accuracy at IoU $\geq 0.5$ (Acc@0.5) is used as the evaluation metric.
246
+
247
+ Scene Text Detection. The scene text detection task focuses on detecting text in natural images. We select several commonly used datasets: TD500 (Yao et al., 2012), ICDAR2015 (Karatzas et al., 2015), CTW1500 (Yuliang et al., 2017) and Total-Text (Ch'ng and Chan, 2017). We use the same evaluation metric as in the image grounding task.
248
+
249
+ Clock Time Recognition. The clock time recognition task aims to read the specific time shown in images of clocks. We compare accuracy and time gap on the widely used TIME (gpiosenko, 2022) dataset. The outputs are formatted as labels such as "2_55", as shown in Figure 6. We use overall accuracy as a metric, and additionally measure the time gap between the prediction and the ground truth for further evaluation. For example, the time gap between prediction "4_35" and ground truth "6_20" is 1.75 hours.
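For illustration, the sketch below (our own helper, not taken from the paper) shows how such a time gap could be computed from the "H_MM" labels, assuming the gap wraps around the 12-hour dial:

```python
def time_gap_hours(pred: str, gold: str) -> float:
    def to_minutes(label: str) -> int:
        hour, minute = map(int, label.split("_"))
        return (hour % 12) * 60 + minute
    diff = abs(to_minutes(pred) - to_minutes(gold))
    diff = min(diff, 12 * 60 - diff)  # assumption: wrap around the 12-hour dial
    return diff / 60.0

print(time_gap_hours("4_35", "6_20"))  # 1.75, matching the example above
```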
250
+
251
+ Mathematical Reasoning. Completing mathematical reasoning tasks requires models to understand both the context and the image in the mathematical domain. We select the MathVista (Lu et al., 2023c) and MathVision (Wang et al., 2024b) datasets to evaluate models, and use exact matching accuracy as the evaluation metric.
252
+
253
+ Arithmetic Calculations. The calculation task involves training LLMs to perform numerical operations accurately. The "arithmetic_mix" subset of the widely used mathematics dataset (Saxton et al., 2019), which contains 2M training and 10k test items, is used for training and evaluation. Exact matching accuracy is applied as the evaluation metric.
254
+
255
+ # C Gumbel Softmax
256
+
257
+ The Gumbel softmax, also known as Concrete Distribution, is a continuous differentiable approximation to categorical sampling. It replaces the non-differentiable argmax operation with a softmax function and Gumbel noise. Given logits $\pi_{i}$ , the Gumbel softmax sample $y_{i}$ is computed as:
258
+
259
+ $$
260
+ y_{i} = \operatorname{softmax}\left(\left(\log(\pi_{i}) + g_{i}\right) / \tau\right),
261
+ $$
262
+
263
+ where $g_{i}$ denotes Gumbel noise, drawn i.i.d. from the Gumbel(0, 1) distribution, and $\tau$ is the temperature parameter.
264
+
265
+ The Gumbel noise term $g_{i}$ introduces stochasticity into the sampling process, enabling exploration of the probability space while maintaining differentiability. Moreover, the Gumbel noise also acts as a form of regularization, providing gradient information near the decision boundary and improving generalization. The temperature parameter $\tau$ controls the sharpness of the distribution: as $\tau$ approaches 0, the samples become more discrete and closer to one-hot vectors, while higher temperatures make the distribution more uniform. In our implementation, we use $\tau = 0.1$ to ensure that the results are consistent with the original argmax results.
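A minimal sketch of this sampling step (our own illustration) shows how the temperature controls the sharpness of the resulting distribution:

```python
import torch

def gumbel_softmax_sample(logits: torch.Tensor, tau: float) -> torch.Tensor:
    g = -torch.log(-torch.log(torch.rand_like(logits)))  # Gumbel(0, 1) noise
    return torch.softmax((logits + g) / tau, dim=-1)

torch.manual_seed(0)
logits = torch.log(torch.tensor([0.1, 0.6, 0.1, 0.2]))
for tau in (0.1, 1.0, 5.0):
    sample = gumbel_softmax_sample(logits, tau)
    # Low tau gives near one-hot samples; high tau flattens the distribution.
    print(tau, sample)
```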
266
+
267
+ Gumbel softmax is differentiable as it replaces the discrete argmax with a continuous softmax function, allowing gradients to flow through the sampling process during backpropagation. Thus, Gumbel softmax is widely used in scenarios requiring discrete latent variables in neural networks, such as in VAEs (Jang et al., 2016) or reinforcement learning (Huijben et al., 2022; Wan et al., 2020).
268
+
269
+ # D Preliminaries
270
+
271
+ This section first briefly introduces the autoregressive decoding process based on cross-entropy in Section D.1, and then compares and analyzes Earth Mover's Distance (EMD) in Section D.2.
272
+
273
+ (a) Image Grounding
274
+ ![](images/8d257157c236371bf11f0eae176040fe7c7b04a767497c2047fa67abc4ec998e.jpg)
275
+ Question: Where is person bottom left? Answer: [0.005, 0.332, 0.249, 0.984]
276
+
277
+ (b) Scene Text Detection
278
+ ![](images/681e9ad8ae2ca0ae72bc52558b8e653b810c332c633a3cb34ae9398f349ed68d.jpg)
279
+ Question: Locate texts in the image. Answer: [0.388, 0.371, 0.626, 0.440] [0.418, 0.595, 0.610, 0.653]
280
+
281
+ ![](images/aa9cd3228ad1f26aa1be350639d8d0a6ae61840a004f7bc0bf134d3329730cef.jpg)
282
+ (c) Clock Time Recognition Question: What's the time of this clock? Answer: 9_20
283
+
284
+ (d) Mathematical Reasoning Question: Find the perimeter of the parallelogram. Answer: 78
285
+ ![](images/b10411ac5cd76e0484fd1ee4383c8fd579fd967f9488f7b3f938195bb70b8205.jpg)
286
+ Question: Calculate $(-8) / (-14)^{*}2240 / 7680.$ Answer: 1/6
287
+
288
+ ![](images/4715d6b85a18283b9e6291c17f8968896c90198832dab9c399c2a57773ed9026.jpg)
289
+ Calculate $(-8) / (-14)^{*}2240 / 7680$
290
+ (e) Arithmetic Calculation
291
+ Figure 5: The illustrations for each task.
292
+
293
+ 1/6
294
+
295
+ # D.1 Autoregressive Prediction with Cross Entropy
296
+
297
+ Autoregressive models operate through sequential decoding, generating tokens one at a time conditioned on previously generated tokens. For each position, the model outputs a probability distribution across the vocabulary, employing the Softmax function to select the most probable token during training.
298
+
299
+ In the context of language modeling tasks, cross-entropy loss serves as the fundamental training objective for autoregressive models. This loss function quantifies the divergence between the predicted probability distribution and the ground truth distribution:
300
+
301
+ $$
302
+ \mathcal{L} = - \sum_{i} p_{i} \log\left(q_{i}\right), \tag{5}
303
+ $$
304
+
305
+ where $p_i$ represents the one-hot encoded ground truth distribution, and $q_{i}$ denotes the model's predicted probability.
306
+
307
+ While cross-entropy loss effectively minimizes distributional differences between predictions and labels during training, it exhibits a fundamental limitation in autoregressive decoding: the function treats each class independently, disregarding the inherent relationships between different classes.
308
+
309
+ This limitation becomes particularly problematic when modeling numerical sequences where ordinal relationships between values carry semantic significance (Hou et al., 2016), as shown in Figure 2.
310
+
311
+ # D.2 Earth Mover's Distance
312
+
313
+ To introduce a distance term when calculating the above-mentioned distribution differences, one option is the Earth Mover's Distance (EMD), also known as the Wasserstein distance. It is a metric based on optimal transport theory, measuring the minimal cost of transforming one distribution into the other:
314
+
315
+ $$
316
+ \operatorname{EMD}(P, Q) = \min_{\gamma \in \Gamma(P, Q)} \sum_{i = 1}^{n} \sum_{j = 1}^{m} \gamma_{ij} \cdot d\left(x_{i}, y_{j}\right), \tag{6}
317
+ $$
318
+
319
+ where $P = \{(p_i, x_i)\}$ and $Q = \{(q_j, y_j)\}$ are two discrete distributions, with $p_i$ and $q_j$ being the masses at the points $x_i$ and $y_j$, respectively. The transport plans, represented as $\Gamma(P, Q)$, are all possible ways to move the mass, and $\gamma_{ij}$ represents the amount of mass transported from $p_i$ to $q_j$. The distance matrix $d(x_i, y_j)$ indicates the cost of transporting mass between points $x_i$ and $y_j$; a widely used choice for $d$ is the Euclidean distance.
320
+
321
+ Since the distance between labels is explicitly considered, predicted values closer to the label are
322
+
323
+ associated with smaller distance terms. Thus, the Earth Mover's Distance effectively incorporates distance-based weighting. As illustrated in Figure 2, when the distribution is more concentrated around the label, the EMD loss becomes smaller, thereby reflecting the differences between distributions.
324
+
325
+ # D.3 Predicting Digits with EMD
326
+
327
+ This section presents our approach to refining distance metrics for numerical representation at the digit level. In traditional autoregressive models, cross-entropy loss is typically employed to predict the probability distributions of individual tokens. However, this method treats each numerical digit as an independent entity, disregarding the continuous relationships between numbers. For example, when the target digit is 4, a model prediction of 3 should ideally be considered closer to accurate than a prediction of 9, as it represents a smaller numerical deviation. To address this limitation, we propose incorporating a distance metric that captures these intrinsic numerical relationships more accurately.
328
+
329
+ Computational Complexity. As established in Section D.2, Earth Mover's Distance (EMD) provides a robust measure of distributional distance, making it particularly well-suited for numerical prediction tasks. Prior research has applied EMD to align hidden representations within neural networks, often requiring the transport plan ($\gamma_{ij}$ in Eq. (6)) to be approximated or recalculated dynamically during training. However, the computational demands of EMD present practical challenges, especially in large-scale deep learning applications. Solving the underlying optimization problem in Eq. (6) has a computational complexity of $O\left((n\times m)^3\right)$, which can be prohibitive. Regularized EMD (Cuturi, 2013) addresses this by employing the Sinkhorn-Knopp algorithm to iteratively refine the transport plan $\gamma_{ij}$ in Eq. (6), reducing the complexity to $O(k\times n\times m)$, where each iteration involves an $O(n\times m)$ matrix operation.
330
+
331
+ Numerical Prediction Optimization with EMD. When estimating the transport plan, the algorithm's complexity is generally quadratic. However, when restricted to one-dimensional numerical distributions, where the prediction and target values are aligned in position $(i = j)$ , the transport plan can be simplified to an identity matrix. Thus, Earth Mover's Distance emerges as a highly suitable metric for capturing digit-level numerical distance, for
332
+
333
+ <table><tr><td>β\α</td><td>0</td><td>0.1</td><td>0.2</td><td>0.3</td><td>0.4</td></tr><tr><td>0</td><td>0.780</td><td>0.783</td><td>0.789</td><td>0.789</td><td>0.771</td></tr><tr><td>0.1</td><td>0.777</td><td>0.790</td><td>0.799</td><td>0.789</td><td>0.778</td></tr><tr><td>0.2</td><td>0.782</td><td>0.793</td><td>0.795</td><td>0.785</td><td>0.784</td></tr><tr><td>0.3</td><td>0.788</td><td>0.783</td><td>0.786</td><td>0.785</td><td>0.781</td></tr><tr><td>0.4</td><td>0.774</td><td>0.782</td><td>0.778</td><td>0.781</td><td>0.784</td></tr></table>
334
+
335
+ Table 7: Ablation studies of hyper-parameters $\alpha$ and $\beta$
336
+
337
+ <table><tr><td></td><td>0.0</td><td>0.1</td><td>0.2</td><td>0.3</td><td>0.4</td><td>0.5</td><td>0.6</td><td>0.7</td></tr><tr><td>σ</td><td>0.784</td><td>0.785</td><td>0.789</td><td>0.788</td><td>0.783</td><td>0.780</td><td>0.770</td><td>0.763</td></tr><tr><td>λ</td><td>0.741</td><td>0.777</td><td>0.798</td><td>0.795</td><td>0.788</td><td>0.786</td><td>0.787</td><td>0.777</td></tr></table>
338
+
339
+ Table 8: Ablation studies of the hyper-parameters $\sigma$ (exp-weighting) and $\lambda$ (coefficient)
340
+
341
+ mulated as:
342
+
343
+ $$
344
+ \operatorname{EMD}(P, Q) = \sum_{i} |x_{i} - y_{i}| \cdot |i - \operatorname{argmax}(Q)|, \tag{7}
345
+ $$
346
+
347
+ where the distance matrix $d(x_{i},y_{j}) = |i - \operatorname{argmax}(Q)|$ refers to the index distance of each digit to the label. Given that the predicted probability distribution $P$ is obtained through the softmax transformation, and the ground truth label $Q$ is represented as a one-hot vector, the gradient of EMD with respect to component $x_{i}$ can be expressed as:
348
+
349
+ $$
350
+ \frac{\partial \operatorname{EMD}}{\partial x_{i}} = \left\{ |k - 1|, |k - 2|, \dots, |k - n| \right\}, \tag{8}
351
+ $$
352
+
353
+ where $k = \operatorname{argmax}(Q)$ denotes the index of the label element in the one-hot vector. This gradient exhibits an inverse relationship with the proximity between the predicted distribution and the ground truth: as the prediction approaches the true label, the magnitude of the gradient diminishes. This characteristic is particularly advantageous for numerical prediction tasks, as it inherently accounts for the ordinal relationships between numerical classes, and addresses the fundamental limitation of the conventional cross-entropy.
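A minimal sketch of the simplified one-dimensional EMD in Eq. (7) is given below (our own illustration, assuming $P$ is a softmax distribution over the ten digit classes and $Q$ is a one-hot label):

```python
import torch

def digit_emd(p: torch.Tensor, q: torch.Tensor) -> torch.Tensor:
    """p: predicted distribution over digit classes; q: one-hot ground truth."""
    k = q.argmax()
    positions = torch.arange(p.numel(), dtype=p.dtype)
    cost = (positions - k).abs()           # |i - argmax(Q)|
    return ((p - q).abs() * cost).sum()    # Eq. (7)

label = torch.zeros(10); label[4] = 1.0                  # target digit: 4
near = torch.softmax(5.0 * torch.eye(10)[3], dim=0)      # most mass on digit 3
far = torch.softmax(5.0 * torch.eye(10)[9], dim=0)       # most mass on digit 9
print(digit_emd(near, label) < digit_emd(far, label))    # tensor(True): closer digit, smaller loss
```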
354
+
355
+ # E Ablations
356
+
357
+ We supplement our work with comprehensive hyperparameter experiments on the clock time recognition task using Qwen2-VL-7B, as shown in Table 7 and Table 8; some of the best-performing results are bolded. The results demonstrate that NTIL adapts well to different hyperparameters.
358
+
359
+ We also evaluate NTIL on the GSM8k math reasoning dataset (Table 9). NTIL brings consistent performance improvements on mixed text-numeric tasks, which shows the effectiveness of our method.
360
+
361
+ <table><tr><td>Model</td><td>Params</td><td>CE</td><td>EMD</td><td>NTIL</td></tr><tr><td>Qwen2.5</td><td>2b</td><td>0.509</td><td>0.517</td><td>0.523</td></tr><tr><td>MiniCPM3</td><td>4b</td><td>0.672</td><td>0.676</td><td>0.704</td></tr><tr><td>Yi</td><td>6b</td><td>0.375</td><td>0.383</td><td>0.400</td></tr><tr><td>LLaMA3</td><td>8b</td><td>0.638</td><td>0.643</td><td>0.646</td></tr></table>
362
+
363
+ Table 9: Results of the GSM8k mathematical reasoning task.
364
+
365
+ # F Qualitative Examples
366
+
367
+ Visualizations of the outputs under different losses are shown in Figure 6; the examples are taken from experimental results using LLaVA-1.5. For the image grounding task (Figure 6(a)), the goal was to predict the location of "horse back left" in an image. The CE loss (blue box) performed poorly, with predictions far from the ground truth. EMD (red box) showed an improvement, capturing spatial features better, while NTIL (green box) provided the most accurate predictions, closely matching the ground truth (black box). Overall, NTIL outperformed both CE and EMD, demonstrating its effectiveness in this task.
368
+
369
+ Figure 6(b) presents a qualitative comparison for the clock time recognition task. In this case, NTIL provides the most accurate prediction of the clock time, correctly identifying 2:55, which matches the ground truth. EMD performs better than CE, predicting 2:50, but is still slightly off, while CE predicts 5:10, a significant deviation. Overall, NTIL outperforms both EMD and CE in predicting the clock time accurately.
370
+
371
+ ![](images/fc39427dcdb13f884abbb63c1ee858c7090340ee6cdbe6775033ae158e44d8ba.jpg)
372
+
373
+ # Question: Where is the horse back left?
374
+
375
+ CE Prediction: [0.0, 0.138, 0.174, 0.754]
376
+
377
+ EMD Prediction: [0.447, 0.348, 0.687, 0.875]
378
+
379
+ NTIL Prediction: [0.567, 0.342, 0.77, 0.774]
380
+
381
+ Ground Truth: [0.581, 0.34, 0.757, 0.816]
382
+
383
+ (a) Example in Image Grounding. Blue box is CE prediction, red box is EMD prediction, green box is NTIL prediction. Black box is ground truth.
384
+
385
+ ![](images/17da207333e78557e7013a02f7ffc78834a36ef528a699c783a05a28bdb97cc2.jpg)
386
+
387
+ # Question: What's the time of this clock?
388
+
389
+ CE Prediction: 5_10
390
+
391
+ EMD Prediction: 2_50
392
+
393
+ NTIL Prediction: 2_55
394
+
395
+ Ground Truth: 2_55
396
+
397
+ (b) Example in clock time recognition.
398
+
399
+ Figure 6: Comparisons between CE, EMD and NTIL.
advancingsequentialnumericalpredictioninautoregressivemodels/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3014495afdd7ea6a1b653bb0e05b0a6534e4de684dd38d7e7ee1fc9624b3a733
3
+ size 520657
advancingsequentialnumericalpredictioninautoregressivemodels/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4b5bd6c8d52bd51ddf09daebe1ec91845e2c09db08b308f76c7fd69bd63b225f
3
+ size 417006
alittlehumandatagoesalongway/71ea7d36-d72e-452c-8999-3eee57151b03_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b6f82d3124a676b2405b18fe3773b48908d4dea9d6ef0aa27236d01b4c608c6a
3
+ size 119140
alittlehumandatagoesalongway/71ea7d36-d72e-452c-8999-3eee57151b03_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2f543402107535cb2e78624796a5f775ae135dbe9f66ce36c4f0c120af01abb3
3
+ size 136303
alittlehumandatagoesalongway/71ea7d36-d72e-452c-8999-3eee57151b03_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d706a192a69b7fcabf54c5d197b200325e9932463bcbbb2171ff9ea1afcb3fea
3
+ size 3505509
alittlehumandatagoesalongway/full.md ADDED
@@ -0,0 +1,509 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # A Little Human Data Goes A Long Way
2
+
3
+ Dhananjay Ashok†, Jonathan May†
4
+
5
+ †Information Sciences Institute, University of Southern California
6
+
7
+ {ashokd, jonmay}@isi.edu
8
+
9
+ # Abstract
10
+
11
+ Faced with an expensive human annotation process, creators of NLP systems increasingly turn to synthetic data generation. While this method shows promise, the extent to which synthetic data can replace human annotation is poorly understood. We investigate the use of synthetic data in Fact Verification (FV) and Evidence-based Question Answering (QA) by incrementally replacing human-generated data with synthetic points on eight diverse datasets. Strikingly, replacing up to $90\%$ of the training data only marginally decreases performance, but replacing the final $10\%$ leads to severe declines. We find that models trained on purely synthetic data can be improved by including as few as 125 human generated data points. We show that matching the performance gain of a little human data requires an order of magnitude more synthetic data, and then estimate price ratios at which human annotation would be a more cost-effective solution. Our results suggest that even when human annotation at scale is infeasible, there is great value to having a small proportion of the dataset being human-generated.
12
+
13
+ # 1 Introduction
14
+
15
+ From BERT (Devlin et al., 2019) to GPT-4 (Achiam et al., 2023), the explosive growth of language models (LMs) has been underpinned by exponential increases in the size of available training data. However, the more complex and specialized the task, the more expensive and challenging it is to collect human-generated data at scale (Wang et al., 2021). Combined with growing concerns that LMs may soon exhaust the stock of publicly available training data (Villalobos et al., 2024), many turn to synthetic data generation, hoping to eliminate their reliance on human annotation.
16
+
17
+ Synthetic data generation has long been used to increase the amount of training data available (Simard et al., 2002; Krizhevsky et al.,
18
+
19
+ ![](images/ad70fc9b4cc6959ee2ccf16ff1ea2d6b68d4fece789db51e439c136d0563b1ce.jpg)
20
+
21
+ ![](images/7811780a478ca49ae049bb564605708a6fa8b5155c0f95fa92a3321e1773516f.jpg)
22
+ Figure 1: Change in model performance as the proportion of synthetic points in the training data is increased. Across datasets, the performance decrease when moving the synthetic proportion from 0 to 0.90 is often less than that of moving from 0.9 to purely synthetic data.
23
+
24
+ 2012). Early NLP approaches use rule based methods (De Gispert et al., 2005; Chen et al., 2012), paraphrasing (Wang and Yang, 2015; Kobayashi, 2018), noising (Xie et al., 2017; Wang et al., 2018), and backtranslation (Sennrich et al., 2016; Yu et al., 2018), but are limited in their capability.
25
+
26
+ Modern LMs demonstrate the capability to solve myriad NLP tasks with minimal task specific data (Brown et al., 2020; Wei et al., 2022), making them more powerful synthetic data generators. Leveraging this, synthetic data approaches have
27
+
28
+ ![](images/0104349cdd86afca5d5b7f2ea89c28770e7aba1d993b1bac5c47dd32d82a27e9.jpg)
29
+
30
+ ![](images/dbf6e14ae41380dcf4fc7e36a4a549c4c30b7d388a165fc2c63e9ecf06293fa0.jpg)
31
+ Figure 2: Model performance as the synthetic proportion of the training data varies from 0.95 to 1. Having just $2.5\%$ of the training dataset being human-generated boosts performance.
32
+
33
+ seen increased use in tasks (Tan et al., 2024) such as QA (Wu et al., 2022), natural language inference (NLI) (Meng et al., 2022), text classification (Ye et al., 2022), instruction tuning (Li et al., 2024), evaluation (Dubois et al., 2024), and more (Tang et al., 2023).
34
+
35
+ The adoption has been particularly enthusiastic for tasks that require the model to 'understand' knowledge contained in an 'evidence text' e.g., FV (Tang et al., 2024), factual error correction (Ashok et al., 2023), NLI (Hosseini et al., 2024) and evidence-based QA (Schimanski et al., 2024). Such tasks are of vital importance in fake news detection (Sharma et al., 2023), retrieval-augmented generation (Gao et al., 2023) and dialogue systems (Weston et al., 2015). Recent datasets (Wu et al., 2022) and methods (Ye et al., 2022) exploit plentiful evidence texts (scientific journals, news articles, books, etc.), using synthetic generation to avoid being bottlenecked by the expensive annotation procedure (Liu et al., 2022).
36
+
37
+ Varying results across ML tasks suggest that whether completely replacing humans with synthetic data shows promise (Fan et al., 2024; Ham
38
+
39
+ moud et al., 2024) or leads to failures (Bisbee et al., 2024; Guo et al., 2024) is task dependent (Li et al., 2023). In this work, we focus on FV and Evidence-based QA, performing the first investigation into the trade-offs presented by the use of synthetic data generation in these fundamental tasks.
40
+
41
+ We study eight diverse FV and QA datasets, using their 'evidence texts' to generate synthetic datasets. By holding the number of data points constant but increasing the percentage of the training data that is synthetic, we can compare the utility of synthetic data to the original human generated data points. Across multiple models, prompt models, and prompting strategies, we find (Figure 1) that while increasing the proportion of synthetic data typically causes only minor degradations in model performance, a significant decline occurs at the extremes; i.e., when the percentage of synthetic data exceeds $90\%$ . Focusing on the extremes, we show that purely synthetically trained FV and QA systems can be meaningfully improved by including as few as 125 human-generated datapoints.
42
+
43
+ Our observations have actionable implications for researchers hoping to use synthetic data for FV and QA. The results (Figure 2, Figure 4) suggest that even when human annotation at scale is infeasible, there is great value to having a small proportion of the dataset being generated by humans.
44
+
45
+ To help guide this choice, we quantify the performance-cost tradeoff between human and synthetic data. We find (Figure 4) that matching the performance gain of just a little additional human data (only 200 data points) requires an order of magnitude more synthetic data points, empirically showing the per-data point price ratio at which human annotation is the more cost-effective solution. Finally, we conduct an analysis on the differing properties of synthetic vs. human data. Among other findings, we see that synthetic generations are longer and more extractive from the evidence texts than their human-produced counterparts.
46
+
47
+ # 2 Synthetic Data Generation from Evidence Texts
48
+
49
+ We study a synthetic generation pipeline representative of the methods used in the FV (Ni et al., 2024; He et al., 2023) and QA (Schimanski et al., 2024; Wan et al., 2024) literature. Using Few-Shot In-Context Learning (Brown et al., 2020), we generate synthetic (claim, label) pairs from an input evidence text. The prompt model is given exam
50
+
51
+ ![](images/c69b60933b7c77d78c4ab1da30d1f88224433a304926c7e32a898fe9a56a729f.jpg)
52
+ Figure 3: Change in accuracy when the test set (shown in key) is not seen during training, and the training set is a mixture of other FV datasets. Increasing the synthetic proportion of the dataset leads to performance declines even in the OOD setting, showing that human data offers genuine performance increases.
53
+
54
+ QA synthetic data is generated analogously; see details in Appendix B.
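+ As an illustration, the following is a minimal sketch of this generation step; the prompt wording, the `query_prompt_model` helper, and the parsing convention are our own assumptions rather than the exact implementation.
+
+ ```python
+ import random
+
+ def build_fv_prompt(examples, target_evidence, k=3):
+     """Assemble a few-shot prompt from (evidence, claim, label) triples."""
+     shots = random.choices(examples, k=k)  # examples drawn with replacement
+     parts = []
+     for evidence, claim, label in shots:
+         parts.append(f"Evidence: {evidence}\nClaim: {claim}\nLabel: {label}")
+     parts.append(f"Evidence: {target_evidence}\nClaim:")
+     return "\n\n".join(parts)
+
+ def generate_synthetic_pair(examples, target_evidence, query_prompt_model):
+     """query_prompt_model is a placeholder for a chat-completion call
+     (e.g., to GPT-3.5-Turbo) that returns the raw completion string."""
+     prompt = build_fv_prompt(examples, target_evidence)
+     completion = query_prompt_model(prompt)          # expected: "<claim>\nLabel: <label>"
+     claim, _, label = completion.partition("\nLabel:")
+     return claim.strip(), label.strip()
+ ```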
55
+
56
+ In total, we use four FV/NLI datasets: FEVER (Thorne et al., 2018), SciFact (Wadden et al., 2020), WANLI (Liu et al., 2022) and FACTIFY1.0 (Mishra et al., 2022), as well as four QA datasets: ROPES (Lin et al., 2019), CoQA (Reddy et al., 2019), QAConv (Wu et al., 2022) and FairyTaleQA (Xu et al., 2022). Together, the datasets span a variety of domains (science, news, social media, reasoning, conversation, fiction). We confirm that the generations are of high quality by verifying that the diversity of the synthetic data is comparable to the human generated samples. For more details, including a discussion on data leakage, see Appendix C.
57
+
58
+ FV performance is measured by test accuracy, while QA is measured using BLEU (Papineni et al., 2002); we show robustness to choice of metric in Appendix A. Evaluation is always conducted on the (human-generated) test split of each dataset.
59
+
60
+ We use GPT-3.5-Turbo (Brown et al., 2020) for prompting and LoRA (Hu et al., 2022) on Llama3-8B (Dubey et al., 2024) for fine-tuning. Implementation details are provided in Appendix E, and our code is publicly available.<sup>1</sup>
61
+
62
+ ![](images/42ae09088052d7bc4c220e9d26114456d13ea6afc8262a00cc52bcf53692c0fb.jpg)
63
+ Figure 4: On the WANLI dataset, adding 200 real data points is as effective as adding an order of magnitude more synthetic data points.
64
+
65
+ # 3 Can Synthetic Data Replace Humans?
66
+
67
+ We investigate the potential of synthetic data to replace human annotation by holding the number of training data points fixed, incrementally increasing the proportion of the data that is synthetic, and fine-tuning a model on each training set.
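+ Concretely, the mixing step can be sketched as below; the helper and variable names are illustrative, not our exact code.
+
+ ```python
+ import random
+
+ def mixed_training_set(human_data, synthetic_data, n, synthetic_fraction, seed=0):
+     """Build a training set of fixed size n in which a given fraction of the
+     points is synthetic and the remainder is human-generated."""
+     rng = random.Random(seed)
+     n_synthetic = round(n * synthetic_fraction)
+     n_human = n - n_synthetic
+     sample = rng.sample(synthetic_data, n_synthetic) + rng.sample(human_data, n_human)
+     rng.shuffle(sample)
+     return sample
+
+ # e.g., a 5000-point training set that is 97.5% synthetic (125 human points):
+ # train = mixed_training_set(human_points, synthetic_points, n=5000, synthetic_fraction=0.975)
+ ```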
68
+
69
+ Results: Across all datasets, using purely synthetic data leads to worse performance than the same amount of human data (Figure 1). We consider the possibility that this result could be caused by a spurious correlation between the human training and testing splits (e.g., annotation artifacts that are correlated with the label but not fundamental to the task). We conduct an out-of-distribution experiment, using different datasets for training and testing (e.g., training on FEVER + SciFACT and testing on WANLI). Increasing the synthetic proportion leads to performance declines even in the OOD setting (Figure 3), showing that human data offers genuine performance increases, and the results cannot be explained by a spurious correlation between the human test and human training samples (further discussion in Appendix A).
70
+
71
+ The performance decline is not uniform as we increase the synthetic proportion. On almost all datasets, there is only a minor degradation up until $90\%$ replacement, after which the performance drops considerably. We zoom in on the $90\% - 100\%$ interval, fixing the amount of training data at $n = 5000$ (500 for SciFact) and training on datasets with $95\%$, $97.5\%$, and $100\%$ synthetic data (Figure 2). Surprisingly, the results show that there is a significant difference between the performance of models on $97.5\%$ and $100\%$ synthetic data; the addition of just 125 (2.5% of 5000) human-generated datapoints reliably improves the performance of synthetically trained FV and QA models. These trends hold robustly over different languages (Arabic, Georgian, Indonesian), choice of fine-tuning model (Mistral, MPT), prompt model (GPT-4 and Claude-3.5-Sonnet), prompting strategy (Chain-of-Thought), model size, and dataset size (Appendix A).
74
+
75
+ # 4 When Should We Use Human Data?
76
+
77
+ Having observed the disproportionate value added by human data, we ask what the relative cost between human and synthetic data generation must be for us to prefer one over the other. We fine-tune models on purely synthetic datasets of varying sizes, and establish the synthetic baseline by fitting a curve of the form $y = a_{0} + a_{1}\log (x)$, where $x$ is the size of the synthetic dataset and $y$ is the performance. We then take the synthetic training sets with $\{1000, 2000, \ldots\}$ points and observe the performance $(y^{*})$ when we add 200 human data points. $\exp \left(\frac{y^{*} - a_{0}}{a_{1}}\right)$ is then the size of the purely synthetic dataset that achieves equivalent performance.
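+ A minimal sketch of this estimate, assuming the performance measurements are held in NumPy arrays (the numbers below are illustrative only, not results from the paper):
+
+ ```python
+ import numpy as np
+
+ # synthetic-only baseline: dataset sizes and the corresponding test performance
+ sizes = np.array([1000, 2000, 4000, 8000, 16000])
+ perf = np.array([0.62, 0.66, 0.69, 0.715, 0.73])   # illustrative values
+
+ # fit y = a0 + a1 * log(x) by least squares
+ a1, a0 = np.polyfit(np.log(sizes), perf, deg=1)
+
+ def equivalent_synthetic_size(y_star):
+     """Size of a purely synthetic dataset predicted to reach performance y_star."""
+     return float(np.exp((y_star - a0) / a1))
+
+ # y_star: performance observed after adding 200 human points to a synthetic set;
+ # compare equivalent_synthetic_size(y_star) against the base synthetic size.
+ ```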
78
+
79
+ Results: Across all datasets, adding 200 human data points is usually comparable to adding at least an order of magnitude (often multiple orders of magnitude) more synthetic data points. On WANLI (Figure 4), more than 17,000 additional synthetic points are needed to achieve the performance gains of 200 human points. Consequently, as long as the price of a human-generated point for WANLI is less than 73 times the price of a synthetic point, an incremental amount of human annotation is the more cost-effective way to achieve the same increase in accuracy. In the extreme case, the equation learned on FairyTaleQA suggests that it takes $2\times 10^{5}$ additional synthetic points to match the performance gain of 200 additional human data points. Rather than interpret these numbers literally, we take them to suggest that human data could have unique value in some settings, enabling performance levels that are impossible with purely synthetic datasets. See Appendix A for more results and details.
80
+
81
+ # 5 Discussion
82
+
83
+ The synthetic generations are as diverse as human data (Table 2), with comparable duplicate rates on the QA datasets and markedly fewer duplicates on most FV datasets.
84
+
85
+ ![](images/b0392530ebf2e22888531566d4b801a24b8f78b05f4b01d95687809a25d20158.jpg)
86
+ Figure 5: Synthetic questions are longer than human generated ones, a trend also seen in answers.
87
+
88
+ This is evidence that our synthetic generations are of good quality; however, even on the datasets where the synthetic data is significantly more diverse, the synthetic data does not perform as well. This suggests that diversity is an insufficient measure of quality when evaluating how good the generated data is. Our analysis shows that synthetic data generation produces claims of comparable length to the human datasets, yet synthetic questions and answers tend to be longer than their human-generated counterparts for all QA datasets (Figure 5). We find that synthetic generations have a higher n-gram overlap with the evidence sentences. This suggests that synthetic data generation produces data points that are more directly taken from the evidence texts, while humans are more likely to employ rephrasing or different vocabulary than the evidence texts. Surprisingly, we find that synthetic data generation chooses more varied parts of the input text as sources for the question and answer content, with human annotation overwhelmingly more likely to create questions whose answers lie at the start of the evidence texts. We include a detailed discussion in Appendix D.
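+ Table 2 reports the percentage of duplicated claims/questions; a minimal sketch of one way to compute such a rate (the normalization and the "repeats beyond the first occurrence" convention are our own assumptions) is:
+
+ ```python
+ from collections import Counter
+
+ def duplicate_rate(texts):
+     """Percentage of claims/questions that exactly repeat an earlier one."""
+     counts = Counter(t.strip().lower() for t in texts)   # light normalization (our choice)
+     duplicates = sum(c - 1 for c in counts.values() if c > 1)
+     return 100.0 * duplicates / len(texts)
+
+ # duplicate_rate(synthetic_claims) vs. duplicate_rate(human_claims), as in Table 2
+ ```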
89
+
90
+ # 6 Related Work
91
+
92
+ The replacement of human annotation with synthetic data is extensively studied in the pretraining stage of LMs, where results consistently show (Shumailov et al., 2023; Seddik et al., 2024; Guo et al., 2024; Briesch et al., 2023) catastrophic forgetting, mode collapse, and performance deterioration.
93
+
94
+ In our setting, relying only on synthetic data still achieves reasonable performance across all tasks.
95
+
96
+ <table><tr><td>Dataset</td><td>Mean</td><td>Median</td><td>25th Percentile</td><td>75th Percentile</td></tr><tr><td>WANLI</td><td>17,671</td><td>16,905</td><td>9,711</td><td>22,931</td></tr><tr><td>ROPES</td><td>17,333</td><td>6,006</td><td>3,623</td><td>21,944</td></tr><tr><td>FairyTaleQA</td><td>281,951</td><td>36,901</td><td>15,129</td><td>813,135</td></tr><tr><td>FEVER</td><td>1,155</td><td>237</td><td>-1,400</td><td>7,073</td></tr></table>
97
+
98
+ Table 1: Additional synthetic data points needed to match the performance gain of 200 human data points. High values for FairyTaleQA suggest that human-generated data may unlock performance that purely synthetic data cannot achieve. Negative values for FEVER are due to a saturation of the performance gains; however, human data points reach the saturation point much faster (Appendix A).
99
+
100
+ <table><tr><td>Dataset</td><td>Synthetic</td><td>Human</td></tr><tr><td>FEVER</td><td>5.50</td><td>20.27</td></tr><tr><td>WANLI</td><td>0.23</td><td>1.22</td></tr><tr><td>SCIFACT</td><td>0.00</td><td>9.93</td></tr><tr><td>FACTIFY</td><td>0.27</td><td>14.93</td></tr><tr><td>NarrativeQA</td><td>3.85</td><td>1.42</td></tr><tr><td>CoQA</td><td>0.54</td><td>5.49</td></tr><tr><td>FairyTaleQA</td><td>2.26</td><td>0.18</td></tr><tr><td>ROPES</td><td>2.40</td><td>1.35</td></tr></table>
101
+
102
+ Table 2: Percentage of duplicated claims/questions for synthetic vs. human data. Rates are comparable across datasets, but for fact verification datasets, synthetic datasets have fewer duplicates.
103
+
104
+ This suggests that the usage of exclusively synthetic data poses fewer risks when generations are grounded in diverse, natural 'evidence texts.'
105
+
106
+ Interestingly, conclusions that align with our findings appear more often in the image and multimodal domains, where recent work (Singh et al., 2024; He et al., 2023; Fan et al., 2024) finds that synthetic data holds promise, but must be used in conjunction with human data to mitigate its harms.
107
+
108
+ There is limited work on understanding whether synthetic data can replace human annotation in a task-specific setting for the language domain. Li et al. (2023) categorize text classification tasks by subjectivity, showing that synthetic data is less useful when tasks are more subjective. This draws them to focus on different tasks (sentiment classification, relation extraction and spam detection), and they do not study using a mixture of real and synthetic data. Bisbee et al. (2024) demonstrate that replacing political survey respondents with LMs produces unreliable results, while Ahmed et al. (2024) find that there are specific software engineering subtasks where synthetic data approaches human performance. Chen et al. (2024) show that instruction-following capabilities are diminished when using synthetic data and present a machine unlearning approach to mitigate this. The diversity of results when evaluating the impact of using purely synthetic data confirms that the feasibility of replacing human annotation with synthetic data is highly task dependent. This work deepens our understanding of the problem by being the first to study whether synthetic data can replace human annotation on the fundamental tasks of fact verification and evidence-based question answering.
111
+
112
+ # 7 Conclusion
113
+
114
+ Showing impressive performance when human data is scarce, synthetic data generation seems poised to remain a key method in FV and QA. Our work shows that this method is best used in conjunction with human data. A little human data goes a long way, with just 125 points being enough to see reliable gains on all datasets studied. With practical considerations in mind, we show that matching the gains of a small amount of additional human data can require an order of magnitude more synthetic data, suggesting that at times human annotation can be cost-effective relative to synthetic generation. We hope these results better inform design decisions on datasets and methods for fact verification and question answering.
115
+
116
+ # 8 Limitations
117
+
118
+ While we include results on multilingual Fact Verification datasets, the primary focus of our work is limited to the English language. Additionally, our results on multilingual datasets suggest that while similar claims can be made regarding the impact of replacing human annotation with synthetic data across different languages, the amount of human data needed to observe a meaningful performance increase may vary across languages. We also have a limited ability to control for dataset leakage, with only one dataset from each of the tasks that is surely not leaked to GPT-3.5 (and even these two datasets may have been seen by GPT-4). This can potentially bias the results in favor of synthetic data. Due to the scarcity of suitable available datasets (i.e., ones that have not been exposed to the prompt models), we are prevented from studying the problem more rigorously. Another limitation is that while we are able to identify clear differences between synthetic vs. real data distributions, our analysis of the errors made by models trained on $0\%$ vs. $100\%$ synthetic data failed to yield any generalizable insights that could inform modelling approaches. A more fine-grained study of the effect of using synthetic data on the behaviour of the downstream model is hence left as a subject of future research.
121
+
122
+ # 9 Ethical Considerations
123
+
124
+ The usage of synthetic data has several important ethical considerations. In the era of LMs trained on internet-wide corpora having poor documentation as to their exact data sources, it becomes challenging to ensure the privacy of individuals whose data may be obtainable via a public crawl (Yao et al., 2024). Additionally, models trained on massive internet-based data sources may contain implicit biases, as well as illegal and/or highly offensive material that is hard to audit and clean (Bender et al., 2021). This data affects the synthetic data obtained from prompt models, and could unknowingly impose cultural or ethical viewpoints that are unintended or not well aligned with the use case in mind. Specifically, prior work has shown that one of the prompt models studied in this work, GPT-3.5, often disagrees with humans on key ethical questions (Felkner et al., 2024). The endeavour to completely replace human annotation with synthetic data generation also has key implications on the extent to which the field of NLP employs human annotators. It is possible that an increasing reliance on purely synthetic data reduces the demand for human annotation, which would place a downward pressure on the working standards and compensation awarded to the remaining human annotators (Weidinger et al., 2022). We argue in this work that we should not try to eliminate human annotation from our dataset and method design, showing that their work contributes uniquely helpful data points.
125
+
126
+ # 10 Acknowledgements
127
+
128
+ This work was funded by the Defense Advanced Research Projects Agency with award HR00112220046. Any opinions, findings, conclusions, or recommendations expressed here are those of the authors and do not necessarily reflect the view of our sponsors.
129
+
130
+ This work used Jetstream2 at Indiana University through allocation CIS240665 from the Advanced Cyberinfrastructure Coordination Ecosystem: Services & Support (ACCESS) program, which is supported by U.S. National Science Foundation grants #2138259, #2138286, #2138307, #2137603, and #2138296.
131
+
132
+ # References
133
+
134
+ Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. 2023. Gpt-4 technical report. arXiv preprint arXiv:2303.08774.
135
+ Toufique Ahmed, Prem Devanbu, Christoph Treude, and Michael Pradel. 2024. Can llms replace manual annotation of software engineering artifacts? ArXiv, abs/2408.05534.
136
+ Dhananjay Ashok, Atharva Kulkarni, Hai Pham, and Barnabas Poczos. 2023. The student becomes the master: Outperforming GPT3 on scientific factual error correction. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 6762-6778, Singapore. Association for Computational Linguistics.
137
+ Emily M Bender, Timnit Gebru, Angelina McMillan-Major, and Shmargaret Shmitchell. 2021. On the dangers of stochastic parrots: Can language models be too big? In Proceedings of the 2021 ACM conference on fairness, accountability, and transparency, pages 610-623.
138
+ James Bisbee, Joshua D. Clinton, Cassy Dorff, Brenton Kenkel, and Jennifer M. Larson. 2024. Synthetic replacements for human survey data? the perils of large language models. *Political Analysis*.
139
+ Martin Briesch, Dominik Sobania, and Franz Rothlauf. 2023. Large language models suffer from their own output: An analysis of the self-consuming training loop. ArXiv, abs/2311.16822.
140
+ Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language models are few-shot learners. In Advances in Neural Information Processing Systems, volume 33, pages 1877-1901. Curran Associates, Inc.
143
+ Jie Chen, Yupeng Zhang, Bingning Wang, Xin Zhao, Ji-Rong Wen, and Weipeng Chen. 2024. Unveiling the flaws: Exploring imperfections in synthetic data and mitigation strategies for large language models. In *Findings of the Association for Computational Linguistics: EMNLP* 2024, pages 14855-14865, Miami, Florida, USA. Association for Computational Linguistics.
144
+ Mei-Hua Chen, Shih-Ting Huang, Chung-Chi Huang, Hsien-Chin Liou, and Jason S Chang. 2012. Prefer: using a graph-based approach to generate paraphrases for language learning. In Proceedings of the Seventh Workshop on Building Educational Applications Using NLP, pages 80-85.
145
+ Adria De Gispert, José B Marino, and Josep Maria Crego. 2005. Improving statistical machine translation by classifying and generalizing inflected verb forms. In *INTERSPEECH*, pages 3193-3196.
146
+ Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.
147
+ Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783.
148
+ Yann Dubois, Chen Xuechen Li, Rohan Taori, Tianyi Zhang, Ishaan Gulrajani, Jimmy Ba, Carlos Guestrin, Percy S Liang, and Tatsunori B Hashimoto. 2024. Alpacafarm: A simulation framework for methods that learn from human feedback. Advances in Neural Information Processing Systems, 36.
149
+ Lijie Fan, Kaifeng Chen, Dilip Krishnan, Dina Katabi, Phillip Isola, and Yonglong Tian. 2024. Scaling laws of synthetic images for model training... for now. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7382-7392.
150
+ Virginia Felkner, Jennifer Thompson, and Jonathan May. 2024. GPT is not an annotator: The necessity of human annotation in fairness benchmark construction. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 14104-14115, Bangkok, Thailand. Association for Computational Linguistics.
151
+
152
+ Yunfan Gao, Yun Xiong, Xinyu Gao, Kangxiang Jia, Jinliu Pan, Yuxi Bi, Yi Dai, Jiawei Sun, Qianyu Guo, Meng Wang, and Haofen Wang. 2023. Retrievalaugmented generation for large language models: A survey. ArXiv, abs/2312.10997.
153
+ Yanzhu Guo, Guokan Shang, Michalis Vazirgiannis, and Chloé Clavel. 2024. The curious decline of linguistic diversity: Training language models on synthetic text. In *Findings of the Association for Computational Linguistics: NAACL* 2024, pages 3589-3604, Mexico City, Mexico. Association for Computational Linguistics.
154
+ Hasan Hammoud, Hani Itani, Fabio Pizzati, Philip H. S. Torr, Adel Bibi, and Bernard Ghanem. 2024. Synthclip: Are we ready for a fully synthetic clip training? ArXiv, abs/2402.01832.
155
+ Ruifei He, Shuyang Sun, Xin Yu, Chuhui Xue, Wenqing Zhang, Philip Torr, Song Bai, and Xiaojuan Qi. 2023. Is synthetic data from generative models ready for image recognition? In The Eleventh International Conference on Learning Representations.
156
+ Or Honovich, Roee Aharoni, Jonathan Herzig, Hagai Taitelbaum, Doron Kukliansy, Vered Cohen, Thomas Scialom, Idan Szpektor, Avinatan Hassidim, and Yossi Matias. 2022. TRUE: Re-evaluating factual consistency evaluation. In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 3905-3920, Seattle, United States. Association for Computational Linguistics.
157
+ Mohammad Javad Hosseini, Andrey Petrov, Alex Fabrikant, and Annie Louis. 2024. A synthetic data approach for domain generalization of NLI models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2212-2226, Bangkok, Thailand. Association for Computational Linguistics.
158
+ Edward J Hu, yelong shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2022. LoRA: Low-rank adaptation of large language models. In International Conference on Learning Representations.
159
+ Sosuke Kobayashi. 2018. Contextual augmentation: Data augmentation by words with paradigmatic relations. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 452-457, New Orleans, Louisiana. Association for Computational Linguistics.
160
+ Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. 2012. Imagenet classification with deep convolutional neural networks. Advances in neural information processing systems, 25.
161
+ Xian Li, Ping Yu, Chunting Zhou, Timo Schick, Omer Levy, Luke Zettlemoyer, Jason E Weston, and Mike Lewis. 2024. Self-alignment with instruction back-translation. In The Twelfth International Conference on Learning Representations.
164
+ Zhuoyan Li, Hangxiao Zhu, Zhuoran Lu, and Ming Yin. 2023. Synthetic data generation with large language models for text classification: Potential and limitations. In The 2023 Conference on Empirical Methods in Natural Language Processing.
165
+ Chin-Yew Lin. 2004. ROUGE: A package for automatic evaluation of summaries. In Text Summarization Branches Out, pages 74-81, Barcelona, Spain. Association for Computational Linguistics.
166
+ Kevin Lin, Oyvind Tafjord, Peter Clark, and Matt Gardner. 2019. Reasoning over paragraph effects in situations. In Proceedings of the 2nd Workshop on Machine Reading for Question Answering, pages 58-62, Hong Kong, China. Association for Computational Linguistics.
167
+ Alisa Liu, Swabha Swayamdipta, Noah A. Smith, and Yejin Choi. 2022. Wanli: Worker and ai collaboration for natural language inference dataset creation. In Conference on Empirical Methods in Natural Language Processing.
168
+ Kyle Lo, Lucy Lu Wang, Mark Neumann, Rodney Kinney, and Daniel Weld. 2020. S2ORC: The semantic scholar open research corpus. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4969-4983, Online. Association for Computational Linguistics.
169
+ Yu Meng, Jiaxin Huang, Yu Zhang, and Jiawei Han. 2022. Generating training data with language models: Towards zero-shot language understanding. Advances in Neural Information Processing Systems, 35:462-477.
170
+ Shreyash Mishra, S Suryavardan, Amrit Bhaskar, Parul Chopra, Aishwarya N. Reganti, Parth Patwa, Amitava Das, Tanmoy Chakraborty, A. Sheth, and Asif Ekbal. 2022. Factify: A multi-modal fact verification dataset. In DE-FACTIFY@AAAI.
171
+ Jingwei Ni, Minjing Shi, Dominik Stammbach, Mrinmaya Sachan, Elliott Ash, and Markus Leippold. 2024. AFaCTA: Assisting the annotation of factual claim detection with reliable LLM annotators. In Annual Meeting of the Association for Computational Linguistics.
172
+ Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pages 311-318, Philadelphia, Pennsylvania, USA. Association for Computational Linguistics.
173
+ Siva Reddy, Danqi Chen, and Christopher D. Manning. 2019. CoQA: A conversational question answering challenge. Transactions of the Association for Computational Linguistics, 7:249-266.
174
+
175
+ Tobias Schimanski, Jingwei Ni, Mathias Kraus, Elliott Ash, and Markus Leippold. 2024. Towards faithful and robust LLM specialists for evidence-based question-answering. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1913-1931, Bangkok, Thailand. Association for Computational Linguistics.
176
+ Mohamed El Amine Seddik, Suei-Wen Chen, Soufane Hayou, Pierre Youssef, and Mérouane Debbah. 2024. How bad is training on synthetic data? a statistical analysis of language model collapse. ArXiv, abs/2404.05090.
177
+ Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Improving neural machine translation models with monolingual data. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 86-96, Berlin, Germany. Association for Computational Linguistics.
178
+ Umang Sharma, Sidarth Saran, and Dr Shankar M. Patil. 2023. Fake news detection using machine learning algorithms. 2023 International Conference on New Frontiers in Communication, Automation, Management and Security (ICCAPS), 1:1-7.
179
+ Ilia Shumailov, Zakhar Shumaylov, Yiren Zhao, Yarin Gal, Nicolas Papernot, and Ross Anderson. 2023. The curse of recursion: Training on generated data makes models forget. ArXiv, abs/2305.17493.
180
+ Patrice Y Simard, Yann A LeCun, John S Denker, and Bernard Victorri. 2002. Transformation invariance in pattern recognition—tangent distance and tangent propagation. In *Neural networks: tricks of the trade*, pages 239–274. Springer.
181
+ Krishnakant Singh, Thanush Navaratnam, Jannik Holmer, Simone Schaub-Meyer, and Stefan Roth. 2024. Is synthetic data all we need? benchmarking the robustness of models trained with synthetic images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2505-2515.
182
+ Zhen Tan, Dawei Li, Song Wang, Alimohammad Beigi, Bohan Jiang, Amrita Bhattacharjee, Mansooreh Karami, Jundong Li, Lu Cheng, and Huan Liu. 2024. Large language models for data annotation and synthesis: A survey. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, pages 930-957, Miami, Florida, USA. Association for Computational Linguistics.
183
+ Liyan Tang, Philippe Laban, and Greg Durrett. 2024. Minicheck: Efficient fact-checking of llms on grounding documents. ArXiv, abs/2404.10774.
184
+ Ruixiang Tang, Xiaotian Han, Xiaoqian Jiang, and Xia Hu. 2023. Does synthetic data generation of llms help clinical text mining? ArXiv, abs/2303.04360.
185
+
186
+ James Thorne, Andreas Vlachos, Christos Christodoulopoulos, and Arpit Mittal. 2018. Fever: a large-scale dataset for fact extraction and verification. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 809-819.
187
+ Pablo Villalobos, Anson Ho, Jaime Sevilla, Tamay Besiroglu, Lennart Heim, and Marius Hobbahn. 2024. Position: Will we run out of data? limits of LLM scaling based on human-generated data. In *Forty-first International Conference on Machine Learning*.
188
+ David Wadden, Shanchuan Lin, Kyle Lo, Lucy Lu Wang, Madeleine van Zuylen, Arman Cohan, and Hannaneh Hajishirzi. 2020. Fact or fiction: Verifying scientific claims. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 7534-7550, Online. Association for Computational Linguistics.
189
+ Yuwei Wan, Yixuan Liu, Aswathy Ajith, Clara Grazian, Bram Hoex, Wenjie Zhang, Chunyu Kit, Tong Xie, and Ian Foster. 2024. SciQAG: A framework for auto-generated science question answering dataset with fine-grained evaluation.
190
+ Shuohang Wang, Yang Liu, Yichong Xu, Chenguang Zhu, and Michael Zeng. 2021. Want to reduce labeling cost? GPT-3 can help. In Findings of the Association for Computational Linguistics: EMNLP 2021, pages 4195-4205, Punta Cana, Dominican Republic. Association for Computational Linguistics.
191
+ William Yang Wang and Diyi Yang. 2015. That's so annoying!!!: A lexical and frame-semantic embedding based data augmentation approach to automatic categorization of annoying behaviors using #petpeeve tweets. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 2557-2563, Lisbon, Portugal. Association for Computational Linguistics.
192
+ Xinyi Wang, Hieu Pham, Zihang Dai, and Graham Neubig. 2018. SwitchOut: an efficient data augmentation algorithm for neural machine translation. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 856-861, Brussels, Belgium. Association for Computational Linguistics.
193
+ Jason Wei, Yi Tay, Rishi Bommasani, Colin Raffel, Barret Zoph, Sebastian Borgeaud, Dani Yogatama, Maarten Bosma, Denny Zhou, Donald Metzler, et al. 2022. Emergent abilities of large language models. arXiv preprint arXiv:2206.07682.
194
+ Laura Weidinger, Jonathan Uesato, Maribeth Rauh, Conor Griffin, Po-Sen Huang, John Mellor, Amelia Glaese, Myra Cheng, Borja Balle, Atoosa Kasirzadeh, et al. 2022. Taxonomy of risks posed by language models. In Proceedings of the 2022 ACM Conference on Fairness, Accountability, and Transparency, pages 214-229.
195
+
196
+ Jason Weston, Antoine Bordes, Sumit Chopra, and Tomas Mikolov. 2015. Towards ai-complete question answering: A set of prerequisite toy tasks. arXiv: Artificial Intelligence.
197
+ Adina Williams, Nikita Nangia, and Samuel Bowman. 2018. A broad-coverage challenge corpus for sentence understanding through inference. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 1112-1122. Association for Computational Linguistics.
198
+ Chien-Sheng Wu, Andrea Madotto, Wenhao Liu, Pascale Fung, and Caiming Xiong. 2022. QAConv: Question answering on informative conversations. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 5389-5411, Dublin, Ireland. Association for Computational Linguistics.
199
+ Ziang Xie, Sida I. Wang, Jiwei Li, Daniel Lévy, Aiming Nie, Dan Jurafsky, and Andrew Y. Ng. 2017. Data-noising as smoothing in neural network language models. In International Conference on Learning Representations.
200
+ Ying Xu, Dakuo Wang, Mo Yu, Daniel Ritchie, Bingsheng Yao, Tongshuang Wu, Zheng Zhang, Toby Jia-Jun Li, Nora Bradford, Branda Sun, Tran Bao Hoang, Yisi Sang, Yufang Hou, Xiaojuan Ma, Diyi Yang, Nanyun Peng, Zhou Yu, and Mark Warschauer. 2022. Fantastic questions and where to find them: FairytaleQA - an authentic dataset for narrative comprehension. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 447-460, Dublin, Ireland. Association for Computational Linguistics.
201
+ Yifan Yao, Jinhao Duan, Kaidi Xu, Yuanfang Cai, Zhibo Sun, and Yue Zhang. 2024. A survey on large language model (llm) security and privacy: The good, the bad, and the ugly. *High-Confidence Computing*, page 100211.
202
+ Jiacheng Ye, Jiahui Gao, Qintong Li, Hang Xu, Jiangtao Feng, Zhiyong Wu, Tao Yu, and Lingpeng Kong. 2022. ZeroGen: Efficient zero-shot learning via dataset generation. In Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pages 11653-11669, Abu Dhabi, United Arab Emirates. Association for Computational Linguistics.
203
+ Adams Wei Yu, David Dohan, Quoc Le, Thang Luong, Rui Zhao, and Kai Chen. 2018. Fast and accurate reading comprehension by combining self-attention and convolution. In International Conference on Learning Representations.
204
+ Yuheng Zha, Yichi Yang, Ruichen Li, and Zhiting Hu. 2023. AlignScore: Evaluating factual consistency with a unified alignment function. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 11328-11348, Toronto, Canada. Association for Computational Linguistics.
207
+ Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. 2020. BERTScore: Evaluating text generation with BERT. In International Conference on Learning Representations.
208
+
209
+ # A Supplemental Figures
210
+
211
+ We present a detailed set of figures and tables to supplement the results presented in the main text.
212
+
213
+ Main Experiments: For figures in the main text where only one task is shown (Figure 1 and Figure 2), we provide the complete figures with both tasks (Figure 6 and Figure 7). We also provide the individual performance curves for these experiments (Figure 8 and Figure 10).
214
+
215
+ Robustness to choice of QA metric: To verify the robustness of the results, we show that the QA results are not an artifact of the choice of metric (Table 3 and Table 4) by using Exact Match, String Inclusion, ROUGE-1 (Lin, 2004) and BERTScore (Zhang et al., 2020). There is overwhelming agreement between all metrics on the rankings of models.
216
+
217
+ Addressing spurious correlations: We show that the performance gains afforded by human generated data cannot be explained by a spurious correlation between the human generated train and test splits. This would occur when there are significant annotation artifacts that are not relevant to the task, but are correlated with the correct output. We conduct an out-of-domain experiment (Table 5), using different datasets to source the training data and testing on a single hold out dataset. Using more synthetic data leads to performance declines even in the OOD setting, showing that human data is of higher quality and the results from the main text cannot be explained by a spurious correlation between the human test and human training samples. Interestingly, in the OOD setting the decline is steady, and we do not observe the phenomenon of a small amount of human data having a disproportionate impact on performance. This suggests that the disproportionate impact of human data occurs when the human data is in-domain. We leave a further exploration of the OOD generalization abilities of synthetic vs. human data to future work.
218
+
219
+ Multilingual Experiments: We replicate our experiment using the Arabic, Georgian, and Indonesian splits of the XFact dataset. We observe (Figure 9) the same trend as those from earlier experiments, confirming that our results are not limited to the English language. While the phenomenon is reproduced, the threshold of replacement at which we observe a precipitous decline is not the same across languages. We hypothesize that the language-specific threshold at which a little human data leads to significant performance increases is dependent on how low-resource the language is. The study of synthetic data in the multilingual setting has unique considerations that we have not addressed in this work; we leave a focus on these problems to future work.
222
+
223
+ Ablations: We show that the same trends can be seen (Figures 11 to 15) when using a different fine-tuning model (Mistral-7B), models of varying scales (from 1B to 30B parameters), different prompting models (GPT-4 and Claude-3.5), and a more sophisticated prompting strategy (Chain-of-Thought Prompting). Across all configurations, we see a consistent decrease in performance when moving from $95\%$ to $100\%$ synthetic data, confirming that models trained on purely synthetic data can be improved by including just 125 real data points. For Chain-of-Thought Prompting, the authors manually annotated 3 examples with rationales per dataset to serve as the prompts. The complete examples and pipeline are provided with the code: github.com/dhananjayashok/littlehumandata
224
+
225
+ We additionally show that these trends hold across data scales (Figures 16 and 17), replicating the experiment with $n = 3000$ and $n = 1000$ . While the trend is clearly visible in both cases, the results for $n = 1000$ have more variance and hence have a minority of cases where the relationship does not hold.
226
+
227
+ Tradeoff Experiment: The main text shows results for the experiment detailed in Section 4 on the WANLI dataset (Figure 4); here we show results on the remaining three datasets (Figure 18) and provide (Table 1) the number of additional synthetic points needed to match the performance gains of 200 additional real points (mean, median, and quartiles for each dataset). ROPES shows similar results to WANLI; however, FairyTaleQA and FEVER present different trends. On FEVER, we are able to reach the saturation point, after which additional data (whether synthetic or real) does not increase performance. Even in this case, we are able to reach this point of diminishing marginal return more rapidly when using a small amount of human data. On a base synthetic training set of size 3000, adding 200 real data points drives the test accuracy to $89.25\%$, a score that is only matched once we add at least 2000 synthetic data points (an order of magnitude larger). On FairyTaleQA, we get enormous estimates for the number of additional synthetic points needed (a mean of 2.8e5). We do not interpret these numbers literally, rather seeing this as a sign that human-generated data may occasionally boost performance to an extent that could be fundamentally unachievable by purely synthetic data.
230
+
231
+ # B Synthetic Data Generation
232
+
233
+ In our implementation (Figure 19), we use few-shot learning with $k = 3$ , i.e., three examples per query, with each example drawn randomly (with replacement) from the training set of the specific dataset.
234
+
235
+ We generate one synthetic point for every real point in the dataset, using the evidence text it is associated with. This gives us a total of $n$ synthetic data points for every $n$ real data points in a dataset.
236
+
237
+ We observed that if we did not correct for label shift, the prompt model would be heavily biased towards True claims, i.e., it would generate a dataset containing $90\%$ True claims, while original datasets have proportions between $33\% - 60\%$ True.
238
+
239
+ For the synthetic datasets used in our experiments, we correct for this label shift by specifying the label of the claim we wish to generate and providing only examples of claims with that specific label in the prompt.
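+ A minimal sketch of this label-conditioned generation loop follows; the label names, target proportion, and helper names are illustrative assumptions rather than our exact code.
+
+ ```python
+ import random
+
+ def label_conditioned_examples(train_data, target_label, k=3):
+     """Pick k in-context examples that all carry the requested label."""
+     pool = [ex for ex in train_data if ex["label"] == target_label]
+     return random.choices(pool, k=k)  # sampled with replacement
+
+ def choose_target_label(rng, true_proportion=0.5):
+     """Request labels at roughly the proportion seen in the original dataset,
+     rather than letting the prompt model default to mostly True claims."""
+     return "SUPPORTS" if rng.random() < true_proportion else "REFUTES"
+
+ rng = random.Random(0)
+ # label = choose_target_label(rng, true_proportion=0.45)
+ # shots = label_conditioned_examples(human_train, label, k=3)
+ # the prompt then asks the model for a claim with exactly this label
+ ```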
240
+
241
+ For all datasets, we verify that the diversity of the generated claims/questions/answers is comparable to that of the human generated texts (see Appendix D).
242
+
243
+ This setting is generous towards synthetic data generation. In practice, we might only have three fixed examples to use in the prompt, potentially reducing the diversity of synthetic data generated. We verify that this does not affect the results of our experiment in the Chain-of-Thought ablation (Figure 15), where we use a fixed set of examples to generate all synthetic data points. We may also not know the correct label proportion to ask for and suffer a significant label shift when using synthetic data generation.
244
+
245
+ # C Datasets Used
246
+
247
+ All datasets used below are released under open use licenses, authorizing their use in this research. For each dataset, we discuss the potential of dataset leakage (i.e., whether the data has been exposed to GPT-3.5-Turbo during its training) as well as the extent of automation involved in the generation of each dataset. However, across all experiments and ablations, these factors do not seem to have any discernible effect on the trends discovered in this work.
250
+
251
+ # C.1 Fact Verification Datasets
252
+
253
+ FEVER (Thorne et al., 2018) is a dataset of claims about specific entities, generated by altering sentences extracted from Wikipedia. The evidence passages are sentences from Wikipedia articles relevant to the entity in question. This dataset has been well established for a long time before the release of the prompting models used in this work, increasing the chance that it has been exposed to the prompt model ahead of time.
254
+
255
+ SciFact (Wadden et al., 2020) is a fact verification dataset for the scientific domain, which uses the abstracts of scientific articles as evidence texts. The corpus is collected from S2ORC (Lo et al., 2020), a publicly-available corpus of millions of scientific articles. Annotators are shown a source citation in the context of an article, and are asked to write up to three claims based on the content of the citation.
256
+
257
+ The above datasets are popular NLP challenge sets that were well known even before the release of GPT-3.5-Turbo (Brown et al., 2020), the prompting model used in this work. The following two datasets were released after the official training date cut-off, guaranteeing that the data has not been seen ahead of time.
258
+
259
+ WANLI (Liu et al., 2022) is an NLI dataset of 108K examples created through a hybrid worker and AI collaboration approach. The creators first study MultiNLI (Williams et al., 2018) and use dataset cartography to automatically identify examples that demonstrate challenging reasoning patterns, and then instruct GPT-3 to compose new examples with similar patterns. Machine generated examples are then automatically filtered, and finally revised and labeled by human crowd workers. While GPT-3.5-Turbo has not been trained on this data, it is worth noting that the data is partially synthetically generated.
260
+
261
+ FACTIFY (Mishra et al., 2022) is a dataset on multi-modal fact verification. It contains images, textual claims, reference textual documents and reference images. The dataset marks some examples that can be verified using text only; we use this sample in our experiments. This dataset was released after the training cut-off date for GPT-3.5 and takes its evidence texts/claims from human-written news or editorial articles. This ensures that the prompt models studied have not seen the data before training.
262
+
263
+ Label mapping for NLI and FV: While all of the above datasets contain labels for Supports, Refutes, and Not Enough Information (or Entails, Contradicts, Neutral), we consider the stricter formulation of Fact Verification used by Honovich et al. (2022) and Zha et al. (2023), considering a claim to be factual if the label is Supports (Entails), and non-factual otherwise.
264
+
265
+ # C.2 Question Answering Datasets
266
+
267
+ ROPES (Lin et al., 2019) is a QA dataset which tests a system's ability to apply knowledge from a passage of text to a new situation. The evidence context contains causal or qualitative relation(s) (e.g., "animal pollinators increase efficiency of fertilization in flowers"), and a novel situation that uses this background. The question requires reasoning about effects of the relationships in the background passage in the context of the situation.
268
+
269
+ CoQA (Reddy et al., 2019) is a dataset for building Conversational Question Answering systems. CoQA measures the ability of machines to understand a text passage and answer a series of interconnected questions that appear in a conversation. In our experiments, we extract only the first question in the series and use this to obtain our (context, question, answer) data points.
270
+
271
+ QAConv (Wu et al., 2022) focuses on informative conversations, including business emails, panel discussions, and work channels. The creators collect QA pairs with both human-written and machine-generated questions. They use a question generator and a dialogue summarizer as auxiliary tools to collect and recommend questions. While the arXiv version of the paper appeared (April 2021) before the GPT-3.5 training cut-off date of September 2021, the paper itself appeared only at ACL 2022. It is still possible that the training data was compromised, and owing to the lack of clarity on the training data used for GPT-3.5 we have no way to confirm or deny this speculation.
272
+
273
+ FairyTaleQA (Xu et al., 2022) is a dataset focusing on narrative comprehension of kindergarten to eighth-grade students. The evidence texts are derived from child-friendly stories. The questions are both explicit and implicit, covering seven types of narrative elements or relations. This dataset was released after the GPT-3.5 training cut-off date, ensuring that it has not been seen by our prompt model before.
274
+
275
+ # D Detailed Discussion on Differences Between Synthetic and Human Data
276
+
277
+ To compute the extent to which the evidence sentences 'contain' the questions, answers, and claims, we measure the BLEU of the generation with each individual sentence of the evidence texts, plotting the maximum of these BLEU scores in Figure 21. We find that synthetic generations have a far higher n-gram overlap with the evidence sentences than human generations. This suggests that synthetic data generation produces data points that are more extractive, while humans are more likely to abstract from the evidence. We also use the position of the evidence sentence that achieves the highest BLEU score as a proxy for the source location of the generation, and find that synthetic data generation chooses more diverse sources for the question and answer content, with human annotation overwhelmingly more likely to create questions whose answers lie at the start of the evidence texts (Figure 22). Finally, the main text shows the length comparison for a single dataset. Here we provide a larger sample (Figure 20). We explore the errors created by the models trained on $0\%$ and $100\%$ synthetic data, searching for trends or divergences between the input instances that achieve a low prediction accuracy or score. Our investigation finds no major distinguishing factors between them, leaving a more fine-grained study of the effect of purely synthetic data on model decision-making to future work.
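+ A minimal sketch of this overlap measure, assuming NLTK's sentence-level BLEU and simple whitespace tokenization (both our own simplifications):
+
+ ```python
+ from nltk.tokenize import sent_tokenize                       # may require nltk.download('punkt')
+ from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
+
+ def max_sentence_bleu(evidence_text, generation):
+     """BLEU of the generation against each evidence sentence; returns the best
+     score and the relative position of the best-matching sentence (source-location proxy)."""
+     smooth = SmoothingFunction().method1
+     hyp = generation.lower().split()
+     scores = [
+         sentence_bleu([sent.lower().split()], hyp, smoothing_function=smooth)
+         for sent in sent_tokenize(evidence_text)
+     ]
+     best = max(range(len(scores)), key=scores.__getitem__)
+     return scores[best], best / max(len(scores) - 1, 1)       # score, position in [0, 1]
+ ```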
278
+
279
+ # E Implementation Details
280
+
281
+ While our full code implementation can be seen in the GitHub repository (github.com/dhananjayashok/littlehumandata), we list the key implementation details below.
282
+
283
+ Hardware and Systems Used: The experiments were run on a cluster that included nodes with: five A40 GPUs (48GB), three RTX 2080Tis, and a separate machine using a single A100 GPU.
284
+
285
+ Prompt Models used: We used GPT-3.5-Turbo and GPT-4-Turbo Batch APIs from OpenAI. Generations were obtained at various points from August 2024 to September 2024.
286
+
287
+ Fine-Tuning Models Used: We used two fine-tuning models in our experiments. Llama3 used the Llama3.1-8B HuggingFace Checkpoint, and Mistral used the Mistral7B-Instruct-v0.2 HuggingFace Checkpoint. We did not conduct an extensive hyperparameter search; however, we tried various epochs on smaller samples of the FEVER and ROPES datasets, selecting that number for every dataset on all experiments. Fact verification models used Adam Optimization with a learning rate of 1e-5 for two epochs, while QA datasets used a learning rate of 1e-2 for five epochs.
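+ For reference, a minimal LoRA setup in this spirit might look as follows; the exact checkpoint identifier, LoRA rank, alpha, and target modules are illustrative assumptions rather than our exact configuration.
+
+ ```python
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from peft import LoraConfig, get_peft_model
+
+ base = "meta-llama/Llama-3.1-8B"  # assumed HuggingFace id for the checkpoint above
+ tokenizer = AutoTokenizer.from_pretrained(base)
+ model = AutoModelForCausalLM.from_pretrained(base, torch_dtype=torch.bfloat16)
+
+ # attach LoRA adapters (rank/alpha/dropout/target modules are assumed values)
+ lora = LoraConfig(r=16, lora_alpha=32, lora_dropout=0.05,
+                   target_modules=["q_proj", "v_proj"], task_type="CAUSAL_LM")
+ model = get_peft_model(model, lora)
+
+ # fact verification runs: Adam with lr 1e-5 for two epochs (as stated above)
+ optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)
+ ```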
290
+
291
+ ![](images/8ccbfc9dd2f145976541936a31d19b1d379c3a5cbb68e8aafa22c26e78865be6.jpg)
292
+
293
+ ![](images/763437d822f110041a6abcca13693bad0dc187939c989529d5c70105dcc67861.jpg)
294
+ Figure 6: Change in model performance as the proportion of synthetic points in the training data is increased. Across datasets, the performance decrease when moving from $0\%$ to $90\%$ synthetic data is often less than that of moving from $90\%$ to purely synthetic data.
295
+
296
+ ![](images/bf5180dc8af9d569c0bb290985edce0c21663c2c47c06143d50e875f33a8c747.jpg)
297
+
298
+ ![](images/fe66caefd0a948675b55815e01069dd8d3f06ebb721f28bee14edb3653f8a311.jpg)
299
+ Figure 7: Model performance as the synthetic proportion of the training data varies from $95\%$ to $100\%$ . Across all datasets and random seeds, having just $2.5\%$ of the training dataset being human generated boosts performance.
300
+
301
+ <table><tr><td>Dataset</td><td>Synthetic %</td><td>EM</td><td>Inc</td><td>R Inc</td><td>BLEU</td><td>ROUGE</td><td>BERTScore</td></tr><tr><td rowspan="6">CoQA</td><td>0</td><td>40.6</td><td>52.2</td><td>60.8</td><td>47.9</td><td>64.0</td><td>87.8</td></tr><tr><td>25</td><td>35.0</td><td>51.8</td><td>54.8</td><td>44.1</td><td>62.0</td><td>85.9</td></tr><tr><td>50</td><td>31.6</td><td>42.8</td><td>60.6</td><td>38.7</td><td>54.7</td><td>79.9</td></tr><tr><td>75</td><td>39.2</td><td>50.4</td><td>69.0</td><td>46.4</td><td>62.1</td><td>79.9</td></tr><tr><td>90</td><td>36.2</td><td>50.2</td><td>58.2</td><td>44.5</td><td>60.8</td><td>85.4</td></tr><tr><td>100</td><td>13.6</td><td>26.0</td><td>58.2</td><td>18.2</td><td>26.6</td><td>52.8</td></tr><tr><td rowspan="6">FairytaleQA</td><td>0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>39.3</td><td>55.3</td><td>90.8</td></tr><tr><td>25</td><td>0.0</td><td>0.0</td><td>0.0</td><td>40.0</td><td>56.1</td><td>90.5</td></tr><tr><td>50</td><td>0.0</td><td>0.0</td><td>0.0</td><td>39.2</td><td>55.3</td><td>88.9</td></tr><tr><td>75</td><td>0.0</td><td>0.0</td><td>0.0</td><td>39.7</td><td>55.4</td><td>90.6</td></tr><tr><td>90</td><td>0.0</td><td>0.0</td><td>0.0</td><td>38.1</td><td>54.2</td><td>89.9</td></tr><tr><td>100</td><td>0.0</td><td>0.0</td><td>0.0</td><td>30.1</td><td>49.5</td><td>88.4</td></tr><tr><td rowspan="6">QACNV</td><td>0</td><td>29.4</td><td>36.3</td><td>49.4</td><td>35.1</td><td>51.6</td><td>89.9</td></tr><tr><td>25</td><td>28.6</td><td>34.5</td><td>48.0</td><td>33.5</td><td>48.7</td><td>89.6</td></tr><tr><td>50</td><td>28.0</td><td>34.0</td><td>47.1</td><td>33.1</td><td>48.7</td><td>89.5</td></tr><tr><td>75</td><td>28.6</td><td>35.9</td><td>48.5</td><td>34.2</td><td>50.5</td><td>90.0</td></tr><tr><td>90</td><td>29.0</td><td>36.0</td><td>49.9</td><td>34.8</td><td>50.6</td><td>89.0</td></tr><tr><td>100</td><td>23.5</td><td>34.3</td><td>41.0</td><td>30.2</td><td>45.2</td><td>87.2</td></tr><tr><td rowspan="6">ROPES</td><td>0</td><td>66.8</td><td>67.4</td><td>72.2</td><td>67.0</td><td>72.7</td><td>96.0</td></tr><tr><td>25</td><td>66.8</td><td>67.5</td><td>69.8</td><td>67.2</td><td>71.1</td><td>96.2</td></tr><tr><td>50</td><td>62.8</td><td>63.4</td><td>65.5</td><td>63.1</td><td>66.6</td><td>95.2</td></tr><tr><td>75</td><td>66.8</td><td>68.0</td><td>68.8</td><td>67.4</td><td>70.8</td><td>96.2</td></tr><tr><td>90</td><td>70.6</td><td>71.5</td><td>71.8</td><td>71.0</td><td>73.0</td><td>96.8</td></tr><tr><td>100</td><td>60.8</td><td>63.9</td><td>61.2</td><td>62.1</td><td>64.9</td><td>95.3</td></tr></table>
302
+
303
+ Table 3: Full Results for the QA datasets. There is overwhelming agreement between all metrics on the ranking between models trained on different synthetic fractions. EM: Exact Match, Inc: String Inclusion, R Inc: Reverse String Inclusion
304
+
305
+ <table><tr><td>Run</td><td>Dataset</td><td>Synthetic %</td><td>BLEU</td><td>ROUGE</td><td>BERTScore</td></tr><tr><td>0</td><td></td><td>95</td><td>38.78</td><td>54.80</td><td>90.34</td></tr><tr><td></td><td>FairytaleQA</td><td>97.5</td><td>37.19</td><td>52.09</td><td>86.17</td></tr><tr><td></td><td></td><td>100</td><td>26.10</td><td>43.82</td><td>77.03</td></tr><tr><td></td><td></td><td>95</td><td>34.45</td><td>51.23</td><td>90.35</td></tr><tr><td></td><td>QACNV</td><td>97.5</td><td>34.31</td><td>51.89</td><td>89.80</td></tr><tr><td></td><td></td><td>100</td><td>32.33</td><td>49.39</td><td>89.77</td></tr><tr><td></td><td></td><td>95</td><td>25.33</td><td>35.64</td><td>59.39</td></tr><tr><td></td><td>CoQA</td><td>97.5</td><td>42.78</td><td>57.88</td><td>78.84</td></tr><tr><td></td><td></td><td>100</td><td>19.11</td><td>27.57</td><td>51.44</td></tr><tr><td></td><td></td><td>95</td><td>72.89</td><td>77.26</td><td>97.22</td></tr><tr><td></td><td>ROPES</td><td>97.5</td><td>70.74</td><td>73.83</td><td>96.58</td></tr><tr><td></td><td></td><td>100</td><td>58.28</td><td>60.82</td><td>94.37</td></tr><tr><td>1</td><td></td><td>95</td><td>39.50</td><td>54.67</td><td>90.57</td></tr><tr><td></td><td>FairytaleQA</td><td>97.5</td><td>35.95</td><td>53.10</td><td>89.73</td></tr><tr><td></td><td></td><td>100</td><td>29.75</td><td>47.05</td><td>81.86</td></tr><tr><td></td><td></td><td>95</td><td>38.94</td><td>56.41</td><td>90.79</td></tr><tr><td></td><td>QACNV</td><td>97.5</td><td>40.06</td><td>57.64</td><td>90.45</td></tr><tr><td></td><td></td><td>100</td><td>37.46</td><td>54.11</td><td>88.57</td></tr><tr><td></td><td></td><td>95</td><td>34.84</td><td>47.16</td><td>63.35</td></tr><tr><td></td><td>CoQA</td><td>97.5</td><td>41.91</td><td>56.53</td><td>85.30</td></tr><tr><td></td><td></td><td>100</td><td>14.81</td><td>24.58</td><td>50.02</td></tr><tr><td></td><td></td><td>95</td><td>69.61</td><td>71.97</td><td>96.23</td></tr><tr><td></td><td>ROPES</td><td>97.5</td><td>69.72</td><td>72.80</td><td>96.58</td></tr><tr><td></td><td></td><td>100</td><td>62.08</td><td>65.44</td><td>95.03</td></tr><tr><td>2</td><td></td><td>95</td><td>37.97</td><td>53.98</td><td>90.34</td></tr><tr><td></td><td>FairytaleQA</td><td>97.5</td><td>37.65</td><td>52.44</td><td>87.83</td></tr><tr><td></td><td></td><td>100</td><td>29.26</td><td>49.70</td><td>88.56</td></tr><tr><td></td><td></td><td>95</td><td>38.07</td><td>54.49</td><td>90.13</td></tr><tr><td></td><td>QACNV</td><td>97.5</td><td>37.70</td><td>54.64</td><td>88.61</td></tr><tr><td></td><td></td><td>100</td><td>35.94</td><td>51.84</td><td>89.49</td></tr><tr><td></td><td></td><td>95</td><td>30.80</td><td>42.03</td><td>62.72</td></tr><tr><td></td><td>CoQA</td><td>97.5</td><td>30.40</td><td>41.02</td><td>56.83</td></tr><tr><td></td><td></td><td>100</td><td>23.42</td><td>37.20</td><td>63.31</td></tr><tr><td></td><td></td><td>95</td><td>63.46</td><td>65.80</td><td>95.28</td></tr><tr><td></td><td>ROPES</td><td>97.5</td><td>67.94</td><td>71.47</td><td>96.16</td></tr><tr><td></td><td></td><td>100</td><td>58.34</td><td>61.62</td><td>94.16</td></tr></table>
306
+
307
+ Table 4: Results on $n = {5000}$ from 95% to 100% for the QA datasets. There is overwhelming agreement between all metrics on the ranking between models trained on different synthetic fractions.
308
+
309
+ ![](images/f0c564b86a7a881207d69c82af2aac90b6c6edb3943f91f79b140be7d6b7adb5.jpg)
310
+
311
+ ![](images/6d6249bc518ce6d10727420bdffb11b97649a6fd7e91eb6e4830e578ef98892c.jpg)
312
+
313
+ ![](images/1e579d59bc31d62b4abc516391a58281f93ed6218a054296c71df05e5f46f506.jpg)
314
+
315
+ ![](images/0eab365892e0eda1cf2de48740ac9abf856433e1665b54564d48c7e99a328b0b.jpg)
316
+
317
+ ![](images/2a9b8e726ae4f647dff60dd8efc04952339a51f890cf2913aadceea9dd0df60c.jpg)
318
+
319
+ ![](images/e290623afcc1ba346e124c45234b137d2fd927adbcb0a28086fa21fe3174092b.jpg)
320
+
321
+ ![](images/da4b000bd3db42f3f38c0bcdec43140651059867091bad7936afa910ce6bf656.jpg)
322
+ Figure 8: Change in model performance as the proportion of synthetic points in the training data is varied. Across datasets, the performance decrease when moving from $0\%$ to $90\%$ synthetic data is often less than that of moving from $90\%$ to purely synthetic data.
323
+
324
+ ![](images/319c94694e2f3371b948e578f27680dfe4694f8696eb33a582d43c9b9584e244.jpg)
325
+
326
+ <table><tr><td>Train Sets</td><td>Test Set</td><td>Synthetic %</td><td>Test Accuracy</td></tr><tr><td rowspan="5">FEVER, SciFact</td><td rowspan="5">WANLI</td><td>0</td><td>69.98</td></tr><tr><td>25</td><td>67.56</td></tr><tr><td>50</td><td>65.86</td></tr><tr><td>75</td><td>64.82</td></tr><tr><td>100</td><td>64.36</td></tr><tr><td rowspan="5">WANLI, SciFact</td><td rowspan="5">FEVER</td><td>0</td><td>83.01</td></tr><tr><td>25</td><td>80.64</td></tr><tr><td>50</td><td>79.22</td></tr><tr><td>75</td><td>78.94</td></tr><tr><td>100</td><td>76.01</td></tr><tr><td rowspan="5">FEVER, WANLI</td><td rowspan="5">SciFact</td><td>0</td><td>71.76</td></tr><tr><td>25</td><td>69.75</td></tr><tr><td>50</td><td>69.82</td></tr><tr><td>75</td><td>66.42</td></tr><tr><td>100</td><td>64.41</td></tr></table>
327
+
328
+ Table 5: Test accuracy when replacing human data with synthetic data in the out-of-distribution setting. Using more synthetic data leads to performance declines even in the OOD setting, showing that human data is of higher quality and the results from the main text cannot be explained by a spurious correlation between the human test and human training samples.
329
+
330
+ ![](images/512580da524569514bf4dd2a052622fef7fdb251c2737ce0067c2c1f665fbff2.jpg)
331
+ Figure 9: Change in model performance as the proportion of synthetic points in the training data is increased on multilingual fact verification datasets (splits of X-Fact). We observe the same trend as those from earlier experiments, confirming that our results are not limited to the English language. While the phenomenon is reproduced, the threshold of replacement at which we observe a precipitous decline is not the same across languages.
332
+
333
+ ![](images/64ca6708cd1718aec674686fdb39a4e2eeefb2d189c7101bb712fa3d1ad2699a.jpg)
334
+
335
+ ![](images/450dc2f64754dd1553bb37fb29635d2f02a7eb2af17a6f9fee9025b6f8666d0a.jpg)
336
+
337
+ ![](images/655310dbe163de4ee9f2df4b433ed8f0deb9c703d344ff92b14c8f5afad9fa98.jpg)
338
+
339
+ ![](images/e3a1e0570780fdcba8bd2722f5970a91fcb1d1f5ab611996e7ea49dec746e973.jpg)
340
+
341
+ ![](images/7fda6d8bd308be69c41a7ddd54feb04bfe3e98160f6251c4cb37c1f320a65076.jpg)
342
+
343
+ ![](images/e5a012c216881b152adaf2dae3e39a75f9b32d79cac5dce69b9bb4e87d05a90f.jpg)
344
+
345
+ ![](images/31578552cf5a4364966f602573944d7a801843dcebb0ae4045354c67acedbf93.jpg)
346
+ Figure 10: Model performance as the synthetic proportion of the training data varies from $95\%$ to $100\%$. Across all datasets and random seeds, having just $2.5\%$ of the training dataset be human-generated boosts performance.
347
+
348
+ ![](images/af5b359fa3f9704c1d8f238520875677b83062405369d0068dfb0e93781a006f.jpg)
349
+
350
+ ![](images/2826d1adf8688e8015149ade5bde669babf378fe26ea96ce030482d36115cd99.jpg)
351
+
352
+ ![](images/6334bbc9b83f0321f1a97213c36aea58457c043e4e136146ab5ea7289a2fb39e.jpg)
353
+
354
+ ![](images/8e156ea21b66b95f9e217db81e92a9f7c3061349942a0d7164e4cd056163d39a.jpg)
355
+
356
+ ![](images/c315da6b4eae98bf2cced938bd020bf19f8e70b47f151318e424bf4f64491944.jpg)
357
+
358
+ ![](images/2bc4746fe1940a4d170569c8559f22838e0beb6f4a54aa6420b96f5aa8cdda38.jpg)
359
+ Figure 11: Results hold consistently on Fact Verification datasets when using Mistral7B as the fine-tuning model and GPT-4 as the prompting model.
360
+
361
+ ![](images/804e4d56af202baaf91db6f82fca4c92aa186333c70bebc7ff04e5b1279d7df9.jpg)
362
+
363
+ ![](images/2e8665534d7e8540e61c782a5ed98322b067e015abe35ca9ec256288f4158444.jpg)
364
+ Figure 12: Results hold when using Claude-3.5-Sonnet as the prompting model, showing that the phenomenon is not particular to synthetic data from GPT-based models.
365
+
366
+ <table><tr><td rowspan="2">Dataset</td><td colspan="2">Claim / Question</td></tr><tr><td>Synthetic</td><td>Human</td></tr><tr><td>FEVER</td><td>35.78</td><td>42.76</td></tr><tr><td>WANLI</td><td>15.15</td><td>20.10</td></tr><tr><td>SCIFACT</td><td>7.12</td><td>20.92</td></tr><tr><td>FACTIFY</td><td>14.50</td><td>23.93</td></tr><tr><td>NarrativeQA</td><td>30.92</td><td>8.25</td></tr><tr><td>CoQA</td><td>7.08</td><td>8.39</td></tr><tr><td>FairyTaleQA</td><td>22.59</td><td>16.85</td></tr><tr><td>ROPES</td><td>28.73</td><td>41.42</td></tr></table>
367
+
368
+ Table 6: 4-gram overlap % between all synthetic and human-generated claims / questions for each dataset. On several datasets, synthetic claims have a lower overlap than their human-generated counterparts.
369
+
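+ As an illustration, the overlap statistic can be computed along the following lines. This is a minimal sketch, not the exact script used for Table 6: the tokenization and the choice of what the claims/questions are compared against (here, an arbitrary second collection of texts) are assumptions.
+
+ ```python
+ from typing import Iterable, Set, Tuple
+
+ def four_grams(text: str) -> Set[Tuple[str, ...]]:
+     # lowercase whitespace tokenization; the paper's exact tokenizer is not specified here
+     tokens = text.lower().split()
+     return {tuple(tokens[i:i + 4]) for i in range(len(tokens) - 3)}
+
+ def four_gram_overlap_pct(texts_a: Iterable[str], texts_b: Iterable[str]) -> float:
+     # percentage of distinct 4-grams from texts_a that also occur somewhere in texts_b
+     grams_a = set().union(*(four_grams(t) for t in texts_a))
+     grams_b = set().union(*(four_grams(t) for t in texts_b))
+     if not grams_a:
+         return 0.0
+     return 100.0 * len(grams_a & grams_b) / len(grams_a)
+ ```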
370
+ ![](images/b0b6dc8c00af0bfe9367fe91b41a40f368d0360860fec20334ec4ce1f2acae55.jpg)
371
+
372
+ ![](images/af80b0636d15a00cc97e9ae4207bc8273a88c806bb9861707321dde7ea1e42a3.jpg)
373
+
374
+ ![](images/ccae6cbf7660a2496d4eea93aada65ab6bb1f595fe109f165e9246310bd7a6e0.jpg)
375
+ Figure 13: Results hold consistently on Fact Verification datasets when using models of different scales.
376
+
377
+ ![](images/e9e4dc876f7ebd152d555b29ac641d1c7f858212f74d926d6b8d528d4d0ae337.jpg)
378
+
379
+ ![](images/100ef2dfb7bd7d14b6c20843326153bbd9e0c55a57784ead507e3a3c9db1d12a.jpg)
380
+
381
+ ![](images/243b279ed40d67d1a7f379e48426b9474f43bed406840f5e0fdda64f0b802bd8.jpg)
382
+ Figure 14: Results hold consistently on Question Answering datasets when using Mistral7B as the fine-tuning model and GPT-4 as the prompting model.
383
+
384
+ ![](images/c24abb84acf35186bbf4efabb16ef4326ab50abbab44458be8faf50c95416913.jpg)
385
+
386
+ ![](images/13db93d411965e0db743b833c8f4722e90d8829ea54de6ca458dd59217be1eba.jpg)
387
+
388
+ ![](images/04aebcb79d6e107a9fba81de93b3e37d9aaaa7485b7be5358f6165d8be66c201.jpg)
389
+
390
+ ![](images/4874c484eccd3aa504f0bd69bdacdb438846930cd6e2a6289f0a699e161beeb2.jpg)
391
+
392
+ ![](images/f0c3e6535bdb7288bc31f9e4361b3259decfd6e699d0301481a13f71e1145b7e.jpg)
393
+ Figure 15: Results hold when using Chain-of-Thought prompting on GPT-3.5.
394
+
395
+ ![](images/c987497c3ef1c8effcb507bcb419b1a65dca1e62abc615d8c3c1ea8dae480974.jpg)
396
+
397
+ ![](images/50999241f61b68c472b29e0b2a8f05b752818ddb8c4298e48e933e986b71acdc.jpg)
398
+
399
+ ![](images/471f06a35d722ed9138e6ae84bf1ca6e048b4d6ebc1835ce72764678faf17a9b.jpg)
400
+
401
+ ![](images/a0df9bc0e0d0e4cc5a30756f6d152a87b93bb3e8045b3788ce8f34d4fc2a49dc.jpg)
402
+
403
+ ![](images/b007840d7780f46bdf13c215b823229409eab2bd09f403a5adef4dad6f75305a.jpg)
404
+
405
+ ![](images/32e8a37e5cb24bb27be976ab0308f27db31881311df19925358a44fccc72d781.jpg)
406
+ Figure 16: Model performance as the synthetic proportion of the training data varies from $95\%$ to $100\%$ with a total of $n = 3000$ training points. Across all runs on all datasets, including just 75 real data points can boost performance.
407
+
408
+ ![](images/9df3fe636398e849fef0ddd9161c0739cc6c5e91fe75939c23fa1ba6dd21392b.jpg)
409
+
410
+ ![](images/59687ab37bb6783ee52df8a06438b6a1f1ab7bbbe6cc59d2769f0a3ed3e3b9ed.jpg)
411
+
412
+ ![](images/d0cc112d991a2a02aa53bf63deb03eafd940b292e5924f47fd894dd3b0de1cce.jpg)
413
+
414
+ ![](images/d77d098c15b51d2133c5b79ba5891f18c20931557e8f15f6e5f68464fa6f2174.jpg)
415
+
416
+ ![](images/0aab010ba7072e36cee7a9c4033901c6faf03e52f2fd3b2e6388cef17e0007ce.jpg)
417
+
418
+ ![](images/97c0443c339670ee6085e26df85e0eeab9bced7b481112f7d88cf9c897db4380.jpg)
419
+ Figure 17: Model performance as the synthetic proportion of the training data varies from $95\%$ to $100\%$ with a total of $n = 1000$ training points. While the most common trend is that including real data improves performance, the results are much more unstable.
420
+
421
+ ![](images/4f03d016568c5f3588e2cceac77d2f5c4a76020047d1dc28fd9a66261dfd4006.jpg)
422
+
423
+ ![](images/e4a778d58d018ef98b6b09053013e5f9a922f18b2f739734d07f7ebcddd89752.jpg)
424
+
425
+ ![](images/fc813b5f4006f3edac37d5cf5d246554ef356d83f955aec40f051cd555d5f480.jpg)
426
+
427
+ ![](images/7777fc0ef39cc4d03034cf2694f817a4994002b227c01d791e63972ca4c9a285.jpg)
428
+ Figure 18: Adding 200 real data points is as effective as adding an order of magnitude more synthetic data points.
429
+
430
+ ![](images/51e12b7de5af8ddb73920f0ac86944e1884c4cb934ce3884053fbb1be3ea51f7.jpg)
431
+
432
+ # Fact Verification Prompt Example
433
+
434
+ System: Given the context, come up with a True claim.
435
+
436
+ Answer in the format below:
437
+
438
+ Input: This is the third time in two years that the company has lost its director of legal affairs.
439
+
440
+ Output: Claim: The company has lost a director of legal affairs twice in two years. | Label: True
441
+
442
+ Input: A study of the health status of the elderly in the United States.
443
+
444
+ Output: Claim: The health status of the elderly can be studied. | Label: True
445
+
446
+ Input: For more than a decade, the town has had a curfew for children, who are to be in bed by 10 p.m.
447
+
448
+ Output: [MODEL OUTPUT] <- Synthetic Claim
449
+
450
+ # Question Answering Prompt Example
451
+
452
+ System: Given the context, come up with a question and answer pair. Answer in the format below:
453
+
454
+ Input: A large log placed in a fire will burn relatively slowly. If the same mass of wood were added to the fire in the form of small twigs, they would burn much more quickly. ....Group S made a fire using large logs, while group F made a fire using small twigs and sawdust. Both groups used the same volume of wood.
455
+
456
+ Output: Question: Which group made a fire from more particles of wood? | Answer: group F
457
+
458
+ Input: [NEW CONTEXT]
459
+
460
+ Output: [MODEL OUTPUT] <- Synthetic Question + Ans
461
+
462
+ Figure 19: Example prompts used to synthetically generate (claim, label) or (question, answer) pairs using a new context / evidence text.
463
+
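+ For concreteness, the following sketch shows one way the fact-verification prompt above could be assembled as chat messages. It is illustrative only: `call_llm` is a placeholder for whichever completion API is used (GPT-3.5/4, Claude-3.5-Sonnet, etc.), not a function defined in this work.
+
+ ```python
+ FV_SYSTEM = "Given the context, come up with a True claim.\n\nAnswer in the format below:"
+
+ FV_FEW_SHOT = [
+     ("This is the third time in two years that the company has lost its director of legal affairs.",
+      "Claim: The company has lost a director of legal affairs twice in two years. | Label: True"),
+     ("A study of the health status of the elderly in the United States.",
+      "Claim: The health status of the elderly can be studied. | Label: True"),
+ ]
+
+ def build_fv_messages(new_context: str) -> list[dict]:
+     # assemble the few-shot fact-verification prompt from Figure 19 as chat messages
+     messages = [{"role": "system", "content": FV_SYSTEM}]
+     for context, completion in FV_FEW_SHOT:
+         messages.append({"role": "user", "content": f"Input: {context}\nOutput:"})
+         messages.append({"role": "assistant", "content": completion})
+     messages.append({"role": "user", "content": f"Input: {new_context}\nOutput:"})
+     return messages
+
+ # synthetic_claim = call_llm(build_fv_messages(evidence_text))  # call_llm: placeholder for the prompting model
+ ```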
464
+ ![](images/ef5e330167b887aca0c65e8e64fc5e3504b36d7d14121c2abe4cb996abbea6a2.jpg)
465
+
466
+ ![](images/1734ee370234d216c94c2dd590e31be5a3b8c5095aeadcfe4c6988d2fc0a540f.jpg)
467
+
468
+ ![](images/62bf285eb17d1bbcce7a18faca03316a36a3a45811baa8b519628a3180790954.jpg)
469
+
470
+ ![](images/a9f06df5e85114222dddaf10ce43c5427922e712743f684aa6e3706b8ef7765e.jpg)
471
+
472
+ ![](images/51edce1b7467c8af8bd226c708ad9fff1364b1db38a518ea4adc3b092a185573.jpg)
473
+
474
+ ![](images/bf7617244218606b418d69e0df50d1bb6414505ebbb3dcb02a6851be9feff5c5.jpg)
475
+
476
+ ![](images/906e182c6fcd6a0cd5eb816bd96bad5953febeb278ce46ee53b5f54c32027e7c.jpg)
477
+ Figure 20: Synthetic data is, on average, longer than its human generated counterpart. This trend can be seen on FV (claims) and QA (claims and questions), and holds across prompt models and strategies.
478
+
479
+ ![](images/b5736fb589f9e3ed691096dba0389f2c894ad055ec5565e4d61d016dd80c1e3a.jpg)
480
+
481
+ ![](images/cb8d6601a7cb9675d7a91bc50a59242088202f6581743b438709df3aa88785e2.jpg)
482
+
483
+ ![](images/81f562ff1e3a24da1a5040a37b1d89fd5e44a64f58f6f2d260bdca3fcebd67f7.jpg)
484
+
485
+ ![](images/206ec340706a4c30e390744e153a4c736544e2f4a22e02b61a77c6121e8cb812.jpg)
486
+
487
+ ![](images/f8ea2bfe3730b1e08963be8c76744c4c0de153d4820761de2d0a9469c7c6c578.jpg)
488
+
489
+ ![](images/9124698ae37fd76888e85e3bf4df9f1acc2d59753660478ea201d16e9fc7dc8b.jpg)
490
+
491
+ ![](images/e099a7d22267a7dcbf29457a2db6cb9d2c13c1a7f2d426e9c2e86a07754384d8.jpg)
492
+
493
+ ![](images/26645681a23fda6e93428443c0219a2f32331ccaf7cd365db3db14d4ae88531b.jpg)
494
+ Figure 21: Synthetic data generally exhibits a higher maximum BLEU score measured against sentences from the context. This suggests that synthetic questions, answers, and claims are more extractive than their human-generated counterparts.
495
+
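+ The per-example statistic behind Figure 21 can be approximated as below: the maximum sentence-level BLEU of a claim/question/answer against any single sentence of its context. This is a sketch using NLTK (requires the punkt tokenizer data); the paper's exact BLEU configuration may differ.
+
+ ```python
+ from nltk.tokenize import sent_tokenize, word_tokenize
+ from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
+
+ def max_bleu_vs_context(text: str, context: str) -> float:
+     # highest sentence-level BLEU of `text` against any single context sentence;
+     # high values suggest the text is largely copied from one sentence of the context
+     hypothesis = word_tokenize(text.lower())
+     smooth = SmoothingFunction().method1
+     scores = [
+         sentence_bleu([word_tokenize(sentence.lower())], hypothesis, smoothing_function=smooth)
+         for sentence in sent_tokenize(context)
+     ]
+     return max(scores) if scores else 0.0
+ ```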
496
+ ![](images/2c52b5f9e58a95e5ba26b06e57afc33ffee04ef1b4ff02ce58910301fd907adb.jpg)
497
+
498
+ ![](images/39038378e264aed2a7e4cdd5faea5808122650c63dfa2da7b051d2d527b3e0b0.jpg)
499
+
500
+ ![](images/f98b673dcb530fa26a283d5a25af6a9b62e322b000159647b99da5aaa7db1075.jpg)
501
+
502
+ ![](images/20f5e8f1c4e4589ba3e5b1d882399525764421c994aa51d0e28a23f78aa74e8a.jpg)
503
+
504
+ ![](images/e33dfdd36458c62ec65aa7401bcdbb81356c8b5a4e1c6e00acac06f66318af92.jpg)
505
+
506
+ ![](images/f97261bbd934b92557e0a716ffda321748382bb48394229a90e5554c484beb00.jpg)
507
+ Figure 22: Synthetic data typically chooses more diverse sources (in terms of answer location or claim location in the evidence text), while humans tend to favor the start of the evidence text.
508
+
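+ One simple way to compute the location statistic of Figure 22 is the normalized character offset of the (first) verbatim occurrence of the answer or claim span in the evidence text; the sketch below is our approximation of that idea.
+
+ ```python
+ def relative_location(span: str, evidence: str) -> float | None:
+     # normalized start position of the span in the evidence text:
+     # 0.0 = beginning, close to 1.0 = end; None if the span does not occur verbatim
+     if not evidence:
+         return None
+     index = evidence.find(span)
+     return index / len(evidence) if index >= 0 else None
+ ```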
509
+ ![](images/36631375ed0c43b2c219d606f592d7b17856599ec621c38136853488c55b2ed0.jpg)
alittlehumandatagoesalongway/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:973c2e51d0ac5e78a36bac8f8ea32d400daccc71966996184bc5ee4b1803a2cb
3
+ size 1821201
alittlehumandatagoesalongway/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cd9f2fd24adf36db294436cc5dd537c1f48158e336de27011ca1a6a375680aa2
3
+ size 543738
ameasureofthesystemdependenceofautomatedmetrics/51648182-b43a-4356-8332-6653ee9be2f5_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3c78ab62a8a18766fbc1a7f45d1ba6662c438e6cb7362590f29df50c5a19e3b7
3
+ size 76409
ameasureofthesystemdependenceofautomatedmetrics/51648182-b43a-4356-8332-6653ee9be2f5_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a16a0ba4a5078c742ca9d40f8db5bcad3e5788734f48e7c142f4b08bcb87ba88
3
+ size 86575
ameasureofthesystemdependenceofautomatedmetrics/51648182-b43a-4356-8332-6653ee9be2f5_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:71b35bc8ab4de2a388d3a72bea02220d50010923f0279f87c760a36621ea8f9d
3
+ size 419101
ameasureofthesystemdependenceofautomatedmetrics/full.md ADDED
@@ -0,0 +1,227 @@
 
 
 
 
1
+ # A Measure of the System Dependence of Automated Metrics
2
+
3
+ Pius von Däniken and Jan Milan Deriu and Mark Cieliebak
4
+ Centre for Artificial Intelligence
5
+ ZHAW School of Engineering
6
+ {vode,deri,ciel}@zhaw.ch
7
+
8
+ # Abstract
9
+
10
+ Automated metrics for Machine Translation have made significant progress, with the goal of replacing expensive and time-consuming human evaluations. These metrics are typically assessed by their correlation with human judgments, which captures the monotonic relationship between human and metric scores. However, we argue that it is equally important to ensure that metrics treat all systems fairly and consistently. In this paper, we introduce a method to evaluate this aspect.
11
+
12
+ # 1 Introduction
13
+
14
+ Recent years have seen significant advances in machine translation (MT), marked notably by the introduction of the transformer architecture (Vaswani et al., 2017). Current large-scale commercial systems such as GPT (Brown et al., 2020) continue this trend and show promising results (Kocmi et al., 2023; Hendy et al., 2023; Wu and Hu, 2023). A critical supplement to these advancements is thorough and reliable evaluation procedures, which are essential not only for measuring overall progress but also for effectively comparing different systems. While evaluation based on human raters is still considered the gold standard, it is expensive and time-intensive. Therefore, considerable efforts have been made to develop automated metrics for assessing translation quality. Notably, the WMT Metrics series of shared tasks are dedicated to this purpose (Freitag et al., 2023, 2022, 2021, i.a.). Automated metrics usually assign a scalar ${}^{1}$ quality rating to a candidate translation based on the source segment and a reference translation. A system-level rating is derived by averaging the segment ratings over a test set.
15
+
16
+ To measure a metric's usefulness, we usually measure two aspects: its correlation to human judgments on the segment level (which checks if there is a monotonic function between metric ratings and human ratings) and whether the system-level ratings can reproduce the same ranking as human ratings (Kocmi et al., 2021; von Däniken et al., 2024).
17
+
18
+ ![](images/4174f76950f12fb785a5a191f3eda931d2c09b288dd74a0a21ce06c77692c86a.jpg)
19
+ Figure 1: Average Human Ratings associated with XCOMET scores on Chinese to English (zh-en) WMT 23 data. We show scores for all system in aggregate (global) and two individual systems.
20
+
21
+ In this paper, we argue that this evaluation of metrics is insufficient, as it ignores a central requirement, namely, that it should treat all systems under evaluation equally. As stated more colloquially, a measuring stick should not change length depending on the measured object. However, this is exactly what we observe in current metrics.
22
+
23
+ Consider Figure 1, which shows the expected human rating for each score of the XCOMET metric (the best metric in the WMT23 metrics task, with a very high segment-level correlation of 0.65 for the zh-en language pair) (Freitag et al., 2023). That is, for each possible value that XCOMET may assume, we show the expected human rating and the $95\%$ confidence interval (computed using Isotonic Regression and bootstrap sampling; see Sections 2 and 3 for the details).
24
+
25
+ The global curve (blue) shows the average human score for each metric score if computed over all systems under evaluation in the WMT23 dataset (in standard correlation to human judgment evaluation, we only measure whether this curve is monotonic). In contrast, Lan-BridgeMT (best system according to humans) and NLB-Greedy (lowest-rated system according to humans) show the average human score for each metric score when computed on one separate system only. For instance, an XCOMET score of 0.7 corresponds to an average Human-MQM score of $-5.2$ for Lan-BridgeMT, and a Human-MQM score of $-10.2$ for NLB-Greedy.
26
+
27
+ This leads to the following consequence: For Lan-BridgeMT, higher human scores are associated with lower metric scores than the global curve, which leads to an underestimation of Lan-BridgeMT's performance, according to XCOMET. The opposite effect is visible for NLB-Greedy, which is overestimated and, in fact, gains 3 ranks (from 15th to 12th place) when comparing the metric and human ranking (see also Table 1 in Section 3). Thus, a metric that exhibits a high global segment-level correlation to human judgments can lead to wrong system-level rankings. This observation leads us to the central claim of this paper: The cause of the discrepancy between the correlation on the segment level and the final system ranking is the metric's dependency on the system under evaluation.
28
+
29
+ The main position of this paper is that when evaluating a novel metric, one ought to measure the dependency on the system under evaluation as well, alongside the correlation to human judgment. In the following, we will formalize this dependency of the relation between human and metric ratings on the system under evaluation and derive a measure for quantifying this effect.
30
+
31
+ # 2 Averaging Metric Scores
32
+
33
+ Assume we are given a set of $K$ machine translation systems $\pi_{k}$ to evaluate. A translation system maps an input sentence $i\in \mathcal{I}$ in a fixed source language to an output sentence $o\in \mathcal{O}$ in a fixed target language: $\pi_k:\mathcal{I}\to \mathcal{O}$ . The usual human evaluation scenario involves curating a test set of $N$ inputs $\mathcal{T} = \{i^{(j)}|1\leq j\leq N\} \subset \mathcal{I}$ for which we collect the output of each system $\pi_{k}$ for each input $i\in \mathcal{T}$ , and then ask human annotators to produce ratings. This results in a set of ratings $\left\{(h_1^{(j)},\ldots ,h_k^{(j)},\ldots ,h_K^{(j)})|1\leq j\leq N\right\}$ , where $h_k^{(j)}\in \mathbb{R}$ is a scalar rating provided by human annotators measuring the quality of the translation provided by $\pi_{k}$ for input $i^{(j)}$.
34
+
35
+ We will assume that higher human ratings indicate higher translation quality. In this setting, it is natural to measure the overall quality of system $\pi_{k}$ by the average human rating it achieves, $\hat{\mu}_k^H = \frac{1}{N}\sum_{j = 1}^N h_k^{(j)}$ . This is an estimator of the expected human rating $\mu_k^H = \mathbb{E}[h_k]$ achieved by $\pi_{k}$ for any input in $\mathcal{I}$ , assuming that $\mathcal{T}$ is appropriately chosen.
36
+
37
+ In many cases, we want to replace human raters with an automated scalar metric $M: \mathcal{I} \times \mathcal{O} \to \mathbb{R}$ , which maps an input and translation to a scalar value. For our test set $\mathcal{T}$ , we can collect all metric ratings $\left\{(m_1^{(j)}, \ldots, m_k^{(j)}, \ldots, m_K^{(j)}) | 1 \leq j \leq N\right\}$ , where $m_k^{(j)} = M(i^{(j)}, \pi_k(i^{(j)}))$ , the metric rating for input $i^{(j)}$ and translation by $\pi_k$ . In this case, it is common to use the sample average $\hat{\mu}_k^M = \frac{1}{N} \sum_{j=1}^{N} m_k^{(j)}$ to measure the quality of system $\pi_k$ , which is an estimator of the expected metric rating $\mu_k^M = \mathbb{E}[m_k]$ achieved by $\pi_k$ .
38
+
39
+ The goal of automated evaluation is to use $\hat{\mu}_k^M$ as a proxy measure for $\mu_k^H$ , in particular, to rank the systems $\pi_1,\dots ,\pi_K$ according to their performance. In the following, we will study the relationship between $\mu_k^H$ and $\mu_k^M$ , which is expressed by an unknown function $f_{G}$ that maps from the metric scale to the human scale. There are two requirements to this function: first, that it is monotonic (i.e., that it respects the order of the metric scale), and second, that it does not depend on the system under evaluation $\pi_k$ (i.e., that it is the same for all systems). The goal is to find the relation between $\mu_k^H$ and $\mu_k^M$ . The idea is to express $\mathbb{E}[h_k]$ in terms of an expectation over metric ratings as follows (for full derivation, see Appendix A):
40
+
41
+ $$
42
+ \mathbb {E} [ h _ {k} ] = \mathbb {E} _ {p _ {k} (m)} [ \mathbb {E} _ {p _ {k} (h)} [ h | m ] ] \tag {1}
43
+ $$
44
+
45
+ The crucial element of Equation 1 is the conditional expectation $\mathbb{E}_{p_k(h)}[h|m]$ . Here we consider the expectation according to $p_k(h)$ , the distribution of human ratings for system $\pi_k$ . Equation 1 describes the relationship between $\mu_k^H$ and $\mu_k^M$ by expressing the expected human rating in terms of an expectation over metric ratings. We interpret this element as a function $f_{k}$ , which takes a metric rating as input and returns the expected human rating. Equation 1 yields a function $f_{k}$ for each system separately, which is not necessarily the same across systems.
46
+
47
+ At this point, we can restate the introductory discussion using our formalism. When averaging metrics $\hat{\mu}_k^M$ to rank systems, we implicitly assume that there is a global function $f_{G}$ that is equal to all the system-specific functions $f_{k}$ , i.e., $f_{G} = f_{1} = \dots = f_{K}$ , and thus, only measure if $f_{G}$ is monotonic (through correlation to human judgments). However, as shown in Figure 1, this assumption does not hold in practice (where blue is $f_{G}$ , and we have an $f_{k}$ for the two other systems respectively). To show that this is insufficient, we consider the effects of violating the assumption. Let us assume $f_{1} \neq f_{2}$ , but both are monotonic. Consider the extreme example that $\mu_1^M = \mu_2^M$ , i.e., systems $\pi_1$ and $\pi_2$ are of the same quality under the metric. However, consider the case $f_{1}(m) = f_{2}(m) + C$ , $C > 0$ . Then $\frac{1}{N} \sum_{j} f_{1}(m_{1}^{(j)}) = C + \frac{1}{N} \sum_{j} f_{2}(m_{1}^{(j)}) > \frac{1}{N} \sum_{j} f_{2}(m_{2}^{(j)})$ , thus, yielding that $\pi_1$ is better than $\pi_2$ in human space. This shows the necessity of measuring the monotonicity of a global function $f_{G}$ and the dependence of the metric on the systems under evaluation.
48
+
49
+ We first introduce the Expected Deviation (ED), which measures the difference between $f_{G}$ and $f_{k}$ for all $k \in \{1 \dots K\}$ and tells us how much a system is over- or underestimated according to the metric. That is the difference
50
+
51
+ $$
52
+ E D (k) = \frac {1}{N} \sum_ {j = 1} ^ {N} f _ {G} \left(m _ {k} ^ {(j)}\right) - \frac {1}{N} \sum_ {j = 1} ^ {N} f _ {k} \left(m _ {k} ^ {(j)}\right) \tag {2}
53
+ $$
54
+
55
+ This is equivalent to $\mu_k^G -\mu_k^H$ , where $\mu_k^G = \frac{1}{N}\sum_{j = 1}^{N}f_G(m_k^{(j)})$ ; thus, we measure the difference between the average rating according to the global function and the average rating according to the system-specific function, which corresponds to the average human rating. Note that a mis-ranking occurs if one system is severely overrated while another is severely underrated. Thus, we define the system dependence score SysDep $(\mathcal{M})$ as the worst case of this effect:
56
+
57
+ $$
58
+ \operatorname {S y s D e p} (\mathcal {M}) = \max _ {\pi_ {k}} E D (k) - \min _ {\pi_ {k}} E D (k) \tag {3}
59
+ $$
60
+
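+ As a minimal sketch (assuming the fitted estimates of $f_G$ and each $f_k$ are available as callables, e.g. via the code in Appendix E), ED and SysDep can be computed as follows:
+
+ ```python
+ from typing import Callable, Dict
+ import numpy as np
+
+ def expected_deviation(
+     f_global: Callable[[np.ndarray], np.ndarray],  # estimate of the global function f_G
+     f_system: Callable[[np.ndarray], np.ndarray],  # estimate of f_k for system pi_k
+     metric_ratings: np.ndarray,                    # metric scores m_k^(j) for system pi_k
+ ) -> float:
+     # Equation 2: average remapped rating under f_G minus the average under f_k
+     return float(np.mean(f_global(metric_ratings)) - np.mean(f_system(metric_ratings)))
+
+ def sysdep(expected_deviations: Dict[str, float]) -> float:
+     # Equation 3: worst-case spread of the expected deviations over all systems
+     values = list(expected_deviations.values())
+     return max(values) - min(values)
+ ```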
61
+ # 3 Experiments
62
+
63
+ Estimating the Conditional Expectation. Even though the functions $f_{k}$ and $f_{G}$ are unknown in general, we can estimate them from data. We will use Isotonic Regression (IR) (Barlow and Brunk, 1972) for this purpose, which estimates a monotonic function $\hat{f}_{k}$ minimizing $\sum_{j} (\hat{f}_{k}(m_{k}^{(j)}) - h_{k}^{(j)})^{2}$ . To estimate $f_{G}$ , we utilize the same approach, pooling all paired data from all systems.
64
+
65
+ To compute the SysDep of a metric, we compute the ED for each MT system under that metric. For this, we compute the average human rating $\hat{\mu}_k^H = \frac{1}{N_H}\sum_{j=1}^{N_H}h_k^{(j)}$ , the average metric rating $\hat{\mu}_k^M = \frac{1}{N_M}\sum_{j=1}^{N_M}m_k^{(j)}$ , as well as the average remapped rating $\hat{\mu}_k^G = \frac{1}{N_M}\sum_{j=1}^{N_M}\hat{f}_G(m_k^{(j)})$ for each MT system. We provide our code in Appendix E.
66
+
67
+ Data. We rely on data from the WMT 23 Metrics shared task (Freitag et al., 2023). The data includes translations for 3 language pairs: English to German (en-de), Hebrew to English (he-en), and Chinese to English (zh-en). The translations were produced by 12-15 systems (depending on the language pair) which participated in the general MT task (Kocmi et al., 2023). Human ratings are available in the form of MQM annotations (Lommel et al., 2014), which are based on error-span annotations by experts that are subsequently transformed into a numeric value by assigning scores to errors based on their severity. Here, we will present results for the XCOMET (Guerreiro et al., 2023) metric (best metric according to correlation to human judgments) and the zh-en language pair, where we have access to $N_{M} = 1976$ segments per system rated by the metric and $N_{H} = 1177$ of these segments rated with human MQM ratings. Results for the other language pairs and an additional metric are shown in Appendix B. To estimate the conditional expectation functions $f_{k}$ , we use the 1177 paired ratings for each system $\pi_{k}$ . We employ $B = 200$ bootstrap samples of the paired data to fit $B$ IR models. Our estimate, $\hat{f}_{k}$ , represents the average of these $B$ IR models. In Figure 1, we also present the range between the $2.5\%$ and $97.5\%$ percentiles.
68
+
69
+ Results. We show the results in Table 1. We can see that the ED ranges from -0.82 to 1.996, thus yielding a SysDep score of 2.816. We see that both Lan-BridgeMT and GPT4-5shot are underrated by the metric (negative $ED$ ), but Lan-BridgeMT more so, enough to invert their order. At the bottom of the ranking, we see a relatively large absolute $ED$ . Ranking errors reflect an interplay between the systems' rating gap and the $EDs$ . For example, Online-A loses 2 ranks according to the metric even though it has the lowest absolute $ED$ . We also note that even though $\hat{f}_G$ is monotonic, the ranking between the metric and the remapped scores does not match completely.
70
+
71
+ <table><tr><td rowspan="2"></td><td colspan="2">Human</td><td colspan="2">Metric</td><td colspan="2">Remapped</td><td>Exp. Deviation</td></tr><tr><td>μHk</td><td>R</td><td>μMk</td><td>R</td><td>μGk</td><td>R</td><td>ED</td></tr><tr><td>Lan-BridgeMT</td><td>-2.100</td><td>1</td><td>0.889</td><td>2</td><td>-2.920</td><td>2</td><td>-0.820</td></tr><tr><td>GPT4-5shot</td><td>-2.305</td><td>2</td><td>0.893</td><td>1</td><td>-2.800</td><td>1</td><td>-0.494</td></tr><tr><td>Yishu</td><td>-3.231</td><td>3</td><td>0.880</td><td>4</td><td>-3.179</td><td>4</td><td>0.052</td></tr><tr><td>ONLINE-B</td><td>-3.385</td><td>4</td><td>0.879</td><td>5</td><td>-3.188</td><td>5</td><td>0.197</td></tr><tr><td>HW-TSC</td><td>-3.398</td><td>5</td><td>0.883</td><td>3</td><td>-3.080</td><td>3</td><td>0.318</td></tr><tr><td>ONLINE-A</td><td>-3.785</td><td>6</td><td>0.856</td><td>8</td><td>-3.812</td><td>8</td><td>-0.027</td></tr><tr><td>ONLINE-Y</td><td>-3.792</td><td>7</td><td>0.868</td><td>6</td><td>-3.479</td><td>6</td><td>0.313</td></tr><tr><td>ONLINE-G</td><td>-3.857</td><td>8</td><td>0.864</td><td>7</td><td>-3.607</td><td>7</td><td>0.250</td></tr><tr><td>ONLINE-W</td><td>-4.062</td><td>9</td><td>0.848</td><td>9</td><td>-4.165</td><td>10</td><td>-0.103</td></tr><tr><td>ZengHuiMT</td><td>-4.232</td><td>10</td><td>0.846</td><td>10</td><td>-4.140</td><td>9</td><td>0.092</td></tr><tr><td>IOL-Research</td><td>-4.586</td><td>11</td><td>0.843</td><td>11</td><td>-4.251</td><td>11</td><td>0.335</td></tr><tr><td>ONLINE-M</td><td>-5.433</td><td>12</td><td>0.820</td><td>15</td><td>-4.907</td><td>15</td><td>0.526</td></tr><tr><td>ANVITA</td><td>-6.078</td><td>13</td><td>0.830</td><td>13</td><td>-4.602</td><td>13</td><td>1.475</td></tr><tr><td>NLLB-MBR-BLEU</td><td>-6.360</td><td>14</td><td>0.825</td><td>14</td><td>-4.726</td><td>14</td><td>1.634</td></tr><tr><td>NLLB-Greedy</td><td>-6.574</td><td>15</td><td>0.831</td><td>12</td><td>-4.578</td><td>12</td><td>1.996</td></tr></table>
72
+
73
+ Table 1: System rankings and average rating of WMT 23 zh-en systems according to XCOMET. The lowest score is in italics, and the highest is in bold.
74
+
75
+ This can be attributed to the uncertainty introduced by bootstrapping and extrapolating to the unpaired metric ratings. It can be seen for ONLINE-W and ZengHuiMT, which have similar metric ratings.
76
+
77
+ Overall, our results show that although there is a highly monotonic function between the XCOMET scale and the human scale, XCOMET exhibits a high dependency on the system under evaluation, thus yielding an inconsistent ranking between humans and XCOMET.
78
+
79
+ # 4 Related Work
80
+
81
+ The derivation in Section 2 closely follows Wu and Resnick (2024), who provide the same argument in the context of binary prevalence estimation. In our case, the conditional expectation $\mathbb{E}[h|m]$ plays the same role as the calibration curve in their framework. Under that lens, the Expected Deviation is analogous to the Expected Calibration Error (Posocco and Bonnefoy, 2021). Following the same analogy, evaluating a new MT system is similar to applying a classifier to a new domain.
82
+
83
+ Previous studies by Deriu et al. (2023) and von Däniken et al. (2022) have highlighted that metric performance depends on the system under test. They employed a Bayesian framework to determine the proportions of binary or preference human ratings from metric scores; critically relying on confusion matrices estimated for each MT system. In this discrete rating context, these confusion matrices represent the same concept as $\mathbb{E}[h|m]$ . In follow-up work, von Däniken et al. (2024) find that some metrics disproportionately favor certain MT systems over others compared to human preference ratings.
84
+
85
+ Our finding provides a plausible explanation.
86
+
87
+ Chaganty et al. (2018) shows how to combine human ratings and metric ratings to derive an unbiased estimate of the true expected human rating $\mu^H$ while reducing the number of annotations needed. The proposed control variates estimator is based only on human and metric scores for a given MT system, even when estimating their correlation, thus avoiding the problem we describe.
88
+
89
+ Wei and Jia (2021) consider disagreements in the ordering of systems when using $\mu_k^M$ instead of $\mu_k^H$ . In particular, they study the sign error, i.e., cases where $\mathrm{sign}(\mu_1^M - \mu_2^M) \neq \mathrm{sign}(\mu_1^H - \mu_2^H)$ . They apply a bias-variance decomposition to this error and find that while the human estimator is unbiased, it exhibits high variance, whereas the opposite is the case for metrics. Our SysDep score presents a way to quantify this bias.
90
+
91
+ # 5 Conclusion
92
+
93
+ In this paper, we emphasize the importance of ensuring that automated metrics treat all MT systems consistently, a factor overlooked in current evaluations. By mapping metric scores to the human rating scale, we estimate how much a metric misjudges individual system performance. We compute the range of these deviations to assess how consistently a metric treats different systems. In Appendix C, we re-evaluate WMT23 metrics from this perspective. Additionally, in Appendix D, we confirm that these results stem from systematic differences in how metrics treat systems by measuring deviations within splits of a single system's ratings.
94
+
95
+ # Limitations
96
+
97
+ This paper is intended to explore a supplementary aspect of the evaluation of automated metrics. The SysDep measure we developed will hopefully provide a starting point for developing more refined evaluations of how metrics treat different systems differently.
98
+
99
+ Our experiments are based solely on data from the WMT23 Metrics shared task. To further solidify our findings, a larger-scale study with more domains and larger sample sizes is needed.
100
+
101
+ While we provide a way to measure the system dependence of a metric, we do not provide any suggestions on how to develop metrics that minimize the SysDep.
102
+
103
+ # Acknowledgments
104
+
105
+ This work was supported by the Swiss National Science Foundation (SNF) within the project "Unified Model for Evaluation of Text Generation Systems (UniVal)" [200020_219819].
106
+
107
+ # References
108
+
109
+ R. E. Barlow and H. D. Brunk. 1972. The isotonic regression problem and its dual. Journal of the American Statistical Association, 67(337):140-147.
110
+ Anja Belz and Eric Kow. 2010. Comparing rating scales and preference judgements in language evaluation. In Proceedings of the 6th International Natural Language Generation Conference. Association for Computational Linguistics.
111
+ Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language models are few-shot learners. In Proceedings of the 34th International Conference on Neural Information Processing Systems, NIPS '20, Red Hook, NY, USA. Curran Associates Inc.
112
+ Arun Chaganty, Stephen Mussmann, and Percy Liang. 2018. The price of debiasing automatic metrics in natural language evaluation. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 643-653, Melbourne, Australia. Association for Computational Linguistics.
113
+
114
+ Jan Deriu, Pius von Däniken, Don Tuggener, and Mark Cieliebak. 2023. Correction of errors in preference ratings from automated metrics for text generation. In *Findings of the Association for Computational Linguistics: ACL* 2023, pages 6456–6474, Toronto, Canada. Association for Computational Linguistics.
115
+ Markus Freitag, Nitika Mathur, Chi-kiu Lo, Eleftherios Avramidis, Ricardo Rei, Brian Thompson, Tom Kocmi, Frederic Blain, Daniel Deutsch, Craig Stewart, Chrysoula Zerva, Sheila Castilho, Alon Lavie, and George Foster. 2023. Results of WMT23 metrics shared task: Metrics might be guilty but references are not innocent. In Proceedings of the Eighth Conference on Machine Translation, pages 578-628, Singapore. Association for Computational Linguistics.
116
+ Markus Freitag, Ricardo Rei, Nitika Mathur, Chi-kiu Lo, Craig Stewart, Eleftherios Avramidis, Tom Kocmi, George Foster, Alon Lavie, and André F. T. Martins. 2022. Results of WMT22 metrics shared task: Stop using BLEU – neural metrics are better and more robust. In Proceedings of the Seventh Conference on Machine Translation (WMT), pages 46–68, Abu Dhabi, United Arab Emirates (Hybrid). Association for Computational Linguistics.
117
+ Markus Freitag, Ricardo Rei, Nitika Mathur, Chi-kiu Lo, Craig Stewart, George Foster, Alon Lavie, and Ondřej Bojar. 2021. Results of the WMT21 metrics shared task: Evaluating metrics with expert-based human evaluations on TED and news domain. In Proceedings of the Sixth Conference on Machine Translation, pages 733-774, Online. Association for Computational Linguistics.
118
+ Nuno M. Guerreiro, Ricardo Rei, Daan van Stigt, Luisa Coheur, Pierre Colombo, and Andre F. T. Martins. 2023. xcomet: Transparent machine translation evaluation through fine-grained error detection. Preprint, arXiv:2310.10482.
119
+ Charles R. Harris, K. Jarrod Millman, Stefan J. van der Walt, Ralf Gommers, Pauli Virtanen, David Cournaepau, Eric Wieser, Julian Taylor, Sebastian Berg, Nathaniel J. Smith, Robert Kern, Matti Picus, Stephan Hoyer, Marten H. van Kerkwijk, Matthew Brett, Allan Haldane, Jaime Fernandez del Rio, Mark Wiebe, Pearu Peterson, Pierre Gerard-Marchant, Kevin Sheppard, Tyler Reddy, Warren Weckesser, Hameer Abbasi, Christoph Gohlke, and Travis E. Oliphant. 2020. Array programming with NumPy. Nature, 585(7825):357-362.
120
+ Amr Hendy, Mohamed Abdelrehim, Amr Sharaf, Vikas Raunak, Mohamed Gabr, Hitokazu Matsushita, Young Jin Kim, Mohamed Afify, and Hany Hassan Awadalla. 2023. How good are gpt models at machine translation? a comprehensive evaluation. Preprint, arXiv:2302.09210.
121
+ Juraj Juraska, Mara Finkelstein, Daniel Deutsch, Aditya Siddhant, Mehdi Mirzazadeh, and Markus Freitag. 2023. MetricX-23: The Google submission to the WMT 2023 metrics shared task. In Proceedings
122
+
123
+ of the Eighth Conference on Machine Translation, pages 756-767, Singapore. Association for Computational Linguistics.
124
+ Tom Kocmi, Eleftherios Avramidis, Rachel Bawden, Ondrej Bojar, Anton Dvorkovich, Christian Federmann, Mark Fishel, Markus Freitag, Thamme Gowda, Roman Grundkiewicz, Barry Haddow, Philipp Koehn, Benjamin Marie, Christof Monz, Makoto Morishita, Kenton Murray, Makoto Nagata, Toshiaki Nakazawa, Martin Popel, Maja Popovic, and Mariya Shmatova. 2023. Findings of the 2023 conference on machine translation (WMT23): LLMs are here but not quite there yet. In Proceedings of the Eighth Conference on Machine Translation, pages 1-42, Singapore. Association for Computational Linguistics.
125
+ Tom Kocmi and Christian Federmann. 2023. GEMBA-MQM: Detecting translation quality error spans with GPT-4. In Proceedings of the Eighth Conference on Machine Translation, pages 768-775, Singapore. Association for Computational Linguistics.
126
+ Tom Kocmi, Christian Federmann, Roman Grundkiewicz, Marcin Junczys-Dowmunt, Hitokazu Matsushita, and Arul Menezes. 2021. To ship or not to ship: An extensive evaluation of automatic metrics for machine translation. In Proceedings of the Sixth Conference on Machine Translation, pages 478-494, Online. Association for Computational Linguistics.
127
+ Arle Lommel, Hans Uszkoreit, and Aljoscha Burchardt. 2014. Multidimensional quality metrics (MQM): A framework for declaring and describing translation quality metrics. Tradumàtica, (12):455-463.
128
+ F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duchesnay. 2011. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830.
129
+ Nicolas Posocco and Antoine Bonnefoy. 2021. Estimating expected calibration errors. In Artificial Neural Networks and Machine Learning - ICANN 2021, pages 139-150, Cham. Springer International Publishing.
130
+ Brian Thompson and Matt Post. 2020a. Automatic machine translation evaluation in many languages via zero-shot paraphrasing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 90-121, Online. Association for Computational Linguistics.
131
+ Brian Thompson and Matt Post. 2020b. Paraphrase generation as zero-shot multilingual translation: Disentangling semantic similarity from lexical and syntactic diversity. In Proceedings of the Fifth Conference on Machine Translation, pages 561-570, Online. Association for Computational Linguistics.
132
+
133
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Proceedings of the 31st International Conference on Neural Information Processing Systems, NIPS'17, page 6000-6010, Red Hook, NY, USA. Curran Associates Inc.
134
+ Pius von Däniken, Jan Deriu, Don Tuggener, and Mark Cieliebak. 2022. On the effectiveness of automated metrics for text generation systems. In *Findings of the Association for Computational Linguistics: EMNLP* 2022, pages 1503–1522, Abu Dhabi, United Arab Emirates. Association for Computational Linguistics.
135
+ Pius von Däniken, Jan Deriu, Don Tuggener, and Mark Cieliebak. 2024. Favi-score: A measure for favoritism in automated preference ratings for generative AI evaluation. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 4437-4454, Bangkok, Thailand. Association for Computational Linguistics.
136
+ Johnny Wei and Robin Jia. 2021. The statistical advantage of automatic NLG metrics at the system level. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 6840-6854, Online. Association for Computational Linguistics.
137
+ Siqi Wu and Paul Resnick. 2024. Calibrate-extrapolate: Rethinking prevalence estimation with black box classifiers. Proceedings of the International AAAI Conference on Web and Social Media, 18(1):1634-1647.
138
+ Yangjian Wu and Gang Hu. 2023. Exploring prompt engineering with GPT language models for document-level machine translation: Insights and findings. In Proceedings of the Eighth Conference on Machine Translation, pages 166-169, Singapore. Association for Computational Linguistics.
139
+
140
+ # A Full Derivation
141
+
142
+ In Equation 4, we give the full derivation of Equation 1 in Section 2. In the following $p_k(h)$ is the density of human ratings for system $\pi_k$ , $p_k(m)$ is its density of metric ratings, and $p_k(h, m)$ the joint density.
143
+
144
+ $$
145
+ \begin{array}{l} \mathbb {E} \left[ h _ {k} \right] = \int_ {- \infty} ^ {\infty} h p _ {k} (h) \mathrm {d} h \\ = \int_ {- \infty} ^ {\infty} h \left[ \int_ {- \infty} ^ {\infty} p _ {k} (h, m) \mathrm {d} m \right] \mathrm {d} h \\ = \int_ {- \infty} ^ {\infty} h \left[ \int_ {- \infty} ^ {\infty} p _ {k} (h | m) p _ {k} (m) d m \right] d h \\ = \int_ {- \infty} ^ {\infty} \int_ {- \infty} ^ {\infty} h p _ {k} (h | m) p _ {k} (m) d m d h \tag {4} \\ = \int_ {- \infty} ^ {\infty} \left[ \int_ {- \infty} ^ {\infty} h p _ {k} (h | m) \mathrm {d} h \right] p _ {k} (m) \mathrm {d} m \\ = \int_ {- \infty} ^ {\infty} \mathbb {E} \left[ h _ {k} | m \right] p _ {k} (m) \mathrm {d} m \\ = \mathbb {E} _ {p _ {k} (m)} [ \mathbb {E} _ {p _ {k} (h)} [ h | m ] ] \\ \end{array}
146
+ $$
147
+
148
+ # B Additional Results
149
+
150
+ Here we extend our experiment from Section 3 to additional language pairs and metrics of WMT 23. For the en-de language pair $N^H = 460$ and $N^M = 557$ , and for he-en $N^H = 820$ and $N^M = 1910$ . We show the results for XCOMET for each language pair in Tables 2, 3, and 4 (note that Table 4 is the same as Table 1 in Section 3). We also include results for GEMBA-MQM (Kocmi and Federmann, 2023), which is a reference-free metric based on prompting LLMs. The results can be seen in Tables 5, 6, and 7.
151
+
152
+ # C Evaluating the System Dependence of WMT23 Metrics
153
+
154
+ In Section 2, we introduced the SysDep score. It is based on the expected deviation (ED), which measures the difference between the average human rating we expect to see based on metric ratings, assuming a single global $f_{G}$ , and the true average human rating for a system $\pi_{k}$ . To measure the system dependence of a metric across a set of systems $\pi_{1},\ldots ,\pi_{K}$ , we compute the range of the individual EDs: $\mathrm{SysDep} = \max_{\pi_{k}} ED(k) - \min_{\pi_{k}} ED(k)$ . We noted in Section 3 that ED $(k)$ alone is not enough to know whether system $\pi_{k}$ will be ranked incorrectly; it depends on the true margin to the other systems and their dependencies. By measuring the range, we consider the worst case when comparing two systems. We show the dependency ranges for all WMT23 metrics on all language pairs in Table 8. We notice that the values for en-de are larger than the others. This is due to a larger range of human rating averages for this language pair (see also Tables 2-7 in Appendix B). We therefore also do not aggregate across language pairs.
155
+
156
+ Variants of MetricX-23 (Juraska et al., 2023) perform best on en-de and he-en, while GEMBA-MQM has the lowest range for zh-en. The reference-free prismSrc (Thompson and Post, 2020a,b) metric performs worst on en-de and zh-en. The baseline Random-sysname (Freitag et al., 2023) performs worst for he-en. This baseline is an interesting case, as it is the prototypical example of a metric where every $f_{k}$ is different. It assigns a fixed score to each system based on its name and adds Gaussian noise to this value to assign segment level scores. Therefore each $f_{k}$ will be a different constant function.
157
+
158
+ # D Intra-System Variability
159
+
160
+ In order to confirm that the observed SysDep scores are indeed due to a metric systematically treating systems differently and not due to variance in ratings, we measure the maximum intra-system scores. For this, we take the ratings from a single system and split them into two equal-sized parts 10 times with different random seeds. This simulates a setting with 20 systems, each with half the sample size of the original setting. We then compute the SysDep score over these pseudo-systems.
161
+
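+ A rough sketch of this check (illustrative; `fit_and_compute_sysdep` stands in for fitting $\hat{f}_G$ and the per-split $\hat{f}_k$ and computing SysDep as above):
+
+ ```python
+ import numpy as np
+ from numpy.random import default_rng
+
+ def intra_system_sysdep(human, metric, fit_and_compute_sysdep, n_repeats=10, seed=0):
+     # split one system's paired ratings into two halves n_repeats times and treat the
+     # resulting 2 * n_repeats halves as pseudo-systems before computing SysDep over them
+     rng = default_rng(seed)
+     pseudo_systems = []
+     n = len(human)
+     for _ in range(n_repeats):
+         permutation = rng.permutation(n)
+         half = n // 2
+         for indices in (permutation[:half], permutation[half:]):
+             pseudo_systems.append((human[indices], metric[indices]))
+     return fit_and_compute_sysdep(pseudo_systems)  # placeholder for the estimation pipeline
+ ```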
162
+ In Table 9, we show the maximum intra-system SysDep score computed this way over all systems for a given metric and language pair. We observe that for he-en and zh-en all scores are lower than the minimum between-system SysDep reported in Appendix C. This confirms that in those cases metrics treat different systems differently. For the en-de language pair, we observe that while in many cases the intra-system score is lower than the SysDep between systems for the same metric and language pair, this is not always the case. This could be due to the metrics treating systems more equally for this language pair, or to the relatively small sample sizes for en-de compared to the other language pairs.
163
+
164
+ # E Estimating Conditional Expectations
165
+
166
+ In Section 3, we gave a brief overview of how to compute estimates for the functions $f_{k}$ and $f_{G}$ . In Listings 1 and 2, we show our Python implementation. To estimate the system-level $\hat{f}_{k}$ , we call the .fit method with human and metric ratings for system $\pi_{k}$ . To evaluate the function $\hat{f}_{k}$ , we use the .conditional_expectation method. To estimate the global function $\hat{f}_{G}$ , we use the .fit method with paired human and metric ratings for all systems. We compute the remapped rating $\hat{\mu}_{k}^{G}$ by first fitting $\hat{f}_{G}$ and then using the .remapped_expectation method on the metric ratings for system $\pi_{k}$ .
167
+
168
+ <table><tr><td rowspan="2"></td><td colspan="2">Human</td><td colspan="2">Metric</td><td colspan="2">Remapped</td><td>Exp. Deviation</td></tr><tr><td>μHk</td><td>R</td><td>μMk</td><td>R</td><td>μGk</td><td>R</td><td>ED</td></tr><tr><td>GPT4-5shot</td><td>-3.724</td><td>1</td><td>0.882</td><td>2</td><td>-4.768</td><td>1</td><td>-1.044</td></tr><tr><td>ONLINE-W</td><td>-3.950</td><td>2</td><td>0.883</td><td>1</td><td>-4.821</td><td>2</td><td>-0.871</td></tr><tr><td>ONLINE-B</td><td>-4.711</td><td>3</td><td>0.871</td><td>3</td><td>-5.272</td><td>3</td><td>-0.560</td></tr><tr><td>ONLINE-Y</td><td>-5.643</td><td>4</td><td>0.858</td><td>4</td><td>-5.909</td><td>4</td><td>-0.266</td></tr><tr><td>ONLINE-A</td><td>-5.668</td><td>5</td><td>0.853</td><td>5</td><td>-6.152</td><td>5</td><td>-0.483</td></tr><tr><td>ONLINE-G</td><td>-6.574</td><td>6</td><td>0.834</td><td>6</td><td>-7.079</td><td>6</td><td>-0.505</td></tr><tr><td>ONLINE-M</td><td>-6.936</td><td>7</td><td>0.830</td><td>7</td><td>-7.399</td><td>7</td><td>-0.462</td></tr><tr><td>Lan-BridgeMT</td><td>-8.670</td><td>8</td><td>0.801</td><td>9</td><td>-8.670</td><td>9</td><td>-0.000</td></tr><tr><td>ZengHuiMT</td><td>-9.255</td><td>9</td><td>0.790</td><td>11</td><td>-9.387</td><td>11</td><td>-0.132</td></tr><tr><td>NLLB-Greedy</td><td>-9.543</td><td>10</td><td>0.812</td><td>8</td><td>-8.405</td><td>8</td><td>1.138</td></tr><tr><td>NLLB-MBR-BLEU</td><td>-10.794</td><td>11</td><td>0.797</td><td>10</td><td>-9.005</td><td>10</td><td>1.789</td></tr><tr><td>AIRC</td><td>-14.228</td><td>12</td><td>0.724</td><td>12</td><td>-13.658</td><td>12</td><td>0.570</td></tr></table>
169
+
170
+ Table 2: System rankings and average rating of WMT 23 en-de systems according to XCOMET.
171
+
172
+ <table><tr><td rowspan="2"></td><td colspan="2">Human</td><td colspan="2">Metric</td><td colspan="2">Remapped</td><td>Exp. Deviation</td></tr><tr><td>μHk</td><td>R</td><td>μMk</td><td>R</td><td>μGk</td><td>R</td><td>ED</td></tr><tr><td>GPT4-5shot</td><td>-1.333</td><td>1</td><td>0.913</td><td>2</td><td>-1.690</td><td>2</td><td>-0.358</td></tr><tr><td>ONLINE-A</td><td>-1.381</td><td>2</td><td>0.908</td><td>3</td><td>-1.817</td><td>3</td><td>-0.436</td></tr><tr><td>ONLINE-B</td><td>-1.546</td><td>3</td><td>0.916</td><td>1</td><td>-1.635</td><td>1</td><td>-0.089</td></tr><tr><td>GTCOM-Peter</td><td>-1.886</td><td>4</td><td>0.904</td><td>4</td><td>-1.916</td><td>4</td><td>-0.030</td></tr><tr><td>UvA-LTL</td><td>-1.919</td><td>5</td><td>0.893</td><td>6</td><td>-2.193</td><td>6</td><td>-0.274</td></tr><tr><td>ONLINE-G</td><td>-2.055</td><td>6</td><td>0.895</td><td>5</td><td>-2.137</td><td>5</td><td>-0.082</td></tr><tr><td>ONLINE-Y</td><td>-2.349</td><td>7</td><td>0.881</td><td>8</td><td>-2.511</td><td>8</td><td>-0.162</td></tr><tr><td>ZengHuiMT</td><td>-2.382</td><td>8</td><td>0.889</td><td>7</td><td>-2.294</td><td>7</td><td>0.088</td></tr><tr><td>Samsung-Res.-Ph.</td><td>-3.234</td><td>9</td><td>0.874</td><td>9</td><td>-2.666</td><td>9</td><td>0.568</td></tr><tr><td>NLLB-MBR-BLEU</td><td>-3.678</td><td>10</td><td>0.869</td><td>11</td><td>-2.805</td><td>11</td><td>0.872</td></tr><tr><td>NLLB-Greedy</td><td>-3.790</td><td>11</td><td>0.872</td><td>10</td><td>-2.714</td><td>10</td><td>1.076</td></tr><tr><td>Lan-BridgeMT</td><td>-3.793</td><td>12</td><td>0.867</td><td>12</td><td>-2.823</td><td>12</td><td>0.971</td></tr></table>
173
+
174
+ Table 3: System rankings and average rating of WMT 23 he-en systems according to XCOMET.
175
+
176
+ We rely on the Isotonic Regression implementation from scikit-learn (Pedregosa et al., 2011) and numerical utility functions from numpy (Harris et al., 2020).
177
+
178
+ <table><tr><td rowspan="2"></td><td colspan="2">Human</td><td colspan="2">Metric</td><td colspan="2">Remapped</td><td>Exp. Deviation</td></tr><tr><td>μHk</td><td>R</td><td>μMk</td><td>R</td><td>μGk</td><td>R</td><td>ED</td></tr><tr><td>Lan-BridgeMT</td><td>-2.100</td><td>1</td><td>0.889</td><td>2</td><td>-2.920</td><td>2</td><td>-0.820</td></tr><tr><td>GPT4-5shot</td><td>-2.305</td><td>2</td><td>0.893</td><td>1</td><td>-2.800</td><td>1</td><td>-0.494</td></tr><tr><td>Yishu</td><td>-3.231</td><td>3</td><td>0.880</td><td>4</td><td>-3.179</td><td>4</td><td>0.052</td></tr><tr><td>ONLINE-B</td><td>-3.385</td><td>4</td><td>0.879</td><td>5</td><td>-3.188</td><td>5</td><td>0.197</td></tr><tr><td>HW-TSC</td><td>-3.398</td><td>5</td><td>0.883</td><td>3</td><td>-3.080</td><td>3</td><td>0.318</td></tr><tr><td>ONLINE-A</td><td>-3.785</td><td>6</td><td>0.856</td><td>8</td><td>-3.812</td><td>8</td><td>-0.027</td></tr><tr><td>ONLINE-Y</td><td>-3.792</td><td>7</td><td>0.868</td><td>6</td><td>-3.479</td><td>6</td><td>0.313</td></tr><tr><td>ONLINE-G</td><td>-3.857</td><td>8</td><td>0.864</td><td>7</td><td>-3.607</td><td>7</td><td>0.250</td></tr><tr><td>ONLINE-W</td><td>-4.062</td><td>9</td><td>0.848</td><td>9</td><td>-4.165</td><td>10</td><td>-0.103</td></tr><tr><td>ZengHuiMT</td><td>-4.232</td><td>10</td><td>0.846</td><td>10</td><td>-4.140</td><td>9</td><td>0.092</td></tr><tr><td>IOL-Research</td><td>-4.586</td><td>11</td><td>0.843</td><td>11</td><td>-4.251</td><td>11</td><td>0.335</td></tr><tr><td>ONLINE-M</td><td>-5.433</td><td>12</td><td>0.820</td><td>15</td><td>-4.907</td><td>15</td><td>0.526</td></tr><tr><td>ANVITA</td><td>-6.078</td><td>13</td><td>0.830</td><td>13</td><td>-4.602</td><td>13</td><td>1.475</td></tr><tr><td>NLLB-MBR-BLEU</td><td>-6.360</td><td>14</td><td>0.825</td><td>14</td><td>-4.726</td><td>14</td><td>1.634</td></tr><tr><td>NLLB-Greedy</td><td>-6.574</td><td>15</td><td>0.831</td><td>12</td><td>-4.578</td><td>12</td><td>1.996</td></tr></table>
179
+
180
+ Table 4: System rankings and average rating of WMT 23 zh-en systems according to XCOMET.
181
+
182
+ <table><tr><td rowspan="2"></td><td colspan="2">Human</td><td colspan="2">Metric</td><td colspan="2">Remapped</td><td>Exp. Deviation</td></tr><tr><td>μHk</td><td>R</td><td>μMk</td><td>R</td><td>μGk</td><td>R</td><td>ED</td></tr><tr><td>GPT4-5shot</td><td>-3.724</td><td>1</td><td>-2.447</td><td>1</td><td>-4.123</td><td>1</td><td>-0.399</td></tr><tr><td>ONLINE-W</td><td>-3.950</td><td>2</td><td>-3.429</td><td>2</td><td>-4.822</td><td>2</td><td>-0.872</td></tr><tr><td>ONLINE-B</td><td>-4.711</td><td>3</td><td>-4.048</td><td>3</td><td>-5.383</td><td>3</td><td>-0.672</td></tr><tr><td>ONLINE-Y</td><td>-5.643</td><td>4</td><td>-4.424</td><td>4</td><td>-5.832</td><td>5</td><td>-0.189</td></tr><tr><td>ONLINE-A</td><td>-5.668</td><td>5</td><td>-4.567</td><td>5</td><td>-5.826</td><td>4</td><td>-0.158</td></tr><tr><td>ONLINE-G</td><td>-6.574</td><td>6</td><td>-6.018</td><td>6</td><td>-7.047</td><td>6</td><td>-0.473</td></tr><tr><td>ONLINE-M</td><td>-6.936</td><td>7</td><td>-6.217</td><td>7</td><td>-7.113</td><td>7</td><td>-0.177</td></tr><tr><td>Lan-BridgeMT</td><td>-8.670</td><td>8</td><td>-8.197</td><td>8</td><td>-8.891</td><td>9</td><td>-0.221</td></tr><tr><td>ZengHuiMT</td><td>-9.255</td><td>9</td><td>-8.357</td><td>9</td><td>-8.867</td><td>8</td><td>0.388</td></tr><tr><td>NLLB-Greedy</td><td>-9.543</td><td>10</td><td>-10.043</td><td>10</td><td>-9.683</td><td>10</td><td>-0.140</td></tr><tr><td>NLLB-MBR-BLEU</td><td>-10.794</td><td>11</td><td>-10.724</td><td>11</td><td>-10.352</td><td>11</td><td>0.442</td></tr><tr><td>AIRC</td><td>-14.228</td><td>12</td><td>-13.941</td><td>12</td><td>-12.526</td><td>12</td><td>1.702</td></tr></table>
183
+
184
+ Table 5: System rankings and average rating of WMT 23 en-de systems according to GEMBA-MQM.
185
+
186
+ <table><tr><td rowspan="2"></td><td colspan="2">Human</td><td colspan="2">Metric</td><td colspan="2">Remapped</td><td>Exp. Deviation</td></tr><tr><td>μHkR</td><td>R</td><td>μMkR</td><td>R</td><td>μGkR</td><td>R</td><td>ED</td></tr><tr><td>GPT4-5shot</td><td>-1.333</td><td>1</td><td>-1.923</td><td>1</td><td>-1.377</td><td>1</td><td>-0.045</td></tr><tr><td>ONLINE-A</td><td>-1.381</td><td>2</td><td>-3.850</td><td>2</td><td>-1.882</td><td>2</td><td>-0.501</td></tr><tr><td>ONLINE-B</td><td>-1.546</td><td>3</td><td>-4.108</td><td>3</td><td>-1.969</td><td>3</td><td>-0.423</td></tr><tr><td>GTCOM-Peter</td><td>-1.886</td><td>4</td><td>-4.859</td><td>4</td><td>-2.144</td><td>4</td><td>-0.258</td></tr><tr><td>UvA-LTL</td><td>-1.919</td><td>5</td><td>-5.628</td><td>6</td><td>-2.312</td><td>6</td><td>-0.393</td></tr><tr><td>ONLINE-G</td><td>-2.055</td><td>6</td><td>-5.240</td><td>5</td><td>-2.281</td><td>5</td><td>-0.225</td></tr><tr><td>ONLINE-Y</td><td>-2.349</td><td>7</td><td>-6.885</td><td>8</td><td>-2.677</td><td>8</td><td>-0.328</td></tr><tr><td>ZengHuiMT</td><td>-2.382</td><td>8</td><td>-6.032</td><td>7</td><td>-2.484</td><td>7</td><td>-0.102</td></tr><tr><td>Samsung-Res.-Ph.</td><td>-3.234</td><td>9</td><td>-8.545</td><td>12</td><td>-2.954</td><td>12</td><td>0.280</td></tr><tr><td>NLLB-MBR-BLEU</td><td>-3.678</td><td>10</td><td>-8.075</td><td>9</td><td>-2.817</td><td>10</td><td>0.861</td></tr><tr><td>NLLB-Greedy</td><td>-3.790</td><td>11</td><td>-8.261</td><td>10</td><td>-2.813</td><td>9</td><td>0.977</td></tr><tr><td>Lan-BridgeMT</td><td>-3.793</td><td>12</td><td>-8.469</td><td>11</td><td>-2.840</td><td>11</td><td>0.953</td></tr></table>
187
+
188
+ Table 6: System rankings and average rating of WMT 23 he-en systems according to GEMBA-MQM.
189
+
190
+ <table><tr><td rowspan="2"></td><td colspan="2">Human</td><td colspan="2">Metric</td><td colspan="2">Remapped</td><td>Exp. Deviation</td></tr><tr><td>μHk</td><td>R</td><td>μMk</td><td>R</td><td>μGk</td><td>R</td><td>ED</td></tr><tr><td>Lan-BridgeMT</td><td>-2.100</td><td>1</td><td>-1.949</td><td>2</td><td>-2.419</td><td>2</td><td>-0.319</td></tr><tr><td>GPT4-5shot</td><td>-2.305</td><td>2</td><td>-1.601</td><td>1</td><td>-2.199</td><td>1</td><td>0.106</td></tr><tr><td>Yishu</td><td>-3.231</td><td>3</td><td>-4.790</td><td>5</td><td>-3.492</td><td>5</td><td>-0.261</td></tr><tr><td>ONLINE-B</td><td>-3.385</td><td>4</td><td>-4.717</td><td>4</td><td>-3.489</td><td>4</td><td>-0.104</td></tr><tr><td>HW-TSC</td><td>-3.398</td><td>5</td><td>-4.367</td><td>3</td><td>-3.336</td><td>3</td><td>0.062</td></tr><tr><td>ONLINE-A</td><td>-3.785</td><td>6</td><td>-5.568</td><td>8</td><td>-3.838</td><td>9</td><td>-0.053</td></tr><tr><td>ONLINE-Y</td><td>-3.792</td><td>7</td><td>-5.453</td><td>7</td><td>-3.611</td><td>6</td><td>0.181</td></tr><tr><td>ONLINE-G</td><td>-3.857</td><td>8</td><td>-5.275</td><td>6</td><td>-3.724</td><td>7</td><td>0.134</td></tr><tr><td>ONLINE-W</td><td>-4.062</td><td>9</td><td>-5.760</td><td>9</td><td>-3.772</td><td>8</td><td>0.290</td></tr><tr><td>ZengHuiMT</td><td>-4.232</td><td>10</td><td>-6.337</td><td>10</td><td>-4.089</td><td>11</td><td>0.143</td></tr><tr><td>IOL-Research</td><td>-4.586</td><td>11</td><td>-6.511</td><td>11</td><td>-4.067</td><td>10</td><td>0.519</td></tr><tr><td>ONLINE-M</td><td>-5.433</td><td>12</td><td>-9.115</td><td>12</td><td>-4.899</td><td>13</td><td>0.534</td></tr><tr><td>ANVITA</td><td>-6.078</td><td>13</td><td>-9.440</td><td>13</td><td>-4.844</td><td>12</td><td>1.234</td></tr><tr><td>NLLB-MBR-BLEU</td><td>-6.360</td><td>14</td><td>-11.339</td><td>15</td><td>-5.379</td><td>15</td><td>0.981</td></tr><tr><td>NLLB-Greedy</td><td>-6.574</td><td>15</td><td>-11.282</td><td>14</td><td>-5.312</td><td>14</td><td>1.262</td></tr></table>
191
+
192
+ Table 7: System rankings and average rating of WMT 23 zh-en systems according to GEMBA-MQM.
193
+
194
+ ```python
195
+ from typing import Self, Tuple
196
+ import numpy as np
197
+ from numpy.random import Generator, default_rng
198
+ from sklearn.isotonic import IsotonicRegression
199
+ class BootstrapIsotonic:
+     def __init__(self, n_bootstrap: int = 200, rng: int | Generator = 0xdeadbeef):
+         self.n_bootstrap = n_bootstrap
+         self.rng = default_rng(rng)
+         self.models = []
+
+     def fit(
+         self,
+         human_ratings: np.ndarray[float],   # 1d array of human ratings
+         metric_ratings: np.ndarray[float],  # 1d array of matching metric ratings
+     ) -> Self:
+         # fit a model of f_k or f_G, i.e. the conditional expectation of human ratings given metric ratings
+         # to model a system-level function f_k, use only human ratings for that given system k
+         # to model the global function f_G, use data from all systems
+         assert len(human_ratings) == len(metric_ratings)
+         n_samples = len(human_ratings)
+         for _ in range(self.n_bootstrap):
+             bootstrap_indices = self.rng.choice(
+                 np.arange(n_samples), n_samples, replace=True
+             )
+             isotonic_model = IsotonicRegression(
+                 y_min=None,           # MQM scores range from large negative to 0
+                 y_max=0.,
+                 increasing=True,      # metric has positive correlation
+                 out_of_bounds='nan',  # don't extrapolate
+             )
+             isotonic_model.fit(
+                 X=metric_ratings[bootstrap_indices],
+                 y=human_ratings[bootstrap_indices],
+             )
+             self.models.append(isotonic_model)
+         return self
+
+     def _predict_bootstrap(
+         self,
+         m: np.ndarray[float],  # 1d array of metric ratings
+     ) -> np.ndarray[float]:
+         # helper returning predictions from each bootstrap model,
+         # as a 2d array of shape [n_bootstrap, len(m)]
+         result = np.zeros((self.n_bootstrap, len(m)), dtype=float)
+         for bix, model in enumerate(self.models):
+             result[bix, :] = model.predict(m)
+         return result
200
+ ```
201
+
202
+ Listing 1: Part 1 of the python code to estimate $\hat{f}_k, \hat{f}_G$ , and $\hat{\mu}_k^G$ .
203
+
204
+ ```python
205
+ # methods of BootstrapIsotonic, continued from Listing 1
+ def conditional_expectation(
+     self,
+     metric_ratings: np.ndarray[float],  # 1d array of metric ratings
+ ) -> np.ndarray[float]:
+     # this computes the function f_k or f_G (depending on what data we fitted on)
+     bootstrap_predictions = self._predict_bootstrap(metric_ratings)
+     return np.nanmean(bootstrap_predictions, axis=0)
+
+ def confidence(
+     self,
+     metric_ratings: np.ndarray[float],  # 1d array of metric ratings
+ ) -> Tuple[np.ndarray[float], np.ndarray[float]]:
+     # this computes the confidence bounds around f_k or f_G in Figure 1
+     bootstrap_predictions = self._predict_bootstrap(metric_ratings)
+     lower = np.nanpercentile(bootstrap_predictions, 2.5, axis=0)
+     upper = np.nanpercentile(bootstrap_predictions, 97.5, axis=0)
+     return lower, upper
+
+ def remapped_expectation(
+     self,
+     metric_ratings: np.ndarray[float],  # 1d array of metric ratings
+ ) -> float:
+     # used to compute remapped system scores in Table 1
+     expected_human_ratings = self.conditional_expectation(metric_ratings)
+     return np.nanmean(expected_human_ratings)
217
+ ```
218
+
219
+ Listing 2: Part 2 of the python code to estimate $\hat{f}_k, \hat{f}_G$ , and $\hat{\mu}_k^G$ .
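+
+ As a usage illustration (not part of the original listings), the two parts can be combined as follows; the arrays `metric` and `human` are hypothetical stand-ins for one system's segment-level metric and human ratings.
+
+ ```python
+ import numpy as np
+ from numpy.random import default_rng
+
+ # hypothetical segment-level ratings for one system k (placeholders, not WMT data)
+ rng = default_rng(0)
+ metric = rng.uniform(-10.0, 0.0, size=500)                            # metric ratings
+ human = np.clip(metric + rng.normal(0.0, 2.0, size=500), None, 0.0)   # noisy MQM-style human ratings
+
+ model = BootstrapIsotonic(n_bootstrap=200).fit(human_ratings=human, metric_ratings=metric)
+
+ f_k = model.conditional_expectation(metric)   # estimate of f_k at the observed metric ratings
+ lower, upper = model.confidence(metric)       # bootstrap band around f_k
+ mu_k_G = model.remapped_expectation(metric)   # remapped (expected human) system score
+ ```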
220
+
221
+ <table><tr><td></td><td>en-de</td><td>he-en</td><td>zh-en</td></tr><tr><td>BERTscore</td><td>7.18</td><td>1.73</td><td>3.87</td></tr><tr><td>BLEU</td><td>9.02</td><td>2.06</td><td>4.23</td></tr><tr><td>BLEURT-20</td><td>3.68</td><td>1.66</td><td>3.35</td></tr><tr><td>Calibri-COMET22-QE</td><td>3.68</td><td>2.06</td><td>3.30</td></tr><tr><td>Calibri-COMET22</td><td>4.24</td><td>1.64</td><td>3.40</td></tr><tr><td>chrF</td><td>8.11</td><td>1.92</td><td>4.29</td></tr><tr><td>COMET</td><td>4.29</td><td>1.64</td><td>3.35</td></tr><tr><td>CometKiwi</td><td>3.84</td><td>1.95</td><td>3.02</td></tr><tr><td>CometKiwi-XL</td><td>3.77</td><td>1.98</td><td>3.01</td></tr><tr><td>CometKiwi-XXL</td><td>3.68</td><td>1.82</td><td>2.92</td></tr><tr><td>cometoid22-wmt21</td><td>5.44</td><td>2.11</td><td>3.30</td></tr><tr><td>cometoid22-wmt22</td><td>5.17</td><td>2.09</td><td>3.21</td></tr><tr><td>cometoid22-wmt23</td><td>4.66</td><td>1.81</td><td>3.20</td></tr><tr><td>docWMT22CometDA</td><td>3.87</td><td>1.65</td><td>3.37</td></tr><tr><td>docWMT22CometKiwiDA</td><td>4.53</td><td>1.83</td><td>2.76</td></tr><tr><td>eBLEU</td><td>9.49</td><td>2.08</td><td>4.29</td></tr><tr><td>embed-llama</td><td>7.07</td><td>2.13</td><td>4.21</td></tr><tr><td>f200spBLEU</td><td>8.42</td><td>2.01</td><td>4.23</td></tr><tr><td>GEMBA-MQM</td><td>2.57</td><td>1.48</td><td>1.58</td></tr><tr><td>instructscore</td><td>3.59</td><td>1.53</td><td>3.68</td></tr><tr><td>KG-BERTScore</td><td>4.24</td><td>1.88</td><td>3.04</td></tr><tr><td>MaTESe</td><td>5.98</td><td>1.49</td><td>3.16</td></tr><tr><td>mbr-metricx-qe</td><td>3.69</td><td>1.58</td><td>2.39</td></tr><tr><td>MEE4</td><td>8.48</td><td>1.88</td><td>4.21</td></tr><tr><td>MetricX-23-b</td><td>2.26</td><td>1.29</td><td>2.81</td></tr><tr><td>MetricX-23-c</td><td>3.56</td><td>1.69</td><td>2.36</td></tr><tr><td>MetricX-23-QE-b</td><td>2.11</td><td>1.55</td><td>2.62</td></tr><tr><td>MetricX-23-QE-c</td><td>2.82</td><td>1.21</td><td>1.65</td></tr><tr><td>MetricX-23-QE</td><td>2.93</td><td>1.77</td><td>3.12</td></tr><tr><td>MetricX-23</td><td>2.57</td><td>1.33</td><td>3.04</td></tr><tr><td>mre-score-labse-regular</td><td>9.70</td><td>1.65</td><td>3.94</td></tr><tr><td>MS-COMET-QE-22</td><td>5.87</td><td>2.22</td><td>3.37</td></tr><tr><td>prismRef</td><td>8.71</td><td>1.79</td><td>3.97</td></tr><tr><td>prismSrc</td><td>11.24</td><td>2.48</td><td>4.61</td></tr><tr><td>Random-sysname</td><td>9.97</td><td>2.52</td><td>4.53</td></tr><tr><td>scescoreX</td><td>3.59</td><td>1.52</td><td>3.47</td></tr><tr><td>tokengram-F</td><td>8.17</td><td>1.93</td><td>4.29</td></tr><tr><td>XCOMET-Ensemble</td><td>2.83</td><td>1.51</td><td>2.82</td></tr><tr><td>XCOMET-QE-Ensemble</td><td>2.95</td><td>1.78</td><td>2.95</td></tr><tr><td>XCOMET-XL</td><td>3.39</td><td>1.59</td><td>3.20</td></tr><tr><td>XCOMET-XXL</td><td>2.71</td><td>1.48</td><td>2.99</td></tr><tr><td>XLsim</td><td>7.83</td><td>2.01</td><td>4.20</td></tr><tr><td>YiSi-1</td><td>5.95</td><td>1.60</td><td>3.65</td></tr></table>
222
+
223
+ Table 8: SysDep for each metric and language pair. We show the minimum and maximum for each language pair.
224
+
225
+ <table><tr><td></td><td>en-de</td><td>he-en</td><td>zh-en</td></tr><tr><td>BERTscore</td><td>2.08</td><td>0.88</td><td>1.00</td></tr><tr><td>BLEU</td><td>2.20</td><td>0.95</td><td>0.99</td></tr><tr><td>BLEURT-20</td><td>2.68</td><td>0.89</td><td>0.87</td></tr><tr><td>Calibri-COMET22-QE</td><td>2.97</td><td>0.82</td><td>0.85</td></tr><tr><td>Calibri-COMET22</td><td>2.50</td><td>0.79</td><td>0.89</td></tr><tr><td>chrF</td><td>2.07</td><td>0.96</td><td>0.93</td></tr><tr><td>COMET</td><td>2.25</td><td>0.78</td><td>0.87</td></tr><tr><td>CometKiwi</td><td>3.01</td><td>0.83</td><td>0.81</td></tr><tr><td>CometKiwi-XL</td><td>2.90</td><td>0.87</td><td>0.80</td></tr><tr><td>CometKiwi-XXL</td><td>2.65</td><td>0.83</td><td>0.85</td></tr><tr><td>cometoid22-wmt21</td><td>2.81</td><td>0.76</td><td>0.89</td></tr><tr><td>cometoid22-wmt22</td><td>2.74</td><td>0.73</td><td>0.81</td></tr><tr><td>cometoid22-wmt23</td><td>2.58</td><td>0.79</td><td>0.86</td></tr><tr><td>docWMT22CometDA</td><td>2.32</td><td>0.83</td><td>0.95</td></tr><tr><td>docWMT22CometKiwiDA</td><td>2.61</td><td>0.88</td><td>0.95</td></tr><tr><td>eBLEU</td><td>2.49</td><td>0.91</td><td>0.98</td></tr><tr><td>embed-llama</td><td>2.18</td><td>0.92</td><td>1.14</td></tr><tr><td>f200spBLEU</td><td>2.10</td><td>0.94</td><td>0.95</td></tr><tr><td>GEMBA-MQM</td><td>2.89</td><td>0.93</td><td>0.88</td></tr><tr><td>instructscore</td><td>2.20</td><td>0.83</td><td>0.82</td></tr><tr><td>KG-BERTScore</td><td>2.90</td><td>0.85</td><td>0.82</td></tr><tr><td>MaTESe</td><td>2.71</td><td>0.77</td><td>0.80</td></tr><tr><td>mbr-metricx-qe</td><td>2.52</td><td>0.84</td><td>0.79</td></tr><tr><td>MEE4</td><td>2.15</td><td>0.91</td><td>0.94</td></tr><tr><td>MetricX-23-b</td><td>2.67</td><td>0.87</td><td>0.68</td></tr><tr><td>MetricX-23-c</td><td>2.89</td><td>0.90</td><td>0.77</td></tr><tr><td>MetricX-23-QE-b</td><td>2.67</td><td>0.80</td><td>0.71</td></tr><tr><td>MetricX-23-QE-c</td><td>2.25</td><td>0.80</td><td>0.81</td></tr><tr><td>MetricX-23-QE</td><td>2.46</td><td>0.80</td><td>0.74</td></tr><tr><td>MetricX-23</td><td>2.34</td><td>0.90</td><td>0.63</td></tr><tr><td>mre-score-labse-regular</td><td>2.27</td><td>0.91</td><td>0.93</td></tr><tr><td>MS-COMET-QE-22</td><td>2.17</td><td>0.93</td><td>0.90</td></tr><tr><td>prismRef</td><td>2.24</td><td>0.83</td><td>0.99</td></tr><tr><td>prismSrc</td><td>2.09</td><td>0.88</td><td>0.91</td></tr><tr><td>Random-sysname</td><td>2.36</td><td>0.96</td><td>0.95</td></tr><tr><td>scescoreX</td><td>2.20</td><td>0.86</td><td>0.86</td></tr><tr><td>tokengram-F</td><td>2.10</td><td>0.96</td><td>0.94</td></tr><tr><td>XCOMET-Ensemble</td><td>2.44</td><td>0.71</td><td>0.68</td></tr><tr><td>XCOMET-QE-Ensemble</td><td>2.30</td><td>0.72</td><td>0.72</td></tr><tr><td>XCOMET-XL</td><td>2.44</td><td>0.74</td><td>0.67</td></tr><tr><td>XCOMET-XXL</td><td>2.35</td><td>0.74</td><td>0.69</td></tr><tr><td>XLsim</td><td>2.71</td><td>0.90</td><td>0.94</td></tr><tr><td>YiSi-1</td><td>2.44</td><td>0.83</td><td>0.90</td></tr></table>
226
+
227
+ Table 9: Maximum intra-system SysDep score for all metrics and language pairs.
ameasureofthesystemdependenceofautomatedmetrics/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4f39cfe1c4b41601ff5c063f45bbda086ad28bc1b2293309b49bdc26aa7a1135
3
+ size 809295
ameasureofthesystemdependenceofautomatedmetrics/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:90dd05664afdb953c5d7f5554a6a35f61f4d43ce41db897a798d3fc788e73a7d
3
+ size 353397
aneffectiveincorporatingheterogeneousknowledgecurriculumlearningforsequencelabeling/2b3f8d15-5a21-43f0-bb98-02930d2df891_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:704e8315eebd283a7c50dafa112e03e7aa6afe24c31aef55854fbb29c64dc3cf
3
+ size 71758
aneffectiveincorporatingheterogeneousknowledgecurriculumlearningforsequencelabeling/2b3f8d15-5a21-43f0-bb98-02930d2df891_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df088c2e384cf58973e2fa4462022f9b0700828d3450db428c329fe987eb7514
3
+ size 86124
aneffectiveincorporatingheterogeneousknowledgecurriculumlearningforsequencelabeling/2b3f8d15-5a21-43f0-bb98-02930d2df891_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e3e3c5881db20fc75d420eaf42403302e2bf91e71547b47c9ec96e714bb56816
3
+ size 599037
aneffectiveincorporatingheterogeneousknowledgecurriculumlearningforsequencelabeling/full.md ADDED
@@ -0,0 +1,321 @@
 
 
 
 
1
+ # An Effective Incorporating Heterogeneous Knowledge Curriculum Learning for Sequence Labeling
2
+
3
+ Xuemei Tang $^{1}$ , Jun Wang $^{2}$ , Qi Su $^{3*}$ , Chu-Ren Huang $^{1}$ , and Jinghang Gu $^{1*}$
4
+
5
+ <sup>1</sup>The Hong Kong Polytechnic University, Hong Kong, China
6
+
7
+ $^{2}$ Department of Information Management, Peking University, Beijing, China
8
+
9
+ $^{3}$ School of Foreign Languages, Peking University, Beijing, China
10
+
11
+ xuemeitang00@gmail.com, {sukia,junwang}@pku.edu.cn
12
+
13
+ {churen.huang, jinghang.gu}@polyu.edu.hk
14
+
15
+ # Abstract
16
+
17
+ Sequence labeling models often benefit from incorporating external knowledge. However, this practice introduces data heterogeneity and complicates the model with additional modules, leading to increased expenses for training a high-performing model. To address this challenge, we propose a dual-stage curriculum learning (DCL) framework specifically designed for sequence labeling tasks. The DCL framework enhances training by gradually introducing data instances from easy to hard. Additionally, we introduce a dynamic metric for evaluating the difficulty levels of sequence labeling tasks. Experiments on several sequence labeling datasets show that our model enhances performance and accelerates training, mitigating the slow training issue of complex models<sup>1</sup>.
18
+
19
+ # 1 Introduction and Related Work
20
+
21
+ Sequence labeling is a core task in natural language processing (NLP) that involves assigning labels to individual elements in a sequence. Recent advancements in neural network methods have significantly improved performance in sequence labeling tasks (Zhang et al., 2014; Chen et al., 2017a; Zhang et al., 2018; Tian et al., 2020a; Nguyen et al., 2021; Hou et al., 2021; Liu et al., 2021). Some studies have explored integrating external knowledge, such as n-grams, lexicons, and syntax, to enhance these models. However, this integration adds heterogeneity and complexity to the input data. Additionally, incorporating such knowledge often necessitates extra encoding modules, like attention mechanisms (Liu et al., 2021; Tian et al., 2020b) or graph neural networks (GNN) (Chen et al., 2017b; Gui et al., 2019; Nie et al., 2022), which increase model parameters and make the system more computationally expensive to develop.
22
+
23
+ Curriculum Learning (CL) (Bengio et al., 2009) effectively addresses these challenges by simulating the human learning process, where training samples are introduced progressively from easy to hard. This approach facilitates efficient learning from heterogeneous data while enhancing both the speed and performance of the model (Bengio et al., 2009; Wang et al., 2021). CL has shown success in a variety of NLP tasks, including machine translation (Wan et al., 2020), dialogue generation (Zhu et al., 2021), and text classification (Zhang et al., 2022). Data-selection strategies are crucial in CL. However, existing difficulty metrics, such as those of Mohiuddin et al. (2022), Yuan et al. (2022), and Liu et al. (2024), primarily operate at the sentence level; token-level and word-level metrics for measuring the difficulty of sequence labeling tasks are still lacking.
24
+
25
+ To address this gap, in this paper, we introduce a dual-stage curriculum learning (DCL) framework specifically designed for sequence labeling tasks. The first stage is data-level CL, where we train a basic teacher model on all available training data, aiming to alleviate the cold start problem of the student model. The second stage is model-level CL, where we start training the student model on a selected subset of the teacher model and gradually expand the training subset by considering the difficulty of the data and the state of the student model. Furthermore, we explore different difficulty metrics for sequence labeling tasks within the DCL framework. These metrics include a pre-defined metric, such as sentence length, and model-aware metrics, namely Top-N least confidence (TLC), Maximum normalized log-probability (MNLP), and Bayesian uncertainty (BU). Finally, we choose the classical sequence labeling tasks, Chinese word segmentation (CWS), part-of-speech (POS) tagging, and named entity recognition (NER), to validate our proposed approach.
26
+
27
+ # 2 Method
28
+
29
+ The framework proposed in this study consists of three main components: a teacher sequence labeling model, a student sequence labeling model, and a DCL training strategy. It is worth noting that the DCL is independent of the sequence labeling model.
30
+
31
+ Following previous works (Zhang et al., 2018; Gong et al., 2019; Fu et al., 2020), in sequence labeling tasks, we feed an input sentence $X = \{x_{1},\ldots x_{i}\ldots ,x_{M}\}$ into the encoder, and the decoder then outputs a label sequence $Y^{*} = \{y_{1}^{*},\dots y_{i}^{*}\dots y_{M}^{*}\}$ , where $y_{i}^{*}$ represents a label from a pre-defined label set $T$ , and $M$ denotes the length of sentence.
32
+
33
+ # 2.1 Dual-stage Curriculum Learning
34
+
35
+ Algorithm 1 Training Process with DCL
36
+ Input: Original corpus \(\mathcal{D}\), difficulty metric \(S(\cdot)\), teacher model epochs \(E_0\), student model epochs \(E_s\), scheduler \(\lambda\), length function \(|\cdot|\)
37
+ Output: Trained student model \(\theta\)
+ // Data-level Curriculum Learning
+ 1: Train teacher model \(\theta_0\) on \(\mathcal{D}\) for \(E_0\) epochs
+ 2: Compute \(S(\theta_0)\) for each sample in \(\mathcal{D}\)
+ 3: Sort \(\mathcal{D}\) by \(S(\theta_0)\) in ascending order to obtain ranked dataset \(\mathcal{D}_r\)
+ // Model-level Curriculum Learning
+ 4: Initialize \(\lambda_0\) (starting curriculum ratio)
+ 5: \(m \gets \lambda_0 \cdot |\mathcal{D}|\)
+ 6: Student training set \(\mathcal{D}_s \gets \mathcal{D}_r[0:m]\)
+ 7: Remaining data \(\mathcal{D}_o \gets \mathcal{D}_r[m:]\)
+ 8: for epoch \(= 1\) to \(E_s\) do
+ 9:   if \(\lambda < 1\) then
+ 10:    a) Train student model on \(\mathcal{D}_s\) to obtain current \(\theta_*\)
+ 11:    b) Compute \(S(\theta_*)\) for all samples in \(\mathcal{D}_o\)
+ 12:    c) Sort \(\mathcal{D}_o\) by \(S(\theta_*)\) in ascending order to get updated \(\mathcal{D}_r\)
+ 13:    d) Update \(\lambda\) using Eq. 6
+ 14:    e) Calculate new data size: \(m \gets \lambda \cdot |\mathcal{D}| - |\mathcal{D}_s|\)
+ 15:    f) Expand \(\mathcal{D}_s\) with new samples: \(\mathcal{D}_s \mathrel{+}= \mathcal{D}_r[0:m]\)
+ 16:    g) Update remaining data: \(\mathcal{D}_o \gets \mathcal{D}_r[m:]\)
+ 17:  else
+ 18:    Train student model on \(\mathcal{D}_s\)
+ 19:  end if
+ 20: end for
38
+
39
+ We propose a novel dual-stage curriculum learning approach: data-level CL and model-level CL, as detailed in Algorithm 1.
40
+
41
+ At the data level, we first train a basic teacher model on the entire dataset $\mathcal{D}$ for $E_0$ epochs, where $E_0$ is smaller than the total epochs needed for convergence (Line 1). The teacher model $\theta_0$ is then used to calculate difficulty scores $S(\theta_0)$ for each sample (Line 2), and the samples are sorted by difficulty to form a ranked dataset $\mathcal{D}_r$ (Line 3).
42
+
43
+ At the model level, we address the cold-start issue by initializing the student model training set $\mathcal{D}_s$ with a subset of $\mathcal{D}_r$ (Lines 4-6). The proportion of samples, controlled by the parameter $\lambda$ , governs the curriculum learning process. The remaining data, $\mathcal{D}_o$ , is incorporated into $\mathcal{D}_s$ gradually as $\lambda$ increases. The number of new samples to be added is denoted as $m$ (Lines 5, 14).
44
+
45
+ The student model is trained on $\mathcal{D}_s$ to update the model parameters $\theta_{*}$ (Line 10). Then, $\theta_{*}$ is used for the difficulty calculation of the samples in $\mathcal{D}_o$ (Line 11). Next, $\mathcal{D}_o$ is ranked by new difficulty scores, forming a new ranked dataset $\mathcal{D}_r$ (Line 12). The threshold $\lambda$ is updated (Line 13), and new samples are added to $\mathcal{D}_s$ based on $\lambda$ (Lines 14-15). As $\lambda$ approaches 1, all of $\mathcal{D}_o$ is added to $\mathcal{D}_s$ . The complete dataset is then used to train the student model to convergence.
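+
+ As a minimal sketch (not from the paper), the model-level loop can be written as follows; `train_one_epoch`, `difficulty`, and `scheduler_lambda` are assumed helper functions standing in for the student update, the metric \(S(\cdot)\), and Eq. 6.
+
+ ```python
+ # Minimal sketch of the model-level CL loop (Lines 8-20 of Algorithm 1).
+ # train_one_epoch, difficulty, and scheduler_lambda are assumed helpers.
+ def model_level_cl(ranked_data, lambda0, e_grow, epochs,
+                    train_one_epoch, difficulty, scheduler_lambda):
+     total = len(ranked_data)
+     m = int(lambda0 * total)
+     train_set, remaining = list(ranked_data[:m]), list(ranked_data[m:])
+     model, lam = None, lambda0
+     for t in range(1, epochs + 1):
+         model = train_one_epoch(model, train_set)                # train student on D_s
+         if lam < 1 and remaining:
+             remaining.sort(key=lambda s: difficulty(model, s))   # re-rank D_o with current student
+             lam = scheduler_lambda(t, lambda0, e_grow)           # update lambda via Eq. 6
+             m_new = max(0, int(lam * total) - len(train_set))    # number of new samples to add
+             train_set += remaining[:m_new]                       # expand D_s
+             remaining = remaining[m_new:]                        # shrink D_o
+     return model
+ ```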
46
+
47
+ The key elements in Algorithm 1 are the difficulty metric $S(\cdot)$ and threshold $\lambda$ , which control the difficulty ranking of samples and the progression of training, respectively. The design of these components will be discussed in the following sections.
48
+
49
+ # 2.2 Difficulty Metrics
50
+
51
+ We now provide a detailed formulation for calculating the difficulty $S(\cdot)$ in Algorithm 1. In sequence labeling tasks, sample difficulty is tied to individual tokens, but assessing token-level difficulty is challenging. We use uncertainty from active learning to measure the model's confidence in labeling training samples.
52
+
53
+ Bayesian Uncertainty (BU). Following Buntine and Weigend (1991), model uncertainty can be assessed using Bayesian Neural Networks. As noted by Wang et al. (2019), higher predicted probability variance indicates greater uncertainty, suggesting that the model is less confident about the sample. In this work, we employ the widely-used Monte Carlo dropout (Gal and Ghahramani, 2016) to approximate Bayesian inference.
54
+
55
+ First, we apply Monte Carlo dropout (Gal and Ghahramani, 2016) to obtain each sample of token-level tagging probabilities. Specifically, for each token $x_{i}$ , we perform $K$ stochastic forward passes through the model, resulting in $K$ predicted distributions $P(y_{i} \mid x_{i})_{1}, \ldots, P(y_{i} \mid x_{i})_{K}$ . This provides $K$ predictions with associated probabilities for each token. Then the expectation of token-level
56
+
57
+ ![](images/8e021b2b3953632cc6ed4887d49c06f2085c55cbaf090861a5b4f556a8106f05.jpg)
58
+ Figure 1: The framework of the proposed model consists of a teacher model, a student model, and a DCL strategy. Here, "Root $f$ " represents Root function.
59
+
60
+ tagging probability can be approximated by
61
+
62
+ $$
63
+ \mathbb {E} [ P (y _ {i} | x _ {i}) ] \approx \frac {1}{K} \sum_ {k = 1} ^ {K} P (y _ {i} | x _ {i}) _ {k} \tag {1}
64
+ $$
65
+
66
+ The variance of token-level tagging probability on the label set can be approximated by
67
+
68
+ $$
69
+ \operatorname{var}(x_i, \theta) \approx \sum_{y_i \in T} \left( \frac{1}{K} \sum_{k=1}^{K} P(y_i | x_i)_k^2 - \mathbb{E}[P(y_i | x_i)]^2 \right) \tag{2}
70
+ $$
71
+
72
+ Now, we obtain the variance of each token $var(x_i, \theta)$ , then we use the average variance score of all tokens in the sequence as the sentence-level variance as follows.
73
+
74
+ $$
75
+ \operatorname{var}(\theta)_{\text{aver.}} = \frac{1}{M} \sum_{i=1}^{M} \operatorname{var}(x_i, \theta) \tag{3}
76
+ $$
77
+
78
+ The maximum variance score $var(\theta)_{\text{max}}$ also is valuable, which reflects the highest uncertainty in the sequence.
79
+
80
+ $$
81
+ \operatorname{var}(\theta)_{\max} = \max_{i \in [1, M]} \operatorname{var}(x_i, \theta) \tag{4}
82
+ $$
83
+
84
+ The final uncertainty score or difficulty score of each sequence is calculated as follows.
85
+
86
+ $$
87
+ S(\theta)^{BU} = \operatorname{var}(\theta)_{\max} + \operatorname{var}(\theta)_{\text{aver.}} \tag{5}
88
+ $$
89
+
90
+ At both the data level and the model level, the difficulty of training samples is measured by one of the $S(\theta)$ variants defined above.
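+
+ As a minimal NumPy sketch (assuming `probs` holds the $K$ stochastic forward passes of shape [K, M, |T|] obtained with Monte Carlo dropout), Eqs. 1-5 can be computed as:
+
+ ```python
+ import numpy as np
+
+ def bu_difficulty(probs: np.ndarray) -> float:
+     # probs: [K, M, |T|] label probabilities from K MC-dropout forward passes
+     mean_p = probs.mean(axis=0)                                  # Eq. 1: E[P(y_i | x_i)]
+     second_moment = (probs ** 2).mean(axis=0)                    # (1/K) * sum_k P(y_i | x_i)_k^2
+     var_per_token = (second_moment - mean_p ** 2).sum(axis=-1)   # Eq. 2: sum over label set T
+     var_aver = var_per_token.mean()                              # Eq. 3: average over tokens
+     var_max = var_per_token.max()                                # Eq. 4: most uncertain token
+     return float(var_max + var_aver)                             # Eq. 5: BU difficulty score
+ ```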
91
+
92
+ # 2.3 Training Scheduler
93
+
94
+ The training scheduler regulates the pace of CL. In our approach, we employ the Root function as the control mechanism. This function ensures that the model receives sufficient time to learn newly introduced examples while gradually decreasing the number of newly added examples throughout the training process.
95
+
96
+ $$
97
+ \lambda = \min \left(1, \sqrt {\frac {1 - \lambda_ {0} ^ {2}}{E _ {g r o w}} \cdot t + \lambda_ {0} ^ {2}}\right) \tag {6}
98
+ $$
99
+
100
+ where $E_{grow}$ denotes the number of epochs required for $\lambda$ to reach 1, while $\lambda_0 > 0$ represents the initial proportion of the easiest training samples. $t$ indicates the $t_{th}$ training epochs. When $\lambda$ reaches 1, the model has access to the entire training dataset.
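+
+ Equation 6 translates directly into a small pacing function; a sketch (names are ours):
+
+ ```python
+ import math
+
+ def scheduler_lambda(t: int, lambda0: float, e_grow: int) -> float:
+     # Root pacing function of Eq. 6: lambda grows from lambda0 to 1 over e_grow epochs
+     return min(1.0, math.sqrt((1.0 - lambda0 ** 2) / e_grow * t + lambda0 ** 2))
+ ```
+
+ This is the same `scheduler_lambda` helper assumed in the sketch of Section 2.1.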
101
+
102
+ # 3 Experiments
103
+
104
+ # 3.1 Dataset and Experimental Configurations
105
+
106
+ Dataset. Chinese word segmentation (CWS) and part-of-speech (POS) tagging are representative sequence labeling tasks. So we evaluate our approach using three CWS and POS tagging datasets, including Chinese Penn Treebank version $5.0^2$ , $6.0^3$ , and PKU. More dataset details can be found in Appendix A.
107
+
108
+ Teacher and student models. In this study, the basic transfer teacher framework is RoBERTa + Softmax. For the student model, we select two representative complex models introduced by Tian et al. (2020b) and Tang et al. (2024). In their work, Tian et al. (2020b) employed an attention mechanism framework, McASP, to integrate lexicons and n-grams for the joint CWS and POS tagging task, using BERT as the encoder. Meanwhile, Tang et al. (2024) incorporated syntax and semantic knowledge into sequence labeling tasks through a GCN framework called SynSemGCN, with RoBERTa as the sequence encoder.
109
+
110
+ Curriculum learning baselines. We compare our difficulty metric with four baseline difficulty metrics for CL: a. Random: Samples are assigned in random order; b. Sentence Length (Length): Samples are ranked from shortest to longest, based on the intuition that longer sequences are more challenging to encode; (Random and Length metrics represent simple CL, namely without the teacher model). c. Top-N Least Confidence (TLC): The difficulty of a sequence is determined by using the
111
+
112
+ <table><tr><td rowspan="2">Model</td><td rowspan="2">CL Setting</td><td colspan="2">CTB5</td><td colspan="2">CTB6</td><td colspan="2">PKU</td></tr><tr><td>CWS</td><td>POS</td><td>CWS</td><td>POS</td><td>CWS</td><td>POS</td></tr><tr><td>Tian et al. (2020a)</td><td>-</td><td>98.73</td><td>96.60</td><td>97.30</td><td>94.74</td><td>-</td><td>-</td></tr><tr><td>Tian et al. (2020b) (McASP)</td><td>-</td><td>98.77</td><td>96.77</td><td>97.43</td><td>94.82</td><td>-</td><td>-</td></tr><tr><td>Liu et al. (2021)</td><td>-</td><td>-</td><td>97.14</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Tang et al. (2024) (SynSemGCN)</td><td>-</td><td>98.83</td><td>96.77</td><td>97.86</td><td>94.98</td><td>98.05</td><td>95.50</td></tr><tr><td rowspan="5">McASP</td><td>Rand.</td><td>98.81</td><td>96.84</td><td>97.37</td><td>94.90</td><td>98.38</td><td>96.27</td></tr><tr><td>Length</td><td>98.83</td><td>96.85</td><td>97.35</td><td>94.82</td><td>98.40</td><td>96.25</td></tr><tr><td>TLC</td><td>98.83</td><td>96.89</td><td>97.37</td><td>94.83</td><td>98.41</td><td>96.30</td></tr><tr><td>MNLP</td><td>98.85</td><td>96.81</td><td>97.41</td><td>94.92</td><td>98.41</td><td>96.30</td></tr><tr><td>BU</td><td>98.91</td><td>96.87</td><td>97.42</td><td>94.90</td><td>98.43</td><td>96.32</td></tr><tr><td rowspan="5">SynSemGCN</td><td>Rand.</td><td>98.84</td><td>97.86</td><td>97.99</td><td>95.05</td><td>98.48</td><td>96.40</td></tr><tr><td>Length</td><td>98.80</td><td>96.84</td><td>97.40</td><td>94.94</td><td>98.53</td><td>96.48</td></tr><tr><td>TLC</td><td>98.83</td><td>97.81</td><td>97.98</td><td>95.02</td><td>98.61</td><td>96.55</td></tr><tr><td>MNLP</td><td>98.78</td><td>97.72</td><td>98.04</td><td>95.13</td><td>98.56</td><td>96.48</td></tr><tr><td>BU</td><td>98.90</td><td>97.95</td><td>98.05</td><td>95.14</td><td>98.59</td><td>96.54</td></tr></table>
113
+
114
+ Table 1: Experimental results of different models using different CL settings on test sets of three datasets. Here, "CWS" represents the F1 value of CWS, and "POS" means the F1 value of the joint CWS and POS tagging. "-" means without the CL training strategy, and "TLC", "MNLP", and "BU" means using the DCL setting with different difficulty metrics. The maximum F1 scores for each dataset are highlighted.
115
+
116
+ $N$ tokens with the lowest confidence; d. Maximum Normalized Log-Probability (MNLP): The difficulty is assessed by calculating the product of the label probabilities for all tokens in the sequence. The detailed computation processes for TLC and MNLP are provided in Appendix B.
117
+
118
+ For further details on the important hyperparameters of the model, please refer to Appendix C. We discuss the selection process of these parameter values in detail in Appendix D.
119
+
120
+ # 3.2 Overall Experimental Results
121
+
122
+ Table 1 presents the experimental results of baselines and two models with different CL settings. The experimental results reveal several noteworthy conclusions.
123
+
124
+ Firstly, the DCL methodology introduced in this paper is flexible and can be integrated with various complex models. As shown in Table 1, the difficulty metrics proposed here outperform the Random and Length metrics across most datasets. Specifically, the BU metric consistently delivers the best performance on the majority of datasets when applied to the SynSemGCN model, surpassing the TLC and MNLP metrics.
125
+
126
+ Additionally, we compare our approach with previous methods that incorporate external knowledge or resources into the encoder. The results reveal that models using CL exhibit significant performance improvements, surpassing the performance of earlier methods.
127
+
128
+
129
+
130
+ <table><tr><td rowspan="2">Model</td><td colspan="2">CTB5</td><td rowspan="2">Time</td></tr><tr><td>CWS</td><td>POS</td></tr><tr><td>Ours</td><td>98.90</td><td>97.95</td><td>287m</td></tr><tr><td>w/o data CL(BU)</td><td>98.90</td><td>97.88</td><td>-</td></tr><tr><td>w/o model CL(BU)</td><td>98.85</td><td>97.51</td><td>-</td></tr><tr><td>w/o DCL</td><td>98.75</td><td>96.73</td><td>393m</td></tr></table>
131
+
132
+ Table 2: Ablation experimental results of DCL. The baseline model "w/o DCL" denotes the model SynSemGCN; "w/o model CL" means the student model always uses the initial data order sorted by the transfer teacher model; "w/o data CL" indicates the initial training samples for the student model is drawn randomly from the training set; "Ours" indicates "SynSemGCN+DCL(BU)". Both teacher and student models with DCL in this table use BU as the difficulty metric. "Time" means the training time (in minutes).
133
+
134
+ <table><tr><td>Model</td><td colspan="2">CTB5</td></tr><tr><td></td><td>CWS</td><td>POS</td></tr><tr><td>McASP with BU</td><td>98.91</td><td>96.87</td></tr><tr><td>w/o var(θ)max</td><td>98.78</td><td>96.78</td></tr><tr><td>w/o var(θ)aver.</td><td>98.86</td><td>96.74</td></tr><tr><td>McASP</td><td>98.73</td><td>96.60</td></tr></table>
135
+
136
+ Table 3: Ablation experimental results of two parts in BU metrics (Eq. 5).
137
+
138
+ <table><tr><td>Models</td><td>Weibo (Chinese)</td><td>Note4 (Chinese)</td><td>CoNLL-2003 (English)</td></tr><tr><td>BERT</td><td>66.22</td><td>79.15</td><td>90.94</td></tr><tr><td>BERT + CL (Length)</td><td>66.81</td><td>79.63</td><td>90.79</td></tr><tr><td>BERT + DCL (TLC)</td><td>67.52</td><td>79.53</td><td>91.30</td></tr><tr><td>BERT + DCL (MNLP)</td><td>65.73</td><td>79.95</td><td>91.15</td></tr><tr><td>BERT + DCL (BU)</td><td>66.74</td><td>80.02</td><td>91.77</td></tr></table>
139
+
140
+ Table 4: Performance comparison of different difficulty metrics on three NER datasets.
141
+
142
+ ![](images/f481f84f904b4289bd8a68e383287803170401f070e9b438c45c761a5763b893.jpg)
143
+ Figure 2: The F1 scores on the dev set of PKU with different difficulty metrics in the model-level CL training process.
144
+
145
+ # 3.3 Effect of Dual-stage Curriculum Learning
146
+
147
+ In this section, we discuss the impact of DCL. We perform ablation studies by removing either the data-level CL or the model-level CL. The results are summarized in Table 2. Model-level CL has a more significant impact than data-level CL. This is intuitive, as model-level CL influences the entire training process, while data-level CL primarily affects the early stages of student model training.
148
+
149
+ We also compare the training time of models with and without DCL. As shown in Table 2, all models were trained for 50 epochs. The training time for models using DCL includes the time spent on training the teacher model and calculating the difficulty values for the student model. The results indicate that DCL improves model performance and reduces training time by over $25\%$ .
150
+
151
+ # 3.4 Ablation Study on BU Difficulty Components
152
+
153
+ We adopt McASP (Tian et al., 2020b) as the backbone model, incorporating DCL as the training strategy and BU as the difficulty metric. To examine the contribution of each component in BU, we conduct ablation experiments on its two parts: $var(\theta)_{max}$ and $var(\theta)_{aver}$ , as shown in Table 3. Removing either component results in performance degradation, indicating that both components are crucial. Moreover, the comparable drop in performance suggests that $var(\theta)_{max}$ and $var(\theta)_{aver}$ contribute similarly to the effectiveness of DCL.
154
+
155
+
156
+
157
+ # 3.5 Comparison of Difficulty Metrics
158
+
159
+ In this section, we examine the impact of different difficulty metrics during the model-level CL training process for the SynSemGCN model. Figure 2 shows the F1 score change on the PKU dataset development set over the first 10 epochs of model-level CL training. After 10 epochs, all training data are used, so the initial 10 epochs highlight the effect of different metrics. From the figure, we observe that BU, in particular, achieves the best performance, indicating that uncertainty-based metrics can select samples that better align with the model's learning trajectory, leading to faster learning.
160
+
161
+ # 3.6 Generalization Capability
162
+
163
+ We conduct additional experiments to demonstrate the applicability of our method to the NER task. We select two Chinese NER datasets and one English NER dataset: Weibo $^{4}$ , OntoNotes $^{5}$ and CoNLL-2003 (Tjong Kim Sang and De Meulder, 2003). The statistics of the three datasets are shown in Table 6. We compare the performance of models using DCL and CL (Length) with a model without CL on these datasets. As shown in Table 4, the results of the DCL method outperform those of BERT+CL (Length) and BERT (no CL), indicating the effectiveness of our method. This also suggests that our method can be applied to sequence labeling tasks beyond CWS and POS tagging.
164
+
165
+ # 4 Conclusion
166
+
167
+ This paper introduces a novel dual-stage curriculum learning framework aimed at enhancing performance and accelerating the training process for sequence labeling tasks. Focusing on the sequence labeling task of CWS, POS tagging, and NER, this framework demonstrates its effectiveness.
168
+
169
+ # Limitations
170
+
171
+ There are several limitations to our study. First, the design of our difficulty metrics involves the tuning of multiple hyperparameters, which may complicate optimization. Second, we did not explore a curriculum learning process that progresses from hard to easy examples. Third, we focused on a single variation of the $\lambda$ parameter to control CL and did not investigate alternative methods for adding training data.
172
+
173
+ # Acknowledgments
174
+
175
+ This research is supported by the NSFC project "The Construction of the Knowledge Graph for the History of Chinese Confucianism" (Grant No. 72010107003) and The Hong Kong Polytechnic University Project "Evaluating the Syntax-Semantics Knowledge in Large Language Models" (Grant No. P0055270).
176
+
177
+ # References
178
+
179
+ Ankit Agrawal, Sarsij Tripathi, and Manu Vardhan. 2021. Active learning approach using a modified least confidence sampling strategy for named entity recognition. Progress in Artificial Intelligence, 10(2):113-128.
180
+ Yoshua Bengio, Jérôme Louradour, Ronan Collobert, and Jason Weston. 2009. Curriculum learning. In Proceedings of the 26th Annual International Conference on Machine Learning, ICML '09, page 41-48, New York, NY, USA. Association for Computing Machinery.
181
+ Wray L. Buntine and Andreas S. Weigend. 1991. Bayesian back-propagation. Complex Systems.
182
+ Xinchi Chen, Xipeng Qiu, and Xuanjing Huang. 2017a. A feature-enriched neural model for joint chinese word segmentation and part-of-speech tagging. In IJCAI.
183
+ Xinchi Chen, Zhan Shi, Xipeng Qiu, and Xuanjing Huang. 2017b. Dag-based long short-term memory for neural word segmentation. Preprint, arXiv:1707.00248.
184
+ Aron Culotta and Andrew McCallum. 2005. Reducing Labeling Effort for Structured Prediction Tasks.: Fort Belvoir, VA.
185
+ Jinlan Fu, Pengfei Liu, Qi Zhang, and Xuanjing Huang. 2020. Rethink cws: Is Chinese word segmentation a solved task? In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 5676-5686, Online. Association for Computational Linguistics.
186
+
187
+ Yarin Gal and Zoubin Ghahramani. 2016. Dropout as a bayesian approximation: Representing model uncertainty in deep learning. (arXiv:1506.02142). ArXiv:1506.02142 [cs, stat].
188
+ Jingjing Gong, Xinchi Chen, Tao Gui, and Xipeng Qiu. 2019. Switch-lstms for multi-criteria chinese word segmentation. In The Thirty-Third AAAI Conference on Artificial Intelligence, AAAI 2019, The Thirty-First Innovative Applications of Artificial Intelligence Conference, IAAI 2019, The Ninth AAAI Symposium on Educational Advances in Artificial Intelligence, EAAI 2019, Honolulu, Hawaii, USA, January 27 - February 1, 2019, pages 6457-6464. AAAI Press.
189
+ Tao Gui, Yicheng Zou, Qi Zhang, Minlong Peng, Jinlan Fu, Zhongyu Wei, and Xuanjing Huang. 2019. A lexicon-based graph neural network for chinese ner. page 11.
190
+ Yang Hou, Houquan Zhou, Zhenghua Li, Yu Zhang, Min Zhang, Zhefeng Wang, Baoxing Huai, and Nicholas Jing Yuan. 2021. A coarse-to-fine labeling framework for joint word segmentation, pos tagging, and constituent parsing. In Proceedings of the 25th Conference on Computational Natural Language Learning, page 290-299, Online. Association for Computational Linguistics.
191
+ Honglin Liu, Peng Hu, Changqing Zhang, Yunfan Li, and Xi Peng. 2024. Interactive deep clustering via value mining. In Advances in Neural Information Processing Systems, volume 37, pages 42369-42387. Curran Associates, Inc.
192
+ Wei Liu, Xiyan Fu, Yue Zhang, and Wenming Xiao. 2021. Lexicon enhanced chinese sequence labeling using bert adapter. arXiv:2105.07148 [cs]. ArXiv: 2105.07148.
193
+ Tasnim Mohiuddin, Philipp Koehn, Vishrav Chaudhary, James Cross, Shruti Bhosale, and Shafiq Joty. 2022. Data selection curriculum for neural machine translation. (arXiv:2203.13867). ArXiv:2203.13867 [cs].
194
+ Duc-Vu Nguyen, Linh-Bao Vo, Ngoc-Linh Tran, Kiet Van Nguyen, and Ngan Luu-Thuy Nguyen. 2021. Joint chinese word segmentation and part-of-speech tagging via two-stage span labeling. arXiv:2112.09488 [cs]. ArXiv:2112.09488.
195
+ Yu Nie, Yilai Zhang, Yongkang Peng, and Lisha Yang. 2022. Borrowing wisdom from world: modeling rich external knowledge for chinese named entity recognition. Neural Computing and Applications, 34(6):4905-4922.
196
+ Yan Shao, Christian Hardmeier, Jörg Tiedemann, and Joakim Nivre. 2017. Character-based joint segmentation and pos tagging for chinese using bidirectional rnn-crf. Preprint, arXiv:1704.01314.
197
+ Yanyao Shen, Hyokun Yun, Zachary C. Lipton, Yakov Kronrod, and Animashree Anandkumar. 2018. Deep active learning for named entity recognition. (arXiv:1707.05928). ArXiv:1707.05928 [cs].
198
+
199
+ Xuemei Tang, Jun Wang, and Qi Su. 2024. Incorporating knowledge for joint chinese word segmentation and part-of-speech tagging with synsemgcn. Aslib Journal of Information Management, ahead-of-print(ahead-of-print).
200
+ Yuanhe Tian, Yan Song, Xiang Ao, Fei Xia, Xiaojun Quan, Tong Zhang, and Yonggang Wang. 2020a. Joint Chinese word segmentation and part-of-speech tagging via two-way attentions of auto-analyzed knowledge. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 8286-8296, Online. Association for Computational Linguistics.
201
+ Yuanhe Tian, Yan Song, and Fei Xia. 2020b. Joint Chinese word segmentation and part-of-speech tagging via multi-channel attention of character n-grams. In Proceedings of the 28th International Conference on Computational Linguistics, pages 2073-2084, Barcelona, Spain (Online). International Committee on Computational Linguistics.
202
+ Erik F. Tjong Kim Sang and Fien De Meulder. 2003. Introduction to the CoNLL-2003 shared task: Language-independent named entity recognition. In Proceedings of the Seventh Conference on Natural Language Learning at HLT-NAACL 2003, pages 142-147.
203
+ Yu Wan, Baosong Yang, Derek F. Wong, Yikai Zhou, Lidia S. Chao, Haibo Zhang, and Boxing Chen. 2020. Self-paced learning for neural machine translation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), page 1074-1080, Online. Association for Computational Linguistics.
204
+ Kun Wang, Chengqing Zong, and Keh-Yih Su. 2010. A character-based joint model for chinese word segmentation. In Proceedings of the 23rd International Conference on Computational Linguistics (Coling 2010), page 1173-1181, Beijing, China. Coling 2010 Organizing Committee.
205
+ Shuo Wang, Yang Liu, Chao Wang, Huanbo Luan, and Maosong Sun. 2019. Improving back-translation with uncertainty-based confidence estimation. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), page 791-802, Hong Kong, China. Association for Computational Linguistics.
206
+ Xin Wang, Yudong Chen, and Wenwu Zhu. 2021. A survey on curriculum learning. (arXiv:2010.13166). ArXiv:2010.13166 [cs].
207
+ Siyu Yuan, Deqing Yang, Jiaqing Liang, Zhixu Li, Jinxi Liu, Jingyue Huang, and Yanghua Xiao. 2022. Generative entity typing with curriculum learning. (arXiv:2210.02914). ArXiv:2210.02914 [cs].
208
+ Meishan Zhang, Nan Yu, and Guohong Fu. 2018. A simple and effective neural model for joint word
209
+
210
+ segmentation and pos tagging. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 26(9):1528-1538.
211
+ Meishan Zhang, Yue Zhang, Wanxiang Che, and Ting Liu. 2014. Type-supervised domain adaptation for joint segmentation and POS-tagging. In Proceedings of the 14th Conference of the European Chapter of the Association for Computational Linguistics, pages 588-597, Gothenburg, Sweden. Association for Computational Linguistics.
212
+ Xulong Zhang, Jianzong Wang, Ning Cheng, and Jing Xiao. 2022. Improving imbalanced text classification with dynamic curriculum learning. (arXiv:2210.14724). ArXiv:2210.14724 [cs].
213
+ Ying Zhang, Stephan Vogel, and Alex Waibel. 2004. Interpreting bleu/nist scores: How much improvement do we need to have a better system? In Proceedings of the Fourth International Conference on Language Resources and Evaluation (LREC'04), Lisbon, Portugal. European Language Resources Association (ELRA).
214
+ Qingqing Zhu, Xiuying Chen, Pengfei Wu, JunFei Liu, and Dongyan Zhao. 2021. Combining curriculum learning and knowledge distillation for dialogue generation. In *Findings of the Association for Computational Linguistics: EMNLP* 2021, page 1284-1295, Punta Cana, Dominican Republic. Association for Computational Linguistics.
215
+
216
+ # A Dataset
217
+
218
+ The details of the three datasets are given in Table 5. Regarding the CTB datasets, we follow the same approach as previous works (Shao et al., 2017; Tian et al., 2020a) by splitting the data into train/dev/test sets. In the case of PKU, we randomly select $10\%$ of the training data to create the development set.
219
+
220
+ <table><tr><td colspan="2">Datasets</td><td>CTB5</td><td>CTB6</td><td>PKU</td></tr><tr><td rowspan="2">Train</td><td>Sent.</td><td>18k</td><td>23K</td><td>17k</td></tr><tr><td>Word</td><td>494k</td><td>99k</td><td>482k</td></tr><tr><td rowspan="2">Dev</td><td>Sent.</td><td>350</td><td>2K</td><td>1.9k</td></tr><tr><td>Word</td><td>7k</td><td>60K</td><td>53k</td></tr><tr><td rowspan="2">Test</td><td>Sent.</td><td>348</td><td>3K</td><td>3.6k</td></tr><tr><td>Word</td><td>8k</td><td>12k</td><td>97k</td></tr></table>
221
+
222
+ Table 5: Detail of the three datasets.
223
+
224
+ <table><tr><td>Datasets</td><td>Type</td><td>Train</td><td>Dev</td><td>Test</td></tr><tr><td rowspan="2">Weibo</td><td>Sentences</td><td>1.35K</td><td>0.27K</td><td>0.27K</td></tr><tr><td>Entities</td><td>1.89K</td><td>0.39K</td><td>0.42K</td></tr><tr><td rowspan="2">OntoNotes</td><td>Sentences</td><td>15.7K</td><td>4.3K</td><td>4.3K</td></tr><tr><td>Entities</td><td>13.4K</td><td>6.95K</td><td>7.7K</td></tr><tr><td rowspan="2">CoNLL2003</td><td>Sentences</td><td>15.0K</td><td>3.5K</td><td>3.7K</td></tr><tr><td>Entities</td><td>23.5K</td><td>5.9K</td><td>5.7K</td></tr></table>
225
+
226
+ Table 6: Detail of the three NER datasets.
227
+
228
+ # B Difficulty Metric Baselines
229
+
230
+ Top-N least confidence (TLC). Culotta and McCallum (2005) proposed a confidence-based strategy for sequence models called least confidence (LC). This approach sorts the samples in ascending order based on the probability of the most possible label predicted by the model.
231
+
232
+
233
+
234
+ The least confidence of each token is calculated as follows.
235
+
236
+ $$
237
+ \phi_ {(x _ {i}, \theta)} ^ {L C} = 1 - \max _ {y _ {i} \in T} P (y _ {i} | x _ {i}) \tag {7}
238
+ $$
239
+
240
+ where $x_{i}$ is the $i_{th}$ token in the input sentence, $\theta$ denotes the model parameters, $y_{i}$ is a pre-defined label, and $T$ represents the pre-defined label set. $\max_{y_i\in T}P(y_i|x_i)$ is the probability of the most likely label predicted by the model. A smaller $\phi_{(x_i,\theta)}^{LC}$ indicates that the model is more confident in predicting the label of $x_{i}$ .
241
+
242
+ According to Agrawal et al. (2021), the confidence level of a sentence in a sequence labeling task is typically determined based on a set of representative tokens. Therefore, we select the top $N$ tokens with the highest least confidence in the sentence and then use their average value as the difficulty score of the sentence. Finally, the TLC difficulty metric is formulated as follows.
243
+
244
+ $$
245
+ S (\theta) ^ {T L C} = \frac {1}{N} \sum_ {n = 1} ^ {N} \phi_ {(x _ {n}, \theta)} ^ {L C} \tag {8}
246
+ $$
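+
+ A small sketch of Eqs. 7-8 (assuming `probs` is an [M, |T|] array of per-token label probabilities produced by the model):
+
+ ```python
+ import numpy as np
+
+ def tlc_difficulty(probs: np.ndarray, n: int = 5) -> float:
+     # probs: [M, |T|] per-token label probabilities
+     lc = 1.0 - probs.max(axis=-1)    # Eq. 7: least confidence per token
+     top_n = np.sort(lc)[-n:]         # the N tokens with the highest least confidence
+     return float(top_n.mean())       # Eq. 8: TLC difficulty score
+ ```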
247
+
248
+ Maximum normalized log-probability (MNLP). Shen et al. (2018) used MNLP as a confidence strategy to find the product of the maximum probabilities of each token, which is equivalent to taking the logarithm of each probability and summing them. Finally, it is normalized to obtain the confidence score of the sentence as follows.
249
+
250
+ $$
251
+ \prod_ {i = 1} ^ {M} \max _ {y _ {i} \in T} P \left(y _ {i} \mid x _ {i}\right) \Longleftrightarrow \tag {9}
252
+ $$
253
+
254
+ $$
255
+ \sum_ {i = 1} ^ {M} \log \left\{\max _ {y _ {i} \in T} P \left(y _ {i} \mid x _ {i}\right) \right\}
256
+ $$
257
+
258
+ where $M$ is the length of the sentence. The difficulty of a sentence decreases as the confidence level increases. To account for this relationship, we introduce a negative sign. Additionally, in order to reduce the impact of sentence length, we apply a normalization operation. Finally, MNLP is formulated as follows.
259
+
260
+ $$
261
+ S (\theta) ^ {M N L P} = - \frac {1}{M} \sum_ {i = 1} ^ {M} \log \left\{\max _ {y _ {i} \in T} P \left(y _ {i} \mid x _ {i}\right) \right\} \tag {10}
262
+ $$
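+
+ Analogously, Eq. 10 can be sketched as follows (same `probs` convention as the TLC sketch above):
+
+ ```python
+ import numpy as np
+
+ def mnlp_difficulty(probs: np.ndarray) -> float:
+     # probs: [M, |T|] per-token label probabilities
+     max_p = probs.max(axis=-1)              # probability of the most likely label per token
+     return float(-np.mean(np.log(max_p)))   # Eq. 10: negated, length-normalized log-probability
+ ```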
263
+
264
+ # C Parameters Setting
265
+
266
+ The key experimental parameter settings are shown in Table 7.
267
+
268
+ <table><tr><td>Hyper-parameters</td><td>Value</td></tr><tr><td>E0</td><td>5</td></tr><tr><td>Es</td><td>50</td></tr><tr><td>λ0</td><td>0.3</td></tr><tr><td>Egrow</td><td>10</td></tr><tr><td>K</td><td>3</td></tr><tr><td>N</td><td>5</td></tr></table>
269
+
270
+ Table 7: Experiment hyper-parameters setting.
271
+
272
+ <table><tr><td rowspan="2">Egrow</td><td colspan="2">CTB5</td><td colspan="2">PKU</td></tr><tr><td>CWS</td><td>POS</td><td>CWS</td><td>POS</td></tr><tr><td>5</td><td>98.88</td><td>97.89</td><td>98.65</td><td>97.01</td></tr><tr><td>10</td><td>99.06</td><td>98.96</td><td>98.77</td><td>96.97</td></tr><tr><td>15</td><td>98.84</td><td>97.69</td><td>98.70</td><td>96.90</td></tr></table>
273
+
274
+ # D Effect of Hyper-parameters
275
+
276
+ In this section, we explore the impact of the hyperparameters on the performance of DCL. The adjustment of the parameters is based on the SynSemGCN+DCL(BU) model.
277
+
278
+ First, we investigate the impact of the hyperparameter $\lambda_0$ on DCL performance. We conduct the experiments on the CTB5 dataset, tuning the value of $\lambda_0$ in the model-level pacing function Eq. 6, and the experimental results are represented by a line graph as shown in Figure 3. As observed, the model achieves optimal performance when $\lambda_0 = 0.3$ . However, when the value exceeds 0.4, the model's performance gradually deteriorates.
279
+
280
+ Additionally, we examine the impact of $E_{grow}$ in Eq. 6, which controls the number of epochs for $\lambda$ to reach 1. As shown in Table 8, when $E_{grow}$ is set to 10, the model exhibits superior performance on both the CTB5 and PKU datasets. Therefore, we adopt $E_{grow}$ as 10 epochs in our experiments.
281
+
282
+ Next, we assess the impact of the training epochs $E_0$ of the teacher model, which initializes the difficulty ranking of the training data for the student model. We aim to investigate whether a more mature teacher model contributes to improved performance. For this purpose, we conduct experiments on both the CTB5 and PKU datasets, utilizing teacher models trained for 5, 10, and 15 epochs
283
+
284
+ Table 8: The effect of ${E}_{grow}$ in Eq. 6.
285
+
286
+ <table><tr><td rowspan="2">E0</td><td colspan="2">CTB5</td><td colspan="2">PKU</td></tr><tr><td>CWS</td><td>POS</td><td>CWS</td><td>POS</td></tr><tr><td>5</td><td>99.06</td><td>98.96</td><td>98.77</td><td>96.97</td></tr><tr><td>10</td><td>98.98</td><td>97.90</td><td>98.54</td><td>96.69</td></tr><tr><td>15</td><td>98.73</td><td>96.87</td><td>98.53</td><td>96.54</td></tr></table>
287
+
288
+ Table 9: The impact of the number of epochs of teacher model, ${E}_{0}$ .
289
+
290
+ <table><tr><td rowspan="2">Para.</td><td colspan="2">CTB6</td><td colspan="2">PKU</td></tr><tr><td>CWS</td><td>POS</td><td>CWS</td><td>POS</td></tr><tr><td>K=2</td><td>98.17</td><td>95.43</td><td>98.73</td><td>96.58</td></tr><tr><td>K=3</td><td>98.10</td><td>95.59</td><td>98.77</td><td>96.97</td></tr><tr><td>K=4</td><td>98.09</td><td>95.56</td><td>98.69</td><td>96.38</td></tr></table>
291
+
292
+ to rank the initial training data for the student models.
293
+
294
+ The experimental results, as shown in Table 9, reveal that a more mature teacher model does not necessarily lead to better performance. Instead, the student model achieves optimal results when the teacher model is trained for 5 epochs. One possible explanation for this finding is that a teacher model with fewer training epochs aligns better with the initial state of the student model, allowing for a more suitable estimation of sample difficulty.
295
+
296
+ ![](images/4a16a7110995a04d94cf04dc63a2300c8b288f1b0e2074c8e678d09356f04086.jpg)
297
+ Figure 3: The impact of model-level curriculum learning hyper-parameters $\lambda_0$ .
298
+
299
+ Then, we explore the impact of different $K$ values on the BU difficulty metric, which determines the number of dropout times. The experiments are conducted on the CTB6 dataset, and the results are summarized in Table 10. Notably, the model achieves optimal performance when $K = 3$ . Therefore, we select $K = 3$ for all the above experiments.
300
+
301
+ Finally, we evaluate the effect of varying $N$ in the TLC metric. As shown in Table 11, the best performance is achieved when $N = 5$ .
302
+
303
+ # E Statistical Significance Test
304
+
305
+ In this section, we conduct significance testing experiments. Following Wang et al. (2010), we use the bootstrapping method proposed by Zhang et al. (2004), which operates as follows. Starting with a test set $T_{0}$ comprising $N$ test examples, we repeatedly sample $N$ samples from $T_{0}$ to form $T_{1}$ , and then repeat the process
306
+
307
+ Table 10: The effect of the number of dropout passes $K$ in the BU difficulty metric.
308
+
309
+ <table><tr><td rowspan="2">Para.</td><td colspan="2">PKU</td></tr><tr><td>CWS</td><td>POS</td></tr><tr><td>N=1</td><td>98.55</td><td>96.49</td></tr><tr><td>N=2</td><td>98.53</td><td>96.49</td></tr><tr><td>N=3</td><td>98.56</td><td>96.52</td></tr><tr><td>N=4</td><td>98.55</td><td>96.51</td></tr><tr><td>N=5</td><td>98.71</td><td>96.64</td></tr><tr><td>N=6</td><td>98.52</td><td>96.52</td></tr><tr><td>N=7</td><td>98.56</td><td>96.51</td></tr><tr><td>N=8</td><td>98.55</td><td>96.48</td></tr><tr><td>N=9</td><td>98.53</td><td>96.49</td></tr><tr><td>N=10</td><td>98.55</td><td>96.51</td></tr></table>
310
+
311
+ Table 11: The impact of $N$ in TLC difficulty metric.
312
+
313
+ <table><tr><td colspan="2">Models</td><td colspan="2">CTB5</td></tr><tr><td>A</td><td>B</td><td>CWS</td><td>POS</td></tr><tr><td>BERT+DCL(BU)</td><td>BERT</td><td>&gt;</td><td>&gt;</td></tr><tr><td>BERT+DCL(MNLP)</td><td>BERT</td><td>&gt;</td><td>&gt;</td></tr><tr><td>BERT+DCL(TLC)</td><td>BERT</td><td>&gt;</td><td>&gt;</td></tr><tr><td>BERT+DCL(BU)</td><td>BERT+CL(Length)</td><td>&gt;</td><td>&gt;</td></tr><tr><td>BERT+DCL(MNLP)</td><td>BERT+CL(Length)</td><td>&gt;</td><td>&gt;</td></tr><tr><td>BERT+DCL(TLC)</td><td>BERT+CL(Length)</td><td>&gt;</td><td>&gt;</td></tr></table>
314
+
315
+ Table 12: Statistical significance test of F-score for our method and baselines on the CTB5 dataset.
316
+
317
+ $M$ times to form the test set collection, $\{T_1,T_2,\dots,T_M\}$ , where $M$ is set to 1000 in our testing procedure. Two systems, denoted as $A$ and $B$ , are assessed on the initial test set $T_{0}$ , resulting in scores $a_0$ and $b_{0}$ , respectively. The disparity between the two systems, labeled as $\delta_0$ , is calculated as $\delta_0 = a_0 - b_0$ . Repeating this process for each test set produces a set of $M$ discrepancy scores, denoted as $\{\delta_0,\delta_1,\ldots ,\delta_M\}$ .
318
+
319
+ Following the methodology proposed by Zhang et al. (2004), we compute the $95\%$ confidence interval for the discrepancies (i.e., the 2.5th percentile and the 97.5th percentile) between the two models. If the confidence interval does not overlap with zero, it is affirmed that the differences between systems A and B are statistically significant (Zhang et al., 2004).
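+
+ A compact sketch of this paired bootstrap test (assuming `scores_a` and `scores_b` are hypothetical per-example evaluation scores of systems A and B on $T_0$):
+
+ ```python
+ import numpy as np
+
+ def paired_bootstrap(scores_a: np.ndarray, scores_b: np.ndarray,
+                      m: int = 1000, seed: int = 0) -> tuple:
+     # returns the 95% confidence interval of the difference delta = score(A) - score(B)
+     rng = np.random.default_rng(seed)
+     n = len(scores_a)
+     deltas = np.empty(m)
+     for j in range(m):
+         idx = rng.integers(0, n, size=n)                          # resample N examples with replacement (T_j)
+         deltas[j] = scores_a[idx].mean() - scores_b[idx].mean()   # delta_j on the resampled test set
+     lower, upper = np.percentile(deltas, [2.5, 97.5])
+     return float(lower), float(upper)   # A is significantly better than B if lower > 0
+ ```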
320
+
321
+ Table 12 lists the significant differences between our system and the baseline system, where “>” indicates that the average value of $\delta$ exceeds zero, meaning that System A is better than System B; “<” indicates that the average value of $\delta$ does not exceed zero, meaning that System A is worse than System B; “~” indicates that there is no significant difference between the two systems. Finally, the comparison also indicates that our models are superior to the baseline.
aneffectiveincorporatingheterogeneousknowledgecurriculumlearningforsequencelabeling/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6b8aa42f32a41222edb183603d93472d5d109d82e5ad88ced07ebf39f4098ed4
3
+ size 459278
aneffectiveincorporatingheterogeneousknowledgecurriculumlearningforsequencelabeling/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:16e59502a3a42475901e9a8eabcaa5dec4a16a6bb1c79c382317c2b9b3a0bbdc
3
+ size 378690
areoptimalalgorithmsstilloptimalrethinkingsortinginllmbasedpairwiserankingwithbatchingandcaching/d067ed0d-b932-4f95-a85a-ef137c47e283_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7a606a69717311cfc2f3f14966b4996a146156b33d9a6fecfa645a388116a0a
3
+ size 59413
areoptimalalgorithmsstilloptimalrethinkingsortinginllmbasedpairwiserankingwithbatchingandcaching/d067ed0d-b932-4f95-a85a-ef137c47e283_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aefb375a3744f1750d242b6982dad56a370eae6ed8a494ccbd273cb6c15182ba
3
+ size 67481
areoptimalalgorithmsstilloptimalrethinkingsortinginllmbasedpairwiserankingwithbatchingandcaching/d067ed0d-b932-4f95-a85a-ef137c47e283_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a89113b5285444a326a0fb641c6a2285d342d4f9390f2b71f99a7f6d46b3e853
3
+ size 539468
areoptimalalgorithmsstilloptimalrethinkingsortinginllmbasedpairwiserankingwithbatchingandcaching/full.md ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Are Optimal Algorithms Still Optimal? Rethinking Sorting in LLM-Based Pairwise Ranking with Batching and Caching
2
+
3
+ Juan Wisznia $^{1,3}$ , Cecilia Bolanos $^{1,2}$ , Juan Tollo $^{1}$ , Giovanni Marraffini $^{1,3}$ , Agustin Gianolini $^{1}$ , Noe Hsueh $^{1}$ , Luciano Del Corro $^{1,3}$
4
+
5
+ {jwisznia, cbolanos, jtollo, agianolini, nhsueh, Idelcorro}@dc.uba.ar, giovanni.marraffini@gmail.com
6
+
7
+ $^{1}$ Departamento de Computación, FCEyN, Universidad de Buenos Aires
8
+ $^{2}$ Instituto de Ciencias de la Computación, FCEyN, Universidad de Buenos Aires
9
+ $^{3}$ Lumina Labs*
10
+
11
+ # Abstract
12
+
13
+ We introduce a novel framework for analyzing sorting algorithms in pairwise ranking prompting (PRP), re-centering the cost model around LLM inferences rather than traditional pairwise comparisons. While classical metrics based on comparison counts have traditionally been used to gauge efficiency, our analysis reveals that expensive LLM inferences overturn these predictions; accordingly, our framework encourages strategies such as batching and caching to mitigate inference costs. We show that algorithms that are optimal in the classical setting can lose their efficiency advantage under these optimizations once LLM inferences dominate the cost.
14
+
15
+ # 1 Introduction
16
+
17
+ LLMs have ushered in a new era of language understanding (Brown et al., 2020). Alongside these developments, LLM-based reranking has emerged in the information retrieval (IR) domain (Nogueira et al., 2020; Zhuang et al., 2023; Ma et al., 2023; Sun et al., 2024). Instead of using custom finetuned rankers, off-the-shelf LLMs, often combined with a first-stage retriever, can refine search results in a zero-shot manner. The practical significance of reranking is evident in its rapid commercial adoption, with major cloud platforms now offering it as a core functionality. LLM-based reranking enables robust ranking quality without the overhead of dataset-specific models, which is crucial, for example, for the widespread adoption of Retrieval-Augmented Generation across both cloud-based and on-prem deployments.
18
+
19
+ A notable exemplar in zero-shot LLM-based reranking is Pairwise Ranking Prompting (PRP) (Qin et al., 2024; Luo et al., 2024), which compares two candidate documents at a time. Despite its conceptual elegance and model-agnostic nature, PRP faces significant computational challenges in practice:
20
+
21
+ each pairwise comparison requires an expensive LLM inference, making a naive all-pairs approach prohibitively costly (Qin et al., 2024). This has prompted both researchers and practitioners to adopt classical sorting algorithms that minimize the number of comparisons (Qin et al., 2024), as they offer theoretical guarantees.
22
+
23
+ We argue that classical analysis is not adequate for PRP as it treats each comparison as an atomic, uniform-cost operation, whereas in an LLM-based system, each comparison is an expensive inference call. This gap between classical and LLM-centric views can invert conventional wisdom under certain basic optimizations, causing algorithms that appear optimal under traditional assumptions to underperform in real-world scenarios, and vice versa.
24
+
25
+ To address these limitations, we introduce a framework that redefines how ranking algorithms are analyzed in an LLM context. Rather than merely counting comparisons, we focus on LLM inference calls as the primary cost driver. We show that basic optimizations—such as caching and batch inference—can significantly alter algorithms' efficiency. Furthermore, we propose Quicksort as an efficient reranking algorithm, demonstrating its potential when leveraging these optimizations. To the best of our knowledge, this is the first time Quicksort has been applied in this context.
26
+
27
+ Caching and batching have no effect on ranking quality; the exact same comparisons are performed, only much faster. While caching repeated queries and batching independent operations are seemingly trivial adaptations, they significantly affect the choice of the optimal algorithm, challenging previous results (Qin et al., 2024; Zhuang et al., 2024). For instance, Heapsort is no longer the preferred choice: a mere batch size of 2 results in Quicksort issuing $44\%$ fewer inference calls than Heapsort.
28
+
29
+ We validate our findings on standard ranking benchmarks (TREC DL 2019 and 2020 (Craswell
30
+
31
+ et al., 2020, 2021) and BEIR (Thakur et al., 2021)). By re-framing sorting theory around real-world LLM inference costs, we offer both practical guidance for zero-shot reranking and a theoretical basis for understanding algorithmic efficiency under modern IR constraints.
32
+
33
+ # 2 Related Work
34
+
35
+ Traditional IR systems require extensive labeled data and struggle with cross-domain generalization (Matveeva et al., 2006; Wang et al., 2011). LLMs have transformed this landscape by enabling zero-shot ranking, and PRP then emerged as a particularly effective technique (Qin et al., 2024; Luo et al., 2024). PRP's key advantage lies in its model-agnostic nature: by comparing document pairs through simple prompts, it can leverage any LLM without training or access to model internals, making it especially valuable as newer models emerge. However, PRP faces significant computational challenges, as each pairwise comparison requires an expensive LLM inference, with costs scaling quadratically with document count.
36
+
37
+ To address these computational demands, recent work has incorporated sorting algorithms into the PRP framework (Qin et al., 2024; Zhuang et al., 2024). While theoretically well-grounded, these approaches adopt the cost framework of traditional sorting theory, where comparisons are treated as atomic operations with uniform costs. However, in LLM-based ranking, inferences are orders of magnitude more expensive than other operations. This mismatch between classical cost assumptions and LLM-specific characteristics suggests the need to reevaluate sorting algorithm selection and optimization for real-world performance.
38
+
39
+ # 3 Revisiting sorting algorithms
40
+
41
+ This section examines how small yet impactful optimizations (caching, batching, and top-k extraction) applied to classical algorithms (Bubblesort, Quicksort, and Heapsort) can significantly shift which algorithm is most efficient in LLM-based ranking. While these adaptations are not exhaustive, they demonstrate how our framework redefines efficiency based on LLM-specific costs, where reducing inference steps matters more than traditional complexity metrics. Importantly, these optimizations preserve the final ranking outcome: the same comparisons are performed but are batched or reused, leading to fewer inference calls
42
+
43
+ and a much faster process. Table 1 summarizes the optimizations applicable to each algorithm.
44
+
45
+ <table><tr><td>Algorithm</td><td>Batching</td><td>Caching</td><td>Top-k Efficiency</td></tr><tr><td>Heapsort</td><td>X</td><td>X</td><td>✓</td></tr><tr><td>Bubblesort</td><td>X</td><td>✓</td><td>✓</td></tr><tr><td>Quicksort</td><td>✓</td><td>X</td><td>✓1</td></tr></table>
46
+
47
+ Table 1: Summary of optimization techniques under LLM-centric costs.
48
+
49
+ ![](images/df93d4d39f0237893c36ab7696a412ffe18eb9ef0cc42de90b4241b80c0668ed.jpg)
50
+ Figure 1: Bubblesort with Caching. Solid arrows show inferences, dashed arrows cached comparisons.
51
+
52
+ Heapsort has been favored in early PRP research (Qin et al., 2024) for its $O(n\log n)$ complexity and natural support for top-k extraction. However, it cannot be adapted to batching or caching due to its binary tree structure: each comparison is inherently sequential and unique, which makes it impossible to group comparisons into a single inference step (batching) or to reuse prior results (caching) effectively.
53
+
54
+ Bubblesort has been considered expensive due to its $O(n^{2})$ complexity, but it can be adapted via caching because it repeats many adjacent comparisons across passes (Figure 1). The memory overhead remains negligible, requiring only a small dictionary to store prior results. While its pairwise swap structure precludes batching (comparisons cannot be grouped into single inferences), it inherently supports top-k extraction (Qin et al., 2024), enhancing its practicality for ranking applications.
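+
+ As a concrete illustration, the sketch below (not our exact implementation) caches pairwise LLM judgments across Bubblesort passes. Here `llm_compare(a, b)` is an assumed stand-in for one PRP inference that returns True if document `a` should rank above document `b`, and documents are assumed to be hashable (e.g., document IDs).
+
+ ```python
+ def bubblesort_cached(docs, llm_compare, top_k=None):
+     """Bubblesort for PRP with a comparison cache; returns the ranking and the inference count."""
+     cache = {}                       # (a, b) -> bool; only a small dictionary is needed
+     inference_calls = 0
+
+     def compare(a, b):
+         nonlocal inference_calls
+         if (a, b) not in cache:
+             inference_calls += 1     # a cache miss costs one LLM inference
+             cache[(a, b)] = llm_compare(a, b)
+         return cache[(a, b)]         # repeated adjacent comparisons are free
+
+     docs = list(docs)
+     passes = len(docs) - 1 if top_k is None else min(top_k, len(docs) - 1)
+     for i in range(passes):          # each pass bubbles the next-best document to position i
+         for j in range(len(docs) - 1, i, -1):
+             if compare(docs[j], docs[j - 1]):
+                 docs[j], docs[j - 1] = docs[j - 1], docs[j]
+     return docs, inference_calls
+ ```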
55
+
56
+ Quicksort uniquely enables batching through its partition phase, where multiple elements can be evaluated simultaneously against a pivot (Figure 2). However, it has limited potential for caching, as pivot comparisons are typically non-repeating. Despite this, the Partial Quicksort variant (Martinez, 2004) enhances its efficiency by enabling early termination for top-k extraction. To the best of our knowledge, we are the first to introduce Quicksort in PRP, as prior research focused on Heapsort and Bubblesort due to their top-k properties.
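+
+ The sketch below (illustrative, not our exact implementation) shows a partial Quicksort whose partition step is batched. `llm_compare_batch(pivot, chunk)` is an assumed stand-in for one LLM inference that judges every document in `chunk` against the pivot and returns one boolean per document (True if it should rank above the pivot). A last-element pivot is used for brevity; our experiments use median-of-three and other pivot strategies.
+
+ ```python
+ def partial_quicksort(docs, llm_compare_batch, k, batch_size=8):
+     """Top-k Quicksort for PRP; each partition resolves up to batch_size comparisons per inference."""
+     docs = list(docs)
+
+     def sort(lo, hi):
+         if hi - lo <= 1 or lo >= k:                  # early termination: ranks beyond top-k are never sorted
+             return
+         pivot, rest = docs[hi - 1], docs[lo:hi - 1]  # last-element pivot for brevity
+         above, below = [], []
+         for start in range(0, len(rest), batch_size):
+             chunk = rest[start:start + batch_size]   # one batched LLM inference per chunk
+             for doc, wins in zip(chunk, llm_compare_batch(pivot, chunk)):
+                 (above if wins else below).append(doc)
+         docs[lo:hi] = above + [pivot] + below        # documents ranked above the pivot come first
+         mid = lo + len(above)
+         sort(lo, mid)
+         sort(mid + 1, hi)                            # skipped automatically once lo >= k
+
+     sort(0, len(docs))
+     return docs[:k]
+ ```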
57
+
58
+ ![](images/312e1fe82de1d648d99534730382a10f12d9cb6c91ccbdb1608de8a1656569fe.jpg)
59
+ Figure 2: a. One comparison per inference, as assumed in the classical analysis of these algorithms. b. Multiple comparisons per inference, gaining more information per inference.
60
+
61
+ ![](images/b122001427be3f95812edcd86b9378a40ab911abc969ced7e71a709ffc86ba09.jpg)
62
+
63
+ # 4 Experimental Setup
64
+
65
+ Hardware: Our analysis is theoretical and agnostic to hardware. However, to validate that our cost assumptions align with practical throughput behavior, we ran two lightweight empirical checks on NVIDIA A100 (40GB), RTX 3090, and RTX 2080 Ti. These include single forward-pass latency measurements across batch sizes and GPUs, and full PRP reranking with Quicksort and Heapsort at batch sizes 2 and 128 for the A100 (see Section 5).
66
+
67
+ Metric: Instead of focusing on traditional comparison counts, we shifted to the number of LLM inference calls, which are the dominant computational cost. Each inference—regardless of token count or monetary cost—is treated as a uniform cost unit. We disregard token counts and dollar costs because these are determined by the dataset and pre-trained model. Moreover, standard preprocessing (e.g., chunking/truncation) ensures uniformity across documents. We show mean and standard deviation across datasets and LLMs. Individual results can be found in Appendix A.
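+
+ To make this cost model concrete, the following sketch (purely illustrative) separates the two quantities we track: the number of pairwise comparisons and the number of LLM inference calls, where one call may cover a whole batch of comparisons or be avoided entirely by a cache hit.
+
+ ```python
+ class InferenceCounter:
+     """Tracks comparisons vs. LLM inference calls under the LLM-centric cost model."""
+
+     def __init__(self):
+         self.comparisons = 0
+         self.inferences = 0
+
+     def record_batch(self, n_comparisons):
+         """One LLM call resolving n_comparisons pairwise judgments (batching)."""
+         self.comparisons += n_comparisons
+         self.inferences += 1
+
+     def record_cache_hit(self, n_comparisons=1):
+         """Judgments answered from cache: comparisons grow, the inference count does not."""
+         self.comparisons += n_comparisons
+ ```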
68
+
69
+ LLMs: Following Qin et al. (2024) and Zhuang et al. (2024), we used Flan-T5-L (780M), Flan-T5-XL (3B), Flan-T5-XXL (11B) (Chung et al., 2022), Mistral-Instruct (7B) (Jiang et al., 2023), and Llama-3-Instruct (8B) (Dubey et al., 2024). For the latency analysis, we implemented batch processing with Flan-T5-Large using the Hugging Face Transformers library (Wolf et al., 2020).
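+
+ For reference, the following sketch (not our exact code) shows how batched pairwise prompts can be run with Flan-T5-Large through the Hugging Face Transformers library; the prompt wording is only an assumed example.
+
+ ```python
+ import torch
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+
+ tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-large")
+ model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-large")
+
+ def batched_prp(query, pairs, batch_size=8):
+     """Return the model's 'A'/'B' preference for each (doc_a, doc_b) pair, batched per forward pass."""
+     preferences = []
+     for i in range(0, len(pairs), batch_size):
+         prompts = [
+             f"Given a query \"{query}\", which passage is more relevant?\n"
+             f"Passage A: {a}\nPassage B: {b}\nAnswer with A or B."
+             for a, b in pairs[i:i + batch_size]
+         ]
+         inputs = tokenizer(prompts, return_tensors="pt", padding=True, truncation=True)
+         with torch.no_grad():
+             outputs = model.generate(**inputs, max_new_tokens=2)   # one inference covers the whole batch
+         preferences.extend(tokenizer.batch_decode(outputs, skip_special_tokens=True))
+     return preferences
+ ```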
70
+
71
+ Algorithms: (1) Bubblesort, (2) Quicksort with median-of-three pivot strategy (other strategies are shown in Appendix A), and (3) Heapsort.
72
+
73
+ Datasets: TREC DL 2019 (43 queries) and 2020 (200) (Craswell et al., 2020, 2021) as well as subsets from BEIR (Thakur et al., 2021): Webis-Touche2020 (49), NFCorpus (295), Large-Scifact
74
+
75
+ (300), TREC-COVID (50), FiQA (648), and DBpedia-Entity (400). Following standard practices, we re-ranked the top 100 BM25-retrieved documents per query (Robertson and Zaragoza, 2009; Qin et al., 2024; Zhuang et al., 2024; Luo et al., 2024) to identify the top-10 most relevant ones efficiently.
76
+
77
+ # 5 Results and Discussion
78
+
79
+ Cost model analysis: Figure 3 illustrates the number of inferences performed by Heapsort and Quicksort across different batch sizes. When the batch size is set to 1 (equivalent to counting individual comparisons), Heapsort emerges as the most efficient algorithm, consistent with traditional sorting analysis and previous results (Qin et al., 2024; Zhuang et al., 2024). However, as the batch size increases, Quicksort significantly outperforms it, since multiple comparisons can be resolved in a single inference. For instance, with a batch size of 2, the average number of inference calls is already reduced by almost $45\%$.
80
+
81
+ Figure 4 compares the number of inferences performed by Bubblesort with and without caching. Bubblesort benefits substantially from caching at a minimal storage overhead: because it repeats many comparisons, a large fraction of them can be served from cache, reducing the total number of inferences by an average of $46\%$.
82
+
83
+ Importantly, despite these optimizations reducing the number of LLM inferences, they do not alter the final ranking outcome. The same comparisons are performed, but they are either batched together or retrieved from cache rather than recomputed, leading to fewer inference calls and a much faster process.
84
+
85
+ Latency Analysis: Figure 5 shows single-pass
86
+
87
+ ![](images/bfec7e982db30ef61cd4a300450d93b3a77cc1821cc39f57a843dceb4c9ed5ff.jpg)
88
+ Figure 3: Mean and SD inference count for Quicksort and Heapsort across batch sizes. Black number: Heapsort vs. Quicksort using batching gain; Green number: Quicksort batching vs. no batching gain.
89
+
90
+ ![](images/92c5de6de64b642146f7fa35c242ba21b1f377e1ef57495c3a97ea1266e7766d.jpg)
91
+ Figure 5: Speed-up vs batch size for Flan-T5-Large (log-log). Dashed red: ideal linear scaling.
92
+
93
+ speed-ups on A100, RTX 3090, and RTX 2080 Ti for different batch sizes. A100 achieves near-ideal scaling up to batch size 8, with throughput continuing to improve—albeit with diminishing returns—up to batch 128. On 3090 and 2080 Ti, ideal scaling occurs up to batch sizes 2 and 4, respectively, with throughput saturating between batch sizes 32 and 64. These results indicate that while theoretical efficiency peaks at larger batch sizes, practical efficiency is constrained by GPU architecture. The point at which near-ideal conditions are met before saturation sets in is GPU-dependent.
94
+
95
+ We also ran the full PRP pipeline over BEIR on the A100 using both batch size 2 and 128 for
96
+
97
+ ![](images/5a0ad0e0a2f9b7daf9ea1441f09f377d7c1dcb3520518580b0d219c55410d304.jpg)
98
+ Figure 4: Mean and SD inference count for Bubblesort, with and without cache. Green numbers indicate the percentage gain with cache. The dashed line represents the mean inference count for Heapsort and Quicksort.
99
+
100
+ Quicksort and Heapsort. At batch 128, Quicksort is $5.52 \times$ faster than Heapsort while achieving similar nDCG@10 (See Appendix A, Tables 2-3). Experiments show that the theoretical gains from batching and algorithmic design hold in end-to-end ranking performance.
101
+
102
+ Ranking Performance: Figure 6 shows that the ranking performance of all these algorithms across optimization settings remains relatively stable for a given dataset, allowing users to prioritize computational efficiency and hardware constraints before performance when choosing an algorithm.
103
+
104
+ Our findings provide detailed insight into the behavior of sorting algorithms in LLM-based pairwise ranking, highlighting their respective benefits and drawbacks and enabling users to select the most suitable algorithm based on their specific resources and requirements. More specifically:
105
+
106
+ Quicksort is ideal for latency-sensitive applications with batch sizes $\geq 2$ , leveraging hardware parallelism to outperform alternatives.
107
+
108
+ Bubblesort achieves a substantial efficiency gain with caching. Its remarkable performance on some datasets such as SciFact and Touche2020 makes it a more competitive choice with this adaptation. Bubblesort also tends to be effective in the LLM setting, where pairwise transitivity is not guaranteed: adjacent pairwise comparisons appear to be more stable and yield better results in the context of PRP (Luo et al., 2024).
109
+
110
+ Heapsort, once the gold standard for its theoretical $O(n\log n)$ complexity, retains its advantage only when batching is unavailable, a setting rarely seen in LLM ranking.
111
+
112
+ ![](images/ae3faf1f9987d394e7def6bf9594b93e8b5c287133a48ef291df79f6f6306c79.jpg)
113
+ Figure 6: Algorithms' performance across datasets.
114
+
115
+ # 6 Conclusion
116
+
117
+ We introduced a framework for optimizing sorting algorithms in LLM-based pairwise ranking by prioritizing inference calls over comparison counts. We found that classical efficiency assumptions break down under LLM workloads, revealing Quicksort as a natural, yet unexplored, choice of algorithm. This demonstrates that inference efficiency is a property deeply tied to algorithmic design. We hope this framework encourages further exploration of algorithms better aligned with LLM cost structures.
118
+
119
+ # 7 Limitations
120
+
121
+ While our work showcases the efficacy of batching and caching optimizations in mitigating the high inference costs of LLM-based pairwise ranking, certain limitations remain. First, sorting algorithms work best when transitivity of pairwise comparisons holds, but LLMs can yield inconsistent judgments for near-equivalent or context-sensitive documents. Addressing this inconsistency requires dedicated methods to detect and resolve intransitive preferences, which remains an open area of
122
+
123
+ research. Future work could examine how much performance is degraded and whether ranking algorithms that do not assume transitivity can actually offer any practical advantage.
124
+
125
+ Additionally, although our experiments were limited to medium-sized LLMs for budgetary and computational reasons, larger models could further amplify the benefits observed here. Future research should explore how our framework performs with these more powerful models, potentially unlocking even greater gains in inference efficiency. Moreover, hybrid methods that unify the strengths of multiple algorithms, as well as active ranking strategies or noisy sorting algorithms (Mikhailiuk et al., 2020; Bai and Coester, 2023), are fully compatible with our approach: they rely on additional computations separate from the LLM inferences themselves, thereby enabling more informed—and thus fewer—LLM queries. Ultimately, our findings underscore the need for ongoing algorithmic innovation that exploits LLM-specific cost structures, paving the way for more efficient, scalable, and broadly applicable ranking solutions.
126
+
127
+ # References
128
+
129
+ Xingjian Bai and Christian Coester. 2023. Sorting with predictions.
130
+
131
+ Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language models are few-shot learners.
132
+
133
+ Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Yunxuan Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, Albert Webson, Shixiang Shane Gu, Zhuyun Dai, Mirac Suzgun, Xinyun Chen, Aakanksha Chowdhery, Alex Castro-Ros, Marie Pellat, Kevin Robinson, Dasha Valter, Sharan Narang, Gaurav Mishra, Adams Yu, Vincent Zhao, Yanping Huang, Andrew Dai, Hongkun Yu, Slav Petrov, Ed H. Chi, Jeff Dean, Jacob Devlin, Adam Roberts, Denny Zhou, Quoc V. Le, and Jason Wei. 2022. Scaling instruction-finetuned language models.
134
+
135
+ Nick Craswell, Bhaskar Mitra, Emine Yilmaz, and Daniel Campos. 2021. Overview of the trec 2020 deep learning track.
136
+
137
+ Nick Craswell, Bhaskar Mitra, Emine Yilmaz, Daniel Campos, and Ellen M. Voorhees. 2020. Overview of the trec 2019 deep learning track.
138
+ Abhimanyu Dubey et al. 2024. The llama 3 herd of models.
139
+ C.A.R. Hoare. 1962. Quicksort. BCS, Computer Journal, 5(1):10-15.
140
+ Albert Q. Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, Lélio Renard Lavaud, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, and William El Sayed. 2023. Mistral 7b.
141
+ Jian Luo, Xuanang Chen, Ben He, and Le Sun. 2024. PRP-graph: Pairwise ranking prompting to LLMs with graph aggregation for effective text re-ranking. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics, pages 5766-5776, Bangkok, Thailand. Association for Computational Linguistics.
142
+ Xueguang Ma, Xinyu Zhang, Ronak Pradeep, and Jimmy Lin. 2023. Zero-shot listwise document reranking with a large language model.
143
+ Conrado Martinez. 2004. Partial quicksort. In In Proceedings of the 6th ACM SIAM Workshop on Algorithm Engineering and Experiments and 1st ACM SIAM Workshop on Analytic Algorithms and Combinatorics, pages 224-228.
144
+ Irina Matveeva, Chris Burges, Timo Burkard, Andy Laucius, and Leon Wong. 2006. High accuracy retrieval with multiple nested ranker. In Proceedings of the 29th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '06, page 437-444, New York, NY, USA. Association for Computing Machinery.
145
+ Aliaksei Mikhailiuk, Clifford Wilmot, María Pérez-Ortiz, Dingcheng Yue, and Rafal Mantiuk. 2020. Active sampling for pairwise comparisons via approximate message passing and information gain maximization. CoRR, abs/2004.05691.
146
+ Rodrigo Nogueira, Zhiying Jiang, and Jimmy Lin. 2020. Document ranking with a pretrained sequence-to-sequence model.
147
+ Zhen Qin, Rolf Jagerman, Kai Hui, Honglei Zhuang, Junru Wu, Le Yan, Jiaming Shen, Tianqi Liu, Jialu Liu, Donald Metzler, Xuanhui Wang, and Michael Bendersky. 2024. Large language models are effective text rankers with pairwise ranking prompting. In Findings of the Association for Computational Linguistics: NAACL 2024, pages 1504-1518, Mexico City, Mexico. Association for Computational Linguistics.
148
+ Stephen E. Robertson and Hugo Zaragoza. 2009. The probabilistic relevance framework: Bm25 and beyond. Found. Trends Inf. Retr., 3:333-389.
149
+
150
+ Robert Sedgewick. 1975. Quicksort. Outstanding Dissertations in the Computer Sciences. Garland Publishing, New York.
151
+ Weiwei Sun, Lingyong Yan, Xinyu Ma, Shuaiqiang Wang, Pengjie Ren, Zhumin Chen, Dawei Yin, and Zhaochun Ren. 2024. Is chatgpt good at search? investigating large language models as re-ranking agents.
152
+ Nandan Thakur, Nils Reimers, Andreas Rückle, Abhishek Srivastava, and Iryna Gurevych. 2021. Beir: A heterogenous benchmark for zero-shot evaluation of information retrieval models.
153
+ Lidan Wang, Jimmy Lin, and Donald Metzler. 2011. A cascade ranking model for efficient ranked retrieval. In Proceedings of the 34th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '11, page 105-114, New York, NY, USA. Association for Computing Machinery.
154
+ Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumont, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Remi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander M. Rush. 2020. Huggingface's transformers: State-of-the-art natural language processing.
155
+ Honglei Zhuang, Zhen Qin, Rolf Jagerman, Kai Hui, Ji Ma, Jing Lu, Jianmo Ni, Xuanhui Wang, and Michael Bendersky. 2023. Rankt5: Fine-tuning t5 for text ranking with ranking losses. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '23, page 2308-2313, New York, NY, USA. Association for Computing Machinery.
156
+ Shengyao Zhuang, Honglei Zhuang, Bevan Koopman, and Guido Zuccon. 2024. A setwise approach for effective and highly efficient zero-shot ranking with large language models. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR 2024, page 38-47. Association for Computing Machinery.
157
+
158
+ # A Appendix
159
+
160
+ In this appendix, we present a comparison of different methods across the BEIR and TREC datasets. Each table follows the same structure and reports NDCG@10 (Normalized Discounted Cumulative Gain), the number of inferences, and the number of comparisons (#Inferences and #Comparisons) for each method. Latency (in seconds) is also reported for Quicksort and Heapsort over the BEIR suite and corresponds to end-to-end PRP execution over each dataset on an A100 40GB GPU. Quicksort is analyzed using four pivot selection strategies: Hoare's original method, middle-element, random, and median-of-three (Hoare, 1962; Sedgewick, 1975).
161
+
162
+ To highlight performance differences, we emphasize the best-performing algorithm for each dataset and LLM in black, while the second-best is underlined. Additionally, the tables distinguish two cost measures: (1) #Inferences, the number of inference calls actually made to the LLM (taking caching and batching into account); and (2) #Comparisons, the total number of pairwise comparisons performed, where cached comparisons reuse precomputed results and avoid additional inferences. All results are presented with batch sizes of 2 and 128 to show batch inference efficiency.
163
+
164
+ <table><tr><td rowspan="2">#</td><td rowspan="2">Methods</td><td colspan="4">dbpedia</td><td colspan="4">nfcorpus</td><td colspan="4">fiqa</td></tr><tr><td>NDCG@10 #comp</td><td>#inf</td><td>Lat.</td><td>NDCG@10 #comp</td><td>#inf</td><td>Lat.</td><td>NDCG@10 #comp</td><td>#inf</td><td>Lat.</td><td>NDCG@10 #comp</td><td>#inf</td><td>Lat.</td></tr><tr><td>BM25</td><td></td><td>0.318</td><td>-</td><td>-</td><td>-</td><td>0.322</td><td>-</td><td>-</td><td>-</td><td>0.240</td><td>-</td><td>-</td><td>-</td></tr><tr><td rowspan="11">Plan-5-large</td><td>heapsort #</td><td>0.413</td><td>225.1</td><td>225.1</td><td>24.8</td><td>0.335</td><td>160.0</td><td>160.0</td><td>18.4</td><td>0.313</td><td>200.3</td><td>200.3</td><td>26.1</td></tr><tr><td>quicksort (original, b=2)</td><td>0.403</td><td>245.3</td><td>126.8</td><td>34.1</td><td>0.321</td><td>171.0</td><td>88.9</td><td>16.8</td><td>0.284</td><td>253.3</td><td>130.8</td><td>27.7</td></tr><tr><td>quicksort (original, b=128)</td><td>0.403</td><td>245.3</td><td>13.2</td><td>4.7</td><td>0.321</td><td>171.0</td><td>11.1</td><td>3.0</td><td>0.284</td><td>253.3</td><td>13.6</td><td>4.8</td></tr><tr><td>quicksort (random, b=2)</td><td>0.414</td><td>236.9</td><td>122.4</td><td>-</td><td>0.322</td><td>181.3</td><td>94.1</td><td>-</td><td>0.282</td><td>246.0</td><td>127.0</td><td>-</td></tr><tr><td>quicksort (random, b=128)</td><td>0.405</td><td>241.5</td><td>13.0</td><td>-</td><td>0.322</td><td>171.6</td><td>10.9</td><td>-</td><td>0.289</td><td>246.6</td><td>13.2</td><td>-</td></tr><tr><td>quicksort (middle, b=2)</td><td>0.410</td><td>231.9</td><td>119.9</td><td>-</td><td>0.315</td><td>168.9</td><td>87.8</td><td>-</td><td>0.277</td><td>243.2</td><td>125.6</td><td>-</td></tr><tr><td>quicksort (middle, b=128)</td><td>0.410</td><td>231.9</td><td>12.8</td><td>-</td><td>0.315</td><td>168.9</td><td>10.9</td><td>-</td><td>0.277</td><td>243.2</td><td>13.2</td><td>-</td></tr><tr><td>quicksort (median of three, b=2)</td><td>0.414</td><td>255.2</td><td>115.6</td><td>-</td><td>0.326</td><td>187.0</td><td>83.7</td><td>-</td><td>0.295</td><td>284.9</td><td>128.7</td><td>-</td></tr><tr><td>quicksort (median of three, b=128)</td><td>0.414</td><td>255.2</td><td>12.3</td><td>-</td><td>0.326</td><td>187.0</td><td>10.5</td><td>-</td><td>0.295</td><td>284.9</td><td>13.1</td><td>-</td></tr><tr><td>bubblesort (classic)</td><td>0.415</td><td>777.6</td><td>777.6</td><td>-</td><td>0.343</td><td>593.9</td><td>593.9</td><td>-</td><td>0.295</td><td>662.1</td><td>662.1</td><td>-</td></tr><tr><td>bubblesort (cached)</td><td>0.415</td><td>777.6</td><td>360.4</td><td>-</td><td>0.343</td><td>593.9</td><td>242.2</td><td>-</td><td>0.295</td><td>662.1</td><td>235.3</td><td>-</td></tr><tr><td rowspan="11">Plan-5-xl</td><td>heapsort</td><td>0.419</td><td>229.3</td><td>229.3</td><td>-</td><td>0.353</td><td>144.9</td><td>144.9</td><td>-</td><td>0.361</td><td>224.5</td><td>224.5</td><td>-</td></tr><tr><td>quicksort (original, b=2)</td><td>0.404</td><td>238.6</td><td>123.3</td><td>-</td><td>0.345</td><td>160.6</td><td>83.7</td><td>-</td><td>0.338</td><td>209.1</td><td>108.3</td><td>-</td></tr><tr><td>quicksort (original, b=128)</td><td>0.404</td><td>238.6</td><td>12.7</td><td>-</td><td>0.345</td><td>160.6</td><td>10.9</td><td>-</td><td>0.338</td><td>209.1</td><td>11.9</td><td>-</td></tr><tr><td>quicksort (random, 
b=2)</td><td>0.412</td><td>221.6</td><td>114.7</td><td>-</td><td>0.343</td><td>168.7</td><td>87.6</td><td>-</td><td>0.345</td><td>211.3</td><td>109.5</td><td>-</td></tr><tr><td>quicksort (random, b=128)</td><td>0.411</td><td>230.8</td><td>12.4</td><td>-</td><td>0.344</td><td>159.0</td><td>10.5</td><td>-</td><td>0.338</td><td>211.3</td><td>12.0</td><td>-</td></tr><tr><td>quicksort (middle, b=2)</td><td>0.410</td><td>221.3</td><td>114.5</td><td>-</td><td>0.353</td><td>157.6</td><td>82.1</td><td>-</td><td>0.341</td><td>205.4</td><td>106.5</td><td>-</td></tr><tr><td>quicksort (middle, b=128)</td><td>0.410</td><td>221.3</td><td>12.2</td><td>-</td><td>0.353</td><td>157.6</td><td>10.4</td><td>-</td><td>0.341</td><td>205.4</td><td>11.9</td><td>-</td></tr><tr><td>quicksort (median of three, b=2)</td><td>0.413</td><td>234.6</td><td>106.3</td><td>-</td><td>0.349</td><td>184.9</td><td>82.8</td><td>-</td><td>0.357</td><td>226.6</td><td>102.7</td><td>-</td></tr><tr><td>quicksort (median of three, b=128)</td><td>0.413</td><td>234.6</td><td>11.6</td><td>-</td><td>0.349</td><td>184.9</td><td>10.3</td><td>-</td><td>0.357</td><td>226.6</td><td>11.1</td><td>-</td></tr><tr><td>bubblesort (classic)</td><td>0.420</td><td>788.2</td><td>788.2</td><td>-</td><td>0.351</td><td>443.8</td><td>443.8</td><td>-</td><td>0.355</td><td>712.8</td><td>712.8</td><td>-</td></tr><tr><td>bubblesort (cached)</td><td>0.420</td><td>788.1</td><td>376.0</td><td>-</td><td>0.351</td><td>443.8</td><td>189.6</td><td>-</td><td>0.355</td><td>712.8</td><td>332.5</td><td>-</td></tr></table>
165
+
166
+ Table 2: Comparison of different methods across DBPedia, NFCorpus, and FiQA datasets.
167
+
168
+ <table><tr><td rowspan="2">#</td><td rowspan="2">Methods</td><td colspan="4">scifact</td><td colspan="4">trec-covid</td><td colspan="4">touche2020</td></tr><tr><td>NDCG@10</td><td>#comp</td><td>#inf</td><td>Lat.</td><td>NDCG@10</td><td>#comp</td><td>#inf</td><td>Lat.</td><td>NDCG@10</td><td>#comp</td><td>#inf</td><td>Lat.</td></tr><tr><td>BM25</td><td></td><td>0.679</td><td>-</td><td>-</td><td>-</td><td>0.595</td><td>-</td><td>-</td><td>-</td><td>0.442</td><td>-</td><td>-</td><td>-</td></tr><tr><td rowspan="11">Plan-t5-large</td><td>heapsort</td><td>0.675</td><td>222.4</td><td>222.4</td><td>26.9</td><td>0.753</td><td>241.0</td><td>241.0</td><td>28.0</td><td>0.332</td><td>221.0</td><td>221.0</td><td>26.0</td></tr><tr><td>quicksort (original, b=2)</td><td>0.579</td><td>211.4</td><td>109.5</td><td>25.3</td><td>0.752</td><td>245.3</td><td>126.9</td><td>26.9</td><td>0.268</td><td>273.6</td><td>141.0</td><td>34.9</td></tr><tr><td>quicksort (original, b=128)</td><td>0.579</td><td>211.4</td><td>12.0</td><td>4.3</td><td>0.752</td><td>245.3</td><td>13.6</td><td>5.2</td><td>0.268</td><td>273.6</td><td>13.9</td><td>5.6</td></tr><tr><td>quicksort (random, b=2)</td><td>0.596</td><td>224.7</td><td>116.2</td><td>-</td><td>0.759</td><td>243.8</td><td>126.0</td><td>-</td><td>0.270</td><td>275.2</td><td>142.0</td><td>-</td></tr><tr><td>quicksort (random, b=128)</td><td>0.611</td><td>218.5</td><td>12.3</td><td>-</td><td>0.755</td><td>243.7</td><td>13.6</td><td>-</td><td>0.256</td><td>273.0</td><td>13.4</td><td>-</td></tr><tr><td>quicksort (middle, b=2)</td><td>0.597</td><td>211.2</td><td>109.4</td><td>-</td><td>0.763</td><td>235.5</td><td>121.8</td><td>-</td><td>0.269</td><td>253.2</td><td>130.8</td><td>-</td></tr><tr><td>quicksort (middle, b=128)</td><td>0.597</td><td>211.2</td><td>11.8</td><td>-</td><td>0.763</td><td>235.5</td><td>13.0</td><td>-</td><td>0.269</td><td>253.2</td><td>13.4</td><td>-</td></tr><tr><td>quicksort (median of three, b=2)</td><td>0.637</td><td>237.1</td><td>107.6</td><td>-</td><td>0.763</td><td>256.0</td><td>115.1</td><td>-</td><td>0.274</td><td>289.4</td><td>131.6</td><td>-</td></tr><tr><td>quicksort (median of three, b=128)</td><td>0.637</td><td>237.1</td><td>11.4</td><td>-</td><td>0.763</td><td>256.0</td><td>12.9</td><td>-</td><td>0.274</td><td>289.4</td><td>13.0</td><td>-</td></tr><tr><td>bubblesort (classic)</td><td>0.692</td><td>805.7</td><td>805.7</td><td>-</td><td>0.718</td><td>890.1</td><td>890.1</td><td>-</td><td>0.447</td><td>845.4</td><td>845.4</td><td>-</td></tr><tr><td>bubblesort (cached)</td><td>0.692</td><td>805.7</td><td>284.9</td><td>-</td><td>0.718</td><td>890.1</td><td>437.9</td><td>-</td><td>0.447</td><td>845.4</td><td>332.8</td><td>-</td></tr><tr><td rowspan="11">Plan-t5-xl</td><td>heapsort</td><td>0.710</td><td>197.5</td><td>197.5</td><td>-</td><td>0.783</td><td>249.5</td><td>249.5</td><td>-</td><td>0.284</td><td>244.3</td><td>244.3</td><td>-</td></tr><tr><td>quicksort (original, b=2)</td><td>0.634</td><td>206.9</td><td>107.2</td><td>-</td><td>0.761</td><td>225.4</td><td>116.6</td><td>-</td><td>0.261</td><td>234.7</td><td>121.3</td><td>-</td></tr><tr><td>quicksort (original, b=128)</td><td>0.634</td><td>206.9</td><td>11.6</td><td>-</td><td>0.761</td><td>225.4</td><td>12.5</td><td>-</td><td>0.261</td><td>234.7</td><td>12.6</td><td>-</td></tr><tr><td>quicksort (random, b=2)</td><td>0.646</td><td>219.0</td><td>113.3</td><td>-</td><td>0.777</td><td>243.4</td><td>125.6</td><td>-</td><td>0.285</td><td>216.3</td><td>112.1</td><td>-</td></tr><tr><td>quicksort (random, 
b=128)</td><td>0.639</td><td>211.8</td><td>12.0</td><td>-</td><td>0.772</td><td>222.4</td><td>12.8</td><td>-</td><td>0.265</td><td>236.7</td><td>12.9</td><td>-</td></tr><tr><td>quicksort (middle, b=2)</td><td>0.642</td><td>200.4</td><td>103.9</td><td>-</td><td>0.777</td><td>239.4</td><td>123.6</td><td>-</td><td>0.277</td><td>214.7</td><td>111.0</td><td>-</td></tr><tr><td>quicksort (middle, b=128)</td><td>0.642</td><td>200.4</td><td>11.5</td><td>-</td><td>0.777</td><td>239.4</td><td>12.4</td><td>-</td><td>0.277</td><td>214.7</td><td>11.9</td><td>-</td></tr><tr><td>quicksort (median of three, b=2)</td><td>0.663</td><td>235.1</td><td>106.7</td><td>-</td><td>0.775</td><td>250.3</td><td>113.3</td><td>-</td><td>0.283</td><td>232.0</td><td>105.1</td><td>-</td></tr><tr><td>quicksort (median of three, b=128)</td><td>0.663</td><td>235.1</td><td>11.4</td><td>-</td><td>0.775</td><td>250.3</td><td>12.4</td><td>-</td><td>0.283</td><td>232.0</td><td>11.4</td><td>-</td></tr><tr><td>bubblesort (classic)</td><td>0.713</td><td>581.9</td><td>581.9</td><td>-</td><td>0.748</td><td>874.5</td><td>874.5</td><td>-</td><td>0.428</td><td>869.4</td><td>869.4</td><td>-</td></tr><tr><td>bubblesort (cached)</td><td>0.713</td><td>581.9</td><td>217.9</td><td>-</td><td>0.748</td><td>874.5</td><td>510.9</td><td>-</td><td>0.428</td><td>869.4</td><td>467.7</td><td>-</td></tr></table>
169
+
170
+ Table 3: Comparison of different methods across SciFact, TREC-COVID, and Touche2020 datasets.
171
+
172
+ <table><tr><td rowspan="2">#</td><td rowspan="2">Methods</td><td colspan="3">TREC DL 2019</td><td colspan="3">TREC DL 2020</td></tr><tr><td>NDCG@10</td><td>#Comparisons</td><td>#Inferences</td><td>NDCG@10</td><td>#Comparisons</td><td>#Inferences</td></tr><tr><td colspan="2">BM25</td><td>0.510</td><td>-</td><td>-</td><td>0.479</td><td>-</td><td>-</td></tr><tr><td rowspan="11">Flan-t5-large</td><td>heapsort</td><td>0.650</td><td>230.9</td><td>230.9</td><td>0.626</td><td>226.5</td><td>226.5</td></tr><tr><td>quicksort (original, b=2)</td><td>0.637</td><td>249.0</td><td>128.8</td><td>0.588</td><td>237.1</td><td>122.7</td></tr><tr><td>quicksort (original, b=128)</td><td>0.637</td><td>249.0</td><td>14.1</td><td>0.588</td><td>237.1</td><td>13.5</td></tr><tr><td>quicksort (random, b=2)</td><td>0.639</td><td>236.7</td><td>122.3</td><td>0.587</td><td>236.7</td><td>122.4</td></tr><tr><td>quicksort (random, b=128)</td><td>0.650</td><td>260.5</td><td>13.7</td><td>0.580</td><td>240.0</td><td>12.8</td></tr><tr><td>quicksort (middle, b=2)</td><td>0.650</td><td>231.1</td><td>119.6</td><td>0.594</td><td>235.5</td><td>121.8</td></tr><tr><td>quicksort (middle, b=128)</td><td>0.650</td><td>231.1</td><td>13.5</td><td>0.594</td><td>235.5</td><td>13.0</td></tr><tr><td>quicksort (median of three, b=2)</td><td>0.650</td><td>276.0</td><td>124.8</td><td>0.600</td><td>259.5</td><td>117.0</td></tr><tr><td>quicksort (median of three, b=128)</td><td>0.650</td><td>276.0</td><td>13.2</td><td>0.600</td><td>259.5</td><td>12.8</td></tr><tr><td>bubblesort (classic)</td><td>0.634</td><td>843.7</td><td>843.7</td><td>0.586</td><td>777.2</td><td>777.2</td></tr><tr><td>bubblesort (cached)</td><td>0.634</td><td>843.7</td><td>388.3</td><td>0.586</td><td>777.2</td><td>357.1</td></tr><tr><td rowspan="11">Flan-t5-x1</td><td>heapsort</td><td>0.706</td><td>242.0</td><td>242.0</td><td>0.689</td><td>244.9</td><td>244.9</td></tr><tr><td>quicksort (original, b=2)</td><td>0.697</td><td>266.6</td><td>137.5</td><td>0.672</td><td>250.6</td><td>129.3</td></tr><tr><td>quicksort (original, b=128)</td><td>0.697</td><td>266.6</td><td>13.9</td><td>0.672</td><td>250.6</td><td>12.9</td></tr><tr><td>quicksort (random, b=2)</td><td>0.694</td><td>232.3</td><td>120.2</td><td>0.676</td><td>239.0</td><td>123.5</td></tr><tr><td>quicksort (random, b=128)</td><td>0.697</td><td>257.3</td><td>13.2</td><td>0.676</td><td>237.6</td><td>12.7</td></tr><tr><td>quicksort (middle, b=2)</td><td>0.703</td><td>230.5</td><td>119.4</td><td>0.668</td><td>232.9</td><td>120.5</td></tr><tr><td>quicksort (middle, b=128)</td><td>0.703</td><td>230.5</td><td>13.1</td><td>0.668</td><td>232.9</td><td>12.6</td></tr><tr><td>quicksort (median of three, b=2)</td><td>0.696</td><td>243.5</td><td>110.5</td><td>0.682</td><td>239.6</td><td>108.9</td></tr><tr><td>quicksort (median of three, b=128)</td><td>0.696</td><td>243.5</td><td>11.9</td><td>0.682</td><td>239.6</td><td>11.8</td></tr><tr><td>bubblesort (classic)</td><td>0.684</td><td>887.8</td><td>887.8</td><td>0.670</td><td>869.5</td><td>869.5</td></tr><tr><td>bubblesort (cached)</td><td>0.684</td><td>887.8</td><td>544.9</td><td>0.670</td><td>869.5</td><td>542.6</td></tr><tr><td rowspan="11">Flan-t5-x1</td><td>heapsort</td><td>0.702</td><td>238.9</td><td>238.9</td><td>0.688</td><td>239.4</td><td>239.4</td></tr><tr><td>quicksort (original, b=2)</td><td>0.677</td><td>265.9</td><td>137.0</td><td>0.680</td><td>234.7</td><td>121.3</td></tr><tr><td>quicksort (original, 
b=128)</td><td>0.677</td><td>265.9</td><td>13.6</td><td>0.680</td><td>234.7</td><td>12.7</td></tr><tr><td>quicksort (random, b=2)</td><td>0.691</td><td>239.4</td><td>124.0</td><td>0.678</td><td>228.3</td><td>117.9</td></tr><tr><td>quicksort (random, b=128)</td><td>0.685</td><td>244.7</td><td>12.8</td><td>0.674</td><td>227.4</td><td>12.3</td></tr><tr><td>quicksort (middle, b=2)</td><td>0.688</td><td>226.3</td><td>117.0</td><td>0.677</td><td>229.2</td><td>118.5</td></tr><tr><td>quicksort (middle, b=128)</td><td>0.688</td><td>226.3</td><td>12.3</td><td>0.677</td><td>229.2</td><td>12.3</td></tr><tr><td>quicksort (median of three, b=2)</td><td>0.686</td><td>254.8</td><td>116.3</td><td>0.688</td><td>230.4</td><td>104.7</td></tr><tr><td>quicksort (median of three, b=128)</td><td>0.686</td><td>254.8</td><td>11.9</td><td>0.688</td><td>230.4</td><td>11.4</td></tr><tr><td>bubblesort (classic)</td><td>0.679</td><td>866.2</td><td>866.2</td><td>0.680</td><td>827.1</td><td>827.1</td></tr><tr><td>bubblesort (cached)</td><td>0.679</td><td>866.2</td><td>532.1</td><td>0.680</td><td>827.1</td><td>465.0</td></tr><tr><td rowspan="11">Meta-Llama-3-BP-Instruct</td><td>heapsort</td><td>0.662</td><td>235.0</td><td>235.0</td><td>0.615</td><td>231.9</td><td>231.9</td></tr><tr><td>quicksort (original, b=2)</td><td>0.645</td><td>266.5</td><td>137.4</td><td>0.576</td><td>235.5</td><td>121.8</td></tr><tr><td>quicksort (original, b=128)</td><td>0.645</td><td>266.5</td><td>13.5</td><td>0.576</td><td>235.5</td><td>12.9</td></tr><tr><td>quicksort (random, b=2)</td><td>0.663</td><td>231.3</td><td>119.8</td><td>0.580</td><td>231.7</td><td>119.8</td></tr><tr><td>quicksort (random, b=128)</td><td>0.660</td><td>219.0</td><td>12.8</td><td>0.585</td><td>232.9</td><td>12.8</td></tr><tr><td>quicksort (middle, b=2)</td><td>0.640</td><td>220.9</td><td>114.4</td><td>0.564</td><td>228.1</td><td>118.0</td></tr><tr><td>quicksort (middle, b=128)</td><td>0.640</td><td>220.9</td><td>12.6</td><td>0.564</td><td>228.1</td><td>12.6</td></tr><tr><td>quicksort (median of three, b=2)</td><td>0.650</td><td>236.0</td><td>106.3</td><td>0.594</td><td>244.3</td><td>110.1</td></tr><tr><td>quicksort (median of three, b=128)</td><td>0.650</td><td>236.0</td><td>12.0</td><td>0.594</td><td>244.3</td><td>12.3</td></tr><tr><td>bubblesort (classic)</td><td>0.641</td><td>822.5</td><td>822.5</td><td>0.600</td><td>797.6</td><td>797.6</td></tr><tr><td>bubblesort (cached)</td><td>0.641</td><td>822.5</td><td>389.4</td><td>0.600</td><td>797.6</td><td>365.9</td></tr><tr><td rowspan="11">Mistral-TB-Instruct&gt;0.1</td><td>heapsort</td><td>0.559</td><td>200.3</td><td>200.3</td><td>0.513</td><td>190.1</td><td>190.1</td></tr><tr><td>quicksort (original, b=2)</td><td>0.578</td><td>278.9</td><td>143.7</td><td>0.529</td><td>276.2</td><td>142.2</td></tr><tr><td>quicksort (original, b=128)</td><td>0.578</td><td>278.9</td><td>14.0</td><td>0.529</td><td>276.2</td><td>13.4</td></tr><tr><td>quicksort (random, b=2)</td><td>0.593</td><td>293.8</td><td>150.9</td><td>0.511</td><td>271.5</td><td>139.8</td></tr><tr><td>quicksort (random, b=128)</td><td>0.573</td><td>279.7</td><td>12.9</td><td>0.524</td><td>263.9</td><td>13.2</td></tr><tr><td>quicksort (middle, b=2)</td><td>0.595</td><td>257.3</td><td>132.7</td><td>0.531</td><td>249.3</td><td>128.6</td></tr><tr><td>quicksort (middle, b=128)</td><td>0.595</td><td>257.3</td><td>13.5</td><td>0.531</td><td>249.3</td><td>12.9</td></tr><tr><td>quicksort (median of three, 
b=2)</td><td>0.612</td><td>292.1</td><td>132.5</td><td>0.538</td><td>292.5</td><td>133.0</td></tr><tr><td>quicksort (median of three, b=128)</td><td>0.612</td><td>292.1</td><td>13.1</td><td>0.538</td><td>292.5</td><td>12.9</td></tr><tr><td>bubblesort (classic)</td><td>0.587</td><td>631.0</td><td>631.0</td><td>0.539</td><td>578.7</td><td>578.7</td></tr><tr><td>bubblesort (cached)</td><td>0.587</td><td>631.0</td><td>250.5</td><td>0.539</td><td>578.7</td><td>223.4</td></tr></table>
173
+
174
+ Table 4: Comparison of methods for TREC DL 2019 and TREC DL 2020.
areoptimalalgorithmsstilloptimalrethinkingsortinginllmbasedpairwiserankingwithbatchingandcaching/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f92d9c293e45397ebd21d4bcd49e839fbd37924d4a3ea734233ed73e4cc02b81
3
+ size 950125
areoptimalalgorithmsstilloptimalrethinkingsortinginllmbasedpairwiserankingwithbatchingandcaching/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f1fd35c7a6005be9aeb20bba179d4258f7b235431ece2ed3bb6c3aeaf593bb54
3
+ size 197000
automaticdetectionofdyslexiabasedoneyemovementsduringreadinginrussian/fb2eb2b3-6585-4534-8f2c-29000957e5af_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c8a0e03d13064056809db66a0353bdb94e2196751fcce7a0c30d5901d240cd11
3
+ size 48460
automaticdetectionofdyslexiabasedoneyemovementsduringreadinginrussian/fb2eb2b3-6585-4534-8f2c-29000957e5af_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:19ac4f2fb57ad4d6c7331c39475293647f9bf1cebf2f523be57acec42b67d727
3
+ size 60164